# tests are fairly 'live' (but safe to run)
# set up authorized_keys for the logged-in user such
# that the user can log in as themselves before running tests
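#
# one possible way to satisfy that prerequisite (assumes a standard OpenSSH
# setup on the local machine; adjust key type and paths to your environment):
#
#   ssh-keygen -t rsa                                # only if no key exists yet
#   cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
#   ssh localhost true                               # should succeed without a password prompt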

import unittest
import getpass
import os
import shutil

import ansible.playbook
import ansible.utils as utils
import ansible.callbacks as ans_callbacks
import ansible.constants as C

EVENTS = []

class TestCallbacks(object):
    # using same callbacks class for both runner and playbook
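    # most hooks simply append a small record to the module-level EVENTS list
    # so that tests can inspect what happened during a run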

    def __init__(self):
        pass

    def set_playbook(self, playbook):
        self.playbook = playbook

    def on_no_hosts_remaining(self):
        pass

    def on_start(self):
        EVENTS.append('start')

    def on_skipped(self, host, item=None):
        EVENTS.append(['skipped', [host]])

    def on_import_for_host(self, host, filename):
        EVENTS.append(['import', [host, filename]])

    def on_error(self, host, msg):
        EVENTS.append(['stderr', [host, msg]])

    def on_not_import_for_host(self, host, missing_filename):
        pass

    def on_notify(self, host, handler):
        EVENTS.append(['notify', [host, handler]])

    def on_task_start(self, name, is_conditional):
        EVENTS.append(['task start', [name, is_conditional]])

    def on_failed(self, host, results, ignore_errors):
        EVENTS.append(['failed', [host, results, ignore_errors]])

    def on_ok(self, host, result):
        # delete certain info from host_result to make test comparisons easier
        host_result = result.copy()
        for k in ['ansible_job_id', 'results_file', 'md5sum', 'delta', 'start', 'end']:
            if k in host_result:
                del host_result[k]
        for k in host_result.keys():
            if k.startswith('facter_') or k.startswith('ohai_'):
                del host_result[k]
        EVENTS.append(['ok', [host, host_result]])

    def on_play_start(self, pattern):
        EVENTS.append(['play start', [pattern]])

    def on_async_ok(self, host, res, jid):
        EVENTS.append(['async ok', [host]])

    def on_async_poll(self, host, res, jid, clock):
        EVENTS.append(['async poll', [host]])

    def on_async_failed(self, host, res, jid):
        EVENTS.append(['async failed', [host]])

    def on_unreachable(self, host, msg):
        EVENTS.append(['failed/dark', [host, msg]])

    def on_setup(self):
        pass

    def on_no_hosts(self):
        pass


class TestPlaybook(unittest.TestCase):
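    # each test case drives a real PlayBook against the inventories under test/
    # (typically just localhost) and compares the aggregate stats that
    # PlayBook.run() returns against a hand-written expectation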

    def setUp(self):
        self.user = getpass.getuser()
        self.cwd = os.getcwd()
        self.test_dir = os.path.join(self.cwd, 'test')
        self.stage_dir = self._prepare_stage_dir()

        if os.path.exists('/tmp/ansible_test_data_copy.out'):
            os.unlink('/tmp/ansible_test_data_copy.out')
        if os.path.exists('/tmp/ansible_test_data_template.out'):
            os.unlink('/tmp/ansible_test_data_template.out')
        if os.path.exists('/tmp/ansible_test_messages.out'):
            os.unlink('/tmp/ansible_test_messages.out')
        if os.path.exists('/tmp/ansible_test_role_messages.out'):
            os.unlink('/tmp/ansible_test_role_messages.out')

    def _prepare_stage_dir(self):
        stage_path = os.path.join(self.test_dir, 'test_data')
        if os.path.exists(stage_path):
            shutil.rmtree(stage_path, ignore_errors=False)
            assert not os.path.exists(stage_path)
        os.makedirs(stage_path)
        assert os.path.exists(stage_path)
        return stage_path

    def _get_test_file(self, filename):
        # get a file inside the test input directory
        filename = os.path.join(self.test_dir, filename)
        assert os.path.exists(filename)
        return filename

    def _get_stage_file(self, filename):
        # get a file inside the test output directory
        filename = os.path.join(self.stage_dir, filename)
        return filename

    def _run(self, test_playbook, host_list='test/ansible_hosts', extra_vars=None):
        ''' run a playbook and return the results for localhost '''
        # this ensures tests are independent of each other
        global EVENTS
        ansible.playbook.SETUP_CACHE.clear()
        EVENTS = []

        self.test_callbacks = TestCallbacks()
        self.playbook = ansible.playbook.PlayBook(
            playbook         = test_playbook,
            host_list        = host_list,
            module_path      = 'library/',
            forks            = 1,
            timeout          = 5,
            remote_user      = self.user,
            remote_pass      = None,
            extra_vars       = extra_vars,
            stats            = ans_callbacks.AggregateStats(),
            callbacks        = self.test_callbacks,
            runner_callbacks = self.test_callbacks
        )
        result = self.playbook.run()
        return result
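
    # note: PlayBook.run() returns aggregate stats keyed by host name, e.g.
    # {'localhost': {'ok': ..., 'changed': ..., 'failures': ..., 'skipped': ..., 'unreachable': ...}},
    # which is the structure the tests below compare against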

    def test_playbook_vars(self):
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'test_playbook_vars', 'playbook.yml'),
            host_list='test/test_playbook_vars/hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        playbook.run()

    def _test_playbook_undefined_vars(self, playbook, fail_on_undefined):
        # save DEFAULT_UNDEFINED_VAR_BEHAVIOR so we can restore it at the end of the test
        saved_undefined_var_behavior = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
        C.DEFAULT_UNDEFINED_VAR_BEHAVIOR = fail_on_undefined

        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'test_playbook_undefined_vars', playbook),
            host_list='test/test_playbook_undefined_vars/hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        actual = playbook.run()

        C.DEFAULT_UNDEFINED_VAR_BEHAVIOR = saved_undefined_var_behavior

        # if different, this will output to screen
        print "**ACTUAL**"
        print utils.jsonify(actual, format=True)
        expected = {
            "localhost": {
                "changed": 0,
                "failures": 0,
                "ok": int(not fail_on_undefined) + 1,
                "skipped": 0,
                "unreachable": int(fail_on_undefined)
            }
        }
        print "**EXPECTED**"
        print utils.jsonify(expected, format=True)

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)

    #def test_playbook_undefined_vars1_ignore(self):
    #    self._test_playbook_undefined_vars('playbook1.yml', False)

    #def test_playbook_undefined_vars1_fail(self):
    #    self._test_playbook_undefined_vars('playbook1.yml', True)

    #def test_playbook_undefined_vars2_ignore(self):
    #    self._test_playbook_undefined_vars('playbook2.yml', False)

    #def test_playbook_undefined_vars2_fail(self):
    #    self._test_playbook_undefined_vars('playbook2.yml', True)

    def test_yaml_hosts_list(self):
        # Make sure playbooks support hosts: [host1, host2]
        # TODO: Actually run the play on more than one host
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'hosts_list.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        play = ansible.playbook.Play(playbook, playbook.playbook[0], os.getcwd())
        assert play.hosts == ';'.join(('host1', 'host2', 'host3'))

    def test_playbook_hash_replace(self):
        # save default hash behavior so we can restore it at the end of the test
        saved_hash_behavior = C.DEFAULT_HASH_BEHAVIOUR
        C.DEFAULT_HASH_BEHAVIOUR = "replace"

        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'test_hash_behavior', 'playbook.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        playbook.run()

        filename = '/tmp/ansible_test_messages.out'
        expected_lines = [
            "goodbye: Goodbye World!"
        ]
        self._compare_file_output(filename, expected_lines)

        filename = '/tmp/ansible_test_role_messages.out'
        expected_lines = [
            "inside_a_role: Indeed!"
        ]
        self._compare_file_output(filename, expected_lines)

        # restore default hash behavior
        C.DEFAULT_HASH_BEHAVIOUR = saved_hash_behavior
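
    # with hash_behaviour=merge (below), hashes defined at different precedence
    # levels are combined key by key instead of the higher-precedence hash simply
    # replacing the lower one, which is why the merged run sees the extra
    # 'hello' and 'goodbye' keys in its output files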

    def test_playbook_hash_merge(self):
        # save default hash behavior so we can restore it at the end of the test
        saved_hash_behavior = C.DEFAULT_HASH_BEHAVIOUR
        C.DEFAULT_HASH_BEHAVIOUR = "merge"

        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'test_hash_behavior', 'playbook.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        playbook.run()

        filename = '/tmp/ansible_test_messages.out'
        expected_lines = [
            "goodbye: Goodbye World!",
            "hello: Hello World!"
        ]
        self._compare_file_output(filename, expected_lines)

        filename = '/tmp/ansible_test_role_messages.out'
        expected_lines = [
            "goodbye: Goodbye World!",
            "hello: Hello World!",
            "inside_a_role: Indeed!"
        ]
        self._compare_file_output(filename, expected_lines)

        # restore default hash behavior
        C.DEFAULT_HASH_BEHAVIOUR = saved_hash_behavior
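
    # a failing task marked ignore_errors still shows up in the failure count
    # below, but the play is allowed to continue past it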

    def test_playbook_ignore_errors(self):
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'playbook-ignore-errors.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        actual = playbook.run()

        # if different, this will output to screen
        print "**ACTUAL**"
        print utils.jsonify(actual, format=True)
        expected = {
            "localhost": {
                "changed": 1,
                "failures": 1,
                "ok": 1,
                "skipped": 0,
                "unreachable": 0
            }
        }
        print "**EXPECTED**"
        print utils.jsonify(expected, format=True)

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)
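
    # changed_when lets a task override the changed/unchanged status reported by
    # the module, which is what the expected 'changed' count below exercises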

    def test_playbook_changed_when(self):
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'playbook-changed_when.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        actual = playbook.run()

        # if different, this will output to screen
        print "**ACTUAL**"
        print utils.jsonify(actual, format=True)
        expected = {
            "localhost": {
                "changed": 3,
                "failures": 0,
                "ok": 6,
                "skipped": 0,
                "unreachable": 0
            }
        }
        print "**EXPECTED**"
        print utils.jsonify(expected, format=True)

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)
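
    # failed_when, analogously, lets a task decide for itself what counts as a
    # failure, independent of the module's own success/failure result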

    def test_playbook_failed_when(self):
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'playbook-failed_when.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks
        )
        actual = playbook.run()

        # if different, this will output to screen
        print "**ACTUAL**"
        print utils.jsonify(actual, format=True)
        expected = {
            "localhost": {
                "changed": 2,
                "failures": 1,
                "ok": 2,
                "skipped": 0,
                "unreachable": 0
            }
        }
        print "**EXPECTED**"
        print utils.jsonify(expected, format=True)

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)
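
    # the next playbook is run in check mode (check=True) to verify that tasks
    # marked always_run still execute there, while the remaining tasks account
    # for the skipped count in the expected stats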

    def test_playbook_always_run(self):
        test_callbacks = TestCallbacks()
        playbook = ansible.playbook.PlayBook(
            playbook=os.path.join(self.test_dir, 'playbook-always-run.yml'),
            host_list='test/ansible_hosts',
            stats=ans_callbacks.AggregateStats(),
            callbacks=test_callbacks,
            runner_callbacks=test_callbacks,
            check=True
        )
        actual = playbook.run()

        # if different, this will output to screen
        print "**ACTUAL**"
        print utils.jsonify(actual, format=True)
        expected = {
            "localhost": {
                "changed": 4,
                "failures": 0,
                "ok": 4,
                "skipped": 8,
                "unreachable": 0
            }
        }
        print "**EXPECTED**"
        print utils.jsonify(expected, format=True)

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)

    def test_playbook_logging_non_ascii(self):
        pb = 'test/playbook-logging-non-ascii.yml'
        actual = self._run(pb)

        expected = {
            "localhost": {
                "changed": 3,
                "failures": 0,
                "ok": 3,
                "skipped": 0,
                "unreachable": 0
            }
        }

        assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)

    # Disabled for now as there are permissions issues that happen if you are not
    # the owner that created files in the archive.
    # def test_unarchive(self):
    #     pb = 'test/playbook-unarchive.yml'
    #     actual = self._run(pb)
    #
    #     expected = {
    #         "localhost": {
    #             "changed": 29,
    #             "failures": 0,
    #             "ok": 33,
    #             "skipped": 12,
    #             "unreachable": 0
    #         }
    #     }
    #
    #     assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)

    def _compare_file_output(self, filename, expected_lines):
        actual_lines = []
        with open(filename) as f:
            actual_lines = [l.strip() for l in f.readlines()]
            actual_lines = sorted(actual_lines)

        print "**ACTUAL**"
        print actual_lines

        print "**EXPECTED**"
        print expected_lines

        assert actual_lines == expected_lines