diff --git a/apel/db/loader/loader.py b/apel/db/loader/loader.py
index 962b3d4aa..d4d05e20e 100644
--- a/apel/db/loader/loader.py
+++ b/apel/db/loader/loader.py
@@ -93,10 +93,9 @@ def startup(self):
             log.warning("Check that the dbloader is not running, then remove the file.")
             raise LoaderException("The dbloader cannot start while pidfile exists.")
         try:
-            f = open(self._pidfile, "w")
-            f.write(str(os.getpid()))
-            f.write("\n")
-            f.close()
+            with open(self._pidfile, "w") as f:
+                f.write(str(os.getpid()))
+                f.write("\n")
         except IOError as e:
             log.warning("Failed to create pidfile %s: %s", self._pidfile, e)
diff --git a/apel/db/unloader.py b/apel/db/unloader.py
index ddc9ffeb0..fbd88ba06 100644
--- a/apel/db/unloader.py
+++ b/apel/db/unloader.py
@@ -263,7 +263,6 @@ def _write_xml(self, records):
         self._msgq.add(buf.getvalue())
         buf.close()
-        del buf
 
     def _write_apel(self, records):
         '''
@@ -279,7 +278,7 @@ def _write_apel(self, records):
         self._msgq.add(buf.getvalue())
         buf.close()
-        del buf
+
 
 def get_start_of_previous_month(dt):
     '''
diff --git a/bin/parser.py b/bin/parser.py
index 7b66ea614..b9a416d7a 100644
--- a/bin/parser.py
+++ b/bin/parser.py
@@ -201,17 +201,14 @@ def scan_dir(parser, dirpath, reparse, expr, apel_db, processed):
                 # format we will get IOError, empty files can
                 # give EOFError as well.
                 for method in (bz2.BZ2File, gzip.open, open):
-                    try:  # this is for Python < 2.5
-                        try:
-                            fp = method(abs_file, 'rb')
+                    try:
+                        with method(abs_file, 'rb') as fp:
                             parsed, total = parse_file(parser, apel_db, fp,
                                                        reparse)
-                            break
-                        except (IOError, EOFError):
-                            if method == open:
-                                raise
-                    finally:
-                        fp.close()
+                        break
+                    except (IOError, EOFError):
+                        if method == open:
+                            raise
             except IOError as e:
                 log.error('Cannot parse file %s: %s', item, e)
             except ApelDbException as e:
diff --git a/bin/retrieve_dns.py b/bin/retrieve_dns.py
index 8697026a6..acb650575 100644
--- a/bin/retrieve_dns.py
+++ b/bin/retrieve_dns.py
@@ -201,11 +201,10 @@ def dns_from_file(path):
     assume that they're DNs, but we'll check later.
     '''
-    dn_file = open(path)
-    dns = dn_file.readlines()
+    with open(path) as dn_file:
+        dns = dn_file.readlines()
     # get rid of any whitespace in the list of strings
     dns = [dn.strip() for dn in dns]
-    dn_file.close()
     return dns
@@ -293,7 +292,7 @@ def runprocess(config_file, log_config_file):
         log.warning('Is the URL correct?')
         fetch_failed = True
     except AttributeError:
-        # gocdb_url == None
+        # gocdb_url is None
         log.info("No GOCDB URL specified - won't fetch URLs.")
     except IOError as e:
         log.info("Failed to retrieve XML - is the URL correct?")
diff --git a/bin/summariser.py b/bin/summariser.py
index 5dd3df5fa..39a80aa59 100644
--- a/bin/summariser.py
+++ b/bin/summariser.py
@@ -100,10 +100,9 @@ def runprocess(db_config_file, config_file, log_config_file):
         print("Error initialising summariser: %s" % err)
         sys.exit(1)
     try:
-        f = open(pidfile, "w")
-        f.write(str(os.getpid()))
-        f.write("\n")
-        f.close()
+        with open(pidfile, "w") as f:
+            f.write(str(os.getpid()))
+            f.write("\n")
     except IOError as e:
         log.warning("Failed to create pidfile %s: %s", pidfile, e)
         # If we fail to create a pidfile, don't start the summariser
diff --git a/scripts/migrate_apel.py b/scripts/migrate_apel.py
index cf3998d23..b258f1c80 100755
--- a/scripts/migrate_apel.py
+++ b/scripts/migrate_apel.py
@@ -146,13 +146,13 @@ def copy_records(db1, db2, cutoff):
         role, group, vo = parse_fqan(fqan)
 
-        if global_user_name == None:
+        if global_user_name is None:
             global_user_name = 'None'
-        if role == None:
+        if role is None:
             role = 'None'
-        if group == None:
+        if group is None:
             group = 'None'
-        if vo == None:
+        if vo is None:
             vo = 'None'
 
         start_time = parse_timestamp(start_time)
diff --git a/test/test_aur_parser.py b/test/test_aur_parser.py
index a3927e601..8c9b4b343 100644
--- a/test/test_aur_parser.py
+++ b/test/test_aur_parser.py
@@ -97,7 +97,7 @@ def test_aur_parser(self):
             for field in ('Site', 'Month', 'Year', 'WallDuration',
                           'CpuDuration', 'NumberOfJobs'):
                 # Also need: 'NormalisedWallDuration', 'NormalisedCpuDuration'
-                self.assertTrue(field in cont, "Field '%s' not found" % field)
+                self.assertIn(field, cont, "Field '%s' not found" % field)
 
             for key in cases[aur]:
                 if isinstance(cont[key], datetime):
diff --git a/test/test_blah.py b/test/test_blah.py
index dd5c52e1b..0a10bb581 100644
--- a/test/test_blah.py
+++ b/test/test_blah.py
@@ -144,7 +144,7 @@ def test_multiple_fqans(self):
             for key in list(cases[line].keys()):
                 # Check all fields are present
-                self.assertTrue(key in cont, "Key '%s' not in record." % key)
+                self.assertIn(key, cont, "Key '%s' not in record." % key)
                 # Check values are correct
                 self.assertEqual(cont[key], cases[line][key],
                                  "'%s' != '%s' for key '%s'" %
diff --git a/test/test_car_parser.py b/test/test_car_parser.py
index dca7c49dd..09af24b64 100644
--- a/test/test_car_parser.py
+++ b/test/test_car_parser.py
@@ -170,13 +170,13 @@ def test_car_parser(self):
             cont = record._record_content
 
             # Mandatory fields - need checking.
-            self.assertTrue('Site' in cont)
-            self.assertTrue('Queue' in cont)
-            self.assertTrue('LocalJobId' in cont)
-            self.assertTrue('WallDuration' in cont)
-            self.assertTrue('CpuDuration' in cont)
-            self.assertTrue('StartTime' in cont)
-            self.assertTrue('EndTime' in cont)
+            self.assertIn('Site', cont)
+            self.assertIn('Queue', cont)
+            self.assertIn('LocalJobId', cont)
+            self.assertIn('WallDuration', cont)
+            self.assertIn('CpuDuration', cont)
+            self.assertIn('StartTime', cont)
+            self.assertIn('EndTime', cont)
 
             for key in list(cases[car].keys()):
                 if isinstance(cont[key], datetime.datetime):
diff --git a/test/test_hashing.py b/test/test_hashing.py
index 84903b5bc..e951659e3 100644
--- a/test/test_hashing.py
+++ b/test/test_hashing.py
@@ -16,9 +16,8 @@ def test_calculate_hash(self):
         data_hash = '3d1eb00cc63828b36882f076f35c8cdd'
 
         tmpname = tempfile.mktemp('hashtest')
-        fp = open(tmpname, 'wb')
-        fp.write(data)
-        fp.close()
+        with open(tmpname, 'wb') as fp:
+            fp.write(data)
 
         self.assertEqual(data_hash, calculate_hash(tmpname))
diff --git a/test/test_htcondor_parser.py b/test/test_htcondor_parser.py
index a9cc0fe4f..c96ace809 100644
--- a/test/test_htcondor_parser.py
+++ b/test/test_htcondor_parser.py
@@ -66,7 +66,7 @@ def test_parse_line_no_cputmult(self):
         self.assertEqual(cont['Infrastructure'], 'APEL-CREAM-HTCONDOR')
 
         for key in list(cases[line].keys()):
-            self.assertTrue(key in cont, "Key '%s' not in record." % key)
+            self.assertIn(key, cont, "Key '%s' not in record." % key)
 
         for key in list(cases[line].keys()):
             self.assertEqual(cont[key], cases[line][key],
@@ -125,7 +125,7 @@ def test_parse_line_with_cputmult(self):
         self.assertEqual(cont['Infrastructure'], 'APEL-CREAM-HTCONDOR')
 
         for key in list(cases[line].keys()):
-            self.assertTrue(key in cont, "Key '%s' not in record." % key)
+            self.assertIn(key, cont, "Key '%s' not in record." % key)
 
         for key in list(cases[line].keys()):
             self.assertEqual(cont[key], cases[line][key],
diff --git a/test/test_logging.py b/test/test_logging.py
index cd15b1c94..beced1fdd 100644
--- a/test/test_logging.py
+++ b/test/test_logging.py
@@ -50,9 +50,8 @@ def test_stdout_logging(self):
         # Only check bit after timestamp as that doesn't change.
        self.assertEqual(output[23:], " - test_logging - INFO - out\n")
 
-        f = open(self.path)
-        self.assertEqual(f.readline()[23:], " - test_logging - INFO - out\n")
-        f.close()
+        with open(self.path) as f:
+            self.assertEqual(f.readline()[23:], " - test_logging - INFO - out\n")
 
     def test_boring_logging(self):
         """Check that logging without handlers at least runs without errors."""
diff --git a/test/test_lsf.py b/test/test_lsf.py
index d08a0a4b1..246e01572 100644
--- a/test/test_lsf.py
+++ b/test/test_lsf.py
@@ -107,7 +107,7 @@ def test_parse(self):
             self.assertEqual(cont['Infrastructure'], 'APEL-CREAM-LSF')
 
             for field in list(cases[line].keys()):
-                self.assertTrue(field in cont, "Field '%s' not in record: %s" % (field, cont))
+                self.assertIn(field, cont, "Field '%s' not in record: %s" % (field, cont))
 
             for key in list(cases[line].keys()):
                 self.assertEqual(cont[key], cases[line][key], "%s != %s for key %s" % (cont[key], cases[line][key], key))
diff --git a/test/test_pbs.py b/test/test_pbs.py
index 17318c7d6..bf3470fd3 100644
--- a/test/test_pbs.py
+++ b/test/test_pbs.py
@@ -114,7 +114,7 @@ def test_parse(self):
             for key in list(cases[line].keys()):
                 # Check all fields are present
-                self.assertTrue(key in cont, "Key '%s' not in record." % key)
+                self.assertIn(key, cont, "Key '%s' not in record." % key)
                 # Check values are correct
                 self.assertEqual(cont[key], cases[line][key],
                                  "%s != %s for key %s" % (cont[key], cases[line][key], key))
diff --git a/test/test_record_factory.py b/test/test_record_factory.py
index 64903cdc3..25680a6a2 100644
--- a/test/test_record_factory.py
+++ b/test/test_record_factory.py
@@ -32,7 +32,7 @@ def test_create_car(self):
             ''
         )
 
-        self.assertTrue(isinstance(record_list[0], JobRecord))
+        self.assertIsInstance(record_list[0], JobRecord)
 
     def test_create_aur(self):
        """Check that trying to create an AUR record fails."""
@@ -48,7 +48,7 @@ def test_create_star(self):
             '02/storagerecord">'
         )
 
-        self.assertTrue(isinstance(record_list[0], StorageRecord))
+        self.assertIsInstance(record_list[0], StorageRecord)
 
     def test_create_bad_xml(self):
         """Check that trying to create a record from non-record XML fails."""
@@ -63,36 +63,36 @@ def test_create_bad_apel(self):
     def test_create_jrs(self):
         """Check that creating job records returns JobRecords."""
         records = self._rf.create_records(self._jr_text)
-        self.assertTrue(len(records), 2)
+        self.assertEqual(len(records), 2)
         for record in records:
-            self.assertTrue(isinstance(record, JobRecord))
+            self.assertIsInstance(record, JobRecord)
 
     def test_create_srs(self):
         """Check that creating summary records returns SummaryRecords."""
         records = self._rf.create_records(self._sr_text)
-        self.assertTrue(len(records), 2)
+        self.assertEqual(len(records), 2)
         for record in records:
-            self.assertTrue(isinstance(record, SummaryRecord))
+            self.assertIsInstance(record, SummaryRecord)
 
     def test_create_normalised_summary(self):
         """Check that creating a normalised summary returns the right record."""
         records = self._rf.create_records(self._nsr_text)
-        self.assertTrue(isinstance(records[0], NormalisedSummaryRecord))
+        self.assertIsInstance(records[0], NormalisedSummaryRecord)
 
     def test_create_sync(self):
         """Check that creating a sync record returns a SyncRecord."""
         records = self._rf.create_records(self._sync_text)
-        self.assertTrue(isinstance(records[0], SyncRecord))
+        self.assertIsInstance(records[0], SyncRecord)
 
     def test_create_cloud(self):
         """Check that creating a cloud record returns a CloudRecord."""
         records = self._rf.create_records(self._cr_text)
-        self.assertTrue(isinstance(records[0], CloudRecord))
+        self.assertIsInstance(records[0], CloudRecord)
 
     def test_create_cloud_summary(self):
         """Check that creating a cloud summary returns a CloudSummaryRecord."""
         records = self._rf.create_records(self._csr_text)
-        self.assertTrue(isinstance(records[0], CloudSummaryRecord))
+        self.assertIsInstance(records[0], CloudSummaryRecord)
 
     def _get_msg_text(self):
         # Below, I've just got some test data hard-coded.
diff --git a/test/test_slurm.py b/test/test_slurm.py
index 9727ccace..4bf0f2ff6 100644
--- a/test/test_slurm.py
+++ b/test/test_slurm.py
@@ -115,7 +115,7 @@ def test_parse_line(self):
                 del cases[line]['Queue']
 
             for key in list(cases[line].keys()):
-                self.assertTrue(key in cont, "Key '%s' not in record." % key)
+                self.assertIn(key, cont, "Key '%s' not in record." % key)
             for key in list(cases[line].keys()):
                 self.assertEqual(cont[key], cases[line][key], "%s != %s for key %s."
                                  % (cont[key], cases[line][key], key))
diff --git a/test/test_star_parser.py b/test/test_star_parser.py
index d34b1d93d..41995981f 100644
--- a/test/test_star_parser.py
+++ b/test/test_star_parser.py
@@ -110,7 +110,7 @@ def test_star_parser(self):
             # Mandatory fields
             for field in ("RecordId", "CreateTime", "StorageSystem",
                           "StartTime", "EndTime", "ResourceCapacityUsed"):
-                self.assertTrue(field in cont, "Field '%s' not found" % field)
+                self.assertIn(field, cont, "Field '%s' not found" % field)
 
             for key in list(cases[star].keys()):
                 if isinstance(cont[key], datetime):