From a0e3cf9dbbd933df96a16df0c5f00dbe77fcd09b Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Mon, 29 Apr 2024 22:08:52 +0200
Subject: [PATCH 1/6] Add `poetry` compatibility, enhance logging, and remove
 `-no-pass`

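Packaging now goes through Poetry: pyproject.toml, poetry.lock, and
poetry.toml replace the bloodhound.py launcher script. Logging is expanded
across authentication and collection, and credential handling is unified
into a single ADAuthentication call, which removes the need for the
`-no-pass` flag and its separate password prompt.
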
---
 bloodhound.py                   |   5 -
 bloodhound/__init__.py          | 531 ++++++++++++--------
 bloodhound/ad/authentication.py | 510 +++++++++++++-------
 bloodhound/ad/computer.py       | 825 ++++++++++++++++++++------------
 bloodhound/ad/domain.py         | 713 +++++++++++++++++----------
 poetry.lock                     | 673 ++++++++++++++++++++++++++
 poetry.toml                     |   5 +
 pyproject.toml                  |  41 ++
 8 files changed, 2366 insertions(+), 937 deletions(-)
 delete mode 100755 bloodhound.py
 create mode 100644 poetry.lock
 create mode 100644 poetry.toml
 create mode 100644 pyproject.toml

diff --git a/bloodhound.py b/bloodhound.py
deleted file mode 100755
index a9e7cde..0000000
--- a/bloodhound.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /usr/bin/env python
-
-import bloodhound
-
-bloodhound.main()
diff --git a/bloodhound/__init__.py b/bloodhound/__init__.py
index 48875cf..db6be20 100644
--- a/bloodhound/__init__.py
+++ b/bloodhound/__init__.py
@@ -33,6 +33,8 @@
 """
 BloodHound.py is a Python port of BloodHound, designed to run on Linux and Windows.
 """
+
+
 class BloodHound(object):
     def __init__(self, ad):
         self.ad = ad
@@ -40,95 +42,174 @@ def __init__(self, ad):
         self.pdc = None
         self.sessions = []
 
-
     def connect(self):
         if len(self.ad.dcs()) == 0:
-            logging.error('Could not find a domain controller. Consider specifying a domain and/or DNS server.')
+            logging.error(
+                "Could not find a domain controller. Consider specifying a domain and/or DNS server."
+            )
             sys.exit(1)
 
         if not self.ad.baseDN:
-            logging.error('Could not figure out the domain to query. Please specify this manually with -d')
+            logging.error(
+                "Could not figure out the domain to query. Please specify this manually with -d"
+            )
             sys.exit(1)
 
         pdc = self.ad.dcs()[0]
-        logging.debug('Using LDAP server: %s', pdc)
-        logging.debug('Using base DN: %s', self.ad.baseDN)
+        logging.debug("Using LDAP server: %s", pdc)
+        logging.debug("Using base DN: %s", self.ad.baseDN)
 
         if len(self.ad.kdcs()) > 0:
             kdc = self.ad.auth.kdc
-            logging.debug('Using kerberos KDC: %s', kdc)
-            logging.debug('Using kerberos realm: %s', self.ad.realm())
+            logging.debug("Using kerberos KDC: %s", kdc)
+            logging.debug("Using kerberos realm: %s", self.ad.realm())
 
         # Create a domain controller object
         self.pdc = ADDC(pdc, self.ad)
         # Create an object resolver
         self.ad.create_objectresolver(self.pdc)
 
-
-    def run(self, collect, num_workers=10, disable_pooling=False, timestamp="", computerfile="", cachefile=None, exclude_dcs=False, fileNamePrefix=""):
+    def run(
+        self,
+        collect,
+        num_workers=10,
+        disable_pooling=False,
+        timestamp="",
+        computerfile="",
+        cachefile=None,
+        exclude_dcs=False,
+        fileNamePrefix="",
+    ):
         start_time = time.time()
         if cachefile:
             self.ad.load_cachefile(cachefile)
 
         # Check early if we should enumerate computers as well
-        do_computer_enum = any(method in collect for method in ['localadmin', 'session', 'loggedon', 'experimental', 'rdp', 'dcom', 'psremote'])
-
-        if 'group' in collect or 'objectprops' in collect or 'acl' in collect:
+        do_computer_enum = any(
+            method in collect
+            for method in [
+                "localadmin",
+                "session",
+                "loggedon",
+                "experimental",
+                "rdp",
+                "dcom",
+                "psremote",
+            ]
+        )
+
+        if "group" in collect or "objectprops" in collect or "acl" in collect:
             # Fetch domains for later, computers if needed
-            self.pdc.prefetch_info('objectprops' in collect, 'acl' in collect, cache_computers=do_computer_enum)
+            self.pdc.prefetch_info(
+                "objectprops" in collect,
+                "acl" in collect,
+                cache_computers=do_computer_enum,
+            )
             # Initialize enumerator
-            membership_enum = MembershipEnumerator(self.ad, self.pdc, collect, disable_pooling)
-            membership_enum.enumerate_memberships(timestamp=timestamp, fileNamePrefix=fileNamePrefix)
-        elif 'container' in collect:
+            membership_enum = MembershipEnumerator(
+                self.ad, self.pdc, collect, disable_pooling
+            )
+            membership_enum.enumerate_memberships(
+                timestamp=timestamp, fileNamePrefix=fileNamePrefix
+            )
+        elif "container" in collect:
             # Fetch domains for later, computers if needed
-            self.pdc.prefetch_info('objectprops' in collect, 'acl' in collect, cache_computers=do_computer_enum)
+            self.pdc.prefetch_info(
+                "objectprops" in collect,
+                "acl" in collect,
+                cache_computers=do_computer_enum,
+            )
             # Initialize enumerator
-            membership_enum = MembershipEnumerator(self.ad, self.pdc, collect, disable_pooling)
+            membership_enum = MembershipEnumerator(
+                self.ad, self.pdc, collect, disable_pooling
+            )
             membership_enum.do_container_collection(timestamp=timestamp)
         elif do_computer_enum:
             # We need to know which computers to query regardless
             # We also need the domains to have a mapping from NETBIOS -> FQDN for local admins
-            self.pdc.prefetch_info('objectprops' in collect, 'acl' in collect, cache_computers=True)
-        elif 'trusts' in collect:
+            self.pdc.prefetch_info(
+                "objectprops" in collect, "acl" in collect, cache_computers=True
+            )
+        elif "trusts" in collect:
             # Prefetch domains
-            self.pdc.get_domains('acl' in collect)
-        if 'trusts' in collect or 'acl' in collect or 'objectprops' in collect:
+            self.pdc.get_domains("acl" in collect)
+        if "trusts" in collect or "acl" in collect or "objectprops" in collect:
             trusts_enum = DomainEnumerator(self.ad, self.pdc)
-            trusts_enum.dump_domain(collect,timestamp=timestamp,fileNamePrefix=fileNamePrefix)
+            trusts_enum.dump_domain(
+                collect, timestamp=timestamp, fileNamePrefix=fileNamePrefix
+            )
         if do_computer_enum:
             # If we don't have a GC server, don't use it for deconflictation
             have_gc = len(self.ad.gcs()) > 0
-            computer_enum = ComputerEnumerator(self.ad, self.pdc, collect, do_gc_lookup=have_gc, computerfile=computerfile, exclude_dcs=exclude_dcs)
-            computer_enum.enumerate_computers(self.ad.computers, num_workers=num_workers, timestamp=timestamp, fileNamePrefix=fileNamePrefix)
+            computer_enum = ComputerEnumerator(
+                self.ad,
+                self.pdc,
+                collect,
+                do_gc_lookup=have_gc,
+                computerfile=computerfile,
+                exclude_dcs=exclude_dcs,
+            )
+            computer_enum.enumerate_computers(
+                self.ad.computers,
+                num_workers=num_workers,
+                timestamp=timestamp,
+                fileNamePrefix=fileNamePrefix,
+            )
         end_time = time.time()
-        minutes, seconds = divmod(int(end_time-start_time),60)
-        logging.info('Done in %02dM %02dS' % (minutes, seconds))
+        minutes, seconds = divmod(int(end_time - start_time), 60)
+        logging.info("Done in %02dM %02dS" % (minutes, seconds))
+
 
 def resolve_collection_methods(methods):
     """
     Convert methods (string) to list of validated methods to resolve
     """
-    valid_methods = ['group', 'localadmin', 'session', 'trusts', 'default', 'all', 'loggedon',
-                     'objectprops', 'experimental', 'acl', 'dcom', 'rdp', 'psremote', 'dconly',
-                     'container']
-    default_methods = ['group', 'localadmin', 'session', 'trusts']
+    valid_methods = [
+        "group",
+        "localadmin",
+        "session",
+        "trusts",
+        "default",
+        "all",
+        "loggedon",
+        "objectprops",
+        "experimental",
+        "acl",
+        "dcom",
+        "rdp",
+        "psremote",
+        "dconly",
+        "container",
+    ]
+    default_methods = ["group", "localadmin", "session", "trusts"]
     # Similar to SharpHound, All is not really all, it excludes loggedon
-    all_methods = ['group', 'localadmin', 'session', 'trusts', 'objectprops', 'acl', 'dcom', 'rdp', 'psremote', 'container']
+    all_methods = [
+        "group",
+        "localadmin",
+        "session",
+        "trusts",
+        "objectprops",
+        "acl",
+        "dcom",
+        "rdp",
+        "psremote",
+        "container",
+    ]
     # DC only, does not collect to computers
-    dconly_methods = ['group', 'trusts', 'objectprops', 'acl', 'container']
-    if ',' in methods:
-        method_list = [method.lower() for method in methods.split(',')]
+    dconly_methods = ["group", "trusts", "objectprops", "acl", "container"]
+    if "," in methods:
+        method_list = [method.lower() for method in methods.split(",")]
         validated_methods = []
         for method in method_list:
             if method not in valid_methods:
-                logging.error('Invalid collection method specified: %s', method)
+                logging.error("Invalid collection method specified: %s", method)
                 return False
 
-            if method == 'default':
+            if method == "default":
                 validated_methods += default_methods
-            elif method == 'all':
+            elif method == "all":
                 validated_methods += all_methods
-            elif method == 'dconly':
+            elif method == "dconly":
                 validated_methods += dconly_methods
             else:
                 validated_methods.append(method)
@@ -138,234 +219,279 @@ def resolve_collection_methods(methods):
         # It is only one
         method = methods.lower()
         if method in valid_methods:
-            if method == 'default':
+            if method == "default":
                 validated_methods += default_methods
-            elif method == 'all':
+            elif method == "all":
                 validated_methods += all_methods
-            elif method == 'dconly':
+            elif method == "dconly":
                 validated_methods += dconly_methods
             else:
                 validated_methods.append(method)
             return set(validated_methods)
         else:
-            logging.error('Invalid collection method specified: %s', method)
+            logging.error("Invalid collection method specified: %s", method)
             return False
 
+
 def main():
-#    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
+    #    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
 
     logger = logging.getLogger()
     logger.setLevel(logging.INFO)
     stream = logging.StreamHandler(sys.stderr)
     stream.setLevel(logging.DEBUG)
-    formatter = logging.Formatter('%(levelname)s: %(message)s')
-#    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
+    #    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
     stream.setFormatter(formatter)
     logger.addHandler(stream)
 
-    parser = argparse.ArgumentParser(add_help=True, description='Python based ingestor for BloodHound\nFor help or reporting issues, visit https://github.com/Fox-IT/BloodHound.py', formatter_class=argparse.RawDescriptionHelpFormatter)
-
-    parser.add_argument('-c',
-                        '--collectionmethod',
-                        action='store',
-                        default='Default',
-                        help='Which information to collect. Supported: Group, LocalAdmin, Session, '
-                             'Trusts, Default (all previous), DCOnly (no computer connections), DCOM, RDP,'
-                             'PSRemote, LoggedOn, Container, ObjectProps, ACL, All (all except LoggedOn). '
-                             'You can specify more than one by separating them with a comma. (default: Default)')
-    parser.add_argument('-d',
-                        '--domain',
-                        action='store',
-                        default='',
-                        help='Domain to query.')
-    parser.add_argument('-v',
-                        action='store_true',
-                        help='Enable verbose output')
-    helptext = 'Specify one or more authentication options. \n' \
-               'By default Kerberos authentication is used and NTLM is used as fallback. \n' \
-               'Kerberos tickets are automatically requested if a password or hashes are specified.'
-    auopts = parser.add_argument_group('authentication options', description=helptext)
-    auopts.add_argument('-u',
-                        '--username',
-                        action='store',
-                        help='Username. Format: username[@domain]; If the domain is unspecified, the current domain is used.')
-    auopts.add_argument('-p',
-                        '--password',
-                        action='store',
-                        help='Password')
-    auopts.add_argument('-k',
-                        '--kerberos',
-                        action='store_true',
-                        help='Use kerberos')
-    auopts.add_argument('--hashes',
-                        action='store',
-                        help='LM:NLTM hashes')
-    auopts.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
-    auopts.add_argument('-aesKey',
-                        action="store",
-                        metavar="hex key",
-                        help='AES key to use for Kerberos Authentication (128 or 256 bits)')
-    auopts.add_argument('--auth-method',
-                        choices=('auto','ntlm','kerberos'),
-                        default='auto',
-                        action='store',
-                        help='Authentication methods. Force Kerberos or NTLM only or use auto for Kerberos with NTLM fallback')
-    coopts = parser.add_argument_group('collection options')
-    coopts.add_argument('-ns',
-                        '--nameserver',
-                        action='store',
-                        help='Alternative name server to use for queries')
-    coopts.add_argument('--dns-tcp',
-                        action='store_true',
-                        help='Use TCP instead of UDP for DNS queries')
-    coopts.add_argument('--dns-timeout',
-                        action='store',
-                        type=int,
-                        default=3,
-                        help='DNS query timeout in seconds (default: 3)')
-    coopts.add_argument('-dc',
-                        '--domain-controller',
-                        metavar='HOST',
-                        action='store',
-                        help='Override which DC to query (hostname)')
-    coopts.add_argument('-gc',
-                        '--global-catalog',
-                        metavar='HOST',
-                        action='store',
-                        help='Override which GC to query (hostname)')
-    coopts.add_argument('-w',
-                        '--workers',
-                        action='store',
-                        type=int,
-                        default=10,
-                        help='Number of workers for computer enumeration (default: 10)')
-    coopts.add_argument('--exclude-dcs',
-                        action='store_true',
-                        help='Skip DCs during computer enumeration')
-    coopts.add_argument('--disable-pooling',
-                        action='store_true',
-                        help='Don\'t use subprocesses for ACL parsing (only for debugging purposes)')
-    coopts.add_argument('--disable-autogc',
-                        action='store_true',
-                        help='Don\'t automatically select a Global Catalog (use only if it gives errors)')
-    coopts.add_argument('--zip',
-                        action='store_true',
-                        help='Compress the JSON output files into a zip archive')
-    coopts.add_argument('--computerfile',
-                        action='store',
-                        help='File containing computer FQDNs to use as allowlist for any computer based methods')
-    coopts.add_argument('--cachefile',
-                        action='store',
-                        help='Cache file (experimental)')
-    coopts.add_argument('--use-ldaps',
-                        action='store_true',
-                        help='Use LDAP over TLS on port 636 by default')
-    coopts.add_argument('-op',
-                        '--outputprefix',
-                        metavar='PREFIX_NAME',
-                        action='store',
-                        help='String to prepend to output file names')
-
-
+    parser = argparse.ArgumentParser(
+        add_help=True,
+        description="Python based ingestor for BloodHound\nFor help or reporting issues, visit https://github.com/Fox-IT/BloodHound.py",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+
+    parser.add_argument(
+        "-c",
+        "--collectionmethod",
+        action="store",
+        default="Default",
+        help="Which information to collect. Supported: Group, LocalAdmin, Session, "
+        "Trusts, Default (all previous), DCOnly (no computer connections), DCOM, RDP,"
+        "PSRemote, LoggedOn, Container, ObjectProps, ACL, All (all except LoggedOn). "
+        "You can specify more than one by separating them with a comma. (default: Default)",
+    )
+    parser.add_argument(
+        "-d", "--domain", action="store", default="", help="Domain to query."
+    )
+    parser.add_argument("-v", action="store_true", help="Enable verbose output")
+    helptext = (
+        "Specify one or more authentication options. \n"
+        "By default Kerberos authentication is used and NTLM is used as fallback. \n"
+        "Kerberos tickets are automatically requested if a password or hashes are specified."
+    )
+    auopts = parser.add_argument_group("authentication options", description=helptext)
+    auopts.add_argument(
+        "-u",
+        "--username",
+        action="store",
+        help="Username. Format: username[@domain]; If the domain is unspecified, the current domain is used.",
+    )
+    auopts.add_argument("-p", "--password", action="store", help="Password")
+    auopts.add_argument("-k", "--kerberos", action="store_true", help="Use kerberos")
+    auopts.add_argument("--hashes", action="store", help="LM:NLTM hashes")
+    auopts.add_argument(
+        "-aesKey",
+        action="store",
+        metavar="hex key",
+        help="AES key to use for Kerberos Authentication (128 or 256 bits)",
+    )
+    auopts.add_argument(
+        "--auth-method",
+        choices=("auto", "ntlm", "kerberos"),
+        default="auto",
+        action="store",
+        help="Authentication methods. Force Kerberos or NTLM only or use auto for Kerberos with NTLM fallback",
+    )
+    coopts = parser.add_argument_group("collection options")
+    coopts.add_argument(
+        "-ns",
+        "--nameserver",
+        action="store",
+        help="Alternative name server to use for queries",
+    )
+    coopts.add_argument(
+        "--dns-tcp", action="store_true", help="Use TCP instead of UDP for DNS queries"
+    )
+    coopts.add_argument(
+        "--dns-timeout",
+        action="store",
+        type=int,
+        default=3,
+        help="DNS query timeout in seconds (default: 3)",
+    )
+    coopts.add_argument(
+        "-dc",
+        "--domain-controller",
+        metavar="HOST",
+        action="store",
+        help="Override which DC to query (hostname)",
+    )
+    coopts.add_argument(
+        "-gc",
+        "--global-catalog",
+        metavar="HOST",
+        action="store",
+        help="Override which GC to query (hostname)",
+    )
+    coopts.add_argument(
+        "-w",
+        "--workers",
+        action="store",
+        type=int,
+        default=10,
+        help="Number of workers for computer enumeration (default: 10)",
+    )
+    coopts.add_argument(
+        "--exclude-dcs",
+        action="store_true",
+        help="Skip DCs during computer enumeration",
+    )
+    coopts.add_argument(
+        "--disable-pooling",
+        action="store_true",
+        help="Don't use subprocesses for ACL parsing (only for debugging purposes)",
+    )
+    coopts.add_argument(
+        "--disable-autogc",
+        action="store_true",
+        help="Don't automatically select a Global Catalog (use only if it gives errors)",
+    )
+    coopts.add_argument(
+        "--zip",
+        action="store_true",
+        help="Compress the JSON output files into a zip archive",
+    )
+    coopts.add_argument(
+        "--computerfile",
+        action="store",
+        help="File containing computer FQDNs to use as allowlist for any computer based methods",
+    )
+    coopts.add_argument("--cachefile", action="store", help="Cache file (experimental)")
+    coopts.add_argument(
+        "--use-ldaps",
+        action="store_true",
+        help="Use LDAP over TLS on port 636 by default",
+    )
+    coopts.add_argument(
+        "-op",
+        "--outputprefix",
+        metavar="PREFIX_NAME",
+        action="store",
+        help="String to prepend to output file names",
+    )
 
     args = parser.parse_args()
 
     if args.v is True:
         logger.setLevel(logging.DEBUG)
 
-    if args.username is not None and args.password is not None:
-        logging.debug('Authentication: username/password')
-        auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain, auth_method=args.auth_method)
-    elif args.username is not None and args.password is None and args.hashes is None and args.aesKey is None and args.no_pass is not None:
-        args.password = getpass.getpass()
-        auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain, auth_method=args.auth_method)
-    elif args.username is None and (args.password is not None or args.hashes is not None):
-        logging.error('Authentication: password or hashes provided without username')
-        sys.exit(1)
-    elif args.hashes is not None and args.username is not None:
-        logging.debug('Authentication: NT hash')
-        lm, nt = args.hashes.split(":")
-        auth = ADAuthentication(lm_hash=lm, nt_hash=nt, username=args.username, domain=args.domain, auth_method=args.auth_method)
-    elif args.aesKey is not None and args.username is not None:
-        logging.debug('Authentication: Kerberos AES')
-        auth = ADAuthentication(username=args.username, domain=args.domain, aeskey=args.aesKey, auth_method=args.auth_method)
-    else:
-        if not args.kerberos:
-            parser.print_help()
+    # Initialize variables for LM and NT hashes
+    lm, nt = "", ""
+
+    # Only attempt to split hashes if they are provided
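+    # A valid value looks like
+    # "aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0"
+    # (LM hash, a colon, then the NT hash).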
+    if args.hashes:
+        try:
+            lm, nt = args.hashes.split(":")
+        except ValueError:
+            logger.error(
+                "Hashes provided in an incorrect format. Expected format: LM:NT"
+            )
             sys.exit(1)
-        else:
-            auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain, auth_method=args.auth_method)
 
-    ad = AD(auth=auth, domain=args.domain, nameserver=args.nameserver, dns_tcp=args.dns_tcp, dns_timeout=args.dns_timeout, use_ldaps=args.use_ldaps)
+    nameserver = args.nameserver
+
+    auth = ADAuthentication(
+        username=args.username,
+        password=args.password,
+        domain=args.domain,
+        auth_method=args.auth_method,
+        lm_hash=lm,
+        nt_hash=nt,
+        aeskey=args.aesKey,
+    )
+
+    ad = AD(
+        auth=auth,
+        domain=args.domain,
+        nameserver=nameserver,
+        dns_tcp=args.dns_tcp,
+        dns_timeout=args.dns_timeout,
+        use_ldaps=args.use_ldaps,
+    )
 
     # Resolve collection methods
     collect = resolve_collection_methods(args.collectionmethod)
     if not collect:
         return
-    logging.debug('Resolved collection methods: %s', ', '.join(list(collect)))
+    logging.debug("Resolved collection methods: %s", ", ".join(list(collect)))
 
-    logging.debug('Using DNS to retrieve domain information')
+    logging.debug("Using DNS to retrieve domain information")
     ad.dns_resolve(domain=args.domain, options=args)
 
     # Override the detected DC / GC if specified
     if args.domain_controller:
-        if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', args.domain_controller):
-            logging.error('The specified domain controller %s looks like an IP address, but requires a hostname (FQDN).\n'\
-                          'Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.',
-                          args.domain_controller)
+        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", args.domain_controller):
+            logging.error(
+                "The specified domain controller %s looks like an IP address, but requires a hostname (FQDN).\n"
+                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
+                args.domain_controller,
+            )
             sys.exit(1)
         ad.override_dc(args.domain_controller)
-        logging.debug('Using supplied domain controller as KDC')
+        logging.debug("Using supplied domain controller as KDC")
         auth.set_kdc(args.domain_controller)
 
     if args.global_catalog:
-        if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', args.global_catalog):
-            logging.error('The specified global catalog server %s looks like an IP address, but requires a hostname (FQDN).\n'\
-                          'Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.',
-                          args.global_catalog)
+        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", args.global_catalog):
+            logging.error(
+                "The specified global catalog server %s looks like an IP address, but requires a hostname (FQDN).\n"
+                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
+                args.global_catalog,
+            )
             sys.exit(1)
         ad.override_gc(args.global_catalog)
 
-    if args.auth_method in ('auto', 'kerberos'):
+    if args.auth_method in ("auto", "kerberos"):
         if args.kerberos is True:
-            logging.debug('Authentication: Kerberos ccache')
+            logging.debug("Authentication: Kerberos ccache")
             # kerberize()
             if not auth.load_ccache():
-                logging.debug('Could not load ticket from ccache, trying to request a TGT instead')
+                logging.debug(
+                    "Could not load ticket from ccache, trying to request a TGT instead"
+                )
                 auth.get_tgt()
         else:
             auth.get_tgt()
 
-    # For adding timestamp prefix to the outputfiles 
-    timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S') + "_"
+    # Add a timestamp prefix to the output files
+    timestamp = (
+        datetime.datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S") + "_"
+    )
     bloodhound = BloodHound(ad)
     bloodhound.connect()
-    bloodhound.run(collect=collect,
-                   num_workers=args.workers,
-                   disable_pooling=args.disable_pooling,
-                   timestamp=timestamp,
-                   computerfile=args.computerfile,
-                   cachefile=args.cachefile,
-                   exclude_dcs=args.exclude_dcs,
-                   fileNamePrefix=args.outputprefix)
-    #If args --zip is true, the compress output  
+    bloodhound.run(
+        collect=collect,
+        num_workers=args.workers,
+        disable_pooling=args.disable_pooling,
+        timestamp=timestamp,
+        computerfile=args.computerfile,
+        cachefile=args.cachefile,
+        exclude_dcs=args.exclude_dcs,
+        fileNamePrefix=args.outputprefix,
+    )
+    # If --zip is set, compress the output
     if args.zip:
         logging.info("Compressing output into " + timestamp + "bloodhound.zip")
         # Get a list of files in the current dir
         list_of_files = os.listdir(os.getcwd())
         # Create handle to zip file with timestamp prefix
-        if(args.outputprefix!=None):
-            with ZipFile(args.outputprefix + "_" + timestamp + "bloodhound.zip",'w') as zip:
+        if args.outputprefix is not None:
+            with ZipFile(
+                args.outputprefix + "_" + timestamp + "bloodhound.zip", "w"
+            ) as zip:
                 # For each of those files we fetched
                 for each_file in list_of_files:
                     # If the files starts with the current timestamp and ends in json
-                    if each_file.startswith(args.outputprefix) and each_file.endswith("json"):
+                    if each_file.startswith(args.outputprefix) and each_file.endswith(
+                        "json"
+                    ):
                         # Write it to the zip
                         zip.write(each_file)
                         # Remove it from disk
                         os.remove(each_file)
         else:
-            with ZipFile(timestamp + "bloodhound.zip",'w') as zip:
+            with ZipFile(timestamp + "bloodhound.zip", "w") as zip:
                 # For each of those files we fetched
                 for each_file in list_of_files:
                     # If the files starts with the current timestamp and ends in json
@@ -376,6 +502,5 @@ def main():
                         os.remove(each_file)
 
 
-
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/bloodhound/ad/authentication.py b/bloodhound/ad/authentication.py
index 2bac69d..cc2b49c 100644
--- a/bloodhound/ad/authentication.py
+++ b/bloodhound/ad/authentication.py
@@ -22,50 +22,89 @@
 #
 ####################
 
+# Built-in imports
 import logging
 import os
+import sys
 import traceback
-from bloodhound.ad.utils import CollectionException
 from binascii import unhexlify
+import datetime
+
+# Third party library imports
 from ldap3 import Server, Connection, NTLM, ALL, SASL, KERBEROS
 from ldap3.core.results import RESULT_STRONGER_AUTH_REQUIRED
 from ldap3.operation.bind import bind_operation
 from impacket.krb5.ccache import CCache
 from impacket.krb5.types import Principal, KerberosTime, Ticket
 from pyasn1.codec.der import decoder, encoder
-from impacket.krb5.asn1 import AP_REQ, AS_REP, TGS_REQ, Authenticator, TGS_REP, seq_set, seq_set_iter, PA_FOR_USER_ENC, \
-    Ticket as TicketAsn1, EncTGSRepPart
+from impacket.krb5.asn1 import (
+    AP_REQ,
+    AS_REP,
+    TGS_REQ,
+    Authenticator,
+    TGS_REP,
+    seq_set,
+    seq_set_iter,
+    PA_FOR_USER_ENC,
+    Ticket as TicketAsn1,
+    EncTGSRepPart,
+)
+from impacket.spnego import SPNEGO_NegTokenInit, TypesMech
 from impacket.krb5 import constants
 from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS, sendReceive
-import datetime
 from pyasn1.type.univ import noValue
-from impacket.spnego import SPNEGO_NegTokenInit, TypesMech
+
+
+# Local library imports
+from bloodhound.ad.utils import CollectionException
 
 """
 Active Directory authentication helper
 """
+
+
 class ADAuthentication(object):
-    def __init__(self, username='', password='', domain='',
-                 lm_hash='', nt_hash='', aeskey='', kdc=None, auth_method='auto'):
-        self.username = username
-        # Assume user domain and enum domain are same
+    def __init__(
+        self,
+        username="",
+        password="",
+        domain="",
+        lm_hash="",
+        nt_hash="",
+        aeskey="",
+        kdc=None,
+        auth_method="auto",
+    ):
+        if not domain:
+            raise ValueError("Domain must be specified and cannot be empty.")
+
         self.domain = domain.lower()
-        self.userdomain = domain.lower()
-        # If not, override userdomain
-        if '@' in self.username:
-            self.username, self.userdomain = self.username.lower().rsplit('@', 1)
+        self.userdomain = self.domain
+
+        self.username = username.lower() if username else ""
+        if "@" in self.username:
+            self.username, self.userdomain = self.username.rsplit("@", 1)
+
         self.password = password
         self.lm_hash = lm_hash
         self.nt_hash = nt_hash
         self.aeskey = aeskey
-        # KDC for domain we query
         self.kdc = kdc
-        # KDC for domain of the user - fill with domain first, will be resolved later
-        self.userdomain_kdc = self.domain
+        self.userdomain_kdc = self.kdc or self.domain
         self.auth_method = auth_method
-
-        # Kerberos
         self.tgt = None
+        # Log all relevant information at debug level
+        logging.debug(f"Initializing ADAuthentication with parameters:")
+        logging.debug(f"\tUsername: {self.username}")
+        logging.debug(f"\tDomain: {self.domain}")
+        logging.debug(f"\tUser domain: {self.userdomain}")
+        logging.debug(f"\tPassword: {self.password}")
+        logging.debug(f"\tLM Hash: {self.lm_hash}")
+        logging.debug(f"\tNT Hash: {self.nt_hash}")
+        logging.debug(f"\tAES Key: {self.aeskey}")
+        logging.debug(f"\tKDC: {self.kdc if self.kdc else 'Default KDC'}")
+        logging.debug(f"\tUser Domain KDC: {self.userdomain_kdc}")
+        logging.debug(f"\tAuthentication Method: {self.auth_method}")
 
     def set_aeskey(self, aeskey):
         self.aeskey = aeskey
@@ -77,90 +116,159 @@ def set_kdc(self, kdc):
             # Also set it for user domain if this is equal
             self.userdomain_kdc = kdc
 
-    def getLDAPConnection(self, hostname='', ip='', baseDN='', protocol='ldaps', gc=False):
-        if gc:
-            # Global Catalog connection
-            if protocol == 'ldaps':
-                # Ldap SSL
-                server = Server("%s://%s:3269" % (protocol, ip), get_info=ALL)
-            else:
-                # Plain LDAP
-                server = Server("%s://%s:3268" % (protocol, ip), get_info=ALL)
-        else:
-            server = Server("%s://%s" % (protocol, ip), get_info=ALL)
-        # ldap3 supports auth with the NT hash. LM hash is actually ignored since only NTLMv2 is used.
-        if self.nt_hash != '':
-            ldappass = self.lm_hash + ':' + self.nt_hash
-        else:
-            ldappass = self.password
-        ldaplogin = '%s\\%s' % (self.userdomain, self.username)
-        conn = Connection(server, user=ldaplogin, auto_referrals=False, password=ldappass, authentication=NTLM, receive_timeout=60, auto_range=True)
+    def getLDAPConnection(
+        self,
+        hostname="",
+        ip_address="",
+        base_dn="",
+        protocol="ldaps",
+        use_global_catalog=False,
+    ):
+        # Log incoming parameters to help with debugging
+        logging.debug(f"Initializing LDAP Connection with the following parameters:")
+        logging.debug(f"\tHostname: {hostname}")
+        logging.debug(f"\tIP Address: {ip_address}")
+        logging.debug(f"\tBase DN: {base_dn}")
+        logging.debug(f"\tProtocol: {protocol}")
+        logging.debug(f"\tUse Global Catalog: {use_global_catalog}")
+
+        # Directly use the IP address for the server URL
+        # Port selection: 3269 = Global Catalog over TLS, 3268 = plain GC,
+        # 636 = LDAPS, 389 = plain LDAP
+        if use_global_catalog:
+            port = 3269 if protocol == "ldaps" else 3268
+        else:
+            port = 636 if protocol == "ldaps" else 389
+        server_url = f"{protocol}://{ip_address}:{port}"
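+        # With the defaults this yields e.g. "ldaps://<ip>:636"; a Global
+        # Catalog query over TLS would use "ldaps://<ip>:3269".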
+
+        logging.debug(f"Server url: {server_url}")
+
+        server = Server(server_url, get_info=ALL)
+
+        ldap_username = f"{self.userdomain}\\{self.username}"
+        ldap_password = (
+            f"{self.lm_hash}:{self.nt_hash}" if self.nt_hash else self.password
+        )
+
+        conn = Connection(
+            server,
+            user=ldap_username,
+            password=ldap_password,
+            authentication=NTLM,
+            auto_referrals=False,
+            receive_timeout=60,
+            auto_range=True,
+        )
+
         bound = False
-        if self.tgt is not None and self.auth_method in ('kerberos', 'auto'):
-            conn = Connection(server, user=ldaplogin, auto_referrals=False, password=ldappass, authentication=SASL, sasl_mechanism=KERBEROS)
-            logging.debug('Authenticating to LDAP server with Kerberos')
+
+        # Attempt Kerberos authentication if a TGT is available
+        if self.tgt is not None and self.auth_method in ("kerberos", "auto"):
+            logging.debug("Authenticating to LDAP server with Kerberos")
             try:
+                conn = Connection(
+                    server,
+                    user=ldap_username,
+                    password=ldap_password,
+                    authentication=SASL,
+                    sasl_mechanism=KERBEROS,
+                    auto_referrals=False,
+                )
                 bound = self.ldap_kerberos(conn, hostname)
+            except OSError as error:
+                if "Name or service not known" in str(error):
+                    logging.error(
+                        f"DNS resolution error. Please, ensure than your system DNS is able to resolve {hostname}"
+                    )
+                    sys.exit(1)
+
             except Exception as exc:
-                if self.auth_method == 'auto':
-                    logging.debug(traceback.format_exc())
-                    logging.info('Kerberos auth to LDAP failed, trying NTLM')
+                logging.debug(f"Kerberos authentication failed: {exc}")
+                if self.auth_method == "auto":
+                    logging.info("Kerberos auth failed, falling back to NTLM")
                     bound = False
                 else:
-                    logging.debug('Kerberos auth to LDAP failed, no authentication methods left')
+                    logging.error("Kerberos authentication failed, no fallback enabled")
 
+        # Fallback to NTLM if Kerberos did not succeed
         if not bound:
-            conn = Connection(server, user=ldaplogin, auto_referrals=False, password=ldappass, authentication=NTLM)
-            logging.debug('Authenticating to LDAP server with NTLM')
+            conn = Connection(
+                server,
+                user=ldap_username,
+                password=ldap_password,
+                authentication=NTLM,
+                auto_referrals=False,
+            )
+            logging.debug("Authenticating to LDAP server with NTLM")
             bound = conn.bind()
 
+        # Handle unsuccessful binds
         if not bound:
             result = conn.result
-            if result['result'] == RESULT_STRONGER_AUTH_REQUIRED and protocol == 'ldap':
-                logging.warning('LDAP Authentication is refused because LDAP signing is enabled. '
-                                'Trying to connect over LDAPS instead...')
-                return self.getLDAPConnection(hostname, ip, baseDN, 'ldaps')
+            if (
+                result["result"] == RESULT_STRONGER_AUTH_REQUIRED
+                and protocol == "ldap"
+            ):  # strongerAuthRequired: the server enforces LDAP signing
+                logging.warning(
+                    "LDAP Authentication failed because LDAP signing is enabled. Trying LDAPS..."
+                )
+                return self.getLDAPConnection(hostname, ip_address, base_dn, "ldaps")
             else:
-                logging.error('Failure to authenticate with LDAP! Error %s' % result['message'])
-                raise CollectionException('Could not authenticate to LDAP. Check your credentials and LDAP server requirements.')
+                error_message = result.get("message", "Unknown error during LDAP bind")
+                logging.error(
+                    f"Failure to authenticate with LDAP! Error: {error_message}"
+                )
+                raise CollectionException(
+                    "Could not authenticate to LDAP. Check your credentials and LDAP server requirements."
+                )
+
         return conn
 
     def ldap_kerberos(self, connection, hostname):
         # Hackery to authenticate with ldap3 using impacket Kerberos stack
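+        # Flow: request a TGS for the ldap/<hostname> SPN with our cached TGT,
+        # wrap the resulting service ticket in an AP-REQ inside a SPNEGO
+        # NegTokenInit, and hand that blob to ldap3 as the SASL bind token.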
 
-        username = Principal(self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
-        servername = Principal('ldap/%s' % hostname, type=constants.PrincipalNameType.NT_SRV_INST.value)
-        tgs, cipher, _, sessionkey = getKerberosTGS(servername, self.domain, self.kdc,
-                                                                self.tgt['KDC_REP'], self.tgt['cipher'], self.tgt['sessionKey'])
+        username = Principal(
+            self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value
+        )
+        servername = Principal(
+            "ldap/%s" % hostname, type=constants.PrincipalNameType.NT_SRV_INST.value
+        )
+        tgs, cipher, _, sessionkey = getKerberosTGS(
+            servername,
+            self.domain,
+            self.kdc,
+            self.tgt["KDC_REP"],
+            self.tgt["cipher"],
+            self.tgt["sessionKey"],
+        )
 
         # Let's build a NegTokenInit with a Kerberos AP_REQ
         blob = SPNEGO_NegTokenInit()
 
         # Kerberos
-        blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]
+        blob["MechTypes"] = [TypesMech["MS KRB5 - Microsoft Kerberos 5"]]
 
         # Let's extract the ticket from the TGS
         tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
         ticket = Ticket()
-        ticket.from_asn1(tgs['ticket'])
+        ticket.from_asn1(tgs["ticket"])
 
         # Now let's build the AP_REQ
         apReq = AP_REQ()
-        apReq['pvno'] = 5
-        apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)
+        apReq["pvno"] = 5
+        apReq["msg-type"] = int(constants.ApplicationTagNumbers.AP_REQ.value)
 
         opts = []
-        apReq['ap-options'] = constants.encodeFlags(opts)
-        seq_set(apReq, 'ticket', ticket.to_asn1)
+        apReq["ap-options"] = constants.encodeFlags(opts)
+        seq_set(apReq, "ticket", ticket.to_asn1)
 
         authenticator = Authenticator()
-        authenticator['authenticator-vno'] = 5
-        authenticator['crealm'] = self.userdomain
-        seq_set(authenticator, 'cname', username.components_to_asn1)
+        authenticator["authenticator-vno"] = 5
+        authenticator["crealm"] = self.userdomain
+        seq_set(authenticator, "cname", username.components_to_asn1)
         now = datetime.datetime.utcnow()
 
-        authenticator['cusec'] = now.microsecond
-        authenticator['ctime'] = KerberosTime.to_asn1(now)
+        authenticator["cusec"] = now.microsecond
+        authenticator["ctime"] = KerberosTime.to_asn1(now)
 
         encodedAuthenticator = encoder.encode(authenticator)
 
@@ -168,145 +276,211 @@ def ldap_kerberos(self, connection, hostname):
         # AP-REQ Authenticator (includes application authenticator
         # subkey), encrypted with the application session key
         # (Section 5.5.1)
-        encryptedEncodedAuthenticator = cipher.encrypt(sessionkey, 11, encodedAuthenticator, None)
+        encryptedEncodedAuthenticator = cipher.encrypt(
+            sessionkey, 11, encodedAuthenticator, None
+        )
 
-        apReq['authenticator'] = noValue
-        apReq['authenticator']['etype'] = cipher.enctype
-        apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator
+        apReq["authenticator"] = noValue
+        apReq["authenticator"]["etype"] = cipher.enctype
+        apReq["authenticator"]["cipher"] = encryptedEncodedAuthenticator
 
-        blob['MechToken'] = encoder.encode(apReq)
+        blob["MechToken"] = encoder.encode(apReq)
 
         # From here back to ldap3
         connection.open(read_server_info=False)
-        request = bind_operation(connection.version, SASL, None, None, connection.sasl_mechanism, blob.getData())
-        response = connection.post_send_single_response(connection.send('bindRequest', request, None))[0]
+        request = bind_operation(
+            connection.version,
+            SASL,
+            None,
+            None,
+            connection.sasl_mechanism,
+            blob.getData(),
+        )
+        response = connection.post_send_single_response(
+            connection.send("bindRequest", request, None)
+        )[0]
         connection.result = response
-        if response['result'] == 0:
+        if response["result"] == 0:
             connection.bound = True
             connection.refresh_server_info()
-        return response['result'] == 0
+        return response["result"] == 0
 
     def get_tgt(self):
         """
         Request a Kerberos TGT given our provided inputs.
         """
-        username = Principal(self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
-        logging.info('Getting TGT for user')
+        username = Principal(
+            self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value
+        )
+        logging.info(f"Attempting to get Kerberos TGT for user {self.username}")
 
         try:
-            tgt, cipher, _, session_key = getKerberosTGT(username, self.password, self.userdomain,
-                                                         unhexlify(self.lm_hash), unhexlify(self.nt_hash),
-                                                         self.aeskey,
-                                                         self.userdomain_kdc)
-        except Exception as exc:
-            logging.debug(traceback.format_exc())
-            if self.auth_method == 'auto':
-                logging.warning('Failed to get Kerberos TGT. Falling back to NTLM authentication. Error: %s', str(exc))
+            tgt, cipher, _, session_key = getKerberosTGT(
+                username,
+                self.password,
+                self.userdomain,
+                unhexlify(self.lm_hash),
+                unhexlify(self.nt_hash),
+                self.aeskey,
+                self.userdomain_kdc,
+            )
+            logging.info(
+                f"Successfully retrieved initial TGT for user domain: {self.userdomain}."
+            )
+        except Exception:
+            logging.error("Failed to retrieve initial TGT", exc_info=True)
+            if self.auth_method == "auto":
+                logging.warning(
+                    "Falling back to NTLM authentication due to TGT retrieval failure."
+                )
                 return
             else:
-                # No other auth methods, so raise exception
-                logging.error('Failed to get Kerberos TGT.')
                 raise
 
-        if self.userdomain != self.domain:
-            # Try to get inter-realm TGT
-            username = Principal(self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
-            servername = Principal('krbtgt/%s' % self.domain, type=constants.PrincipalNameType.NT_SRV_INST.value)
-            # Get referral TGT
-            tgs, cipher, _, sessionkey = getKerberosTGS(servername, self.userdomain, self.userdomain_kdc,
-                                                                    tgt, cipher, session_key)
+        if self.userdomain == self.domain:
+            self.tgt = {"KDC_REP": tgt, "cipher": cipher, "sessionKey": session_key}
+            logging.info("Stored TGT for same-realm use.")
+        else:
+            logging.info("Detected inter-realm trust scenario.")
+            username = Principal(
+                self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value
+            )
+            servername = Principal(
+                "krbtgt/%s" % self.domain,
+                type=constants.PrincipalNameType.NT_SRV_INST.value,
+            )
+
+            tgs, cipher, _, sessionkey = getKerberosTGS(
+                servername,
+                self.userdomain,
+                self.userdomain_kdc,
+                tgt,
+                cipher,
+                session_key,
+            )
+            logging.info(
+                f"Retrieved initial referral TGS to access {self.domain} domain services."
+            )
+
             # See if this is a ticket for the correct domain
-            refneeded = True
-            while refneeded:
-                decoded_tgs = decoder.decode(tgs, asn1Spec = TGS_REP())[0]
-                next_realm = str(decoded_tgs['ticket']['sname']['name-string'][1])
+            while True:
+                decoded_tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
+                next_realm = str(decoded_tgs["ticket"]["sname"]["name-string"][1])
                 if next_realm.upper() == self.domain.upper():
-                    refneeded = False
+                    logging.info(
+                        f"Successfully obtained final TGS for domain {self.domain}."
+                    )
+                    break
                 else:
                     # Get next referral TGT
-                    logging.debug('Following referral across trust to get next TGT')
-                    servername = Principal('krbtgt/%s' % self.domain, type=constants.PrincipalNameType.NT_SRV_INST.value)
-                    tgs, cipher, _, sessionkey = getKerberosTGS(servername, next_realm, next_realm,
-                                                                            tgs, cipher, sessionkey)
-
-            # Get foreign domain TGT
-            servername = Principal('krbtgt/%s' % self.domain, type=constants.PrincipalNameType.NT_SRV_INST.value)
-            tgs, cipher, _, sessionkey = getKerberosTGS(servername, self.domain, self.kdc,
-                                                                    tgs, cipher, sessionkey)
+                    logging.info(
+                        f"Referral TGS from {self.userdomain} points to next realm {next_realm}."
+                    )
+                    servername = Principal(
+                        "krbtgt/%s" % self.domain,
+                        type=constants.PrincipalNameType.NT_SRV_INST.value,
+                    )
+                    logging.debug(
+                        f"Requesting TGS from {next_realm} to further navigate towards {self.domain}."
+                    )
+                    tgs, cipher, _, sessionkey = getKerberosTGS(
+                        servername, next_realm, next_realm, tgs, cipher, sessionkey
+                    )
+                    logging.debug(
+                        f"Retrieved subsequent referral TGS for realm {next_realm}, moving closer to target domain {self.domain}."
+                    )
+
+            servername = Principal(
+                "krbtgt/%s" % self.domain,
+                type=constants.PrincipalNameType.NT_SRV_INST.value,
+            )
+            tgs, cipher, _, sessionkey = getKerberosTGS(
+                servername, self.domain, self.kdc, tgs, cipher, sessionkey
+            )
+            logging.info(
+                "Successfully obtained final TGS, enabling access to the target domain's services."
+            )
             # Store this as our TGT
-            self.tgt = {
-                'KDC_REP': tgs,
-                'cipher': cipher,
-                'sessionKey': sessionkey
-            }
-        else:
-            TGT = dict()
-            TGT['KDC_REP'] = tgt
-            TGT['cipher'] = cipher
-            TGT['sessionKey'] = session_key
-            self.tgt = TGT
+            self.tgt = {"KDC_REP": tgs, "cipher": cipher, "sessionKey": sessionkey}
+
+        logging.info("Completed TGT acquisition process.")
 
     def get_tgs_for_smb(self, hostname):
         """
         Get a TGS for use with SMB Connection. We do this here to make sure the realms are correct,
         since impacket doesn't support cross-realm TGT usage and we don't want it to do its own Kerberos
         """
-        username = Principal(self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
-        servername = Principal('cifs/%s' % hostname, type=constants.PrincipalNameType.NT_SRV_INST.value)
-        tgs, cipher, _, sessionkey = getKerberosTGS(servername, self.domain, self.kdc,
-                                                                self.tgt['KDC_REP'], self.tgt['cipher'], self.tgt['sessionKey'])
-        return {
-            'KDC_REP': tgs,
-            'cipher': cipher,
-            'sessionKey': sessionkey
-        }
-
-    def load_ccache(self):
-        """
-        Extract a TGT from a ccache file.
+        username = Principal(
+            self.username, type=constants.PrincipalNameType.NT_PRINCIPAL.value
+        )
+        servername = Principal(
+            "cifs/%s" % hostname, type=constants.PrincipalNameType.NT_SRV_INST.value
+        )
+        tgs, cipher, _, sessionkey = getKerberosTGS(
+            servername,
+            self.domain,
+            self.kdc,
+            self.tgt["KDC_REP"],
+            self.tgt["cipher"],
+            self.tgt["sessionKey"],
+        )
+        return {"KDC_REP": tgs, "cipher": cipher, "sessionKey": sessionkey}
+
+    def load_ccache(self) -> bool:
         """
-        # If the kerberos credential cache is known, use that.
-        krb5cc = os.getenv('KRB5CCNAME')
+        Attempts to load a Kerberos Ticket-Granting Ticket (TGT) from a Kerberos credential cache (ccache) file.
+        This method verifies if the TGT found in the cache matches the expected domain and username (if provided).
 
-        # Otherwise, guess it.
-        if krb5cc is None:
-            try:
-                krb5cc = '/tmp/krb5cc_%u' % os.getuid()
-            except AttributeError:
-                # This fails on Windows
-                krb5cc = 'nonexistingfile'
+        Returns:
+            bool: True if a valid TGT was loaded and matches the expected username and domain; False otherwise.
 
-        if os.path.isfile(krb5cc):
-            logging.debug('Using kerberos credential cache: %s', krb5cc)
-        else:
-            logging.debug('No Kerberos credential cache file found, manually requesting TGT')
+        Raises:
+            FileNotFoundError: If the specified ccache file does not exist.
+            Exception: General exception if ccache file loading fails or credential processing encounters an error.
+        """
+        try:
+            default_ccache = f"/tmp/krb5cc_{os.getuid()}"
+        except AttributeError:
+            # os.getuid() does not exist on Windows
+            default_ccache = "nonexistingfile"
+        krb5cc = os.getenv("KRB5CCNAME", default_ccache)
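+        # A ccache is typically produced beforehand, e.g. with kinit or
+        # impacket's getTGT.py, then exposed via: export KRB5CCNAME=<path>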
+
+        if not os.path.isfile(krb5cc):
+            logging.debug(
+                f"No Kerberos credential cache file found at {krb5cc}, manually requesting TGT"
+            )
             return False
 
-        # Load TGT for our domain
-        ccache = CCache.loadFile(krb5cc)
-        principal = 'krbtgt/%s@%s' % (self.domain.upper(), self.domain.upper())
-        creds = ccache.getCredential(principal, anySPN=False)
-        if creds is not None:
-            TGT = creds.toTGT()
-            # This we store for later
-            self.tgt = TGT
-            tgt, cipher, session_key = TGT['KDC_REP'], TGT['cipher'], TGT['sessionKey']
-            logging.info('Using TGT from cache')
-        else:
-            logging.debug("No valid credentials found in cache. ")
+        logging.debug(f"Using Kerberos credential cache: {krb5cc}")
+
+        try:
+            ccache = CCache.loadFile(krb5cc)
+        except Exception as e:
+            logging.error(f"Failed to load ccache file from {krb5cc}: {e}")
             return False
 
-        # Verify if this ticket is actually for the specified user
-        ticket = Ticket()
-        decoded_tgt = decoder.decode(tgt, asn1Spec = AS_REP())[0]
-        ticket.from_asn1(decoded_tgt['ticket'])
-
-        tgt_principal = Principal()
-        tgt_principal.from_asn1(decoded_tgt, 'crealm', 'cname')
-        expected_principal = '%s@%s' % (self.username.lower(), self.domain.upper())
-        if expected_principal.upper() != str(tgt_principal).upper():
-            logging.warning('Username in ccache file does not match supplied username! %s != %s', tgt_principal, expected_principal)
+        principal_str = f"krbtgt/{self.domain.upper()}@{self.domain.upper()}"
+        creds = ccache.getCredential(principal_str, anySPN=False)
+
+        if creds is None:
+            logging.debug("No valid credentials found in cache.")
             return False
+
+        TGT = creds.toTGT()
+        self.tgt = TGT
+        logging.info("Using TGT from cache")
+
+        decoded_tgt = decoder.decode(TGT["KDC_REP"], asn1Spec=AS_REP())[0]
+        ticket_principal = Principal()
+        ticket_principal.from_asn1(decoded_tgt, "crealm", "cname")
+        formatted_principal = f"{ticket_principal}@{self.domain.upper()}"
+
+        if not self.username:
+            self.username = str(ticket_principal).split("@")[0]
+            logging.info(f"Extracted the username from TGT: {self.username}")
         else:
-            logging.info('Found TGT with correct principal in ccache file.')
+            expected_principal = f"{self.username.lower()}@{self.domain.upper()}"
+            if expected_principal.upper() != formatted_principal.upper():
+                logging.warning(
+                    f"Username in ccache file does not match supplied username! {formatted_principal} != {expected_principal}"
+                )
+                return False
+            else:
+                logging.info("Found TGT with correct principal in ccache file.")
+
         return True
diff --git a/bloodhound/ad/computer.py b/bloodhound/ad/computer.py
index af8f6b9..395ce21 100644
--- a/bloodhound/ad/computer.py
+++ b/bloodhound/ad/computer.py
@@ -27,7 +27,18 @@
 import calendar
 import time
 import re
-from impacket.dcerpc.v5 import transport, samr, srvs, lsat, lsad, nrpc, wkst, scmr, tsch, rrp
+from impacket.dcerpc.v5 import (
+    transport,
+    samr,
+    srvs,
+    lsat,
+    lsad,
+    nrpc,
+    wkst,
+    scmr,
+    tsch,
+    rrp,
+)
 from impacket.dcerpc.v5.rpcrt import DCERPCException, RPC_C_AUTHN_LEVEL_PKT_INTEGRITY
 from impacket.dcerpc.v5.ndr import NULL
 from impacket.dcerpc.v5.dtypes import RPC_SID, MAXIMUM_ALLOWED
@@ -40,16 +51,22 @@
 from impacket.smbconnection import SessionError
 from impacket import smb
 from impacket.smb3structs import SMB2_DIALECT_21
+
 # Try to import exceptions here, if this does not succeed, then impacket version is too old
 try:
-    HostnameValidationExceptions = (SMB3.HostnameValidationException, SMB.HostnameValidationException)
+    HostnameValidationExceptions = (
+        SMB3.HostnameValidationException,
+        SMB.HostnameValidationException,
+    )
 except AttributeError:
     HostnameValidationExceptions = ()
 
+
 class ADComputer(object):
     """
     Computer connected to Active Directory
     """
+
     def __init__(self, hostname=None, samname=None, ad=None, addc=None, objectsid=None):
         self.ad = ad
         self.addc = addc
@@ -81,195 +98,266 @@ def __init__(self, hostname=None, samname=None, ad=None, addc=None, objectsid=No
         self.permanentfailure = False
         # Process invalid hosts
         if not hostname:
-            self.hostname = '%s.%s' % (samname[:-1].upper(), self.ad.domain.upper())
+            self.hostname = "%s.%s" % (samname[:-1].upper(), self.ad.domain.upper())
         else:
             self.hostname = hostname
 
     def get_bloodhound_data(self, entry, collect, skip_acl=False):
         data = {
-            'ObjectIdentifier': self.objectsid,
-            'AllowedToAct': [],
-            'PrimaryGroupSID': self.primarygroup,
-            'LocalAdmins': {
-                'Collected': 'localadmin' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.admins,
+            "ObjectIdentifier": self.objectsid,
+            "AllowedToAct": [],
+            "PrimaryGroupSID": self.primarygroup,
+            "LocalAdmins": {
+                "Collected": "localadmin" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.admins,
             },
-            'PSRemoteUsers': {
-                'Collected': 'psremote' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.psremote
+            "PSRemoteUsers": {
+                "Collected": "psremote" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.psremote,
             },
-            'Properties': {
-                'name': self.hostname.upper(),
-                'domainsid': self.ad.domain_object.sid,
-                'domain': self.ad.domain.upper(),
-                'distinguishedname': ADUtils.get_entry_property(entry, 'distinguishedName').upper()
+            "Properties": {
+                "name": self.hostname.upper(),
+                "domainsid": self.ad.domain_object.sid,
+                "domain": self.ad.domain.upper(),
+                "distinguishedname": ADUtils.get_entry_property(
+                    entry, "distinguishedName"
+                ).upper(),
             },
-            'RemoteDesktopUsers': {
-                'Collected': 'rdp' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.rdp
+            "RemoteDesktopUsers": {
+                "Collected": "rdp" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.rdp,
             },
-            'DcomUsers': {
-                'Collected': 'dcom' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.dcom
+            "DcomUsers": {
+                "Collected": "dcom" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.dcom,
             },
-            'AllowedToDelegate': [],
-            'Sessions': {
-                'Collected': 'session' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.sessions
+            "AllowedToDelegate": [],
+            "Sessions": {
+                "Collected": "session" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.sessions,
             },
-            'PrivilegedSessions': {
-                'Collected': 'loggedon' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.loggedon
+            "PrivilegedSessions": {
+                "Collected": "loggedon" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.loggedon,
             },
-            'RegistrySessions': {
-                'Collected': 'loggedon' in collect and not self.permanentfailure,
-                'FailureReason': None,
-                'Results': self.registry_sessions
+            "RegistrySessions": {
+                "Collected": "loggedon" in collect and not self.permanentfailure,
+                "FailureReason": None,
+                "Results": self.registry_sessions,
             },
-            'Aces': [],
-            'HasSIDHistory': [],
-            'IsDeleted': ADUtils.get_entry_property(entry, 'isDeleted', default=False),
-            'Status': None
+            "Aces": [],
+            "HasSIDHistory": [],
+            "IsDeleted": ADUtils.get_entry_property(entry, "isDeleted", default=False),
+            "Status": None,
         }
 
-        props = data['Properties']
+        props = data["Properties"]
         # via the TRUSTED_FOR_DELEGATION (0x00080000) flag in UAC
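+        # (UAC bit reference: 0x2 = ACCOUNTDISABLE, 0x00080000 = TRUSTED_FOR_DELEGATION,
+        # 0x01000000 = TRUSTED_TO_AUTH_FOR_DELEGATION)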
-        props['unconstraineddelegation'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000
-        props['enabled'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 2 == 0
-        props['trustedtoauth'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x01000000 == 0x01000000
-        props['samaccountname'] = ADUtils.get_entry_property(entry, 'sAMAccountName')
-
-        if 'objectprops' in collect or 'acl' in collect:
-            props['haslaps'] = ADUtils.get_entry_property(entry, 'ms-mcs-admpwdexpirationtime', 0) != 0
+        props["unconstraineddelegation"] = (
+            ADUtils.get_entry_property(entry, "userAccountControl", default=0)
+            & 0x00080000
+            == 0x00080000
+        )
+        props["enabled"] = (
+            ADUtils.get_entry_property(entry, "userAccountControl", default=0) & 2 == 0
+        )
+        props["trustedtoauth"] = (
+            ADUtils.get_entry_property(entry, "userAccountControl", default=0)
+            & 0x01000000
+            == 0x01000000
+        )
+        props["samaccountname"] = ADUtils.get_entry_property(entry, "sAMAccountName")
+
+        if "objectprops" in collect or "acl" in collect:
+            props["haslaps"] = (
+                ADUtils.get_entry_property(entry, "ms-mcs-admpwdexpirationtime", 0) != 0
+            )
 
-        if 'objectprops' in collect:
-            props['lastlogon'] = ADUtils.win_timestamp_to_unix(
-                ADUtils.get_entry_property(entry, 'lastlogon', default=0, raw=True)
+        if "objectprops" in collect:
+            props["lastlogon"] = ADUtils.win_timestamp_to_unix(
+                ADUtils.get_entry_property(entry, "lastlogon", default=0, raw=True)
             )
-            props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix(
-                ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)
+            props["lastlogontimestamp"] = ADUtils.win_timestamp_to_unix(
+                ADUtils.get_entry_property(
+                    entry, "lastlogontimestamp", default=0, raw=True
+                )
             )
-            if props['lastlogontimestamp'] == 0:
-                props['lastlogontimestamp'] = -1
-            props['pwdlastset'] = ADUtils.win_timestamp_to_unix(
-                ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)
+            if props["lastlogontimestamp"] == 0:
+                props["lastlogontimestamp"] = -1
+            props["pwdlastset"] = ADUtils.win_timestamp_to_unix(
+                ADUtils.get_entry_property(entry, "pwdLastSet", default=0, raw=True)
             )
-            whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0)
+            whencreated = ADUtils.get_entry_property(entry, "whencreated", default=0)
             if not isinstance(whencreated, int):
                 whencreated = calendar.timegm(whencreated.timetuple())
-            props['whencreated'] = whencreated
-            props['serviceprincipalnames'] = ADUtils.get_entry_property(entry, 'servicePrincipalName', [])
-            props['description'] = ADUtils.get_entry_property(entry, 'description')
-            props['operatingsystem'] = ADUtils.get_entry_property(entry, 'operatingSystem')
+            props["whencreated"] = whencreated
+            props["serviceprincipalnames"] = ADUtils.get_entry_property(
+                entry, "servicePrincipalName", []
+            )
+            props["description"] = ADUtils.get_entry_property(entry, "description")
+            props["operatingsystem"] = ADUtils.get_entry_property(
+                entry, "operatingSystem"
+            )
             # Add SP to OS if specified
-            servicepack = ADUtils.get_entry_property(entry, 'operatingSystemServicePack')
+            servicepack = ADUtils.get_entry_property(
+                entry, "operatingSystemServicePack"
+            )
             if servicepack:
-                props['operatingsystem'] = '%s %s' % (props['operatingsystem'], servicepack)
-            props['sidhistory'] = [LDAP_SID(bsid).formatCanonical() for bsid in ADUtils.get_entry_property(entry, 'sIDHistory', [])]
-            delegatehosts = ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])
+                props["operatingsystem"] = "%s %s" % (
+                    props["operatingsystem"],
+                    servicepack,
+                )
+            props["sidhistory"] = [
+                LDAP_SID(bsid).formatCanonical()
+                for bsid in ADUtils.get_entry_property(entry, "sIDHistory", [])
+            ]
+            delegatehosts = ADUtils.get_entry_property(
+                entry, "msDS-AllowedToDelegateTo", []
+            )
             delegatehosts_cache = []
             for host in delegatehosts:
                 try:
-                    target = host.split('/')[1]
+                    target = host.split("/")[1]
                 except IndexError:
-                    logging.warning('Invalid delegation target: %s', host)
+                    logging.warning("Invalid delegation target: %s", host)
                     continue
                 try:
                     object_sid = self.ad.computersidcache.get(target.lower())
-                    data['AllowedToDelegate'].append({
-                        'ObjectIdentifier': object_sid,
-                        'ObjectType': ADUtils.resolve_ad_entry(
-                            self.ad.objectresolver.resolve_sid(object_sid)
-                        )['type'],
-                    })
+                    data["AllowedToDelegate"].append(
+                        {
+                            "ObjectIdentifier": object_sid,
+                            "ObjectType": ADUtils.resolve_ad_entry(
+                                self.ad.objectresolver.resolve_sid(object_sid)
+                            )["type"],
+                        }
+                    )
                 except KeyError:
                     object_sam = target.upper().split(".")[0]
-                    if object_sam in delegatehosts_cache: continue
+                    if object_sam in delegatehosts_cache:
+                        continue
                     delegatehosts_cache.append(object_sam)
-                    object_entry = self.ad.objectresolver.resolve_samname(object_sam + '*', allow_filter=True)
+                    object_entry = self.ad.objectresolver.resolve_samname(
+                        object_sam + "*", allow_filter=True
+                    )
                     if object_entry:
                         object_resolved = ADUtils.resolve_ad_entry(object_entry[0])
-                        data['AllowedToDelegate'].append({
-                            'ObjectIdentifier': object_resolved['objectid'],
-                            'ObjectType': object_resolved['type'],
-                        })
+                        data["AllowedToDelegate"].append(
+                            {
+                                "ObjectIdentifier": object_resolved["objectid"],
+                                "ObjectType": object_resolved["type"],
+                            }
+                        )
             if len(delegatehosts) > 0:
-                props['allowedtodelegate'] = delegatehosts
+                props["allowedtodelegate"] = delegatehosts
 
             # Process resource-based constrained delegation
-            _, aces = parse_binary_acl(data,
-                                       'computer',
-                                       ADUtils.get_entry_property(entry,
-                                                                  'msDS-AllowedToActOnBehalfOfOtherIdentity',
-                                                                  raw=True),
-                                       self.addc.objecttype_guid_map)
+            _, aces = parse_binary_acl(
+                data,
+                "computer",
+                ADUtils.get_entry_property(
+                    entry, "msDS-AllowedToActOnBehalfOfOtherIdentity", raw=True
+                ),
+                self.addc.objecttype_guid_map,
+            )
             outdata = self.aceresolver.resolve_aces(aces)
             for delegated in outdata:
-                if delegated['RightName'] == 'Owner':
+                if delegated["RightName"] == "Owner":
                     continue
-                if delegated['RightName'] == 'GenericAll':
-                    data['AllowedToAct'].append({'ObjectIdentifier': delegated['PrincipalSID'], 'ObjectType': delegated['PrincipalType']})
+                if delegated["RightName"] == "GenericAll":
+                    data["AllowedToAct"].append(
+                        {
+                            "ObjectIdentifier": delegated["PrincipalSID"],
+                            "ObjectType": delegated["PrincipalType"],
+                        }
+                    )
 
         # Run ACL collection if this was not already done centrally
-        if 'acl' in collect and not skip_acl:
-            _, aces = parse_binary_acl(data,
-                                       'computer',
-                                       ADUtils.get_entry_property(entry,
-                                                                  'nTSecurityDescriptor',
-                                                                  raw=True),
-                                       self.addc.objecttype_guid_map)
+        if "acl" in collect and not skip_acl:
+            _, aces = parse_binary_acl(
+                data,
+                "computer",
+                ADUtils.get_entry_property(entry, "nTSecurityDescriptor", raw=True),
+                self.addc.objecttype_guid_map,
+            )
             # Parse aces
-            data['Aces'] = self.aceresolver.resolve_aces(aces)
+            data["Aces"] = self.aceresolver.resolve_aces(aces)
 
         return data
 
-    def try_connect(self):
-        addr = None
+    def resolve_hostname(self, hostname: str) -> "tuple[bool, str | None]":
+        """
+        Attempt to resolve the specified hostname to an IP address.
+
+        Args:
+            hostname (str): The hostname to resolve.
+
+        Returns:
+            tuple[bool, str | None]: A boolean indicating whether the resolution
+                was successful, and the resolved IP address (None if unsuccessful).
+        """
         try:
-            addr = self.ad.dnscache.get(self.hostname)
-        except KeyError:
-            try:
-                q = self.ad.dnsresolver.query(self.hostname, 'A', tcp=self.ad.dns_tcp)
-                for r in q:
-                    addr = r.address
-
-                if addr == None:
-                    return False
-            # Do exit properly on keyboardinterrupts
-            except KeyboardInterrupt:
-                raise
-            except Exception as e:
-                # Doesn't exist
-                if "None of DNS query names exist" in str(e):
-                    logging.info('Skipping enumeration for %s since it could not be resolved.', self.hostname)
-                else:
-                    logging.warning('Could not resolve: %s: %s', self.hostname, e)
-                return False
+            # Attempt DNS resolution
+            dns_response = self.ad.dnsresolver.query(hostname, "A", tcp=self.ad.dns_tcp)
+            for record in dns_response:
+                if record.address:
+                    return True, record.address
+        except KeyboardInterrupt:
+            # Ensure a keyboard interrupt is handled correctly
+            raise
+        except Exception as e:
+            # Log the error for debugging purposes
+            logging.debug(f"Failed to resolve {hostname}: {e}")
+        return False, None
 
-            logging.debug('Resolved: %s' % addr)
+    def try_connect(self) -> bool:
+        """
+        Attempt to resolve this computer's hostname and verify it is reachable on SMB (TCP 445).
 
-            self.ad.dnscache.put(self.hostname, addr)
+        This method tries to resolve the hostname to an IP address and establish a connection.
+        If the initial DNS resolution fails, it appends the domain to the hostname and retries.
 
-        self.addr = addr
+        Caching is used to store successful DNS resolutions to speed up subsequent connections.
 
-        logging.debug('Trying connecting to computer: %s', self.hostname)
-        # We ping the host here, this adds a small overhead for setting up an extra socket
-        # but saves us from constructing RPC Objects for non-existing hosts. Also RPC over
-        # SMB does not support setting a connection timeout, so we catch this here.
-        return ADUtils.tcp_ping(addr, 445)
+        Returns:
+            bool: True if the connection is successful, False otherwise.
+        """
+        success = False
+        try:
+            # Attempt to get the address from cache
+            addr = self.ad.dnscache.get(self.hostname)
+            success = True
 
+        except KeyError:
+            # If not cached, try resolving the hostname
+            success, addr = self.resolve_hostname(self.hostname)
+            if not success:
+                # Append domain and retry if resolution fails
+                fqdn = f"{self.hostname}.{self.ad.domain}"
+                success, addr = self.resolve_hostname(fqdn)
+
+        if not success:
+            # Log a warning if both resolutions fail
+            logging.warning(f"Could not resolve: {self.hostname}")
+            return False
+
+        # Cache the address for future use and proceed with connection
+        self.ad.dnscache.put(self.hostname, addr)
+        self.addr = addr
+        logging.info(f"Resolved: {self.hostname} -> {addr}")
+        logging.debug(f"Attempting to connect to computer: {self.hostname}")
+        # We ping TCP/445 first: this adds the small overhead of an extra socket,
+        # but saves constructing RPC objects for unreachable hosts. RPC over SMB
+        # also does not support a connection timeout, so we catch dead hosts here.
+        return ADUtils.tcp_ping(addr, 445)
 
     def dce_rpc_connect(self, binding, uuid, integrity=False):
         if self.permanentfailure:
-            logging.debug('Skipping connection because of previous failure')
+            logging.debug("Skipping connection because of previous failure")
             return None
-        logging.debug('DCE/RPC binding: %s', binding)
+        logging.debug("DCE/RPC binding: %s", binding)
 
         try:
             self.rpc = transport.DCERPCTransportFactory(binding)
@@ -280,49 +368,71 @@ def dce_rpc_connect(self, binding, uuid, integrity=False):
             self.rpc.setRemoteHost(self.addr)
 
             # Use Kerberos if we have a TGT
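+            # auth_method sketch: 'auto' tries Kerberos first and falls back to
+            # NTLM on failure; 'kerberos' is strict and marks the host as a
+            # permanent failure instead of falling back.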
-            if hasattr(self.rpc, 'set_kerberos') and self.ad.auth.tgt and self.auth_method in ('auto', 'kerberos'):
+            if (
+                hasattr(self.rpc, "set_kerberos")
+                and self.ad.auth.tgt
+                and self.auth_method in ("auto", "kerberos")
+            ):
                 self.rpc.set_kerberos(True, self.ad.auth.kdc)
                 if not self.TGS:
                     try:
                         self.TGS = self.ad.auth.get_tgs_for_smb(self.hostname)
                     except Exception as exc:
                         logging.debug(traceback.format_exc())
-                        if self.auth_method == 'auto':
-                            logging.warning('Failed to get service ticket for %s, falling back to NTLM auth', self.hostname)
-                            self.auth_method = 'ntlm'
+                        if self.auth_method == "auto":
+                            logging.warning(
+                                "Failed to get service ticket for %s, falling back to NTLM auth",
+                                self.hostname,
+                            )
+                            self.auth_method = "ntlm"
                         else:
-                            logging.warning('Failed to get service ticket for %s, skipping host', self.hostname)
+                            logging.warning(
+                                "Failed to get service ticket for %s, skipping host",
+                                self.hostname,
+                            )
                             self.permanentfailure = True
                             return None
-                if hasattr(self.rpc, 'set_credentials'):
-                    if self.auth_method == 'auto':
+                if hasattr(self.rpc, "set_credentials"):
+                    if self.auth_method == "auto":
                         # Set all we have
-                        self.rpc.set_credentials(self.ad.auth.username, self.ad.auth.password,
-                                                 domain=self.ad.auth.userdomain,
-                                                 lmhash=self.ad.auth.lm_hash,
-                                                 nthash=self.ad.auth.nt_hash,
-                                                 aesKey=self.ad.auth.aeskey,
-                                                 TGS=self.TGS)
-                    elif self.auth_method == 'kerberos':
+                        self.rpc.set_credentials(
+                            self.ad.auth.username,
+                            self.ad.auth.password,
+                            domain=self.ad.auth.userdomain,
+                            lmhash=self.ad.auth.lm_hash,
+                            nthash=self.ad.auth.nt_hash,
+                            aesKey=self.ad.auth.aeskey,
+                            TGS=self.TGS,
+                        )
+                    elif self.auth_method == "kerberos":
                         # Kerberos only
-                        self.rpc.set_credentials(self.ad.auth.username, '',
-                                                 domain=self.ad.auth.userdomain,
-                                                 TGS=self.TGS)
+                        self.rpc.set_credentials(
+                            self.ad.auth.username,
+                            "",
+                            domain=self.ad.auth.userdomain,
+                            TGS=self.TGS,
+                        )
                     else:
                         # NTLM fallback triggered
-                        self.rpc.set_credentials(self.ad.auth.username, self.ad.auth.password,
-                                                 domain=self.ad.auth.userdomain,
-                                                 lmhash=self.ad.auth.lm_hash,
-                                                 nthash=self.ad.auth.nt_hash)
+                        self.rpc.set_credentials(
+                            self.ad.auth.username,
+                            self.ad.auth.password,
+                            domain=self.ad.auth.userdomain,
+                            lmhash=self.ad.auth.lm_hash,
+                            nthash=self.ad.auth.nt_hash,
+                        )
             # Else set the required stuff for NTLM
-            elif hasattr(self.rpc, 'set_credentials'):
-                self.rpc.set_credentials(self.ad.auth.username, self.ad.auth.password,
-                                         domain=self.ad.auth.userdomain,
-                                         lmhash=self.ad.auth.lm_hash,
-                                         nthash=self.ad.auth.nt_hash)
+            elif hasattr(self.rpc, "set_credentials"):
+                self.rpc.set_credentials(
+                    self.ad.auth.username,
+                    self.ad.auth.password,
+                    domain=self.ad.auth.userdomain,
+                    lmhash=self.ad.auth.lm_hash,
+                    nthash=self.ad.auth.nt_hash,
+                )
 
             # Use strict validation if possible
-            if hasattr(self.rpc, 'set_hostname_validation'):
+            if hasattr(self.rpc, "set_hostname_validation"):
                 self.rpc.set_hostname_validation(True, False, self.hostname)
 
             # Uncomment to force SMB2 (especially for development to prevent encryption)
@@ -343,19 +453,28 @@ def dce_rpc_connect(self, binding, uuid, integrity=False):
             try:
                 dce.connect()
             except HostnameValidationExceptions as exc:
-                logging.info('Ignoring host %s since its hostname does not match: %s', self.hostname, str(exc))
+                logging.info(
+                    "Ignoring host %s since its hostname does not match: %s",
+                    self.hostname,
+                    str(exc),
+                )
                 self.permanentfailure = True
                 return None
             except SessionError as exc:
-                if ('STATUS_PIPE_NOT_AVAILABLE' in str(exc) or 'STATUS_OBJECT_NAME_NOT_FOUND' in str(exc)) and 'winreg' in binding.lower():
+                if (
+                    "STATUS_PIPE_NOT_AVAILABLE" in str(exc)
+                    or "STATUS_OBJECT_NAME_NOT_FOUND" in str(exc)
+                ) and "winreg" in binding.lower():
                     # This can happen, silently ignore
                     return None
-                if 'STATUS_MORE_PROCESSING_REQUIRED' in str(exc):
-                    if self.auth_method == 'kerberos':
-                        logging.warning('Kerberos auth failed and no more auth methods to try.')
-                    elif self.auth_method == 'auto':
-                        logging.debug('Kerberos auth failed. Falling back to NTLM')
-                        self.auth_method = 'ntlm'
+                if "STATUS_MORE_PROCESSING_REQUIRED" in str(exc):
+                    if self.auth_method == "kerberos":
+                        logging.warning(
+                            "Kerberos auth failed and no more auth methods to try."
+                        )
+                    elif self.auth_method == "auto":
+                        logging.debug("Kerberos auth failed. Falling back to NTLM")
+                        self.auth_method = "ntlm"
                         # Close connection and retry
                         try:
                             self.rpc.get_smb_connection().close()
@@ -365,7 +484,7 @@ def dce_rpc_connect(self, binding, uuid, integrity=False):
                         return self.dce_rpc_connect(binding, uuid, integrity)
                 # Else, just log it
                 logging.debug(traceback.format_exc())
-                logging.warning('DCE/RPC connection failed: %s', str(exc))
+                logging.warning("DCE/RPC connection failed: %s", str(exc))
                 return None
 
             if self.smbconnection is None:
@@ -376,24 +495,28 @@ def dce_rpc_connect(self, binding, uuid, integrity=False):
 
             # Hostname validation
             authname = self.smbconnection.getServerName()
-            if authname and authname.lower() != self.hostname.split('.')[0].lower():
-                logging.info('Ignoring host %s since its reported name %s does not match', self.hostname, authname)
+            if authname and authname.lower() != self.hostname.split(".")[0].lower():
+                logging.info(
+                    "Ignoring host %s since its reported name %s does not match",
+                    self.hostname,
+                    authname,
+                )
                 self.permanentfailure = True
                 return None
 
             dce.bind(uuid)
         except DCERPCException as e:
             logging.debug(traceback.format_exc())
-            logging.warning('DCE/RPC connection failed: %s', str(e))
+            logging.warning("DCE/RPC connection failed: %s", str(e))
             return None
         except KeyboardInterrupt:
             raise
         except Exception as e:
             logging.debug(traceback.format_exc())
-            logging.warning('DCE/RPC connection failed: %s', e)
+            logging.warning("DCE/RPC connection failed: %s", e)
             return None
         except:
-            logging.warning('DCE/RPC connection failed (unknown error)')
+            logging.warning("DCE/RPC connection failed (unknown error)")
             return None
 
         return dce
@@ -403,36 +526,47 @@ def rpc_get_loggedon(self):
         Query logged on users via RPC.
         Requires admin privs
         """
-        binding = r'ncacn_np:%s[\PIPE\wkssvc]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\wkssvc]" % self.addr
         loggedonusers = set()
         dce = self.dce_rpc_connect(binding, wkst.MSRPC_UUID_WKST)
         if dce is None:
-            logging.warning('Connection failed: %s', binding)
+            logging.warning("Connection failed: %s", binding)
             return
         try:
             # 1 means more detail, including the domain
             resp = wkst.hNetrWkstaUserEnum(dce, 1)
-            for record in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
+            for record in resp["UserInfo"]["WkstaUserInfo"]["Level1"]["Buffer"]:
                 # Skip computer accounts
-                if record['wkui1_username'][-2] == '$':
+                if record["wkui1_username"][-2] == "$":
                     continue
                 # Skip sessions for local accounts
-                if record['wkui1_logon_domain'][:-1].upper() == self.samname[:-1].upper():
+                if (
+                    record["wkui1_logon_domain"][:-1].upper()
+                    == self.samname[:-1].upper()
+                ):
                     continue
-                domain = record['wkui1_logon_domain'][:-1].upper()
+                domain = record["wkui1_logon_domain"][:-1].upper()
                 domain_entry = self.ad.get_domain_by_name(domain)
                 if domain_entry is not None:
-                    domain = ADUtils.ldap2domain(domain_entry['attributes']['distinguishedName'])
-                logging.debug('Found logged on user at %s: %s@%s' % (self.hostname, record['wkui1_username'][:-1], domain))
-                loggedonusers.add((record['wkui1_username'][:-1], domain))
+                    domain = ADUtils.ldap2domain(
+                        domain_entry["attributes"]["distinguishedName"]
+                    )
+                logging.debug(
+                    "Found logged on user at %s: %s@%s"
+                    % (self.hostname, record["wkui1_username"][:-1], domain)
+                )
+                loggedonusers.add((record["wkui1_username"][:-1], domain))
         except DCERPCException as e:
-            if 'rpc_s_access_denied' in str(e):
-                logging.debug('Access denied while enumerating LoggedOn on %s, probably no admin privs', self.hostname)
+            if "rpc_s_access_denied" in str(e):
+                logging.debug(
+                    "Access denied while enumerating LoggedOn on %s, probably no admin privs",
+                    self.hostname,
+                )
             else:
-                logging.debug('Exception connecting to RPC: %s', e)
+                logging.debug("Exception connecting to RPC: %s", e)
         except Exception as e:
-            if 'connection reset' in str(e):
-                logging.debug('Connection was reset: %s', e)
+            if "connection reset" in str(e):
+                logging.debug("Connection was reset: %s", e)
             else:
                 raise e
 
@@ -444,7 +578,7 @@ def rpc_close(self):
             self.smbconnection.logoff()
 
     def rpc_get_sessions(self):
-        binding = r'ncacn_np:%s[\PIPE\srvsvc]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\srvsvc]" % self.addr
 
         dce = self.dce_rpc_connect(binding, srvs.MSRPC_UUID_SRVS)
 
@@ -452,29 +586,32 @@ def rpc_get_sessions(self):
             return
 
         try:
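+            # Info level 10 yields SESSION_INFO_10 entries: client name
+            # (sesi10_cname), user name (sesi10_username) and session times.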
-            resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
+            resp = srvs.hNetrSessionEnum(dce, "\x00", NULL, 10)
         except DCERPCException as e:
-            if 'rpc_s_access_denied' in str(e):
-                logging.debug('Access denied while enumerating Sessions on %s, likely a patched OS', self.hostname)
+            if "rpc_s_access_denied" in str(e):
+                logging.debug(
+                    "Access denied while enumerating Sessions on %s, likely a patched OS",
+                    self.hostname,
+                )
                 return []
             else:
                 raise
         except Exception as e:
-            if str(e).find('Broken pipe') >= 0:
+            if str(e).find("Broken pipe") >= 0:
                 return
             else:
                 raise
 
         sessions = []
 
-        for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
-            userName = session['sesi10_username'][:-1]
-            ip = session['sesi10_cname'][:-1]
+        for session in resp["InfoStruct"]["SessionInfo"]["Level10"]["Buffer"]:
+            userName = session["sesi10_username"][:-1]
+            ip = session["sesi10_cname"][:-1]
             # Strip \\ from IPs
-            if ip[:2] == '\\\\':
+            if ip[:2] == "\\\\":
                 ip = ip[2:]
             # Skip empty IPs
-            if ip == '':
+            if ip == "":
                 continue
             # Skip our connection
             if userName == self.ad.auth.username:
@@ -483,25 +620,27 @@ def rpc_get_sessions(self):
             if len(userName) == 0:
                 continue
             # Skip machine accounts
-            if userName[-1] == '$':
+            if userName[-1] == "$":
                 continue
             # Skip local connections
-            if ip in ['127.0.0.1', '[::1]']:
+            if ip in ["127.0.0.1", "[::1]"]:
                 continue
             # IPv6 address
-            if ip[0] == '[' and ip[-1] == ']':
+            if ip[0] == "[" and ip[-1] == "]":
                 ip = ip[1:-1]
 
-            logging.info('User %s is logged in on %s from %s' % (userName, self.hostname, ip))
+            logging.info(
+                "User %s is logged in on %s from %s" % (userName, self.hostname, ip)
+            )
 
-            sessions.append({'user': userName, 'source': ip, 'target': self.hostname})
+            sessions.append({"user": userName, "source": ip, "target": self.hostname})
 
         dce.disconnect()
 
         return sessions
 
     def rpc_get_registry_sessions(self):
-        binding = r'ncacn_np:%s[\pipe\winreg]' % self.addr
+        binding = r"ncacn_np:%s[\pipe\winreg]" % self.addr
 
         # Try to bind to the Remote Registry RPC interface; if it fails, try again once.
         binding_attempts = 2
@@ -520,7 +659,7 @@ def rpc_get_registry_sessions(self):
 
         # If the two binding attempts failed, silently return.
         if dce is None:
-            logging.debug('Failed opening remote registry after 2 attempts')
+            logging.debug("Failed opening remote registry after 2 attempts")
             return
 
         registry_sessions = []
@@ -529,34 +668,39 @@ def rpc_get_registry_sessions(self):
         try:
             resp = rrp.hOpenUsers(dce)
         except DCERPCException as e:
-            if 'rpc_s_access_denied' in str(e):
-                logging.debug('Access denied while enumerating Registry Sessions on %s', self.hostname)
+            if "rpc_s_access_denied" in str(e):
+                logging.debug(
+                    "Access denied while enumerating Registry Sessions on %s",
+                    self.hostname,
+                )
                 return []
             else:
-                logging.debug('Exception connecting to RPC: %s', e)
+                logging.debug("Exception connecting to RPC: %s", e)
         except Exception as e:
-            if str(e).find('Broken pipe') >= 0:
+            if str(e).find("Broken pipe") >= 0:
                 return
             else:
                 raise
 
         # Once we have a handle on the remote HKU hive, we can call 'BaseRegEnumKey' in a loop
         # to enumerate the subkeys whose names are the SIDs of the logged-in users.
-        key_handle = resp['phKey']
+        key_handle = resp["phKey"]
         index = 1
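+        # Illustrative example: HKU subkeys look like
+        # "S-1-5-21-3623811015-3361044348-30300820-1013"; the filter below keeps
+        # only such domain SIDs, skipping ".DEFAULT", "S-1-5-18" and "_Classes" keys.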
         sid_filter = "^S-1-5-21-[0-9]+-[0-9]+-[0-9]+-[0-9]+$"
         while True:
             try:
                 resp = rrp.hBaseRegEnumKey(dce, key_handle, index)
-                sid = resp['lpNameOut'].rstrip('\0')
+                sid = resp["lpNameOut"].rstrip("\0")
                 if re.match(sid_filter, sid):
-                    logging.info('User with SID %s is logged in on %s' % (sid, self.hostname))
+                    logging.info(
+                        "User with SID %s is logged in on %s" % (sid, self.hostname)
+                    )
                     # Ignore local accounts (best effort, self.sid is only
                     # populated if we enumerated a group before)
                     if self.sid and sid.startswith(self.sid):
                         index += 1
                         continue
-                    registry_sessions.append({'user': sid})
+                    registry_sessions.append({"user": sid})
                 index += 1
             except:
                 break
@@ -568,8 +712,9 @@ def rpc_get_registry_sessions(self):
 
     """
     """
+
     def rpc_get_domain_trusts(self):
-        binding = r'ncacn_np:%s[\PIPE\netlogon]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\netlogon]" % self.addr
 
         dce = self.dce_rpc_connect(binding, nrpc.MSRPC_UUID_NRPC)
 
@@ -578,131 +723,149 @@ def rpc_get_domain_trusts(self):
 
         try:
             req = nrpc.DsrEnumerateDomainTrusts()
-            req['ServerName'] = NULL
-            req['Flags'] = 1
+            req["ServerName"] = NULL
+            req["Flags"] = 1
             resp = dce.request(req)
         except Exception as e:
             raise e
 
-        for domain in resp['Domains']['Domains']:
-            logging.info('Found domain trust from %s to %s', self.hostname, domain['NetbiosDomainName'])
-            self.trusts.append({'domain': domain['DnsDomainName'],
-                                'type': domain['TrustType'],
-                                'flags': domain['Flags']})
+        for domain in resp["Domains"]["Domains"]:
+            logging.info(
+                "Found domain trust from %s to %s",
+                self.hostname,
+                domain["NetbiosDomainName"],
+            )
+            self.trusts.append(
+                {
+                    "domain": domain["DnsDomainName"],
+                    "type": domain["TrustType"],
+                    "flags": domain["Flags"],
+                }
+            )
 
         dce.disconnect()
 
-
     def rpc_get_services(self):
         """
         Query services with stored credentials via RPC.
         These credentials can be dumped with mimikatz via lsadump::secrets or via secretsdump.py
         """
-        binding = r'ncacn_np:%s[\PIPE\svcctl]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\svcctl]" % self.addr
         serviceusers = []
         dce = self.dce_rpc_connect(binding, scmr.MSRPC_UUID_SCMR)
         if dce is None:
             return serviceusers
         try:
             resp = scmr.hROpenSCManagerW(dce)
-            scManagerHandle = resp['lpScHandle']
+            scManagerHandle = resp["lpScHandle"]
             # TODO: Figure out if filtering out service types makes sense
-            resp = scmr.hREnumServicesStatusW(dce,
-                                              scManagerHandle,
-                                              dwServiceType=scmr.SERVICE_WIN32_OWN_PROCESS,
-                                              dwServiceState=scmr.SERVICE_STATE_ALL)
+            resp = scmr.hREnumServicesStatusW(
+                dce,
+                scManagerHandle,
+                dwServiceType=scmr.SERVICE_WIN32_OWN_PROCESS,
+                dwServiceState=scmr.SERVICE_STATE_ALL,
+            )
             # TODO: Skip well-known services to save on traffic
             for i in range(len(resp)):
                 try:
-                    ans = scmr.hROpenServiceW(dce, scManagerHandle, resp[i]['lpServiceName'][:-1])
-                    serviceHandle = ans['lpServiceHandle']
+                    ans = scmr.hROpenServiceW(
+                        dce, scManagerHandle, resp[i]["lpServiceName"][:-1]
+                    )
+                    serviceHandle = ans["lpServiceHandle"]
                     svcresp = scmr.hRQueryServiceConfigW(dce, serviceHandle)
-                    svc_user = svcresp['lpServiceConfig']['lpServiceStartName'][:-1]
-                    if '@' in svc_user:
-                        logging.info("Found user service: %s running as %s on %s",
-                                     resp[i]['lpServiceName'][:-1],
-                                     svc_user,
-                                     self.hostname)
+                    svc_user = svcresp["lpServiceConfig"]["lpServiceStartName"][:-1]
+                    if "@" in svc_user:
+                        logging.info(
+                            "Found user service: %s running as %s on %s",
+                            resp[i]["lpServiceName"][:-1],
+                            svc_user,
+                            self.hostname,
+                        )
                         serviceusers.append(svc_user)
                 except DCERPCException as e:
-                    if 'rpc_s_access_denied' not in str(e):
-                        logging.debug('Exception querying service %s via RPC: %s', resp[i]['lpServiceName'][:-1], e)
+                    if "rpc_s_access_denied" not in str(e):
+                        logging.debug(
+                            "Exception querying service %s via RPC: %s",
+                            resp[i]["lpServiceName"][:-1],
+                            e,
+                        )
         except DCERPCException as e:
-            logging.debug('Exception connecting to RPC: %s', e)
+            logging.debug("Exception connecting to RPC: %s", e)
         except Exception as e:
-            if 'connection reset' in str(e):
-                logging.debug('Connection was reset: %s', e)
+            if "connection reset" in str(e):
+                logging.debug("Connection was reset: %s", e)
             else:
                 raise e
 
         dce.disconnect()
         return serviceusers
 
-
     def rpc_get_schtasks(self):
         """
         Query the scheduled tasks via RPC. Requires admin privileges.
         These credentials can be dumped with mimikatz via vault::cred
         """
         # Blacklisted folders (Default ones)
-        blacklist = [u'Microsoft\x00']
+        blacklist = ["Microsoft\x00"]
         # Start with the root folder
-        folders = ['\\']
+        folders = ["\\"]
         tasks = []
         schtaskusers = []
-        binding = r'ncacn_np:%s[\PIPE\atsvc]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\atsvc]" % self.addr
         try:
             dce = self.dce_rpc_connect(binding, tsch.MSRPC_UUID_TSCHS, True)
             if dce is None:
                 return schtaskusers
             # Get root folder
-            resp = tsch.hSchRpcEnumFolders(dce, '\\')
-            for item in resp['pNames']:
-                data = item['Data']
+            resp = tsch.hSchRpcEnumFolders(dce, "\\")
+            for item in resp["pNames"]:
+                data = item["Data"]
                 if data not in blacklist:
-                    folders.append('\\'+data)
+                    folders.append("\\" + data)
 
             # Enumerate the folders we found
             # subfolders not supported yet
             for folder in folders:
                 try:
                     resp = tsch.hSchRpcEnumTasks(dce, folder)
-                    for item in resp['pNames']:
-                        data = item['Data']
-                        if folder != '\\':
+                    for item in resp["pNames"]:
+                        data = item["Data"]
+                        if folder != "\\":
                             # Make sure to strip the null byte
-                            tasks.append(folder[:-1]+'\\'+data)
+                            tasks.append(folder[:-1] + "\\" + data)
                         else:
-                            tasks.append(folder+data)
+                            tasks.append(folder + data)
                 except DCERPCException as e:
-                    logging.debug('Error enumerating task folder %s: %s', folder, e)
+                    logging.debug("Error enumerating task folder %s: %s", folder, e)
             for task in tasks:
                 try:
                     resp = tsch.hSchRpcRetrieveTask(dce, task)
                     # This returns a tuple (sid, logontype) or None
-                    userinfo = ADUtils.parse_task_xml(resp['pXml'])
+                    userinfo = ADUtils.parse_task_xml(resp["pXml"])
                     if userinfo:
-                        if userinfo[1] == u'Password':
+                        if userinfo[1] == "Password":
                             # Convert to byte string because our cache format is in bytes
                             schtaskusers.append(str(userinfo[0]))
-                            logging.info('Found scheduled task %s on %s with stored credentials for SID %s',
-                                         task,
-                                         self.hostname,
-                                         userinfo[0])
+                            logging.info(
+                                "Found scheduled task %s on %s with stored credentials for SID %s",
+                                task,
+                                self.hostname,
+                                userinfo[0],
+                            )
                 except DCERPCException as e:
-                    logging.debug('Error querying task %s: %s', task, e)
+                    logging.debug("Error querying task %s: %s", task, e)
         except DCERPCException as e:
-            logging.debug('Exception enumerating scheduled tasks: %s', e)
+            logging.debug("Exception enumerating scheduled tasks: %s", e)
 
         dce.disconnect()
         return schtaskusers
 
-
     """
     This magic is mostly borrowed from impacket/examples/netview.py
     """
+
     def rpc_get_group_members(self, group_rid, resultlist):
-        binding = r'ncacn_np:%s[\PIPE\samr]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\samr]" % self.addr
         unresolved = []
         dce = self.dce_rpc_connect(binding, samr.MSRPC_UUID_SAMR)
 
@@ -711,48 +874,52 @@ def rpc_get_group_members(self, group_rid, resultlist):
 
         try:
             resp = samr.hSamrConnect(dce)
-            serverHandle = resp['ServerHandle']
+            serverHandle = resp["ServerHandle"]
             # Attempt to get the SID from this computer to filter local accounts later
             try:
-                resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle, self.samname[:-1])
-                self.sid = resp['DomainId'].formatCanonical()
+                resp = samr.hSamrLookupDomainInSamServer(
+                    dce, serverHandle, self.samname[:-1]
+                )
+                self.sid = resp["DomainId"].formatCanonical()
             # This doesn't always work (for example on DCs)
             except DCERPCException as e:
                 # Make it a string which is guaranteed not to match a SID
-                self.sid = 'UNKNOWN'
-
+                self.sid = "UNKNOWN"
 
             # Enumerate the domains known to this computer
             resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
-            domains = resp['Buffer']['Buffer']
+            domains = resp["Buffer"]["Buffer"]
 
             # Query the builtin domain (derived from this SID)
             sid = RPC_SID()
-            sid.fromCanonical('S-1-5-32')
+            sid.fromCanonical("S-1-5-32")
 
-            logging.debug('Opening domain handle')
+            logging.debug("Opening domain handle")
             # Open a handle to this domain
-            resp = samr.hSamrOpenDomain(dce,
-                                        serverHandle=serverHandle,
-                                        desiredAccess=samr.DOMAIN_LOOKUP | MAXIMUM_ALLOWED,
-                                        domainId=sid)
-            domainHandle = resp['DomainHandle']
+            resp = samr.hSamrOpenDomain(
+                dce,
+                serverHandle=serverHandle,
+                desiredAccess=samr.DOMAIN_LOOKUP | MAXIMUM_ALLOWED,
+                domainId=sid,
+            )
+            domainHandle = resp["DomainHandle"]
             try:
-                resp = samr.hSamrOpenAlias(dce,
-                                           domainHandle,
-                                           desiredAccess=samr.ALIAS_LIST_MEMBERS | MAXIMUM_ALLOWED,
-                                           aliasId=group_rid)
+                resp = samr.hSamrOpenAlias(
+                    dce,
+                    domainHandle,
+                    desiredAccess=samr.ALIAS_LIST_MEMBERS | MAXIMUM_ALLOWED,
+                    aliasId=group_rid,
+                )
             except samr.DCERPCSessionError as error:
                 # Group does not exist
-                if 'STATUS_NO_SUCH_ALIAS' in str(error):
-                    logging.debug('No group with RID %d exists', group_rid)
+                if "STATUS_NO_SUCH_ALIAS" in str(error):
+                    logging.debug("No group with RID %d exists", group_rid)
                     return
-            resp = samr.hSamrGetMembersInAlias(dce,
-                                               aliasHandle=resp['AliasHandle'])
-            for member in resp['Members']['Sids']:
-                sid_string = member['SidPointer'].formatCanonical()
+            resp = samr.hSamrGetMembersInAlias(dce, aliasHandle=resp["AliasHandle"])
+            for member in resp["Members"]["Sids"]:
+                sid_string = member["SidPointer"].formatCanonical()
 
-                logging.debug('Found %d SID: %s', group_rid, sid_string)
+                logging.debug("Found %d SID: %s", group_rid, sid_string)
                 if not sid_string.startswith(self.sid):
                     # If the sid is known, we can add the admin value directly
                     try:
@@ -760,29 +927,35 @@ def rpc_get_group_members(self, group_rid, resultlist):
                         if siddata is None:
                             unresolved.append(sid_string)
                         else:
-                            logging.debug('Sid is cached: %s', siddata['principal'])
-                            resultlist.append({'ObjectIdentifier': sid_string,
-                                               'ObjectType': siddata['type'].capitalize()})
+                            logging.debug("Sid is cached: %s", siddata["principal"])
+                            resultlist.append(
+                                {
+                                    "ObjectIdentifier": sid_string,
+                                    "ObjectType": siddata["type"].capitalize(),
+                                }
+                            )
                     except KeyError:
                         # Append it to the list of unresolved SIDs
                         unresolved.append(sid_string)
                 else:
-                    logging.debug('Ignoring local group %s', sid_string)
+                    logging.debug("Ignoring local group %s", sid_string)
         except DCERPCException as e:
-            if 'rpc_s_access_denied' in str(e):
-                logging.debug('Access denied while enumerating groups on %s, likely a patched OS', self.hostname)
+            if "rpc_s_access_denied" in str(e):
+                logging.debug(
+                    "Access denied while enumerating groups on %s, likely a patched OS",
+                    self.hostname,
+                )
             else:
                 raise
         except Exception as e:
-            if 'connection reset' in str(e):
-                logging.debug('Connection was reset: %s', e)
+            if "connection reset" in str(e):
+                logging.debug("Connection was reset: %s", e)
             else:
                 raise e
 
         dce.disconnect()
         return unresolved
 
-
     def rpc_resolve_sids(self, sids, resultlist):
         """
         Resolve any remaining unknown SIDs for local accounts.
@@ -790,7 +963,7 @@ def rpc_resolve_sids(self, sids, resultlist):
         # If all sids were already cached, we can just return
         if sids is None or len(sids) == 0:
             return
-        binding = r'ncacn_np:%s[\PIPE\lsarpc]' % self.addr
+        binding = r"ncacn_np:%s[\PIPE\lsarpc]" % self.addr
 
         dce = self.dce_rpc_connect(binding, lsat.MSRPC_UUID_LSAT)
 
@@ -798,14 +971,16 @@ def rpc_resolve_sids(self, sids, resultlist):
             return
 
         try:
-            resp = lsad.hLsarOpenPolicy2(dce, lsat.POLICY_LOOKUP_NAMES | MAXIMUM_ALLOWED)
+            resp = lsad.hLsarOpenPolicy2(
+                dce, lsat.POLICY_LOOKUP_NAMES | MAXIMUM_ALLOWED
+            )
         except Exception as e:
-            if str(e).find('Broken pipe') >= 0:
+            if str(e).find("Broken pipe") >= 0:
                 return
             else:
                 raise
 
-        policyHandle = resp['PolicyHandle']
+        policyHandle = resp["PolicyHandle"]
 
         # We could look up the SIDs all at once, but if not all SIDs are mapped, we don't know which
         # ones were resolved and which not, making it impossible to map them in the cache.
@@ -813,41 +988,57 @@ def rpc_resolve_sids(self, sids, resultlist):
         # in our cache and this function doesn't even need to get called anymore.
         for sid_string in sids:
             try:
-                resp = lsat.hLsarLookupSids(dce, policyHandle, [sid_string], lsat.LSAP_LOOKUP_LEVEL.enumItems.LsapLookupWksta)
+                resp = lsat.hLsarLookupSids(
+                    dce,
+                    policyHandle,
+                    [sid_string],
+                    lsat.LSAP_LOOKUP_LEVEL.enumItems.LsapLookupWksta,
+                )
             except DCERPCException as e:
-                if str(e).find('STATUS_NONE_MAPPED') >= 0:
-                    logging.warning('SID %s lookup failed, return status: STATUS_NONE_MAPPED', sid_string)
+                if str(e).find("STATUS_NONE_MAPPED") >= 0:
+                    logging.warning(
+                        "SID %s lookup failed, return status: STATUS_NONE_MAPPED",
+                        sid_string,
+                    )
                     # Try next SID
                     continue
-                elif str(e).find('STATUS_SOME_NOT_MAPPED') >= 0:
+                elif str(e).find("STATUS_SOME_NOT_MAPPED") >= 0:
                     # Not all could be resolved, work with the ones that could
                     resp = e.get_packet()
                 else:
                     raise
             except NetBIOSTimeout as e:
-                logging.warning('Connection timed out while resolving sids')
+                logging.warning("Connection timed out while resolving sids")
                 continue
 
             domains = []
-            for entry in resp['ReferencedDomains']['Domains']:
-                domains.append(entry['Name'])
+            for entry in resp["ReferencedDomains"]["Domains"]:
+                domains.append(entry["Name"])
 
-            for entry in resp['TranslatedNames']['Names']:
-                domain = domains[entry['DomainIndex']]
+            for entry in resp["TranslatedNames"]["Names"]:
+                domain = domains[entry["DomainIndex"]]
                 domain_entry = self.ad.get_domain_by_name(domain)
                 if domain_entry is not None:
-                    domain = ADUtils.ldap2domain(domain_entry['attributes']['distinguishedName'])
+                    domain = ADUtils.ldap2domain(
+                        domain_entry["attributes"]["distinguishedName"]
+                    )
                 # TODO: what if it isn't? Should we fall back to LDAP?
 
-                if entry['Name'] != '':
+                if entry["Name"] != "":
                     resolved_entry = ADUtils.resolve_sid_entry(entry, domain)
-                    logging.debug('Resolved SID to name: %s', resolved_entry['principal'])
-                    resultlist.append({'ObjectIdentifier': sid_string,
-                                       'ObjectType': resolved_entry['type'].capitalize()})
+                    logging.debug(
+                        "Resolved SID to name: %s", resolved_entry["principal"]
+                    )
+                    resultlist.append(
+                        {
+                            "ObjectIdentifier": sid_string,
+                            "ObjectType": resolved_entry["type"].capitalize(),
+                        }
+                    )
                     # Add it to our cache
                     self.ad.sidcache.put(sid_string, resolved_entry)
                 else:
-                    logging.warning('Resolved name is empty [%s]', entry)
+                    logging.warning("Resolved name is empty [%s]", entry)
         try:
             dce.disconnect()
         except NetBIOSError:
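
A note on the pattern above: rpc_resolve_sids deliberately looks SIDs up one at a
time so that every successful resolution can be cached, as the comment before its
loop explains. A minimal sketch of that cache-first shape, independent of impacket
(resolve_one and the cache dict are hypothetical stand-ins for the LsarLookupSids
call and the AD SID cache):

    def resolve_sids_cached(sids, cache, resolve_one):
        """Resolve SIDs one by one so each successful hit can be cached."""
        results = []
        for sid in sids:
            if sid in cache:  # cache hit: skip the remote lookup entirely
                results.append(cache[sid])
                continue
            try:
                entry = resolve_one(sid)  # e.g. one LsarLookupSids call per SID
            except LookupError:  # analogous to STATUS_NONE_MAPPED: try next SID
                continue
            cache[sid] = entry  # later runs never ask the server for this SID again
            results.append(entry)
        return results

Looking all SIDs up in a single call would cost fewer round trips, but a partial
STATUS_SOME_NOT_MAPPED response would not say which inputs resolved, which is
exactly the trade-off the comment in rpc_resolve_sids describes.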
diff --git a/bloodhound/ad/domain.py b/bloodhound/ad/domain.py
index 9b1c7e9..43ba815 100644
--- a/bloodhound/ad/domain.py
+++ b/bloodhound/ad/domain.py
@@ -30,9 +30,23 @@
 from uuid import UUID
 from dns import resolver
 from ldap3 import ALL_ATTRIBUTES, BASE, SUBTREE, LEVEL
-from ldap3.core.exceptions import LDAPKeyError, LDAPAttributeError, LDAPCursorError, LDAPNoSuchObjectResult, LDAPSocketReceiveError, LDAPSocketSendError, LDAPCommunicationError
+from ldap3.core.exceptions import (
+    LDAPKeyError,
+    LDAPAttributeError,
+    LDAPCursorError,
+    LDAPNoSuchObjectResult,
+    LDAPSocketReceiveError,
+    LDAPSocketSendError,
+    LDAPCommunicationError,
+)
 from ldap3.protocol.microsoft import security_descriptor_control
-from bloodhound.ad.utils import ADUtils, DNSCache, SidCache, SamCache, CollectionException
+from bloodhound.ad.utils import (
+    ADUtils,
+    DNSCache,
+    SidCache,
+    SamCache,
+    CollectionException,
+)
 from bloodhound.ad.computer import ADComputer
 from bloodhound.enumeration.objectresolver import ObjectResolver
 from future.utils import itervalues, iteritems, native_str
@@ -40,6 +54,8 @@
 """
 Active Directory Domain Controller
 """
+
+
 class ADDC(ADComputer):
     def __init__(self, hostname=None, ad=None):
         ADComputer.__init__(self, hostname)
@@ -60,8 +76,8 @@ def ldap_connect(self, protocol=None, resolver=False):
         if not protocol:
             protocol = self.ad.ldap_default_protocol
 
-        logging.info('Connecting to LDAP server: %s' % self.hostname)
-        logging.debug('Using protocol %s' % protocol)
+        logging.info("Connecting to LDAP server: %s" % self.hostname)
+        logging.debug("Using protocol %s" % protocol)
 
         # Convert the hostname to an IP, this prevents ldap3 from doing it
         # which doesn't use our custom nameservers
@@ -69,15 +85,19 @@ def ldap_connect(self, protocol=None, resolver=False):
         for r in q:
             ip = r.address
 
-        ldap = self.ad.auth.getLDAPConnection(hostname=self.hostname, ip=ip,
-                                              baseDN=self.ad.baseDN, protocol=protocol)
+        ldap = self.ad.auth.getLDAPConnection(
+            hostname=self.hostname,
+            ip_address=ip,
+            base_dn=self.ad.baseDN,
+            protocol=protocol,
+        )
         if resolver:
             self.resolverldap = ldap
         else:
             self.ldap = ldap
         return ldap is not None
 
-    def gc_connect(self, protocol='ldap'):
+    def gc_connect(self, protocol="ldap"):
         """
         Connect to the global catalog
         """
@@ -89,13 +109,15 @@ def gc_connect(self, protocol='ldap'):
             try:
                 initial_server = self.ad.gcs()[0]
             except IndexError:
-                logging.error('Could not find a Global Catalog in this domain!'\
-                              ' Resolving will be unreliable in forests with multiple domains')
+                logging.error(
+                    "Could not find a Global Catalog in this domain!"
+                    " Resolving will be unreliable in forests with multiple domains"
+                )
                 return False
         try:
             # Convert the hostname to an IP, this prevents ldap3 from doing it
             # which doesn't use our custom nameservers
-            logging.info('Connecting to GC LDAP server: %s' % initial_server)
+            logging.info("Connecting to GC LDAP server: %s" % initial_server)
             q = self.ad.dnsresolver.query(initial_server, tcp=self.ad.dns_tcp)
             for r in q:
                 ip = r.address
@@ -107,7 +129,7 @@ def gc_connect(self, protocol='ldap'):
                 try:
                     # Convert the hostname to an IP, this prevents ldap3 from doing it
                     # which doesn't use our custom nameservers
-                    logging.info('Connecting to GC LDAP server: %s' % server)
+                    logging.info("Connecting to GC LDAP server: %s" % server)
                     q = self.ad.dnsresolver.query(server, tcp=self.ad.dns_tcp)
                     for r in q:
                         ip = r.address
@@ -115,11 +137,27 @@ def gc_connect(self, protocol='ldap'):
                 except (resolver.NXDOMAIN, resolver.Timeout):
                     continue
 
-        self.gcldap = self.ad.auth.getLDAPConnection(hostname=self.hostname, ip=ip, gc=True,
-                                                     baseDN=self.ad.baseDN, protocol=protocol)
+        self.gcldap = self.ad.auth.getLDAPConnection(
+            hostname=self.hostname,
+            ip_address=ip,
+            base_dn=self.ad.baseDN,
+            protocol=protocol,
+            use_global_catalog=True,
+        )
         return self.gcldap is not None
 
-    def search(self, search_filter='(objectClass=*)',attributes=None, search_base=None, generator=True, use_gc=False, use_resolver=False, query_sd=False, is_retry=False,  search_scope=SUBTREE,):
+    def search(
+        self,
+        search_filter="(objectClass=*)",
+        attributes=None,
+        search_base=None,
+        generator=True,
+        use_gc=False,
+        use_resolver=False,
+        query_sd=False,
+        is_retry=False,
+        search_scope=SUBTREE,
+    ):
         """
         Search for objects in LDAP or Global Catalog LDAP.
         """
@@ -152,45 +190,71 @@ def search(self, search_filter='(objectClass=*)',attributes=None, search_base=No
                 searcher = self.ldap
 
         hadresults = False
-        sresult = searcher.extend.standard.paged_search(search_base,
-                                                        search_filter,
-                                                        attributes=attributes,
-                                                        paged_size=200,
-                                                        search_scope=search_scope,
-                                                        controls=controls,
-                                                        generator=generator)
+        sresult = searcher.extend.standard.paged_search(
+            search_base,
+            search_filter,
+            attributes=attributes,
+            paged_size=200,
+            search_scope=search_scope,
+            controls=controls,
+            generator=generator,
+        )
         try:
             # Use a generator for the result regardless of if the search function uses one
             for e in sresult:
-                if e['type'] != 'searchResEntry':
+                if e["type"] != "searchResEntry":
                     continue
                 if not hadresults:
                     hadresults = True
                 yield e
         except LDAPNoSuchObjectResult:
             # This may indicate the object doesn't exist or access is denied
-            logging.warning('LDAP Server reported that the search in %s for %s does not exist.', search_base, search_filter)
-        except (LDAPSocketReceiveError, LDAPSocketSendError, LDAPCommunicationError) as e:
+            logging.warning(
+                "LDAP server reported that search base %s does not exist (filter %s)",
+                search_base,
+                search_filter,
+            )
+        except (
+            LDAPSocketReceiveError,
+            LDAPSocketSendError,
+            LDAPCommunicationError,
+        ) as e:
             if is_retry:
-                logging.error('Connection to LDAP server lost during data gathering - reconnect failed - giving up on query %s', search_filter)
+                logging.error(
+                    "Connection to LDAP server lost during data gathering - reconnect failed - giving up on query %s",
+                    search_filter,
+                )
             else:
                 if hadresults:
-                    logging.error('Connection to LDAP server lost during data gathering. Query was cut short. Data may be inaccurate for query %s', search_filter)
+                    logging.error(
+                        "Connection to LDAP server lost during data gathering. Query was cut short. Data may be inaccurate for query %s",
+                        search_filter,
+                    )
                     if use_gc:
                         self.gc_connect()
                     else:
                         self.ldap_connect(resolver=use_resolver)
                 else:
-                    logging.warning('Re-establishing connection with server')
+                    logging.warning("Re-establishing connection with server")
                     if use_gc:
                         self.gc_connect()
                     else:
                         self.ldap_connect(resolver=use_resolver)
                     # Try again
-                    yield from self.search(search_filter, attributes, search_base, generator, use_gc, use_resolver, query_sd, is_retry=True)
-
-
-    def ldap_get_single(self, qobject, attributes=None, use_gc=False, use_resolver=False, is_retry=False):
+                    yield from self.search(
+                        search_filter,
+                        attributes,
+                        search_base,
+                        generator,
+                        use_gc,
+                        use_resolver,
+                        query_sd,
+                        is_retry=True,
+                    )
+
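+    # The reconnect handling above is worth a note: on a socket error, search()
+    # re-establishes the connection and recurses exactly once, with is_retry=True
+    # guarding against an infinite retry loop. A minimal sketch of that
+    # retry-once shape (fetch and reconnect are hypothetical stand-ins for the
+    # paged search and the ldap_connect/gc_connect calls):
+    #
+    #     def query_with_retry(fetch, reconnect, is_retry=False):
+    #         """Run fetch(); on connection loss, reconnect and retry once."""
+    #         try:
+    #             return fetch()
+    #         except ConnectionError:
+    #             if is_retry:  # second failure in a row: give up, don't loop
+    #                 return None
+    #             reconnect()
+    #             return query_with_retry(fetch, reconnect, is_retry=True)
+    #
+    # Because search() is a generator, results already yielded before the
+    # connection dropped cannot be replayed; that is why it only retries when
+    # nothing had been yielded yet and otherwise logs that the query was cut
+    # short.
+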
+    def ldap_get_single(
+        self, qobject, attributes=None, use_gc=False, use_resolver=False, is_retry=False
+    ):
         """
         Get a single object, requires full DN to object.
         This function supports searching both in the local directory and the Global Catalog.
@@ -207,48 +271,70 @@ def ldap_get_single(self, qobject, attributes=None, use_gc=False, use_resolver=F
         if attributes is None or attributes == []:
             attributes = ALL_ATTRIBUTES
         try:
-            sresult = searcher.extend.standard.paged_search(qobject,
-                                                            '(objectClass=*)',
-                                                            search_scope=BASE,
-                                                            attributes=attributes,
-                                                            paged_size=10,
-                                                            generator=False)
-        except (LDAPSocketReceiveError, LDAPSocketSendError, LDAPCommunicationError) as e:
+            sresult = searcher.extend.standard.paged_search(
+                qobject,
+                "(objectClass=*)",
+                search_scope=BASE,
+                attributes=attributes,
+                paged_size=10,
+                generator=False,
+            )
+        except (
+            LDAPSocketReceiveError,
+            LDAPSocketSendError,
+            LDAPCommunicationError,
+        ) as e:
             if is_retry:
-                logging.error('Connection to LDAP server lost during object resolving - reconnect failed - giving up on resolving %s', qobject)
+                logging.error(
+                    "Connection to LDAP server lost during object resolving - reconnect failed - giving up on resolving %s",
+                    qobject,
+                )
                 return None
             else:
-                logging.warning('Re-establishing connection with server')
+                logging.warning("Re-establishing connection with server")
                 if use_gc:
                     self.gc_connect()
                 else:
                     self.ldap_connect(resolver=use_resolver)
                 # Try again
-                return self.ldap_get_single(qobject, attributes, use_gc, use_resolver, is_retry=True)
+                return self.ldap_get_single(
+                    qobject, attributes, use_gc, use_resolver, is_retry=True
+                )
         except LDAPNoSuchObjectResult:
             # This may indicate the object doesn't exist or access is denied
-            logging.warning('LDAP Server reported that the object %s does not exist.', qobject)
+            logging.warning(
+                "LDAP Server reported that the object %s does not exist.", qobject
+            )
             return None
         for e in sresult:
-            if e['type'] != 'searchResEntry':
+            if e["type"] != "searchResEntry":
                 continue
             return e
 
     def get_domain_controllers(self):
-        entries = self.search('(userAccountControl:1.2.840.113556.1.4.803:=8192)',
-                              ['dnshostname', 'samaccounttype', 'samaccountname',
-                               'serviceprincipalname', 'objectSid'])
+        entries = self.search(
+            "(userAccountControl:1.2.840.113556.1.4.803:=8192)",
+            [
+                "dnshostname",
+                "samaccounttype",
+                "samaccountname",
+                "serviceprincipalname",
+                "objectSid",
+            ],
+        )
 
         return entries
 
-
     def get_netbios_name(self, context):
         try:
-            entries = self.search('(ncname=%s)' % context,
-                                  ['nETBIOSName'],
-                                  search_base="CN=Partitions,%s" % self.ldap.server.info.other['configurationNamingContext'][0])
+            entries = self.search(
+                "(ncname=%s)" % context,
+                ["nETBIOSName"],
+                search_base="CN=Partitions,%s"
+                % self.ldap.server.info.other["configurationNamingContext"][0],
+            )
         except (LDAPAttributeError, LDAPCursorError) as e:
-            logging.warning('Could not determine NetBiosname of the domain: %s', str(e))
+            logging.warning("Could not determine NetBiosname of the domain: %s", str(e))
         return next(entries)
 
     def get_objecttype(self):
@@ -260,58 +346,66 @@ def get_objecttype(self):
         if self.ldap is None:
             self.ldap_connect()
 
-        sresult = self.ldap.extend.standard.paged_search(self.ldap.server.info.other['schemaNamingContext'][0],
-                                                         '(objectClass=*)',
-                                                         attributes=['name', 'schemaidguid'])
+        sresult = self.ldap.extend.standard.paged_search(
+            self.ldap.server.info.other["schemaNamingContext"][0],
+            "(objectClass=*)",
+            attributes=["name", "schemaidguid"],
+        )
         for res in sresult:
-            if res['attributes']['schemaIDGUID']:
-                guid = str(UUID(bytes_le=res['attributes']['schemaIDGUID']))
-                self.objecttype_guid_map[res['attributes']['name'].lower()] = guid
+            if res["attributes"]["schemaIDGUID"]:
+                guid = str(UUID(bytes_le=res["attributes"]["schemaIDGUID"]))
+                self.objecttype_guid_map[res["attributes"]["name"].lower()] = guid
 
-        if 'ms-mcs-admpwdexpirationtime' in self.objecttype_guid_map:
-            logging.debug('Found LAPS attributes in schema')
+        if "ms-mcs-admpwdexpirationtime" in self.objecttype_guid_map:
+            logging.debug("Found LAPS attributes in schema")
             self.ad.has_laps = True
         else:
-            logging.debug('No LAPS attributes found in schema')
+            logging.debug("No LAPS attributes found in schema")
 
-        if 'ms-ds-key-credential-link' in self.objecttype_guid_map:
-            logging.debug('Found KeyCredentialLink attributes in schema')
+        if "ms-ds-key-credential-link" in self.objecttype_guid_map:
+            logging.debug("Found KeyCredentialLink attributes in schema")
             self.ad.has_keycredlink = True
         else:
-            logging.debug('No KeyCredentialLink attributes found in schema')
+            logging.debug("No KeyCredentialLink attributes found in schema")
 
     def get_domains(self, acl=False):
         """
         Function to get domains. This should only return the current domain.
         """
-        entries = self.search('(objectClass=domain)',
-                              [],
-                              generator=True,
-                              query_sd=acl)
+        entries = self.search("(objectClass=domain)", [], generator=True, query_sd=acl)
 
         entriesNum = 0
         for entry in entries:
             entriesNum += 1
             # Todo: actually use these objects instead of discarding them
             # means rewriting other functions
-            domain_object = ADDomain.fromLDAP(entry['attributes']['distinguishedName'], entry['attributes']['objectSid'])
+            domain_object = ADDomain.fromLDAP(
+                entry["attributes"]["distinguishedName"],
+                entry["attributes"]["objectSid"],
+            )
             self.ad.domain_object = domain_object
-            self.ad.domains[entry['attributes']['distinguishedName']] = entry
+            self.ad.domains[entry["attributes"]["distinguishedName"]] = entry
             try:
-                nbentry = self.get_netbios_name(entry['attributes']['distinguishedName'])
-                self.ad.nbdomains[nbentry['attributes']['nETBIOSName']] = entry
+                nbentry = self.get_netbios_name(
+                    entry["attributes"]["distinguishedName"]
+                )
+                self.ad.nbdomains[nbentry["attributes"]["nETBIOSName"]] = entry
             except IndexError:
                 pass
 
         if entriesNum == 0:
             # Raise exception if we somehow managed to authenticate but the domain is wrong
             # prevents confusing exceptions later
-            actualdn = self.ldap.server.info.other['defaultNamingContext'][0]
+            actualdn = self.ldap.server.info.other["defaultNamingContext"][0]
             actualdomain = ADUtils.ldap2domain(actualdn)
-            logging.error('Could not find the requested domain %s on this DC, LDAP server reports is domain as %s (you may want to try that?)', self.ad.domain, actualdomain)
+            logging.error(
+                "Could not find the requested domain %s on this DC, LDAP server reports is domain as %s (you may want to try that?)",
+                self.ad.domain,
+                actualdomain,
+            )
             raise CollectionException("Specified domain was not found in LDAP")
 
-        logging.info('Found %u domains', entriesNum)
+        logging.info("Found %u domains", entriesNum)
 
         return entries
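
One detail in get_objecttype above deserves a note: Active Directory stores
schemaIDGUID in little-endian ("mixed") byte order, which is why the code decodes
it with UUID(bytes_le=...) rather than UUID(bytes=...). A quick round-trip showing
the difference, using the well-known schemaIDGUID of the Computer class:

    from uuid import UUID

    # bf967a86-0de6-11d0-a285-00aa003049e2 is the schemaIDGUID of the Computer
    # class; AD hands it back in bytes_le order.
    raw = UUID("bf967a86-0de6-11d0-a285-00aa003049e2").bytes_le
    print(UUID(bytes_le=raw))  # bf967a86-0de6-11d0-a285-00aa003049e2 (correct)
    print(UUID(bytes=raw))     # 867a96bf-e60d-d011-a285-00aa003049e2 (scrambled)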
 
@@ -323,34 +417,37 @@ def get_forest_domains(self):
         This searches the configuration, which is present only once in the forest but is replicated
         to every DC.
         """
-        entries = self.search('(objectClass=crossRef)',
-                              ['nETBIOSName', 'systemFlags', 'nCName', 'name'],
-                              search_base="CN=Partitions,%s" % self.ldap.server.info.other['configurationNamingContext'][0],
-                              generator=True)
+        entries = self.search(
+            "(objectClass=crossRef)",
+            ["nETBIOSName", "systemFlags", "nCName", "name"],
+            search_base="CN=Partitions,%s"
+            % self.ldap.server.info.other["configurationNamingContext"][0],
+            generator=True,
+        )
 
         entriesNum = 0
         for entry in entries:
             # Ensure systemFlags entry is not empty before running the naming context check.
-            if not entry['attributes']['systemFlags']:
+            if not entry["attributes"]["systemFlags"]:
                 continue
             # This is a naming context, but not a domain
-            if not entry['attributes']['systemFlags'] & 2:
+            if not entry["attributes"]["systemFlags"] & 2:
                 continue
-            entry['attributes']['distinguishedName'] = entry['attributes']['nCName']
+            entry["attributes"]["distinguishedName"] = entry["attributes"]["nCName"]
             entriesNum += 1
             # Todo: actually use these objects instead of discarding them
             # means rewriting other functions
-            d = ADDomain.fromLDAP(entry['attributes']['nCName'])
+            d = ADDomain.fromLDAP(entry["attributes"]["nCName"])
             # We don't want to add our own domain since this entry doesn't contain the sid
             # which we need later on
-            if entry['attributes']['nCName'] not in self.ad.domains:
-                self.ad.domains[entry['attributes']['nCName']] = entry
-                self.ad.nbdomains[entry['attributes']['nETBIOSName']] = entry
+            if entry["attributes"]["nCName"] not in self.ad.domains:
+                self.ad.domains[entry["attributes"]["nCName"]] = entry
+                self.ad.nbdomains[entry["attributes"]["nETBIOSName"]] = entry
 
         # Store this number so we can easily determine if we are in a multi-domain
         # forest later on.
         self.ad.num_domains = entriesNum
-        logging.info('Found %u domains in the forest', entriesNum)
+        logging.info("Found %u domains in the forest", entriesNum)
 
     def get_cache_items(self):
         self.get_objecttype()
@@ -359,158 +456,228 @@ def get_cache_items(self):
         sidcache = {}
         dncache = {}
         for nc, domain in self.ad.domains.items():
-            logging.info('Processing domain %s', domain['attributes']['name'])
-            query = '(|(&(objectCategory=person)(objectClass=user))(objectClass=group)(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2))))'
-            entries = self.search(query,
-                                  use_gc=True,
-                                  use_resolver=True,
-                                  attributes=['sAMAccountName', 'distinguishedName', 'sAMAccountType', 'objectSid', 'name'],
-                                  search_base=nc,
-                                  generator=True)
+            logging.info("Processing domain %s", domain["attributes"]["name"])
+            query = "(|(&(objectCategory=person)(objectClass=user))(objectClass=group)(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2))))"
+            entries = self.search(
+                query,
+                use_gc=True,
+                use_resolver=True,
+                attributes=[
+                    "sAMAccountName",
+                    "distinguishedName",
+                    "sAMAccountType",
+                    "objectSid",
+                    "name",
+                ],
+                search_base=nc,
+                generator=True,
+            )
             for lentry in entries:
                 resolved_entry = ADUtils.resolve_ad_entry(lentry)
                 cacheitem = {
-                    "ObjectIdentifier": resolved_entry['objectid'],
-                    "ObjectType": resolved_entry['type'].capitalize()
+                    "ObjectIdentifier": resolved_entry["objectid"],
+                    "ObjectType": resolved_entry["type"].capitalize(),
                 }
-                sidcache[resolved_entry['objectid']] = cacheitem
-                dncache[ADUtils.get_entry_property(lentry, 'distinguishedName').upper()] = cacheitem
+                sidcache[resolved_entry["objectid"]] = cacheitem
+                dncache[
+                    ADUtils.get_entry_property(lentry, "distinguishedName").upper()
+                ] = cacheitem
         return dncache, sidcache
 
     def get_groups(self, include_properties=False, acl=False):
-        properties = ['distinguishedName', 'samaccountname', 'samaccounttype', 'objectsid', 'member']
+        properties = [
+            "distinguishedName",
+            "samaccountname",
+            "samaccounttype",
+            "objectsid",
+            "member",
+        ]
         if include_properties:
-            properties += ['adminCount', 'description', 'whencreated']
+            properties += ["adminCount", "description", "whencreated"]
         if acl:
-            properties += ['nTSecurityDescriptor']
-        entries = self.search('(objectClass=group)',
-                              properties,
-                              generator=True,
-                              query_sd=acl)
+            properties += ["nTSecurityDescriptor"]
+        entries = self.search(
+            "(objectClass=group)", properties, generator=True, query_sd=acl
+        )
         return entries
 
     def get_gpos(self, include_properties=False, acl=False):
-        properties = ['distinguishedName', 'name', 'objectGUID', 'gPCFileSysPath', 'displayName']
+        properties = [
+            "distinguishedName",
+            "name",
+            "objectGUID",
+            "gPCFileSysPath",
+            "displayName",
+        ]
         if include_properties:
-            properties += ['description', 'whencreated']
+            properties += ["description", "whencreated"]
         if acl:
-            properties += ['nTSecurityDescriptor']
-        entries = self.search('(objectCategory=groupPolicyContainer)',
-                              properties,
-                              generator=True,
-                              query_sd=acl)
+            properties += ["nTSecurityDescriptor"]
+        entries = self.search(
+            "(objectCategory=groupPolicyContainer)",
+            properties,
+            generator=True,
+            query_sd=acl,
+        )
         return entries
 
     def get_ous(self, include_properties=False, acl=False):
-        properties = ['distinguishedName', 'name', 'objectGUID', 'gPLink', 'gPOptions']
+        properties = ["distinguishedName", "name", "objectGUID", "gPLink", "gPOptions"]
         if include_properties:
-            properties += ['description', 'whencreated']
+            properties += ["description", "whencreated"]
         if acl:
-            properties += ['nTSecurityDescriptor']
-        entries = self.search('(objectCategory=organizationalUnit)',
-                              properties,
-                              generator=True,
-                              query_sd=acl)
+            properties += ["nTSecurityDescriptor"]
+        entries = self.search(
+            "(objectCategory=organizationalUnit)",
+            properties,
+            generator=True,
+            query_sd=acl,
+        )
         return entries
 
-    def get_containers(self, include_properties=False, acl=False, dn=''):
-        properties = ['distinguishedName', 'name', 'objectGUID', 'isCriticalSystemObject','objectClass', 'objectCategory']
+    def get_containers(self, include_properties=False, acl=False, dn=""):
+        properties = [
+            "distinguishedName",
+            "name",
+            "objectGUID",
+            "isCriticalSystemObject",
+            "objectClass",
+            "objectCategory",
+        ]
         if include_properties:
-            properties += ['description', 'whencreated']
+            properties += ["description", "whencreated"]
         if acl:
-            properties += ['nTSecurityDescriptor']
-        entries = self.search('(&(objectCategory=container)(objectClass=container))',
-                              properties,
-                              generator=True,
-                              query_sd=acl,
-                              search_base=dn)
+            properties += ["nTSecurityDescriptor"]
+        entries = self.search(
+            "(&(objectCategory=container)(objectClass=container))",
+            properties,
+            generator=True,
+            query_sd=acl,
+            search_base=dn,
+        )
         return entries
 
     def get_users(self, include_properties=False, acl=False):
 
-        properties = ['sAMAccountName', 'distinguishedName', 'sAMAccountType',
-                      'objectSid', 'primaryGroupID', 'isDeleted', 'objectClass']
-        if 'ms-DS-GroupMSAMembership'.lower() in self.objecttype_guid_map:
-            properties.append('msDS-GroupMSAMembership')
+        properties = [
+            "sAMAccountName",
+            "distinguishedName",
+            "sAMAccountType",
+            "objectSid",
+            "primaryGroupID",
+            "isDeleted",
+            "objectClass",
+        ]
+        if "ms-DS-GroupMSAMembership".lower() in self.objecttype_guid_map:
+            properties.append("msDS-GroupMSAMembership")
 
         if include_properties:
-            properties += ['servicePrincipalName', 'userAccountControl', 'displayName',
-                           'lastLogon', 'lastLogonTimestamp', 'pwdLastSet', 'mail', 'title', 'homeDirectory',
-                           'description', 'userPassword', 'adminCount', 'msDS-AllowedToDelegateTo', 'sIDHistory',
-                           'whencreated', 'unicodepwd', 'scriptpath']
-            if 'unixuserpassword' in self.objecttype_guid_map:
-                properties.append('unixuserpassword')
+            properties += [
+                "servicePrincipalName",
+                "userAccountControl",
+                "displayName",
+                "lastLogon",
+                "lastLogonTimestamp",
+                "pwdLastSet",
+                "mail",
+                "title",
+                "homeDirectory",
+                "description",
+                "userPassword",
+                "adminCount",
+                "msDS-AllowedToDelegateTo",
+                "sIDHistory",
+                "whencreated",
+                "unicodepwd",
+                "scriptpath",
+            ]
+            if "unixuserpassword" in self.objecttype_guid_map:
+                properties.append("unixuserpassword")
         if acl:
-            properties.append('nTSecurityDescriptor')
+            properties.append("nTSecurityDescriptor")
 
         # Query for MSA only if server supports it
-        if 'msDS-GroupManagedServiceAccount' in self.ldap.server.schema.object_classes:
-            gmsa_filter = '(objectClass=msDS-GroupManagedServiceAccount)'
+        if "msDS-GroupManagedServiceAccount" in self.ldap.server.schema.object_classes:
+            gmsa_filter = "(objectClass=msDS-GroupManagedServiceAccount)"
         else:
-            logging.debug('No support for GMSA, skipping in query')
-            gmsa_filter = ''
+            logging.debug("No support for GMSA, skipping in query")
+            gmsa_filter = ""
 
-        if 'msDS-ManagedServiceAccount' in self.ldap.server.schema.object_classes:
-            smsa_filter = '(objectClass=msDS-ManagedServiceAccount)'
+        if "msDS-ManagedServiceAccount" in self.ldap.server.schema.object_classes:
+            smsa_filter = "(objectClass=msDS-ManagedServiceAccount)"
         else:
-            logging.debug('No support for SMSA, skipping in query')
-            smsa_filter = ''
+            logging.debug("No support for SMSA, skipping in query")
+            smsa_filter = ""
 
         if gmsa_filter or smsa_filter:
-            query = '(|(&(objectCategory=person)(objectClass=user)){}{})'.format(gmsa_filter, smsa_filter)
+            query = "(|(&(objectCategory=person)(objectClass=user)){}{})".format(
+                gmsa_filter, smsa_filter
+            )
         else:
-            query = '(&(objectCategory=person)(objectClass=user))'
-        entries = self.search(query,
-                              properties,
-                              generator=True,
-                              query_sd=acl)
+            query = "(&(objectCategory=person)(objectClass=user))"
+        entries = self.search(query, properties, generator=True, query_sd=acl)
         return entries
 
-
     def get_computers(self, include_properties=False, acl=False):
         """
         Get all computer objects. This purely gets them using LDAP. This function is used directly in case of DCOnly enum,
         or used to create a cache in case of computer enumeration later on.
         """
-        properties = ['samaccountname', 'userAccountControl', 'distinguishedname',
-                      'dnshostname', 'samaccounttype', 'objectSid', 'primaryGroupID',
-                      'isDeleted']
+        properties = [
+            "samaccountname",
+            "userAccountControl",
+            "distinguishedname",
+            "dnshostname",
+            "samaccounttype",
+            "objectSid",
+            "primaryGroupID",
+            "isDeleted",
+        ]
         if include_properties:
-            properties += ['servicePrincipalName', 'msDS-AllowedToDelegateTo', 'sIDHistory', 'whencreated',
-                           'lastLogon', 'lastLogonTimestamp', 'pwdLastSet', 'operatingSystem', 'description',
-                           'operatingSystemServicePack']
+            properties += [
+                "servicePrincipalName",
+                "msDS-AllowedToDelegateTo",
+                "sIDHistory",
+                "whencreated",
+                "lastLogon",
+                "lastLogonTimestamp",
+                "pwdLastSet",
+                "operatingSystem",
+                "description",
+                "operatingSystemServicePack",
+            ]
             # Difference between guid map which maps the lowercase schema object name and the property name itself
-            if 'ms-DS-Allowed-To-Act-On-Behalf-Of-Other-Identity'.lower() in self.objecttype_guid_map:
-                properties.append('msDS-AllowedToActOnBehalfOfOtherIdentity')
+            if (
+                "ms-DS-Allowed-To-Act-On-Behalf-Of-Other-Identity".lower()
+                in self.objecttype_guid_map
+            ):
+                properties.append("msDS-AllowedToActOnBehalfOfOtherIdentity")
             if self.ad.has_laps:
-                properties.append('ms-mcs-admpwdexpirationtime')
+                properties.append("ms-mcs-admpwdexpirationtime")
         if acl:
             # Also collect LAPS expiration time since this matters for reporting (no LAPS = no ACL reported)
             if self.ad.has_laps:
-                properties += ['nTSecurityDescriptor', 'ms-mcs-admpwdexpirationtime']
+                properties += ["nTSecurityDescriptor", "ms-mcs-admpwdexpirationtime"]
             else:
-                properties.append('nTSecurityDescriptor')
+                properties.append("nTSecurityDescriptor")
 
         # Exclude MSA only if server supports it
-        if 'msDS-GroupManagedServiceAccount' in self.ldap.server.schema.object_classes:
-            gmsa_filter = '(!(objectClass=msDS-GroupManagedServiceAccount))'
+        if "msDS-GroupManagedServiceAccount" in self.ldap.server.schema.object_classes:
+            gmsa_filter = "(!(objectClass=msDS-GroupManagedServiceAccount))"
         else:
-            gmsa_filter = ''
+            gmsa_filter = ""
 
-        if 'msDS-ManagedServiceAccount' in self.ldap.server.schema.object_classes:
-            smsa_filter = '(!(objectClass=msDS-ManagedServiceAccount))'
+        if "msDS-ManagedServiceAccount" in self.ldap.server.schema.object_classes:
+            smsa_filter = "(!(objectClass=msDS-ManagedServiceAccount))"
         else:
-            smsa_filter = ''
+            smsa_filter = ""
 
         if gmsa_filter or smsa_filter:
-            query = '(&(sAMAccountType=805306369){}{})'.format(gmsa_filter, smsa_filter)
+            query = "(&(sAMAccountType=805306369){}{})".format(gmsa_filter, smsa_filter)
         else:
-            query = '(&(sAMAccountType=805306369))'
+            query = "(&(sAMAccountType=805306369))"
 
-        entries = self.search(query,
-                              properties,
-                              generator=True,
-                              query_sd=acl)
+        entries = self.search(query, properties, generator=True, query_sd=acl)
 
         return entries
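
The GMSA/SMSA handling in get_users and get_computers follows one pattern: probe
the server schema first, then splice optional clauses into the LDAP filter so the
query never references a class the DC does not know about. A minimal sketch of
that pattern (conn is assumed to be an already-bound ldap3.Connection created with
schema info available):

    def build_computer_filter(conn):
        """Exclude managed service accounts only when the schema defines them."""
        classes = conn.server.schema.object_classes
        clauses = ""
        if "msDS-GroupManagedServiceAccount" in classes:
            clauses += "(!(objectClass=msDS-GroupManagedServiceAccount))"
        if "msDS-ManagedServiceAccount" in classes:
            clauses += "(!(objectClass=msDS-ManagedServiceAccount))"
        # 805306369 is SAM_MACHINE_ACCOUNT, i.e. computer objects
        return "(&(sAMAccountType=805306369)%s)" % clauses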
 
@@ -528,44 +695,78 @@ def get_computers_withcache(self, include_properties=False, acl=False):
             # Resolve it first for DN cache
             resolved_entry = ADUtils.resolve_ad_entry(entry)
             cacheitem = {
-                "ObjectIdentifier": resolved_entry['objectid'],
-                "ObjectType": resolved_entry['type'].capitalize()
+                "ObjectIdentifier": resolved_entry["objectid"],
+                "ObjectType": resolved_entry["type"].capitalize(),
             }
-            self.ad.dncache[ADUtils.get_entry_property(entry, 'distinguishedName', '').upper()] = cacheitem
+            self.ad.dncache[
+                ADUtils.get_entry_property(entry, "distinguishedName", "").upper()
+            ] = cacheitem
             # This list is used to process computers later on
-            self.ad.computers[ADUtils.get_entry_property(entry, 'distinguishedName', '')] = entry
-            self.ad.computersidcache.put(ADUtils.get_entry_property(entry, 'dNSHostname', '').lower(), entry['attributes']['objectSid'])
+            self.ad.computers[
+                ADUtils.get_entry_property(entry, "distinguishedName", "")
+            ] = entry
+            self.ad.computersidcache.put(
+                ADUtils.get_entry_property(entry, "dNSHostname", "").lower(),
+                entry["attributes"]["objectSid"],
+            )
 
-        logging.info('Found %u computers', entriesNum)
+        logging.info("Found %u computers", entriesNum)
 
         return entries
 
     def get_memberships(self):
-        entries = self.search('(|(memberof=*)(primarygroupid=*))',
-                              ['samaccountname', 'distinguishedname',
-                               'dnshostname', 'samaccounttype', 'primarygroupid',
-                               'memberof'],
-                              generator=False)
+        entries = self.search(
+            "(|(memberof=*)(primarygroupid=*))",
+            [
+                "samaccountname",
+                "distinguishedname",
+                "dnshostname",
+                "samaccounttype",
+                "primarygroupid",
+                "memberof",
+            ],
+            generator=False,
+        )
         return entries
 
     def get_sessions(self):
-        entries = self.search('(&(samAccountType=805306368)(!(userAccountControl:1.2.840.113556.1.4.803:=2))(|(homedirectory=*)(scriptpath=*)(profilepath=*)))',
-                              ['homedirectory', 'scriptpath', 'profilepath'])
+        entries = self.search(
+            "(&(samAccountType=805306368)(!(userAccountControl:1.2.840.113556.1.4.803:=2))(|(homedirectory=*)(scriptpath=*)(profilepath=*)))",
+            ["homedirectory", "scriptpath", "profilepath"],
+        )
         return entries
 
     def get_childobjects(self, dn, use_resolver=True):
-        entries = self.search('(|(objectClass=container)(objectClass=organizationalUnit)(sAMAccountType=805306369)(objectClass=group)(&(objectCategory=person)(objectClass=user)))',
-                              attributes=['objectSid', 'objectClass', 'objectGUID', 'distinguishedName', 'sAMAccountName', 'sAMAccountType'],
-                              search_base=dn,
-                              search_scope=LEVEL,
-                              use_resolver=use_resolver)
-                              
+        entries = self.search(
+            "(|(objectClass=container)(objectClass=organizationalUnit)(sAMAccountType=805306369)(objectClass=group)(&(objectCategory=person)(objectClass=user)))",
+            attributes=[
+                "objectSid",
+                "objectClass",
+                "objectGUID",
+                "distinguishedName",
+                "sAMAccountName",
+                "sAMAccountType",
+            ],
+            search_base=dn,
+            search_scope=LEVEL,
+            use_resolver=use_resolver,
+        )
+
         return entries
 
     def get_trusts(self):
-        entries = self.search('(objectClass=trustedDomain)',
-                              attributes=['flatName', 'name', 'securityIdentifier', 'trustAttributes', 'trustDirection', 'trustType'],
-                              generator=True)
+        entries = self.search(
+            "(objectClass=trustedDomain)",
+            attributes=[
+                "flatName",
+                "name",
+                "securityIdentifier",
+                "trustAttributes",
+                "trustDirection",
+                "trustType",
+            ],
+            generator=True,
+        )
         return entries
 
     def prefetch_info(self, props=False, acls=False, cache_computers=False):
@@ -576,15 +777,27 @@ def prefetch_info(self, props=False, acls=False, cache_computers=False):
             self.get_computers_withcache(include_properties=props, acl=acls)
 
     def get_root_domain(self):
-        return ADUtils.ldap2domain(self.ldap.server.info.other['configurationNamingContext'][0])
+        return ADUtils.ldap2domain(
+            self.ldap.server.info.other["configurationNamingContext"][0]
+        )
 
 
 """
 Active Directory data and cache
 """
+
+
 class AD(object):
 
-    def __init__(self, domain=None, auth=None, nameserver=None, dns_tcp=False, dns_timeout=3.0, use_ldaps=False):
+    def __init__(
+        self,
+        domain=None,
+        auth=None,
+        nameserver=None,
+        dns_tcp=False,
+        dns_timeout=3.0,
+        use_ldaps=False,
+    ):
         self.domain = domain
         # Object of type ADDomain, added later
         self.domain_object = None
@@ -599,10 +812,10 @@ def __init__(self, domain=None, auth=None, nameserver=None, dns_tcp=False, dns_t
 
         self.domains = {}
         self.nbdomains = {}
-        self.groups = {} # Groups by DN
-        self.groups_dnmap = {} # Group mapping from gid to DN
+        self.groups = {}  # Groups by DN
+        self.groups_dnmap = {}  # Group mapping from gid to DN
         self.computers = {}
-        self.users = {} # Users by DN
+        self.users = {}  # Users by DN
 
         # Create a resolver object
         self.dnsresolver = resolver.Resolver()
@@ -642,9 +855,9 @@ def __init__(self, domain=None, auth=None, nameserver=None, dns_tcp=False, dns_t
         else:
             self.baseDN = None
         if use_ldaps:
-            self.ldap_default_protocol = 'ldaps'
+            self.ldap_default_protocol = "ldaps"
         else:
-            self.ldap_default_protocol = 'ldap'
+            self.ldap_default_protocol = "ldap"
 
     def realm(self):
         if self.domain is not None:
@@ -671,24 +884,24 @@ def create_objectresolver(self, addc):
         self.objectresolver = ObjectResolver(addomain=self, addc=addc)
 
     def load_cachefile(self, cachefile):
-        with codecs.open(cachefile, 'r', 'utf-8') as cfile:
+        with codecs.open(cachefile, "r", "utf-8") as cfile:
             cachedata = json.load(cfile)
-        self.dncache = cachedata['dncache']
-        self.newsidcache.load(cachedata['sidcache'])
-        logging.info('Loaded cached DNs and SIDs from cachefile')
+        self.dncache = cachedata["dncache"]
+        self.newsidcache.load(cachedata["sidcache"])
+        logging.info("Loaded cached DNs and SIDs from cachefile")
 
     def save_cachefile(self, cachefile):
         pass
 
     def dns_resolve(self, domain=None, options=None):
-        logging.debug('Querying domain controller information from DNS')
+        logging.debug("Querying domain controller information from DNS")
 
-        basequery = '_ldap._tcp.pdc._msdcs'
+        basequery = "_ldap._tcp.pdc._msdcs"
         ad_domain = None
 
         if domain is not None:
-            logging.debug('Using domain hint: %s' % str(domain))
-            query = '_ldap._tcp.pdc._msdcs.%s' % domain
+            logging.debug("Using domain hint: %s" % str(domain))
+            query = "_ldap._tcp.pdc._msdcs.%s" % domain
         else:
             # Assume a DNS search domain is (correctly) configured on the host
             # in which case the resolver will autocomplete our request
@@ -696,11 +909,11 @@ def dns_resolve(self, domain=None, options=None):
 
         try:
 
-            q = self.dnsresolver.query(query, 'SRV', tcp=self.dns_tcp)
+            q = self.dnsresolver.query(query, "SRV", tcp=self.dns_tcp)
 
-            if str(q.qname).lower().startswith('_ldap._tcp.pdc._msdcs'):
-                ad_domain = str(q.qname).lower()[len(basequery):].strip('.')
-                logging.info('Found AD domain: %s' % ad_domain)
+            if str(q.qname).lower().startswith("_ldap._tcp.pdc._msdcs"):
+                ad_domain = str(q.qname).lower()[len(basequery) :].strip(".")
+                logging.info("Found AD domain: %s" % ad_domain)
 
                 self.domain = ad_domain
                 if self.auth.domain is None:
@@ -708,8 +921,8 @@ def dns_resolve(self, domain=None, options=None):
                 self.baseDN = ADUtils.domain2ldap(ad_domain)
 
             for r in q:
-                dc = str(r.target).rstrip('.')
-                logging.debug('Found primary DC: %s' % dc)
+                dc = str(r.target).rstrip(".")
+                logging.debug("Found primary DC: %s" % dc)
                 if dc not in self._dcs:
                     self._dcs.append(dc)
 
@@ -717,10 +930,12 @@ def dns_resolve(self, domain=None, options=None):
             pass
 
         try:
-            q = self.dnsresolver.query(query.replace('pdc','gc'), 'SRV', tcp=self.dns_tcp)
+            q = self.dnsresolver.query(
+                query.replace("pdc", "gc"), "SRV", tcp=self.dns_tcp
+            )
             for r in q:
-                gc = str(r.target).rstrip('.')
-                logging.debug('Found Global Catalog server: %s' % gc)
+                gc = str(r.target).rstrip(".")
+                logging.debug("Found Global Catalog server: %s" % gc)
                 if gc not in self._gcs:
                     self._gcs.append(gc)
 
@@ -728,19 +943,25 @@ def dns_resolve(self, domain=None, options=None):
             # Only show warning if we don't already have a GC specified manually
             if options and not options.global_catalog:
                 if not options.disable_autogc:
-                    logging.warning('Could not find a global catalog server, assuming the primary DC has this role\n'
-                                    'If this gives errors, either specify a hostname with -gc or disable gc resolution with --disable-autogc')
+                    logging.warning(
+                        "Could not find a global catalog server, assuming the primary DC has this role\n"
+                        "If this gives errors, either specify a hostname with -gc or disable gc resolution with --disable-autogc"
+                    )
                     self._gcs = self._dcs
                 else:
-                    logging.warning('Could not find a global catalog server. Please specify one with -gc')
+                    logging.warning(
+                        "Could not find a global catalog server. Please specify one with -gc"
+                    )
 
         try:
-            kquery = query.replace('pdc','dc').replace('_ldap','_kerberos')
-            q = self.dnsresolver.query(kquery, 'SRV', tcp=self.dns_tcp)
+            kquery = query.replace("pdc", "dc").replace("_ldap", "_kerberos")
+            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp)
             # TODO: Get the additional records here to get the DC ip immediately
             for r in q:
-                kdc = str(r.target).rstrip('.')
-                logging.debug('Found KDC for enumeration domain: %s' % str(r.target).rstrip('.'))
+                kdc = str(r.target).rstrip(".")
+                logging.debug("Found KDC for enumeration domain: %s", kdc)
                 if kdc not in self._kdcs:
                     self._kdcs.append(kdc)
                     self.auth.kdc = self._kdcs[0]
@@ -752,26 +973,25 @@ def dns_resolve(self, domain=None, options=None):
             if domain:
                 ad_domain = domain
             else:
-                ad_domain = 'unknown'
+                ad_domain = "unknown"
 
         if self.auth.userdomain.lower() != ad_domain.lower():
             # Resolve KDC for user auth domain
-            kquery = '_kerberos._tcp.dc._msdcs.%s' % self.auth.userdomain
-            q = self.dnsresolver.query(kquery, 'SRV', tcp=self.dns_tcp)
+            kquery = "_kerberos._tcp.dc._msdcs.%s" % self.auth.userdomain
+            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp)
             for r in q:
-                kdc = str(r.target).rstrip('.')
-                logging.debug('Found KDC for user: %s' % str(r.target).rstrip('.'))
+                kdc = str(r.target).rstrip(".")
+                logging.debug("Found KDC for user: %s" % str(r.target).rstrip("."))
                 self.auth.userdomain_kdc = kdc
         else:
             self.auth.userdomain_kdc = self.auth.kdc
 
         return True
 
-
     def get_domain_by_name(self, name):
         for domain, entry in iteritems(self.domains):
-            if 'name' in entry['attributes']:
-                if entry['attributes']['name'].upper() == name.upper():
+            if "name" in entry["attributes"]:
+                if entry["attributes"]["name"].upper() == name.upper():
                     return entry
         # Also try domains by NETBIOS definition
         for domain, entry in iteritems(self.nbdomains):
@@ -779,26 +999,32 @@ def get_domain_by_name(self, name):
                 return entry
         return None
 
-
     def get_dn_from_cache_or_ldap(self, distinguishedname):
         try:
             linkentry = self.dncache[distinguishedname.upper()]
         except KeyError:
-            use_gc = ADUtils.ldap2domain(distinguishedname).lower() != self.domain.lower()
-            qobject = self.objectresolver.resolve_distinguishedname(distinguishedname, use_gc=use_gc)
+            use_gc = (
+                ADUtils.ldap2domain(distinguishedname).lower() != self.domain.lower()
+            )
+            qobject = self.objectresolver.resolve_distinguishedname(
+                distinguishedname, use_gc=use_gc
+            )
             if qobject is None:
                 return None
             resolved_entry = ADUtils.resolve_ad_entry(qobject)
             linkentry = {
-                "ObjectIdentifier": resolved_entry['objectid'],
-                "ObjectType": resolved_entry['type'].capitalize()
+                "ObjectIdentifier": resolved_entry["objectid"],
+                "ObjectType": resolved_entry["type"].capitalize(),
             }
             self.dncache[distinguishedname.upper()] = linkentry
         return linkentry
 
+
 """
 Active Directory Domain
 """
+
+
 class ADDomain(object):
     def __init__(self, name=None, netbios_name=None, sid=None, distinguishedname=None):
         self.name = name
@@ -806,7 +1032,6 @@ def __init__(self, name=None, netbios_name=None, sid=None, distinguishedname=Non
         self.sid = sid
         self.distinguishedname = distinguishedname
 
-
     @staticmethod
     def fromLDAP(identifier, sid=None):
         dns_name = ADUtils.ldap2domain(identifier)
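
The dns_resolve logic in domain.py boils down to standard SRV discovery: ask DNS
for _ldap._tcp.pdc._msdcs.<domain> and collect the targets (the same query with
pdc swapped for gc, or for dc with the _kerberos service, finds GCs and KDCs). A
standalone sketch with dnspython; the patch uses the older resolver.query name,
resolve is its replacement in dnspython 2.x, and example.local is a placeholder:

    import dns.resolver

    def find_primary_dcs(domain="example.local", use_tcp=False):
        """Return the primary-DC hostnames advertised via DNS SRV records."""
        answer = dns.resolver.resolve(
            "_ldap._tcp.pdc._msdcs.%s" % domain, "SRV", tcp=use_tcp
        )
        # SRV targets are absolute DNS names; strip the trailing root dot
        return [str(record.target).rstrip(".") for record in answer]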
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..89e17c1
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,673 @@
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = "*"
+files = [
+    {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+    {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+    {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+    {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+    {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+    {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+    {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+    {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+    {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+    {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+    {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+    {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+    {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+    {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+    {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+    {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+    {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+    {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+    {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+    {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+    {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+    {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+    {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+    {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+    {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+    {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+    {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+    {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+    {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+    {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+    {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+    {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+    {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+    {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+    {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "charset-normalizer"
+version = "3.0.1"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = "*"
+files = [
+    {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"},
+    {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"},
+    {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"},
+    {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"},
+    {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"},
+    {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"},
+    {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"},
+    {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"},
+]
+
+[[package]]
+name = "click"
+version = "8.0.4"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"},
+    {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.5"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+    {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
+    {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
+]
+
+[[package]]
+name = "cryptography"
+version = "40.0.2"
+description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"},
+    {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"},
+    {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"},
+    {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"},
+    {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"},
+    {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"},
+    {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"},
+    {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"},
+    {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"},
+    {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"},
+    {file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"},
+    {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"},
+    {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"},
+    {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"},
+    {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"},
+    {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"},
+    {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"},
+    {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"},
+    {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"},
+]
+
+[package.dependencies]
+cffi = ">=1.12"
+
+[package.extras]
+docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
+docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
+pep8test = ["black", "check-manifest", "mypy", "ruff"]
+sdist = ["setuptools-rust (>=0.11.4)"]
+ssh = ["bcrypt (>=3.1.5)"]
+test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"]
+test-randomorder = ["pytest-randomly"]
+tox = ["tox"]
+
+[[package]]
+name = "dataclasses"
+version = "0.8"
+description = "A backport of the dataclasses module for Python 3.6"
+optional = false
+python-versions = ">=3.6, <3.7"
+files = [
+    {file = "dataclasses-0.8-py3-none-any.whl", hash = "sha256:0201d89fa866f68c8ebd9d08ee6ff50c0b255f8ec63a71c16fda7af82bb887bf"},
+    {file = "dataclasses-0.8.tar.gz", hash = "sha256:8479067f342acf957dc82ec415d355ab5edb7e7646b90dc6e2fd1d96ad084c97"},
+]
+
+[[package]]
+name = "dnspython"
+version = "2.2.1"
+description = "DNS toolkit"
+optional = false
+python-versions = ">=3.6,<4.0"
+files = [
+    {file = "dnspython-2.2.1-py3-none-any.whl", hash = "sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f"},
+    {file = "dnspython-2.2.1.tar.gz", hash = "sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e"},
+]
+
+[package.extras]
+curio = ["curio (>=1.2,<2.0)", "sniffio (>=1.1,<2.0)"]
+dnssec = ["cryptography (>=2.6,<37.0)"]
+doh = ["h2 (>=4.1.0)", "httpx (>=0.21.1)", "requests (>=2.23.0,<3.0.0)", "requests-toolbelt (>=0.9.1,<0.10.0)"]
+idna = ["idna (>=2.1,<4.0)"]
+trio = ["trio (>=0.14,<0.20)"]
+wmi = ["wmi (>=1.5.1,<2.0.0)"]
+
+[[package]]
+name = "dsinternals"
+version = "1.2.4"
+description = ""
+optional = false
+python-versions = ">=3.4"
+files = [
+    {file = "dsinternals-1.2.4.tar.gz", hash = "sha256:030f935a70583845f68d6cfc5a22be6ce3300907788ba74faba50d6df859e91d"},
+]
+
+[[package]]
+name = "flask"
+version = "2.0.3"
+description = "A simple framework for building complex web applications."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "Flask-2.0.3-py3-none-any.whl", hash = "sha256:59da8a3170004800a2837844bfa84d49b022550616070f7cb1a659682b2e7c9f"},
+    {file = "Flask-2.0.3.tar.gz", hash = "sha256:e1120c228ca2f553b470df4a5fa927ab66258467526069981b3eb0a91902687d"},
+]
+
+[package.dependencies]
+click = ">=7.1.2"
+itsdangerous = ">=2.0"
+Jinja2 = ">=3.0"
+Werkzeug = ">=2.0"
+
+[package.extras]
+async = ["asgiref (>=3.2)"]
+dotenv = ["python-dotenv"]
+
+[[package]]
+name = "future"
+version = "1.0.0"
+description = "Clean single-source support for Python 3 and 2"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+    {file = "future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216"},
+    {file = "future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05"},
+]
+
+[[package]]
+name = "impacket"
+version = "0.11.0"
+description = "Network protocols Constructors and Dissectors"
+optional = false
+python-versions = "*"
+files = [
+    {file = "impacket-0.11.0.tar.gz", hash = "sha256:ee4039b4d2aede8f5f64478bc59faac86036796be24dea8dc18f009fb0905e4a"},
+]
+
+[package.dependencies]
+charset_normalizer = "*"
+dsinternals = "*"
+flask = ">=1.0"
+future = "*"
+ldap3 = ">2.5.0,<2.5.2 || >2.5.2,<2.6 || >2.6"
+ldapdomaindump = ">=0.9.0"
+pyasn1 = ">=0.2.3"
+pycryptodomex = "*"
+pyOpenSSL = ">=21.0.0"
+six = "*"
+
+[[package]]
+name = "importlib-metadata"
+version = "4.8.3"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "importlib_metadata-4.8.3-py3-none-any.whl", hash = "sha256:65a9576a5b2d58ca44d133c42a241905cc45e34d2c06fd5ba2bafa221e5d7b5e"},
+    {file = "importlib_metadata-4.8.3.tar.gz", hash = "sha256:766abffff765960fcc18003801f7044eb6755ffae4521c8e8ce8e83b9c9b0668"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+perf = ["ipython"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy", "pytest-perf (>=0.9.2)"]
+
+[[package]]
+name = "itsdangerous"
+version = "2.0.1"
+description = "Safely pass data to untrusted environments and back."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "itsdangerous-2.0.1-py3-none-any.whl", hash = "sha256:5174094b9637652bdb841a3029700391451bd092ba3db90600dea710ba28e97c"},
+    {file = "itsdangerous-2.0.1.tar.gz", hash = "sha256:9e724d68fc22902a1435351f84c3fb8623f303fffcc566a4cb952df8c572cff0"},
+]
+
+[[package]]
+name = "jinja2"
+version = "3.0.3"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
+    {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "ldap3"
+version = "2.9.1"
+description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library"
+optional = false
+python-versions = "*"
+files = [
+    {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
+    {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.4.6"
+
+[[package]]
+name = "ldapdomaindump"
+version = "0.9.4"
+description = "Active Directory information dumper via LDAP"
+optional = false
+python-versions = "*"
+files = [
+    {file = "ldapdomaindump-0.9.4-py2-none-any.whl", hash = "sha256:c05ee1d892e6a0eb2d7bf167242d4bf747ff7758f625588a11795510d06de01f"},
+    {file = "ldapdomaindump-0.9.4-py3-none-any.whl", hash = "sha256:51d0c241af1d6fa3eefd79b95d182a798d39c56c4e2efb7ffae244a0b54f58aa"},
+    {file = "ldapdomaindump-0.9.4.tar.gz", hash = "sha256:99dcda17050a96549966e53bc89e71da670094d53d9542b3b0d0197d035e6f52"},
+]
+
+[package.dependencies]
+dnspython = "*"
+future = "*"
+ldap3 = ">2.5.0,<2.5.2 || >2.5.2,<2.6 || >2.6"
+
+[[package]]
+name = "markupsafe"
+version = "2.0.1"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"},
+    {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"},
+    {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"},
+    {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"},
+    {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"},
+    {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"},
+    {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"},
+]
+
+[[package]]
+name = "pyasn1"
+version = "0.5.1"
+description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+files = [
+    {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"},
+    {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+    {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+    {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pycryptodome"
+version = "3.20.0"
+description = "Cryptographic library for Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+    {file = "pycryptodome-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690"},
+    {file = "pycryptodome-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f"},
+    {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091"},
+    {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4"},
+    {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc"},
+    {file = "pycryptodome-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-win32.whl", hash = "sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72"},
+    {file = "pycryptodome-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9"},
+    {file = "pycryptodome-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a"},
+    {file = "pycryptodome-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e"},
+    {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04"},
+    {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3"},
+    {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea"},
+    {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b"},
+    {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6"},
+    {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab"},
+    {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5"},
+    {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e"},
+    {file = "pycryptodome-3.20.0.tar.gz", hash = "sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7"},
+]
+
+[[package]]
+name = "pycryptodomex"
+version = "3.20.0"
+description = "Cryptographic library for Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:645bd4ca6f543685d643dadf6a856cc382b654cc923460e3a10a49c1b3832aeb"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ff5c9a67f8a4fba4aed887216e32cbc48f2a6fb2673bb10a99e43be463e15913"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8ee606964553c1a0bc74057dd8782a37d1c2bc0f01b83193b6f8bb14523b877b"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7805830e0c56d88f4d491fa5ac640dfc894c5ec570d1ece6ed1546e9df2e98d6"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:bc3ee1b4d97081260d92ae813a83de4d2653206967c4a0a017580f8b9548ddbc"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:8af1a451ff9e123d0d8bd5d5e60f8e3315c3a64f3cdd6bc853e26090e195cdc8"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:cbe71b6712429650e3883dc81286edb94c328ffcd24849accac0a4dbcc76958a"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:76bd15bb65c14900d98835fcd10f59e5e0435077431d3a394b60b15864fddd64"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:653b29b0819605fe0898829c8ad6400a6ccde096146730c2da54eede9b7b8baa"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a5ec91388984909bb5398ea49ee61b68ecb579123694bffa172c3b0a107079"},
+    {file = "pycryptodomex-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:108e5f1c1cd70ffce0b68739c75734437c919d2eaec8e85bffc2c8b4d2794305"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:59af01efb011b0e8b686ba7758d59cf4a8263f9ad35911bfe3f416cee4f5c08c"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:82ee7696ed8eb9a82c7037f32ba9b7c59e51dda6f105b39f043b6ef293989cb3"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91852d4480a4537d169c29a9d104dda44094c78f1f5b67bca76c29a91042b623"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca649483d5ed251d06daf25957f802e44e6bb6df2e8f218ae71968ff8f8edc4"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e186342cfcc3aafaad565cbd496060e5a614b441cacc3995ef0091115c1f6c5"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:25cd61e846aaab76d5791d006497134602a9e451e954833018161befc3b5b9ed"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:9c682436c359b5ada67e882fec34689726a09c461efd75b6ea77b2403d5665b7"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a7a8f33a1f1fb762ede6cc9cbab8f2a9ba13b196bfaf7bc6f0b39d2ba315a43"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-win32.whl", hash = "sha256:c39778fd0548d78917b61f03c1fa8bfda6cfcf98c767decf360945fe6f97461e"},
+    {file = "pycryptodomex-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:2a47bcc478741b71273b917232f521fd5704ab4b25d301669879e7273d3586cc"},
+    {file = "pycryptodomex-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:1be97461c439a6af4fe1cf8bf6ca5936d3db252737d2f379cc6b2e394e12a458"},
+    {file = "pycryptodomex-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:19764605feea0df966445d46533729b645033f134baeb3ea26ad518c9fdf212c"},
+    {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e497413560e03421484189a6b65e33fe800d3bd75590e6d78d4dfdb7accf3b"},
+    {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48217c7901edd95f9f097feaa0388da215ed14ce2ece803d3f300b4e694abea"},
+    {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d00fe8596e1cc46b44bf3907354e9377aa030ec4cd04afbbf6e899fc1e2a7781"},
+    {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88afd7a3af7ddddd42c2deda43d53d3dfc016c11327d0915f90ca34ebda91499"},
+    {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d3584623e68a5064a04748fb6d76117a21a7cb5eaba20608a41c7d0c61721794"},
+    {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0daad007b685db36d977f9de73f61f8da2a7104e20aca3effd30752fd56f73e1"},
+    {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dcac11031a71348faaed1f403a0debd56bf5404232284cf8c761ff918886ebc"},
+    {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:69138068268127cd605e03438312d8f271135a33140e2742b417d027a0539427"},
+    {file = "pycryptodomex-3.20.0.tar.gz", hash = "sha256:7a710b79baddd65b806402e14766c721aee8fb83381769c27920f26476276c1e"},
+]
+
+[[package]]
+name = "pyopenssl"
+version = "23.2.0"
+description = "Python wrapper module around the OpenSSL library"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "pyOpenSSL-23.2.0-py3-none-any.whl", hash = "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2"},
+    {file = "pyOpenSSL-23.2.0.tar.gz", hash = "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"},
+]
+
+[package.dependencies]
+cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42"
+
+[package.extras]
+docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
+test = ["flaky", "pretend", "pytest (>=3.0.1)"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+    {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+    {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.1.1"
+description = "Backported and Experimental Type Hints for Python 3.6+"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"},
+    {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"},
+]
+
+[[package]]
+name = "werkzeug"
+version = "2.0.3"
+description = "The comprehensive WSGI web application library."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "Werkzeug-2.0.3-py3-none-any.whl", hash = "sha256:1421ebfc7648a39a5c58c601b154165d05cf47a3cd0ccb70857cbdacf6c8f2b8"},
+    {file = "Werkzeug-2.0.3.tar.gz", hash = "sha256:b863f8ff057c522164b6067c9e28b041161b4be5ba4d0daceeaa50a163822d3c"},
+]
+
+[package.dependencies]
+dataclasses = {version = "*", markers = "python_version < \"3.7\""}
+
+[package.extras]
+watchdog = ["watchdog"]
+
+[[package]]
+name = "zipp"
+version = "3.6.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"},
+    {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"},
+]
+
+[package.extras]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["func-timeout", "jaraco.itertools", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = ">=3.6,<3.12"
+content-hash = "1aa4f03c2b440e63c077ed8040a53123e8f70370ece47839c6686c679e4fcbd7"
diff --git a/poetry.toml b/poetry.toml
new file mode 100644
index 0000000..d27d659
--- /dev/null
+++ b/poetry.toml
@@ -0,0 +1,5 @@
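+# Local Poetry configuration: keep the virtualenv inside the project
+# directory (./.venv) and let it see system-wide site-packages.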
+[virtualenvs]
+create = true
+in-project = true
+always-copy = false
+system-site-packages = true
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..c4e82a8
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,41 @@
+[tool.poetry]
+name = "bloodhound"
+version = "1.7.2"
+description = "Python based ingestor for BloodHound"
+authors = [
+    "Dirk-jan Mollema <dirkjan@dirkjanm.io>",
+    "Edwin van Vliet <edwin.vanvliet@fox-it.com>",
+    "Matthijs Gielen <matthijs.gielen@fox-it.com>"
+]
+maintainers = ["Dirk-jan Mollema <dirkjan@dirkjanm.io>"]
+license = "MIT"
+homepage = "https://github.com/dirkjanm/bloodhound.py"
+readme = "README.md"  # Assumes you have a README.md file at the root of your project
+classifiers = [
+    "Intended Audience :: Information Technology",
+    "License :: OSI Approved :: MIT License",
+    "Programming Language :: Python :: 3.6",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11"
+]
+
+[tool.poetry.dependencies]
+python = ">=3.6,<3.12"
+dnspython = "*"
+impacket = ">=0.9.17"
+ldap3 = ">=2.5,!=2.5.2,!=2.5.0,!=2.6"
+pyasn1 = ">=0.4"
+future = "*"
+pycryptodome = "*"
+
+[tool.poetry.dev-dependencies]
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry.scripts]
+bloodhound-python = "bloodhound:main"
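
For reference, the `[tool.poetry.scripts]` entry above replaces the deleted top-level `bloodhound.py` launcher: installing the project generates a `bloodhound-python` executable that behaves roughly like the following minimal sketch (illustrative only, the real wrapper is emitted by the installer):

    # Rough equivalent of the generated bloodhound-python console script
    import sys

    from bloodhound import main

    if __name__ == "__main__":
        sys.exit(main())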

From bb0c1c47911826beb53ed3c715948d47fd1e784d Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Wed, 1 May 2024 11:28:57 +0200
Subject: [PATCH 2/6] Remove redundant TGS success log

---
 bloodhound/ad/authentication.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/bloodhound/ad/authentication.py b/bloodhound/ad/authentication.py
index cc2b49c..50a5e58 100644
--- a/bloodhound/ad/authentication.py
+++ b/bloodhound/ad/authentication.py
@@ -367,9 +367,6 @@ def get_tgt(self):
                 decoded_tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
                 next_realm = str(decoded_tgs["ticket"]["sname"]["name-string"][1])
                 if next_realm.upper() == self.domain.upper():
-                    logging.info(
-                        f"Successfully obtained final TGS for domain {self.domain}."
-                    )
                     break
                 else:
                     # Get next referral TGT

From 7c9fef9290c2af6abfad396fe3dc0bbdf922d3a6 Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Wed, 1 May 2024 13:32:20 +0200
Subject: [PATCH 3/6] Enhance logging output and zip filename

---
 bloodhound/__init__.py            | 48 ++++++++-----------
 bloodhound/ad/computer.py         |  2 +-
 bloodhound/ad/domain.py           | 80 +++++++++++++++++++------------
 bloodhound/enumeration/domains.py | 11 +++--
 4 files changed, 77 insertions(+), 64 deletions(-)

diff --git a/bloodhound/__init__.py b/bloodhound/__init__.py
index db6be20..0f0ddc5 100644
--- a/bloodhound/__init__.py
+++ b/bloodhound/__init__.py
@@ -455,9 +455,8 @@ def main():
             auth.get_tgt()
 
     # For adding timestamp prefix to the outputfiles
-    timestamp = (
-        datetime.datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S") + "_"
-    )
+    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+
     bloodhound = BloodHound(ad)
     bloodhound.connect()
     bloodhound.run(
@@ -470,37 +469,28 @@ def main():
         exclude_dcs=args.exclude_dcs,
         fileNamePrefix=args.outputprefix,
     )
+
     # If the --zip flag is set, compress the output
     if args.zip:
-        logging.info("Compressing output into " + timestamp + "bloodhound.zip")
+        trail_name = f"{args.domain}_{timestamp}_bloodhound_data.zip"
+        logging.info(f"Compressing output into {trail_name}")
+
         # Get a list of files in the current dir
         list_of_files = os.listdir(os.getcwd())
-        # Create handle to zip file with timestamp prefix
-        if args.outputprefix != None:
-            with ZipFile(
-                args.outputprefix + "_" + timestamp + "bloodhound.zip", "w"
-            ) as zip:
-                # For each of those files we fetched
-                for each_file in list_of_files:
-                    # If the files starts with the current timestamp and ends in json
-                    if each_file.startswith(args.outputprefix) and each_file.endswith(
-                        "json"
-                    ):
-                        # Write it to the zip
-                        zip.write(each_file)
-                        # Remove it from disk
-                        os.remove(each_file)
-        else:
-            with ZipFile(timestamp + "bloodhound.zip", "w") as zip:
-                # For each of those files we fetched
-                for each_file in list_of_files:
-                    # If the files starts with the current timestamp and ends in json
-                    if each_file.startswith(timestamp) and each_file.endswith("json"):
-                        # Write it to the zip
-                        zip.write(each_file)
-                        # Remove it from disk
-                        os.remove(each_file)
 
+        # Create handle to zip file with detailed suffix
+        zip_file_name = args.outputprefix + "_" + trail_name if args.outputprefix else trail_name
+
+        with ZipFile(zip_file_name, 'w') as zip:
+            # For each file that matches the criteria
+            for each_file in list_of_files:
+                # Checking if the file starts with the output prefix (if defined) or timestamp, and ends in .json
+                file_prefix = args.outputprefix if args.outputprefix else timestamp
+                if each_file.startswith(file_prefix) and each_file.endswith("json"):
+                    zip.write(each_file)
+                    os.remove(each_file)
+
+        logging.info(f"Successfully created and filled {zip_file_name}")
 
 if __name__ == "__main__":
     main()
diff --git a/bloodhound/ad/computer.py b/bloodhound/ad/computer.py
index 395ce21..2f64b33 100644
--- a/bloodhound/ad/computer.py
+++ b/bloodhound/ad/computer.py
@@ -349,7 +349,7 @@ def try_connect(self) -> bool:
         # Cache the address for future use and proceed with connection
         self.ad.dnscache.put(self.hostname, addr)
         self.addr = addr
-        logging.info(f"Resolved: {self.hostname} -> {addr}")
+        logging.debug(f"Resolved: {self.hostname} -> {addr}")
         logging.debug(f"Attempting to connect to computer: {self.hostname}")
         return ADUtils.tcp_ping(addr, 445)
 
diff --git a/bloodhound/ad/domain.py b/bloodhound/ad/domain.py
index 43ba815..4321c81 100644
--- a/bloodhound/ad/domain.py
+++ b/bloodhound/ad/domain.py
@@ -374,9 +374,9 @@ def get_domains(self, acl=False):
         """
         entries = self.search("(objectClass=domain)", [], generator=True, query_sd=acl)
 
-        entriesNum = 0
+        entries_count = 0
         for entry in entries:
-            entriesNum += 1
+            entries_count += 1
             # Todo: actually use these objects instead of discarding them
             # means rewriting other functions
             domain_object = ADDomain.fromLDAP(
@@ -393,7 +393,7 @@ def get_domains(self, acl=False):
             except IndexError:
                 pass
 
-        if entriesNum == 0:
+        if entries_count == 0:
             # Raise exception if we somehow managed to authenticate but the domain is wrong
             # prevents confusing exceptions later
             actualdn = self.ldap.server.info.other["defaultNamingContext"][0]
@@ -405,7 +405,7 @@ def get_domains(self, acl=False):
             )
             raise CollectionException("Specified domain was not found in LDAP")
 
-        logging.info("Found %u domains", entriesNum)
+        logging.info("Found %u domains", entries_count)
 
         return entries
 
@@ -417,27 +417,33 @@ def get_forest_domains(self):
         This searches the configuration, which is present only once in the forest but is replicated
         to every DC.
         """
-        entries = self.search(
+        entries_count = 0
+
+        found_domains = []
+
+        for entry in self.search(
             "(objectClass=crossRef)",
             ["nETBIOSName", "systemFlags", "nCName", "name"],
             search_base="CN=Partitions,%s"
             % self.ldap.server.info.other["configurationNamingContext"][0],
             generator=True,
-        )
-
-        entriesNum = 0
-        for entry in entries:
+        ):
             # Ensure systemFlags entry is not empty before running the naming context check.
             if not entry["attributes"]["systemFlags"]:
                 continue
+
             # This is a naming context, but not a domain
             if not entry["attributes"]["systemFlags"] & 2:
                 continue
+
             entry["attributes"]["distinguishedName"] = entry["attributes"]["nCName"]
-            entriesNum += 1
+            entries_count += 1
+
             # Todo: actually use these objects instead of discarding them
             # means rewriting other functions
-            d = ADDomain.fromLDAP(entry["attributes"]["nCName"])
+            domain = ADDomain.fromLDAP(entry["attributes"]["nCName"])
+            found_domains.append(domain)
+
             # We don't want to add our own domain since this entry doesn't contain the sid
             # which we need later on
             if entry["attributes"]["nCName"] not in self.ad.domains:
@@ -446,8 +452,36 @@ def get_forest_domains(self):
 
         # Store this number so we can easily determine if we are in a multi-domain
         # forest later on.
-        self.ad.num_domains = entriesNum
-        logging.info("Found %u domains in the forest", entriesNum)
+        self.ad.num_domains = entries_count
+        logging.info(f"Found {entries_count} domains in the forest: {', '.join(d.name for d in found_domains)}")
+
+    def get_trusts(self):
+
+        trusted_domains_names = []
+
+        entries = self.search(
+            "(objectClass=trustedDomain)",
+            attributes=[
+                "flatName",
+                "name",
+                "securityIdentifier",
+                "trustAttributes",
+                "trustDirection",
+                "trustType",
+            ],
+            generator=True,
+        )
+
+        entries_count = 0
+
+        for entry in entries:
+            entries_count += 1
+            trusted_domains_names.append(entry["attributes"]["name"])
+
+
+        logging.info(f"Found {entries_count} trusts: {', '.join(trusted_domains_names)}")
+
+        return entries
 
     def get_cache_items(self):
         self.get_objecttype()
@@ -689,9 +723,9 @@ def get_computers_withcache(self, include_properties=False, acl=False):
 
         entries = self.get_computers(include_properties, acl)
 
-        entriesNum = 0
+        entries_count = 0
         for entry in entries:
-            entriesNum += 1
+            entries_count += 1
             # Resolve it first for DN cache
             resolved_entry = ADUtils.resolve_ad_entry(entry)
             cacheitem = {
@@ -710,7 +744,7 @@ def get_computers_withcache(self, include_properties=False, acl=False):
                 entry["attributes"]["objectSid"],
             )
 
-        logging.info("Found %u computers", entriesNum)
+        logging.info("Found %u computers", entries_count)
 
         return entries
 
@@ -754,20 +788,6 @@ def get_childobjects(self, dn, use_resolver=True):
 
         return entries
 
-    def get_trusts(self):
-        entries = self.search(
-            "(objectClass=trustedDomain)",
-            attributes=[
-                "flatName",
-                "name",
-                "securityIdentifier",
-                "trustAttributes",
-                "trustDirection",
-                "trustType",
-            ],
-            generator=True,
-        )
-        return entries
 
     def prefetch_info(self, props=False, acls=False, cache_computers=False):
         self.get_objecttype()
diff --git a/bloodhound/enumeration/domains.py b/bloodhound/enumeration/domains.py
index 8f4ceed..f5a486e 100644
--- a/bloodhound/enumeration/domains.py
+++ b/bloodhound/enumeration/domains.py
@@ -148,12 +148,15 @@ def dump_domain(self, collect, timestamp="", filename='domains.json', fileNamePr
             num_entries = 0
             for entry in entries:
                 num_entries += 1
-                trust = ADDomainTrust(ADUtils.get_entry_property(entry, 'name'), ADUtils.get_entry_property(entry, 'trustDirection'), ADUtils.get_entry_property(entry, 'trustType'), ADUtils.get_entry_property(entry, 'trustAttributes'), ADUtils.get_entry_property(entry, 'securityIdentifier'))
+                trust = ADDomainTrust(
+                    ADUtils.get_entry_property(entry, 'name'),
+                    ADUtils.get_entry_property(entry, 'trustDirection'),
+                    ADUtils.get_entry_property(entry, 'trustType'),
+                    ADUtils.get_entry_property(entry, 'trustAttributes'),
+                    ADUtils.get_entry_property(entry, 'securityIdentifier')
+                )
                 domain['Trusts'].append(trust.to_output())
 
-            logging.info('Found %u trusts', num_entries)
-
-
         if 'container' in collect:
             for gplink_dn, options in ADUtils.parse_gplink_string(ADUtils.get_entry_property(domain_object, 'gPLink', '')):
                 link = dict()
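
The resulting archive name is easiest to see with concrete values; a minimal sketch of the naming logic in `main()` above, with hypothetical domain and timestamp:

    # Hypothetical values mirroring the zip-naming logic above
    domain = "corp.local"                      # -d value
    timestamp = "20240501133220"               # strftime("%Y%m%d%H%M%S")
    outputprefix = None                        # no -op/--outputprefix given
    trail_name = f"{domain}_{timestamp}_bloodhound_data.zip"
    zip_file_name = f"{outputprefix}_{trail_name}" if outputprefix else trail_name
    # zip_file_name == "corp.local_20240501133220_bloodhound_data.zip"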

From 5db27286c05b72164130caa47fb8dcf1c2821d2c Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Wed, 1 May 2024 14:17:36 +0200
Subject: [PATCH 4/6] Clean up unused imports and refactor trust enumeration

---
 bloodhound/__init__.py            | 11 +++++++----
 bloodhound/ad/authentication.py   |  9 +--------
 bloodhound/ad/domain.py           | 32 ++++++++++++++-----------------
 bloodhound/enumeration/domains.py | 13 ++++++++++---
 4 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/bloodhound/__init__.py b/bloodhound/__init__.py
index 0f0ddc5..121a2e1 100644
--- a/bloodhound/__init__.py
+++ b/bloodhound/__init__.py
@@ -22,8 +22,11 @@
 #
 ####################
 
-import os, sys, logging, argparse, getpass, time, re, datetime
+# Built-in imports
+import os, sys, logging, argparse, time, re, datetime
 from zipfile import ZipFile
+
+# Local library imports
 from bloodhound.ad.domain import AD, ADDC
 from bloodhound.ad.authentication import ADAuthentication
 from bloodhound.enumeration.computers import ComputerEnumerator
@@ -454,8 +457,9 @@ def main():
         else:
             auth.get_tgt()
 
-    # For adding timestamp prefix to the outputfiles
-    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+    # Timestamp prefix for the output files: ISO 8601-like, with colons replaced by hyphens so it is filename-safe
+    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
+
 
     bloodhound = BloodHound(ad)
     bloodhound.connect()
@@ -473,7 +477,6 @@ def main():
     # If args --zip is true, the compress output
     if args.zip:
         trail_name = f"{args.domain}_{timestamp}_bloodhound_data.zip"
-        logging.info(f"Compressing output into {trail_name}")
 
         # Get a list of files in the current dir
         list_of_files = os.listdir(os.getcwd())
diff --git a/bloodhound/ad/authentication.py b/bloodhound/ad/authentication.py
index 50a5e58..13598dc 100644
--- a/bloodhound/ad/authentication.py
+++ b/bloodhound/ad/authentication.py
@@ -26,13 +26,11 @@
 import logging
 import os
 import sys
-import traceback
 from binascii import unhexlify
 import datetime
 
 # Third party library imports
 from ldap3 import Server, Connection, NTLM, ALL, SASL, KERBEROS
-from ldap3.core.results import RESULT_STRONGER_AUTH_REQUIRED
 from ldap3.operation.bind import bind_operation
 from impacket.krb5.ccache import CCache
 from impacket.krb5.types import Principal, KerberosTime, Ticket
@@ -40,18 +38,13 @@
 from impacket.krb5.asn1 import (
     AP_REQ,
     AS_REP,
-    TGS_REQ,
     Authenticator,
     TGS_REP,
     seq_set,
-    seq_set_iter,
-    PA_FOR_USER_ENC,
-    Ticket as TicketAsn1,
-    EncTGSRepPart,
 )
 from impacket.spnego import SPNEGO_NegTokenInit, TypesMech
 from impacket.krb5 import constants
-from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS, sendReceive
+from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
 from pyasn1.type.univ import noValue
 
 
diff --git a/bloodhound/ad/domain.py b/bloodhound/ad/domain.py
index 4321c81..836932c 100644
--- a/bloodhound/ad/domain.py
+++ b/bloodhound/ad/domain.py
@@ -21,17 +21,17 @@
 # SOFTWARE.
 #
 ####################
+
+# Built-in imports
 from __future__ import unicode_literals
 import logging
-import traceback
 import codecs
 import json
-
+from typing import Generator
 from uuid import UUID
 from dns import resolver
 from ldap3 import ALL_ATTRIBUTES, BASE, SUBTREE, LEVEL
 from ldap3.core.exceptions import (
-    LDAPKeyError,
     LDAPAttributeError,
     LDAPCursorError,
     LDAPNoSuchObjectResult,
@@ -40,6 +40,9 @@
     LDAPCommunicationError,
 )
 from ldap3.protocol.microsoft import security_descriptor_control
+from future.utils import iteritems
+
+# Local library imports
 from bloodhound.ad.utils import (
     ADUtils,
     DNSCache,
@@ -49,7 +52,7 @@
 )
 from bloodhound.ad.computer import ADComputer
 from bloodhound.enumeration.objectresolver import ObjectResolver
-from future.utils import itervalues, iteritems, native_str
+
 
 """
 Active Directory Domain Controller
@@ -455,11 +458,15 @@ def get_forest_domains(self):
         self.ad.num_domains = entries_count
         logging.info(f"Found {entries_count} domains in the forest: {', '.join(d.name for d in found_domains)}")
 
-    def get_trusts(self):
+    def get_trusts(self) -> Generator[dict, None, None]:
+        """
+        Query Active Directory for trusted domains and return the matching entries lazily.
 
-        trusted_domains_names = []
+        Returns:
+            Generator[dict, None, None]: One dictionary of LDAP attributes per trusted domain.
+        """
 
-        entries = self.search(
+        return self.search(
             "(objectClass=trustedDomain)",
             attributes=[
                 "flatName",
@@ -472,17 +479,6 @@ def get_trusts(self):
             generator=True,
         )
 
-        entries_count = 0
-
-        for entry in entries:
-            entries_count += 1
-            trusted_domains_names.append(entry["attributes"]["name"])
-
-
-        logging.info(f"Found {entries_count} trusts: {', '.join(trusted_domains_names)}")
-
-        return entries
-
     def get_cache_items(self):
         self.get_objecttype()
         self.get_forest_domains()
diff --git a/bloodhound/enumeration/domains.py b/bloodhound/enumeration/domains.py
index f5a486e..fd2c2a8 100644
--- a/bloodhound/enumeration/domains.py
+++ b/bloodhound/enumeration/domains.py
@@ -145,11 +145,16 @@ def dump_domain(self, collect, timestamp="", filename='domains.json', fileNamePr
             domain['Aces'] = resolver.resolve_aces(aces)
 
         if 'trusts' in collect:
-            num_entries = 0
+            trusted_domains_names = []
+
+            entries_count = 0
+
             for entry in entries:
-                num_entries += 1
+                entries_count += 1
+                trust_name = ADUtils.get_entry_property(entry, 'name')
+                trusted_domains_names.append(trust_name)
                 trust = ADDomainTrust(
-                    ADUtils.get_entry_property(entry, 'name'),
+                    trust_name,
                     ADUtils.get_entry_property(entry, 'trustDirection'),
                     ADUtils.get_entry_property(entry, 'trustType'),
                     ADUtils.get_entry_property(entry, 'trustAttributes'),
@@ -157,6 +162,8 @@ def dump_domain(self, collect, timestamp="", filename='domains.json', fileNamePr
                 )
                 domain['Trusts'].append(trust.to_output())
 
+            logging.info(f"Found {entries_count} trusts: {', '.join(trusted_domains_names)}")
+
         if 'container' in collect:
             for gplink_dn, options in ADUtils.parse_gplink_string(ADUtils.get_entry_property(domain_object, 'gPLink', '')):
                 link = dict()
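
Because `get_trusts()` now returns the paged LDAP search unevaluated, the counting and logging move here, where the entries are actually iterated; a minimal consumption sketch (assuming `addc` is a connected `ADDC` instance):

    import logging

    # Hypothetical caller of the generator-based get_trusts()
    names = []
    for entry in addc.get_trusts():            # pages are fetched on demand
        names.append(entry["attributes"]["name"])
    logging.info(f"Found {len(names)} trusts: {', '.join(names)}")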

From a1910bf92ee43c4ba7f25c3b18c4cb3a1f6a9d1a Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Wed, 1 May 2024 16:33:43 +0200
Subject: [PATCH 5/6] Refactor into cli/core modules and add trust crawling

---
 bloodhound/__init__.py            | 496 ++++--------------------------
 bloodhound/ad/domain.py           |   8 +-
 bloodhound/cli.py                 | 195 ++++++++++++
 bloodhound/core.py                | 314 +++++++++++++++++++
 bloodhound/enumeration/domains.py |   3 +
 5 files changed, 571 insertions(+), 445 deletions(-)
 create mode 100644 bloodhound/cli.py
 create mode 100644 bloodhound/core.py

diff --git a/bloodhound/__init__.py b/bloodhound/__init__.py
index 121a2e1..56e175e 100644
--- a/bloodhound/__init__.py
+++ b/bloodhound/__init__.py
@@ -23,361 +23,23 @@
 ####################
 
 # Built-in imports
-import os, sys, logging, argparse, time, re, datetime
-from zipfile import ZipFile
+import sys
+import logging
+from collections import deque
+
 
 # Local library imports
-from bloodhound.ad.domain import AD, ADDC
-from bloodhound.ad.authentication import ADAuthentication
-from bloodhound.enumeration.computers import ComputerEnumerator
-from bloodhound.enumeration.memberships import MembershipEnumerator
-from bloodhound.enumeration.domains import DomainEnumerator
+from bloodhound import core, cli
+
 
 """
 BloodHound.py is a Python port of BloodHound, designed to run on Linux and Windows.
 """
 
-
-class BloodHound(object):
-    def __init__(self, ad):
-        self.ad = ad
-        self.ldap = None
-        self.pdc = None
-        self.sessions = []
-
-    def connect(self):
-        if len(self.ad.dcs()) == 0:
-            logging.error(
-                "Could not find a domain controller. Consider specifying a domain and/or DNS server."
-            )
-            sys.exit(1)
-
-        if not self.ad.baseDN:
-            logging.error(
-                "Could not figure out the domain to query. Please specify this manually with -d"
-            )
-            sys.exit(1)
-
-        pdc = self.ad.dcs()[0]
-        logging.debug("Using LDAP server: %s", pdc)
-        logging.debug("Using base DN: %s", self.ad.baseDN)
-
-        if len(self.ad.kdcs()) > 0:
-            kdc = self.ad.auth.kdc
-            logging.debug("Using kerberos KDC: %s", kdc)
-            logging.debug("Using kerberos realm: %s", self.ad.realm())
-
-        # Create a domain controller object
-        self.pdc = ADDC(pdc, self.ad)
-        # Create an object resolver
-        self.ad.create_objectresolver(self.pdc)
-
-    def run(
-        self,
-        collect,
-        num_workers=10,
-        disable_pooling=False,
-        timestamp="",
-        computerfile="",
-        cachefile=None,
-        exclude_dcs=False,
-        fileNamePrefix="",
-    ):
-        start_time = time.time()
-        if cachefile:
-            self.ad.load_cachefile(cachefile)
-
-        # Check early if we should enumerate computers as well
-        do_computer_enum = any(
-            method in collect
-            for method in [
-                "localadmin",
-                "session",
-                "loggedon",
-                "experimental",
-                "rdp",
-                "dcom",
-                "psremote",
-            ]
-        )
-
-        if "group" in collect or "objectprops" in collect or "acl" in collect:
-            # Fetch domains for later, computers if needed
-            self.pdc.prefetch_info(
-                "objectprops" in collect,
-                "acl" in collect,
-                cache_computers=do_computer_enum,
-            )
-            # Initialize enumerator
-            membership_enum = MembershipEnumerator(
-                self.ad, self.pdc, collect, disable_pooling
-            )
-            membership_enum.enumerate_memberships(
-                timestamp=timestamp, fileNamePrefix=fileNamePrefix
-            )
-        elif "container" in collect:
-            # Fetch domains for later, computers if needed
-            self.pdc.prefetch_info(
-                "objectprops" in collect,
-                "acl" in collect,
-                cache_computers=do_computer_enum,
-            )
-            # Initialize enumerator
-            membership_enum = MembershipEnumerator(
-                self.ad, self.pdc, collect, disable_pooling
-            )
-            membership_enum.do_container_collection(timestamp=timestamp)
-        elif do_computer_enum:
-            # We need to know which computers to query regardless
-            # We also need the domains to have a mapping from NETBIOS -> FQDN for local admins
-            self.pdc.prefetch_info(
-                "objectprops" in collect, "acl" in collect, cache_computers=True
-            )
-        elif "trusts" in collect:
-            # Prefetch domains
-            self.pdc.get_domains("acl" in collect)
-        if "trusts" in collect or "acl" in collect or "objectprops" in collect:
-            trusts_enum = DomainEnumerator(self.ad, self.pdc)
-            trusts_enum.dump_domain(
-                collect, timestamp=timestamp, fileNamePrefix=fileNamePrefix
-            )
-        if do_computer_enum:
-            # If we don't have a GC server, don't use it for deconflictation
-            have_gc = len(self.ad.gcs()) > 0
-            computer_enum = ComputerEnumerator(
-                self.ad,
-                self.pdc,
-                collect,
-                do_gc_lookup=have_gc,
-                computerfile=computerfile,
-                exclude_dcs=exclude_dcs,
-            )
-            computer_enum.enumerate_computers(
-                self.ad.computers,
-                num_workers=num_workers,
-                timestamp=timestamp,
-                fileNamePrefix=fileNamePrefix,
-            )
-        end_time = time.time()
-        minutes, seconds = divmod(int(end_time - start_time), 60)
-        logging.info("Done in %02dM %02dS" % (minutes, seconds))
-
-
-def resolve_collection_methods(methods):
-    """
-    Convert methods (string) to list of validated methods to resolve
-    """
-    valid_methods = [
-        "group",
-        "localadmin",
-        "session",
-        "trusts",
-        "default",
-        "all",
-        "loggedon",
-        "objectprops",
-        "experimental",
-        "acl",
-        "dcom",
-        "rdp",
-        "psremote",
-        "dconly",
-        "container",
-    ]
-    default_methods = ["group", "localadmin", "session", "trusts"]
-    # Similar to SharpHound, All is not really all, it excludes loggedon
-    all_methods = [
-        "group",
-        "localadmin",
-        "session",
-        "trusts",
-        "objectprops",
-        "acl",
-        "dcom",
-        "rdp",
-        "psremote",
-        "container",
-    ]
-    # DC only, does not collect to computers
-    dconly_methods = ["group", "trusts", "objectprops", "acl", "container"]
-    if "," in methods:
-        method_list = [method.lower() for method in methods.split(",")]
-        validated_methods = []
-        for method in method_list:
-            if method not in valid_methods:
-                logging.error("Invalid collection method specified: %s", method)
-                return False
-
-            if method == "default":
-                validated_methods += default_methods
-            elif method == "all":
-                validated_methods += all_methods
-            elif method == "dconly":
-                validated_methods += dconly_methods
-            else:
-                validated_methods.append(method)
-        return set(validated_methods)
-    else:
-        validated_methods = []
-        # It is only one
-        method = methods.lower()
-        if method in valid_methods:
-            if method == "default":
-                validated_methods += default_methods
-            elif method == "all":
-                validated_methods += all_methods
-            elif method == "dconly":
-                validated_methods += dconly_methods
-            else:
-                validated_methods.append(method)
-            return set(validated_methods)
-        else:
-            logging.error("Invalid collection method specified: %s", method)
-            return False
-
-
 def main():
-    #    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-
-    logger = logging.getLogger()
-    logger.setLevel(logging.INFO)
-    stream = logging.StreamHandler(sys.stderr)
-    stream.setLevel(logging.DEBUG)
-    formatter = logging.Formatter("%(levelname)s: %(message)s")
-    #    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
-    stream.setFormatter(formatter)
-    logger.addHandler(stream)
-
-    parser = argparse.ArgumentParser(
-        add_help=True,
-        description="Python based ingestor for BloodHound\nFor help or reporting issues, visit https://github.com/Fox-IT/BloodHound.py",
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-    )
+    args = cli.parse_arguments()
 
-    parser.add_argument(
-        "-c",
-        "--collectionmethod",
-        action="store",
-        default="Default",
-        help="Which information to collect. Supported: Group, LocalAdmin, Session, "
-        "Trusts, Default (all previous), DCOnly (no computer connections), DCOM, RDP,"
-        "PSRemote, LoggedOn, Container, ObjectProps, ACL, All (all except LoggedOn). "
-        "You can specify more than one by separating them with a comma. (default: Default)",
-    )
-    parser.add_argument(
-        "-d", "--domain", action="store", default="", help="Domain to query."
-    )
-    parser.add_argument("-v", action="store_true", help="Enable verbose output")
-    helptext = (
-        "Specify one or more authentication options. \n"
-        "By default Kerberos authentication is used and NTLM is used as fallback. \n"
-        "Kerberos tickets are automatically requested if a password or hashes are specified."
-    )
-    auopts = parser.add_argument_group("authentication options", description=helptext)
-    auopts.add_argument(
-        "-u",
-        "--username",
-        action="store",
-        help="Username. Format: username[@domain]; If the domain is unspecified, the current domain is used.",
-    )
-    auopts.add_argument("-p", "--password", action="store", help="Password")
-    auopts.add_argument("-k", "--kerberos", action="store_true", help="Use kerberos")
-    auopts.add_argument("--hashes", action="store", help="LM:NLTM hashes")
-    auopts.add_argument(
-        "-aesKey",
-        action="store",
-        metavar="hex key",
-        help="AES key to use for Kerberos Authentication (128 or 256 bits)",
-    )
-    auopts.add_argument(
-        "--auth-method",
-        choices=("auto", "ntlm", "kerberos"),
-        default="auto",
-        action="store",
-        help="Authentication methods. Force Kerberos or NTLM only or use auto for Kerberos with NTLM fallback",
-    )
-    coopts = parser.add_argument_group("collection options")
-    coopts.add_argument(
-        "-ns",
-        "--nameserver",
-        action="store",
-        help="Alternative name server to use for queries",
-    )
-    coopts.add_argument(
-        "--dns-tcp", action="store_true", help="Use TCP instead of UDP for DNS queries"
-    )
-    coopts.add_argument(
-        "--dns-timeout",
-        action="store",
-        type=int,
-        default=3,
-        help="DNS query timeout in seconds (default: 3)",
-    )
-    coopts.add_argument(
-        "-dc",
-        "--domain-controller",
-        metavar="HOST",
-        action="store",
-        help="Override which DC to query (hostname)",
-    )
-    coopts.add_argument(
-        "-gc",
-        "--global-catalog",
-        metavar="HOST",
-        action="store",
-        help="Override which GC to query (hostname)",
-    )
-    coopts.add_argument(
-        "-w",
-        "--workers",
-        action="store",
-        type=int,
-        default=10,
-        help="Number of workers for computer enumeration (default: 10)",
-    )
-    coopts.add_argument(
-        "--exclude-dcs",
-        action="store_true",
-        help="Skip DCs during computer enumeration",
-    )
-    coopts.add_argument(
-        "--disable-pooling",
-        action="store_true",
-        help="Don't use subprocesses for ACL parsing (only for debugging purposes)",
-    )
-    coopts.add_argument(
-        "--disable-autogc",
-        action="store_true",
-        help="Don't automatically select a Global Catalog (use only if it gives errors)",
-    )
-    coopts.add_argument(
-        "--zip",
-        action="store_true",
-        help="Compress the JSON output files into a zip archive",
-    )
-    coopts.add_argument(
-        "--computerfile",
-        action="store",
-        help="File containing computer FQDNs to use as allowlist for any computer based methods",
-    )
-    coopts.add_argument("--cachefile", action="store", help="Cache file (experimental)")
-    coopts.add_argument(
-        "--use-ldaps",
-        action="store_true",
-        help="Use LDAP over TLS on port 636 by default",
-    )
-    coopts.add_argument(
-        "-op",
-        "--outputprefix",
-        metavar="PREFIX_NAME",
-        action="store",
-        help="String to prepend to output file names",
-    )
-
-    args = parser.parse_args()
-
-    if args.v is True:
-        logger.setLevel(logging.DEBUG)
+    cli.setup_logging(verbose=args.v)
 
     # Initialize variables for LM and NT hashes
     lm, nt = "", ""
@@ -387,113 +49,65 @@ def main():
         try:
             lm, nt = args.hashes.split(":")
         except ValueError:
-            logger.error(
-                "Hashes provided in an incorrect format. Expected format: LM:NT"
-            )
-            sys.exit(1)
-
-    nameserver = args.nameserver
-
-    auth = ADAuthentication(
-        username=args.username,
-        password=args.password,
-        domain=args.domain,
-        auth_method=args.auth_method,
-        lm_hash=lm,
-        nt_hash=nt,
-        aeskey=args.aesKey,
-    )
-
-    ad = AD(
-        auth=auth,
-        domain=args.domain,
-        nameserver=nameserver,
-        dns_tcp=args.dns_tcp,
-        dns_timeout=args.dns_timeout,
-        use_ldaps=args.use_ldaps,
-    )
-
-    # Resolve collection methods
-    collect = resolve_collection_methods(args.collectionmethod)
-    if not collect:
-        return
-    logging.debug("Resolved collection methods: %s", ", ".join(list(collect)))
-
-    logging.debug("Using DNS to retrieve domain information")
-    ad.dns_resolve(domain=args.domain, options=args)
-
-    # Override the detected DC / GC if specified
-    if args.domain_controller:
-        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", args.domain_controller):
             logging.error(
-                "The specified domain controller %s looks like an IP address, but requires a hostname (FQDN).\n"
-                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
-                args.domain_controller,
-            )
-            sys.exit(1)
-        ad.override_dc(args.domain_controller)
-        logging.debug("Using supplied domain controller as KDC")
-        auth.set_kdc(args.domain_controller)
-
-    if args.global_catalog:
-        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", args.global_catalog):
-            logging.error(
-                "The specified global catalog server %s looks like an IP address, but requires a hostname (FQDN).\n"
-                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
-                args.global_catalog,
+                "Hashes provided in an incorrect format. Expected format: LM:NT"
             )
             sys.exit(1)
-        ad.override_gc(args.global_catalog)
 
-    if args.auth_method in ("auto", "kerberos"):
-        if args.kerberos is True:
-            logging.debug("Authentication: Kerberos ccache")
-            # kerberize()
-            if not auth.load_ccache():
-                logging.debug(
-                    "Could not load ticket from ccache, trying to request a TGT instead"
-                )
-                auth.get_tgt()
-        else:
-            auth.get_tgt()
-
-    # Timestamp prefix for the output files: ISO 8601-like, with colons replaced by hyphens so it is filename-safe
-    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
+    # Queue to manage domains to be processed
+    domains_to_process = deque([args.domain])
+    handled_domains = set()
+
+    while domains_to_process:
+        current_domain = domains_to_process.popleft()
+        if current_domain in handled_domains:
+            continue
+
+        # Perform the ingest on the current domain
+        bloodhound = core.ingest(
+            username=args.username,
+            password=args.password,
+            domain=current_domain,
+            auth_method=args.auth_method,
+            lm_hash=lm,
+            nt_hash=nt,
+            aes_key=args.aesKey,
+            nameserver=args.nameserver,
+            dns_tcp=args.dns_tcp,
+            dns_timeout=args.dns_timeout,
+            use_ldaps=args.use_ldaps,
+            collection_method=args.collectionmethod,
+            workers=args.workers,
+            disable_pooling=args.disable_pooling,
+            computerfile=args.computerfile,
+            cachefile=args.cachefile,
+            exclude_dcs=args.exclude_dcs,
+            file_name_prefix=args.outputprefix,
+            domain_controller=args.domain_controller,
+            global_catalog=args.global_catalog,
+            kerberos=args.kerberos,
+            disable_autogc=args.disable_autogc
+        )
 
+        # Add the current domain to the handled set
+        handled_domains.add(current_domain)
 
-    bloodhound = BloodHound(ad)
-    bloodhound.connect()
-    bloodhound.run(
-        collect=collect,
-        num_workers=args.workers,
-        disable_pooling=args.disable_pooling,
-        timestamp=timestamp,
-        computerfile=args.computerfile,
-        cachefile=args.cachefile,
-        exclude_dcs=args.exclude_dcs,
-        fileNamePrefix=args.outputprefix,
-    )
+        # core.ingest() returns None when the collection methods are invalid
+        if args.crawl and bloodhound is not None:
+            # Add newly discovered trusted domains to the queue if not already handled
+            for trusted_domain in bloodhound.trusted_domains_names:
+                if trusted_domain not in handled_domains:
+                    domains_to_process.append(trusted_domain)
 
     # If the --zip flag is set, compress the output
     if args.zip:
-        trail_name = f"{args.domain}_{timestamp}_bloodhound_data.zip"
-
-        # Get a list of files in the current dir
-        list_of_files = os.listdir(os.getcwd())
+        prefix = ""
+        if args.outputprefix:
+            prefix = f"{args.outputprefix}_"
 
-        # Create handle to zip file with detailed suffix
-        zip_file_name = args.outputprefix + "_" + trail_name if args.outputprefix else trail_name
+        prefix += args.domain
+        cli.zip_output(prefix=prefix)
 
-        with ZipFile(zip_file_name, 'w') as zip:
-            # For each file that matches the criteria
-            for each_file in list_of_files:
-                # Checking if the file starts with the output prefix (if defined) or timestamp, and ends in .json
-                file_prefix = args.outputprefix if args.outputprefix else timestamp
-                if each_file.startswith(file_prefix) and each_file.endswith("json"):
-                    zip.write(each_file)
-                    os.remove(each_file)
 
-        logging.info(f"Successfully created and filled {zip_file_name}")
 
 if __name__ == "__main__":
     main()
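
Stripped of the collection details, the crawl added above is a plain breadth-first traversal of the trust graph; a self-contained sketch, with the `ingest` callback standing in for `core.ingest`:

    from collections import deque

    def crawl(start_domain, ingest):
        """Breadth-first ingestion across trusted domains (illustrative)."""
        handled, queue = set(), deque([start_domain])
        while queue:
            domain = queue.popleft()
            if domain in handled:
                continue
            result = ingest(domain)  # expected to expose trusted_domains_names
            handled.add(domain)
            if result is not None:
                for trusted in result.trusted_domains_names:
                    if trusted not in handled:
                        queue.append(trusted)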
diff --git a/bloodhound/ad/domain.py b/bloodhound/ad/domain.py
index 836932c..9d6d350 100644
--- a/bloodhound/ad/domain.py
+++ b/bloodhound/ad/domain.py
@@ -811,7 +811,7 @@ def __init__(
         auth=None,
         nameserver=None,
         dns_tcp=False,
-        dns_timeout=3.0,
+        dns_timeout=5.0,
         use_ldaps=False,
     ):
         self.domain = domain
@@ -909,7 +909,7 @@ def load_cachefile(self, cachefile):
     def save_cachefile(self, cachefile):
         pass
 
-    def dns_resolve(self, domain=None, options=None):
+    def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc: bool = False):
         logging.debug("Querying domain controller information from DNS")
 
         basequery = "_ldap._tcp.pdc._msdcs"
@@ -957,8 +957,8 @@ def dns_resolve(self, domain=None, options=None):
 
         except resolver.NXDOMAIN:
             # Only show warning if we don't already have a GC specified manually
-            if options and not options.global_catalog:
-                if not options.disable_autogc:
+            if not global_catalog:
+                if not disable_autogc:
                     logging.warning(
                         "Could not find a global catalog server, assuming the primary DC has this role\n"
                         "If this gives errors, either specify a hostname with -gc or disable gc resolution with --disable-autogc"
diff --git a/bloodhound/cli.py b/bloodhound/cli.py
new file mode 100644
index 0000000..da34589
--- /dev/null
+++ b/bloodhound/cli.py
@@ -0,0 +1,195 @@
+# Built-in imports
+import argparse
+import logging
+import sys
+import datetime
+import os
+from typing import Optional
+from zipfile import ZipFile
+
+
+def zip_output(prefix: Optional[str] = None, output_dir: str = ".", cleanup: bool = True) -> None:
+    """
+    Creates a zip archive of JSON files that match a specified prefix.
+
+    This function zips all JSON files in the current working directory that start with the specified prefix or a generated timestamp prefix if no prefix is provided.
+    After zipping, it can optionally delete the original files.
+
+    Args:
+        prefix (str, optional): Prefix to filter which files to zip. If None, uses a timestamp as the prefix.
+        output_dir (str, optional): Directory where the zip file will be stored. Defaults to the current directory.
+        cleanup (bool, optional): Whether to delete the original files after zipping. Defaults to True.
+
+    Raises:
+        FileNotFoundError: If the specified output directory does not exist.
+
+    Returns:
+        None: The function creates a zip file and optionally deletes the original files but returns nothing.
+    """
+    if not os.path.exists(output_dir):
+        raise FileNotFoundError(f"The specified output directory {output_dir} does not exist.")
+
+    # Timestamp used in the archive name: ISO 8601-like, with colons replaced by hyphens so it is filename-safe
+    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
+    zip_file_name = f"{prefix}_{timestamp}_bloodhound_data.zip" if prefix else f"{timestamp}_bloodhound_data.zip"
+    zip_file_path = os.path.join(output_dir, zip_file_name)
+
+    with ZipFile(zip_file_path, "w") as archive:
+        # Add every JSON output file in the working directory to the archive
+        for each_file in os.listdir(os.getcwd()):
+            if each_file.endswith(".json"):
+                file_path = os.path.join(os.getcwd(), each_file)
+                archive.write(file_path, arcname=each_file)
+                if cleanup:
+                    os.remove(file_path)
+
+    logging.info(f"Successfully created and filled {zip_file_path}")
+
+
+def setup_logging(verbose: bool = False):
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    stream = logging.StreamHandler(sys.stderr)
+    stream.setLevel(logging.DEBUG)
+
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
+    stream.setFormatter(formatter)
+    logger.addHandler(stream)
+
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+def parse_arguments():
+    parser = argparse.ArgumentParser(
+        add_help=True,
+        description="Python based ingestor for BloodHound\nFor help or reporting issues, visit https://github.com/dirkjanm/BloodHound.py",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+
+    parser.add_argument(
+        "-c",
+        "--collectionmethod",
+        action="store",
+        default="Default",
+        help="Which information to collect. Supported: Group, LocalAdmin, Session, "
+        "Trusts, Default (all previous), DCOnly (no computer connections), DCOM, RDP,"
+        "PSRemote, LoggedOn, Container, ObjectProps, ACL, All (all except LoggedOn). "
+        "You can specify more than one by separating them with a comma. (default: Default)",
+    )
+    parser.add_argument(
+        "-d", "--domain", action="store", default="", help="Domain to query."
+    )
+    parser.add_argument("-v", action="store_true", help="Enable verbose output")
+    helptext = (
+        "Specify one or more authentication options. \n"
+        "By default Kerberos authentication is used and NTLM is used as fallback. \n"
+        "Kerberos tickets are automatically requested if a password or hashes are specified."
+    )
+    auopts = parser.add_argument_group("authentication options", description=helptext)
+    auopts.add_argument(
+        "-u",
+        "--username",
+        action="store",
+        help="Username. Format: username[@domain]; If the domain is unspecified, the current domain is used.",
+    )
+    auopts.add_argument("-p", "--password", action="store", help="Password")
+    auopts.add_argument("-k", "--kerberos", action="store_true", help="Use kerberos")
+    auopts.add_argument("--hashes", action="store", help="LM:NLTM hashes")
+    auopts.add_argument(
+        "-aesKey",
+        action="store",
+        metavar="hex key",
+        help="AES key to use for Kerberos Authentication (128 or 256 bits)",
+    )
+    auopts.add_argument(
+        "--auth-method",
+        choices=("auto", "ntlm", "kerberos"),
+        default="auto",
+        action="store",
+        help="Authentication methods. Force Kerberos or NTLM only or use auto for Kerberos with NTLM fallback",
+    )
+    coopts = parser.add_argument_group("collection options")
+    coopts.add_argument(
+        "-ns",
+        "--nameserver",
+        action="store",
+        help="Alternative name server to use for queries",
+    )
+    coopts.add_argument(
+        "--dns-tcp", action="store_true", help="Use TCP instead of UDP for DNS queries"
+    )
+    coopts.add_argument(
+        "--dns-timeout",
+        action="store",
+        type=int,
+        default=3,
+        help="DNS query timeout in seconds (default: 3)",
+    )
+    coopts.add_argument(
+        "-dc",
+        "--domain-controller",
+        metavar="HOST",
+        action="store",
+        help="Override which DC to query (hostname)",
+    )
+    coopts.add_argument(
+        "-gc",
+        "--global-catalog",
+        metavar="HOST",
+        action="store",
+        help="Override which GC to query (hostname)",
+    )
+    coopts.add_argument(
+        "-w",
+        "--workers",
+        action="store",
+        type=int,
+        default=10,
+        help="Number of workers for computer enumeration (default: 10)",
+    )
+    coopts.add_argument(
+        "--exclude-dcs",
+        action="store_true",
+        help="Skip DCs during computer enumeration",
+    )
+    coopts.add_argument(
+        "--disable-pooling",
+        action="store_true",
+        help="Don't use subprocesses for ACL parsing (only for debugging purposes)",
+    )
+    coopts.add_argument(
+        "--disable-autogc",
+        action="store_true",
+        help="Don't automatically select a Global Catalog (use only if it gives errors)",
+    )
+
+    coopts.add_argument(
+        "--crawl",
+        action="store_true",
+        help="Enable crawling of discovered domains to dynamically ingest data from trusted domains."
+    )
+
+    coopts.add_argument(
+        "--zip",
+        action="store_true",
+        help="Compress the JSON output files into a zip archive",
+    )
+    coopts.add_argument(
+        "--computerfile",
+        action="store",
+        help="File containing computer FQDNs to use as allowlist for any computer based methods",
+    )
+    coopts.add_argument("--cachefile", action="store", help="Cache file (experimental)")
+    coopts.add_argument(
+        "--use-ldaps",
+        action="store_true",
+        help="Use LDAP over TLS on port 636 by default",
+    )
+    coopts.add_argument(
+        "-op",
+        "--outputprefix",
+        metavar="PREFIX_NAME",
+        action="store",
+        help="String to prepend to output file names",
+    )
+
+    return parser.parse_args()
\ No newline at end of file
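
A quick usage sketch for `zip_output` (values hypothetical; a timestamp is always embedded in the archive name):

    from bloodhound import cli

    # Zips every *.json file in the current working directory into
    # ./corp.local_<timestamp>_bloodhound_data.zip, deleting the originals
    cli.zip_output(prefix="corp.local", output_dir=".", cleanup=True)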
diff --git a/bloodhound/core.py b/bloodhound/core.py
new file mode 100644
index 0000000..7a85dcf
--- /dev/null
+++ b/bloodhound/core.py
@@ -0,0 +1,314 @@
+# Built-in imports
+import logging
+import sys
+import time
+import datetime
+import re
+
+# Local library imports
+from bloodhound.ad.domain import AD
+from bloodhound.ad.authentication import ADAuthentication
+from bloodhound.ad.domain import ADDC
+from bloodhound.enumeration.computers import ComputerEnumerator
+from bloodhound.enumeration.memberships import MembershipEnumerator
+from bloodhound.enumeration.domains import DomainEnumerator
+
+def ingest(username, password, domain, auth_method, lm_hash, nt_hash, aes_key, nameserver,
+           dns_tcp, dns_timeout, use_ldaps, collection_method, workers, disable_pooling,
+           computerfile, cachefile, exclude_dcs, file_name_prefix, domain_controller, global_catalog, kerberos, disable_autogc):
+    """
+    Performs the data collection and processing for BloodHound.
+    """
+
+    logging.info(f"Starting data ingestion for domain: {domain}")
+
+    auth = ADAuthentication(
+        username=username,
+        password=password,
+        domain=domain,
+        auth_method=auth_method,
+        lm_hash=lm_hash,
+        nt_hash=nt_hash,
+        aeskey=aes_key,
+    )
+
+    ad = AD(
+        auth=auth,
+        domain=domain,
+        nameserver=nameserver,
+        dns_tcp=dns_tcp,
+        dns_timeout=dns_timeout,
+        use_ldaps=use_ldaps,
+    )
+
+    # Resolve collection methods
+    collect = resolve_collection_methods(collection_method)
+    if not collect:
+        return
+    logging.debug("Resolved collection methods: %s", ", ".join(list(collect)))
+
+    logging.debug("Using DNS to retrieve domain information")
+    ad.dns_resolve(
+        domain=domain,
+        global_catalog=bool(global_catalog),
+        disable_autogc=disable_autogc
+    )
+
+    # Override the detected DC / GC if specified
+    if domain_controller:
+        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", domain_controller):
+            logging.error(
+                "The specified domain controller %s looks like an IP address, but requires a hostname (FQDN).\n"
+                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
+                domain_controller,
+            )
+            sys.exit(1)
+        ad.override_dc(domain_controller)
+        logging.debug("Using supplied domain controller as KDC")
+        auth.set_kdc(domain_controller)
+
+    if global_catalog:
+        if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", global_catalog):
+            logging.error(
+                "The specified global catalog server %s looks like an IP address, but requires a hostname (FQDN).\n"
+                "Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.",
+                global_catalog,
+            )
+            sys.exit(1)
+        ad.override_gc(global_catalog)
+
+    if auth_method in ("auto", "kerberos"):
+        if kerberos is True:
+            logging.debug("Authentication: Kerberos ccache")
+            # kerberize()
+            if not auth.load_ccache():
+                logging.debug(
+                    "Could not load ticket from ccache, trying to request a TGT instead"
+                )
+                auth.get_tgt()
+        else:
+            auth.get_tgt()
+
+    bloodhound = BloodHound(ad)
+    bloodhound.connect()
+    bloodhound.run(
+        collect=collect,
+        num_workers=workers,
+        disable_pooling=disable_pooling,
+        computerfile=computerfile,
+        cachefile=cachefile,
+        exclude_dcs=exclude_dcs,
+        fileNamePrefix=file_name_prefix,
+    )
+
+    logging.info(f"End of data ingestion for domain: {domain}")
+
+    return bloodhound
+
+def resolve_collection_methods(methods):
+    """
+    Convert a comma-separated string of collection methods into a validated
+    set of methods to run, or return False if any method is unknown.
+    """
+    valid_methods = [
+        "group",
+        "localadmin",
+        "session",
+        "trusts",
+        "default",
+        "all",
+        "loggedon",
+        "objectprops",
+        "experimental",
+        "acl",
+        "dcom",
+        "rdp",
+        "psremote",
+        "dconly",
+        "container",
+    ]
+    default_methods = ["group", "localadmin", "session", "trusts"]
+    # As in SharpHound, "all" is not literally all: it excludes loggedon
+    all_methods = [
+        "group",
+        "localadmin",
+        "session",
+        "trusts",
+        "objectprops",
+        "acl",
+        "dcom",
+        "rdp",
+        "psremote",
+        "container",
+    ]
+    # DC only: never connects to member computers
+    dconly_methods = ["group", "trusts", "objectprops", "acl", "container"]
+    aliases = {
+        "default": default_methods,
+        "all": all_methods,
+        "dconly": dconly_methods,
+    }
+
+    validated_methods = []
+    # Splitting on "," also covers the single-method case
+    for method in methods.split(","):
+        method = method.strip().lower()
+        if method not in valid_methods:
+            logging.error("Invalid collection method specified: %s", method)
+            return False
+        validated_methods += aliases.get(method, [method])
+    return set(validated_methods)
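+
+# Illustrative results (given the method lists above):
+#   resolve_collection_methods("default")    -> {"group", "localadmin", "session", "trusts"}
+#   resolve_collection_methods("dconly,acl") -> {"group", "trusts", "objectprops", "acl", "container"}
+#   resolve_collection_methods("bogus")      -> False (after logging an error)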
+
+
+class BloodHound(object):
+    def __init__(self, ad):
+        self.ad = ad
+        self.ldap = None
+        self.pdc = None
+        self.sessions = []
+        self.trusted_domains_names = []
+
+    def connect(self):
+        if len(self.ad.dcs()) == 0:
+            logging.error(
+                "Could not find a domain controller. Consider specifying a domain and/or DNS server."
+            )
+            sys.exit(1)
+
+        if not self.ad.baseDN:
+            logging.error(
+                "Could not figure out the domain to query. Please specify this manually with -d"
+            )
+            sys.exit(1)
+
+        pdc = self.ad.dcs()[0]
+        logging.debug("Using LDAP server: %s", pdc)
+        logging.debug("Using base DN: %s", self.ad.baseDN)
+
+        if len(self.ad.kdcs()) > 0:
+            kdc = self.ad.auth.kdc
+            logging.debug("Using kerberos KDC: %s", kdc)
+            logging.debug("Using kerberos realm: %s", self.ad.realm())
+
+        # Create a domain controller object
+        self.pdc = ADDC(pdc, self.ad)
+        # Create an object resolver
+        self.ad.create_objectresolver(self.pdc)
+
+    def run(
+        self,
+        collect,
+        num_workers=10,
+        disable_pooling=False,
+        computerfile="",
+        cachefile=None,
+        exclude_dcs=False,
+        fileNamePrefix="",
+    ):
+        start_time = time.time()
+
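+        # The timestamp doubles as a filename prefix, producing output such
+        # as "2024-05-02T01-56-01_corp.local_users.json" (illustrative name).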
+        timestamp = f"{datetime.datetime.now():%Y-%m-%dT%H-%M-%S}_{self.ad.domain}_"
+
+        if cachefile:
+            self.ad.load_cachefile(cachefile)
+
+        # Check early if we should enumerate computers as well
+        do_computer_enum = any(
+            method in collect
+            for method in [
+                "localadmin",
+                "session",
+                "loggedon",
+                "experimental",
+                "rdp",
+                "dcom",
+                "psremote",
+            ]
+        )
+
+        if "group" in collect or "objectprops" in collect or "acl" in collect:
+            # Fetch domains for later, computers if needed
+            self.pdc.prefetch_info(
+                "objectprops" in collect,
+                "acl" in collect,
+                cache_computers=do_computer_enum,
+            )
+            # Initialize enumerator
+            membership_enum = MembershipEnumerator(
+                self.ad, self.pdc, collect, disable_pooling
+            )
+            membership_enum.enumerate_memberships(
+                timestamp=timestamp, fileNamePrefix=fileNamePrefix
+            )
+        elif "container" in collect:
+            # Fetch domains for later, computers if needed
+            self.pdc.prefetch_info(
+                "objectprops" in collect,
+                "acl" in collect,
+                cache_computers=do_computer_enum,
+            )
+            # Initialize enumerator
+            membership_enum = MembershipEnumerator(
+                self.ad, self.pdc, collect, disable_pooling
+            )
+            membership_enum.do_container_collection(timestamp=timestamp)
+        elif do_computer_enum:
+            # We need to know which computers to query regardless
+            # We also need the domains to have a mapping from NETBIOS -> FQDN for local admins
+            self.pdc.prefetch_info(
+                "objectprops" in collect, "acl" in collect, cache_computers=True
+            )
+        elif "trusts" in collect:
+            # Prefetch domains
+            self.pdc.get_domains("acl" in collect)
+
+        if "trusts" in collect or "acl" in collect or "objectprops" in collect:
+            trusts_enum = DomainEnumerator(self.ad, self.pdc)
+            trusts_enum.dump_domain(
+                collect, timestamp=timestamp, fileNamePrefix=fileNamePrefix
+            )
+
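+            # Expose the resolved trust names so callers of run() can act on
+            # them (populated by the matching change in enumeration/domains.py).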
+            self.trusted_domains_names = trusts_enum.trusted_domains_names
+
+        if do_computer_enum:
+            # If we don't have a GC server, don't use it for deconfliction
+            have_gc = len(self.ad.gcs()) > 0
+            computer_enum = ComputerEnumerator(
+                self.ad,
+                self.pdc,
+                collect,
+                do_gc_lookup=have_gc,
+                computerfile=computerfile,
+                exclude_dcs=exclude_dcs,
+            )
+            computer_enum.enumerate_computers(
+                self.ad.computers,
+                num_workers=num_workers,
+                timestamp=timestamp,
+                fileNamePrefix=fileNamePrefix,
+            )
+
+        end_time = time.time()
+        minutes, seconds = divmod(int(end_time - start_time), 60)
+        logging.info("Done in %02dM %02dS", minutes, seconds)
+
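+
+# Illustrative entry point (a sketch only; this patch does not wire it into a
+# CLI, and every value below is a placeholder for a lab environment):
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    ingest(
+        username="alice",
+        password="Passw0rd!",
+        domain="corp.local",
+        auth_method="auto",
+        lm_hash="",
+        nt_hash="",
+        aes_key="",
+        nameserver="10.0.0.10",
+        dns_tcp=False,
+        dns_timeout=3,
+        use_ldaps=False,
+        collection_method="default",
+        workers=10,
+        disable_pooling=False,
+        computerfile="",
+        cachefile=None,
+        exclude_dcs=False,
+        file_name_prefix="",
+        domain_controller=None,
+        global_catalog=None,
+        kerberos=False,
+        disable_autogc=False,
+    )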
diff --git a/bloodhound/enumeration/domains.py b/bloodhound/enumeration/domains.py
index fd2c2a8..557a7e8 100644
--- a/bloodhound/enumeration/domains.py
+++ b/bloodhound/enumeration/domains.py
@@ -43,6 +43,7 @@ def __init__(self, addomain, addc):
         """
         self.addomain = addomain
         self.addc = addc
+        self.trusted_domains_names = []
 
     def dump_domain(self, collect, timestamp="", filename='domains.json', fileNamePrefix=""):
         if (fileNamePrefix != None):
@@ -162,6 +163,8 @@ def dump_domain(self, collect, timestamp="", filename='domains.json', fileNamePr
                 )
                 domain['Trusts'].append(trust.to_output())
 
+            self.trusted_domains_names = trusted_domains_names
+
             logging.info(f"Found {entries_count} trusts: {', '.join(trusted_domains_names)}")
 
         if 'container' in collect:

From 5299dfd0a478ad69b33ec835160f62d3eab634ec Mon Sep 17 00:00:00 2001
From: n3rada <72791564+n3rada@users.noreply.github.com>
Date: Thu, 2 May 2024 01:56:01 +0200
Subject: [PATCH 6/6] Add timeout for dns_resolve

---
 bloodhound/ad/domain.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/bloodhound/ad/domain.py b/bloodhound/ad/domain.py
index 9d6d350..0bb2379 100644
--- a/bloodhound/ad/domain.py
+++ b/bloodhound/ad/domain.py
@@ -909,7 +909,7 @@ def load_cachefile(self, cachefile):
     def save_cachefile(self, cachefile):
         pass
 
-    def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc: bool = False):
+    def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc: bool = False, timeout: float = 5.0):
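+        # "timeout" is handed to dnspython as the query lifetime: the total
+        # number of seconds allowed for each SRV lookup below (across
+        # retries), so an unresponsive DNS server fails fast instead of
+        # hanging the collector.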
         logging.debug("Querying domain controller information from DNS")
 
         basequery = "_ldap._tcp.pdc._msdcs"
@@ -925,7 +925,7 @@ def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc:
 
         try:
 
-            q = self.dnsresolver.query(query, "SRV", tcp=self.dns_tcp)
+            q = self.dnsresolver.query(query, "SRV", tcp=self.dns_tcp, lifetime=timeout)
 
             if str(q.qname).lower().startswith("_ldap._tcp.pdc._msdcs"):
                 ad_domain = str(q.qname).lower()[len(basequery) :].strip(".")
@@ -947,7 +947,7 @@ def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc:
 
         try:
             q = self.dnsresolver.query(
-                query.replace("pdc", "gc"), "SRV", tcp=self.dns_tcp
+                query.replace("pdc", "gc"), "SRV", tcp=self.dns_tcp, lifetime=timeout
             )
             for r in q:
                 gc = str(r.target).rstrip(".")
@@ -971,7 +971,7 @@ def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc:
 
         try:
             kquery = query.replace("pdc", "dc").replace("_ldap", "_kerberos")
-            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp)
+            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp, lifetime=timeout)
             # TODO: Get the additional records here to get the DC ip immediately
             for r in q:
                 kdc = str(r.target).rstrip(".")
@@ -994,7 +994,7 @@ def dns_resolve(self, domain=None, global_catalog: bool = True, disable_autogc:
         if self.auth.userdomain.lower() != ad_domain.lower():
             # Resolve KDC for user auth domain
             kquery = "_kerberos._tcp.dc._msdcs.%s" % self.auth.userdomain
-            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp)
+            q = self.dnsresolver.query(kquery, "SRV", tcp=self.dns_tcp, lifetime=timeout)
             for r in q:
                 kdc = str(r.target).rstrip(".")
                 logging.debug("Found KDC for user: %s" % str(r.target).rstrip("."))