3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# and Heimdal can get mutual authentication errors due to the 24 second
# difference between UTC and GMT when using some zone files (eg. the PDT
# zone from the US)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
47 from functools import partial
57 from samba.auth import system_session
58 from samba.samdb import SamDB
59 from samba.dcerpc import drsuapi
60 from samba.kcc_utils import *
61 from samba.graph_utils import *
62 from samba import ldif_utils
66 """The Knowledge Consistency Checker class.
68 A container for objects and methods allowing a run of the KCC. Produces a
69 set of connections in the samdb for which the Distributed Replication
70 Service can then utilize to replicate naming contexts
73 """Initializes the partitions class which can hold
74 our local DCs partitions or all the partitions in
77 self.part_table = {} # partition objects
79 self.transport_table = {}
80 self.ip_transport = None
81 self.sitelink_table = {}
82 self.dsa_by_dnstr = {}
85 self.get_dsa_by_guidstr = self.dsa_by_guid.get
86 self.get_dsa = self.dsa_by_dnstr.get
88 # TODO: These should be backed by a 'permanent' store so that when
89 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
90 # the failure information can be returned
91 self.kcc_failed_links = {}
92 self.kcc_failed_connections = set()
94 # Used in inter-site topology computation. A list
95 # of connections (by NTDSConnection object) that are
96 # to be kept when pruning un-needed NTDS Connections
97 self.kept_connections = set()
99 self.my_dsa_dnstr = None # My dsa DN
100 self.my_dsa = None # My dsa object
102 self.my_site_dnstr = None
    def load_all_transports(self):
        """Loads the inter-site transport objects for Sites

        Populates self.transport_table (keyed by transport objectGUID
        string) and remembers the 'IP' transport in self.ip_transport.

        ::returns: Raises KCCError on error
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=interSiteTransport)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site transports - (%s)" %
                           estr)

        for msg in res:
            dnstr = str(msg.dn)

            transport = Transport(dnstr)

            transport.load_transport(self.samdb)

            # setdefault: keep the first object seen for a given GUID
            self.transport_table.setdefault(str(transport.guid),
                                            transport)
            if transport.name == 'IP':
                self.ip_transport = transport

        # IP is the only transport we replicate over, so its absence is fatal
        if self.ip_transport is None:
            raise KCCError("there doesn't seem to be an IP transport")
    def load_all_sitelinks(self):
        """Loads the inter-site siteLink objects

        Populates self.sitelink_table keyed by the siteLink DN string.

        ::returns: Raises KCCError on error
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=siteLink)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)

        for msg in res:
            dnstr = str(msg.dn)

            # already loaded; don't replace an existing entry
            if dnstr in self.sitelink_table:
                continue

            sitelink = SiteLink(dnstr)

            sitelink.load_sitelink(self.samdb)

            # Assign this siteLink to table
            # and index by dn
            self.sitelink_table[dnstr] = sitelink
    def load_site(self, dn_str):
        """Helper for load_my_site and load_all_sites. It puts all the site's
        DSAs into the KCC indices.

        :param dn_str: a site dn_str
        :return: the Site object pertaining to the dn_str (the previously
            loaded object is returned if the site was already in site_table)
        """
        site = Site(dn_str, unix_now)
        site.load_site(self.samdb)

        # I am not sure why, but we avoid replacing the site with an
        # identical copy.
        guid = str(site.site_guid)
        if guid not in self.site_table:
            self.site_table[guid] = site

        # index every DSA in the site by DN string and by GUID string
        self.dsa_by_dnstr.update(site.dsa_table)
        self.dsa_by_guid.update((str(x.dsa_guid), x)
                                for x in site.dsa_table.values())

        return self.site_table[guid]
    def load_my_site(self):
        """Loads the Site class for the local DSA

        Sets self.my_site_dnstr and self.my_site.

        ::returns: Raises an Exception on error
        """
        self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
            self.samdb.server_site_name(),
            self.samdb.get_config_basedn()))

        self.my_site = self.load_site(self.my_site_dnstr)
    def load_all_sites(self):
        """Discover all sites and instantiate and load each

        Sites are indexed into the KCC tables via load_site().

        ::returns: Raises KCCError on error
        """
        try:
            res = self.samdb.search("CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=site)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find sites - (%s)" % estr)

        for msg in res:
            sitestr = str(msg.dn)
            self.load_site(sitestr)
    def load_my_dsa(self):
        """Discover my nTDSDSA dn thru the rootDSE entry

        Sets self.my_dsa_dnstr and self.my_dsa, and ensures the local
        DSA appears in the dsa_by_dnstr/dsa_by_guid indices.

        ::returns: Raises KCCError on error.
        """
        dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
        try:
            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    attrs=["objectGUID"])
        except ldb.LdbError, (enum, estr):
            logger.warning("Search for %s failed: %s. This typically happens"
                           " in --importldif mode due to lack of module"
                           " support.", dn, estr)
            try:
                # We work around the failure above by looking at the
                # dsServiceName that was put in the fake rootdse by
                # the --exportldif, rather than the
                # samdb.get_ntds_GUID(). The disadvantage is that this
                # mode requires we modify the @ROOTDSE dnq to support
                # --forced-local-dsa
                service_name_res = self.samdb.search(base="",
                                                     scope=ldb.SCOPE_BASE,
                                                     attrs=["dsServiceName"])
                dn = ldb.Dn(self.samdb,
                            service_name_res[0]["dsServiceName"][0])

                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                        attrs=["objectGUID"])
            except ldb.LdbError, (enum, estr):
                raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)

        if len(res) != 1:
            raise KCCError("Unable to find my nTDSDSA at %s" %
                           dn.extended_str())

        # sanity check: the object we found must carry our own NTDS GUID
        ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
        if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
            raise KCCError("Did not find the GUID we expected,"
                           " perhaps due to --importldif")

        self.my_dsa_dnstr = str(res[0].dn)

        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)

        if self.my_dsa_dnstr not in self.dsa_by_dnstr:
            DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:"
                              " it must be RODC.\n"
                              "Let's add it, because my_dsa is special!\n"
                              "(likewise for self.dsa_by_guid of course)" %
                              self.my_dsa_dnstr)

            self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
            self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
    def load_all_partitions(self):
        """Discover all NCs thru the Partitions dn and
        instantiate and load the NCs.

        Each NC is inserted into the part_table by partition
        dn string (not the nCName dn string)

        ::returns: Raises KCCError on error
        """
        try:
            res = self.samdb.search("CN=Partitions,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=crossRef)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find partitions - (%s)" % estr)

        for msg in res:
            partstr = str(msg.dn)

            # already loaded; don't replace an existing entry
            if partstr in self.part_table:
                continue

            part = Partition(partstr)

            part.load_partition(self.samdb)
            self.part_table[partstr] = part
294 def should_be_present_test(self):
295 """Enumerate all loaded partitions and DSAs in local
296 site and test if NC should be present as replica
298 for partdn, part in self.part_table.items():
299 for dsadn, dsa in self.my_site.dsa_table.items():
300 needed, ro, partial = part.should_be_present(dsa)
301 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
302 (dsadn, part.nc_dnstr, needed, ro, partial))
    def refresh_failed_links_connections(self):
        """Based on MS-ADTS 6.2.2.1"""

        # Instead of NULL link with failure_count = 0, the tuple is
        # simply removed

        # LINKS: Refresh failed links
        self.kcc_failed_links = {}
        current, needed = self.my_dsa.get_rep_tables()
        for replica in current.values():
            # For every possible connection to replicate
            for reps_from in replica.rep_repsFrom:
                failure_count = reps_from.consecutive_sync_failures
                if failure_count <= 0:
                    continue

                dsa_guid = str(reps_from.source_dsa_obj_guid)
                time_first_failure = reps_from.last_success
                last_result = reps_from.last_attempt
                dns_name = reps_from.dns_name1

                f = self.kcc_failed_links.get(dsa_guid)
                if f is None:
                    f = KCCFailedObject(dsa_guid, failure_count,
                                        time_first_failure, last_result,
                                        dns_name)
                    self.kcc_failed_links[dsa_guid] = f
                #elif f.failure_count == 0:
                #    f.failure_count = failure_count
                #    f.time_first_failure = time_first_failure
                #    f.last_result = last_result
                else:
                    # merge: worst failure count, earliest first failure
                    f.failure_count = max(f.failure_count, failure_count)
                    f.time_first_failure = min(f.time_first_failure,
                                               time_first_failure)
                    f.last_result = last_result

        # CONNECTIONS: Refresh failed connections
        restore_connections = set()
        if opts.attempt_live_connections:
            DEBUG("refresh_failed_links: checking if links are still down")
            for connection in self.kcc_failed_connections:
                try:
                    drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
                    # Failed connection is no longer failing
                    restore_connections.add(connection)
                except drs_utils.drsException:
                    # Failed connection still failing
                    connection.failure_count += 1
        else:
            DEBUG("refresh_failed_links: not checking live links because we\n"
                  "weren't asked to --attempt-live-connections")

        # Remove the restored connections from the failed connections
        self.kcc_failed_connections.difference_update(restore_connections)
360 def is_stale_link_connection(self, target_dsa):
361 """Returns False if no tuple z exists in the kCCFailedLinks or
362 kCCFailedConnections variables such that z.UUIDDsa is the
363 objectGUID of the target dsa, z.FailureCount > 0, and
364 the current time - z.TimeFirstFailure > 2 hours.
366 # Returns True if tuple z exists...
367 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
369 # failure_count should be > 0, but check anyways
370 if failed_link.failure_count > 0:
371 unix_first_failure = \
372 nttime2unix(failed_link.time_first_failure)
373 # TODO guard against future
374 if unix_first_failure > unix_now:
375 logger.error("The last success time attribute for \
376 repsFrom is in the future!")
378 # Perform calculation in seconds
379 if (unix_now - unix_first_failure) > 60 * 60 * 2:
    # TODO: This should be backed by some form of local database
    def remove_unneeded_failed_links_connections(self):
        # Remove all tuples in kcc_failed_links where failure count = 0
        # In this implementation, this should never happen.

        # Remove all connections which were not used this run or connections
        # that became active during this run.

        # NOTE(review): intentionally a no-op stub — see module TODO about
        # persisting failure info for DRSGetReplInfo.
        pass
    def remove_unneeded_ntdsconn(self, all_connected):
        """Removes unneeded NTDS Connections after computation
        of KCC intra and inter-site topology has finished.

        :param all_connected: True if the intersite graph is fully
            connected (the result of create_intersite_connections);
            inter-site connections are only pruned when this holds.
        """
        mydsa = self.my_dsa

        # Loop thru connections
        for cn_conn in mydsa.connect_table.values():
            if cn_conn.guid is None:
                if opts.readonly:
                    # fabricate a GUID so read-only runs can still compare
                    cn_conn.guid = misc.GUID(str(uuid.uuid4()))
                    cn_conn.whenCreated = nt_now
                else:
                    cn_conn.load_connection(self.samdb)

        for cn_conn in mydsa.connect_table.values():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is None:
                # no fromServer: connection is unusable, delete it
                cn_conn.to_be_deleted = True
                continue

            # Get the source DSA no matter what site
            s_dsa = self.get_dsa(s_dnstr)

            #XXX should an RODC be regarded as same site
            same_site = s_dnstr in self.my_site.dsa_table

            # Given an nTDSConnection object cn, if the DC with the
            # nTDSDSA object dc that is the parent object of cn and
            # the DC with the nTDSDA object referenced by cn!fromServer
            # are in the same site, the KCC on dc deletes cn if all of
            # the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # No site settings object s exists for the local DC's site, or
            # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
            # s!options.
            #
            # Another nTDSConnection object cn2 exists such that cn and
            # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
            # and either
            #
            #     cn!whenCreated < cn2!whenCreated
            #
            #     cn!whenCreated = cn2!whenCreated and
            #     cn!objectGUID < cn2!objectGUID
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if same_site:
                if not cn_conn.is_generated():
                    continue

                if self.my_site.is_cleanup_ntdsconn_disabled():
                    continue

                # Loop thru connections looking for a duplicate that
                # fulfills the previous criteria
                lesser = False
                packed_guid = ndr_pack(cn_conn.guid)
                for cn2_conn in mydsa.connect_table.values():
                    if cn2_conn is cn_conn:
                        continue

                    s2_dnstr = cn2_conn.get_from_dnstr()

                    # If the NTDS Connections has a different
                    # fromServer field then no match
                    if s2_dnstr != s_dnstr:
                        continue

                    # "lesser" means cn loses the tie-break and cn2 survives
                    lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                              (cn_conn.whenCreated == cn2_conn.whenCreated and
                               packed_guid < ndr_pack(cn2_conn.guid)))

                    if lesser:
                        break

                if lesser and not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

            # Given an nTDSConnection object cn, if the DC with the nTDSDSA
            # object dc that is the parent object of cn and the DC with
            # the nTDSDSA object referenced by cn!fromServer are in
            # different sites, a KCC acting as an ISTG in dc's site
            # deletes cn if all of the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # cn!fromServer references an nTDSDSA object for a DC
            # in a site other than the local DC's site.
            #
            # The keepConnections sequence returned by
            # CreateIntersiteConnections() does not contain
            # cn!objectGUID, or cn is "superseded by" (see below)
            # another nTDSConnection cn2 and keepConnections
            # contains cn2!objectGUID.
            #
            # The return value of CreateIntersiteConnections()
            # was true.
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
            # cn!options
            #
            else:  # different site

                if not mydsa.is_istg():
                    continue

                if not cn_conn.is_generated():
                    continue

                # We are directly using this connection in intersite or
                # we are using a connection which can supersede this one.
                #
                # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
                # appear to be correct.
                #
                # 1. cn!fromServer and cn!parent appear inconsistent with
                #    no cn2
                # 2. The repsFrom do not imply each other
                #
                if cn_conn in self.kept_connections:  # and not_superceded:
                    continue

                # This is the result of create_intersite_connections
                if not all_connected:
                    continue

                if not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        if mydsa.is_ro() or opts.readonly:
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    DEBUG_FN("TO BE DELETED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_FN("TO BE ADDED:\n%s" % connect)

            # Peform deletion from our tables but perform
            # no database modification
            mydsa.commit_connections(self.samdb, ro=True)
        else:
            # Commit any modified connections
            mydsa.commit_connections(self.samdb)
    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
        """Part of MS-ADTS 6.2.2.5.

        Update t_repsFrom if necessary to satisfy requirements. Such
        updates are typically required when the IDL_DRSGetNCChanges
        server has moved from one site to another--for example, to
        enable compression when the server is moved from the
        client's site to another site.

        :param n_rep: NC replica we need
        :param t_repsFrom: repsFrom tuple to modify
        :param s_rep: NC replica at source DSA
        :param s_dsa: source DSA
        :param cn_conn: Local DSA NTDSConnection child

        ::returns: (update) bit field containing which portion of the
           repsFrom was modified. This bit field is suitable as input
           to IDL_DRSReplicaModify ulModifyFields element, as it consists
           of these bits:
               drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
               drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
               drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
        """
        s_dnstr = s_dsa.dsa_dnstr
        update = 0x0

        same_site = s_dnstr in self.my_site.dsa_table

        # if schedule doesn't match then update and modify
        times = convert_schedule_to_repltimes(cn_conn.schedule)
        if times != t_repsFrom.schedule:
            t_repsFrom.schedule = times
            update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE

        # Bit DRS_PER_SYNC is set in replicaFlags if and only
        # if nTDSConnection schedule has a value v that specifies
        # scheduled replication is to be performed at least once
        # per week.
        if cn_conn.is_schedule_minimum_once_per_week():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
        # if the source DSA and the local DC's nTDSDSA object are
        # in the same site or source dsa is the FSMO role owner
        # of one or more FSMO roles in the NC replica.
        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
        # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
        # t.replicaFlags if and only if s and the local DC's
        # nTDSDSA object are in different sites.
        if ((cn_conn.options &
             dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):

            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:

                # it LOOKS as if this next test is a bit silly: it
                # checks the flag then sets it if it not set; the same
                # effect could be achieved by unconditionally setting
                # it. But in fact the repsFrom object has special
                # magic attached to it, and altering replica_flags has
                # side-effects. That is bad in my opinion, but there
                # you go.
                if ((t_repsFrom.replica_flags &
                     drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                    t_repsFrom.replica_flags |= \
                        drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        elif not same_site:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
        # and only if s and the local DC's nTDSDSA object are
        # not in the same site and the
        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
        # clear in cn!options
        if (not same_site and
            (cn_conn.options &
             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
        # set in t.replicaFlags if and only if cn!enabledConnection = false.
        if not cn_conn.is_enabled():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

        # If s and the local DC's nTDSDSA object are in the same site,
        # cn!transportType has no value, or the RDN of cn!transportType
        # is CN=IP:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
        #
        #     t.uuidTransport = NULL GUID.
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        # Otherwise:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is set.
        #
        #     If x is the object with dsname cn!transportType,
        #     t.uuidTransport = x!objectGUID.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # It appears that the first statement i.e.
        #
        #     "If s and the local DC's nTDSDSA object are in the same
        #      site, cn!transportType has no value, or the RDN of
        #      cn!transportType is CN=IP:"
        #
        # could be a slightly tighter statement if it had an "or"
        # between each condition. I believe this should
        # be interpreted as:
        #
        #     IF (same-site) OR (no-value) OR (type-ip)
        #
        # because IP should be the primary transport mechanism
        # (even in inter-site) and the absense of the transportType
        # attribute should always imply IP no matter if its multi-site
        #
        # NOTE MS-TECH INCORRECT:
        #
        # All indications point to these statements above being
        # incorrectly stated:
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # because the uuidDSA is a GUID and not a GUID-base DNS
        # name. Nor can uuidDsa hold (s!parent)!a if not
        # dNSHostName. What should have been said is:
        #
        #     t.naDsa = The GUID-based DNS name of s
        #
        # That would also be correct if transportAddressAttribute
        # were "mailAddress" because (naDsa) can also correctly
        # hold the SMTP ISM service address.
        #
        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

        # We're not currently supporting SMTP replication
        # so is_smtp_replication_available() is currently
        # always returning False
        if ((same_site or
             cn_conn.transport_dnstr is None or
             cn_conn.transport_dnstr.find("CN=IP") == 0 or
             not is_smtp_replication_available())):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
                t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

            t_repsFrom.transport_guid = misc.GUID()

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

        else:
            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

            # We have a transport type but its not an
            # object in the database
            if cn_conn.transport_guid not in self.transport_table:
                raise KCCError("Missing inter-site transport - (%s)" %
                               cn_conn.transport_dnstr)

            x_transport = self.transport_table[str(cn_conn.transport_guid)]

            if t_repsFrom.transport_guid != x_transport.guid:
                t_repsFrom.transport_guid = x_transport.guid

            # See (NOTE MS-TECH INCORRECT) above
            if x_transport.address_attr == "dNSHostName":

                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:
                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

            else:
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent of
                # the DSA object
                try:
                    pdnstr = s_dsa.get_parent_dnstr()
                    attrs = [x_transport.address_attr]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    raise KCCError(
                        "Unable to find attr (%s) for (%s) - (%s)" %
                        (x_transport.address_attr, pdnstr, estr))

                msg = res[0]
                nastr = str(msg[x_transport.address_attr][0])

                # See (NOTE MS-TECH INCORRECT) above
                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:

                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

        if t_repsFrom.is_modified():
            logger.debug("modify_repsFrom(): %s" % t_repsFrom)
    def is_repsFrom_implied(self, n_rep, cn_conn):
        """Given a NC replica and NTDS Connection, determine if the connection
        implies a repsFrom tuple should be present from the source DSA listed
        in the connection to the naming context

        :param n_rep: NC replica
        :param cn_conn: NTDS Connection
        ::returns (True || False), source DSA:
        """
        #XXX different conditions for "implies" than MS-ADTS 6.2.2

        # NTDS Connection must satisfy all the following criteria
        # to imply a repsFrom tuple is needed:
        #
        #    cn!enabledConnection = true.
        #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
        #    cn!fromServer references an nTDSDSA object.

        s_dsa = None

        if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is not None:
                s_dsa = self.get_dsa(s_dnstr)

        # No DSA matching this source DN string?
        if s_dsa is None:
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # criteria must be met:
        #
        # An NC replica of the NC "is present" on the DC to
        # which the nTDSDSA object referenced by cn!fromServer
        # corresponds.
        #
        # An NC replica of the NC "should be present" on
        # the local DC
        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

        if s_rep is None or not s_rep.is_present():
            return False, None

        # To imply a repsFrom tuple is needed, each of these
        # criteria must be met:
        #
        # The NC replica on the DC referenced by cn!fromServer is
        # a writable replica or the NC replica that "should be
        # present" on the local DC is a partial replica.
        #
        # The NC is not a domain NC, the NC replica that
        # "should be present" on the local DC is a partial
        # replica, cn!transportType has no value, or
        # cn!transportType has an RDN of CN=IP.
        #
        implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
                  (not n_rep.is_domain() or
                   n_rep.is_partial() or
                   cn_conn.transport_dnstr is None or
                   cn_conn.transport_dnstr.find("CN=IP") == 0)

        if implied:
            return True, s_dsa

        return False, None
    def translate_ntdsconn(self):
        """This function adjusts values of repsFrom abstract attributes of NC
        replicas on the local DC to match those implied by
        nTDSConnection objects.

        Implements the translation step of MS-ADTS 6.2.2.5.
        """
        if self.my_dsa.is_translate_ntdsconn_disabled():
            logger.debug("skipping translate_ntdsconn() "
                         "because disabling flag is set")
            return

        logger.debug("translate_ntdsconn(): enter")

        current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()

        # Filled in with replicas we currently have that need deleting
        delete_reps = set()

        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # n_rep - NC replica (n)
        # t_repsFrom - tuple (t) in n!repsFrom
        # s_dsa - Source DSA of the replica. Defined as nTDSDSA
        #         object (s) such that (s!objectGUID = t.uuidDsa)
        #         In our IDL representation of repsFrom the (uuidDsa)
        #         attribute is called (source_dsa_obj_guid)
        # cn_conn - (cn) is nTDSConnection object and child of the local
        #           DC's nTDSDSA object and (cn!fromServer = s)
        # s_rep - source DSA replica of n

        # If we have the replica and its not needed
        # then we add it to the "to be deleted" list.
        for dnstr in current_rep_table:
            if dnstr not in needed_rep_table:
                delete_reps.add(dnstr)

        DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
                 len(needed_rep_table), len(delete_reps)))

        if delete_reps:
            DEBUG('deleting these reps: %s' % delete_reps)
            for dnstr in delete_reps:
                del current_rep_table[dnstr]

        # Now perform the scan of replicas we'll need
        # and compare any current repsFrom against the
        # connections
        for n_rep in needed_rep_table.values():

            # load any repsFrom and fsmo roles as we'll
            # need them during connection translation
            n_rep.load_repsFrom(self.samdb)
            n_rep.load_fsmo_roles(self.samdb)

            # Loop thru the existing repsFrom tupples (if any)
            # XXX This is a list and could contain duplicates
            #     (multiple load_repsFrom calls)
            for t_repsFrom in n_rep.rep_repsFrom:

                # for each tuple t in n!repsFrom, let s be the nTDSDSA
                # object such that s!objectGUID = t.uuidDsa
                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                s_dsa = self.get_dsa_by_guidstr(guidstr)

                # Source dsa is gone from config (strange)
                # so cleanup stale repsFrom for unlisted DSA
                if s_dsa is None:
                    logger.warning("repsFrom source DSA guid (%s) not found" %
                                   guidstr)
                    t_repsFrom.to_be_deleted = True
                    continue

                s_dnstr = s_dsa.dsa_dnstr

                # Retrieve my DSAs connection object (if it exists)
                # that specifies the fromServer equivalent to
                # the DSA that is specified in the repsFrom source
                cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)

                # Let (cn) be the nTDSConnection object such that (cn)
                # is a child of the local DC's nTDSDSA object and
                # (cn!fromServer = s) and (cn!options) does not contain
                # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.

                # KCC removes this repsFrom tuple if any of the following
                # is true:
                #     No cn matched in previous search above

                #XXX varying possible interpretations of rodc_topology
                if cn_conn is None or cn_conn.is_rodc_topology():
                    t_repsFrom.to_be_deleted = True
                    continue

                # [...] KCC removes this repsFrom tuple if:
                #
                #     No NC replica of the NC "is present" on DSA that
                #     would be source of replica
                #
                #     A writable replica of the NC "should be present" on
                #     the local DC, but a partial replica "is present" on
                #     the source DSA
                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                if s_rep is None or not s_rep.is_present() or \
                   (not n_rep.is_ro() and s_rep.is_partial()):

                    t_repsFrom.to_be_deleted = True
                    continue

                # If the KCC did not remove t from n!repsFrom, it updates t
                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Loop thru connections and add implied repsFrom tuples
            # for each NTDSConnection under our local DSA if the
            # repsFrom is not already present
            for cn_conn in self.my_dsa.connect_table.values():

                implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
                if not implied:
                    continue

                # Loop thru the existing repsFrom tupples (if any) and
                # if we already have a tuple for this connection then
                # no need to proceed to add. It will have been changed
                # to have the correct attributes above
                for t_repsFrom in n_rep.rep_repsFrom:
                    guidstr = str(t_repsFrom.source_dsa_obj_guid)
                    if s_dsa is self.get_dsa_by_guidstr(guidstr):
                        s_dsa = None
                        break

                if s_dsa is None:
                    continue

                # Create a new RepsFromTo and proceed to modify
                # it according to specification
                t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

                t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

                # Add to our NC repsFrom as this is newly computed
                if t_repsFrom.is_modified():
                    n_rep.rep_repsFrom.append(t_repsFrom)

            if opts.readonly:
                # Display any to be deleted or modified repsFrom
                text = n_rep.dumpstr_to_be_deleted()
                if text:
                    logger.info("TO BE DELETED:\n%s" % text)
                text = n_rep.dumpstr_to_be_modified()
                if text:
                    logger.info("TO BE MODIFIED:\n%s" % text)

                # Peform deletion from our tables but perform
                # no database modification
                n_rep.commit_repsFrom(self.samdb, ro=True)
            else:
                # Commit any modified repsFrom to the NC replica
                n_rep.commit_repsFrom(self.samdb)
    def merge_failed_links(self):
        """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.

        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.
        """
        # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
        #     from Bridgeheads

        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
        if opts.attempt_live_connections:
            DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
        else:
            DEBUG_FN("skipping merge_failed_links() because it requires "
                     "real network connections\n"
                     "and we weren't asked to --attempt-live-connections")
def setup_graph(self, part):
    """Set up a GRAPH, populated with a VERTEX for each site
    object, a MULTIEDGE for each siteLink object, and a
    MUTLIEDGESET for each siteLinkBridge object (or implied
    siteLinkBridge)

    :param part: the partition (crossRef) the graph is being built for
    ::returns: a new graph
    """
    g = IntersiteGraph()

    # NOTE(review): the initialisation of guid_to_vertex (a dict mapping
    # site GUID -> list of vertices) appears to be elided in this copy.

    # One vertex per site.
    for site_guid, site in self.site_table.items():
        vertex = Vertex(site, part)
        vertex.guid = site_guid
        vertex.ndrpacked_guid = ndr_pack(site.site_guid)
        g.vertices.add(vertex)

        if not guid_to_vertex.get(site_guid):
            guid_to_vertex[site_guid] = []

        guid_to_vertex[site_guid].append(vertex)

    # One multi-edge per (transport, siteLink) pair; track which vertices
    # end up connected by at least one edge.
    connected_vertices = set()
    for transport_guid, transport in self.transport_table.items():
        # Currently only ever "IP"
        for site_link_dn, site_link in self.sitelink_table.items():
            # NOTE(review): the continuation line(s) of this call appear
            # to be elided here.
            new_edge = create_edge(transport_guid, site_link,
            connected_vertices.update(new_edge.vertices)
            g.edges.add(new_edge)

        # If 'Bridge all site links' is enabled and Win2k3 bridges required
        # is not set
        # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
        # No documentation for this however, ntdsapi.h appears to have:
        # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
        if (((self.my_site.site_options & 0x00000002) == 0
             and (self.my_site.site_options & 0x00001000) == 0)):
            g.edge_set.add(create_auto_edge_set(g, transport_guid))

            # TODO get all site link bridges
            # NOTE(review): continuation line(s) of this call elided.
            for site_link_bridge in []:
                g.edge_set.add(create_edge_set(g, transport_guid,

    g.connected_vertices = connected_vertices

    # be less verbose in dot file output unless --debug
    # NOTE(review): the initialisation of dot_edges (and possibly a guard
    # around the dot-file code) appears to be elided in this copy.
    do_dot_files = opts.dot_files and opts.debug
    for edge in g.edges:
        for a, b in itertools.combinations(edge.vertices, 2):
            dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
    verify_properties = ()
    verify_and_dot('site_edges', dot_edges, directed=False,
                   label=self.my_dsa_dnstr,
                   properties=verify_properties, debug=DEBUG,
                   dot_files=do_dot_files)
def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
    """Get a bridghead DC.

    :param site: site object representing the site for which a bridgehead
        DC is desired
    :param part: crossRef for NC to replicate.
    :param transport: interSiteTransport object for replication
        traffic
    :param partial_ok: True if a DC containing a partial
        replica or a full replica will suffice, False if only
        a full replica will suffice.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, False to assume no DC
        has failed
    ::returns: dsa object for the bridgehead DC or None
    """
    # All the real filtering is done by get_all_bridgeheads(); this
    # method just reduces the candidate list to a single DSA.
    bhs = self.get_all_bridgeheads(site, part, transport,
                                   partial_ok, detect_failed)

    # NOTE(review): the branch distinguishing the empty-list case
    # (log "bhdn=None", presumably return None) from the non-empty case
    # (log and presumably return bhs[0]) appears to be elided in this
    # copy, along with the DEBUG_MAGENTA argument continuation.
    DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
    DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
                (site.site_dnstr, bhs[0].dsa_dnstr))
def get_all_bridgeheads(self, site, part, transport,
                        partial_ok, detect_failed):
    """Get all bridghead DCs satisfying the given criteria

    :param site: site object representing the site for which
        bridgehead DCs are desired.
    :param part: partition for NC to replicate.
    :param transport: interSiteTransport object for
        replication traffic.
    :param partial_ok: True if a DC containing a partial
        replica or a full replica will suffice, False if
        only a full replica will suffice.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, FALSE to assume
        no DC has failed
    ::returns: list of dsa object for available bridgehead
        DCs

    NOTE(review): several statements appear elided from this copy
    (the bhs list initialisation, the per-filter "continue"s, the
    try: around the samdb search, the bhs.append, the random-order
    else-branch and the final return) -- confirm against the full
    file before relying on this text.
    """
    logger.debug("get_all_bridgeheads: %s" % transport.name)
    if 'Site-5' in site.site_dnstr:
        DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
                  " detect_failed %s" % (site.site_dnstr, part.partstr,
                                         partial_ok, detect_failed))
    logger.debug(site.rw_dsa_table)
    # Only writable DSAs in the site are bridgehead candidates.
    for dsa in site.rw_dsa_table.values():

        pdnstr = dsa.get_parent_dnstr()

        # IF t!bridgeheadServerListBL has one or more values and
        # t!bridgeheadServerListBL does not contain a reference
        # to the parent object of dc then skip dc
        if ((len(transport.bridgehead_list) != 0 and
             pdnstr not in transport.bridgehead_list)):

        # IF dc is in the same site as the local DC
        #     IF a replica of cr!nCName is not in the set of NC replicas
        #     that "should be present" on dc or a partial replica of the
        #     NC "should be present" but partialReplicasOkay = FALSE
        #         Skip dc
        if self.my_site.same_site(dsa):
            needed, ro, partial = part.should_be_present(dsa)
            if not needed or (partial and not partial_ok):

            rep = dsa.get_current_replica(part.nc_dnstr)

        # IF an NC replica of cr!nCName is not in the set of NC
        # replicas that "are present" on dc or a partial replica of
        # the NC "is present" but partialReplicasOkay = FALSE
        #     Skip dc
            rep = dsa.get_current_replica(part.nc_dnstr)
            if rep is None or (rep.is_partial() and not partial_ok):

        # IF AmIRODC() and cr!nCName corresponds to default NC then
        #     Let dsaobj be the nTDSDSA object of the dc
        #     IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
        #         Skip dc
        if self.my_dsa.is_ro() and rep is not None and rep.is_default():
            if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):

        # IF t!name != "IP" and the parent object of dc has no value for
        # the attribute specified by t!transportAddressAttribute
        #     Skip dc
        if transport.name != "IP":
            # MS tech specification says we retrieve the named
            # attribute in "transportAddressAttribute" from the parent
            # of the DSA object
            attrs = [transport.address_attr]

            # NOTE(review): Python 2-only except syntax below
            # ("except E, var"); under Python 3 this must be
            # "except ldb.LdbError as e".
            res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
            except ldb.LdbError, (enum, estr):

            if transport.address_attr not in msg:

            nastr = str(msg[transport.address_attr][0])

        # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
        #     Skip dc
        if self.is_bridgehead_failed(dsa, detect_failed):
            DEBUG("bridgehead is failed")

        # Survived all filters: dsa is an eligible bridgehead.
        logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)

    # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
    # s!options
    #     SORT bhs such that all GC servers precede DCs that are not GC
    #     servers, and otherwise by ascending objectGUID
    # ELSE
    #     SORT bhs in a random order
    if site.is_random_bridgehead_disabled():
        # Py2 cmp-style comparator sort -- GC servers first, then by GUID.
        bhs.sort(sort_dsa_by_gc_and_guid)
def is_bridgehead_failed(self, dsa, detect_failed):
    """Determine whether a given DC is known to be in a failed state

    :param dsa: the candidate bridgehead DSA to check
    :param detect_failed: when False, detection is skipped entirely
        and the DC is treated as healthy
    ::returns: True if and only if the DC should be considered failed

    Here we DEPART from the pseudo code spec which appears to be
    wrong. It says, in full:

    /***** BridgeheadDCFailed *****/
    /* Determine whether a given DC is known to be in a failed state.
     * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
     * IN: detectFailedDCs - TRUE if and only failed DC detection is
     *     enabled.
     * RETURNS: TRUE if and only if the DC should be considered to be in a
     *     failed state.
     */
    BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
    {
        IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
        the options attribute of the site settings object for the local
        ELSEIF a tuple z exists in the kCCFailedLinks or
        kCCFailedConnections variables such that z.UUIDDsa =
        objectGUID, z.FailureCount > 1, and the current time -
        z.TimeFirstFailure > 2 hours
        ELSE
            RETURN detectFailedDCs
    }

    where you will see detectFailedDCs is not behaving as
    advertised -- it is acting as a default return code in the
    event that a failure is not detected, not a switch turning
    detection on or off. Elsewhere the documentation seems to
    concur with the comment rather than the code.
    """
    # Caller asked us not to detect failures at all.
    if not detect_failed:
        # NOTE(review): a "return False" appears to be elided here.

    # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
    # When DETECT_STALE_DISABLED, we can never know of if
    # it's in a failed state
    if self.my_site.site_options & 0x00000008:
        # NOTE(review): a "return False" appears to be elided here too.

    # Otherwise failure is determined by stale-link bookkeeping.
    return self.is_stale_link_connection(dsa)
def create_connection(self, part, rbh, rsite, transport,
                      lbh, lsite, link_opt, link_sched,
                      partial_ok, detect_failed):
    """Create an nTDSConnection object with the given parameters
    if one does not already exist.

    :param part: crossRef object for the NC to replicate.
    :param rbh: nTDSDSA object for DC to act as the
        IDL_DRSGetNCChanges server (which is in a site other
        than the local DC's site).
    :param rsite: site of the rbh
    :param transport: interSiteTransport object for the transport
        to use for replication traffic.
    :param lbh: nTDSDSA object for DC to act as the
        IDL_DRSGetNCChanges client (which is in the local DC's site).
    :param lsite: site of the lbh
    :param link_opt: Replication parameters (aggregated siteLink options,
        etc.)
    :param link_sched: Schedule specifying the times at which
        to begin replicating.
    :partial_ok: True if bridgehead DCs containing partial
        replicas of the NC are acceptable.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, FALSE to assume no DC
        has failed

    NOTE(review): several continuation lines and else-branches appear
    elided from this copy (e.g. the (partial_ok, detect_failed)
    continuations of the get_all_bridgeheads calls, the second half of
    the is_override_notify_default condition, the "cn.options &=/|="
    left-hand sides of two bit updates, and the readonly/else guards
    around commit_connections) -- confirm against the full file.
    """
    rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
    # Index remote bridgeheads by DN for O(1) lookup of cn!fromServer.
    rbh_table = {x.dsa_dnstr: x for x in rbhs_all}

    DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
                                    [x.dsa_dnstr for x in rbhs_all]))

    # MS-TECH says to compute rbhs_avail but then doesn't use it
    # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
    #                                       partial_ok, detect_failed)

    lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
    DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
                                    [x.dsa_dnstr for x in lbhs_all]))

    # MS-TECH says to compute lbhs_avail but then doesn't use it
    # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
    #                                       partial_ok, detect_failed)

    # Pass 1: bring already-existing generated connections into line
    # with the siteLink's options/schedule.
    # FOR each nTDSConnection object cn such that the parent of cn is
    # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
    for ldsa in lbhs_all:
        for cn in ldsa.connect_table.values():

            rdsa = rbh_table.get(cn.from_dnstr)

            DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
            # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
            #    NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
            #    cn!transportType references t
            if ((cn.is_generated() and
                 not cn.is_rodc_topology() and
                 cn.transport_guid == transport.guid)):

                # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
                #    cn!options and cn!schedule != sch
                #     Perform an originating update to set cn!schedule to
                #     sch
                if ((not cn.is_user_owned_schedule() and
                     not cn.is_equivalent_schedule(link_sched))):
                    cn.schedule = link_sched
                    cn.set_modified(True)

                # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                #    NTDSCONN_OPT_USE_NOTIFY are set in cn
                if cn.is_override_notify_default() and \

                    # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
                    #    ri.Options
                    #     Perform an originating update to clear bits
                    #     NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    #     NTDSCONN_OPT_USE_NOTIFY in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
                        ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                          dsdb.NTDSCONN_OPT_USE_NOTIFY)
                        cn.set_modified(True)

                    # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
                    #    ri.Options
                    #     Perform an originating update to set bits
                    #     NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    #     NTDSCONN_OPT_USE_NOTIFY in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                        (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                         dsdb.NTDSCONN_OPT_USE_NOTIFY)
                        cn.set_modified(True)

                # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
                if cn.is_twoway_sync():

                    # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
                    #    ri.Options
                    #     Perform an originating update to clear bit
                    #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
                        cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                        cn.set_modified(True)

                    # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
                    #    ri.Options
                    #     Perform an originating update to set bit
                    #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                        cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                        cn.set_modified(True)

                # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
                #    in cn!options
                if cn.is_intersite_compression_disabled():

                    # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
                    #    in ri.Options
                    #     Perform an originating update to clear bit
                    #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                    #     cn!options
                    if ((link_opt &
                         dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
                        ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                        cn.set_modified(True)

                    # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
                    #    ri.Options
                    #     Perform an originating update to set bit
                    #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                    #     cn!options
                    if ((link_opt &
                         dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                        dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                        cn.set_modified(True)

                # Display any modified connection
                if cn.to_be_modified:
                    logger.info("TO BE MODIFIED:\n%s" % cn)

                    # ro=True performs no actual database modification.
                    ldsa.commit_connections(self.samdb, ro=True)
                ldsa.commit_connections(self.samdb)

    # Pass 2: count connections that survive failure detection and
    # remember them so the pruning phase keeps them.
    valid_connections = 0

    # FOR each nTDSConnection object cn such that cn!parent is
    # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
    for ldsa in lbhs_all:
        for cn in ldsa.connect_table.values():

            rdsa = rbh_table.get(cn.from_dnstr)

            DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)

            # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
            #    cn!transportType references t) and
            #    NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if (((not cn.is_generated() or
                  cn.transport_guid == transport.guid) and
                 not cn.is_rodc_topology())):

                # LET rguid be the objectGUID of the nTDSDSA object
                # referenced by cn!fromServer
                # LET lguid be (cn!parent)!objectGUID

                # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
                #    BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
                #     Increment cValidConnections by 1
                if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
                     not self.is_bridgehead_failed(ldsa, detect_failed))):
                    valid_connections += 1

                # IF keepConnections does not contain cn!objectGUID
                #     APPEND cn!objectGUID to keepConnections
                self.kept_connections.add(cn)

    DEBUG_RED("valid connections %d" % valid_connections)
    DEBUG("kept_connections:\n%s" % (self.kept_connections,))

    # No usable existing connection: create a fresh one carrying the
    # siteLink's notify/two-way/compression options.
    # IF cValidConnections = 0
    if valid_connections == 0:

        # LET opt be NTDSCONN_OPT_IS_GENERATED
        opt = dsdb.NTDSCONN_OPT_IS_GENERATED

        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
        #     SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
        #     NTDSCONN_OPT_USE_NOTIFY in opt
        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
            opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                    dsdb.NTDSCONN_OPT_USE_NOTIFY)

        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
        #     SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
            opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC

        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
        #    ri.Options
        #     SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
        if ((link_opt &
             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
            opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION

        # Perform an originating update to create a new nTDSConnection
        # object cn that is a child of lbh, cn!enabledConnection = TRUE,
        # cn!options = opt, cn!transportType is a reference to t,
        # cn!fromServer is a reference to rbh, and cn!schedule = sch
        cn = lbh.new_connection(opt, 0, transport,
                                rbh.dsa_dnstr, link_sched)

        # Display any added connection
        logger.info("TO BE ADDED:\n%s" % cn)

        # ro=True performs no actual database modification.
        lbh.commit_connections(self.samdb, ro=True)
        lbh.commit_connections(self.samdb)

        # APPEND cn!objectGUID to keepConnections
        self.kept_connections.add(cn)
def add_transports(self, vertex, local_vertex, graph, detect_failed):
    """Mark the transports the given vertex can accept.

    Populates vertex.accept_red_red and vertex.accept_black with the
    GUIDs of usable transports, plus the "EDGE_TYPE_ALL" pseudo-entry.

    :param vertex: vertex being annotated
    :param local_vertex: vertex for the local DC's site (see note below)
    :param graph: intersite graph (used for connectivity check)
    :param detect_failed: True to detect failed DCs
    ::returns: presumably the found_failed flag -- the return statement
        appears elided from this copy; confirm against the full file

    NOTE(review): per-transport "continue" statements and the
    failed-bridgehead bookkeeping appear elided from this copy.
    """
    # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
    # here and in the, but using vertex seems to make more
    # sense. That is, it wants this:
    #
    #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
    #                         local_vertex.is_black(), detect_failed)
    vertex.accept_red_red = []
    vertex.accept_black = []
    found_failed = False
    for t_guid, transport in self.transport_table.items():
        if transport.name != 'IP':
            #XXX well this is cheating a bit
            logging.warning("WARNING: we are ignoring a transport named %r"

        # FLAG_CR_NTDS_DOMAIN 0x00000002
        if ((vertex.is_red() and transport.name != "IP" and
             vertex.part.system_flags & 0x00000002)):

        if vertex not in graph.connected_vertices:

        # Partial replicas are acceptable for black (partial) vertices.
        partial_replica_okay = vertex.is_black()
        bh = self.get_bridgehead(vertex.site, vertex.part, transport,
                                 partial_replica_okay, detect_failed)

        # Transport is usable for this vertex.
        vertex.accept_red_red.append(t_guid)
        vertex.accept_black.append(t_guid)

    # Add additional transport to allow another run of Dijkstra
    vertex.accept_red_red.append("EDGE_TYPE_ALL")
    vertex.accept_black.append("EDGE_TYPE_ALL")
def create_connections(self, graph, part, detect_failed):
    """Construct an NC replica graph for the NC identified by
    the given crossRef, then create any additional nTDSConnection
    objects required.

    :param graph: site graph.
    :param part: crossRef object for NC.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, False to assume no DC
        has failed.

    Modifies self.kept_connections by adding any connections
    deemed to be "in use".

    ::returns: (all_connected, found_failed_dc)
    (all_connected) True if the resulting NC replica graph
        connects all sites that need to be connected.
    (found_failed_dc) True if one or more failed DCs were
        detected.

    NOTE(review): a number of lines appear elided from this copy --
    the "for e in edge_list:" loop header, several else/continue
    branches and call continuations. Confirm against the full file.
    """
    all_connected = True
    found_failed = False

    logger.debug("create_connections(): enter\n"
                 "\tpartdn=%s\n\tdetect_failed=%s" %
                 (part.nc_dnstr, detect_failed))

    # XXX - This is a highly abbreviated function from the MS-TECH
    #       ref.  It creates connections between bridgeheads to all
    #       sites that have appropriate replicas.  Thus we are not
    #       creating a minimum cost spanning tree but instead
    #       producing a fully connected tree.  This should produce
    #       a full (albeit not optimal cost) replication topology.

    my_vertex = Vertex(self.my_site, part)
    my_vertex.color_vertex()

    # Annotate every vertex with its acceptable transports.
    for v in graph.vertices:
        if self.add_transports(v, my_vertex, graph, False):

    # No NC replicas for this NC in the site of the local DC,
    # so no nTDSConnection objects need be created
    if my_vertex.is_white():
        return all_connected, found_failed

    edge_list, n_components = get_spanning_tree_edges(graph,
    logger.debug("%s Number of components: %d" %
                 (part.nc_dnstr, n_components))
    # More than one component means some sites cannot be reached.
    if n_components > 1:
        all_connected = False

    # LET partialReplicaOkay be TRUE if and only if
    # localSiteVertex.Color = COLOR.BLACK
    if my_vertex.is_black():

    # Utilize the IP transport only for now
    transport = self.ip_transport

    DEBUG("edge_list %s" % edge_list)
        # XXX more accurate comparison?
        if e.directed and e.vertices[0].site is self.my_site:

        # Work out which end of the edge is the remote site.
        if e.vertices[0].site is self.my_site:
            rsite = e.vertices[1].site
            rsite = e.vertices[0].site

        # We don't make connections to our own site as that
        # is intrasite topology generator's job
        if rsite is self.my_site:
            DEBUG("rsite is my_site")

        # Determine bridgehead server in remote site
        rbh = self.get_bridgehead(rsite, part, transport,
                                  partial_ok, detect_failed)

        # RODC acts as an BH for itself
        # IF AmIRODC() then
        #     LET lbh be the nTDSDSA object of the local DC
        # ELSE
        #     LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
        #     cr, t, partialReplicaOkay, detectFailedDCs)
        if self.my_dsa.is_ro():
            lsite = self.my_site

            lsite = self.my_site
            lbh = self.get_bridgehead(lsite, part, transport,
                                      partial_ok, detect_failed)

        # No local bridgehead could be found at all.
        DEBUG_RED("DISASTER! lbh is None")

        DEBUG_BLUE("vertices")
        DEBUG_BLUE("bridgeheads")
        DEBUG_BLUE("-" * 70)

        sitelink = e.site_link
        if sitelink is None:

        # Else-branch assignments (branch structure elided in this copy):
        link_opt = sitelink.options
        link_sched = sitelink.schedule

        self.create_connection(part, rbh, rsite, transport,
                               lbh, lsite, link_opt, link_sched,
                               partial_ok, detect_failed)

    return all_connected, found_failed
def create_intersite_connections(self):
    """Computes an NC replica graph for each NC replica that "should be
    present" on the local DC or "is present" on any DC in the same site
    as the local DC. For each edge directed to an NC replica on such a
    DC from an NC replica on a DC in another site, the KCC creates an
    nTDSConnection object to imply that edge if one does not already
    exist.

    Modifies self.kept_connections - A set of nTDSConnection
    objects for edges that are directed
    to the local DC's site in one or more NC replica graphs.

    returns: True if spanning trees were created for all NC replica
        graphs, otherwise False.
    """
    all_connected = True
    self.kept_connections = set()

    # LET crossRefList be the set containing each object o of class
    # crossRef such that o is a child of the CN=Partitions child of the
    # config NC

    # FOR each crossRef object cr in crossRefList
    #     IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
    #        is clear in cr!systemFlags, skip cr.
    #     LET g be the GRAPH return of SetupGraph()
    for part in self.part_table.values():

        # Skip disabled or foreign partitions.
        # NOTE(review): the "continue" statements under these two tests
        # appear elided from this copy.
        if not part.is_enabled():

        if part.is_foreign():

        graph = self.setup_graph(part)

        # Create nTDSConnection objects, routing replication traffic
        # around "failed" DCs.
        found_failed = False

        # NOTE(review): the continuation of this call (part and
        # detect_failed arguments) appears elided here.
        connected, found_failed = self.create_connections(graph,

        DEBUG("with detect_failed: connected %s Found failed %s" %
              (connected, found_failed))
        # Reached only when this partition's graph is not connected
        # (the "if not connected:" guard appears elided here).
        all_connected = False

        # One or more failed DCs preclude use of the ideal NC
        # replica graph. Add connections for the ideal graph.
        self.create_connections(graph, part, False)

    return all_connected
def intersite(self):
    """The head method for generating the inter-site KCC replica
    connection graph and attendant nTDSConnection objects
    in the samdb.

    Produces self.kept_connections set of NTDS Connections
    that should be kept during subsequent pruning process.

    ::return (True or False): (True) if the produced NC replica
        graph connects all sites that need to be connected
    """
    # NOTE(review): the "mydsa = self.my_dsa" assignment appears elided
    # from this copy (mydsa is used below).
    mysite = self.my_site
    all_connected = True

    logger.debug("intersite(): enter")

    # Determine who is the ISTG
    # NOTE(review): the if/else (presumably on opts.readonly) selecting
    # between these two calls appears elided here.
    mysite.select_istg(self.samdb, mydsa, ro=True)
    mysite.select_istg(self.samdb, mydsa, ro=False)

    # Test whether local site has topology disabled
    if mysite.is_intersite_topology_disabled():
        logger.debug("intersite(): exit disabled all_connected=%d" %
        return all_connected

    # Only the ISTG of the site generates intersite topology.
    if not mydsa.is_istg():
        logger.debug("intersite(): exit not istg all_connected=%d" %
        return all_connected

    self.merge_failed_links()

    # For each NC with an NC replica that "should be present" on the
    # local DC or "is present" on any DC in the same site as the
    # local DC, the KCC constructs a site graph--a precursor to an NC
    # replica graph. The site connectivity for a site graph is defined
    # by objects of class interSiteTransport, siteLink, and
    # siteLinkBridge in the config NC.

    all_connected = self.create_intersite_connections()

    logger.debug("intersite(): exit all_connected=%d" % all_connected)
    return all_connected
def update_rodc_connection(self):
    """Runs when the local DC is an RODC and updates the RODC NTFRS
    connection object.
    """
    # Given an nTDSConnection object cn1, such that cn1.options contains
    # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
    # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
    # that the following is true:
    #
    #     cn1.fromServer = cn2.fromServer
    #     cn1.schedule = cn2.schedule
    #
    # If no such cn2 can be found, cn1 is not modified.
    # If no such cn1 can be found, nothing is modified by this task.

    # Nothing to do unless the local DSA is read-only.
    if not self.my_dsa.is_ro():
        # NOTE(review): an early "return" appears to be elided here.

    # Partition our connections into RODC_TOPOLOGY and the rest.
    all_connections = self.my_dsa.connect_table.values()
    ro_connections = [x for x in all_connections if x.is_rodc_topology()]
    rw_connections = [x for x in all_connections
                      if x not in ro_connections]

    # XXX here we are dealing with multiple RODC_TOPO connections,
    # if they exist. It is not clear whether the spec means that
    # or if it ever arises.
    if rw_connections and ro_connections:
        for con in ro_connections:
            # Copy fromServer/schedule from the first non-RODC connection.
            cn2 = rw_connections[0]
            con.from_dnstr = cn2.from_dnstr
            con.schedule = cn2.schedule
            con.to_be_modified = True

    # Only touch the database when not running read-only.
    self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
def intrasite_max_node_edges(self, node_count):
    """Returns the maximum number of edges directed to a node in
    the intrasite replica graph.

    The KCC does not create more
    than 50 edges directed to a single DC. To optimize replication,
    we compute that each node should have n+2 total edges directed
    to it such that (n) is the smallest non-negative integer
    satisfying (node_count <= 2*(n*n) + 6*n + 7)

    (If the number of edges is m (i.e. n + 2), that is the same as
    2 * m*m - 2 * m + 3).

    :param node_count: total number of nodes in the replica graph
    ::returns: maximum number of edges (per the text above, at most 50)
    """
    # NOTE(review): the loop that searches for the smallest such n and
    # the return statements appear elided from this copy -- only the
    # bound test is visible (n is otherwise unbound here). Confirm
    # against the full file.
    if node_count <= (2 * (n * n) + (6 * n) + 7):
1916 def construct_intrasite_graph(self, site_local, dc_local,
1917 nc_x, gc_only, detect_stale):
1919 # We're using the MS notation names here to allow
1920 # correlation back to the published algorithm.
1922 # nc_x - naming context (x) that we are testing if it
1923 # "should be present" on the local DC
1924 # f_of_x - replica (f) found on a DC (s) for NC (x)
1925 # dc_s - DC where f_of_x replica was found
1926 # dc_local - local DC that potentially needs a replica
1928 # r_list - replica list R
1929 # p_of_x - replica (p) is partial and found on a DC (s)
1931 # l_of_x - replica (l) is the local replica for NC (x)
1932 # that should appear on the local DC
1933 # r_len = is length of replica list |R|
1935 # If the DSA doesn't need a replica for this
1936 # partition (NC x) then continue
1937 needed, ro, partial = nc_x.should_be_present(dc_local)
1939 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
1940 "\n\tgc_only=%d" % gc_only +
1941 "\n\tdetect_stale=%d" % detect_stale +
1942 "\n\tneeded=%s" % needed +
1944 "\n\tpartial=%s" % partial +
1948 DEBUG_RED("%s lacks 'should be present' status, "
1949 "aborting construct_intersite_graph!" %
1953 # Create a NCReplica that matches what the local replica
1954 # should say. We'll use this below in our r_list
1955 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
1958 l_of_x.identify_by_basedn(self.samdb)
1960 l_of_x.rep_partial = partial
1963 # Add this replica that "should be present" to the
1964 # needed replica table for this DSA
1965 dc_local.add_needed_replica(l_of_x)
1969 # Let R be a sequence containing each writable replica f of x
1970 # such that f "is present" on a DC s satisfying the following
1973 # * s is a writable DC other than the local DC.
1975 # * s is in the same site as the local DC.
1977 # * If x is a read-only full replica and x is a domain NC,
1978 # then the DC's functional level is at least
1979 # DS_BEHAVIOR_WIN2008.
1981 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
1982 # in the options attribute of the site settings object for
1983 # the local DC's site, or no tuple z exists in the
1984 # kCCFailedLinks or kCCFailedConnections variables such
1985 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
1986 # for s, z.FailureCount > 0, and the current time -
1987 # z.TimeFirstFailure > 2 hours.
1991 # We'll loop thru all the DSAs looking for
1992 # writeable NC replicas that match the naming
1993 # context dn for (nc_x)
1995 for dc_s in self.my_site.dsa_table.values():
1996 # If this partition (nc_x) doesn't appear as a
1997 # replica (f_of_x) on (dc_s) then continue
1998 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2001 # Pull out the NCReplica (f) of (x) with the dn
2002 # that matches NC (x) we are examining.
2003 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2005 # Replica (f) of NC (x) must be writable
2009 # Replica (f) of NC (x) must satisfy the
2010 # "is present" criteria for DC (s) that
2012 if not f_of_x.is_present():
2015 # DC (s) must be a writable DSA other than
2016 # my local DC. In other words we'd only replicate
2017 # from other writable DC
2018 if dc_s.is_ro() or dc_s is dc_local:
2021 # Certain replica graphs are produced only
2022 # for global catalogs, so test against
2023 # method input parameter
2024 if gc_only and not dc_s.is_gc():
2027 # DC (s) must be in the same site as the local DC
2028 # as this is the intra-site algorithm. This is
2029 # handled by virtue of placing DSAs in per
2030 # site objects (see enclosing for() loop)
2032 # If NC (x) is intended to be read-only full replica
2033 # for a domain NC on the target DC then the source
2034 # DC should have functional level at minimum WIN2008
2036 # Effectively we're saying that in order to replicate
2037 # to a targeted RODC (which was introduced in Windows 2008)
2038 # then we have to replicate from a DC that is also minimally
2041 # You can also see this requirement in the MS special
2042 # considerations for RODC which state that to deploy
2043 # an RODC, at least one writable domain controller in
2044 # the domain must be running Windows Server 2008
2045 if ro and not partial and nc_x.nc_type == NCType.domain:
2046 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2049 # If we haven't been told to turn off stale connection
2050 # detection and this dsa has a stale connection then
2052 if detect_stale and self.is_stale_link_connection(dc_s):
2055 # Replica meets criteria. Add it to table indexed
2056 # by the GUID of the DC that it appears on
2057 r_list.append(f_of_x)
2059 # If a partial (not full) replica of NC (x) "should be present"
2060 # on the local DC, append to R each partial replica (p of x)
2061 # such that p "is present" on a DC satisfying the same
2062 # criteria defined above for full replica DCs.
2064 # XXX This loop and the previous one differ only in whether
2065 # the replica is partial or not. here we only accept partial
2066 # (because we're partial); before we only accepted full. Order
2067 # doen't matter (the list is sorted a few lines down) so these
2068 # loops could easily be merged. Or this could be a helper
2072 # Now we loop thru all the DSAs looking for
2073 # partial NC replicas that match the naming
2074 # context dn for (NC x)
2075 for dc_s in self.my_site.dsa_table.values():
2077 # If this partition NC (x) doesn't appear as a
2078 # replica (p) of NC (x) on the dsa DC (s) then
2080 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2083 # Pull out the NCReplica with the dn that
2084 # matches NC (x) we are examining.
2085 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2087 # Replica (p) of NC (x) must be partial
2088 if not p_of_x.is_partial():
2091 # Replica (p) of NC (x) must satisfy the
2092 # "is present" criteria for DC (s) that
2094 if not p_of_x.is_present():
2097 # DC (s) must be a writable DSA other than
2098 # my DSA. In other words we'd only replicate
2099 # from other writable DSA
2100 if dc_s.is_ro() or dc_s is dc_local:
2103 # Certain replica graphs are produced only
2104 # for global catalogs, so test against
2105 # method input parameter
2106 if gc_only and not dc_s.is_gc():
2109 # If we haven't been told to turn off stale connection
2110 # detection and this dsa has a stale connection then
2112 if detect_stale and self.is_stale_link_connection(dc_s):
2115 # Replica meets criteria. Add it to table indexed
2116 # by the GUID of the DSA that it appears on
2117 r_list.append(p_of_x)
2119 # Append to R the NC replica that "should be present"
2121 r_list.append(l_of_x)
2123 r_list.sort(sort_replica_by_dsa_guid)
2126 max_node_edges = self.intrasite_max_node_edges(r_len)
2128 # Add a node for each r_list element to the replica graph
2131 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2132 graph_list.append(node)
2134 # For each r(i) from (0 <= i < |R|-1)
2136 while i < (r_len-1):
2137 # Add an edge from r(i) to r(i+1) if r(i) is a full
2138 # replica or r(i+1) is a partial replica
2139 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2140 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2142 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2143 # replica or ri is a partial replica.
2144 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2145 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2148 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2149 # or r0 is a partial replica.
2150 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2151 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2153 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2154 # r|R|-1 is a partial replica.
2155 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2156 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
2158 DEBUG("r_list is length %s" % len(r_list))
2159 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
2162 do_dot_files = opts.dot_files and opts.debug
2163 if opts.verify or do_dot_files:
2165 dot_vertices = set()
2166 for v1 in graph_list:
2167 dot_vertices.add(v1.dsa_dnstr)
2168 for v2 in v1.edge_from:
2169 dot_edges.append((v2, v1.dsa_dnstr))
2170 dot_vertices.add(v2)
2172 verify_properties = ('connected', 'directed_double_ring')
2173 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2174 label='%s__%s__%s' % (site_local.site_dnstr,
2175 nctype_lut[nc_x.nc_type],
2177 properties=verify_properties, debug=DEBUG,
2179 dot_files=do_dot_files, directed=True)
2181 # For each existing nTDSConnection object implying an edge
2182 # from rj of R to ri such that j != i, an edge from rj to ri
2183 # is not already in the graph, and the total edges directed
2184 # to ri is less than n+2, the KCC adds that edge to the graph.
2185 for vertex in graph_list:
2186 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2187 for connect in dsa.connect_table.values():
2188 remote = connect.from_dnstr
2189 if remote in self.my_site.dsa_table:
2190 vertex.add_edge_from(remote)
2192 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2193 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2195 for tnode in graph_list:
2196 # To optimize replication latency in sites with many NC
2197 # replicas, the KCC adds new edges directed to ri to bring
2198 # the total edges to n+2, where the NC replica rk of R
2199 # from which the edge is directed is chosen at random such
2200 # that k != i and an edge from rk to ri is not already in
2203 # Note that the KCC tech ref does not give a number for
2204 # the definition of "sites with many NC replicas". At a
2205 # bare minimum to satisfy n+2 edges directed at a node we
2206 # have to have at least three replicas in |R| (i.e. if n
2207 # is zero then at least replicas from two other graph
2208 # nodes may direct edges to us).
2209 if r_len >= 3 and not tnode.has_sufficient_edges():
2210 candidates = [x for x in graph_list if
2212 x.dsa_dnstr not in tnode.edge_from)]
2214 DEBUG_BLUE("looking for random link for %s. r_len %d, "
2215 "graph len %d candidates %d"
2216 % (tnode.dsa_dnstr, r_len, len(graph_list),
2219 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2221 while candidates and not tnode.has_sufficient_edges():
2222 other = random.choice(candidates)
2223 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2224 if not tnode.add_edge_from(other):
2225 DEBUG_RED("could not add %s" % other.dsa_dstr)
2226 candidates.remove(other)
2228 DEBUG_CYAN("not adding links to %s: nodes %s, links is %s/%s" %
2229 (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
2232 # Print the graph node in debug mode
2233 logger.debug("%s" % tnode)
2235 # For each edge directed to the local DC, ensure a nTDSConnection
2236 # points to us that satisfies the KCC criteria
2238 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2239 tnode.add_connections_from_edges(dc_local)
2241 if opts.verify or do_dot_files:
2243 dot_vertices = set()
2244 for v1 in graph_list:
2245 dot_vertices.add(v1.dsa_dnstr)
2246 for v2 in v1.edge_from:
2247 dot_edges.append((v2, v1.dsa_dnstr))
2248 dot_vertices.add(v2)
2250 verify_properties = ('connected', 'directed_double_ring_or_small')
2251 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2252 label='%s__%s__%s' % (site_local.site_dnstr,
2253 nctype_lut[nc_x.nc_type],
2255 properties=verify_properties, debug=DEBUG,
2257 dot_files=do_dot_files, directed=True)
    def intrasite(self):
        """The head method for generating the intra-site KCC replica
        connection graph and attendant nTDSConnection objects.

        Per [MS-ADTS] 6.2.2 the NC replica graph is built once per
        partition with stale-link detection on, then the whole
        computation is repeated with stale-link detection off (as if
        NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were set).
        The interleaved DEBUG_* loops only report connections queued
        for addition after each stage.

        NOTE(review): several original lines are elided in this
        excerpt (e.g. the early return when topology is disabled and
        the ``mydsa = self.my_dsa`` binding) -- confirm against the
        full file.
        """
        logger.debug("intrasite(): enter")

        # Test whether local site has topology disabled
        mysite = self.my_site
        if mysite.is_intrasite_topology_disabled():
            # (early-return body elided in this excerpt)

        detect_stale = (not mysite.is_detect_stale_disabled())
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_CYAN("TO BE ADDED:\n%s" % connect)

        # Loop thru all the partitions, with gc_only False
        for partdn, part in self.part_table.items():
            self.construct_intrasite_graph(mysite, mydsa, part, False,

        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

        # If the DC is a GC server, the KCC constructs an additional NC
        # replica graph (and creates nTDSConnection objects) for the
        # config NC as above, except that only NC replicas that "are present"
        # on GC servers are added to R.
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)

        # Do it again, with gc_only True
        for partdn, part in self.part_table.items():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,

        # The DC repeats the NC replica graph computation and nTDSConnection
        # creation for each of the NC replica graphs, this time assuming
        # that no DC has failed. It does so by re-executing the steps as
        # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
        # set in the options attribute of the site settings object for
        # the local DC's site. (ie. we set "detec_stale" flag to False)
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

        # Loop thru all the partitions.
        for partdn, part in self.part_table.items():
            self.construct_intrasite_graph(mysite, mydsa, part, False,
                                           False)  # don't detect stale

        # If the DC is a GC server, the KCC constructs an additional NC
        # replica graph (and creates nTDSConnection objects) for the
        # config NC as above, except that only NC replicas that "are present"
        # on GC servers are added to R.
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_RED("TO BE ADDED:\n%s" % connect)

        for partdn, part in self.part_table.items():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,
                                               False)  # don't detect stale

        # Display any to be added or modified repsFrom
        for connect in mydsa.connect_table.values():
            if connect.to_be_deleted:
                logger.info("TO BE DELETED:\n%s" % connect)
            if connect.to_be_modified:
                logger.info("TO BE MODIFIED:\n%s" % connect)
            if connect.to_be_added:
                DEBUG_GREEN("TO BE ADDED:\n%s" % connect)

        # NOTE(review): presumably guarded by an (elided) readonly check;
        # the ro=True call only simulates the commit.
        mydsa.commit_connections(self.samdb, ro=True)

        # Commit any newly created connections to the samdb
        mydsa.commit_connections(self.samdb)
    def list_dsas(self):
        """Return DN strings for every DSA in every site.

        Loads the full site/partition/transport/sitelink state first,
        then strips the leading "CN=NTDS Settings," RDN from each DSA
        DN so the result names the server objects themselves.

        NOTE(review): the ``dsas`` initialisation and the final
        ``return`` are elided in this excerpt.
        """
        self.load_all_sites()
        self.load_all_partitions()
        self.load_all_transports()
        self.load_all_sitelinks()
        for site in self.site_table.values():
            dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
                         for dsa in site.dsa_table.values()])
    def load_samdb(self, dburl, lp, creds):
        """Open the sam database and store the handle on ``self.samdb``.

        :param dburl: database url to open
        :param lp: loadparm context
        :param creds: credentials to connect with (the session itself
            is the system session, so the KCC sees the whole directory)
        """
        self.samdb = SamDB(url=dburl,
                           session_info=system_session(),
                           credentials=creds, lp=lp)
    def plot_all_connections(self, basename, verify_properties=()):
        """Verify and/or draw (as dot files) the current nTDSConnection
        graph across all known DSAs.

        Does nothing unless --verify (with properties) or dot-file
        output is requested.  RODC-topology connections are drawn in
        red, others in blue.

        NOTE(review): the early return, list initialisations, and the
        if/else wrappers around the colour appends are elided in this
        excerpt.
        """
        verify = verify_properties and opts.verify
        plot = opts.dot_files
        if not (verify or plot):
            # (early-return body elided in this excerpt)

        for dsa in self.dsa_by_dnstr.values():
            dot_vertices.append(dsa.dsa_dnstr)
            # these two appends are the two arms of an elided RO/RW test
            vertex_colours.append('#cc0000')
            vertex_colours.append('#0000cc')
            for con in dsa.connect_table.values():
                if con.is_rodc_topology():
                    edge_colours.append('red')
                # (elided else:)
                edge_colours.append('blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

        verify_and_dot(basename, dot_edges, vertices=dot_vertices,
                       label=self.my_dsa_dnstr, properties=verify_properties,
                       debug=DEBUG, verify=verify, dot_files=plot,
                       directed=True, edge_colors=edge_colours,
                       vertex_colors=vertex_colours)
    def run(self, dburl, lp, creds, forced_local_dsa=None,
            forget_local_links=False, forget_intersite_links=False):
        """Method to perform a complete run of the KCC and
        produce an updated topology for subsequent NC replica
        syncronization between domain controllers.

        :param dburl: database to open (unless one was already imported)
        :param lp: loadparm context
        :param creds: connection credentials
        :param forced_local_dsa: pretend this DN is the local DSA
        :param forget_local_links: drop existing local connections
            (except RODC topology ones) before computing
        :param forget_intersite_links: drop existing intersite
            connections before computing

        NOTE(review): several original lines are elided in this
        excerpt (try:, error-handling bodies, list initialisations,
        return statements).
        """
        # We may already have a samdb setup if we are
        # currently importing an ldif for a test run
        if self.samdb is None:
            # (elided try:)
            self.load_samdb(dburl, lp, creds)
            except ldb.LdbError, (num, msg):
                logger.error("Unable to open sam database %s : %s" %

        if forced_local_dsa:
            self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %

        self.load_all_sites()
        self.load_all_partitions()
        self.load_all_transports()
        self.load_all_sitelinks()

        # Optional pre-run verification / dot-file plots of the
        # starting topology and repsFrom state.
        if opts.verify or opts.dot_files:
            for site in self.site_table.values():
                guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                                     in site.dsa_table.items())

            self.plot_all_connections('dsa_initial')

            current_reps, needed_reps = self.my_dsa.get_rep_tables()
            for dnstr, c_rep in current_reps.items():
                DEBUG("c_rep %s" % c_rep)
                dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
            verify_and_dot('dsa_repsFrom_initial', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_reps, needed_reps = dsa.get_rep_tables()
                    for dn_str, rep in current_reps.items():
                        for reps_from in rep.rep_repsFrom:
                            DEBUG("rep %s" % rep)
                            dsa_guid = str(reps_from.source_dsa_obj_guid)
                            dsa_dn = guid_to_dnstr[dsa_guid]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))
            verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

            # every pair of sites on a link gets an (undirected) edge
            for link in self.sitelink_table.values():
                for a, b in itertools.combinations(link.site_list, 2):
                    dot_edges.append((str(a), str(b)))
            properties = ('connected',)
            verify_and_dot('dsa_sitelink_initial', dot_edges,
                           label=self.my_dsa_dnstr, properties=properties,
                           debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

        if forget_local_links:
            # keep only RODC-topology connections in the local site
            for dsa in self.my_site.dsa_table.values():
                dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                     if v.is_rodc_topology()}
            self.plot_all_connections('dsa_forgotten_local')

        if forget_intersite_links:
            # keep only local-site RODC-topology connections everywhere
            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                         if site is self.my_site and v.is_rodc_topology()}

            self.plot_all_connections('dsa_forgotten_all')
        # These are the published steps (in order) for the
        # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
        self.refresh_failed_links_connections()
        all_connected = self.intersite()
        self.remove_unneeded_ntdsconn(all_connected)
        self.translate_ntdsconn()
        self.remove_unneeded_failed_links_connections()
        self.update_rodc_connection()

        # Optional post-run verification / plots mirroring the
        # pre-run ones above.
        if opts.verify or opts.dot_files:
            self.plot_all_connections('dsa_final',
                                      ('connected', 'forest_of_rings'))

            DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))

            my_dnstr = self.my_dsa.dsa_dnstr
            current_reps, needed_reps = self.my_dsa.get_rep_tables()
            for dnstr, n_rep in needed_reps.items():
                for reps_from in n_rep.rep_repsFrom:
                    guid_str = str(reps_from.source_dsa_obj_guid)
                    dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
                    # colour each edge by (a prefix of) the NC guid
                    edge_colors.append('#' + str(n_rep.nc_guid)[:6])

            verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
                           label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files,
                           edge_colors=edge_colors)

            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_reps, needed_reps = dsa.get_rep_tables()
                    for n_rep in needed_reps.values():
                        for reps_from in n_rep.rep_repsFrom:
                            dsa_guid = str(reps_from.source_dsa_obj_guid)
                            dsa_dn = guid_to_dnstr[dsa_guid]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))

            verify_and_dot('dsa_repsFrom_final_all', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)
    def import_ldif(self, dburl, lp, creds, ldif_file):
        """Import all objects and attributes that are relevent
        to the KCC algorithms from a previously exported LDIF file.

        The point of this function is to allow a programmer/debugger to
        import an LDIF file with non-security relevent information that
        was previously extracted from a DC database. The LDIF file is used
        to create a temporary abbreviated database. The KCC algorithm can
        then run against this abbreviated database for debug or test
        verification that the topology generated is computationally the
        same between different OSes and algorithms.

        :param dburl: path to the temporary abbreviated db to create
        :param ldif_file: path to the ldif file to import

        NOTE(review): the enclosing ``try:`` and the error-handler body
        (and presumably a return code) are elided in this excerpt.
        """
        self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, creds, ldif_file,
                                              opts.forced_local_dsa)
        except ldif_utils.LdifError, e:
    def export_ldif(self, dburl, lp, creds, ldif_file):
        """Routine to extract all objects and attributes that are relevent
        to the KCC algorithms from a DC database.

        The point of this function is to allow a programmer/debugger to
        extract an LDIF file with non-security relevent information from
        a DC database. The LDIF file can then be used to "import" via
        the import_ldif() function this file into a temporary abbreviated
        database. The KCC algorithm can then run against this abbreviated
        database for debug or test verification that the topology generated
        is computationally the same between different OSes and algorithms.

        :param dburl: LDAP database URL to extract info from
        :param ldif_file: output LDIF file name to create

        NOTE(review): the enclosing ``try:`` and the error-handler body
        are elided in this excerpt.
        """
        ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
        except ldif_utils.LdifError, e:
2596 ##################################################
2598 ##################################################
def get_spanning_tree_edges(graph, my_site, label=None):
    """Compute the intersite spanning-tree edges for *graph*.

    Three phases: (1) Dijkstra per edge set to collect candidate
    "internal" edges (shortest paths between coloured vertices),
    (2) Kruskal over those internal edges to pick the tree,
    (3) direct edges touching partial replicas the right way.

    Returns (edge_list, number_of_components).

    NOTE(review): several original lines are elided in this excerpt
    (loop bodies marking vertex colour acceptance, list
    initialisations such as ``edge_list = []``).
    """
    # Phase 1: Run Dijkstra's to get a list of internal edges, which are
    # just the shortest-paths connecting colored vertices
    internal_edges = set()

    for e_set in graph.edge_set:
        for v in graph.vertices:
            # (per-vertex reset body elided in this excerpt)

        # All con_type in an edge set is the same
        for e in e_set.edges:
            edgeType = e.con_type
            for v in e.vertices:
                # (acceptance bookkeeping elided in this excerpt)

        if opts.verify or opts.dot_files:
            graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
                           *(itertools.combinations(edge.vertices, 2)
                             for edge in e_set.edges))]
            graph_nodes = [v.site.site_dnstr for v in graph.vertices]

            if opts.dot_files and opts.debug:
                write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
                               vertices=graph_nodes, label=label)

                verify_graph('spanning tree edge set %s' % edgeType,
                             graph_edges, vertices=graph_nodes,
                             properties=('complete', 'connected'),

        # Run dijkstra's algorithm with just the red vertices as seeds
        # Seed from the full replicas
        dijkstra(graph, edgeType, False)
        process_edge_set(graph, e_set, internal_edges)

        # Run dijkstra's algorithm with red and black vertices as the seeds
        # Seed from both full and partial replicas
        dijkstra(graph, edgeType, True)
        process_edge_set(graph, e_set, internal_edges)

    # All vertices have root/component as itself
    setup_vertices(graph)
    process_edge_set(graph, None, internal_edges)

    if opts.verify or opts.dot_files:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
                       properties=verify_properties, debug=DEBUG,
                       dot_files=opts.dot_files)

    # Phase 2: Run Kruskal's on the internal edges
    output_edges, components = kruskal(graph, internal_edges)

    # This recalculates the cost for the path connecting the
    # closest red vertex. Ignoring types is fine because NO
    # suboptimal edge should exist in the graph
    dijkstra(graph, "EDGE_TYPE_ALL", False)  # TODO rename
    # Phase 3: Process the output
    for v in graph.vertices:
        v.dist_to_red = v.repl_info.cost

    if opts.verify or opts.dot_files:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('postkruskal', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

    # Ensure only one-way connections for partial-replicas,
    # and make sure they point the right way.
    for edge in output_edges:
        # We know these edges only have two endpoints because we made
        v, w = edge.vertices
        if v.site is my_site or w.site is my_site:
            if (((v.is_black() or w.is_black()) and
                 v.dist_to_red != MAX_DWORD)):
                edge.directed = True

                if w.dist_to_red < v.dist_to_red:
                    # point the edge from closer-to-red to farther
                    edge.vertices[:] = w, v
        edge_list.append(edge)

    if opts.verify or opts.dot_files:
        graph_edges = [[x.site.site_dnstr for x in e.vertices]
        #add the reverse edge if not directed.
        graph_edges.extend([x.site.site_dnstr
                            for x in reversed(e.vertices)]
                           for e in edge_list if not e.directed)
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ()
        verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

    # count the components
    return edge_list, components
def sort_replica_by_dsa_guid(rep1, rep2):
    """cmp-style comparator ordering NC replicas by the NDR-packed
    GUID of the DSA each replica lives on."""
    packed1 = ndr_pack(rep1.rep_dsa_guid)
    packed2 = ndr_pack(rep2.rep_dsa_guid)
    return cmp(packed1, packed2)
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    """cmp-style comparator: GC DSAs sort before non-GC DSAs; within a
    class, order by NDR-packed GUID.

    NOTE(review): the two short-circuit return lines are elided in
    this excerpt.
    """
    if dsa1.is_gc() and not dsa2.is_gc():
        # (return elided in this excerpt)
    if not dsa1.is_gc() and dsa2.is_gc():
        # (return elided in this excerpt)
    return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
def is_smtp_replication_available():
    """Currently always returns false because Samba
    doesn't implement SMTP transfer for NC changes.

    NOTE(review): the ``return False`` line is elided in this excerpt.
    """
def create_edge(con_type, site_link, guid_to_vertex):
    """Build a multi-edge for *site_link*, collecting the vertices of
    every site on the link and copying the link's replication info
    (cost, options, interval, schedule) onto the edge.

    NOTE(review): the edge construction (presumably ``e = MultiEdge()``)
    and the final return are elided in this excerpt.
    """
    e.site_link = site_link
    for site_guid in site_link.site_list:
        if str(site_guid) in guid_to_vertex:
            e.vertices.extend(guid_to_vertex.get(str(site_guid)))
    e.repl_info.cost = site_link.cost
    e.repl_info.options = site_link.options
    e.repl_info.interval = site_link.interval
    e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
    e.con_type = con_type
def create_auto_edge_set(graph, transport):
    """Collect every edge of *graph* whose con_type matches *transport*
    into a fresh MultiEdgeSet (the automatic, bridge-less edge set).

    NOTE(review): the final return is elided in this excerpt.
    """
    e_set = MultiEdgeSet()
    # use a NULL guid, not associated with a SiteLinkBridge object
    e_set.guid = misc.GUID()
    for site_link in graph.edges:
        if site_link.con_type == transport:
            e_set.edges.append(site_link)
def create_edge_set(graph, transport, site_link_bridge):
    """Create a MultiEdgeSet for an explicit site link bridge.

    NOTE(review): body is a stub; the final return is elided in this
    excerpt.
    """
    # TODO not implemented - need to store all site link bridges
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
def setup_vertices(graph):
    """Reset per-vertex Dijkstra state on every vertex of *graph*.

    NOTE(review): the white/coloured branch lines are elided in this
    excerpt -- the two cost assignments below belong to different
    branches (MAX_DWORD for white vertices, 0 for coloured ones).
    """
    for v in graph.vertices:
        v.repl_info.cost = MAX_DWORD
        v.component_id = None
        v.repl_info.cost = 0
        v.repl_info.interval = 0
        v.repl_info.options = 0xFFFFFFFF
        v.repl_info.schedule = None  # TODO highly suspicious
def dijkstra(graph, edge_type, include_black):
    """Run Dijkstra's algorithm over *graph* seeded from coloured
    vertices (optionally including black ones), relaxing paths via
    try_new_path().

    NOTE(review): the priority-queue initialisation (presumably
    ``queue = []``) and a self-skip guard are elided in this excerpt.
    """
    setup_dijkstra(graph, edge_type, include_black, queue)
    while len(queue) > 0:
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for v in edge.vertices:
                # add new path from vertex to v
                try_new_path(graph, queue, vertex, edge, v)
def setup_dijkstra(graph, edge_type, include_black, queue):
    """Initialise vertex state and seed *queue* for a Dijkstra run.

    Vertices that should not act as seeds (black when black excluded,
    or ones that accept neither red-red nor black edges of this type)
    are demoted to MAX_DWORD cost with no root.

    NOTE(review): the white-vertex skip body and the else: before the
    heappush are elided in this excerpt.
    """
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():
            # (skip body elided in this excerpt)
        if (((vertex.is_black() and not include_black)
             or edge_type not in vertex.accept_black
             or edge_type not in vertex.accept_red_red)):
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    """Attempt to relax the path to *vto* via *vfrom* and *edge*.

    What this function checks is that there is a valid time frame for
    which replication can actually occur, despite being adequately
    connected (combine_repl_info intersects the schedules).

    NOTE(review): the ``newRI`` construction and the early-return
    bodies are elided in this excerpt.
    """
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:
        # (return elided in this excerpt)

    if newRI.cost < vto.repl_info.cost and not intersect:
        # (return elided in this excerpt)

    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    """Demote *vertex* (cost MAX_DWORD) when it accepts neither
    red-red nor black edges of *edge_type*; white vertices are skipped.

    NOTE(review): the white-skip body and a root reset line are elided
    in this excerpt.
    """
    if vertex.is_white():
        # (return elided in this excerpt)

    # Accepts neither red-red nor black edges, demote
    if ((edge_type not in vertex.accept_black and
         edge_type not in vertex.accept_red_red)):
        vertex.repl_info.cost = MAX_DWORD
        vertex.demoted = True  # Demoted appears not to be used
def undemote_vertex(vertex):
    """Reverse a demotion: restore zero cost and self-root; white
    vertices are skipped.

    NOTE(review): the white-skip body is elided in this excerpt.
    """
    if vertex.is_white():
        # (return elided in this excerpt)
    vertex.repl_info.cost = 0
    vertex.root = vertex
    vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    """Feed edges into process_edge(), demoting/undemoting vertices
    around each edge when processing the whole graph.

    NOTE(review): the ``e_set is None`` test and the else: separating
    the two loops are elided in this excerpt -- the first loop handles
    the whole-graph case, the second a specific edge set.
    """
    for edge in graph.edges:
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        for vertex in edge.vertices:
            undemote_vertex(vertex)
    for edge in e_set.edges:
        process_edge(graph, edge, internal_edges)
def process_edge(graph, examine, internal_edges):
    """Pick the best (first after sorting) coloured vertex on *examine*
    and propose internal edges from every other valid vertex to it.

    NOTE(review): the ``vertices`` initialisation, its sort, and a
    continue body are elided in this excerpt.
    """
    # Find the set of all vertices touches the edge to examine
    for v in examine.vertices:
        # Append a 4-tuple of color, repl cost, guid and vertex
        vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
    # Sort by color, lower
    DEBUG("vertices is %s" % vertices)

    color, cost, guid, bestv = vertices[0]
    # Add to internal edges an edge from every colored vertex to bestV
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            # (continue elided in this excerpt)

        # Only add edge if valid inter-tree edge - needs a root and
        # different components
        if ((bestv.component_id is not None and
             bestv.root is not None and
             v.component_id is not None and
             v.root is not None and
             bestv.component_id != v.component_id)):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in
# and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    """Add an InternalEdge between the roots of *v1* and *v2*, with
    replication info combined transitively across both trees and the
    examined edge; endpoints are normalised by packed-GUID order so
    the set deduplicates symmetric pairs.

    NOTE(review): the root lookups (root1/root2), the red_red flag
    assignment, the rejection-return bodies, and the ReplInfo
    constructions (ri/ri2) are elided in this excerpt.
    """
    if root1.is_red() and root2.is_red():
        # (red_red assignment elided in this excerpt)

    # both roots must accept this connection type, else reject
    if ((examine.con_type not in root1.accept_red_red
         or examine.con_type not in root2.accept_red_red)):
        # (return elided in this excerpt)
    elif (examine.con_type not in root1.accept_black
          or examine.con_type not in root2.accept_black):
        # (return elided in this excerpt)

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
        # (return elided in this excerpt)

    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):
        # (return elided in this excerpt)

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
    # Order by vertex guid
    #XXX guid comparison using ndr_pack
    if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1
    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    """Run Kruskal's minimum spanning tree over the internal *edges*.

    Returns (output_edges, component_count) -- the chosen tree edges
    and how many disjoint components remain.

    NOTE(review): the per-vertex init body, the edge sort, the
    ``output_edges``/``index`` initialisations and the index increment
    are elided in this excerpt.
    """
    for v in graph.vertices:
        # (component initialisation elided in this excerpt)

    components = set([x for x in graph.vertices if not x.is_white()])

    # Sorted based on internal comparison function of internal edge
    expected_num_tree_edges = 0  # TODO this value makes little sense

    while index < len(edges):  # TODO and num_components > 1
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            # union: merge the two components and keep the edge
            add_out_edge(graph, output_edges, e)
            parent1.component_id = parent2
            components.discard(parent1)

    return output_edges, len(components)
def find_component(vertex):
    """Union-find lookup with path compression: follow component_id
    links to the root, then point every visited vertex at it.

    NOTE(review): the early-return, the root capture, and the final
    return are elided in this excerpt.
    """
    if vertex.component_id is vertex:
        # (return elided in this excerpt)
    while current.component_id is not current:
        current = current.component_id
    # second pass: compress the path onto the found root
    while current.component_id is not root:
        n = current.component_id
        current.component_id = root
def add_out_edge(graph, output_edges, e):
    """Convert the internal edge *e* into an output MultiEdge between
    its two endpoint vertices and append it to *output_edges*.

    NOTE(review): the endpoint extraction (v1/v2) and the MultiEdge
    construction are elided in this excerpt.
    """
    # This multi-edge is a 'real' edge with no GUID
    ee.site_link = e.site_link
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)
def test_all_reps_from(lp, creds):
    """Run the KCC once per DSA (forcing each as the local DSA) and
    verify/plot the resulting repsFrom graphs for every partition,
    plus the combined nTDSConnection graph.

    NOTE(review): the KCC construction, the per-DSA loop header, and
    several list initialisations are elided in this excerpt.
    """
    kcc.load_samdb(opts.dburl, lp, creds)
    dsas = kcc.list_dsas()
    for site in kcc.site_table.values():
        guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                             for dnstr, dsa in site.dsa_table.items())
    # (per-DSA loop header elided; dsa_dn presumably iterates dsas)
    kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
            forget_local_links=opts.forget_local_links,
            forget_intersite_links=opts.forget_intersite_links)
    current, needed = kcc.my_dsa.get_rep_tables()

    # accumulate (source, dest) repsFrom edges per partition, for both
    # the needed and the current tables
    for name, rep_table, rep_parts in (
            ('needed', needed, needed_parts),
            ('current', current, current_parts)):
        for part, nc_rep in rep_table.items():
            edges = rep_parts.setdefault(part, [])
            for reps_from in nc_rep.rep_repsFrom:
                source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
                edges.append((source, dest))

    for site in kcc.site_table.values():
        for dsa in site.dsa_table.values():
            # these two appends are the arms of an elided RO/RW test
            vertex_colours.append('#cc0000')
            vertex_colours.append('#0000cc')
            dot_vertices.append(dsa.dsa_dnstr)
            for con in dsa.connect_table.values():
                if con.is_rodc_topology():
                    colours.append('red')
                # (elided else:)
                colours.append('blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
                   label="all dsa NTDSConnections", properties=(),
                   debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
                   directed=True, edge_colors=colours,
                   vertex_colors=vertex_colours)

    for name, rep_parts in (('needed', needed_parts),
                            ('current', current_parts)):
        for part, edges in rep_parts.items():
            verify_and_dot('repsFrom_%s_all_%s' % (name, part), edges,
                           directed=True, label=part,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)
# Module-level logger writing to stdout (so messages reach the samba
# logs unbuffered); DEBUG is the shorthand used throughout this file.
logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))
DEBUG = logger.debug
def _color_debug(*args, **kwargs):
    """Log a debug message with its first argument wrapped in the ANSI
    colour given by the 'color' keyword (reset with C_NORMAL after)."""
    colour = kwargs['color']
    message = '%s%s%s' % (colour, args[0], C_NORMAL)
    DEBUG(message, *args[1:])
# Generate one DEBUG_<COLOUR> helper per known ANSI colour constant
# (DEBUG_RED, DEBUG_CYAN, ...) by partially applying _color_debug.
_globals = globals()
for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
               'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
               'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
    _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
def DEBUG_FN(msg=''):
    """Debug-log the caller's file, line and function name (taken from
    the stack) followed by the optional *msg*."""
    filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
    DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
                                       CYAN, function, C_NORMAL, msg))
3097 ##################################################
3098 # samba_kcc entry point
3099 ##################################################
# Command-line interface: standard Samba option groups plus the
# KCC-specific flags (debug/verify/dot output, ldif import/export,
# forced DSA, link-forgetting test modes).
parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly", default=False,
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--verify",
                  help="verify that assorted invariants are kept",
                  action="store_true")

parser.add_option("--list-verify-tests",
                  help=("list what verification actions are available "
                        "and do nothing else"),
                  action="store_true")

parser.add_option("--no-dot-files", dest='dot_files',
                  help="Don't write dot graph files in /tmp",
                  default=True, action="store_false")

parser.add_option("--seed",
                  help="random number seed",

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL",
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

parser.add_option("--now",
                  help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
                        " default: system time)"),
                  type=str, metavar="<date>")

parser.add_option("--forced-local-dsa",
                  help="run calculations assuming the DSA is this DN",
                  type=str, metavar="<DSA>")

parser.add_option("--attempt-live-connections", default=False,
                  help="Attempt to connect to other DSAs to test links",
                  action="store_true")

parser.add_option("--list-valid-dsas", default=False,
                  help=("Print a list of DSA dnstrs that could be"
                        " used in --forced-local-dsa"),
                  action="store_true")

parser.add_option("--test-all-reps-from", default=False,
                  help="Create and verify a graph of reps-from for every DSA",
                  action="store_true")

parser.add_option("--forget-local-links", default=False,
                  help="pretend not to know the existing local topology",
                  action="store_true")

parser.add_option("--forget-intersite-links", default=False,
                  help="pretend not to know the existing intersite topology",
                  action="store_true")
3184 if opts.list_verify_tests:
3189 logger.setLevel(logging.DEBUG)
3191 logger.setLevel(logging.INFO)
3193 logger.setLevel(logging.WARNING)
3195 # initialize seed from optional input parameter
3197 random.seed(opts.seed)
3199 random.seed(0xACE5CA11)
3202 for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
3204 now_tuple = time.strptime(opts.now, timeformat)
3209 # else happens if break doesn't --> no match
3210 print >> sys.stderr, "could not parse time '%s'" % opts.now
3213 unix_now = int(time.mktime(now_tuple))
3215 unix_now = int(time.time())
3217 nt_now = unix2nttime(unix_now)
3219 lp = sambaopts.get_loadparm()
3220 creds = credopts.get_credentials(lp, fallback_machine=True)
3222 if opts.dburl is None:
3223 opts.dburl = lp.samdb_url()
3225 if opts.test_all_reps_from:
3226 opts.readonly = True
3227 test_all_reps_from(lp, creds)
3230 # Instantiate Knowledge Consistency Checker and perform run
3234 rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
3238 if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
3239 logger.error("Specify a target temp database file with --tmpdb option")
3242 rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
3246 if opts.list_valid_dsas:
3247 kcc.load_samdb(opts.dburl, lp, creds)
3248 print '\n'.join(kcc.list_dsas())
3252 rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
3253 opts.forget_local_links, opts.forget_intersite_links)
3256 except GraphError, e: