3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# and heimdal can get mutual authentication errors due to the 24 second difference
# between UTC and GMT when using some zone files (eg. the PDT zone from
# ...) -- NOTE(review): the tail of this sentence is truncated in this copy.
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
47 from functools import partial
57 from samba.auth import system_session
58 from samba.samdb import SamDB
59 from samba.dcerpc import drsuapi
60 from samba.kcc_utils import *
61 from samba.graph_utils import *
62 from samba import ldif_utils
    """The Knowledge Consistency Checker class.

    A container for objects and methods allowing a run of the KCC.  Produces a
    set of connections in the samdb for which the Distributed Replication
    Service can then utilize to replicate naming contexts
    """
    # NOTE(review): the `def __init__(self):` line is elided from this copy
    # of the file; everything below belongs to KCC.__init__.  The
    # initialisation lines for self.site_table and self.dsa_by_guid are also
    # elided (both are referenced elsewhere in this class) -- confirm
    # against upstream.
        """Initializes the partitions class which can hold
        our local DCs partitions or all the partitions in
        (docstring truncated in this copy -- TODO confirm upstream text)
        """
        self.part_table = {}  # partition objects, keyed by partition dn string
        self.transport_table = {}   # Transport objects, keyed by guid string
        self.sitelink_table = {}    # SiteLink objects, keyed by dn string
        self.dsa_by_dnstr = {}      # DSA objects, keyed by dn string
        # Bound-method shortcuts for the two DSA lookup tables; both return
        # None on a miss (dict.get semantics).
        self.get_dsa_by_guidstr = self.dsa_by_guid.get
        self.get_dsa = self.dsa_by_dnstr.get

        # TODO: These should be backed by a 'permanent' store so that when
        # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
        # the failure information can be returned
        self.kcc_failed_links = {}          # KCCFailedObject, keyed by DSA guid string
        self.kcc_failed_connections = set() # failed connection objects

        # Used in inter-site topology computation. A list
        # of connections (by NTDSConnection object) that are
        # to be kept when pruning un-needed NTDS Connections
        self.kept_connections = set()

        self.my_dsa_dnstr = None  # My dsa DN
        self.my_dsa = None  # My dsa object

        self.my_site_dnstr = None
    def load_all_transports(self):
        """Loads the inter-site transport objects for Sites

        Populates self.transport_table, keyed by transport guid string.

        ::returns: Raises an Exception on error
        """
        # NOTE(review): the `try:` introducer, the loop over the search
        # results (which binds `dnstr`) and the duplicate-guid guard body
        # are elided from this copy of the file; code reproduced as found.
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=interSiteTransport)")
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find inter-site transports - (%s)" %

            transport = Transport(dnstr)

            transport.load_transport(self.samdb)

            # skip transports we have already loaded (guard body elided)
            if str(transport.guid) in self.transport_table:

            # Assign this transport to table
            self.transport_table[str(transport.guid)] = transport
    def load_all_sitelinks(self):
        """Loads the inter-site siteLink objects

        Populates self.sitelink_table, keyed by siteLink dn string.

        ::returns: Raises an Exception on error
        """
        # NOTE(review): the `try:` introducer, the loop over the search
        # results (which binds `dnstr`) and the duplicate guard body are
        # elided from this copy of the file; code reproduced as found.
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=siteLink)")
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find inter-site siteLinks - (%s)" % estr)

            # skip siteLinks we have already loaded (guard body elided)
            if dnstr in self.sitelink_table:

            sitelink = SiteLink(dnstr)

            sitelink.load_sitelink(self.samdb)

            # Assign this siteLink to table
            self.sitelink_table[dnstr] = sitelink
    def load_site(self, dn_str):
        """Helper for load_my_site and load_all_sites. It puts all the site's
        DSAs into the KCC indices.

        :param dn_str: dn string of the site to load
        """
        site = Site(dn_str, unix_now)
        site.load_site(self.samdb)

        # I am not sure why, but we avoid replacing the site with an
        # (comment truncated in this copy -- the visible code only inserts
        # the site when its guid is not already in the table)
        guid = str(site.site_guid)
        if guid not in self.site_table:
            self.site_table[guid] = site

        # Index every DSA of this site by its dn string and by its guid
        # string, so get_dsa()/get_dsa_by_guidstr() can find them.
        self.dsa_by_dnstr.update(site.dsa_table)
        self.dsa_by_guid.update((str(x.dsa_guid), x) for x in site.dsa_table.values())
        # NOTE(review): load_my_site assigns this method's return value;
        # the return statement appears to be elided from this copy --
        # confirm against upstream.
180 def load_my_site(self):
181 """Loads the Site class for the local DSA
183 ::returns: Raises an Exception on error
185 self.my_site_dnstr = "CN=%s,CN=Sites,%s" % (
186 self.samdb.server_site_name(),
187 self.samdb.get_config_basedn())
189 self.my_site = self.load_site(self.my_site_dnstr)
    def load_all_sites(self):
        """Discover all sites and instantiate and load each

        Delegates per-site loading (and index population) to load_site().

        ::returns: Raises an Exception on error
        """
        # NOTE(review): the `try:` introducer and the `for msg in res:` loop
        # line are elided from this copy of the file; code reproduced as
        # found.
            res = self.samdb.search("CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=site)")
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find sites - (%s)" % estr)

            sitestr = str(msg.dn)
            self.load_site(sitestr)
    def load_my_dsa(self):
        """Discover my nTDSDSA dn thru the rootDSE entry

        Sets self.my_dsa_dnstr and self.my_dsa (looked up in my_site).

        ::returns: Raises an Exception on error.
        """
        # NOTE(review): several lines (`try:` introducers, the DEBUG call's
        # trailing arguments, a result-length guard) are elided from this
        # copy of the file; code reproduced as found.
        dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    attrs=["objectGUID"])
        except ldb.LdbError, (enum, estr):
            DEBUG("Search for %s failed: %s. This typically happens in"
                  " --importldif mode due to lack of module support",

            # We work around the failure above by looking at the
            # dsServiceName that was put in the fake rootdse by
            # the --exportldif, rather than the
            # samdb.get_ntds_GUID(). The disadvantage is that this
            # mode requires we modify the @ROOTDSE dn to support
            # (comment truncated in this copy)
            service_name_res = self.samdb.search(base="", scope=ldb.SCOPE_BASE,
                                                 attrs=["dsServiceName"])
            dn = ldb.Dn(self.samdb, service_name_res[0]["dsServiceName"][0])

                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                        attrs=["objectGUID"])
            except ldb.LdbError, (enum, estr):
                raise Exception("Unable to find my nTDSDSA - (%s)" % estr)

            # (guard condition elided -- presumably len(res) != 1)
            raise Exception("Unable to find my nTDSDSA at %s" % dn.extended_str())

        # Sanity check: the object we found must carry our own NTDS GUID.
        if misc.GUID(res[0]["objectGUID"][0]) != misc.GUID(self.samdb.get_ntds_GUID()):
            raise Exception("Did not find the GUID we expected, perhaps due to --importldif")

        self.my_dsa_dnstr = str(res[0].dn)

        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
    def load_all_partitions(self):
        """Discover all NCs thru the Partitions dn and
        instantiate and load the NCs.

        Each NC is inserted into the part_table by partition
        dn string (not the nCName dn string)

        ::returns: Raises an Exception on error
        """
        # NOTE(review): the `try:` introducer, the `for msg in res:` loop
        # line and the duplicate guard body are elided from this copy of
        # the file; code reproduced as found.
            res = self.samdb.search("CN=Partitions,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=crossRef)")
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find partitions - (%s)" % estr)

            partstr = str(msg.dn)

            # skip partitions we have already loaded (guard body elided)
            if partstr in self.part_table:

            part = Partition(partstr)

            part.load_partition(self.samdb)
            self.part_table[partstr] = part
277 def should_be_present_test(self):
278 """Enumerate all loaded partitions and DSAs in local
279 site and test if NC should be present as replica
281 for partdn, part in self.part_table.items():
282 for dsadn, dsa in self.my_site.dsa_table.items():
283 needed, ro, partial = part.should_be_present(dsa)
284 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
285 (dsadn, part.nc_dnstr, needed, ro, partial))
    def refresh_failed_links_connections(self):
        """Based on MS-ADTS 6.2.2.1

        Rebuilds kcc_failed_links from the current repsFrom failure
        counters, and (optionally) probes failed connections to see if
        they have recovered.
        """
        # NOTE(review): several lines (a `continue` after the
        # failure_count guard, the `if f is None:` / `else:` introducers,
        # the KCCFailedObject call's trailing argument, a `try:` and an
        # `else:` near the live-connection check) are elided from this
        # copy of the file; code reproduced as found.

        # Instead of NULL link with failure_count = 0, the tuple is simply removed

        # LINKS: Refresh failed links
        self.kcc_failed_links = {}
        current, needed = self.my_dsa.get_rep_tables()
        for replica in current.values():
            # For every possible connection to replicate
            for reps_from in replica.rep_repsFrom:
                failure_count = reps_from.consecutive_sync_failures
                # healthy links (no consecutive failures) carry no tuple
                if failure_count <= 0:

                dsa_guid = str(reps_from.source_dsa_obj_guid)
                time_first_failure = reps_from.last_success
                last_result = reps_from.last_attempt
                dns_name = reps_from.dns_name1

                f = self.kcc_failed_links.get(dsa_guid)
                    f = KCCFailedObject(dsa_guid, failure_count,
                                        time_first_failure, last_result,
                    self.kcc_failed_links[dsa_guid] = f
                #elif f.failure_count == 0:
                #    f.failure_count = failure_count
                #    f.time_first_failure = time_first_failure
                #    f.last_result = last_result
                    # merge with an existing tuple for the same source DSA
                    f.failure_count = max(f.failure_count, failure_count)
                    f.time_first_failure = min(f.time_first_failure, time_first_failure)
                    f.last_result = last_result

        # CONNECTIONS: Refresh failed connections
        restore_connections = set()
        if opts.attempt_live_connections:
            DEBUG("refresh_failed_links: checking if links are still down")
            for connection in self.kcc_failed_connections:
                    drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
                    # Failed connection is no longer failing
                    restore_connections.add(connection)
                except drs_utils.drsException:
                    # Failed connection still failing
                    connection.failure_count += 1

            DEBUG("refresh_failed_links: not checking live links because we weren't\n"
                  "asked to --attempt-live-connections")

        # Remove the restored connections from the failed connections
        self.kcc_failed_connections.difference_update(restore_connections)
    def is_stale_link_connection(self, target_dsa):
        """Returns False if no tuple z exists in the kCCFailedLinks or
        kCCFailedConnections variables such that z.UUIDDsa is the
        objectGUID of the target dsa, z.FailureCount > 0, and
        the current time - z.TimeFirstFailure > 2 hours.
        """
        # NOTE(review): the `if failed_link is not None:` style guard and
        # the return statements are elided from this copy of the file;
        # code reproduced as found.

        # Returns True if tuple z exists...
        failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
            # failure_count should be > 0, but check anyways
            if failed_link.failure_count > 0:
                # time_first_failure is an NT time; convert for arithmetic
                unix_first_time_failure = nttime2unix(failed_link.time_first_failure)
                # TODO guard against future
                if unix_first_time_failure > unix_now:
                    logger.error("The last success time attribute for \
repsFrom is in the future!")

                # Perform calculation in seconds
                if (unix_now - unix_first_time_failure) > 60 * 60 * 2:
    # TODO: This should be backed by some form of local database
    def remove_unneeded_failed_links_connections(self):
        """Prune the failure-tracking tables after a run.

        NOTE(review): the executable statements of this method are elided
        from this copy of the file; only the explanatory comments remain.
        """
        # Remove all tuples in kcc_failed_links where failure count = 0
        # In this implementation, this should never happen.

        # Remove all connections which were not used this run or connections
        # that became active during this run.
    def remove_unneeded_ntdsconn(self, all_connected):
        """Removes unneeded NTDS Connections after computation
        of KCC intra and inter-site topology has finished.

        :param all_connected: result of the intersite connection
            computation (see the `if not all_connected` check below)
        """
        # NOTE(review): several lines are elided from this copy of the
        # file -- among them the binding of the local name `mydsa`
        # (presumably self.my_dsa), an if/else around the new-guid
        # assignment, and the bodies of several guard clauses (presumably
        # `continue` statements).  Code reproduced as found; confirm
        # against upstream before editing.

        # Loop thru connections
        for cn_conn in mydsa.connect_table.values():
            # Connections loaded from LDIF may lack a guid; synthesize one.
            if cn_conn.guid is None:
                cn_conn.guid = misc.GUID(str(uuid.uuid4()))
                cn_conn.whenCreated = nt_now
                cn_conn.load_connection(self.samdb)

        for cn_conn in mydsa.connect_table.values():

            s_dnstr = cn_conn.get_from_dnstr()
            # (guard elided -- connection without a fromServer is deleted)
                cn_conn.to_be_deleted = True

            # Get the source DSA no matter what site
            s_dsa = self.get_dsa(s_dnstr)

            # Check if the DSA is in our site
            if self.my_site.same_site(s_dsa):

                # Given an nTDSConnection object cn, if the DC with the
                # nTDSDSA object dc that is the parent object of cn and
                # the DC with the nTDSDA object referenced by cn!fromServer
                # are in the same site, the KCC on dc deletes cn if all of
                # the following are true:
                #
                # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
                #
                # No site settings object s exists for the local DC's site, or
                # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
                #
                # Another nTDSConnection object cn2 exists such that cn and
                # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
                #
                #     cn!whenCreated < cn2!whenCreated
                #
                #     cn!whenCreated = cn2!whenCreated and
                #     cn!objectGUID < cn2!objectGUID
                #
                # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
                if not cn_conn.is_generated():

                if self.my_site.is_cleanup_ntdsconn_disabled():

                # Loop thru connections looking for a duplicate that
                # fulfills the previous criteria
                for cn2_conn in mydsa.connect_table.values():
                    if cn2_conn is cn_conn:

                    s2_dnstr = cn2_conn.get_from_dnstr()

                    # If the NTDS Connections has a different
                    # fromServer field then no match
                    if s2_dnstr != s_dnstr:

                    # cn is "lesser" (i.e. the duplicate to delete) when it
                    # is older, or same age with the smaller packed guid.
                    lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                              (cn_conn.whenCreated == cn2_conn.whenCreated and
                               ndr_pack(cn_conn.guid) < ndr_pack(cn2_conn.guid)))

                    if lesser and not cn_conn.is_rodc_topology():
                        cn_conn.to_be_deleted = True

            # Given an nTDSConnection object cn, if the DC with the nTDSDSA
            # object dc that is the parent object of cn and the DC with
            # the nTDSDSA object referenced by cn!fromServer are in
            # different sites, a KCC acting as an ISTG in dc's site
            # deletes cn if all of the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # cn!fromServer references an nTDSDSA object for a DC
            # in a site other than the local DC's site.
            #
            # The keepConnections sequence returned by
            # CreateIntersiteConnections() does not contain
            # cn!objectGUID, or cn is "superseded by" (see below)
            # another nTDSConnection cn2 and keepConnections
            # contains cn2!objectGUID.
            #
            # The return value of CreateIntersiteConnections()
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
            else:  # different site

                if not mydsa.is_istg():

                if not cn_conn.is_generated():

                # We are directly using this connection in intersite or
                # we are using a connection which can supersede this one.
                #
                # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
                # appear to be correct.
                #
                # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
                # 2. The repsFrom do not imply each other
                #
                if cn_conn in self.kept_connections:  # and not_superceded:

                # This is the result of create_intersite_connections
                if not all_connected:

                if not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        if mydsa.is_ro() or opts.readonly:
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    DEBUG_GREEN("TO BE DELETED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_GREEN("TO BE ADDED:\n%s" % connect)

            # Perform deletion from our tables but perform
            # no database modification
            mydsa.commit_connections(self.samdb, ro=True)

            # Commit any modified connections
            mydsa.commit_connections(self.samdb)
    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
        """Part of MS-ADTS 6.2.2.5.

        Update t_repsFrom if necessary to satisfy requirements. Such
        updates are typically required when the IDL_DRSGetNCChanges
        server has moved from one site to another--for example, to
        enable compression when the server is moved from the
        client's site to another site.

        :param n_rep: NC replica we need
        :param t_repsFrom: repsFrom tuple to modify
        :param s_rep: NC replica at source DSA
        :param s_dsa: source DSA
        :param cn_conn: Local DSA NTDSConnection child

        ::returns: (update) bit field containing which portion of the
           repsFrom was modified. This bit field is suitable as input
           to IDL_DRSReplicaModify ulModifyFields element, as it consists
           of bits such as:
               drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
               drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
               drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
        """
        # NOTE(review): a number of lines are elided from this copy of the
        # file -- among them the initialisation of the local `update` bit
        # field and of `same_site`, several if/else introducers and some
        # multi-line expression continuations.  Code is reproduced as
        # found; confirm against upstream before editing.
        s_dnstr = s_dsa.dsa_dnstr

        # (the assignment of `same_site` around this test is elided)
        if self.my_site.same_site(s_dsa):

        # if schedule doesn't match then update and modify
        times = convert_schedule_to_repltimes(cn_conn.schedule)
        if times != t_repsFrom.schedule:
            t_repsFrom.schedule = times
            update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE

        # Bit DRS_PER_SYNC is set in replicaFlags if and only
        # if nTDSConnection schedule has a value v that specifies
        # scheduled replication is to be performed at least once
        if cn_conn.is_schedule_minimum_once_per_week():

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
        # if the source DSA and the local DC's nTDSDSA object are
        # in the same site or source dsa is the FSMO role owner
        # of one or more FSMO roles in the NC replica.
        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
        # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
        # t.replicaFlags if and only if s and the local DC's
        # nTDSDSA object are in different sites.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0:

            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:

                if (t_repsFrom.replica_flags &
                    drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
                    t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

            # (elif/else branch introducer elided here)
            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
        # and only if s and the local DC's nTDSDSA object are
        # not in the same site and the
        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
        # clear in cn!options
        if (not same_site and
             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
        # set in t.replicaFlags if and only if cn!enabledConnection = false.
        if not cn_conn.is_enabled():

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0:
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0:
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

        # If s and the local DC's nTDSDSA object are in the same site,
        # cn!transportType has no value, or the RDN of cn!transportType
        #
        # Bit DRS_MAIL_REP in t.replicaFlags is clear.
        #
        # t.uuidTransport = NULL GUID.
        #
        # t.uuidDsa = The GUID-based DNS name of s.
        #
        # Bit DRS_MAIL_REP in t.replicaFlags is set.
        #
        # If x is the object with dsname cn!transportType,
        # t.uuidTransport = x!objectGUID.
        #
        # Let a be the attribute identified by
        # x!transportAddressAttribute. If a is
        # the dNSHostName attribute, t.uuidDsa = the GUID-based
        # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # It appears that the first statement i.e.
        #
        # "If s and the local DC's nTDSDSA object are in the same
        #  site, cn!transportType has no value, or the RDN of
        #  cn!transportType is CN=IP:"
        #
        # could be a slightly tighter statement if it had an "or"
        # between each condition. I believe this should
        #
        # IF (same-site) OR (no-value) OR (type-ip)
        #
        # because IP should be the primary transport mechanism
        # (even in inter-site) and the absense of the transportType
        # attribute should always imply IP no matter if its multi-site
        #
        # NOTE MS-TECH INCORRECT:
        #
        # All indications point to these statements above being
        # incorrectly stated:
        #
        # t.uuidDsa = The GUID-based DNS name of s.
        #
        # Let a be the attribute identified by
        # x!transportAddressAttribute. If a is
        # the dNSHostName attribute, t.uuidDsa = the GUID-based
        # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # because the uuidDSA is a GUID and not a GUID-base DNS
        # name. Nor can uuidDsa hold (s!parent)!a if not
        # dNSHostName. What should have been said is:
        #
        # t.naDsa = The GUID-based DNS name of s
        #
        # That would also be correct if transportAddressAttribute
        # were "mailAddress" because (naDsa) can also correctly
        # hold the SMTP ISM service address.
        #
        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

        # We're not currently supporting SMTP replication
        # so is_smtp_replication_available() is currently
        # always returning False
        # (the `if (same_site or` introducer line is elided here)
            cn_conn.transport_dnstr is None or
            cn_conn.transport_dnstr.find("CN=IP") == 0 or
            not is_smtp_replication_available()):

            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0:
                t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

            t_repsFrom.transport_guid = misc.GUID()

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
                # (else branch introducer elided here)
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

            # (else branch introducer elided here)
            if (t_repsFrom.replica_flags &
                drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0:
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

            # We have a transport type but its not an
            # object in the database
            if cn_conn.transport_guid not in self.transport_table:
                raise Exception("Missing inter-site transport - (%s)" %
                                cn_conn.transport_dnstr)

            x_transport = self.transport_table[str(cn_conn.transport_guid)]

            if t_repsFrom.transport_guid != x_transport.guid:
                t_repsFrom.transport_guid = x_transport.guid

            # See (NOTE MS-TECH INCORRECT) above
            if x_transport.address_attr == "dNSHostName":

                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                    # (else branch introducer elided here)
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:
                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent of
                # the DSA object (comment continuation elided)
                pdnstr = s_dsa.get_parent_dnstr()
                attrs = [ x_transport.address_attr ]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                except ldb.LdbError, (enum, estr):
                        "Unable to find attr (%s) for (%s) - (%s)" %
                        (x_transport.address_attr, pdnstr, estr))

                nastr = str(msg[x_transport.address_attr][0])

                # See (NOTE MS-TECH INCORRECT) above
                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                    # (else branch introducer elided here)
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:

                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

        if t_repsFrom.is_modified():
            logger.debug("modify_repsFrom(): %s" % t_repsFrom)
    def is_repsFrom_implied(self, n_rep, cn_conn):
        """Given a NC replica and NTDS Connection, determine if the connection
        implies a repsFrom tuple should be present from the source DSA listed
        in the connection to the naming context

        :param n_rep: NC replica
        :param conn: NTDS Connection
        ::returns (True || False), source DSA:
        """
        # NOTE(review): the guard-clause bodies and the return statements
        # of this method are elided from this copy of the file; code is
        # reproduced as found.

        # NTDS Connection must satisfy all the following criteria
        # to imply a repsFrom tuple is needed:
        #
        #    cn!enabledConnection = true.
        #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
        #    cn!fromServer references an nTDSDSA object.
        if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is not None:
                s_dsa = self.get_dsa(s_dnstr)

        # No DSA matching this source DN string?

        # To imply a repsFrom tuple is needed, each of these
        #
        # An NC replica of the NC "is present" on the DC to
        # which the nTDSDSA object referenced by cn!fromServer
        #
        # An NC replica of the NC "should be present" on
        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

        if s_rep is None or not s_rep.is_present():

        # To imply a repsFrom tuple is needed, each of these
        #
        # The NC replica on the DC referenced by cn!fromServer is
        # a writable replica or the NC replica that "should be
        # present" on the local DC is a partial replica.
        #
        # The NC is not a domain NC, the NC replica that
        # "should be present" on the local DC is a partial
        # replica, cn!transportType has no value, or
        # cn!transportType has an RDN of CN=IP.
        implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
                  (not n_rep.is_domain() or
                   n_rep.is_partial() or
                   cn_conn.transport_dnstr is None or
                   cn_conn.transport_dnstr.find("CN=IP") == 0)
    def translate_ntdsconn(self):
        """This function adjusts values of repsFrom abstract attributes of NC
        replicas on the local DC to match those implied by
        nTDSConnection objects.
        """
        # NOTE(review): several lines are elided from this copy of the
        # file -- among them the early `return` after the disabled check,
        # the initialisation of `delete_reps`, several guard introducers
        # and `continue`/`break` statements, and the if/else around the
        # read-only commit.  Code reproduced as found.
        if self.my_dsa.is_translate_ntdsconn_disabled():
            logger.debug("skipping translate_ntdsconn() because disabling flag is set")

        logger.debug("translate_ntdsconn(): enter")

        current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()

        # Filled in with replicas we currently have that need deleting

        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # n_rep - NC replica (n)
        # t_repsFrom - tuple (t) in n!repsFrom
        # s_dsa - Source DSA of the replica. Defined as nTDSDSA
        #         object (s) such that (s!objectGUID = t.uuidDsa)
        #         In our IDL representation of repsFrom the (uuidDsa)
        #         attribute is called (source_dsa_obj_guid)
        # cn_conn - (cn) is nTDSConnection object and child of the local DC's
        #           nTDSDSA object and (cn!fromServer = s)
        # s_rep - source DSA replica of n
        #
        # If we have the replica and its not needed
        # then we add it to the "to be deleted" list.
        for dnstr in current_rep_table:
            if dnstr not in needed_rep_table:
                delete_reps.add(dnstr)

        DEBUG('current %d needed %d delete %d', len(current_rep_table),
              len(needed_rep_table), len(delete_reps))
        DEBUG('deleting these reps: %s', delete_reps)
        for dnstr in delete_reps:
            del current_rep_table[dnstr]

        # Now perform the scan of replicas we'll need
        # and compare any current repsFrom against the
        for n_rep in needed_rep_table.values():

            # load any repsFrom and fsmo roles as we'll
            # need them during connection translation
            n_rep.load_repsFrom(self.samdb)
            n_rep.load_fsmo_roles(self.samdb)

            # Loop thru the existing repsFrom tuples (if any)
            for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):

                # for each tuple t in n!repsFrom, let s be the nTDSDSA
                # object such that s!objectGUID = t.uuidDsa
                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                s_dsa = self.get_dsa_by_guidstr(guidstr)

                # Source dsa is gone from config (strange)
                # so cleanup stale repsFrom for unlisted DSA
                # (the `if s_dsa is None:` guard introducer is elided here)
                logger.warning("repsFrom source DSA guid (%s) not found" %
                t_repsFrom.to_be_deleted = True

                s_dnstr = s_dsa.dsa_dnstr

                # Retrieve my DSAs connection object (if it exists)
                # that specifies the fromServer equivalent to
                # the DSA that is specified in the repsFrom source
                cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)

                # Let (cn) be the nTDSConnection object such that (cn)
                # is a child of the local DC's nTDSDSA object and
                # (cn!fromServer = s) and (cn!options) does not contain
                # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
                if cn_conn and not cn_conn.is_rodc_topology():

                # KCC removes this repsFrom tuple if any of the following
                #
                #     No NC replica of the NC "is present" on DSA that
                #     would be source of replica
                #
                #     A writable replica of the NC "should be present" on
                #     the local DC, but a partial replica "is present" on
                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                if cn_conn is None or \
                   s_rep is None or not s_rep.is_present() or \
                   (not n_rep.is_ro() and s_rep.is_partial()):

                    t_repsFrom.to_be_deleted = True

                # If the KCC did not remove t from n!repsFrom, it updates t
                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Loop thru connections and add implied repsFrom tuples
            # for each NTDSConnection under our local DSA if the
            # repsFrom is not already present
            for cn_conn in self.my_dsa.connect_table.values():

                implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)

                # Loop thru the existing repsFrom tuples (if any) and
                # if we already have a tuple for this connection then
                # no need to proceed to add. It will have been changed
                # to have the correct attributes above
                for t_repsFrom in n_rep.rep_repsFrom:
                    guidstr = str(t_repsFrom.source_dsa_obj_guid)
                    if s_dsa is self.get_dsa_by_guidstr(guidstr):

                # Create a new RepsFromTo and proceed to modify
                # it according to specification
                t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

                t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

                # Add to our NC repsFrom as this is newly computed
                if t_repsFrom.is_modified():
                    n_rep.rep_repsFrom.append(t_repsFrom)

                # Display any to be deleted or modified repsFrom
                text = n_rep.dumpstr_to_be_deleted()
                    logger.info("TO BE DELETED:\n%s" % text)
                text = n_rep.dumpstr_to_be_modified()
                    logger.info("TO BE MODIFIED:\n%s" % text)

                # Perform deletion from our tables but perform
                # no database modification
                n_rep.commit_repsFrom(self.samdb, ro=True)

                # Commit any modified repsFrom to the NC replica
                n_rep.commit_repsFrom(self.samdb)
    def merge_failed_links(self):
        """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.
        """
        # NOTE(review): an `else:` introducer before the final DEBUG call
        # is elided from this copy of the file; code reproduced as found.

        # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
        #
        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
        if opts.attempt_live_connections:
            DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")

            DEBUG("skipping merge_failed_links() because it requires real network connections\n"
                  "and we weren't asked to --attempt-live-connctions")
    def setup_graph(self, part):
        """Set up a GRAPH, populated with a VERTEX for each site
        object, a MULTIEDGE for each siteLink object, and a
        MUTLIEDGESET for each siteLinkBridge object (or implied

        :param part: the partition (crossRef) the graph is built for

        ::returns: a new graph
        """
        # NOTE(review): several lines are elided from this copy of the
        # file -- among them the initialisation of `guid_to_vertex` and
        # `dot_edges`, an `else:` before the site-link-bridge loop, the
        # create_edge_set call's trailing argument, and the final
        # `return g`.  Code reproduced as found.
        g = IntersiteGraph()

        for site_guid, site in self.site_table.items():
            vertex = Vertex(site, part)
            vertex.guid = site_guid
            vertex.ndrpacked_guid = ndr_pack(site.site_guid)
            g.vertices.add(vertex)

            if not guid_to_vertex.get(site_guid):
                guid_to_vertex[site_guid] = []

            guid_to_vertex[site_guid].append(vertex)

        connected_vertices = set()
        for transport_guid, transport in self.transport_table.items():
            # Currently only ever "IP"
            for site_link_dn, site_link in self.sitelink_table.items():
                new_edge = create_edge(transport_guid, site_link, guid_to_vertex)
                connected_vertices.update(new_edge.vertices)
                g.edges.add(new_edge)

            # If 'Bridge all site links' is enabled and Win2k3 bridges required is not set
            # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
            # No documentation for this however, ntdsapi.h appears to have listed:
            # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
            if ((self.my_site.site_options & 0x00000002) == 0
                    and (self.my_site.site_options & 0x00001000) == 0):
                g.edge_set.add(create_auto_edge_set(g, transport_guid))

                # TODO get all site link bridges
                for site_link_bridge in []:
                    g.edge_set.add(create_edge_set(g, transport_guid,

        g.connected_vertices = connected_vertices

        # Optionally dump/verify the site graph as dot output.
        for edge in g.edges:
            for a, b in itertools.combinations(edge.vertices, 2):
                dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
        verify_properties = ()
        verify_and_dot('site_edges', dot_edges, directed=False, label=self.my_dsa_dnstr,
                       properties=verify_properties, debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)
1097 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1098 """Get a bridgehead DC.
1100 :param site: site object representing for which a bridgehead
1102 :param part: crossRef for NC to replicate.
1103 :param transport: interSiteTransport object for replication
1105 :param partial_ok: True if a DC containing a partial
1106 replica or a full replica will suffice, False if only
1107 a full replica will suffice.
1108 :param detect_failed: True to detect failed DCs and route
1109 replication traffic around them, False to assume no DC
1111 ::returns: dsa object for the bridgehead DC or None
# Delegate the real work to get_all_bridgeheads(); presumably the
# first entry of its (sorted) result is the chosen bridgehead —
# the return statement is below the DEBUG_GREEN call.
1114 bhs = self.get_all_bridgeheads(site, part, transport,
1115 partial_ok, detect_failed)
1117 DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1121 DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1122 (site.site_dnstr, bhs[0].dsa_dnstr))
1125 def get_all_bridgeheads(self, site, part, transport,
1126 partial_ok, detect_failed):
1127 """Get all bridgehead DCs satisfying the given criteria
1129 :param site: site object representing the site for which
1130 bridgehead DCs are desired.
1131 :param part: partition for NC to replicate.
1132 :param transport: interSiteTransport object for
1133 replication traffic.
1134 :param partial_ok: True if a DC containing a partial
1135 replica or a full replica will suffice, False if
1136 only a full replica will suffice.
1137 :param detect_failed: True to detect failed DCs and route
1138 replication traffic around them, FALSE to assume
1140 ::returns: list of dsa object for available bridgehead
# NOTE(review): the 'Site-5' check below looks like leftover
# site-specific debugging — confirm whether it should remain.
1146 logger.debug("get_all_bridgeheads: %s" % transport)
1147 if 'Site-5' in site.site_dnstr:
1148 DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
1149 " detect_failed %s" % (site.site_dnstr, part.partstr,
1150 partial_ok, detect_failed))
1151 logger.debug(site.dsa_table)
# Walk every DSA in the candidate site, filtering per the MS-ADTS
# pseudo-code; DCs that fail any test are skipped (continues elided).
1152 for key, dsa in site.dsa_table.items():
1154 pdnstr = dsa.get_parent_dnstr()
1156 # IF t!bridgeheadServerListBL has one or more values and
1157 # t!bridgeheadServerListBL does not contain a reference
1158 # to the parent object of dc then skip dc
1159 if (len(transport.bridgehead_list) != 0 and
1160 pdnstr not in transport.bridgehead_list):
1163 # IF dc is in the same site as the local DC
1164 # IF a replica of cr!nCName is not in the set of NC replicas
1165 # that "should be present" on dc or a partial replica of the
1166 # NC "should be present" but partialReplicasOkay = FALSE
1168 if self.my_site.same_site(dsa):
1169 needed, ro, partial = part.should_be_present(dsa)
1170 if not needed or (partial and not partial_ok):
1172 rep = dsa.get_current_replica(part.nc_dnstr)
1175 # IF an NC replica of cr!nCName is not in the set of NC
1176 # replicas that "are present" on dc or a partial replica of
1177 # the NC "is present" but partialReplicasOkay = FALSE
1180 rep = dsa.get_current_replica(part.nc_dnstr)
1181 if rep is None or (rep.is_partial() and not partial_ok):
1184 # IF AmIRODC() and cr!nCName corresponds to default NC then
1185 # Let dsaobj be the nTDSDSA object of the dc
1186 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1188 if self.my_dsa.is_ro() and rep is not None and rep.is_default():
1189 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1192 # IF t!name != "IP" and the parent object of dc has no value for
1193 # the attribute specified by t!transportAddressAttribute
1195 if transport.name != "IP":
1196 # MS tech specification says we retrieve the named
1197 # attribute in "transportAddressAttribute" from the parent
1200 attrs = [ transport.address_attr ]
1202 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1204 except ldb.LdbError, (enum, estr):
1208 if transport.address_attr not in msg:
1211 nastr = str(msg[transport.address_attr][0])
1213 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1215 if self.is_bridgehead_failed(dsa, detect_failed):
1216 DEBUG("bridgehead is failed")
1219 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1222 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1224 # SORT bhs such that all GC servers precede DCs that are not GC
1225 # servers, and otherwise by ascending objectGUID
1227 # SORT bhs in a random order
1228 if site.is_random_bridgehead_disabled():
1229 bhs.sort(sort_dsa_by_gc_and_guid)
1236 def is_bridgehead_failed(self, dsa, detect_failed):
1237 """Determine whether a given DC is known to be in a failed state
1238 ::returns: True if and only if the DC should be considered failed
1240 Here we DEPART from the pseudo code spec which appears to be
1241 wrong. It says, in full:
1243 /***** BridgeheadDCFailed *****/
1244 /* Determine whether a given DC is known to be in a failed state.
1245 * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
1246 * IN: detectFailedDCs - TRUE if and only failed DC detection is
1248 * RETURNS: TRUE if and only if the DC should be considered to be in a
1251 BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
1253 IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
1254 the options attribute of the site settings object for the local
1257 ELSEIF a tuple z exists in the kCCFailedLinks or
1258 kCCFailedConnections variables such that z.UUIDDsa =
1259 objectGUID, z.FailureCount > 1, and the current time -
1260 z.TimeFirstFailure > 2 hours
1263 RETURN detectFailedDCs
1267 where you will see detectFailedDCs is not behaving as
1268 advertised -- it is acting as a default return code in the
1269 event that a failure is not detected, not a switch turning
1270 detection on or off. Elsewhere the documentation seems to
1271 concur with the comment rather than the code.
# With detection off, a DC is never considered failed.
1273 if not detect_failed:
1276 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1277 # When DETECT_STALE_DISABLED, we can never know of if it's in a failed state
1278 if self.my_site.site_options & 0x00000008:
# Otherwise defer to the stale link/connection records.
1281 return self.is_stale_link_connection(dsa)
1284 def create_connection(self, part, rbh, rsite, transport,
1285 lbh, lsite, link_opt, link_sched,
1286 partial_ok, detect_failed):
1287 """Create an nTDSConnection object with the given parameters
1288 if one does not already exist.
1290 :param part: crossRef object for the NC to replicate.
1291 :param rbh: nTDSDSA object for DC to act as the
1292 IDL_DRSGetNCChanges server (which is in a site other
1293 than the local DC's site).
1294 :param rsite: site of the rbh
1295 :param transport: interSiteTransport object for the transport
1296 to use for replication traffic.
1297 :param lbh: nTDSDSA object for DC to act as the
1298 IDL_DRSGetNCChanges client (which is in the local DC's site).
1299 :param lsite: site of the lbh
1300 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1301 :param link_sched: Schedule specifying the times at which
1302 to begin replicating.
1303 :param partial_ok: True if bridgehead DCs containing partial
1304 replicas of the NC are acceptable.
1305 :param detect_failed: True to detect failed DCs and route
1306 replication traffic around them, FALSE to assume no DC
# Gather candidate bridgeheads on both sides; rbh_table lets us
# match an existing connection's fromServer DN to a remote DSA.
1309 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1311 rbh_table = {x.dsa_dnstr:x for x in rbhs_all}
1313 DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all), [x.dsa_dnstr for x in rbhs_all]))
1315 # MS-TECH says to compute rbhs_avail but then doesn't use it
1316 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1317 # partial_ok, detect_failed)
1319 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1322 DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all), [x.dsa_dnstr for x in lbhs_all]))
1324 # MS-TECH says to compute lbhs_avail but then doesn't use it
1325 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1326 # partial_ok, detect_failed)
1328 # FOR each nTDSConnection object cn such that the parent of cn is
1329 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
# Pass 1: reconcile the options/schedule of existing generated
# connections with the siteLink's settings (link_opt/link_sched).
1330 for ldsa in lbhs_all:
1331 for cn in ldsa.connect_table.values():
1333 rdsa = rbh_table.get(cn.from_dnstr)
1337 DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
1338 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1339 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1340 # cn!transportType references t
1341 if (cn.is_generated() and not cn.is_rodc_topology() and
1342 cn.transport_guid == transport.guid):
1344 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1345 # cn!options and cn!schedule != sch
1346 # Perform an originating update to set cn!schedule to
1348 if (not cn.is_user_owned_schedule() and
1349 not cn.is_equivalent_schedule(link_sched)):
1350 cn.schedule = link_sched
1351 cn.set_modified(True)
1353 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1354 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1355 if cn.is_override_notify_default() and \
1358 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1360 # Perform an originating update to clear bits
1361 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1362 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1363 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1365 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1366 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1367 cn.set_modified(True)
1372 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1374 # Perform an originating update to set bits
1375 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1376 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1377 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1379 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1380 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1381 cn.set_modified(True)
1384 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1385 if cn.is_twoway_sync():
1387 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1389 # Perform an originating update to clear bit
1390 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1391 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1392 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1393 cn.set_modified(True)
1398 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1400 # Perform an originating update to set bit
1401 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1402 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1403 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1404 cn.set_modified(True)
1407 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1409 if cn.is_intersite_compression_disabled():
1411 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1413 # Perform an originating update to clear bit
1414 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1417 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0:
1419 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1420 cn.set_modified(True)
1424 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1426 # Perform an originating update to set bit
1427 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1430 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1432 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1433 cn.set_modified(True)
1435 # Display any modified connection
1437 if cn.to_be_modified:
1438 logger.info("TO BE MODIFIED:\n%s" % cn)
# ro=True pass updates in-memory state only; the second call
# presumably performs the real database commit — TODO confirm
# (guard lines between the two calls are elided here).
1440 ldsa.commit_connections(self.samdb, ro=True)
1442 ldsa.commit_connections(self.samdb)
1445 valid_connections = 0
1447 # FOR each nTDSConnection object cn such that cn!parent is
1448 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
# Pass 2: count connections that are usable (neither endpoint
# failed) and remember them in kept_connections.
1449 for ldsa in lbhs_all:
1450 for cn in ldsa.connect_table.values():
1452 rdsa = rbh_table.get(cn.from_dnstr)
1456 DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
1458 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1459 # cn!transportType references t) and
1460 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1461 if ((not cn.is_generated() or
1462 cn.transport_guid == transport.guid) and
1463 not cn.is_rodc_topology()):
1465 # LET rguid be the objectGUID of the nTDSDSA object
1466 # referenced by cn!fromServer
1467 # LET lguid be (cn!parent)!objectGUID
1469 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1470 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1471 # Increment cValidConnections by 1
1472 if (not self.is_bridgehead_failed(rdsa, detect_failed) and
1473 not self.is_bridgehead_failed(ldsa, detect_failed)):
1474 valid_connections += 1
1476 # IF keepConnections does not contain cn!objectGUID
1477 # APPEND cn!objectGUID to keepConnections
1478 self.kept_connections.add(cn)
1481 DEBUG_RED("valid connections %d" % valid_connections)
1482 DEBUG("kept_connections:\n%s" % (self.kept_connections,))
1483 # IF cValidConnections = 0
# No usable connection exists: build the options bitmask from the
# siteLink options and create a fresh nTDSConnection under lbh.
1484 if valid_connections == 0:
1486 # LET opt be NTDSCONN_OPT_IS_GENERATED
1487 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1489 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1490 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1491 # NTDSCONN_OPT_USE_NOTIFY in opt
1492 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1493 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1494 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1496 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1497 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1498 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1499 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1501 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1503 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1505 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1506 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1508 # Perform an originating update to create a new nTDSConnection
1509 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1510 # cn!options = opt, cn!transportType is a reference to t,
1511 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1512 cn = lbh.new_connection(opt, 0, transport, rbh.dsa_dnstr, link_sched)
1514 # Display any added connection
1517 logger.info("TO BE ADDED:\n%s" % cn)
1519 lbh.commit_connections(self.samdb, ro=True)
1521 lbh.commit_connections(self.samdb)
1523 # APPEND cn!objectGUID to keepConnections
1524 self.kept_connections.add(cn)
1526 def add_transports(self, vertex, local_vertex, graph, detect_failed):
# Populate vertex.accept_red_red / vertex.accept_black with the
# transport GUIDs over which this vertex can accept replication,
# plus a catch-all "EDGE_TYPE_ALL" entry for a later Dijkstra run.
1528 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1529 # here and in the, but using vertex seems to make more
1530 # sense. That is, it wants this:
1532 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1533 # local_vertex.is_black(), detect_failed)
1537 vertex.accept_red_red = []
1538 vertex.accept_black = []
1539 found_failed = False
1540 for t_guid, transport in self.transport_table.items():
1541 if transport.name != 'IP':
1542 #XXX well this is cheating a bit
1543 logging.warning("WARNING: we are ignoring a transport named %r" % transport.name)
1546 # FLAG_CR_NTDS_DOMAIN 0x00000002
# NOTE(review): transport.name != "IP" can never be true here
# given the skip above — this condition looks vestigial.
1547 if (vertex.is_red() and transport.name != "IP" and
1548 vertex.part.system_flags & 0x00000002):
1551 if vertex not in graph.connected_vertices:
1554 partial_replica_okay = vertex.is_black()
1555 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1556 partial_replica_okay, detect_failed)
1561 vertex.accept_red_red.append(t_guid)
1562 vertex.accept_black.append(t_guid)
1564 # Add additional transport to allow another run of Dijkstra
1565 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1566 vertex.accept_black.append("EDGE_TYPE_ALL")
1570 def create_connections(self, graph, part, detect_failed):
1571 """Construct an NC replica graph for the NC identified by
1572 the given crossRef, then create any additional nTDSConnection
1575 :param graph: site graph.
1576 :param part: crossRef object for NC.
1577 :param detect_failed: True to detect failed DCs and route
1578 replication traffic around them, False to assume no DC
1581 Modifies self.kept_connections by adding any connections
1582 deemed to be "in use".
1584 ::returns: (all_connected, found_failed_dc)
1585 (all_connected) True if the resulting NC replica graph
1586 connects all sites that need to be connected.
1587 (found_failed_dc) True if one or more failed DCs were
1590 all_connected = True
1591 found_failed = False
1593 logger.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
1594 (part.nc_dnstr, detect_failed))
1596 # XXX - This is a highly abbreviated function from the MS-TECH
1597 # ref. It creates connections between bridgeheads to all
1598 # sites that have appropriate replicas. Thus we are not
1599 # creating a minimum cost spanning tree but instead
1600 # producing a fully connected tree. This should produce
1601 # a full (albeit not optimal cost) replication topology.
1603 my_vertex = Vertex(self.my_site, part)
1604 my_vertex.color_vertex()
# Annotate every vertex with its acceptable transports before
# computing the spanning tree.
1606 for v in graph.vertices:
1608 if self.add_transports(v, my_vertex, graph, False):
1611 # No NC replicas for this NC in the site of the local DC,
1612 # so no nTDSConnection objects need be created
1613 if my_vertex.is_white():
1614 return all_connected, found_failed
1616 edge_list, component_count = self.get_spanning_tree_edges(graph, label=part.partstr)
1618 logger.debug("%s Number of components: %d" % (part.nc_dnstr, component_count))
1619 if component_count > 1:
1620 all_connected = False
1622 # LET partialReplicaOkay be TRUE if and only if
1623 # localSiteVertex.Color = COLOR.BLACK
1624 if my_vertex.is_black():
1629 # Utilize the IP transport only for now
1631 for transport in self.transport_table.values():
1632 if transport.name == "IP":
1635 if transport is None:
1636 raise Exception("Unable to find inter-site transport for IP")
1638 DEBUG("edge_list %s" % edge_list)
# For each spanning-tree edge touching our site, pick local and
# remote bridgeheads and create/refresh the connection.
1640 if e.directed and e.vertices[0].site is self.my_site: # more accurate comparison?
1643 if e.vertices[0].site is self.my_site:
1644 rsite = e.vertices[1].site
1646 rsite = e.vertices[0].site
1648 # We don't make connections to our own site as that
1649 # is intrasite topology generator's job
1650 if rsite is self.my_site:
1651 DEBUG("rsite is my_site")
1654 # Determine bridgehead server in remote site
1655 rbh = self.get_bridgehead(rsite, part, transport,
1656 partial_ok, detect_failed)
1660 # RODC acts as an BH for itself
1662 # LET lbh be the nTDSDSA object of the local DC
1664 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1665 # cr, t, partialReplicaOkay, detectFailedDCs)
1666 if self.my_dsa.is_ro():
1667 lsite = self.my_site
1670 lsite = self.my_site
1671 lbh = self.get_bridgehead(lsite, part, transport,
1672 partial_ok, detect_failed)
1675 DEBUG_RED("DISASTER! lbh is None")
1680 DEBUG_BLUE("vertices")
1682 DEBUG_BLUE("bridgeheads")
1684 DEBUG_BLUE("-" * 70)
1686 sitelink = e.site_link
1687 if sitelink is None:
1691 link_opt = sitelink.options
1692 link_sched = sitelink.schedule
1694 self.create_connection(part, rbh, rsite, transport,
1695 lbh, lsite, link_opt, link_sched,
1696 partial_ok, detect_failed)
1698 return all_connected, found_failed
1700 def create_intersite_connections(self):
1701 """Computes an NC replica graph for each NC replica that "should be
1702 present" on the local DC or "is present" on any DC in the same site
1703 as the local DC. For each edge directed to an NC replica on such a
1704 DC from an NC replica on a DC in another site, the KCC creates an
1705 nTDSConnection object to imply that edge if one does not already
1708 Modifies self.kept_connections - A set of nTDSConnection
1709 objects for edges that are directed
1710 to the local DC's site in one or more NC replica graphs.
1712 returns: True if spanning trees were created for all NC replica
1713 graphs, otherwise False.
1715 all_connected = True
1716 self.kept_connections = set()
1718 # LET crossRefList be the set containing each object o of class
1719 # crossRef such that o is a child of the CN=Partitions child of the
1722 # FOR each crossRef object cr in crossRefList
1723 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1724 # is clear in cr!systemFlags, skip cr.
1725 # LET g be the GRAPH return of SetupGraph()
# Skip disabled and foreign partitions, then build the site graph
# and create connections for each remaining partition.
1727 for part in self.part_table.values():
1729 if not part.is_enabled():
1732 if part.is_foreign():
1735 graph = self.setup_graph(part)
1737 # Create nTDSConnection objects, routing replication traffic
1738 # around "failed" DCs.
1739 found_failed = False
1741 connected, found_failed = self.create_connections(graph, part, True)
1743 DEBUG("with detect_failed: connected %s Found failed %s" % (connected, found_failed))
1745 all_connected = False
1748 # One or more failed DCs preclude use of the ideal NC
1749 # replica graph. Add connections for the ideal graph.
1750 self.create_connections(graph, part, False)
1752 return all_connected
1754 def get_spanning_tree_edges(self, graph, label=None):
# Compute spanning-tree edges for the intersite graph in three
# phases (Dijkstra per edge-set, Kruskal, post-processing).
# ::returns: (edge_list, component_count) via copy_output_edges().
1755 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
1756 # just the shortest-paths connecting colored vertices
1758 internal_edges = set()
1760 for e_set in graph.edge_set:
1762 for v in graph.vertices:
1765 # All con_type in an edge set is the same
1766 for e in e_set.edges:
1767 edgeType = e.con_type
1768 for v in e.vertices:
1771 if opts.verify or opts.dot_files:
1772 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
1773 for a, b in itertools.chain(*(itertools.combinations(edge.vertices, 2)
1774 for edge in e_set.edges))]
1775 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1778 write_dot_file('edgeset_%s' % (edgeType,), graph_edges, vertices=graph_nodes,
1782 verify_graph('spanning tree edge set %s' % edgeType, graph_edges, vertices=graph_nodes,
1783 properties=('complete', 'connected'), debug=DEBUG)
1785 # Run dijkstra's algorithm with just the red vertices as seeds
1786 # Seed from the full replicas
1787 dijkstra(graph, edgeType, False)
1790 process_edge_set(graph, e_set, internal_edges)
1792 # Run dijkstra's algorithm with red and black vertices as the seeds
1793 # Seed from both full and partial replicas
1794 dijkstra(graph, edgeType, True)
1797 process_edge_set(graph, e_set, internal_edges)
1799 # All vertices have root/component as itself
1800 setup_vertices(graph)
1801 process_edge_set(graph, None, internal_edges)
1803 if opts.verify or opts.dot_files:
1804 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1805 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1806 verify_properties = ('multi_edge_forest',)
1807 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
1808 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1809 dot_files=opts.dot_files)
1812 # Phase 2: Run Kruskal's on the internal edges
1813 output_edges, components = kruskal(graph, internal_edges)
1815 # This recalculates the cost for the path connecting the closest red vertex
1816 # Ignoring types is fine because NO suboptimal edge should exist in the graph
1817 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
1818 # Phase 3: Process the output
# Record each vertex's distance to the nearest red (full replica)
# vertex, used by copy_output_edges() to orient directed edges.
1819 for v in graph.vertices:
1823 v.dist_to_red = v.repl_info.cost
1825 if opts.verify or opts.dot_files:
1826 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1827 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1828 verify_properties = ('multi_edge_forest',)
1829 verify_and_dot('postkruskal', graph_edges, graph_nodes, label=label,
1830 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1831 dot_files=opts.dot_files)
1833 # count the components
1834 return self.copy_output_edges(graph, output_edges), components
1836 # This ensures only one-way connections for partial-replicas
1837 def copy_output_edges(self, graph, output_edges):
# Orient edges that touch the local DC's site so replication flows
# toward the vertex closer to a red (full) replica; presumably
# returns edge_list (the return statement is elided here).
1839 vid = self.my_site # object guid for the local dc's site
1841 for edge in output_edges:
1842 # Three-way edges are no problem here since these were created by
1843 # add_out_edge which only has two endpoints
1844 v = edge.vertices[0]
1845 w = edge.vertices[1]
1846 if v.site is vid or w.site is vid:
1847 if (v.is_black() or w.is_black()) and not v.dist_to_red == MAX_DWORD:
1848 edge.directed = True
# Swap endpoints so vertices[0] is the end nearer a red vertex.
1850 if w.dist_to_red < v.dist_to_red:
1851 edge.vertices[0] = w
1852 edge.vertices[1] = v
1854 edge_list.append(edge)
1858 def intersite(self):
1859 """The head method for generating the inter-site KCC replica
1860 connection graph and attendant nTDSConnection objects
1863 Produces self.kept_connections set of NTDS Connections
1864 that should be kept during subsequent pruning process.
1866 ::return (True or False): (True) if the produced NC replica
1867 graph connects all sites that need to be connected
1872 mysite = self.my_site
1873 all_connected = True
1875 logger.debug("intersite(): enter")
1877 # Determine who is the ISTG
# ro=True vs ro=False branch: presumably selected by the readonly
# command-line option (the guard line is elided here).
1879 mysite.select_istg(self.samdb, mydsa, ro=True)
1881 mysite.select_istg(self.samdb, mydsa, ro=False)
1883 # Test whether local site has topology disabled
1884 if mysite.is_intersite_topology_disabled():
1885 logger.debug("intersite(): exit disabled all_connected=%d" %
1887 return all_connected
# Only the ISTG (inter-site topology generator) proceeds further.
1889 if not mydsa.is_istg():
1890 logger.debug("intersite(): exit not istg all_connected=%d" %
1892 return all_connected
1894 self.merge_failed_links()
1896 # For each NC with an NC replica that "should be present" on the
1897 # local DC or "is present" on any DC in the same site as the
1898 # local DC, the KCC constructs a site graph--a precursor to an NC
1899 # replica graph. The site connectivity for a site graph is defined
1900 # by objects of class interSiteTransport, siteLink, and
1901 # siteLinkBridge in the config NC.
1903 all_connected = self.create_intersite_connections()
1905 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1906 return all_connected
1908 def update_rodc_connection(self):
1909 """Runs when the local DC is an RODC and updates the RODC NTFRS
1912 # Given an nTDSConnection object cn1, such that cn1.options contains
1913 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1914 # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1915 # that the following is true:
1917 # cn1.fromServer = cn2.fromServer
1918 # cn1.schedule = cn2.schedule
1920 # If no such cn2 can be found, cn1 is not modified.
1921 # If no such cn1 can be found, nothing is modified by this task.
# No-op on writable DCs.
1923 if not self.my_dsa.is_ro():
1926 all_connections = self.my_dsa.connect_table.values()
1927 ro_connections = [x for x in all_connections if x.is_rodc_topology()]
1928 rw_connections = [x for x in all_connections if x not in ro_connections]
1930 # XXX here we are dealing with multiple RODC_TOPO connections,
1931 # if they exist. It is not clear whether the spec means that
1932 # or if it ever arises.
# Mirror the first non-RODC connection's fromServer/schedule onto
# every RODC-topology connection and flag it for commit.
1933 if rw_connections and ro_connections:
1934 for con in ro_connections:
1935 cn2 = rw_connections[0]
1936 con.from_dnstr = cn2.from_dnstr
1937 con.schedule = cn2.schedule
1938 con.to_be_modified = True
1940 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
1942 def intrasite_max_node_edges(self, node_count):
1943 """Returns the maximum number of edges directed to a node in
1944 the intrasite replica graph.
1946 The KCC does not create more
1947 than 50 edges directed to a single DC. To optimize replication,
1948 we compute that each node should have n+2 total edges directed
1949 to it such that (n) is the smallest non-negative integer
1950 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1952 (If the number of edges is m (i.e. n + 2), that is the same as
1953 2 * m*m - 2 * m + 3).
1963 :param node_count: total number of nodes in the replica graph
# NOTE(review): n is presumably initialized to 0 and incremented
# in a search loop around this test; those lines are not visible
# here — confirm against the full source.
1967 if node_count <= (2 * (n * n) + (6 * n) + 7):
1975 def construct_intrasite_graph(self, site_local, dc_local,
1976 nc_x, gc_only, detect_stale):
1978 # We're using the MS notation names here to allow
1979 # correlation back to the published algorithm.
1981 # nc_x - naming context (x) that we are testing if it
1982 # "should be present" on the local DC
1983 # f_of_x - replica (f) found on a DC (s) for NC (x)
1984 # dc_s - DC where f_of_x replica was found
1985 # dc_local - local DC that potentially needs a replica
1987 # r_list - replica list R
1988 # p_of_x - replica (p) is partial and found on a DC (s)
1990 # l_of_x - replica (l) is the local replica for NC (x)
1991 # that should appear on the local DC
1992 # r_len = is length of replica list |R|
1994 # If the DSA doesn't need a replica for this
1995 # partition (NC x) then continue
1996 needed, ro, partial = nc_x.should_be_present(dc_local)
1998 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
1999 "\n\tgc_only=%d" % gc_only +
2000 "\n\tdetect_stale=%d" % detect_stale +
2001 "\n\tneeded=%s" % needed +
2003 "\n\tpartial=%s" % partial +
2007 DEBUG_RED("%s lacks 'should be present' status, aborting construct_intersite_graph!" %
2011 # Create a NCReplica that matches what the local replica
2012 # should say. We'll use this below in our r_list
2013 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
2016 l_of_x.identify_by_basedn(self.samdb)
2018 l_of_x.rep_partial = partial
2021 # Add this replica that "should be present" to the
2022 # needed replica table for this DSA
2023 dc_local.add_needed_replica(l_of_x)
2027 # Let R be a sequence containing each writable replica f of x
2028 # such that f "is present" on a DC s satisfying the following
2031 # * s is a writable DC other than the local DC.
2033 # * s is in the same site as the local DC.
2035 # * If x is a read-only full replica and x is a domain NC,
2036 # then the DC's functional level is at least
2037 # DS_BEHAVIOR_WIN2008.
2039 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
2040 # in the options attribute of the site settings object for
2041 # the local DC's site, or no tuple z exists in the
2042 # kCCFailedLinks or kCCFailedConnections variables such
2043 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
2044 # for s, z.FailureCount > 0, and the current time -
2045 # z.TimeFirstFailure > 2 hours.
2049 # We'll loop thru all the DSAs looking for
2050 # writeable NC replicas that match the naming
2051 # context dn for (nc_x)
2053 for dc_s in self.my_site.dsa_table.values():
2054 # If this partition (nc_x) doesn't appear as a
2055 # replica (f_of_x) on (dc_s) then continue
2056 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2059 # Pull out the NCReplica (f) of (x) with the dn
2060 # that matches NC (x) we are examining.
2061 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2063 # Replica (f) of NC (x) must be writable
2067 # Replica (f) of NC (x) must satisfy the
2068 # "is present" criteria for DC (s) that
2070 if not f_of_x.is_present():
2073 # DC (s) must be a writable DSA other than
2074 # my local DC. In other words we'd only replicate
2075 # from other writable DC
2076 if dc_s.is_ro() or dc_s is dc_local:
2079 # Certain replica graphs are produced only
2080 # for global catalogs, so test against
2081 # method input parameter
2082 if gc_only and not dc_s.is_gc():
2085 # DC (s) must be in the same site as the local DC
2086 # as this is the intra-site algorithm. This is
2087 # handled by virtue of placing DSAs in per
2088 # site objects (see enclosing for() loop)
2090 # If NC (x) is intended to be read-only full replica
2091 # for a domain NC on the target DC then the source
2092 # DC should have functional level at minimum WIN2008
2094 # Effectively we're saying that in order to replicate
2095 # to a targeted RODC (which was introduced in Windows 2008)
2096 # then we have to replicate from a DC that is also minimally
2099 # You can also see this requirement in the MS special
2100 # considerations for RODC which state that to deploy
2101 # an RODC, at least one writable domain controller in
2102 # the domain must be running Windows Server 2008
2103 if ro and not partial and nc_x.nc_type == NCType.domain:
2104 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2107 # If we haven't been told to turn off stale connection
2108 # detection and this dsa has a stale connection then
2110 if detect_stale and self.is_stale_link_connection(dc_s):
2113 # Replica meets criteria. Add it to table indexed
2114 # by the GUID of the DC that it appears on
2115 r_list.append(f_of_x)
2117 # If a partial (not full) replica of NC (x) "should be present"
2118 # on the local DC, append to R each partial replica (p of x)
2119 # such that p "is present" on a DC satisfying the same
2120 # criteria defined above for full replica DCs.
2122 # XXX This loop and the previous one differ only in whether
2123 # the replica is partial or not. here we only accept partial
2124 # (because we're partial); before we only accepted full. Order
2125 # doen't matter (the list is sorted a few lines down) so these
2126 # loops could easily be merged. Or this could be a helper
2130 # Now we loop thru all the DSAs looking for
2131 # partial NC replicas that match the naming
2132 # context dn for (NC x)
2133 for dc_s in self.my_site.dsa_table.values():
2135 # If this partition NC (x) doesn't appear as a
2136 # replica (p) of NC (x) on the dsa DC (s) then
2138 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2141 # Pull out the NCReplica with the dn that
2142 # matches NC (x) we are examining.
2143 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2145 # Replica (p) of NC (x) must be partial
2146 if not p_of_x.is_partial():
2149 # Replica (p) of NC (x) must satisfy the
2150 # "is present" criteria for DC (s) that
2152 if not p_of_x.is_present():
2155 # DC (s) must be a writable DSA other than
2156 # my DSA. In other words we'd only replicate
2157 # from other writable DSA
2158 if dc_s.is_ro() or dc_s is dc_local:
2161 # Certain replica graphs are produced only
2162 # for global catalogs, so test against
2163 # method input parameter
2164 if gc_only and not dc_s.is_gc():
2167 # DC (s) must be in the same site as the local DC
2168 # as this is the intra-site algorithm. This is
2169 # handled by virtue of placing DSAs in per
2170 # site objects (see enclosing for() loop)
2172 # This criteria is moot (a no-op) for this case
2173 # because we are scanning for (partial = True). The
2174 # MS algorithm statement says partial replica scans
2175 # should adhere to the "same" criteria as full replica
2176 # scans so the criteria doesn't change here...its just
2177 # rendered pointless.
2179 # The case that is occurring would be a partial domain
2180 # replica is needed on a local DC global catalog. There
2181 # is no minimum windows behavior for those since GCs
2182 # have always been present.
2183 if ro and not partial and nc_x.nc_type == NCType.domain:
2184 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2187 # If we haven't been told to turn off stale connection
2188 # detection and this dsa has a stale connection then
2190 if detect_stale and self.is_stale_link_connection(dc_s):
2193 # Replica meets criteria. Add it to table indexed
2194 # by the GUID of the DSA that it appears on
2195 r_list.append(p_of_x)
2197 # Append to R the NC replica that "should be present"
2199 r_list.append(l_of_x)
2201 r_list.sort(sort_replica_by_dsa_guid)
2204 max_node_edges = self.intrasite_max_node_edges(r_len)
2206 # Add a node for each r_list element to the replica graph
2209 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2210 graph_list.append(node)
2212 # For each r(i) from (0 <= i < |R|-1)
2214 while i < (r_len-1):
2215 # Add an edge from r(i) to r(i+1) if r(i) is a full
2216 # replica or r(i+1) is a partial replica
2217 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2218 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2220 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2221 # replica or ri is a partial replica.
2222 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2223 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2226 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2227 # or r0 is a partial replica.
2228 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2229 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2231 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2232 # r|R|-1 is a partial replica.
2233 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2234 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
2236 DEBUG("r_list is length %s" % len(r_list))
2237 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr)) for x in r_list))
2239 if opts.verify or opts.dot_files:
2241 dot_vertices = set()
2242 for v1 in graph_list:
2243 dot_vertices.add(v1.dsa_dnstr)
2244 for v2 in v1.edge_from:
2245 dot_edges.append((v2, v1.dsa_dnstr))
2246 dot_vertices.add(v2)
2248 verify_properties = ('connected', 'directed_double_ring')
2249 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2250 label='%s__%s__%s' % (site_local.site_dnstr, nctype_lut[nc_x.nc_type], nc_x.nc_dnstr),
2251 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2252 dot_files=opts.dot_files, directed=True)
2256 # For each existing nTDSConnection object implying an edge
2257 # from rj of R to ri such that j != i, an edge from rj to ri
2258 # is not already in the graph, and the total edges directed
2259 # to ri is less than n+2, the KCC adds that edge to the graph.
2260 for vertex in graph_list:
2261 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2262 for connect in dsa.connect_table.values():
2263 remote = connect.from_dnstr
2264 if remote in self.my_site.dsa_table:
2265 vertex.add_edge_from(remote)
2267 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2268 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2270 for tnode in graph_list:
2271 # To optimize replication latency in sites with many NC replicas, the
2272 # KCC adds new edges directed to ri to bring the total edges to n+2,
2273 # where the NC replica rk of R from which the edge is directed
2274 # is chosen at random such that k != i and an edge from rk to ri
2275 # is not already in the graph.
2277 # Note that the KCC tech ref does not give a number for the definition
2278 # of "sites with many NC replicas". At a bare minimum to satisfy
2279 # n+2 edges directed at a node we have to have at least three replicas
2280 # in |R| (i.e. if n is zero then at least replicas from two other graph
2281 # nodes may direct edges to us).
2282 if r_len >= 3 and not tnode.has_sufficient_edges():
2283 candidates = [x for x in graph_list if (x is not tnode and
2284 x.dsa_dnstr not in tnode.edge_from)]
2286 DEBUG_BLUE("looking for random link for %s. r_len %d, graph len %d candidates %d"
2287 % (tnode.dsa_dnstr, r_len, len(graph_list), len(candidates)))
2289 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2291 while candidates and not tnode.has_sufficient_edges():
2292 other = random.choice(candidates)
2293 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2294 if not tnode.add_edge_from(other):
2295 DEBUG_RED("could not add %s" % other.dsa_dstr)
2296 candidates.remove(other)
2298 DEBUG_CYAN("not adding links to %s: nodes %s, links is %s/%s" %
2299 (tnode.dsa_dnstr, r_len, len(tnode.edge_from), tnode.max_edges))
2302 # Print the graph node in debug mode
2303 logger.debug("%s" % tnode)
2305 # For each edge directed to the local DC, ensure a nTDSConnection
2306 # points to us that satisfies the KCC criteria
2308 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2309 tnode.add_connections_from_edges(dc_local)
2312 if opts.verify or opts.dot_files:
2314 dot_vertices = set()
2315 for v1 in graph_list:
2316 dot_vertices.add(v1.dsa_dnstr)
2317 for v2 in v1.edge_from:
2318 dot_edges.append((v2, v1.dsa_dnstr))
2319 dot_vertices.add(v2)
2321 verify_properties = ('connected', 'directed_double_ring_or_small')
2322 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2323 label='%s__%s__%s' % (site_local.site_dnstr, nctype_lut[nc_x.nc_type], nc_x.nc_dnstr),
2324 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2325 dot_files=opts.dot_files, directed=True)
    def intrasite(self):
        """The head method for generating the intra-site KCC replica
        connection graph and attendant nTDSConnection objects

        Follows the published algorithm ([MS-ADTS] 6.2.2): build the
        replica graph for every partition, again for the config NC
        restricted to GC servers, then repeat both passes with
        stale-link detection switched off.
        """
        logger.debug("intrasite(): enter")

        # Test whether local site has topology disabled
        mysite = self.my_site
        if mysite.is_intrasite_topology_disabled():

        # Honour stale-link detection unless the site settings disable it.
        detect_stale = (not mysite.is_detect_stale_disabled())
        # NOTE(review): 'mydsa' is not assigned in the visible code --
        # presumably self.my_dsa; confirm against the full source.
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_CYAN("TO BE ADDED:\n%s" % connect)

        # Loop thru all the partitions, with gc_only False
        for partdn, part in self.part_table.items():
            self.construct_intrasite_graph(mysite, mydsa, part, False,
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

        # If the DC is a GC server, the KCC constructs an additional NC
        # replica graph (and creates nTDSConnection objects) for the
        # config NC as above, except that only NC replicas that "are present"
        # on GC servers are added to R.
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)

        # Do it again, with gc_only True
        for partdn, part in self.part_table.items():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,

        # The DC repeats the NC replica graph computation and nTDSConnection
        # creation for each of the NC replica graphs, this time assuming
        # that no DC has failed. It does so by re-executing the steps as
        # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
        # set in the options attribute of the site settings object for
        # the local DC's site. (ie. we set "detec_stale" flag to False)
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

        # Loop thru all the partitions.
        for partdn, part in self.part_table.items():
            self.construct_intrasite_graph(mysite, mydsa, part, False,
                                           False)  # don't detect stale

        # If the DC is a GC server, the KCC constructs an additional NC
        # replica graph (and creates nTDSConnection objects) for the
        # config NC as above, except that only NC replicas that "are present"
        # on GC servers are added to R.
        for connect in mydsa.connect_table.values():
            if connect.to_be_added:
                DEBUG_RED("TO BE ADDED:\n%s" % connect)

        for partdn, part in self.part_table.items():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,
                                               False)  # don't detect stale

            # Display any to be added or modified repsFrom
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    logger.info("TO BE DELETED:\n%s" % connect)
                if connect.to_be_modified:
                    logger.info("TO BE MODIFIED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_GREEN("TO BE ADDED:\n%s" % connect)

            # ro=True: report only, do not write to the database.
            mydsa.commit_connections(self.samdb, ro=True)
            # Commit any newly created connections to the samdb
            mydsa.commit_connections(self.samdb)
    def list_dsas(self):
        """Compile a list of DSA DN strings for every DSA in every
        site, with the leading "CN=NTDS Settings," RDN stripped.
        """
        self.load_all_sites()
        self.load_all_partitions()
        self.load_all_transports()
        self.load_all_sitelinks()

        # NOTE(review): 'dsas' is not initialised in the visible code;
        # presumably 'dsas = []' precedes this loop and the list is
        # returned afterwards -- confirm against the full source.
        for site in self.site_table.values():
            dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
                         for dsa in site.dsa_table.values()])
2429 def load_samdb(self, dburl, lp, creds):
2430 self.samdb = SamDB(url=dburl,
2431 session_info=system_session(),
2432 credentials=creds, lp=lp)
    def plot_all_connections(self, basename, verify_properties=()):
        """Verify and/or draw a dot graph of every nTDSConnection in
        every site; RODC-topology connections are coloured red, the
        rest blue.

        :param basename: name stem for the output graph
        :param verify_properties: graph properties to check; verification
            only runs when properties are given and --verify is set
        """
        verify = verify_properties and opts.verify
        plot = opts.dot_files
        if not (verify or plot):

        # NOTE(review): dot_edges/dot_vertices/colours are not
        # initialised in the visible code -- confirm.
        for site in self.site_table.values():
            for dsa in site.dsa_table.values():
                dot_vertices.append(dsa.dsa_dnstr)
                for con in dsa.connect_table.values():
                    # edge colour records whether the connection belongs
                    # to the RODC topology
                    if con.is_rodc_topology():
                        colours.append('red')
                        colours.append('blue')
                    dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

        verify_and_dot(basename, dot_edges, vertices=dot_vertices,
                       label=self.my_dsa_dnstr, properties=verify_properties,
                       debug=DEBUG, verify=verify, dot_files=plot,
                       directed=True, edge_colors=colours)
    def run(self, dburl, lp, creds, forced_local_dsa=None,
            forget_local_links=False, forget_intersite_links=False):
        """Method to perform a complete run of the KCC and
        produce an updated topology for subsequent NC replica
        syncronization between domain controllers

        :param dburl: database to open (unless one is already loaded,
            e.g. from an ldif import)
        :param forced_local_dsa: run as if this DSA DN were local
        :param forget_local_links: drop existing local (non-RODC) links
        :param forget_intersite_links: drop existing intersite links
        """
        # We may already have a samdb setup if we are
        # currently importing an ldif for a test run
        if self.samdb is None:
            try:
                self.load_samdb(dburl, lp, creds)
            except ldb.LdbError, (num, msg):
                logger.error("Unable to open sam database %s : %s" %

        if forced_local_dsa:
            self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % forced_local_dsa)

        # Cache all topology-relevant objects up front.
        self.load_all_sites()
        self.load_all_partitions()
        self.load_all_transports()
        self.load_all_sitelinks()

        # Optional pre-run verification/plotting of the initial state.
        # NOTE(review): guid_to_dnstr and the dot_edges lists are not
        # initialised in the visible code -- confirm.
        if opts.verify or opts.dot_files:
            for site in self.site_table.values():
                guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                                     for dnstr, dsa in site.dsa_table.items())

            self.plot_all_connections('dsa_initial')

            # local DSA's current repsFrom edges
            current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
            for dnstr, c_rep in current_rep_table.items():
                DEBUG("c_rep %s" % c_rep)
                dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))

            verify_and_dot('dsa_repsFrom_initial', dot_edges, directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

            # repsFrom edges for every DSA in every site
            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_rep_table, needed_rep_table = dsa.get_rep_tables()
                    for dn_str, rep in current_rep_table.items():
                        for reps_from in rep.rep_repsFrom:
                            DEBUG("rep %s" % rep)
                            dsa_dn = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))

            verify_and_dot('dsa_repsFrom_initial_all', dot_edges, directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

            # site-link graph: an undirected edge per pair of linked sites
            for link in self.sitelink_table.values():
                for a, b in itertools.combinations(link.site_list, 2):
                    dot_edges.append((str(a), str(b)))
            verify_properties = ('connected',)
            verify_and_dot('dsa_sitelink_initial', dot_edges, directed=False, label=self.my_dsa_dnstr,
                           properties=verify_properties, debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

        # Optionally discard existing topology (keeping RODC-topology
        # connections) so the KCC rebuilds from scratch.
        if forget_local_links:
            for dsa in self.my_site.dsa_table.values():
                dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                     if v.is_rodc_topology()}
            self.plot_all_connections('dsa_forgotten_local')

        if forget_intersite_links:
            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                         if site is self.my_site and v.is_rodc_topology()}

            self.plot_all_connections('dsa_forgotten_all')
        # These are the published steps (in order) for the
        # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)

        self.refresh_failed_links_connections()

        all_connected = self.intersite()

        self.remove_unneeded_ntdsconn(all_connected)

        self.translate_ntdsconn()

        self.remove_unneeded_failed_links_connections()

        self.update_rodc_connection()

        # Optional post-run verification/plotting of the final state.
        if opts.verify or opts.dot_files:
            self.plot_all_connections('dsa_final', ('connected', 'forest_of_rings'))

            DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))

            my_dnstr = self.my_dsa.dsa_dnstr
            current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
            for dnstr, n_rep in needed_rep_table.items():
                for reps_from in n_rep.rep_repsFrom:
                    guid_str = str(reps_from.source_dsa_obj_guid)
                    dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
                    # edge colour derived from the NC guid prefix
                    edge_colors.append('#' + str(n_rep.nc_guid)[:6])

            verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files, edge_colors=edge_colors)

            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_rep_table, needed_rep_table = dsa.get_rep_tables()
                    for n_rep in needed_rep_table.values():
                        for reps_from in n_rep.rep_repsFrom:
                            dsa_dn = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))

            verify_and_dot('dsa_repsFrom_final_all', dot_edges, directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)
    def import_ldif(self, dburl, lp, creds, ldif_file):
        """Import all objects and attributes that are relevent
        to the KCC algorithms from a previously exported LDIF file.

        The point of this function is to allow a programmer/debugger to
        import an LDIF file with non-security relevent information that
        was previously extracted from a DC database. The LDIF file is used
        to create a temporary abbreviated database. The KCC algorithm can
        then run against this abbreviated database for debug or test
        verification that the topology generated is computationally the
        same between different OSes and algorithms.

        :param dburl: path to the temporary abbreviated db to create
        :param ldif_file: path to the ldif file to import
        """
            # Delegates all the work to the ldif_utils helper.
            self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, creds, ldif_file,
                                                  opts.forced_local_dsa)
        except ldif_utils.LdifError, e:
    def export_ldif(self, dburl, lp, creds, ldif_file):
        """Routine to extract all objects and attributes that are relevent
        to the KCC algorithms from a DC database.

        The point of this function is to allow a programmer/debugger to
        extract an LDIF file with non-security relevent information from
        a DC database. The LDIF file can then be used to "import" via
        the import_ldif() function this file into a temporary abbreviated
        database. The KCC algorithm can then run against this abbreviated
        database for debug or test verification that the topology generated
        is computationally the same between different OSes and algorithms.

        :param dburl: LDAP database URL to extract info from
        :param ldif_file: output LDIF file name to create
        """
            # Delegates all the work to the ldif_utils helper.
            ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds, ldif_file)
        except ldif_utils.LdifError, e:
2660 ##################################################
2662 ##################################################
def sort_replica_by_dsa_guid(rep1, rep2):
    """cmp()-style comparator ordering NC replicas by DSA GUID.

    The GUIDs are compared in their NDR-packed (binary) form.
    """
    packed1 = ndr_pack(rep1.rep_dsa_guid)
    packed2 = ndr_pack(rep2.rep_dsa_guid)
    return cmp(packed1, packed2)
# cmp()-style comparator: global-catalog DSAs order before non-GC DSAs,
# with ties broken by the NDR-packed DSA GUID.
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    # NOTE(review): the two branches below appear to be missing their
    # return statements (-1 / +1) in this copy -- confirm against the
    # full source.
    if dsa1.is_gc() and not dsa2.is_gc():
    if not dsa1.is_gc() and dsa2.is_gc():
    return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
def is_smtp_replication_available():
    """Currently always returns false because Samba
    doesn't implement SMTP transfer for NC changes
    between domain controllers.
    """
def create_edge(con_type, site_link, guid_to_vertex):
    # Build a multi-edge for one site link: attach the vertex objects
    # for each site in the link and copy the link's replication
    # parameters (cost, options, interval, schedule).
    # NOTE(review): 'e' is not created in the visible code -- presumably
    # a MultiEdge() constructed just above; confirm.
    e.site_link = site_link
    for site_guid in site_link.site_list:
        if str(site_guid) in guid_to_vertex:
            e.vertices.extend(guid_to_vertex.get(str(site_guid)))
    e.repl_info.cost = site_link.cost
    e.repl_info.options = site_link.options
    e.repl_info.interval = site_link.interval
    # schedule is converted from its stored blob to repl-times form
    e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
    e.con_type = con_type
def create_auto_edge_set(graph, transport):
    # Collect every graph edge using the given transport into a single
    # MultiEdgeSet that is not backed by any siteLinkBridge object.
    e_set = MultiEdgeSet()
    e_set.guid = misc.GUID() # NULL guid, not associated with a SiteLinkBridge object
    for site_link in graph.edges:
        if site_link.con_type == transport:
            e_set.edges.append(site_link)
def create_edge_set(graph, transport, site_link_bridge):
    # Build the MultiEdgeSet for a specific site link bridge.
    # TODO not implemented - need to store all site link bridges
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
def setup_vertices(graph):
    # Reset each vertex's replication info before a shortest-path run:
    # white (replica-less) vertices start effectively unreachable,
    # coloured vertices become zero-cost roots.
    for v in graph.vertices:
            v.repl_info.cost = MAX_DWORD
            v.component_id = None
            v.repl_info.cost = 0
        # common reset for every vertex
        v.repl_info.interval = 0
        v.repl_info.options = 0xFFFFFFFF
        v.repl_info.schedule = None # TODO highly suspicious
def dijkstra(graph, edge_type, include_black):
    # Shortest-path computation over the site graph using a heap-based
    # priority queue seeded by setup_dijkstra().
    # NOTE(review): 'queue' is not initialised in the visible code --
    # presumably 'queue = []' just above; confirm.
    setup_dijkstra(graph, edge_type, include_black, queue)
    while len(queue) > 0:
        cost, guid, vertex = heapq.heappop(queue)
        # relax every edge leaving the cheapest unprocessed vertex
        for edge in vertex.edges:
            for v in edge.vertices:
                    # add new path from vertex to v
                    try_new_path(graph, queue, vertex, edge, v)
def setup_dijkstra(graph, edge_type, include_black, queue):
    # Reset the vertices, then seed the priority queue: white vertices
    # are skipped, ineligible coloured vertices are marked unreachable,
    # and the remainder are pushed onto the heap.
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():

        # Excluded black vertex, or a vertex that accepts neither black
        # nor red-red edges of this type: cannot participate.
        if ((vertex.is_black() and not include_black)
            or edge_type not in vertex.accept_black
            or edge_type not in vertex.accept_red_red):
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None # NULL GUID
            vertex.demoted = True # Demoted appears not to be used
            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    # Attempt to relax the path to 'vto' via 'vfrom' plus 'edge'.
    # NOTE(review): 'newRI' is not created in the visible code --
    # presumably a fresh ReplInfo(); confirm.
    #
    # What this function checks is that there is a valid time frame for
    # which replication can actually occur, despite being adequately
    # connected
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:

    # cheaper but with no usable schedule overlap: also rejected
    if newRI.cost < vto.repl_info.cost and not intersect:

    # compare total available replication time under each schedule
    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    # Temporarily mark a coloured vertex unreachable if it cannot carry
    # edges of this type; white vertices are left alone.
    if vertex.is_white():

    # Accepts neither red-red nor black edges, demote
    if edge_type not in vertex.accept_black and edge_type not in vertex.accept_red_red:
        vertex.repl_info.cost = MAX_DWORD

        vertex.demoted = True # Demoted appears not to be used
def undemote_vertex(vertex):
    # Restore a previously demoted (coloured) vertex to root status;
    # white vertices are left alone.
    if vertex.is_white():

    vertex.repl_info.cost = 0
    vertex.root = vertex
    vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    # Without a specific edge set, process every graph edge, demoting
    # incompatible vertices for the duration of each edge's processing.
        for edge in graph.edges:
            for vertex in edge.vertices:
                check_demote_vertex(vertex, edge.con_type)
            process_edge(graph, edge, internal_edges)
            for vertex in edge.vertices:
                undemote_vertex(vertex)
        # otherwise only the edges belonging to this set are processed
        for edge in e_set.edges:
            process_edge(graph, edge, internal_edges)
def process_edge(graph, examine, internal_edges):
    # Find the set of all vertices touches the edge to examine
    # NOTE(review): 'vertices' is not initialised in the visible code --
    # presumably 'vertices = []' just above; confirm.
    for v in examine.vertices:
        # Append a 4-tuple of color, repl cost, guid and vertex
        vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
    # Sort by color, lower
    DEBUG("vertices is %s" % vertices)

    # the best (lowest-sorting) vertex becomes the common endpoint
    color, cost, guid, bestv = vertices[0]
    # Add to internal edges an edge from every colored vertex to bestV
    for v in examine.vertices:
        if v.component_id is None or v.root is None:

        # Only add edge if valid inter-tree edge - needs a root and
        # different components
        if (bestv.component_id is not None and bestv.root is not None
            and v.component_id is not None and v.root is not None and
            bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    # NOTE(review): root1/root2 (the roots of v1/v2) and the red_red
    # flag are established in code elided from this copy -- confirm.
    if root1.is_red() and root2.is_red():

    # a red-red edge requires both roots to accept red-red edges of
    # this connection type...
    if (examine.con_type not in root1.accept_red_red
        or examine.con_type not in root2.accept_red_red):
    # ...otherwise both roots must accept black edges
    if (examine.con_type not in root1.accept_black
        or examine.con_type not in root2.accept_black):

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):

    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type, examine.site_link)
    # Order by vertex guid
    #XXX guid comparison using ndr_pack
    if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    # Kruskal-style union of components over the sorted internal edges;
    # accepted edges are emitted via add_out_edge(), and the final
    # return reports the surviving component count.
    # NOTE(review): 'index', 'e' and 'output_edges' are established in
    # code elided from this copy -- confirm.
    for v in graph.vertices:

    # each non-white (coloured) vertex starts as its own component
    components = set([x for x in graph.vertices if not x.is_white()])

    # Sorted based on internal comparison function of internal edge

    expected_num_tree_edges = 0 # TODO this value makes little sense

    while index < len(edges): # TODO and num_components > 1
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            # edge joins two components: keep it and union them
            add_out_edge(graph, output_edges, e)
            parent1.component_id = parent2
            components.discard(parent1)

    return output_edges, len(components)
def find_component(vertex):
    # Union-find lookup: walk component_id links to the root of the
    # component containing 'vertex', compressing the path on the way
    # back. NOTE(review): 'current' and 'root' are assigned in code
    # elided from this copy -- confirm.
    if vertex.component_id is vertex:

    # walk up to the component root...
    while current.component_id is not current:
        current = current.component_id

    # ...then re-point every traversed vertex at the root
    while current.component_id is not root:
        n = current.component_id
        current.component_id = root
def add_out_edge(graph, output_edges, e):
    # Convert an accepted internal edge into a two-vertex multi-edge and
    # append it to output_edges.
    # NOTE(review): 'ee', 'v1' and 'v2' are established in code elided
    # from this copy -- confirm.
    # This multi-edge is a 'real' edge with no GUID
    ee.site_link = e.site_link
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)
def test_all_reps_from(lp, creds):
    """Run the KCC as every DSA in turn and verify/plot the resulting
    repsFrom graphs (driver for --test-all-reps-from).
    """
    # NOTE(review): 'kcc', 'guid_to_dnstr', the *_parts dicts and the
    # dot edge/vertex/colour lists are initialised in code elided from
    # this copy -- confirm.
    kcc.load_samdb(opts.dburl, lp, creds)
    dsas = kcc.list_dsas()

    # map DSA guids to DNs for labelling edges below
    for site in kcc.site_table.values():
        guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                             for dnstr, dsa in site.dsa_table.items())

        # run the whole KCC pretending to be this DSA
        kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa,
                forget_local_links=opts.forget_local_links,
                forget_intersite_links=opts.forget_intersite_links)
        current, needed = kcc.my_dsa.get_rep_tables()

        # accumulate (source, dest) repsFrom edges per partition, for
        # both the needed and current tables
        for name, rep_table, rep_parts in (('needed', needed, needed_parts),
                                           ('current', current, current_parts)):
            for part, nc_rep in rep_table.items():
                edges = rep_parts.setdefault(part, [])
                for reps_from in nc_rep.rep_repsFrom:
                    source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                    dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
                    edges.append((source, dest))

    # collect every nTDSConnection, colouring RODC-topology ones red
    for site in kcc.site_table.values():
        for dsa in site.dsa_table.values():
            dot_vertices.append(dsa.dsa_dnstr)
            for con in dsa.connect_table.values():
                if con.is_rodc_topology():
                    colours.append('red')
                    colours.append('blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    # one graph per (needed/current, partition) pair
    for name, rep_parts in (('needed', needed_parts), ('current', current_parts)):
        for part, edges in rep_parts.items():
            verify_and_dot('repsFrom_%s_all_%s' % (name, part), edges, directed=True, label=part,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

    verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
                   label="all dsa NTDSConnections", properties=(),
                   debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
                   directed=True, edge_colors=colours)
# Module-wide logger writing to stdout (so messages land in the samba
# logs); DEBUG is the shorthand used throughout this file.
logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))
DEBUG = logger.debug
def _colour_debug(*args, **kwargs):
    """Emit a DEBUG message with the first argument wrapped in the
    colour escape passed as kwargs['colour'] and reset with C_NORMAL;
    remaining positional args pass straight through to DEBUG().
    """
    coloured_msg = '%s%s%s' % (kwargs['colour'], args[0], C_NORMAL)
    DEBUG(coloured_msg, *args[1:])
# Generate DEBUG_RED, DEBUG_BLUE, etc. -- one coloured debug helper per
# terminal colour constant -- by currying _colour_debug with the
# matching escape code.
_globals = globals()
for _colour in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
                'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
                'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
    _globals['DEBUG_' + _colour] = partial(_colour_debug, colour=_globals[_colour])
2999 ##################################################
3000 # samba_kcc entry point
3001 ##################################################
# Command-line definition: standard samba option groups plus the
# KCC-specific flags used throughout this script via the 'opts' global.
parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly", default=False,
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--verify",
                  help="verify that assorted invariants are kept",
                  action="store_true")

parser.add_option("--list-verify-tests",
                  help="list what verification actions are available and do nothing else",
                  action="store_true")

parser.add_option("--no-dot-files", dest='dot_files',
                  help="Don't write dot graph files in /tmp",
                  default=True, action="store_false")

# NOTE(review): the --seed call appears truncated in this copy
# (its type/metavar line and closing paren are not visible) -- confirm.
parser.add_option("--seed",
                  help="random number seed",

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL" ,
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

parser.add_option("--now",
                  help="assume current time is this ('YYYYmmddHHMMSS[tz]', default: system time)",
                  type=str, metavar="<date>")

parser.add_option("--forced-local-dsa",
                  help="run calculations assuming the DSA is this DN",
                  type=str, metavar="<DSA>")

parser.add_option("--attempt-live-connections", default=False,
                  help="Attempt to connect to other DSAs to test links",
                  action="store_true")

parser.add_option("--list-valid-dsas", default=False,
                  help="Print a list of DSA dnstrs that could be used in --forced-local-dsa",
                  action="store_true")

parser.add_option("--test-all-reps-from", default=False,
                  help="Create and verify a graph of reps-from for every DSA",
                  action="store_true")

parser.add_option("--forget-local-links", default=False,
                  help="pretend not to know the existing local topology",
                  action="store_true")

parser.add_option("--forget-intersite-links", default=False,
                  help="pretend not to know the existing intersite topology",
                  action="store_true")
opts, args = parser.parse_args()

if opts.list_verify_tests:

# Log verbosity: NOTE(review): the if/elif branches selecting between
# these levels (--debug / --readonly / default) are elided in this copy.
    logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.INFO)
    logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter
    random.seed(opts.seed)
    # fixed default seed keeps runs reproducible
    random.seed(0xACE5CA11)

# Parse --now, trying with and without a timezone suffix.
    for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
            now_tuple = time.strptime(opts.now, timeformat)
        # else happens if break doesn't --> no match
        print >> sys.stderr, "could not parse time '%s'" % opts.now
    unix_now = int(time.mktime(now_tuple))
    unix_now = int(time.time())

nt_now = unix2nttime(unix_now)

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

if opts.dburl is None:
    opts.dburl = lp.samdb_url()

# --test-all-reps-from implies a read-only run
if opts.test_all_reps_from:
    opts.readonly = True
    test_all_reps_from(lp, creds)

# Instantiate Knowledge Consistency Checker and perform run
    rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)

    # an ldif import needs a plain file target, not an ldap URL
    if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
        logger.error("Specify a target temp database file with --tmpdb option.")

    rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)

if opts.list_valid_dsas:
    kcc.load_samdb(opts.dburl, lp, creds)
    print '\n'.join(kcc.list_dsas())

    rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
                 opts.forget_local_links, opts.forget_intersite_links)
3155 except GraphError, e: