3 # Compute our KCC topology
5 # Copyright (C) Dave Craft 2011
6 # Copyright (C) Andrew Bartlett 2015
8 # Andrew Bartlett's alleged work performed by his underlings Douglas
9 # Bagnall and Garming Sam.
11 # This program is free software; you can redistribute it and/or modify
12 # it under the terms of the GNU General Public License as published by
13 # the Free Software Foundation; either version 3 of the License, or
14 # (at your option) any later version.
16 # This program is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU General Public License for more details.
21 # You should have received a copy of the GNU General Public License
22 # along with this program. If not, see <http://www.gnu.org/licenses/>.
29 # ensure we get messages out immediately, so they get in the samba logs,
30 # and don't get swallowed by a timeout
31 os.environ['PYTHONUNBUFFERED'] = '1'
33 # forcing GMT avoids a problem in some timezones with kerberos. Both MIT
34 # and heimdal can get mutual authentication errors due to the 24 second
35 # difference between UTC and GMT when using some zone files (eg. the PDT
# NOTE(review): one or more original lines are elided here (numbering gap);
# the env assignment below must run before any kerberos activity.
37 os.environ["TZ"] = "GMT"
39 # Find right directory when running from source tree
40 sys.path.insert(0, "bin/python")
47 from functools import partial
57 from samba.auth import system_session
58 from samba.samdb import SamDB
59 from samba.dcerpc import drsuapi
60 from samba.kcc_utils import *
61 from samba.graph_utils import *
62 from samba import ldif_utils
65 """The Knowledge Consistency Checker class.
67 A container for objects and methods allowing a run of the KCC. Produces a
68 set of connections in the samdb for which the Distributed Replication
69 Service can then utilize to replicate naming contexts
72 """Initializes the partitions class which can hold
73 our local DCs partitions or all the partitions in
76 self.part_table = {} # partition objects
78 self.transport_table = {}
79 self.sitelink_table = {}
80 self.dsa_by_dnstr = {}
83 self.get_dsa_by_guidstr = self.dsa_by_guid.get
84 self.get_dsa = self.dsa_by_dnstr.get
86 # TODO: These should be backed by a 'permanent' store so that when
87 # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
88 # the failure information can be returned
89 self.kcc_failed_links = {}
90 self.kcc_failed_connections = set()
92 # Used in inter-site topology computation. A list
93 # of connections (by NTDSConnection object) that are
94 # to be kept when pruning un-needed NTDS Connections
95 self.kept_connections = set()
97 self.my_dsa_dnstr = None # My dsa DN
98 self.my_dsa = None # My dsa object
100 self.my_site_dnstr = None
105 def load_all_transports(self):
106 """Loads the inter-site transport objects for Sites
108 ::returns: Raises an Exception on error
# Search the config partition for every interSiteTransport object.
# NOTE(review): the `try:` line preceding this search is elided in this view.
111 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
112 self.samdb.get_config_basedn(),
113 scope=ldb.SCOPE_SUBTREE,
114 expression="(objectClass=interSiteTransport)")
115 except ldb.LdbError, (enum, estr):
116 raise Exception("Unable to find inter-site transports - (%s)" %
# NOTE(review): lines elided here — presumably the loop over `res` that
# derives `dnstr` from each message dn.
122 transport = Transport(dnstr)
124 transport.load_transport(self.samdb)
# Skip transports already present; keyed by GUID string.
127 if str(transport.guid) in self.transport_table:
130 # Assign this transport to table
132 self.transport_table[str(transport.guid)] = transport
134 def load_all_sitelinks(self):
135 """Loads the inter-site siteLink objects
137 ::returns: Raises an Exception on error
# Search the config partition for every siteLink object.
# NOTE(review): the `try:` line preceding this search is elided in this view.
140 res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
141 self.samdb.get_config_basedn(),
142 scope=ldb.SCOPE_SUBTREE,
143 expression="(objectClass=siteLink)")
144 except ldb.LdbError, (enum, estr):
145 raise Exception("Unable to find inter-site siteLinks - (%s)" % estr)
# Skip siteLinks already loaded; table is keyed by dn string.
151 if dnstr in self.sitelink_table:
154 sitelink = SiteLink(dnstr)
156 sitelink.load_sitelink(self.samdb)
158 # Assign this siteLink to table
160 self.sitelink_table[dnstr] = sitelink
162 def load_site(self, dn_str):
163 """Helper for load_my_site and load_all_sites. It puts all the site's
164 DSAs into the KCC indices.
166 site = Site(dn_str, unix_now)
167 site.load_site(self.samdb)
169 # I am not sure why, but we avoid replacing the site with an
# Only insert the site if its GUID is not already in the table.
171 guid = str(site.site_guid)
172 if guid not in self.site_table:
173 self.site_table[guid] = site
# Index every DSA of this site by dn string and by GUID string.
175 self.dsa_by_dnstr.update(site.dsa_table)
176 self.dsa_by_guid.update((str(x.dsa_guid), x) for x in site.dsa_table.values())
180 def load_my_site(self):
181 """Loads the Site class for the local DSA
183 ::returns: Raises an Exception on error
# Build the local site dn from the server's site name under CN=Sites.
185 self.my_site_dnstr = "CN=%s,CN=Sites,%s" % (
186 self.samdb.server_site_name(),
187 self.samdb.get_config_basedn())
# NOTE(review): load_site's visible body has no return statement in this
# view, so my_site may be assigned from an elided return — confirm.
189 self.my_site = self.load_site(self.my_site_dnstr)
191 def load_all_sites(self):
192 """Discover all sites and instantiate and load each
195 ::returns: Raises an Exception on error
# Search the config partition for every site object.
# NOTE(review): the `try:` line preceding this search is elided in this view.
198 res = self.samdb.search("CN=Sites,%s" %
199 self.samdb.get_config_basedn(),
200 scope=ldb.SCOPE_SUBTREE,
201 expression="(objectClass=site)")
202 except ldb.LdbError, (enum, estr):
203 raise Exception("Unable to find sites - (%s)" % estr)
# Load every discovered site into the KCC indices.
206 sitestr = str(msg.dn)
207 self.load_site(sitestr)
209 def load_my_dsa(self):
210 """Discover my nTDSDSA dn thru the rootDSE entry
212 ::returns: Raises an Exception on error.
# Resolve our own nTDSDSA object via its GUID from the rootDSE.
214 dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
216 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
217 attrs=["objectGUID"])
218 except ldb.LdbError, (enum, estr):
219 DEBUG("Search for %s failed: %s. This typically happens in"
220 " --importldif mode due to lack of module support",
223 # We work around the failure above by looking at the
224 # dsServiceName that was put in the fake rootdse by
225 # the --exportldif, rather than the
226 # samdb.get_ntds_GUID(). The disadvantage is that this
227 # mode requires we modify the @ROOTDSE dnq to support
229 service_name_res = self.samdb.search(base="", scope=ldb.SCOPE_BASE,
230 attrs=["dsServiceName"])
231 dn = ldb.Dn(self.samdb, service_name_res[0]["dsServiceName"][0])
# Retry the base search against the dn from dsServiceName.
233 res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
234 attrs=["objectGUID"])
235 except ldb.LdbError, (enum, estr):
236 raise Exception("Unable to find my nTDSDSA - (%s)" % estr)
239 raise Exception("Unable to find my nTDSDSA at %s" % dn.extended_str())
# Sanity check: the object found must carry the GUID the rootDSE reported.
241 if misc.GUID(res[0]["objectGUID"][0]) != misc.GUID(self.samdb.get_ntds_GUID()):
242 raise Exception("Did not find the GUID we expected, perhaps due to --importldif")
244 self.my_dsa_dnstr = str(res[0].dn)
# The DSA object itself was already indexed when my_site was loaded.
246 self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
248 def load_all_partitions(self):
249 """Discover all NCs thru the Partitions dn and
250 instantiate and load the NCs.
252 Each NC is inserted into the part_table by partition
253 dn string (not the nCName dn string)
255 ::returns: Raises an Exception on error
# Search for every crossRef under CN=Partitions in the config partition.
# NOTE(review): the `try:` line preceding this search is elided in this view.
258 res = self.samdb.search("CN=Partitions,%s" %
259 self.samdb.get_config_basedn(),
260 scope=ldb.SCOPE_SUBTREE,
261 expression="(objectClass=crossRef)")
262 except ldb.LdbError, (enum, estr):
263 raise Exception("Unable to find partitions - (%s)" % estr)
266 partstr = str(msg.dn)
# Skip partitions that are already loaded.
269 if partstr in self.part_table:
272 part = Partition(partstr)
274 part.load_partition(self.samdb)
275 self.part_table[partstr] = part
277 def should_be_present_test(self):
278 """Enumerate all loaded partitions and DSAs in local
279 site and test if NC should be present as replica
# Diagnostic only: logs the (needed, ro, partial) verdict for every
# (partition, local-site DSA) pair; makes no modifications.
281 for partdn, part in self.part_table.items():
282 for dsadn, dsa in self.my_site.dsa_table.items():
283 needed, ro, partial = part.should_be_present(dsa)
284 logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
285 (dsadn, part.nc_dnstr, needed, ro, partial))
287 def refresh_failed_links_connections(self):
288 """Based on MS-ADTS 6.2.2.1"""
290 # Instead of NULL link with failure_count = 0, the tuple is simply removed
292 # LINKS: Refresh failed links
293 self.kcc_failed_links = {}
294 current, needed = self.my_dsa.get_rep_tables()
295 for replica in current.values():
296 # For every possible connection to replicate
297 for reps_from in replica.rep_repsFrom:
298 failure_count = reps_from.consecutive_sync_failures
# Only repsFrom with at least one consecutive failure are tracked.
299 if failure_count <= 0:
302 dsa_guid = str(reps_from.source_dsa_obj_guid)
303 time_first_failure = reps_from.last_success
304 last_result = reps_from.last_attempt
305 dns_name = reps_from.dns_name1
# Merge into any existing failure record for the same source DSA.
307 f = self.kcc_failed_links.get(dsa_guid)
309 f = KCCFailedObject(dsa_guid, failure_count,
310 time_first_failure, last_result,
312 self.kcc_failed_links[dsa_guid] = f
313 #elif f.failure_count == 0:
314 # f.failure_count = failure_count
315 # f.time_first_failure = time_first_failure
316 # f.last_result = last_result
# Keep the worst failure count and the earliest first-failure time.
318 f.failure_count = max(f.failure_count, failure_count)
319 f.time_first_failure = min(f.time_first_failure, time_first_failure)
320 f.last_result = last_result
322 # CONNECTIONS: Refresh failed connections
323 restore_connections = set()
324 if opts.attempt_live_connections:
325 DEBUG("refresh_failed_links: checking if links are still down")
326 for connection in self.kcc_failed_connections:
# NOTE(review): the `try:` line preceding this connect is elided in this view.
328 drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
329 # Failed connection is no longer failing
330 restore_connections.add(connection)
331 except drs_utils.drsException:
332 # Failed connection still failing
333 connection.failure_count += 1
335 DEBUG("refresh_failed_links: not checking live links because we weren't\n"
336 "asked to --attempt-live-connections")
338 # Remove the restored connections from the failed connections
339 self.kcc_failed_connections.difference_update(restore_connections)
341 def is_stale_link_connection(self, target_dsa):
342 """Returns False if no tuple z exists in the kCCFailedLinks or
343 kCCFailedConnections variables such that z.UUIDDsa is the
344 objectGUID of the target dsa, z.FailureCount > 0, and
345 the current time - z.TimeFirstFailure > 2 hours.
347 # Returns True if tuple z exists...
348 failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
350 # failure_count should be > 0, but check anyways
351 if failed_link.failure_count > 0:
# Convert NT time to unix seconds so we can compare against unix_now.
352 unix_first_time_failure = nttime2unix(failed_link.time_first_failure)
353 # TODO guard against future
354 if unix_first_time_failure > unix_now:
355 logger.error("The last success time attribute for \
356 repsFrom is in the future!")
358 # Perform calculation in seconds
# Stale when the link has been failing for more than two hours.
359 if (unix_now - unix_first_time_failure) > 60 * 60 * 2:
366 # TODO: This should be backed by some form of local database
367 def remove_unneeded_failed_links_connections(self):
368 # Remove all tuples in kcc_failed_links where failure count = 0
369 # In this implementation, this should never happen.
371 # Remove all connections which were not used this run or connections
372 # that became active during this run.
375 def remove_unneeded_ntdsconn(self, all_connected):
376 """Removes unneeded NTDS Connections after computation
377 of KCC intra and inter-site topology has finished.
# NOTE(review): `mydsa` is used below but its assignment (presumably
# self.my_dsa) is elided in this view — confirm.
381 # Loop thru connections
382 for cn_conn in mydsa.connect_table.values():
# Connections without a GUID yet get a fresh one and a creation stamp.
383 if cn_conn.guid is None:
385 cn_conn.guid = misc.GUID(str(uuid.uuid4()))
386 cn_conn.whenCreated = nt_now
388 cn_conn.load_connection(self.samdb)
390 for cn_conn in mydsa.connect_table.values():
392 s_dnstr = cn_conn.get_from_dnstr()
# NOTE(review): the condition guarding this deletion is elided in this
# view — presumably "if s_dnstr is None".
394 cn_conn.to_be_deleted = True
397 # Get the source DSA no matter what site
398 s_dsa = self.get_dsa(s_dnstr)
400 # Check if the DSA is in our site
401 if self.my_site.same_site(s_dsa):
406 # Given an nTDSConnection object cn, if the DC with the
407 # nTDSDSA object dc that is the parent object of cn and
408 # the DC with the nTDSDA object referenced by cn!fromServer
409 # are in the same site, the KCC on dc deletes cn if all of
410 # the following are true:
412 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
414 # No site settings object s exists for the local DC's site, or
415 # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
418 # Another nTDSConnection object cn2 exists such that cn and
419 # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
422 # cn!whenCreated < cn2!whenCreated
424 # cn!whenCreated = cn2!whenCreated and
425 # cn!objectGUID < cn2!objectGUID
427 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
429 if not cn_conn.is_generated():
432 if self.my_site.is_cleanup_ntdsconn_disabled():
435 # Loop thru connections looking for a duplicate that
436 # fulfills the previous criteria
439 for cn2_conn in mydsa.connect_table.values():
440 if cn2_conn is cn_conn:
443 s2_dnstr = cn2_conn.get_from_dnstr()
445 # If the NTDS Connections has a different
446 # fromServer field then no match
447 if s2_dnstr != s_dnstr:
# Tie-break duplicates by creation time, then by packed GUID order.
451 lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
452 (cn_conn.whenCreated == cn2_conn.whenCreated and
453 ndr_pack(cn_conn.guid) < ndr_pack(cn2_conn.guid)))
458 if lesser and not cn_conn.is_rodc_topology():
459 cn_conn.to_be_deleted = True
461 # Given an nTDSConnection object cn, if the DC with the nTDSDSA
462 # object dc that is the parent object of cn and the DC with
463 # the nTDSDSA object referenced by cn!fromServer are in
464 # different sites, a KCC acting as an ISTG in dc's site
465 # deletes cn if all of the following are true:
467 # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
469 # cn!fromServer references an nTDSDSA object for a DC
470 # in a site other than the local DC's site.
472 # The keepConnections sequence returned by
473 # CreateIntersiteConnections() does not contain
474 # cn!objectGUID, or cn is "superseded by" (see below)
475 # another nTDSConnection cn2 and keepConnections
476 # contains cn2!objectGUID.
478 # The return value of CreateIntersiteConnections()
481 # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
484 else: # different site
486 if not mydsa.is_istg():
489 if not cn_conn.is_generated():
493 # We are directly using this connection in intersite or
494 # we are using a connection which can supersede this one.
496 # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
497 # appear to be correct.
499 # 1. cn!fromServer and cn!parent appear inconsistent with no cn2
500 # 2. The repsFrom do not imply each other
502 if cn_conn in self.kept_connections: # and not_superceded:
505 # This is the result of create_intersite_connections
506 if not all_connected:
509 if not cn_conn.is_rodc_topology():
510 cn_conn.to_be_deleted = True
# In read-only / dry-run mode, log the pending changes instead of writing.
513 if mydsa.is_ro() or opts.readonly:
514 for connect in mydsa.connect_table.values():
515 if connect.to_be_deleted:
516 DEBUG_GREEN("TO BE DELETED:\n%s" % connect)
517 if connect.to_be_added:
518 DEBUG_GREEN("TO BE ADDED:\n%s" % connect)
520 # Peform deletion from our tables but perform
521 # no database modification
522 mydsa.commit_connections(self.samdb, ro=True)
524 # Commit any modified connections
525 mydsa.commit_connections(self.samdb)
527 def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
528 """Part of MS-ADTS 6.2.2.5.
530 Update t_repsFrom if necessary to satisfy requirements. Such
531 updates are typically required when the IDL_DRSGetNCChanges
532 server has moved from one site to another--for example, to
533 enable compression when the server is moved from the
534 client's site to another site.
536 :param n_rep: NC replica we need
537 :param t_repsFrom: repsFrom tuple to modify
538 :param s_rep: NC replica at source DSA
539 :param s_dsa: source DSA
540 :param cn_conn: Local DSA NTDSConnection child
542 ::returns: (update) bit field containing which portion of the
543 repsFrom was modified. This bit field is suitable as input
544 to IDL_DRSReplicaModify ulModifyFields element, as it consists
546 drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
547 drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
548 drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS
550 s_dnstr = s_dsa.dsa_dnstr
# NOTE(review): `same_site` and `update` are used below but their
# assignments are elided in this view — confirm against full source.
553 if self.my_site.same_site(s_dsa):
558 # if schedule doesn't match then update and modify
559 times = convert_schedule_to_repltimes(cn_conn.schedule)
560 if times != t_repsFrom.schedule:
561 t_repsFrom.schedule = times
562 update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE
564 # Bit DRS_PER_SYNC is set in replicaFlags if and only
565 # if nTDSConnection schedule has a value v that specifies
566 # scheduled replication is to be performed at least once
568 if cn_conn.is_schedule_minimum_once_per_week():
570 if (t_repsFrom.replica_flags &
571 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0:
572 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
574 # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
575 # if the source DSA and the local DC's nTDSDSA object are
576 # in the same site or source dsa is the FSMO role owner
577 # of one or more FSMO roles in the NC replica.
578 if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
580 if (t_repsFrom.replica_flags &
581 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0:
582 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
584 # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
585 # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
586 # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
587 # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
588 # t.replicaFlags if and only if s and the local DC's
589 # nTDSDSA object are in different sites.
590 if (cn_conn.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0:
592 if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
594 if (t_repsFrom.replica_flags &
595 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
596 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
# NOTE(review): an elif/else branch header is elided here — the block
# below is the different-site notify case.
600 if (t_repsFrom.replica_flags &
601 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0:
602 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
604 # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
605 # and only if s and the local DC's nTDSDSA object are
606 # not in the same site and the
607 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
608 # clear in cn!options
609 if (not same_site and
611 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
613 if (t_repsFrom.replica_flags &
614 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0:
615 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
617 # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
618 # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
619 if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
621 if (t_repsFrom.replica_flags &
622 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0:
623 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
625 # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
626 # set in t.replicaFlags if and only if cn!enabledConnection = false.
627 if not cn_conn.is_enabled():
629 if (t_repsFrom.replica_flags &
630 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0:
631 t_repsFrom.replica_flags |= \
632 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
634 if (t_repsFrom.replica_flags &
635 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0:
636 t_repsFrom.replica_flags |= \
637 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
639 # If s and the local DC's nTDSDSA object are in the same site,
640 # cn!transportType has no value, or the RDN of cn!transportType
643 # Bit DRS_MAIL_REP in t.replicaFlags is clear.
645 # t.uuidTransport = NULL GUID.
647 # t.uuidDsa = The GUID-based DNS name of s.
651 # Bit DRS_MAIL_REP in t.replicaFlags is set.
653 # If x is the object with dsname cn!transportType,
654 # t.uuidTransport = x!objectGUID.
656 # Let a be the attribute identified by
657 # x!transportAddressAttribute. If a is
658 # the dNSHostName attribute, t.uuidDsa = the GUID-based
659 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
661 # It appears that the first statement i.e.
663 # "If s and the local DC's nTDSDSA object are in the same
664 # site, cn!transportType has no value, or the RDN of
665 # cn!transportType is CN=IP:"
667 # could be a slightly tighter statement if it had an "or"
668 # between each condition. I believe this should
671 # IF (same-site) OR (no-value) OR (type-ip)
673 # because IP should be the primary transport mechanism
674 # (even in inter-site) and the absense of the transportType
675 # attribute should always imply IP no matter if its multi-site
677 # NOTE MS-TECH INCORRECT:
679 # All indications point to these statements above being
680 # incorrectly stated:
682 # t.uuidDsa = The GUID-based DNS name of s.
684 # Let a be the attribute identified by
685 # x!transportAddressAttribute. If a is
686 # the dNSHostName attribute, t.uuidDsa = the GUID-based
687 # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
689 # because the uuidDSA is a GUID and not a GUID-base DNS
690 # name. Nor can uuidDsa hold (s!parent)!a if not
691 # dNSHostName. What should have been said is:
693 # t.naDsa = The GUID-based DNS name of s
695 # That would also be correct if transportAddressAttribute
696 # were "mailAddress" because (naDsa) can also correctly
697 # hold the SMTP ISM service address.
# GUID-based DNS name of the source DSA in the forest's _msdcs zone.
699 nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
701 # We're not currently supporting SMTP replication
702 # so is_smtp_replication_available() is currently
703 # always returning False
# NOTE(review): the first condition of this if (presumably `same_site or`)
# is elided in this view.
705 cn_conn.transport_dnstr is None or
706 cn_conn.transport_dnstr.find("CN=IP") == 0 or
707 not is_smtp_replication_available()):
# IP (or same-site) transport: clear mail-replication, null transport GUID.
709 if (t_repsFrom.replica_flags &
710 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0:
711 t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
713 t_repsFrom.transport_guid = misc.GUID()
715 # See (NOTE MS-TECH INCORRECT) above
716 if t_repsFrom.version == 0x1:
717 if t_repsFrom.dns_name1 is None or \
718 t_repsFrom.dns_name1 != nastr:
719 t_repsFrom.dns_name1 = nastr
# Version 2 repsFrom carries two DNS names; keep both in sync.
721 if t_repsFrom.dns_name1 is None or \
722 t_repsFrom.dns_name2 is None or \
723 t_repsFrom.dns_name1 != nastr or \
724 t_repsFrom.dns_name2 != nastr:
725 t_repsFrom.dns_name1 = nastr
726 t_repsFrom.dns_name2 = nastr
# SMTP/mail transport branch (else of the IP test above).
729 if (t_repsFrom.replica_flags &
730 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0:
731 t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP
733 # We have a transport type but its not an
734 # object in the database
735 if cn_conn.transport_guid not in self.transport_table:
736 raise Exception("Missing inter-site transport - (%s)" %
737 cn_conn.transport_dnstr)
739 x_transport = self.transport_table[str(cn_conn.transport_guid)]
741 if t_repsFrom.transport_guid != x_transport.guid:
742 t_repsFrom.transport_guid = x_transport.guid
744 # See (NOTE MS-TECH INCORRECT) above
745 if x_transport.address_attr == "dNSHostName":
747 if t_repsFrom.version == 0x1:
748 if t_repsFrom.dns_name1 is None or \
749 t_repsFrom.dns_name1 != nastr:
750 t_repsFrom.dns_name1 = nastr
752 if t_repsFrom.dns_name1 is None or \
753 t_repsFrom.dns_name2 is None or \
754 t_repsFrom.dns_name1 != nastr or \
755 t_repsFrom.dns_name2 != nastr:
756 t_repsFrom.dns_name1 = nastr
757 t_repsFrom.dns_name2 = nastr
760 # MS tech specification says we retrieve the named
761 # attribute in "transportAddressAttribute" from the parent of
764 pdnstr = s_dsa.get_parent_dnstr()
765 attrs = [ x_transport.address_attr ]
# NOTE(review): the `try:` line preceding this search is elided in this view.
767 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
769 except ldb.LdbError, (enum, estr):
771 "Unable to find attr (%s) for (%s) - (%s)" %
772 (x_transport.address_attr, pdnstr, estr))
# Use the transport address attribute value as the repsFrom name.
775 nastr = str(msg[x_transport.address_attr][0])
777 # See (NOTE MS-TECH INCORRECT) above
778 if t_repsFrom.version == 0x1:
779 if t_repsFrom.dns_name1 is None or \
780 t_repsFrom.dns_name1 != nastr:
781 t_repsFrom.dns_name1 = nastr
783 if t_repsFrom.dns_name1 is None or \
784 t_repsFrom.dns_name2 is None or \
785 t_repsFrom.dns_name1 != nastr or \
786 t_repsFrom.dns_name2 != nastr:
788 t_repsFrom.dns_name1 = nastr
789 t_repsFrom.dns_name2 = nastr
791 if t_repsFrom.is_modified():
792 logger.debug("modify_repsFrom(): %s" % t_repsFrom)
794 def is_repsFrom_implied(self, n_rep, cn_conn):
795 """Given a NC replica and NTDS Connection, determine if the connection
796 implies a repsFrom tuple should be present from the source DSA listed
797 in the connection to the naming context
799 :param n_rep: NC replica
800 :param conn: NTDS Connection
801 ::returns (True || False), source DSA:
803 # NTDS Connection must satisfy all the following criteria
804 # to imply a repsFrom tuple is needed:
806 # cn!enabledConnection = true.
807 # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
808 # cn!fromServer references an nTDSDSA object.
# NOTE(review): initialisation of the result variables (implied/s_dsa)
# before this guard is elided in this view.
811 if cn_conn.is_enabled() and not cn_conn.is_rodc_topology():
813 s_dnstr = cn_conn.get_from_dnstr()
814 if s_dnstr is not None:
815 s_dsa = self.get_dsa(s_dnstr)
817 # No DSA matching this source DN string?
821 # To imply a repsFrom tuple is needed, each of these
824 # An NC replica of the NC "is present" on the DC to
825 # which the nTDSDSA object referenced by cn!fromServer
828 # An NC replica of the NC "should be present" on
830 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
# Source must actually hold a present replica of this NC.
832 if s_rep is None or not s_rep.is_present():
835 # To imply a repsFrom tuple is needed, each of these
838 # The NC replica on the DC referenced by cn!fromServer is
839 # a writable replica or the NC replica that "should be
840 # present" on the local DC is a partial replica.
842 # The NC is not a domain NC, the NC replica that
843 # "should be present" on the local DC is a partial
844 # replica, cn!transportType has no value, or
845 # cn!transportType has an RDN of CN=IP.
847 implied = (not s_rep.is_ro() or n_rep.is_partial()) and \
848 (not n_rep.is_domain() or
849 n_rep.is_partial() or
850 cn_conn.transport_dnstr is None or
851 cn_conn.transport_dnstr.find("CN=IP") == 0)
858 def translate_ntdsconn(self):
859 """This function adjusts values of repsFrom abstract attributes of NC
860 replicas on the local DC to match those implied by
861 nTDSConnection objects.
# Honour the per-DSA flag that disables connection translation entirely.
864 if self.my_dsa.is_translate_ntdsconn_disabled():
865 logger.debug("skipping translate_ntdsconn() because disabling flag is set")
868 logger.debug("translate_ntdsconn(): enter")
870 current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
872 # Filled in with replicas we currently have that need deleting
# NOTE(review): the `delete_reps = set()` initialisation is elided in
# this view — confirm against full source.
875 # We're using the MS notation names here to allow
876 # correlation back to the published algorithm.
878 # n_rep - NC replica (n)
879 # t_repsFrom - tuple (t) in n!repsFrom
880 # s_dsa - Source DSA of the replica. Defined as nTDSDSA
881 # object (s) such that (s!objectGUID = t.uuidDsa)
882 # In our IDL representation of repsFrom the (uuidDsa)
883 # attribute is called (source_dsa_obj_guid)
884 # cn_conn - (cn) is nTDSConnection object and child of the local DC's
885 # nTDSDSA object and (cn!fromServer = s)
886 # s_rep - source DSA replica of n
888 # If we have the replica and its not needed
889 # then we add it to the "to be deleted" list.
890 for dnstr in current_rep_table:
891 if dnstr not in needed_rep_table:
892 delete_reps.add(dnstr)
895 DEBUG('current %d needed %d delete %d', len(current_rep_table),
896 len(needed_rep_table), len(delete_reps))
897 DEBUG('deleting these reps: %s', delete_reps)
898 for dnstr in delete_reps:
899 del current_rep_table[dnstr]
901 # Now perform the scan of replicas we'll need
902 # and compare any current repsFrom against the
904 for n_rep in needed_rep_table.values():
906 # load any repsFrom and fsmo roles as we'll
907 # need them during connection translation
908 n_rep.load_repsFrom(self.samdb)
909 n_rep.load_fsmo_roles(self.samdb)
911 # Loop thru the existing repsFrom tupples (if any)
912 for i, t_repsFrom in enumerate(n_rep.rep_repsFrom):
914 # for each tuple t in n!repsFrom, let s be the nTDSDSA
915 # object such that s!objectGUID = t.uuidDsa
916 guidstr = str(t_repsFrom.source_dsa_obj_guid)
917 s_dsa = self.get_dsa_by_guidstr(guidstr)
919 # Source dsa is gone from config (strange)
920 # so cleanup stale repsFrom for unlisted DSA
# NOTE(review): the `if s_dsa is None:` guard line is elided in this view.
922 logger.warning("repsFrom source DSA guid (%s) not found" %
924 t_repsFrom.to_be_deleted = True
927 s_dnstr = s_dsa.dsa_dnstr
929 # Retrieve my DSAs connection object (if it exists)
930 # that specifies the fromServer equivalent to
931 # the DSA that is specified in the repsFrom source
932 cn_conn = self.my_dsa.get_connection_by_from_dnstr(s_dnstr)
934 # Let (cn) be the nTDSConnection object such that (cn)
935 # is a child of the local DC's nTDSDSA object and
936 # (cn!fromServer = s) and (cn!options) does not contain
937 # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.
938 if cn_conn and not cn_conn.is_rodc_topology():
941 # KCC removes this repsFrom tuple if any of the following
945 # No NC replica of the NC "is present" on DSA that
946 # would be source of replica
948 # A writable replica of the NC "should be present" on
949 # the local DC, but a partial replica "is present" on
951 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
953 if cn_conn is None or \
954 s_rep is None or not s_rep.is_present() or \
955 (not n_rep.is_ro() and s_rep.is_partial()):
957 t_repsFrom.to_be_deleted = True
960 # If the KCC did not remove t from n!repsFrom, it updates t
961 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
963 # Loop thru connections and add implied repsFrom tuples
964 # for each NTDSConnection under our local DSA if the
965 # repsFrom is not already present
966 for cn_conn in self.my_dsa.connect_table.values():
968 implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
972 # Loop thru the existing repsFrom tupples (if any) and
973 # if we already have a tuple for this connection then
974 # no need to proceed to add. It will have been changed
975 # to have the correct attributes above
976 for t_repsFrom in n_rep.rep_repsFrom:
977 guidstr = str(t_repsFrom.source_dsa_obj_guid)
978 if s_dsa is self.get_dsa_by_guidstr(guidstr):
985 # Create a new RepsFromTo and proceed to modify
986 # it according to specification
987 t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
989 t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
991 s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
993 self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
995 # Add to our NC repsFrom as this is newly computed
996 if t_repsFrom.is_modified():
997 n_rep.rep_repsFrom.append(t_repsFrom)
1000 # Display any to be deleted or modified repsFrom
1001 text = n_rep.dumpstr_to_be_deleted()
1003 logger.info("TO BE DELETED:\n%s" % text)
1004 text = n_rep.dumpstr_to_be_modified()
1006 logger.info("TO BE MODIFIED:\n%s" % text)
1008 # Peform deletion from our tables but perform
1009 # no database modification
1010 n_rep.commit_repsFrom(self.samdb, ro=True)
1012 # Commit any modified repsFrom to the NC replica
1013 n_rep.commit_repsFrom(self.samdb)
1017 def merge_failed_links(self):
1018 """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
1019 The KCC on a writable DC attempts to merge the link and connection
1020 failure information from bridgehead DCs in its own site to help it
1021 identify failed bridgehead DCs.
1023 # MS-TECH Ref 6.2.2.3.2 Merge of kCCFailedLinks and kCCFailedLinks
1026 # 1. Queries every bridgehead server in your site (other than yourself)
1027 # 2. For every ntDSConnection that references a server in a different
1028 # site merge all the failure info
1030 # XXX - not implemented yet
# Stub: only logs; no merging is performed in either branch.
1031 if opts.attempt_live_connections:
1032 DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
1034 DEBUG("skipping merge_failed_links() because it requires real network connections\n"
1035 "and we weren't asked to --attempt-live-connctions")
1038 def setup_graph(self, part):
1039 """Set up a GRAPH, populated with a VERTEX for each site
1040 object, a MULTIEDGE for each siteLink object, and a
1041 MUTLIEDGESET for each siteLinkBridge object (or implied
1044 ::returns: a new graph
1048 g = IntersiteGraph()
# NOTE(review): initialisation of `guid_to_vertex` (presumably {}) is
# elided in this view.
1050 for site_guid, site in self.site_table.items():
1051 vertex = Vertex(site, part)
1052 vertex.guid = site_guid
1053 vertex.ndrpacked_guid = ndr_pack(site.site_guid)
1054 g.vertices.add(vertex)
1056 if not guid_to_vertex.get(site_guid):
1057 guid_to_vertex[site_guid] = []
1059 guid_to_vertex[site_guid].append(vertex)
1061 connected_vertices = set()
1062 for transport_guid, transport in self.transport_table.items():
1063 # Currently only ever "IP"
1064 for site_link_dn, site_link in self.sitelink_table.items():
1065 new_edge = create_edge(transport_guid, site_link, guid_to_vertex)
1066 connected_vertices.update(new_edge.vertices)
1067 g.edges.add(new_edge)
1069 # If 'Bridge all site links' is enabled and Win2k3 bridges required is not set
1070 # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
1071 # No documentation for this however, ntdsapi.h appears to have listed:
1072 # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
1073 if ((self.my_site.site_options & 0x00000002) == 0
1074 and (self.my_site.site_options & 0x00001000) == 0):
1075 g.edge_set.add(create_auto_edge_set(g, transport_guid))
1077 # TODO get all site link bridges
# Empty iterable: siteLinkBridge enumeration is not implemented yet.
1078 for site_link_bridge in []:
1079 g.edge_set.add(create_edge_set(g, transport_guid,
1082 g.connected_vertices = connected_vertices
# Optional debug output: emit a dot graph of site edges for verification.
1086 for edge in g.edges:
1087 for a, b in itertools.combinations(edge.vertices, 2):
1088 dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
1089 verify_properties = ()
1090 verify_and_dot('site_edges', dot_edges, directed=False, label=self.my_dsa_dnstr,
1091 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1092 dot_files=opts.dot_files)
# NOTE(review): partial extraction -- the emptiness check on 'bhs' and both
# 'return' statements (None vs. bhs[0]) fall in numbering gaps (1116, 1118-1120,
# 1123) and are missing from this view. Code kept byte-identical.
# Purpose: thin wrapper over get_all_bridgeheads() that (per the docstring)
# yields a single bridgehead dsa for the given site/partition, or None.
1097 def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
1098 """Get a bridghead DC.
1100 :param site: site object representing for which a bridgehead
1102 :param part: crossRef for NC to replicate.
1103 :param transport: interSiteTransport object for replication
1105 :param partial_ok: True if a DC containing a partial
1106 replica or a full replica will suffice, False if only
1107 a full replica will suffice.
1108 :param detect_failed: True to detect failed DCs and route
1109 replication traffic around them, False to assume no DC
1111 ::returns: dsa object for the bridgehead DC or None
1114 bhs = self.get_all_bridgeheads(site, part, transport,
1115 partial_ok, detect_failed)
1117 DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
1121 DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
1122 (site.site_dnstr, bhs[0].dsa_dnstr))
# NOTE(review): partial extraction -- numbering gaps hide the 'bhs = []'
# initializer, the 'continue' after each skip-condition, the 'try:' wrapping
# the samdb.search, the 'bhs.append(dsa)' accumulation, the random-shuffle
# 'else:' branch and the final 'return bhs'. Code kept byte-identical.
# NOTE(review): 'except ldb.LdbError, (enum, estr):' at 1204 is Python-2-only
# syntax -- this file cannot run under Python 3 as written.
# Purpose: filter the site's dsa_table down to DCs eligible to act as
# bridgeheads for (part, transport), per the MS-TECH criteria quoted inline.
1125 def get_all_bridgeheads(self, site, part, transport,
1126 partial_ok, detect_failed):
1127 """Get all bridghead DCs satisfying the given criteria
1129 :param site: site object representing the site for which
1130 bridgehead DCs are desired.
1131 :param part: partition for NC to replicate.
1132 :param transport: interSiteTransport object for
1133 replication traffic.
1134 :param partial_ok: True if a DC containing a partial
1135 replica or a full replica will suffice, False if
1136 only a full replica will suffice.
1137 :param detect_failed: True to detect failed DCs and route
1138 replication traffic around them, FALSE to assume
1140 ::returns: list of dsa object for available bridgehead
1146 logger.debug("get_all_bridgeheads: %s" % transport)
1147 if 'Site-5' in site.site_dnstr:
1148 DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
1149 " detect_failed %s" % (site.site_dnstr, part.partstr,
1150 partial_ok, detect_failed))
1151 logger.debug(site.dsa_table)
1152 for key, dsa in site.dsa_table.items():
1154 pdnstr = dsa.get_parent_dnstr()
1156 # IF t!bridgeheadServerListBL has one or more values and
1157 # t!bridgeheadServerListBL does not contain a reference
1158 # to the parent object of dc then skip dc
1159 if (len(transport.bridgehead_list) != 0 and
1160 pdnstr not in transport.bridgehead_list):
1163 # IF dc is in the same site as the local DC
1164 # IF a replica of cr!nCName is not in the set of NC replicas
1165 # that "should be present" on dc or a partial replica of the
1166 # NC "should be present" but partialReplicasOkay = FALSE
1168 if self.my_site.same_site(dsa):
1169 needed, ro, partial = part.should_be_present(dsa)
1170 if not needed or (partial and not partial_ok):
1172 rep = dsa.get_current_replica(part.nc_dnstr)
1175 # IF an NC replica of cr!nCName is not in the set of NC
1176 # replicas that "are present" on dc or a partial replica of
1177 # the NC "is present" but partialReplicasOkay = FALSE
1180 rep = dsa.get_current_replica(part.nc_dnstr)
1181 if rep is None or (rep.is_partial() and not partial_ok):
1184 # IF AmIRODC() and cr!nCName corresponds to default NC then
1185 # Let dsaobj be the nTDSDSA object of the dc
1186 # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
1188 if self.my_dsa.is_ro() and rep is not None and rep.is_default():
1189 if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
1192 # IF t!name != "IP" and the parent object of dc has no value for
1193 # the attribute specified by t!transportAddressAttribute
1195 if transport.name != "IP":
1196 # MS tech specification says we retrieve the named
1197 # attribute in "transportAddressAttribute" from the parent
1200 attrs = [ transport.address_attr ]
1202 res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
1204 except ldb.LdbError, (enum, estr):
1208 if transport.address_attr not in msg:
1211 nastr = str(msg[transport.address_attr][0])
1213 # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
1215 if self.is_bridgehead_failed(dsa, detect_failed):
1216 DEBUG("bridgehead is failed")
1219 logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
1222 # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
1224 # SORT bhs such that all GC servers precede DCs that are not GC
1225 # servers, and otherwise by ascending objectGUID
1227 # SORT bhs in a random order
1228 if site.is_random_bridgehead_disabled():
1229 bhs.sort(sort_dsa_by_gc_and_guid)
# NOTE(review): partial extraction -- the 'return False' / 'return True' lines
# following the two guard conditions fall in numbering gaps (1274-1275, 1279-1280)
# and are missing from this view. Code kept byte-identical.
# Purpose: decide whether a DC should be treated as failed; deliberately
# departs from the MS pseudo-code, as explained at length in the docstring.
1236 def is_bridgehead_failed(self, dsa, detect_failed):
1237 """Determine whether a given DC is known to be in a failed state
1238 ::returns: True if and only if the DC should be considered failed
1240 Here we DEPART from the pseudo code spec which appears to be
1241 wrong. It says, in full:
1243 /***** BridgeheadDCFailed *****/
1244 /* Determine whether a given DC is known to be in a failed state.
1245 * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
1246 * IN: detectFailedDCs - TRUE if and only failed DC detection is
1248 * RETURNS: TRUE if and only if the DC should be considered to be in a
1251 BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
1253 IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
1254 the options attribute of the site settings object for the local
1257 ELSEIF a tuple z exists in the kCCFailedLinks or
1258 kCCFailedConnections variables such that z.UUIDDsa =
1259 objectGUID, z.FailureCount > 1, and the current time -
1260 z.TimeFirstFailure > 2 hours
1263 RETURN detectFailedDCs
1267 where you will see detectFailedDCs is not behaving as
1268 advertised -- it is acting as a default return code in the
1269 event that a failure is not detected, not a switch turning
1270 detection on or off. Elsewhere the documentation seems to
1271 concur with the comment rather than the code.
1273 if not detect_failed:
1276 # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
1277 # When DETECT_STALE_DISABLED, we can never know of if it's in a failed state
1278 if self.my_site.site_options & 0x00000008:
1281 return self.is_stale_link_connection(dsa)
# NOTE(review): partial extraction -- numbering gaps hide many statements in
# this method, e.g. the 'if rdsa is None: continue' guards after the
# rbh_table lookups, the left-hand sides of several 'cn.options &=' /
# 'cn.options |=' updates (1364, 1378, 1418, 1431), the readonly/else split
# around commit_connections(), and the 'if cn is not None ... to_be_added'
# scaffolding near the end. Code kept byte-identical.
# Purpose: ensure an nTDSConnection exists (creating one if no valid
# connection survives the checks) between a local and a remote bridgehead
# for the given partition, transport, siteLink options and schedule,
# reconciling notify/twoway-sync/compression option bits on existing
# generated connections along the way.
1284 def create_connection(self, part, rbh, rsite, transport,
1285 lbh, lsite, link_opt, link_sched,
1286 partial_ok, detect_failed):
1287 """Create an nTDSConnection object with the given parameters
1288 if one does not already exist.
1290 :param part: crossRef object for the NC to replicate.
1291 :param rbh: nTDSDSA object for DC to act as the
1292 IDL_DRSGetNCChanges server (which is in a site other
1293 than the local DC's site).
1294 :param rsite: site of the rbh
1295 :param transport: interSiteTransport object for the transport
1296 to use for replication traffic.
1297 :param lbh: nTDSDSA object for DC to act as the
1298 IDL_DRSGetNCChanges client (which is in the local DC's site).
1299 :param lsite: site of the lbh
1300 :param link_opt: Replication parameters (aggregated siteLink options, etc.)
1301 :param link_sched: Schedule specifying the times at which
1302 to begin replicating.
1303 :partial_ok: True if bridgehead DCs containing partial
1304 replicas of the NC are acceptable.
1305 :param detect_failed: True to detect failed DCs and route
1306 replication traffic around them, FALSE to assume no DC
1309 rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
1311 rbh_table = {x.dsa_dnstr:x for x in rbhs_all}
1313 DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all), [x.dsa_dnstr for x in rbhs_all]))
1315 # MS-TECH says to compute rbhs_avail but then doesn't use it
1316 # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
1317 # partial_ok, detect_failed)
1319 lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
1322 DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all), [x.dsa_dnstr for x in lbhs_all]))
1324 # MS-TECH says to compute lbhs_avail but then doesn't use it
1325 # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
1326 # partial_ok, detect_failed)
1328 # FOR each nTDSConnection object cn such that the parent of cn is
1329 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1330 for ldsa in lbhs_all:
1331 for cn in ldsa.connect_table.values():
1333 rdsa = rbh_table.get(cn.from_dnstr)
1337 DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
1338 # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
1339 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
1340 # cn!transportType references t
1341 if (cn.is_generated() and not cn.is_rodc_topology() and
1342 cn.transport_guid == transport.guid):
1344 # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
1345 # cn!options and cn!schedule != sch
1346 # Perform an originating update to set cn!schedule to
1348 if (not cn.is_user_owned_schedule() and
1349 not cn.is_equivalent_schedule(link_sched)):
1350 cn.schedule = link_sched
1351 cn.set_modified(True)
1353 # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1354 # NTDSCONN_OPT_USE_NOTIFY are set in cn
1355 if cn.is_override_notify_default() and \
1358 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
1360 # Perform an originating update to clear bits
1361 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1362 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1363 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
1365 ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1366 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1367 cn.set_modified(True)
1372 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
1374 # Perform an originating update to set bits
1375 # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1376 # NTDSCONN_OPT_USE_NOTIFY in cn!options
1377 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1379 (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1380 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1381 cn.set_modified(True)
1384 # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
1385 if cn.is_twoway_sync():
1387 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
1389 # Perform an originating update to clear bit
1390 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1391 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
1392 cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1393 cn.set_modified(True)
1398 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
1400 # Perform an originating update to set bit
1401 # NTDSCONN_OPT_TWOWAY_SYNC in cn!options
1402 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1403 cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1404 cn.set_modified(True)
1407 # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
1409 if cn.is_intersite_compression_disabled():
1411 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
1413 # Perform an originating update to clear bit
1414 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1417 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0:
1419 ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1420 cn.set_modified(True)
1424 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1426 # Perform an originating update to set bit
1427 # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
1430 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1432 dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1433 cn.set_modified(True)
1435 # Display any modified connection
1437 if cn.to_be_modified:
1438 logger.info("TO BE MODIFIED:\n%s" % cn)
1440 ldsa.commit_connections(self.samdb, ro=True)
1442 ldsa.commit_connections(self.samdb)
1445 valid_connections = 0
1447 # FOR each nTDSConnection object cn such that cn!parent is
1448 # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
1449 for ldsa in lbhs_all:
1450 for cn in ldsa.connect_table.values():
1452 rdsa = rbh_table.get(cn.from_dnstr)
1456 DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
1458 # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
1459 # cn!transportType references t) and
1460 # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
1461 if ((not cn.is_generated() or
1462 cn.transport_guid == transport.guid) and
1463 not cn.is_rodc_topology()):
1465 # LET rguid be the objectGUID of the nTDSDSA object
1466 # referenced by cn!fromServer
1467 # LET lguid be (cn!parent)!objectGUID
1469 # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
1470 # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
1471 # Increment cValidConnections by 1
1472 if (not self.is_bridgehead_failed(rdsa, detect_failed) and
1473 not self.is_bridgehead_failed(ldsa, detect_failed)):
1474 valid_connections += 1
1476 # IF keepConnections does not contain cn!objectGUID
1477 # APPEND cn!objectGUID to keepConnections
1478 self.kept_connections.add(cn)
1481 DEBUG_RED("valid connections %d" % valid_connections)
1482 DEBUG("kept_connections:\n%s" % (self.kept_connections,))
1483 # IF cValidConnections = 0
1484 if valid_connections == 0:
1486 # LET opt be NTDSCONN_OPT_IS_GENERATED
1487 opt = dsdb.NTDSCONN_OPT_IS_GENERATED
1489 # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
1490 # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
1491 # NTDSCONN_OPT_USE_NOTIFY in opt
1492 if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
1493 opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
1494 dsdb.NTDSCONN_OPT_USE_NOTIFY)
1496 # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
1497 # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
1498 if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
1499 opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
1501 # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
1503 # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
1505 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0:
1506 opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
1508 # Perform an originating update to create a new nTDSConnection
1509 # object cn that is a child of lbh, cn!enabledConnection = TRUE,
1510 # cn!options = opt, cn!transportType is a reference to t,
1511 # cn!fromServer is a reference to rbh, and cn!schedule = sch
1512 cn = lbh.new_connection(opt, 0, transport, rbh.dsa_dnstr, link_sched)
1514 # Display any added connection
1517 logger.info("TO BE ADDED:\n%s" % cn)
1519 lbh.commit_connections(self.samdb, ro=True)
1521 lbh.commit_connections(self.samdb)
1523 # APPEND cn!objectGUID to keepConnections
1524 self.kept_connections.add(cn)
# NOTE(review): partial extraction -- numbering gaps hide 'continue' lines
# after the skip conditions, the 'if bh is None:' handling around the
# get_bridgehead() result, and the final return of 'found_failed'.
# Code kept byte-identical.
# NOTE(review): the warning below calls module-level 'logging.warning' while
# the rest of the file uses the 'logger' instance -- presumably a slip;
# confirm before changing.
# Purpose: record on 'vertex' which transports can accept replication
# (accept_red_red / accept_black lists), always appending the catch-all
# "EDGE_TYPE_ALL" for a later Dijkstra pass.
1526 def add_transports(self, vertex, local_vertex, graph, detect_failed):
1528 # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
1529 # here and in the, but using vertex seems to make more
1530 # sense. That is, it wants this:
1532 #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1533 # local_vertex.is_black(), detect_failed)
1537 vertex.accept_red_red = []
1538 vertex.accept_black = []
1539 found_failed = False
1540 for t_guid, transport in self.transport_table.items():
1541 if transport.name != 'IP':
1542 #XXX well this is cheating a bit
1543 logging.warning("WARNING: we are ignoring a transport named %r" % transport.name)
1546 # FLAG_CR_NTDS_DOMAIN 0x00000002
1547 if (vertex.is_red() and transport.name != "IP" and
1548 vertex.part.system_flags & 0x00000002):
1551 if vertex not in graph.connected_vertices:
1554 partial_replica_okay = vertex.is_black()
1555 bh = self.get_bridgehead(vertex.site, vertex.part, transport,
1556 partial_replica_okay, detect_failed)
1561 vertex.accept_red_red.append(t_guid)
1562 vertex.accept_black.append(t_guid)
1564 # Add additional transport to allow another run of Dijkstra
1565 vertex.accept_red_red.append("EDGE_TYPE_ALL")
1566 vertex.accept_black.append("EDGE_TYPE_ALL")
# NOTE(review): partial extraction -- numbering gaps hide, among others: the
# 'found_failed = True' handling after add_transports(), the 'partial_ok'
# assignments following the is_black() test, the 'for e in edge_list:' loop
# header that must precede the use of 'e' at original line 1640, the
# 'if rbh is None:' guard, the RODC 'lbh = mydsa'-style assignment, and the
# DEBUG bodies between 1680-1684. Code kept byte-identical.
# Purpose: walk the spanning-tree edges for this partition and call
# create_connection() for each edge that reaches the local site, using the
# IP transport only.
1570 def create_connections(self, graph, part, detect_failed):
1571 """Construct an NC replica graph for the NC identified by
1572 the given crossRef, then create any additional nTDSConnection
1575 :param graph: site graph.
1576 :param part: crossRef object for NC.
1577 :param detect_failed: True to detect failed DCs and route
1578 replication traffic around them, False to assume no DC
1581 Modifies self.kept_connections by adding any connections
1582 deemed to be "in use".
1584 ::returns: (all_connected, found_failed_dc)
1585 (all_connected) True if the resulting NC replica graph
1586 connects all sites that need to be connected.
1587 (found_failed_dc) True if one or more failed DCs were
1590 all_connected = True
1591 found_failed = False
1593 logger.debug("create_connections(): enter\n\tpartdn=%s\n\tdetect_failed=%s" %
1594 (part.nc_dnstr, detect_failed))
1596 # XXX - This is a highly abbreviated function from the MS-TECH
1597 # ref. It creates connections between bridgeheads to all
1598 # sites that have appropriate replicas. Thus we are not
1599 # creating a minimum cost spanning tree but instead
1600 # producing a fully connected tree. This should produce
1601 # a full (albeit not optimal cost) replication topology.
1603 my_vertex = Vertex(self.my_site, part)
1604 my_vertex.color_vertex()
1606 for v in graph.vertices:
1608 if self.add_transports(v, my_vertex, graph, False):
1611 # No NC replicas for this NC in the site of the local DC,
1612 # so no nTDSConnection objects need be created
1613 if my_vertex.is_white():
1614 return all_connected, found_failed
1616 edge_list, component_count = self.get_spanning_tree_edges(graph, label=part.partstr)
1618 logger.debug("%s Number of components: %d" % (part.nc_dnstr, component_count))
1619 if component_count > 1:
1620 all_connected = False
1622 # LET partialReplicaOkay be TRUE if and only if
1623 # localSiteVertex.Color = COLOR.BLACK
1624 if my_vertex.is_black():
1629 # Utilize the IP transport only for now
1631 for transport in self.transport_table.values():
1632 if transport.name == "IP":
1635 if transport is None:
1636 raise Exception("Unable to find inter-site transport for IP")
1638 DEBUG("edge_list %s" % edge_list)
1640 if e.directed and e.vertices[0].site is self.my_site: # more accurate comparison?
1643 if e.vertices[0].site is self.my_site:
1644 rsite = e.vertices[1].site
1646 rsite = e.vertices[0].site
1648 # We don't make connections to our own site as that
1649 # is intrasite topology generator's job
1650 if rsite is self.my_site:
1651 DEBUG("rsite is my_site")
1654 # Determine bridgehead server in remote site
1655 rbh = self.get_bridgehead(rsite, part, transport,
1656 partial_ok, detect_failed)
1660 # RODC acts as an BH for itself
1662 # LET lbh be the nTDSDSA object of the local DC
1664 # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
1665 # cr, t, partialReplicaOkay, detectFailedDCs)
1666 if self.my_dsa.is_ro():
1667 lsite = self.my_site
1670 lsite = self.my_site
1671 lbh = self.get_bridgehead(lsite, part, transport,
1672 partial_ok, detect_failed)
1675 DEBUG_RED("DISASTER! lbh is None")
1680 DEBUG_BLUE("vertices")
1682 DEBUG_BLUE("bridgeheads")
1684 DEBUG_BLUE("-" * 70)
1686 sitelink = e.site_link
1687 if sitelink is None:
1691 link_opt = sitelink.options
1692 link_sched = sitelink.schedule
1694 self.create_connection(part, rbh, rsite, transport,
1695 lbh, lsite, link_opt, link_sched,
1696 partial_ok, detect_failed)
1698 return all_connected, found_failed
# NOTE(review): partial extraction -- the 'continue' lines after the
# is_enabled()/is_foreign() tests and the 'if found_failed:' guard before
# the second create_connections() call fall in numbering gaps.
# Code kept byte-identical.
# Purpose: driver for inter-site connection creation -- iterates every
# partition, builds its site graph, and creates connections (first routing
# around failed DCs, then retrying with failure-detection off when failed
# DCs were found).
1700 def create_intersite_connections(self):
1701 """Computes an NC replica graph for each NC replica that "should be
1702 present" on the local DC or "is present" on any DC in the same site
1703 as the local DC. For each edge directed to an NC replica on such a
1704 DC from an NC replica on a DC in another site, the KCC creates an
1705 nTDSConnection object to imply that edge if one does not already
1708 Modifies self.kept_connections - A set of nTDSConnection
1709 objects for edges that are directed
1710 to the local DC's site in one or more NC replica graphs.
1712 returns: True if spanning trees were created for all NC replica
1713 graphs, otherwise False.
1715 all_connected = True
1716 self.kept_connections = set()
1718 # LET crossRefList be the set containing each object o of class
1719 # crossRef such that o is a child of the CN=Partitions child of the
1722 # FOR each crossRef object cr in crossRefList
1723 # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
1724 # is clear in cr!systemFlags, skip cr.
1725 # LET g be the GRAPH return of SetupGraph()
1727 for part in self.part_table.values():
1729 if not part.is_enabled():
1732 if part.is_foreign():
1735 graph = self.setup_graph(part)
1737 # Create nTDSConnection objects, routing replication traffic
1738 # around "failed" DCs.
1739 found_failed = False
1741 connected, found_failed = self.create_connections(graph, part, True)
1743 DEBUG("with detect_failed: connected %s Found failed %s" % (connected, found_failed))
1745 all_connected = False
1748 # One or more failed DCs preclude use of the ideal NC
1749 # replica graph. Add connections for the ideal graph.
1750 self.create_connections(graph, part, False)
1752 return all_connected
# NOTE(review): partial extraction -- numbering gaps hide the per-edge-set
# vertex reset statements (around original lines 1763-1770 and 1788-1796)
# and a guard around the final dist_to_red assignment. Code kept
# byte-identical.
# Purpose: three-phase spanning-tree computation -- (1) Dijkstra per edge
# set (seeded first from red, then red+black vertices) to collect internal
# edges, (2) Kruskal over those internal edges, (3) a final all-types
# Dijkstra to recompute each vertex's distance to the nearest red vertex;
# returns (one-way-filtered output edges, component count).
1754 def get_spanning_tree_edges(self, graph, label=None):
1755 # Phase 1: Run Dijkstra's to get a list of internal edges, which are
1756 # just the shortest-paths connecting colored vertices
1758 internal_edges = set()
1760 for e_set in graph.edge_set:
1762 for v in graph.vertices:
1765 # All con_type in an edge set is the same
1766 for e in e_set.edges:
1767 edgeType = e.con_type
1768 for v in e.vertices:
1771 if opts.verify or opts.dot_files:
1772 graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
1773 for a, b in itertools.chain(*(itertools.combinations(edge.vertices, 2)
1774 for edge in e_set.edges))]
1775 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1778 write_dot_file('edgeset_%s' % (edgeType,), graph_edges, vertices=graph_nodes,
1782 verify_graph('spanning tree edge set %s' % edgeType, graph_edges, vertices=graph_nodes,
1783 properties=('complete', 'connected'), debug=DEBUG)
1785 # Run dijkstra's algorithm with just the red vertices as seeds
1786 # Seed from the full replicas
1787 dijkstra(graph, edgeType, False)
1790 process_edge_set(graph, e_set, internal_edges)
1792 # Run dijkstra's algorithm with red and black vertices as the seeds
1793 # Seed from both full and partial replicas
1794 dijkstra(graph, edgeType, True)
1797 process_edge_set(graph, e_set, internal_edges)
1799 # All vertices have root/component as itself
1800 setup_vertices(graph)
1801 process_edge_set(graph, None, internal_edges)
1803 if opts.verify or opts.dot_files:
1804 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1805 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1806 verify_properties = ('multi_edge_forest',)
1807 verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
1808 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1809 dot_files=opts.dot_files)
1812 # Phase 2: Run Kruskal's on the internal edges
1813 output_edges, components = kruskal(graph, internal_edges)
1815 # This recalculates the cost for the path connecting the closest red vertex
1816 # Ignoring types is fine because NO suboptimal edge should exist in the graph
1817 dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename
1818 # Phase 3: Process the output
1819 for v in graph.vertices:
1823 v.dist_to_red = v.repl_info.cost
1825 if opts.verify or opts.dot_files:
1826 graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) for e in internal_edges]
1827 graph_nodes = [v.site.site_dnstr for v in graph.vertices]
1828 verify_properties = ('multi_edge_forest',)
1829 verify_and_dot('postkruskal', graph_edges, graph_nodes, label=label,
1830 properties=verify_properties, debug=DEBUG, verify=opts.verify,
1831 dot_files=opts.dot_files)
1833 # count the components
1834 return self.copy_output_edges(graph, output_edges), components
# NOTE(review): partial extraction -- the 'edge_list = []' initializer and
# the final 'return edge_list' fall in numbering gaps (1840, 1855-1856).
# Code kept byte-identical.
# NOTE(review): the inline comment at 1839 says "object guid" but the code
# binds the site object itself and later compares with 'is' -- the comment
# appears stale; confirm before rewording in a code change.
# Purpose: for edges touching the local site, mark partial-replica edges as
# directed (one-way) toward the endpoint closer to a red (full) replica.
1836 # This ensures only one-way connections for partial-replicas
1837 def copy_output_edges(self, graph, output_edges):
1839 vid = self.my_site # object guid for the local dc's site
1841 for edge in output_edges:
1842 # Three-way edges are no problem here since these were created by
1843 # add_out_edge which only has two endpoints
1844 v = edge.vertices[0]
1845 w = edge.vertices[1]
1846 if v.site is vid or w.site is vid:
1847 if (v.is_black() or w.is_black()) and not v.dist_to_red == MAX_DWORD:
1848 edge.directed = True
1850 if w.dist_to_red < v.dist_to_red:
1851 edge.vertices[0] = w
1852 edge.vertices[1] = v
1854 edge_list.append(edge)
# NOTE(review): partial extraction -- the binding of 'mydsa' (presumably
# 'mydsa = self.my_dsa') and the 'if opts.readonly:' / 'else:' split around
# the two select_istg() calls fall in numbering gaps. Code kept
# byte-identical.
# Purpose: top-level inter-site entry point -- selects the ISTG, bails out
# early if inter-site topology is disabled or this DSA is not the ISTG,
# then runs merge_failed_links() and create_intersite_connections().
1858 def intersite(self):
1859 """The head method for generating the inter-site KCC replica
1860 connection graph and attendant nTDSConnection objects
1863 Produces self.kept_connections set of NTDS Connections
1864 that should be kept during subsequent pruning process.
1866 ::return (True or False): (True) if the produced NC replica
1867 graph connects all sites that need to be connected
1872 mysite = self.my_site
1873 all_connected = True
1875 logger.debug("intersite(): enter")
1877 # Determine who is the ISTG
1879 mysite.select_istg(self.samdb, mydsa, ro=True)
1881 mysite.select_istg(self.samdb, mydsa, ro=False)
1883 # Test whether local site has topology disabled
1884 if mysite.is_intersite_topology_disabled():
1885 logger.debug("intersite(): exit disabled all_connected=%d" %
1887 return all_connected
1889 if not mydsa.is_istg():
1890 logger.debug("intersite(): exit not istg all_connected=%d" %
1892 return all_connected
1894 self.merge_failed_links()
1896 # For each NC with an NC replica that "should be present" on the
1897 # local DC or "is present" on any DC in the same site as the
1898 # local DC, the KCC constructs a site graph--a precursor to an NC
1899 # replica graph. The site connectivity for a site graph is defined
1900 # by objects of class interSiteTransport, siteLink, and
1901 # siteLinkBridge in the config NC.
1903 all_connected = self.create_intersite_connections()
1905 logger.debug("intersite(): exit all_connected=%d" % all_connected)
1906 return all_connected
# NOTE(review): partial extraction -- the early 'return' after the is_ro()
# check, the 'cn2 = con' binding plus loop 'break's, and the 'if cn2 is
# None: return'-style guard fall in numbering gaps. As extracted, 'cn2' in
# the second loop has no visible assignment. Code kept byte-identical.
# Purpose: on an RODC, copy fromServer and schedule from the normal (DRS)
# connection onto the NTDSCONN_OPT_RODC_TOPOLOGY (FRS) connection, then
# commit (read-only when opts.readonly).
1908 def update_rodc_connection(self):
1909 """Runs when the local DC is an RODC and updates the RODC NTFRS
1912 # Given an nTDSConnection object cn1, such that cn1.options contains
1913 # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
1914 # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
1915 # that the following is true:
1917 # cn1.fromServer = cn2.fromServer
1918 # cn1.schedule = cn2.schedule
1920 # If no such cn2 can be found, cn1 is not modified.
1921 # If no such cn1 can be found, nothing is modified by this task.
1923 if not self.my_dsa.is_ro():
1927 # Find cn2 - the DRS NTDSConnection
1928 for con in self.my_dsa.connect_table.values():
1929 if not con.is_rodc_topology():
1933 # Find cn1 - the FRS NTDSConnection
1935 for con in self.my_dsa.connect_table.values():
1936 if con.is_rodc_topology():
1937 con.from_dnstr = cn2.from_dnstr
1938 con.schedule = cn2.schedule
1939 con.to_be_modified = True
1941 # Commit changes to the database
1942 self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)
# NOTE(review): partial extraction -- the loop scaffolding around the
# inequality test (presumably initialising and incrementing 'n') and the
# final 'return' (per the docstring, n + 2) fall in numbering gaps
# (1966-1968, 1970-1976). Code kept byte-identical.
# Purpose: compute the max in-degree for a node in the intrasite replica
# graph: smallest n with node_count <= 2*n*n + 6*n + 7, returning n + 2.
1944 def intrasite_max_node_edges(self, node_count):
1945 """Returns the maximum number of edges directed to a node in
1946 the intrasite replica graph.
1948 The KCC does not create more
1949 than 50 edges directed to a single DC. To optimize replication,
1950 we compute that each node should have n+2 total edges directed
1951 to it such that (n) is the smallest non-negative integer
1952 satisfying (node_count <= 2*(n*n) + 6*n + 7)
1954 (If the number of edges is m (i.e. n + 2), that is the same as
1955 2 * m*m - 2 * m + 3).
1965 :param node_count: total number of nodes in the replica graph
1969 if node_count <= (2 * (n * n) + (6 * n) + 7):
1977 def construct_intrasite_graph(self, site_local, dc_local,
1978 nc_x, gc_only, detect_stale):
1980 # We're using the MS notation names here to allow
1981 # correlation back to the published algorithm.
1983 # nc_x - naming context (x) that we are testing if it
1984 # "should be present" on the local DC
1985 # f_of_x - replica (f) found on a DC (s) for NC (x)
1986 # dc_s - DC where f_of_x replica was found
1987 # dc_local - local DC that potentially needs a replica
1989 # r_list - replica list R
1990 # p_of_x - replica (p) is partial and found on a DC (s)
1992 # l_of_x - replica (l) is the local replica for NC (x)
1993 # that should appear on the local DC
1994 # r_len = is length of replica list |R|
1996 # If the DSA doesn't need a replica for this
1997 # partition (NC x) then continue
1998 needed, ro, partial = nc_x.should_be_present(dc_local)
2000 DEBUG_YELLOW("construct_intrasite_graph(): enter" +
2001 "\n\tgc_only=%d" % gc_only +
2002 "\n\tdetect_stale=%d" % detect_stale +
2003 "\n\tneeded=%s" % needed +
2005 "\n\tpartial=%s" % partial +
2009 DEBUG_RED("%s lacks 'should be present' status, aborting construct_intersite_graph!" %
2013 # Create a NCReplica that matches what the local replica
2014 # should say. We'll use this below in our r_list
2015 l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
2018 l_of_x.identify_by_basedn(self.samdb)
2020 l_of_x.rep_partial = partial
2023 # Add this replica that "should be present" to the
2024 # needed replica table for this DSA
2025 dc_local.add_needed_replica(l_of_x)
2029 # Let R be a sequence containing each writable replica f of x
2030 # such that f "is present" on a DC s satisfying the following
2033 # * s is a writable DC other than the local DC.
2035 # * s is in the same site as the local DC.
2037 # * If x is a read-only full replica and x is a domain NC,
2038 # then the DC's functional level is at least
2039 # DS_BEHAVIOR_WIN2008.
2041 # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
2042 # in the options attribute of the site settings object for
2043 # the local DC's site, or no tuple z exists in the
2044 # kCCFailedLinks or kCCFailedConnections variables such
2045 # that z.UUIDDsa is the objectGUID of the nTDSDSA object
2046 # for s, z.FailureCount > 0, and the current time -
2047 # z.TimeFirstFailure > 2 hours.
2051 # We'll loop thru all the DSAs looking for
2052 # writeable NC replicas that match the naming
2053 # context dn for (nc_x)
2055 for dc_s in self.my_site.dsa_table.values():
2056 # If this partition (nc_x) doesn't appear as a
2057 # replica (f_of_x) on (dc_s) then continue
2058 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2061 # Pull out the NCReplica (f) of (x) with the dn
2062 # that matches NC (x) we are examining.
2063 f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2065 # Replica (f) of NC (x) must be writable
2069 # Replica (f) of NC (x) must satisfy the
2070 # "is present" criteria for DC (s) that
2072 if not f_of_x.is_present():
2075 # DC (s) must be a writable DSA other than
2076 # my local DC. In other words we'd only replicate
2077 # from other writable DC
2078 if dc_s.is_ro() or dc_s is dc_local:
2081 # Certain replica graphs are produced only
2082 # for global catalogs, so test against
2083 # method input parameter
2084 if gc_only and not dc_s.is_gc():
2087 # DC (s) must be in the same site as the local DC
2088 # as this is the intra-site algorithm. This is
2089 # handled by virtue of placing DSAs in per
2090 # site objects (see enclosing for() loop)
2092 # If NC (x) is intended to be read-only full replica
2093 # for a domain NC on the target DC then the source
2094 # DC should have functional level at minimum WIN2008
2096 # Effectively we're saying that in order to replicate
2097 # to a targeted RODC (which was introduced in Windows 2008)
2098 # then we have to replicate from a DC that is also minimally
2101 # You can also see this requirement in the MS special
2102 # considerations for RODC which state that to deploy
2103 # an RODC, at least one writable domain controller in
2104 # the domain must be running Windows Server 2008
2105 if ro and not partial and nc_x.nc_type == NCType.domain:
2106 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2109 # If we haven't been told to turn off stale connection
2110 # detection and this dsa has a stale connection then
2112 if detect_stale and self.is_stale_link_connection(dc_s):
2115 # Replica meets criteria. Add it to table indexed
2116 # by the GUID of the DC that it appears on
2117 r_list.append(f_of_x)
2119 # If a partial (not full) replica of NC (x) "should be present"
2120 # on the local DC, append to R each partial replica (p of x)
2121 # such that p "is present" on a DC satisfying the same
2122 # criteria defined above for full replica DCs.
2124 # XXX This loop and the previous one differ only in whether
2125 # the replica is partial or not. here we only accept partial
2126 # (because we're partial); before we only accepted full. Order
2127 # doen't matter (the list is sorted a few lines down) so these
2128 # loops could easily be merged. Or this could be a helper
2132 # Now we loop thru all the DSAs looking for
2133 # partial NC replicas that match the naming
2134 # context dn for (NC x)
2135 for dc_s in self.my_site.dsa_table.values():
2137 # If this partition NC (x) doesn't appear as a
2138 # replica (p) of NC (x) on the dsa DC (s) then
2140 if not nc_x.nc_dnstr in dc_s.current_rep_table:
2143 # Pull out the NCReplica with the dn that
2144 # matches NC (x) we are examining.
2145 p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
2147 # Replica (p) of NC (x) must be partial
2148 if not p_of_x.is_partial():
2151 # Replica (p) of NC (x) must satisfy the
2152 # "is present" criteria for DC (s) that
2154 if not p_of_x.is_present():
2157 # DC (s) must be a writable DSA other than
2158 # my DSA. In other words we'd only replicate
2159 # from other writable DSA
2160 if dc_s.is_ro() or dc_s is dc_local:
2163 # Certain replica graphs are produced only
2164 # for global catalogs, so test against
2165 # method input parameter
2166 if gc_only and not dc_s.is_gc():
2169 # DC (s) must be in the same site as the local DC
2170 # as this is the intra-site algorithm. This is
2171 # handled by virtue of placing DSAs in per
2172 # site objects (see enclosing for() loop)
2174 # This criteria is moot (a no-op) for this case
2175 # because we are scanning for (partial = True). The
2176 # MS algorithm statement says partial replica scans
2177 # should adhere to the "same" criteria as full replica
2178 # scans so the criteria doesn't change here...its just
2179 # rendered pointless.
2181 # The case that is occurring would be a partial domain
2182 # replica is needed on a local DC global catalog. There
2183 # is no minimum windows behavior for those since GCs
2184 # have always been present.
2185 if ro and not partial and nc_x.nc_type == NCType.domain:
2186 if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
2189 # If we haven't been told to turn off stale connection
2190 # detection and this dsa has a stale connection then
2192 if detect_stale and self.is_stale_link_connection(dc_s):
2195 # Replica meets criteria. Add it to table indexed
2196 # by the GUID of the DSA that it appears on
2197 r_list.append(p_of_x)
2199 # Append to R the NC replica that "should be present"
2201 r_list.append(l_of_x)
2203 r_list.sort(sort_replica_by_dsa_guid)
2206 max_node_edges = self.intrasite_max_node_edges(r_len)
2208 # Add a node for each r_list element to the replica graph
2211 node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
2212 graph_list.append(node)
2214 # For each r(i) from (0 <= i < |R|-1)
2216 while i < (r_len-1):
2217 # Add an edge from r(i) to r(i+1) if r(i) is a full
2218 # replica or r(i+1) is a partial replica
2219 if not r_list[i].is_partial() or r_list[i+1].is_partial():
2220 graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
2222 # Add an edge from r(i+1) to r(i) if r(i+1) is a full
2223 # replica or ri is a partial replica.
2224 if not r_list[i+1].is_partial() or r_list[i].is_partial():
2225 graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
2228 # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
2229 # or r0 is a partial replica.
2230 if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
2231 graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)
2233 # Add an edge from r0 to r|R|-1 if r0 is a full replica or
2234 # r|R|-1 is a partial replica.
2235 if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
2236 graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)
2238 DEBUG("r_list is length %s" % len(r_list))
2239 DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr)) for x in r_list))
2241 if opts.verify or opts.dot_files:
2243 dot_vertices = set()
2244 for v1 in graph_list:
2245 dot_vertices.add(v1.dsa_dnstr)
2246 for v2 in v1.edge_from:
2247 dot_edges.append((v2, v1.dsa_dnstr))
2248 dot_vertices.add(v2)
2250 verify_properties = ('connected', 'directed_double_ring')
2251 verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
2252 label='%s__%s__%s' % (site_local.site_dnstr, nctype_lut[nc_x.nc_type], nc_x.nc_dnstr),
2253 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2254 dot_files=opts.dot_files, directed=True)
2258 # For each existing nTDSConnection object implying an edge
2259 # from rj of R to ri such that j != i, an edge from rj to ri
2260 # is not already in the graph, and the total edges directed
2261 # to ri is less than n+2, the KCC adds that edge to the graph.
2262 for vertex in graph_list:
2263 dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
2264 for connect in dsa.connect_table.values():
2265 remote = connect.from_dnstr
2266 if remote in self.my_site.dsa_table:
2267 vertex.add_edge_from(remote)
2269 DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
2270 DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
2272 for tnode in graph_list:
2273 # To optimize replication latency in sites with many NC replicas, the
2274 # KCC adds new edges directed to ri to bring the total edges to n+2,
2275 # where the NC replica rk of R from which the edge is directed
2276 # is chosen at random such that k != i and an edge from rk to ri
2277 # is not already in the graph.
2279 # Note that the KCC tech ref does not give a number for the definition
2280 # of "sites with many NC replicas". At a bare minimum to satisfy
2281 # n+2 edges directed at a node we have to have at least three replicas
2282 # in |R| (i.e. if n is zero then at least replicas from two other graph
2283 # nodes may direct edges to us).
2284 if r_len >= 3 and not tnode.has_sufficient_edges():
2285 candidates = [x for x in graph_list if (x is not tnode and
2286 x.dsa_dnstr not in tnode.edge_from)]
2288 DEBUG_BLUE("looking for random link for %s. r_len %d, graph len %d candidates %d"
2289 % (tnode.dsa_dnstr, r_len, len(graph_list), len(candidates)))
2291 DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
2293 while candidates and not tnode.has_sufficient_edges():
2294 other = random.choice(candidates)
2295 DEBUG("trying to add candidate %s" % other.dsa_dstr)
2296 if not tnode.add_edge_from(other):
2297 DEBUG_RED("could not add %s" % other.dsa_dstr)
2298 candidates.remove(other)
2300 DEBUG_CYAN("not adding links to %s: nodes %s, links is %s/%s" %
2301 (tnode.dsa_dnstr, r_len, len(tnode.edge_from), tnode.max_edges))
2304 # Print the graph node in debug mode
2305 logger.debug("%s" % tnode)
2307 # For each edge directed to the local DC, ensure a nTDSConnection
2308 # points to us that satisfies the KCC criteria
2310 if tnode.dsa_dnstr == dc_local.dsa_dnstr:
2311 tnode.add_connections_from_edges(dc_local)
2314 if opts.verify or opts.dot_files:
2316 dot_vertices = set()
2317 for v1 in graph_list:
2318 dot_vertices.add(v1.dsa_dnstr)
2319 for v2 in v1.edge_from:
2320 dot_edges.append((v2, v1.dsa_dnstr))
2321 dot_vertices.add(v2)
2323 verify_properties = ('connected', 'directed_double_ring_or_small')
2324 verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
2325 label='%s__%s__%s' % (site_local.site_dnstr, nctype_lut[nc_x.nc_type], nc_x.nc_dnstr),
2326 properties=verify_properties, debug=DEBUG, verify=opts.verify,
2327 dot_files=opts.dot_files, directed=True)
def intrasite(self):
    """The head method for generating the intra-site KCC replica
    connection graph and attendant nTDSConnection objects.

    Runs the [MS-ADTS] 6.2.2 intra-site algorithm once per partition
    with stale-link detection enabled, then repeats the whole pass with
    detection disabled (as if NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED
    were set), finally committing any new connections to the samdb.

    NOTE(review): several lines of this method are not visible in this
    excerpt (e.g. the assignment of mydsa, guard bodies, and call
    continuations); comments describe only what the visible code shows.
    """
    logger.debug("intrasite(): enter")

    # Test whether local site has topology disabled
    mysite = self.my_site
    if mysite.is_intrasite_topology_disabled():

    detect_stale = (not mysite.is_detect_stale_disabled())
    # Debug dump: connections already flagged for addition before any
    # graph construction happens
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_added:
            DEBUG_CYAN("TO BE ADDED:\n%s" % connect)

    # Loop thru all the partitions, with gc_only False
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,

    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_added:
            DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_added:
            DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)

    # Do it again, with gc_only True
    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,

    # The DC repeats the NC replica graph computation and nTDSConnection
    # creation for each of the NC replica graphs, this time assuming
    # that no DC has failed. It does so by re-executing the steps as
    # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
    # set in the options attribute of the site settings object for
    # the local DC's site. (ie. we set "detec_stale" flag to False)
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_added:
            DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

    # Loop thru all the partitions.
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,
                                       False) # don't detect stale

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_added:
            DEBUG_RED("TO BE ADDED:\n%s" % connect)

    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,
                                           False) # don't detect stale

    # Display any to be added or modified repsFrom
    for dnstr, connect in mydsa.connect_table.items():
        if connect.to_be_deleted:
            logger.info("TO BE DELETED:\n%s" % connect)
        if connect.to_be_modified:
            logger.info("TO BE MODIFIED:\n%s" % connect)
        if connect.to_be_added:
            DEBUG_GREEN("TO BE ADDED:\n%s" % connect)

    # NOTE(review): an elided conditional (presumably opts.readonly)
    # appears to guard this dry-run (ro=True) commit — confirm in the
    # full source
    mydsa.commit_connections(self.samdb, ro=True)

    # Commit any newly created connections to the samdb
    mydsa.commit_connections(self.samdb)
def list_dsas(self):
    """Compile a list of DSA DNs for every DSA in every loaded site,
    with the leading "CN=NTDS Settings," RDN stripped (i.e. the server
    object DN rather than the nTDSDSA object DN).

    NOTE(review): the initialisation and return of the result list
    (dsas) are not visible in this excerpt.
    """
    # Populate the site/partition/transport/sitelink tables first so
    # site_table below is filled in
    self.load_all_sites()
    self.load_all_partitions()
    self.load_all_transports()
    self.load_all_sitelinks()

    for site in self.site_table.values():
        # Strip only the first occurrence of the NTDS Settings RDN
        dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
                     for dsa in site.dsa_table.values()])
def load_samdb(self, dburl, lp, creds):
    """Open the sam database and store the connection on self.samdb.

    :param dburl: LDB URL of the database or target server
    :param lp: loadparm context
    :param creds: credentials used for the connection

    Uses the system session, so no user authentication is involved for
    a local database.
    """
    self.samdb = SamDB(url=dburl,
                       session_info=system_session(),
                       credentials=creds, lp=lp)
def plot_all_connections(self, basename, verify_properties=()):
    """Dump and/or verify a graph of every nTDSConnection in every
    site, colouring RODC-topology connections red and the rest blue.

    :param basename: name prefix for the emitted dot file
    :param verify_properties: graph properties to check; verification
        only runs when properties are given AND opts.verify is set

    NOTE(review): the initialisation of dot_edges/dot_vertices/colours
    and the early return after the guard are not visible in this
    excerpt.
    """
    verify = verify_properties and opts.verify
    plot = opts.dot_files
    # Nothing to do if neither verification nor plotting is requested
    if not (verify or plot):

    for site in self.site_table.values():
        for dsa in site.dsa_table.values():
            dot_vertices.append(dsa.dsa_dnstr)
            for con in dsa.connect_table.values():
                # red for RODC-topology connections
                if con.is_rodc_topology():
                    colours.append('red')
                # NOTE(review): an elided 'else:' appears to guard this
                # 'blue' append — confirm in the full source
                    colours.append('blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    verify_and_dot(basename, dot_edges, vertices=dot_vertices,
                   label=self.my_dsa_dnstr, properties=verify_properties,
                   debug=DEBUG, verify=verify, dot_files=plot,
                   directed=True, edge_colors=colours)
def run(self, dburl, lp, creds, forced_local_dsa=None,
        forget_local_links=False, forget_intersite_links=False):
    """Method to perform a complete run of the KCC and
    produce an updated topology for subsequent NC replica
    syncronization between domain controllers.

    :param dburl: LDB URL of the sam database
    :param lp: loadparm context
    :param creds: credentials for the connection
    :param forced_local_dsa: optional DSA DN; calculations run as if
        this were the local DSA
    :param forget_local_links: drop the known local-site topology
        (keeping only RODC-topology connections) before running
    :param forget_intersite_links: drop the known intersite topology
        before running

    NOTE(review): a number of lines (try/except framing, return
    statements, dot_edges/guid_to_dnstr/edge_colors initialisations)
    are elided in this excerpt.
    """
    # We may already have a samdb setup if we are
    # currently importing an ldif for a test run
    if self.samdb is None:
        # NOTE(review): an elided 'try:' wraps this load
        self.load_samdb(dburl, lp, creds)
        except ldb.LdbError, (num, msg):
            logger.error("Unable to open sam database %s : %s" %

    if forced_local_dsa:
        self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % forced_local_dsa)

    # Bring all topology-relevant objects into memory
    self.load_all_sites()
    self.load_all_partitions()
    self.load_all_transports()
    self.load_all_sitelinks()

    if opts.verify or opts.dot_files:
        # Build a GUID -> DN map used to label graph vertices
        for site in self.site_table.values():
            guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                                 for dnstr, dsa in site.dsa_table.items())

        self.plot_all_connections('dsa_initial')

        # Initial repsFrom for the local DSA only
        current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
        for dnstr, c_rep in current_rep_table.items():
            DEBUG("c_rep %s" % c_rep)
            dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))

        verify_and_dot('dsa_repsFrom_initial', dot_edges, directed=True, label=self.my_dsa_dnstr,
                       properties=(), debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

        # Initial repsFrom across all DSAs in all sites
        for site in self.site_table.values():
            for dsa in site.dsa_table.values():
                current_rep_table, needed_rep_table = dsa.get_rep_tables()
                for dn_str, rep in current_rep_table.items():
                    for reps_from in rep.rep_repsFrom:
                        DEBUG("rep %s" % rep)
                        dsa_dn = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                        dot_edges.append((dsa.dsa_dnstr, dsa_dn))

        verify_and_dot('dsa_repsFrom_initial_all', dot_edges, directed=True, label=self.my_dsa_dnstr,
                       properties=(), debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

        # Site links are undirected: draw an edge for every site pair
        for link in self.sitelink_table.values():
            for a, b in itertools.combinations(link.site_list, 2):
                dot_edges.append((str(a), str(b)))
        verify_properties = ('connected',)
        verify_and_dot('dsa_sitelink_initial', dot_edges, directed=False, label=self.my_dsa_dnstr,
                       properties=verify_properties, debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

    if forget_local_links:
        # Keep only RODC-topology connections in the local site
        for dsa in self.my_site.dsa_table.values():
            dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                 if v.is_rodc_topology()}
        self.plot_all_connections('dsa_forgotten_local')

    if forget_intersite_links:
        # Keep only local-site RODC-topology connections everywhere
        for site in self.site_table.values():
            for dsa in site.dsa_table.values():
                dsa.connect_table = {k:v for k, v in dsa.connect_table.items()
                                     if site is self.my_site and v.is_rodc_topology()}

        self.plot_all_connections('dsa_forgotten_all')
    # These are the published steps (in order) for the
    # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
    self.refresh_failed_links_connections()
    all_connected = self.intersite()
    self.remove_unneeded_ntdsconn(all_connected)
    self.translate_ntdsconn()
    self.remove_unneeded_failed_links_connections()
    self.update_rodc_connection()

    if opts.verify or opts.dot_files:
        self.plot_all_connections('dsa_final', ('connected', 'forest_of_rings'))

        DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))

        # Final needed repsFrom for the local DSA; edges are coloured
        # by (the first 6 hex chars of) the NC GUID they replicate
        my_dnstr = self.my_dsa.dsa_dnstr
        current_rep_table, needed_rep_table = self.my_dsa.get_rep_tables()
        for dnstr, n_rep in needed_rep_table.items():
            for reps_from in n_rep.rep_repsFrom:
                guid_str = str(reps_from.source_dsa_obj_guid)
                dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
                edge_colors.append('#' + str(n_rep.nc_guid)[:6])

        verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True, label=self.my_dsa_dnstr,
                       properties=(), debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files, edge_colors=edge_colors)

        # Final needed repsFrom across all DSAs in all sites
        for site in self.site_table.values():
            for dsa in site.dsa_table.values():
                current_rep_table, needed_rep_table = dsa.get_rep_tables()
                for n_rep in needed_rep_table.values():
                    for reps_from in n_rep.rep_repsFrom:
                        dsa_dn = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                        dot_edges.append((dsa.dsa_dnstr, dsa_dn))

        verify_and_dot('dsa_repsFrom_final_all', dot_edges, directed=True, label=self.my_dsa_dnstr,
                       properties=(), debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)
def import_ldif(self, dburl, lp, creds, ldif_file):
    """Import all objects and attributes that are relevent
    to the KCC algorithms from a previously exported LDIF file.

    The point of this function is to allow a programmer/debugger to
    import an LDIF file with non-security relevent information that
    was previously extracted from a DC database. The LDIF file is used
    to create a temporary abbreviated database. The KCC algorithm can
    then run against this abbreviated database for debug or test
    verification that the topology generated is computationally the
    same between different OSes and algorithms.

    :param dburl: path to the temporary abbreviated db to create
    :param ldif_file: path to the ldif file to import
    """
    # NOTE(review): the enclosing 'try:' line and the error-handling /
    # return lines are elided in this excerpt
    self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, creds, ldif_file,
                                          opts.forced_local_dsa)
    except ldif_utils.LdifError, e:
def export_ldif(self, dburl, lp, creds, ldif_file):
    """Routine to extract all objects and attributes that are relevent
    to the KCC algorithms from a DC database.

    The point of this function is to allow a programmer/debugger to
    extract an LDIF file with non-security relevent information from
    a DC database. The LDIF file can then be used to "import" via
    the import_ldif() function this file into a temporary abbreviated
    database. The KCC algorithm can then run against this abbreviated
    database for debug or test verification that the topology generated
    is computationally the same between different OSes and algorithms.

    :param dburl: LDAP database URL to extract info from
    :param ldif_file: output LDIF file name to create
    """
    # NOTE(review): the enclosing 'try:' line and the error-handling /
    # return lines are elided in this excerpt
    ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds, ldif_file)
    except ldif_utils.LdifError, e:
2662 ##################################################
2664 ##################################################
def sort_replica_by_dsa_guid(rep1, rep2):
    """cmp()-style comparator: order NC replicas by the NDR-packed
    bytes of the GUID of the DSA each replica lives on."""
    packed1 = ndr_pack(rep1.rep_dsa_guid)
    packed2 = ndr_pack(rep2.rep_dsa_guid)
    return cmp(packed1, packed2)
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    # cmp()-style comparator: GC-capable DSAs sort ahead of non-GC
    # ones; ties are broken by the NDR-packed DSA GUID.
    # NOTE(review): the return statements of the two GC branches are
    # elided in this excerpt.
    if dsa1.is_gc() and not dsa2.is_gc():
    if not dsa1.is_gc() and dsa2.is_gc():
    return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
def is_smtp_replication_available():
    """Currently always returns false because Samba
    doesn't implement SMTP transfer for NC changes
    """
    # NOTE(review): the 'return False' line is elided in this excerpt
def create_edge(con_type, site_link, guid_to_vertex):
    # Build a multi-edge for *site_link*: attach every known vertex
    # whose site GUID appears in the link's site_list and copy the
    # link's replication parameters (cost, options, interval, schedule)
    # onto the edge.
    # NOTE(review): the creation of 'e' and the final 'return e' are
    # elided in this excerpt.
    e.site_link = site_link
    for site_guid in site_link.site_list:
        if str(site_guid) in guid_to_vertex:
            e.vertices.extend(guid_to_vertex.get(str(site_guid)))
    e.repl_info.cost = site_link.cost
    e.repl_info.options = site_link.options
    e.repl_info.interval = site_link.interval
    e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
    e.con_type = con_type
def create_auto_edge_set(graph, transport):
    # Gather all graph edges of the given transport type into a single
    # MultiEdgeSet that is not backed by any siteLinkBridge object.
    # NOTE(review): the 'return e_set' line is elided in this excerpt.
    e_set = MultiEdgeSet()
    e_set.guid = misc.GUID() # NULL guid, not associated with a SiteLinkBridge object
    for site_link in graph.edges:
        if site_link.con_type == transport:
            e_set.edges.append(site_link)
def create_edge_set(graph, transport, site_link_bridge):
    # Placeholder: would build a MultiEdgeSet from a siteLinkBridge.
    # TODO not implemented - need to store all site link bridges
    e_set = MultiEdgeSet()
    # e_set.guid = site_link_bridge
def setup_vertices(graph):
    # Reset per-vertex pathfinding state before a Dijkstra/Kruskal run.
    # NOTE(review): an 'if v.is_white():' / 'else:' split and the root
    # assignments are elided in this excerpt — the two cost assignments
    # below originally belong to different branches.
    for v in graph.vertices:
        v.repl_info.cost = MAX_DWORD
        v.component_id = None
        v.repl_info.cost = 0
        v.repl_info.interval = 0
        v.repl_info.options = 0xFFFFFFFF
        v.repl_info.schedule = None # TODO highly suspicious
def dijkstra(graph, edge_type, include_black):
    # Dijkstra sweep over the site graph: repeatedly pop the cheapest
    # (cost, guid, vertex) entry and try to relax a path through each
    # of its edges to each neighbouring vertex.
    # NOTE(review): the queue initialisation and an inner condition are
    # elided in this excerpt.
    setup_dijkstra(graph, edge_type, include_black, queue)
    while len(queue) > 0:
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for v in edge.vertices:
                # add new path from vertex to v
                try_new_path(graph, queue, vertex, edge, v)
def setup_dijkstra(graph, edge_type, include_black, queue):
    # Reset vertex state, then seed the priority queue with every
    # non-white vertex. Vertices that cannot accept this edge type
    # (or are black when black is excluded) get demoted to infinite
    # cost before being queued.
    # NOTE(review): the 'continue' for white vertices is elided in
    # this excerpt.
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():

        if ((vertex.is_black() and not include_black)
            or edge_type not in vertex.accept_black
            or edge_type not in vertex.accept_red_red):
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None # NULL GUID
            vertex.demoted = True # Demoted appears not to be used
        heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
def try_new_path(graph, queue, vfrom, edge, vto):
    # Relaxation step: accept the path vfrom->edge->vto when it is
    # strictly cheaper, or equal-cost with a longer usable schedule.
    # What this function checks is that there is a valid time frame for
    # which replication can actually occur, despite being adequately
    # connected (the combined repl_info must intersect).
    # NOTE(review): the creation of newRI and the early 'return's for
    # the two rejection cases are elided in this excerpt.
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:

    if newRI.cost < vto.repl_info.cost and not intersect:

    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
def check_demote_vertex(vertex, edge_type):
    # Demote (set infinite cost on) a vertex that can accept neither
    # red-red nor black edges of this type; white vertices are skipped.
    # NOTE(review): the early 'return' for white vertices and the
    # 'vertex.root = None' line are elided in this excerpt.
    if vertex.is_white():

    # Accepts neither red-red nor black edges, demote
    if edge_type not in vertex.accept_black and edge_type not in vertex.accept_red_red:
        vertex.repl_info.cost = MAX_DWORD
        vertex.demoted = True # Demoted appears not to be used
def undemote_vertex(vertex):
    # Reverse check_demote_vertex(): restore zero cost and make the
    # vertex its own root again; white vertices are skipped.
    # NOTE(review): the early 'return' for white vertices is elided in
    # this excerpt.
    if vertex.is_white():

    vertex.repl_info.cost = 0
    vertex.root = vertex
    vertex.demoted = False
def process_edge_set(graph, e_set, internal_edges):
    # Process either every edge of the whole graph (with temporary
    # demotion of vertices that reject the edge's connection type), or
    # just the edges of the given edge set.
    # NOTE(review): an 'if e_set is None:' / 'else:' split between the
    # two loops below is elided in this excerpt — they are alternative
    # branches, not sequential.
    for edge in graph.edges:
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        for vertex in edge.vertices:
            undemote_vertex(vertex)

    for edge in e_set.edges:
        process_edge(graph, edge, internal_edges)
def process_edge(graph, examine, internal_edges):
    # Find the set of all vertices touches the edge to examine
    # NOTE(review): the 'vertices' list initialisation, the sort call,
    # and the 'continue' after the None check are elided in this
    # excerpt.
    for v in examine.vertices:
        # Append a 4-tuple of color, repl cost, guid and vertex
        vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v))
    # Sort by color, lower
    DEBUG("vertices is %s" % vertices)

    # Best vertex is the first after sorting
    color, cost, guid, bestv = vertices[0]
    # Add to internal edges an edge from every colored vertex to bestV
    for v in examine.vertices:
        if v.component_id is None or v.root is None:

        # Only add edge if valid inter-tree edge - needs a root and
        # different components
        if (bestv.component_id is not None and bestv.root is not None
            and v.component_id is not None and v.root is not None and
            bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
# Add internal edge, endpoints are roots of the vertices to pass in and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    # NOTE(review): the root1/root2 lookups, the red_red flag
    # computation and several early 'return's (rejected connection
    # types, failed repl_info combinations) are elided in this excerpt.
    if root1.is_red() and root2.is_red():

    # Reject the edge if either root refuses this connection type
    if (examine.con_type not in root1.accept_red_red
        or examine.con_type not in root2.accept_red_red):

    if (examine.con_type not in root1.accept_black
        or examine.con_type not in root2.accept_black):

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):

    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type, examine.site_link)
    # Order by vertex guid
    #XXX guid comparison using ndr_pack
    if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)
def kruskal(graph, edges):
    # Kruskal's minimum spanning tree over the internal edges: merge
    # components until the edges are exhausted, returning the chosen
    # output edges and the number of components left.
    # NOTE(review): several lines are elided in this excerpt (per-vertex
    # edge-list reset, num_components, the edge sort, output_edges and
    # index initialisation, 'e = edges[index]', and loop increments).
    for v in graph.vertices:

    # Every non-white vertex starts as its own component
    components = set([x for x in graph.vertices if not x.is_white()])

    # Sorted based on internal comparison function of internal edge
    expected_num_tree_edges = 0 # TODO this value makes little sense

    while index < len(edges): # TODO and num_components > 1
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            # Union: merge parent1's component into parent2's
            add_out_edge(graph, output_edges, e)
            parent1.component_id = parent2
            components.discard(parent1)

    return output_edges, len(components)
def find_component(vertex):
    # Union-find "find" with path compression: walk the component_id
    # chain to the root, then repoint every visited vertex at the root.
    # NOTE(review): the early 'return vertex', the 'current'/'root'
    # initialisations, the 'current = n' step and the final
    # 'return root' are elided in this excerpt.
    if vertex.component_id is vertex:

    while current.component_id is not current:
        current = current.component_id

    while current.component_id is not root:
        n = current.component_id
        current.component_id = root
def add_out_edge(graph, output_edges, e):
    # Turn an accepted internal edge into an output multi-edge joining
    # the two endpoint vertices, carrying the edge's connection type,
    # repl_info and originating site link.
    # NOTE(review): the v1/v2 extraction and the 'ee = MultiEdge()'
    # construction are elided in this excerpt.
    # This multi-edge is a 'real' edge with no GUID
    ee.site_link = e.site_link
    ee.vertices.append(v1)
    ee.vertices.append(v2)
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)
def test_all_reps_from(lp, creds):
    """Run the full KCC once per DSA (forcing each as the local DSA in
    turn), collect the resulting repsFrom edges per partition, then
    verify and/or plot the 'needed' and 'current' graphs plus all
    nTDSConnections.

    NOTE(review): the kcc object construction, several list/dict
    initialisations (guid_to_dnstr, needed_parts, current_parts,
    dot_edges, dot_vertices, colours) and the per-DSA loop header are
    elided in this excerpt.
    """
    kcc.load_samdb(opts.dburl, lp, creds)
    dsas = kcc.list_dsas()

    # GUID -> DN map for labelling edges
    for site in kcc.site_table.values():
        guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                             for dnstr, dsa in site.dsa_table.items())

    kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa,
            forget_local_links=opts.forget_local_links,
            forget_intersite_links=opts.forget_intersite_links)
    current, needed = kcc.my_dsa.get_rep_tables()

    # Accumulate (source, dest) repsFrom edges per partition, for both
    # the needed and the current tables
    for name, rep_table, rep_parts in (('needed', needed, needed_parts),
                                       ('current', current, current_parts)):
        for part, nc_rep in rep_table.items():
            edges = rep_parts.setdefault(part, [])
            for reps_from in nc_rep.rep_repsFrom:
                source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
                edges.append((source, dest))

    for site in kcc.site_table.values():
        for dsa in site.dsa_table.values():
            dot_vertices.append(dsa.dsa_dnstr)
            for con in dsa.connect_table.values():
                # red for RODC-topology connections
                if con.is_rodc_topology():
                    colours.append('red')
                # NOTE(review): an elided 'else:' appears to guard this
                # 'blue' append — confirm in the full source
                    colours.append('blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    for name, rep_parts in (('needed', needed_parts), ('current', current_parts)):
        for part, edges in rep_parts.items():
            verify_and_dot('repsFrom_%s_all_%s' % (name, part), edges, directed=True, label=part,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)

    verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
                   label="all dsa NTDSConnections", properties=(),
                   debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
                   directed=True, edge_colors=colours)
# Module-level logger; messages go to stdout so they end up in the
# samba logs (see the PYTHONUNBUFFERED setting at the top of the file).
logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))
# Shorthand used throughout the file for debug-level messages
DEBUG = logger.debug
def _colour_debug(*args, **kwargs):
    """Emit a debug message wrapped in the ANSI colour escape passed as
    the 'colour' keyword, resetting to C_NORMAL afterwards. Any extra
    positional args are forwarded as lazy %-style arguments to DEBUG."""
    colour = kwargs['colour']
    message = '%s%s%s' % (colour, args[0], C_NORMAL)
    DEBUG(message, *args[1:])
_globals = globals()
# Generate DEBUG_RED, DEBUG_BLUE, ... helpers — one per ANSI colour
# constant (made available by the star imports above) — by currying
# _colour_debug with the matching colour escape.
for _colour in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
                'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
                'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
    _globals['DEBUG_' + _colour] = partial(_colour_debug, colour=_globals[_colour])
3001 ##################################################
3002 # samba_kcc entry point
3003 ##################################################
# Build the command-line interface (optparse, matching the other
# Python 2 samba tools)
parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly", default=False,
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--verify",
                  help="verify that assorted invariants are kept",
                  action="store_true")

parser.add_option("--list-verify-tests",
                  help="list what verification actions are available and do nothing else",
                  action="store_true")

parser.add_option("--no-dot-files", dest='dot_files',
                  help="Don't write dot graph files in /tmp",
                  default=True, action="store_false")

parser.add_option("--seed",
                  help="random number seed",

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL" ,
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

parser.add_option("--now",
                  help="assume current time is this ('YYYYmmddHHMMSS[tz]', default: system time)",
                  type=str, metavar="<date>")

parser.add_option("--forced-local-dsa",
                  help="run calculations assuming the DSA is this DN",
                  type=str, metavar="<DSA>")

parser.add_option("--attempt-live-connections", default=False,
                  help="Attempt to connect to other DSAs to test links",
                  action="store_true")

parser.add_option("--list-valid-dsas", default=False,
                  help="Print a list of DSA dnstrs that could be used in --forced-local-dsa",
                  action="store_true")

parser.add_option("--test-all-reps-from", default=False,
                  help="Create and verify a graph of reps-from for every DSA",
                  action="store_true")

parser.add_option("--forget-local-links", default=False,
                  help="pretend not to know the existing local topology",
                  action="store_true")

parser.add_option("--forget-intersite-links", default=False,
                  help="pretend not to know the existing intersite topology",
                  action="store_true")

opts, args = parser.parse_args()

if opts.list_verify_tests:

# NOTE(review): the conditions selecting these log levels (presumably
# opts.debug / opts.verify) are elided in this excerpt — only one of
# the three setLevel calls runs per invocation.
logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter
random.seed(opts.seed)
# Fixed fallback seed keeps runs reproducible when no seed is given
random.seed(0xACE5CA11)

# Accept --now with or without a trailing timezone name
for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
    now_tuple = time.strptime(opts.now, timeformat)
# else happens if break doesn't --> no match
print >> sys.stderr, "could not parse time '%s'" % opts.now

unix_now = int(time.mktime(now_tuple))
unix_now = int(time.time())

nt_now = unix2nttime(unix_now)

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

if opts.dburl is None:
    opts.dburl = lp.samdb_url()

if opts.test_all_reps_from:
    # The per-DSA test run must never write back to the database
    opts.readonly = True
    test_all_reps_from(lp, creds)

# Instantiate Knowledge Consistency Checker and perform run
rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)

if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
    logger.error("Specify a target temp database file with --tmpdb option.")
rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)

if opts.list_valid_dsas:
    kcc.load_samdb(opts.dburl, lp, creds)
    print '\n'.join(kcc.list_dsas())

# NOTE(review): an elided 'try:' appears to wrap this final run (the
# matching 'except GraphError' follows below) — confirm in full source
rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
             opts.forget_local_links, opts.forget_intersite_links)
3157 except GraphError, e: