_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33300
BasePickerInput.format_js2py
train
def format_js2py(cls, datetime_format): """Convert moment datetime format to python datetime format.""" for js_format, py_format in cls.format_map: datetime_format = datetime_format.replace(js_format, py_format) return datetime_format
python
{ "resource": "" }
q33301
BasePickerInput._calculate_options
train
def _calculate_options(self): """Calculate and Return the options.""" _options = self._default_options.copy() _options.update(self.options) if self.options_param: _options.update(self.options_param) return _options
python
{ "resource": "" }
q33302
BasePickerInput._calculate_format
train
def _calculate_format(self): """Calculate and Return the datetime format.""" _format = self.format_param if self.format_param else self.format if self.config['options'].get('format'): _format = self.format_js2py(self.config['options'].get('format')) else: self.config['options']['format'] = self.format_py2js(_format) return _format
python
{ "resource": "" }
q33303
BasePickerInput.get_context
train
def get_context(self, name, value, attrs): """Return widget context dictionary.""" context = super().get_context( name, value, attrs) context['widget']['attrs']['dp_config'] = json_dumps(self.config) return context
python
{ "resource": "" }
q33304
BasePickerInput.end_of
train
def end_of(self, event_id, import_options=True): """ Set Date-Picker as the end-date of a date-range. Args: - event_id (string): User-defined unique id for linking two fields - import_options (bool): inherit options from start-date input, default: TRUE """ event_id = str(event_id) if event_id in DatePickerDictionary.items: linked_picker = DatePickerDictionary.items[event_id] self.config['linked_to'] = linked_picker.config['id'] if import_options: backup_moment_format = self.config['options']['format'] self.config['options'].update(linked_picker.config['options']) self.config['options'].update(self.options_param) if self.format_param or 'format' in self.options_param: self.config['options']['format'] = backup_moment_format else: self.format = linked_picker.format # Setting useCurrent is necessary, see following issue # https://github.com/Eonasdan/bootstrap-datetimepicker/issues/1075 self.config['options']['useCurrent'] = False self._link_to(linked_picker) else: raise KeyError( 'start-date not specified for event_id "%s"' % event_id) return self
python
{ "resource": "" }
q33305
get_base_input
train
def get_base_input(test=False): """ Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions. """ from django.forms.widgets import DateTimeBaseInput if 'get_context' in dir(DateTimeBaseInput) and not test: # django version 1.11 and above base_input = DateTimeBaseInput else: # django version below 1.11 from bootstrap_datepicker_plus._compatibility import ( CompatibleDateTimeBaseInput ) base_input = CompatibleDateTimeBaseInput return base_input
python
{ "resource": "" }
q33306
split_lines
train
def split_lines(source, maxline=79): """Split inputs according to lines. If a line is short enough, just yield it. Otherwise, fix it. """ result = [] extend = result.extend append = result.append line = [] multiline = False count = 0 find = str.find for item in source: index = find(item, '\n') if index: line.append(item) multiline = index > 0 count += len(item) else: if line: if count <= maxline or multiline: extend(line) else: wrap_line(line, maxline, result) count = 0 multiline = False line = [] append(item) return result
python
{ "resource": "" }
q33307
wrap_line
train
def wrap_line(line, maxline=79, result=[], count=count): """ We have a line that is too long, so we're going to try to wrap it. """ # Extract the indentation append = result.append extend = result.extend indentation = line[0] lenfirst = len(indentation) indent = lenfirst - len(indentation.lstrip()) assert indent in (0, lenfirst) indentation = line.pop(0) if indent else '' # Get splittable/non-splittable groups dgroups = list(delimiter_groups(line)) unsplittable = dgroups[::2] splittable = dgroups[1::2] # If the largest non-splittable group won't fit # on a line, try to add parentheses to the line. if max(count(x) for x in unsplittable) > maxline - indent: line = add_parens(line, maxline, indent) dgroups = list(delimiter_groups(line)) unsplittable = dgroups[::2] splittable = dgroups[1::2] # Deal with the first (always unsplittable) group, and # then set up to deal with the remainder in pairs. first = unsplittable[0] append(indentation) extend(first) if not splittable: return result pos = indent + count(first) indentation += ' ' indent += 4 if indent >= maxline/2: maxline = maxline/2 + indent for sg, nsg in zip(splittable, unsplittable[1:]): if sg: # If we already have stuff on the line and even # the very first item won't fit, start a new line if pos > indent and pos + len(sg[0]) > maxline: append('\n') append(indentation) pos = indent # Dump lines out of the splittable group # until the entire thing fits csg = count(sg) while pos + csg > maxline: ready, sg = split_group(sg, pos, maxline) if ready[-1].endswith(' '): ready[-1] = ready[-1][:-1] extend(ready) append('\n') append(indentation) pos = indent csg = count(sg) # Dump the remainder of the splittable group if sg: extend(sg) pos += csg # Dump the unsplittable group, optionally # preceded by a linefeed. cnsg = count(nsg) if pos > indent and pos + cnsg > maxline: append('\n') append(indentation) pos = indent extend(nsg) pos += cnsg
python
{ "resource": "" }
q33308
split_group
train
def split_group(source, pos, maxline): """ Split a group into two subgroups. The first will be appended to the current line, the second will start the new line. Note that the first group must always contain at least one item. The original group may be destroyed. """ first = [] source.reverse() while source: tok = source.pop() first.append(tok) pos += len(tok) if source: tok = source[-1] allowed = (maxline + 1) if tok.endswith(' ') else (maxline - 4) if pos + len(tok) > allowed: break source.reverse() return first, source
python
{ "resource": "" }
q33309
delimiter_groups
train
def delimiter_groups(line, begin_delim=begin_delim, end_delim=end_delim): """Split a line into alternating groups. The first group cannot have a line feed inserted, the next one can, etc. """ text = [] line = iter(line) while True: # First build and yield an unsplittable group for item in line: text.append(item) if item in begin_delim: break if not text: break yield text # Now build and yield a splittable group level = 0 text = [] for item in line: if item in begin_delim: level += 1 elif item in end_delim: level -= 1 if level < 0: yield text text = [item] break text.append(item) else: assert not text, text break
python
{ "resource": "" }
q33310
add_parens
train
def add_parens(line, maxline, indent, statements=statements, count=count): """Attempt to add parentheses around the line in order to make it splittable. """ if line[0] in statements: index = 1 if not line[0].endswith(' '): index = 2 assert line[1] == ' ' line.insert(index, '(') if line[-1] == ':': line.insert(-1, ')') else: line.append(')') # That was the easy stuff. Now for assignments. groups = list(get_assign_groups(line)) if len(groups) == 1: # So sad, too bad return line counts = list(count(x) for x in groups) didwrap = False # If the LHS is large, wrap it first if sum(counts[:-1]) >= maxline - indent - 4: for group in groups[:-1]: didwrap = False # Only want to know about last group if len(group) > 1: group.insert(0, '(') group.insert(-1, ')') didwrap = True # Might not need to wrap the RHS if wrapped the LHS if not didwrap or counts[-1] > maxline - indent - 10: groups[-1].insert(0, '(') groups[-1].append(')') return [item for group in groups for item in group]
python
{ "resource": "" }
q33311
_prep_triple_quotes
train
def _prep_triple_quotes(s, mysplit=mysplit, replacements=replacements): """ Split the string up and force-feed some replacements to make sure it will round-trip OK """ s = mysplit(s) s[1::2] = (replacements[x] for x in s[1::2]) return ''.join(s)
python
{ "resource": "" }
q33312
pretty_string
train
def pretty_string(s, embedded, current_line, uni_lit=False, min_trip_str=20, max_line=100): """There are a lot of reasons why we might not want to or be able to return a triple-quoted string. We can always punt back to the default normal string. """ default = repr(s) # Punt on abnormal strings if (isinstance(s, special_unicode) or not isinstance(s, basestring)): return default if uni_lit and isinstance(s, bytes): return 'b' + default len_s = len(default) if current_line.strip(): len_current = len(current_line) second_line_start = s.find('\n') + 1 if embedded > 1 and not second_line_start: return default if len_s < min_trip_str: return default line_indent = len_current - len(current_line.lstrip()) # Could be on a line by itself... if embedded and not second_line_start: return default total_len = len_current + len_s if total_len < max_line and not _properly_indented(s, line_indent): return default fancy = string_triplequote_repr(s) # Sometimes this doesn't work. One reason is that # the AST has no understanding of whether \r\n was # entered that way in the string or was a cr/lf in the # file. So we punt just so we can round-trip properly. try: if eval(fancy) == s and '\r' not in fancy: return fancy except: pass return default
python
{ "resource": "" }
q33313
TreeWalk.setup
train
def setup(self): """All the node-specific handlers are setup at object initialization time. """ self.pre_handlers = pre_handlers = {} self.post_handlers = post_handlers = {} for name in sorted(vars(type(self))): if name.startswith('init_'): getattr(self, name)() elif name.startswith('pre_'): pre_handlers[name[4:]] = getattr(self, name) elif name.startswith('post_'): post_handlers[name[5:]] = getattr(self, name)
python
{ "resource": "" }
q33314
TreeWalk.walk
train
def walk(self, node, name='', list=list, len=len, type=type): """Walk the tree starting at a given node. Maintain a stack of nodes. """ pre_handlers = self.pre_handlers.get post_handlers = self.post_handlers.get nodestack = self.nodestack emptystack = len(nodestack) append, pop = nodestack.append, nodestack.pop append([node, name, list(iter_node(node, name + '_item')), -1]) while len(nodestack) > emptystack: node, name, subnodes, index = nodestack[-1] if index >= len(subnodes): handler = (post_handlers(type(node).__name__) or post_handlers(name + '_name')) if handler is None: pop() continue self.cur_node = node self.cur_name = name handler() current = nodestack and nodestack[-1] popstack = current and current[0] is node if popstack and current[-1] >= len(current[-2]): pop() continue nodestack[-1][-1] = index + 1 if index < 0: handler = (pre_handlers(type(node).__name__) or pre_handlers(name + '_name')) if handler is not None: self.cur_node = node self.cur_name = name if handler(): pop() else: node, name = subnodes[index] append([node, name, list(iter_node(node, name + '_item')), -1])
python
{ "resource": "" }
q33315
TreeWalk.replace
train
def replace(self, new_node): """Replace a node after first checking integrity of node stack.""" cur_node = self.cur_node nodestack = self.nodestack cur = nodestack.pop() prev = nodestack[-1] index = prev[-1] - 1 oldnode, name = prev[-2][index] assert cur[0] is cur_node is oldnode, (cur[0], cur_node, prev[-2], index) parent = prev[0] if isinstance(parent, list): parent[index] = new_node else: setattr(parent, name, new_node)
python
{ "resource": "" }
q33316
strip_tree
train
def strip_tree(node, # Runtime optimization iter_node=iter_node, special=ast.AST, list=list, isinstance=isinstance, type=type, len=len): """Strips an AST by removing all attributes not in _fields. Returns a set of the names of all attributes stripped. This canonicalizes two trees for comparison purposes. """ stripped = set() def strip(node, indent): unknown = set() leaf = True for subnode, _ in iter_node(node, unknown=unknown): leaf = False strip(subnode, indent + ' ') if leaf: if isinstance(node, special): unknown = set(vars(node)) stripped.update(unknown) for name in unknown: delattr(node, name) if hasattr(node, 'ctx'): delattr(node, 'ctx') if 'ctx' in node._fields: mylist = list(node._fields) mylist.remove('ctx') node._fields = mylist strip(node, '') return stripped
python
{ "resource": "" }
q33317
fast_compare
train
def fast_compare(tree1, tree2): """ This is optimized to compare two AST trees for equality. It makes several assumptions that are currently true for AST trees used by rtrip, and it doesn't examine the _attributes. """ geta = ast.AST.__getattribute__ work = [(tree1, tree2)] pop = work.pop extend = work.extend # TypeError in cPython, AttributeError in PyPy exception = TypeError, AttributeError zipl = zip_longest type_ = type list_ = list while work: n1, n2 = pop() try: f1 = geta(n1, '_fields') f2 = geta(n2, '_fields') except exception: if type_(n1) is list_: extend(zipl(n1, n2)) continue if n1 == n2: continue return False else: f1 = [x for x in f1 if x != 'ctx'] if f1 != [x for x in f2 if x != 'ctx']: return False extend((geta(n1, fname), geta(n2, fname)) for fname in f1) return True
python
{ "resource": "" }
q33318
get_op_symbol
train
def get_op_symbol(obj, fmt='%s', symbol_data=symbol_data, type=type): """Given an AST node object, returns a string containing the symbol. """ return fmt % symbol_data[type(obj)]
python
{ "resource": "" }
q33319
CodeToAst.find_py_files
train
def find_py_files(srctree, ignore=None): """Return all the python files in a source tree Ignores any path that contains the ignore string This is not used by other class methods, but is designed to be used in code that uses this class. """ if not os.path.isdir(srctree): yield os.path.split(srctree) for srcpath, _, fnames in os.walk(srctree): # Avoid infinite recursion for silly users if ignore is not None and ignore in srcpath: continue for fname in (x for x in fnames if x.endswith('.py')): yield srcpath, fname
python
{ "resource": "" }
q33320
CodeToAst.parse_file
train
def parse_file(fname): """Parse a python file into an AST. This is a very thin wrapper around ast.parse TODO: Handle encodings other than the default for Python 2 (issue #26) """ try: with fopen(fname) as f: fstr = f.read() except IOError: if fname != 'stdin': raise sys.stdout.write('\nReading from stdin:\n\n') fstr = sys.stdin.read() fstr = fstr.replace('\r\n', '\n').replace('\r', '\n') if not fstr.endswith('\n'): fstr += '\n' return ast.parse(fstr, filename=fname)
python
{ "resource": "" }
q33321
CodeToAst.get_file_info
train
def get_file_info(codeobj): """Returns the file and line number of a code object. If the code object has a __file__ attribute (e.g. if it is a module), then the returned line number will be 0 """ fname = getattr(codeobj, '__file__', None) linenum = 0 if fname is None: func_code = codeobj.__code__ fname = func_code.co_filename linenum = func_code.co_firstlineno fname = fname.replace('.pyc', '.py') return fname, linenum
python
{ "resource": "" }
q33322
validate_token_age
train
def validate_token_age(callback_token): """ Returns True if a given token is within the age expiration limit. """ try: token = CallbackToken.objects.get(key=callback_token, is_active=True) seconds = (timezone.now() - token.created_at).total_seconds() token_expiry_time = api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME if seconds <= token_expiry_time: return True else: # Invalidate our token. token.is_active = False token.save() return False except CallbackToken.DoesNotExist: # No valid token. return False
python
{ "resource": "" }
q33323
verify_user_alias
train
def verify_user_alias(user, token): """ Marks a user's contact point as verified depending on accepted token type. """ if token.to_alias_type == 'EMAIL': if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME): setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True) elif token.to_alias_type == 'MOBILE': if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME): setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True) else: return False user.save() return True
python
{ "resource": "" }
q33324
send_email_with_callback_token
train
def send_email_with_callback_token(user, email_token, **kwargs): """ Sends a Email to user.email. Passes silently without sending in test environment """ try: if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS: # Make sure we have a sending address before sending. # Get email subject and message email_subject = kwargs.get('email_subject', api_settings.PASSWORDLESS_EMAIL_SUBJECT) email_plaintext = kwargs.get('email_plaintext', api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE) email_html = kwargs.get('email_html', api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME) # Inject context if user specifies. context = inject_template_context({'callback_token': email_token.key, }) html_message = loader.render_to_string(email_html, context,) send_mail( email_subject, email_plaintext % email_token.key, api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS, [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)], fail_silently=False, html_message=html_message,) else: logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.") return False return True except Exception as e: logger.debug("Failed to send token email to user: %d." "Possibly no email on user object. Email entered was %s" % (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME))) logger.debug(e) return False
python
{ "resource": "" }
q33325
send_sms_with_callback_token
train
def send_sms_with_callback_token(user, mobile_token, **kwargs): """ Sends a SMS to user.mobile via Twilio. Passes silently without sending in test environment. """ base_string = kwargs.get('mobile_message', api_settings.PASSWORDLESS_MOBILE_MESSAGE) try: if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER: # We need a sending number to send properly if api_settings.PASSWORDLESS_TEST_SUPPRESSION is True: # we assume success to prevent spamming SMS during testing. return True from twilio.rest import Client twilio_client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN']) twilio_client.messages.create( body=base_string % mobile_token.key, to=getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME), from_=api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER ) return True else: logger.debug("Failed to send token sms. Missing PASSWORDLESS_MOBILE_NOREPLY_NUMBER.") return False except ImportError: logger.debug("Couldn't import Twilio client. Is twilio installed?") return False except KeyError: logger.debug("Couldn't send SMS." "Did you set your Twilio account tokens and specify a PASSWORDLESS_MOBILE_NOREPLY_NUMBER?") except Exception as e: logger.debug("Failed to send token SMS to user: {}. " "Possibly no mobile number on user object or the twilio package isn't set up yet. " "Number entered was {}".format(user.id, getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME))) logger.debug(e) return False
python
{ "resource": "" }
q33326
invalidate_previous_tokens
train
def invalidate_previous_tokens(sender, instance, **kwargs): """ Invalidates all previously issued tokens as a post_save signal. """ active_tokens = None if isinstance(instance, CallbackToken): active_tokens = CallbackToken.objects.active().filter(user=instance.user).exclude(id=instance.id) # Invalidate tokens if active_tokens: for token in active_tokens: token.is_active = False token.save()
python
{ "resource": "" }
q33327
check_unique_tokens
train
def check_unique_tokens(sender, instance, **kwargs): """ Ensures that mobile and email tokens are unique or tries once more to generate. """ if isinstance(instance, CallbackToken): if CallbackToken.objects.filter(key=instance.key, is_active=True).exists(): instance.key = generate_numeric_token()
python
{ "resource": "" }
q33328
update_alias_verification
train
def update_alias_verification(sender, instance, **kwargs): """ Flags a user's email as unverified if they change it. Optionally sends a verification token to the new endpoint. """ if isinstance(instance, User): if instance.id: if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED is True: """ For marking email aliases as not verified when a user changes it. """ email_field = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME email_verified_field = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME # Verify that this is an existing instance and not a new one. try: user_old = User.objects.get(id=instance.id) # Pre-save object instance_email = getattr(instance, email_field) # Incoming Email old_email = getattr(user_old, email_field) # Pre-save object email if instance_email != old_email and instance_email != "" and instance_email is not None: # Email changed, verification should be flagged setattr(instance, email_verified_field, False) if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True: email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME message_payload = {'email_subject': email_subject, 'email_plaintext': email_plaintext, 'email_html': email_html} success = TokenService.send_token(instance, 'email', **message_payload) if success: logger.info('drfpasswordless: Successfully sent email on updated address: %s' % instance_email) else: logger.info('drfpasswordless: Failed to send email to updated address: %s' % instance_email) except User.DoesNotExist: # User probably is just initially being created setattr(instance, email_verified_field, True) if api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED is True: """ For marking mobile aliases as not verified when a user changes it. 
""" mobile_field = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME mobile_verified_field = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME # Verify that this is an existing instance and not a new one. try: user_old = User.objects.get(id=instance.id) # Pre-save object instance_mobile = getattr(instance, mobile_field) # Incoming mobile old_mobile = getattr(user_old, mobile_field) # Pre-save object mobile if instance_mobile != old_mobile and instance_mobile != "" and instance_mobile is not None: # Mobile changed, verification should be flagged setattr(instance, mobile_verified_field, False) if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True: mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE message_payload = {'mobile_message': mobile_message} success = TokenService.send_token(instance, 'mobile', **message_payload) if success: logger.info('drfpasswordless: Successfully sent SMS on updated mobile: %s' % instance_mobile) else: logger.info('drfpasswordless: Failed to send SMS to updated mobile: %s' % instance_mobile) except User.DoesNotExist: # User probably is just initially being created setattr(instance, mobile_verified_field, True)
python
{ "resource": "" }
q33329
calc_dihedral
train
def calc_dihedral(point1, point2, point3, point4): """Calculates a dihedral angle Here, two planes are defined by (point1, point2, point3) and (point2, point3, point4). The angle between them is returned. Parameters ---------- point1, point2, point3, point4 : array-like, shape=(3,), dtype=float Four points that define two planes Returns ------- float The dihedral angle between the two planes defined by the four points. """ points = np.array([point1, point2, point3, point4]) x = np.cross(points[1] - points[0], points[2] - points[1]) y = np.cross(points[2] - points[1], points[3] - points[2]) return angle(x, y)
python
{ "resource": "" }
q33330
Pattern.scale
train
def scale(self, by): """Scale the points in the Pattern. Parameters ---------- by : float or np.ndarray, shape=(3,) The factor to scale by. If a scalar, scale all directions isotropically. If np.ndarray, scale each direction independently. """ self.points *= np.asarray([by]) self._adjust_ports()
python
{ "resource": "" }
q33331
Pattern.apply
train
def apply(self, compound, orientation='', compound_port=''): """Arrange copies of a Compound as specified by the Pattern. Parameters ---------- compound orientation Returns ------- """ compounds = list() if self.orientations.get(orientation): for port in self.orientations[orientation]: new_compound = clone(compound) new_port = new_compound.labels[compound_port] (new_compound, new_port['up'], port['up']) compounds.append(new_compound) else: for point in self.points: new_compound = clone(compound) new_compound.translate(point) compounds.append(new_compound) return compounds
python
{ "resource": "" }
q33332
Pattern.apply_to_compound
train
def apply_to_compound(self, guest, guest_port_name='down', host=None, backfill=None, backfill_port_name='up', scale=True): """Attach copies of a guest Compound to Ports on a host Compound. Parameters ---------- guest : mb.Compound The Compound prototype to be applied to the host Compound guest_port_name : str, optional, default='down' The name of the port located on `guest` to attach to the host host : mb.Compound, optional, default=None A Compound with available ports to add copies of `guest` to backfill : mb.Compound, optional, default=None A Compound to add to the remaining available ports on `host` after clones of `guest` have been added for each point in the pattern backfill_port_name : str, optional, default='up' The name of the port located on `backfill` to attach to the host scale : bool, optional, default=True Scale the points in the pattern to the lengths of the `host`'s `boundingbox` and shift them by the `boundingbox`'s mins Returns ------- """ n_ports = len(host.available_ports()) assert n_ports >= self.points.shape[0], "Not enough ports for pattern." assert_port_exists(guest_port_name, guest) box = host.boundingbox if scale: self.scale(box.lengths) self.points += box.mins pattern = self.points port_positions = np.empty(shape=(n_ports, 3)) port_list = list() for port_idx, port in enumerate(host.available_ports()): port_positions[port_idx, :] = port['up']['middle'].pos port_list.append(port) used_ports = set() # Keep track of used ports for backfilling. guests = [] for point in pattern: closest_point_idx = np.argmin(host.min_periodic_distance(point, port_positions)) closest_port = port_list[closest_point_idx] used_ports.add(closest_port) # Attach the guest to the closest port. new_guest = clone(guest) force_overlap(new_guest, new_guest.labels[guest_port_name], closest_port) guests.append(new_guest) # Move the port as far away as possible (simpler than removing it). # There may well be a more elegant/efficient way of doing this. 
port_positions[closest_point_idx, :] = np.array([np.inf, np.inf, np.inf]) backfills = [] if backfill: assert_port_exists(backfill_port_name, backfill) # Attach the backfilling Compound to unused ports. for port in port_list: if port not in used_ports: new_backfill = clone(backfill) # Might make sense to have a backfill_port_name option... force_overlap(new_backfill, new_backfill.labels[backfill_port_name], port) backfills.append(new_backfill) return guests, backfills
python
{ "resource": "" }
q33333
Lattice._sanitize_inputs
train
def _sanitize_inputs(self, lattice_spacing, lattice_vectors, lattice_points, angles): """Check for proper inputs and set instance attributes. validate_inputs takes the data passed to the constructor by the user and will ensure that the data is correctly formatted and will then set its instance attributes. validate_inputs checks that dimensionality is maintained, the unit cell is right handed, the area or volume of the unit cell is positive and non-zero for 2D and 3D respectively, lattice spacing are provided, basis vectors do not overlap when the unit cell is expanded. Exceptions Raised ----------------- TypeError : incorrect typing of the input parameters. ValueError : values are not within restrictions. """ if angles is not None and lattice_vectors is not None: raise ValueError('Overdefined system: angles and lattice_vectors ' 'provided. Only one of these should be passed.') self._validate_lattice_spacing(lattice_spacing) if angles is not None: self._validate_angles(angles) self.lattice_vectors = self._from_lattice_parameters(self.angles) else: self._validate_lattice_vectors(lattice_vectors) self.angles = self._from_lattice_vectors() self._validate_lattice_points(lattice_points)
python
{ "resource": "" }
q33334
Lattice._validate_lattice_spacing
train
def _validate_lattice_spacing(self, lattice_spacing): """Ensure that lattice spacing is provided and correct. _validate_lattice_spacing will ensure that the lattice spacing provided are acceptable values. Additional Numpy errors can also occur due to the conversion to a Numpy array. Exceptions Raised ----------------- ValueError : Incorrect lattice_spacing input """ dataType = np.float64 if lattice_spacing is not None: lattice_spacing = np.asarray(lattice_spacing, dtype=dataType) lattice_spacing = lattice_spacing.reshape((3,)) if np.shape(lattice_spacing) != (self.dimension,): raise ValueError('Lattice spacing should be a vector of ' 'size:({},). Please include lattice spacing ' 'of size >= 0 depending on desired ' 'dimensionality.' .format(self.dimension)) else: raise ValueError('No lattice_spacing provided. Please provide ' 'lattice spacing\'s that are >= 0. with size ({},)' .format((self.dimension))) if np.any(np.isnan(lattice_spacing)): raise ValueError('None type or NaN type values present in ' 'lattice_spacing: {}.'.format(lattice_spacing)) elif np.any(lattice_spacing < 0.0): raise ValueError('Negative lattice spacing value. One of ' 'the spacing: {} is negative.' .format(lattice_spacing)) self.lattice_spacing = lattice_spacing
python
{ "resource": "" }
q33335
Lattice._validate_angles
train
def _validate_angles(self, angles): """Ensure that the angles between the lattice_vectors are correct""" dataType = np.float64 tempAngles = np.asarray(angles, dtype=dataType) tempAngles = tempAngles.reshape((3,)) if np.shape(tempAngles) == (self.dimension,): if np.sum(tempAngles) < 360.0 or np.sum(tempAngles) > -360.0: if (np.all(tempAngles != 180.0) and np.all(tempAngles != 0.0)): pass else: raise ValueError('Angles cannot be 180.0 or 0.0') else: raise ValueError('Angles sum: {} is either greater than ' '360.0 or less than -360.0' .format(np.sum(tempAngles))) for subset in it.permutations(tempAngles, r=self.dimension): if not subset[0] < np.sum(tempAngles) - subset[0]: raise ValueError('Each angle provided must be less' 'than the sum of the other angles. ' '{} is greater.'.format(subset[0])) else: raise ValueError('Incorrect array size. When converted to a ' 'Numpy array, the shape is: {}, expected {}.' .format(np.shape(tempAngles), (3,))) self.angles = tempAngles
python
{ "resource": "" }
q33336
Lattice._validate_lattice_vectors
train
def _validate_lattice_vectors(self, lattice_vectors): """Ensure that the lattice_vectors are reasonable inputs. """ dataType = np.float64 if lattice_vectors is None: lattice_vectors = np.identity(self.dimension, dtype=dataType) else: lattice_vectors = np.asarray(lattice_vectors, dtype=dataType) if (self.dimension, self.dimension) != np.shape(lattice_vectors): raise ValueError('Dimensionality of lattice_vectors is ' ' of shape {} not {}.' .format(np.shape(lattice_vectors), (self.dimension, self.dimension))) det = np.linalg.det(lattice_vectors) if abs(det) == 0.0: raise ValueError('Co-linear vectors: {}' 'have a determinant of 0.0. Does not ' 'define a unit cell.' .format(lattice_vectors)) if det <= 0.0: raise ValueError('Negative Determinant: the determinant ' 'of {} is negative, indicating a left-' 'handed system.' .format(det)) self.lattice_vectors = lattice_vectors
python
{ "resource": "" }
q33337
Lattice._from_lattice_parameters
train
def _from_lattice_parameters(self, angles):
    """Convert Bravais lattice parameters to lattice vectors.

    _from_lattice_parameters will generate the lattice vectors based on
    the parameters necessary to build a Bravais Lattice.  The lattice
    vectors are in the lower diagonal matrix form.

    This was adapted from the ASE triclinic.py lattice parameter code.

    S. R. Bahn and K. W. Jacobsen
    An object-oriented scripting interface to a legacy electronic
    structure code Comput. Sci. Eng., Vol. 4, 56-66, 2002

    Parameters
    ----------
    angles : list-like, required
        Angles (alpha, beta, gamma, in degrees) of the bravais lattice.

    Returns
    -------
    np.ndarray, shape=(3, 3), dtype=np.float64
        Lattice vectors in lower-triangular form; the first vector is
        the unit x vector.

    Raises
    ------
    ValueError
        If the chosen parameters produce a non-positive z component.
    """
    dataType = np.float64
    (alpha, beta, gamma) = angles

    # Trigonometry is done in radians; inputs are degrees.
    radianConversion = np.pi / 180.0
    cosa = np.cos(alpha * radianConversion)
    cosb = np.cos(beta * radianConversion)
    sinb = np.sin(beta * radianConversion)
    cosg = np.cos(gamma * radianConversion)
    sing = np.sin(gamma * radianConversion)
    matCoef_y = (cosa - cosb * cosg) / sing
    matCoef_z = np.power(sinb, 2, dtype=dataType) - \
        np.power(matCoef_y, 2, dtype=dataType)

    if matCoef_z > 0.:
        matCoef_z = np.sqrt(matCoef_z)
    else:
        raise ValueError('Incorrect lattice vector coefficients.'
                         'Lattice parameters chosen return a non-positive '
                         'z vector.')

    lattice_vec = [[1, 0, 0],
                   [cosg, sing, 0],
                   [cosb, matCoef_y, matCoef_z]]
    return np.asarray(lattice_vec, dtype=np.float64)
python
{ "resource": "" }
q33338
Lattice._from_lattice_vectors
train
def _from_lattice_vectors(self):
    """Calculate the angles between the vectors that define the lattice.

    _from_lattice_vectors will calculate the angles alpha, beta, and
    gamma from the Lattice object attribute lattice_vectors.

    Returns
    -------
    np.ndarray, shape=(3,), dtype=np.float64
        The angles (alpha, beta, gamma) in degrees.
    """
    degreeConvsersion = 180.0 / np.pi
    vector_magnitudes = np.linalg.norm(self.lattice_vectors, axis=1)

    a_dot_b = np.dot(self.lattice_vectors[0], self.lattice_vectors[1])
    b_dot_c = np.dot(self.lattice_vectors[1], self.lattice_vectors[2])
    a_dot_c = np.dot(self.lattice_vectors[0], self.lattice_vectors[2])

    # cos(angle) = u.v / (|u| |v|) for each vector pair.
    alpha_raw = b_dot_c / (vector_magnitudes[1] * vector_magnitudes[2])
    beta_raw = a_dot_c / (vector_magnitudes[0] * vector_magnitudes[2])
    gamma_raw = a_dot_b / (vector_magnitudes[0] * vector_magnitudes[1])

    # np.clip guards against |cos| marginally exceeding 1.0 from
    # floating-point rounding, which would make arccos return NaN.
    alpha = np.arccos(np.clip(alpha_raw, -1.0, 1.0)) * degreeConvsersion
    beta = np.arccos(np.clip(beta_raw, -1.0, 1.0)) * degreeConvsersion
    gamma = np.arccos(np.clip(gamma_raw, -1.0, 1.0)) * degreeConvsersion
    return np.asarray([alpha, beta, gamma], dtype=np.float64)
python
{ "resource": "" }
q33339
Bilayer.create_layer
train
def create_layer(self, lipid_indices=None, flip_orientation=False):
    """Create a monolayer of lipids.

    Parameters
    ----------
    lipid_indices : list, optional, default=None
        A list of indices associated with each lipid in the layer.
        If omitted, a shuffled range over the lipids in the layer is
        generated.
    flip_orientation : bool, optional, default=False
        Flip the orientation of the layer with respect to the
        z-dimension.

    Returns
    -------
    layer : mb.Compound
        The assembled monolayer.
    lipid_indices : list
        The (possibly shuffled) indices used to map lipids onto
        pattern points.
    """
    layer = mb.Compound()
    if not lipid_indices:
        lipid_indices = list(range(self.n_lipids_per_layer))
        shuffle(lipid_indices)

    # Running count of lipids placed so far across ALL lipid types.
    # BUGFIX: this was previously computed as `n_type + n_this_type`,
    # which re-used pattern indices between lipid types (e.g. the
    # second lipid of type 0 and the first lipid of type 1 both mapped
    # to index 1), stacking lipids on the same pattern point.
    lipids_placed = 0
    for n_type, n_of_lipid_type in enumerate(self.number_of_each_lipid_per_layer):
        current_type = self.lipids[n_type][0]
        for _ in range(n_of_lipid_type):
            new_lipid = clone(current_type)
            random_index = lipid_indices[lipids_placed]
            position = self.pattern[random_index]

            # Zero and space in z-direction
            particles = list(new_lipid.particles())
            ref_atom = self.ref_atoms[n_type]
            new_lipid.translate(-particles[ref_atom].pos + self.spacing)

            # Move to point on pattern
            if flip_orientation == True:
                center = new_lipid.center
                center[2] = 0.0
                new_lipid.translate(-center)
                new_lipid.rotate(np.pi, [1, 0, 0])
                new_lipid.translate(center)
            new_lipid.translate(position)
            layer.add(new_lipid)
            lipids_placed += 1
    return layer, lipid_indices
python
{ "resource": "" }
q33340
Bilayer.solvate_bilayer
train
def solvate_bilayer(self):
    """Solvate the constructed bilayer.

    Computes the number density of the pre-equilibrated solvent
    compound, sizes a box tall enough to hold ``solvent_per_layer``
    molecules per layer over the lipid box footprint, fills it with
    solvent, and adds the result to ``self.solvent_components``.

    NOTE(review): the solvent box spans ``2 * water_box_z`` in z and is
    placed entirely above ``lipid_box``; presumably half of it is
    shifted below the bilayer elsewhere -- confirm against the caller.
    """
    # Molecules per unit volume of the reference solvent box.
    solvent_number_density = self.solvent.n_particles / np.prod(self.solvent.periodicity)

    lengths = self.lipid_box.lengths
    # Height needed to hold one layer's worth of solvent at that density.
    water_box_z = self.solvent_per_layer / (lengths[0] * lengths[1] * solvent_number_density)

    mins = self.lipid_box.mins
    maxs = self.lipid_box.maxs
    bilayer_solvent_box = mb.Box(mins=[mins[0], mins[1], maxs[2]],
                                 maxs=[maxs[0], maxs[1], maxs[2] + 2 * water_box_z])

    self.solvent_components.add(mb.fill_box(self.solvent,
                                            bilayer_solvent_box))
python
{ "resource": "" }
q33341
Bilayer.solvent_per_layer
train
def solvent_per_layer(self):
    """Determine the number of solvent molecules per single layer.

    Exactly one of ``solvent_per_lipid`` and ``n_solvent`` must be
    provided; the result is cached in ``_solvent_per_layer``.

    Returns
    -------
    int
        Number of solvent molecules in each of the two layers: either
        ``n_lipids_per_layer * solvent_per_lipid`` or half of
        ``n_solvent``.
    """
    if self._solvent_per_layer:
        return self._solvent_per_layer

    # Exactly one of the two specifications must be given.
    assert not (self.solvent_per_lipid is None and self.n_solvent is None)
    if self.solvent_per_lipid is not None:
        assert self.n_solvent is None
        self._solvent_per_layer = self.n_lipids_per_layer * self.solvent_per_lipid
    elif self.n_solvent is not None:
        assert self.solvent_per_lipid is None
        # BUGFIX: use floor division so the cached value is an integer
        # molecule count; true division returned a float under Python 3.
        self._solvent_per_layer = self.n_solvent // 2
    return self._solvent_per_layer
python
{ "resource": "" }
q33342
Bilayer.number_of_each_lipid_per_layer
train
def number_of_each_lipid_per_layer(self):
    """The number of each lipid type per layer.

    Computed lazily from the (lipid, fraction, ...) entries in
    ``self.lipids``: each fraction is rounded to a whole count and the
    final type absorbs any rounding remainder, so the counts always sum
    to ``n_lipids_per_layer``.  The result is cached in
    ``self._number_of_each_lipid_per_layer``.

    Returns
    -------
    list of int
        One count per entry in ``self.lipids``.
    """
    if self._number_of_each_lipid_per_layer:
        return self._number_of_each_lipid_per_layer

    for lipid in self.lipids[:-1]:
        self._number_of_each_lipid_per_layer.append(int(round(lipid[1] * self.n_lipids_per_layer)))

    # TODO: give warning if frac * n different than actual
    # Rounding errors may make this off by 1, so just do total - whats_been_added.
    self._number_of_each_lipid_per_layer.append(self.n_lipids_per_layer - sum(self._number_of_each_lipid_per_layer))
    assert len(self._number_of_each_lipid_per_layer) == len(self.lipids)
    return self._number_of_each_lipid_per_layer
python
{ "resource": "" }
q33343
Bilayer.lipid_box
train
def lipid_box(self):
    """The box containing all of the lipids.

    Lazily computed: the bounding box of ``lipid_components`` padded on
    every side by half the characteristic lipid spacing,
    ``0.5 * sqrt(apl)``.  The result is cached in ``_lipid_box``.

    Returns
    -------
    mb.Box
        The padded bounding box.
    """
    if not self._lipid_box:
        box = self.lipid_components.boundingbox
        # Pad the box by half a lipid spacing in every direction.
        pad = 0.5 * np.sqrt(self.apl)
        buffer_vec = np.array([pad, pad, pad])
        box.mins -= buffer_vec
        box.maxs += buffer_vec
        self._lipid_box = box
    return self._lipid_box
python
{ "resource": "" }
q33344
load
train
def load(filename, relative_to_module=None, compound=None, coords_only=False,
         rigid=False, use_parmed=False, smiles=False, **kwargs):
    """Load a file into an mbuild compound.

    Files are read using the MDTraj package unless the `use_parmed`
    argument is specified as True. Please refer to
    http://mdtraj.org/1.8.0/load_functions.html for formats supported by
    MDTraj and https://parmed.github.io/ParmEd/html/readwrite.html for
    formats supported by ParmEd.

    Parameters
    ----------
    filename : str
        Name of the file from which to load atom and bond information.
    relative_to_module : str, optional, default=None
        Instead of looking in the current working directory, look for the file
        where this module is defined. This is typically used in Compound
        classes that will be instantiated from a different directory
        (such as the Compounds located in mbuild.lib).
    compound : mb.Compound, optional, default=None
        Existing compound to load atom and bond information into.
    coords_only : bool, optional, default=False
        Only load the coordinates into an existing compound.
    rigid : bool, optional, default=False
        Treat the compound as a rigid body
    use_parmed : bool, optional, default=False
        Use readers from ParmEd instead of MDTraj.
    smiles : bool, optional, default=False
        Use Open Babel to parse filename as a SMILES string or file
        containing a SMILES string
    **kwargs : keyword arguments
        Key word arguments passed to mdTraj for loading.

    Returns
    -------
    compound : mb.Compound
    """
    # Handle mbuild *.py files containing a class that wraps a structure file
    # in its own folder. E.g., you build a system from ~/foo.py and it imports
    # from ~/bar/baz.py where baz.py loads ~/bar/baz.pdb.
    if relative_to_module:
        script_path = os.path.realpath(
            sys.modules[relative_to_module].__file__)
        file_dir = os.path.dirname(script_path)
        filename = os.path.join(file_dir, filename)

    if compound is None:
        compound = Compound()

    # Handle the case of a xyz file, which must use an internal reader
    extension = os.path.splitext(filename)[-1]
    if extension == '.xyz' and 'top' not in kwargs:
        if coords_only:
            tmp = read_xyz(filename)
            if tmp.n_particles != compound.n_particles:
                # BUGFIX: the message previously contained the literal
                # text "(unknown)" where the file name belongs; the
                # {filename} placeholder is filled by .format(**locals()).
                raise ValueError('Number of atoms in {filename} does not match'
                                 ' {compound}'.format(**locals()))
            ref_and_compound = zip(tmp._particles(include_ports=False),
                                   compound.particles(include_ports=False))
            for ref_particle, particle in ref_and_compound:
                particle.pos = ref_particle.pos
        else:
            compound = read_xyz(filename)
        return compound

    if use_parmed:
        warn(
            "use_parmed set to True.  Bonds may be inferred from inter-particle "
            "distances and standard residue templates!")
        structure = pmd.load_file(filename, structure=True, **kwargs)
        compound.from_parmed(structure, coords_only=coords_only)
    elif smiles:
        pybel = import_('pybel')
        # First we try treating filename as a SMILES string
        try:
            mymol = pybel.readstring("smi", filename)
        # Now we treat it as a filename
        except(OSError, IOError):
            # For now, we only support reading in a single smiles molecule,
            # but pybel returns a generator, so we get the first molecule
            # and warn the user if there is more
            mymol_generator = pybel.readfile("smi", filename)
            mymol_list = list(mymol_generator)
            if len(mymol_list) == 1:
                mymol = mymol_list[0]
            else:
                mymol = mymol_list[0]
                warn("More than one SMILES string in file, more than one SMILES "
                     "string is not supported, using {}".format(mymol.write("smi")))
        # Convert through an intermediate mol2 file written by Open Babel.
        tmp_dir = tempfile.mkdtemp()
        temp_file = os.path.join(tmp_dir, 'smiles_to_mol2_intermediate.mol2')
        mymol.make3D()
        mymol.write("MOL2", temp_file)
        structure = pmd.load_file(temp_file, structure=True, **kwargs)
        compound.from_parmed(structure, coords_only=coords_only)
    else:
        traj = md.load(filename, **kwargs)
        compound.from_trajectory(traj, frame=-1, coords_only=coords_only)

    if rigid:
        compound.label_rigid_bodies()
    return compound
python
{ "resource": "" }
q33345
Compound.successors
train
def successors(self):
    """Yield every Compound below self in the hierarchy, depth-first.

    Yields
    ------
    mb.Compound
        Each child of self, immediately followed by that child's own
        successors (pre-order traversal).
    """
    if not self.children:
        return
    for child in self.children:
        # The child itself comes first ...
        yield child
        # ... followed by everything beneath it.
        for descendant in child.successors():
            yield descendant
python
{ "resource": "" }
q33346
Compound.ancestors
train
def ancestors(self):
    """Generate all ancestors of the Compound, nearest first.

    Yields
    ------
    mb.Compound
        The parent, then the grandparent, and so on up to the root.
    """
    # Walk the parent chain iteratively instead of recursing.
    node = self.parent
    while node is not None:
        yield node
        node = node.parent
python
{ "resource": "" }
q33347
Compound.particles_by_name
train
def particles_by_name(self, name):
    """Yield every Particle of the Compound with the given name.

    Parameters
    ----------
    name : str
        Only particles with this name are yielded.

    Yields
    ------
    mb.Compound
        The next Particle whose name matches *name*.
    """
    for candidate in self.particles():
        if candidate.name != name:
            continue
        yield candidate
python
{ "resource": "" }
q33348
Compound.contains_rigid
train
def contains_rigid(self):
    """True if any particle in this Compound belongs to a rigid body.

    The answer is cached in ``_contains_rigid``; the particle tree is
    only re-scanned while the dirty flag
    ``_check_if_contains_rigid_bodies`` is set, which other methods flip
    whenever rigid-body membership may have changed.  This avoids
    traversing the tree on every access.

    Returns
    -------
    bool
        Whether any particle has a rigid_id that is not None.
    """
    if self._check_if_contains_rigid_bodies:
        # Clear the dirty flag, then recompute the cached answer.
        self._check_if_contains_rigid_bodies = False
        self._contains_rigid = any(
            p.rigid_id is not None for p in self._particles())
    return self._contains_rigid
python
{ "resource": "" }
q33349
Compound.max_rigid_id
train
def max_rigid_id(self):
    """Return the largest rigid body ID present in the Compound.

    Returns
    -------
    int or None
        The maximum rigid_id among all particles, or None when no
        particle carries a rigid body ID.
    """
    rigid_ids = [p.rigid_id for p in self.particles()
                 if p.rigid_id is not None]
    if rigid_ids:
        return max(rigid_ids)
    return None
python
{ "resource": "" }
q33350
Compound.rigid_particles
train
def rigid_particles(self, rigid_id=None):
    """Generate all particles that belong to rigid bodies.

    Parameters
    ----------
    rigid_id : int, optional
        If given, restrict iteration to particles whose rigid_id equals
        this value; otherwise yield every particle whose rigid_id is
        not None.

    Yields
    ------
    mb.Compound
        The next matching rigid particle.
    """
    for particle in self.particles():
        if rigid_id is None:
            matches = particle.rigid_id is not None
        else:
            matches = particle.rigid_id == rigid_id
        if matches:
            yield particle
python
{ "resource": "" }
q33351
Compound.label_rigid_bodies
train
def label_rigid_bodies(self, discrete_bodies=None, rigid_particles=None):
    """Designate which Compounds should be treated as rigid bodies

    If no arguments are provided, this function will treat the compound
    as a single rigid body by providing all particles in `self` with the
    same rigid_id. If `discrete_bodies` is not None, each instance of
    a Compound with a name found in `discrete_bodies` will be treated as a
    unique rigid body. If `rigid_particles` is not None, only Particles
    (Compounds at the bottom of the containment hierarchy) matching this
    name will be considered part of the rigid body.

    Parameters
    ----------
    discrete_bodies : str or list of str, optional, default=None
        Name(s) of Compound instances to be treated as unique rigid bodies.
        Compound instances matching this (these) name(s) will be provided
        with unique rigid_ids
    rigid_particles : str or list of str, optional, default=None
        Name(s) of Compound instances at the bottom of the containment
        hierarchy (Particles) to be included in rigid bodies. Only Particles
        matching this (these) name(s) will have their rigid_ids altered to
        match the rigid body number.

    Examples
    --------
    Creating a rigid benzene

    >>> import mbuild as mb
    >>> from mbuild.utils.io import get_fn
    >>> benzene = mb.load(get_fn('benzene.mol2'))
    >>> benzene.label_rigid_bodies()

    Creating a semi-rigid benzene, where only the carbons are treated as
    a rigid body

    >>> import mbuild as mb
    >>> from mbuild.utils.io import get_fn
    >>> benzene = mb.load(get_fn('benzene.mol2'))
    >>> benzene.label_rigid_bodies(rigid_particles='C')

    Create a box of rigid benzenes, where each benzene has a unique rigid
    body ID.

    >>> import mbuild as mb
    >>> from mbuild.utils.io import get_fn
    >>> benzene = mb.load(get_fn('benzene.mol2'))
    >>> benzene.name = 'Benzene'
    >>> filled = mb.fill_box(benzene,
    ...                      n_compounds=10,
    ...                      box=[0, 0, 0, 4, 4, 4])
    >>> filled.label_rigid_bodies(discrete_bodies='Benzene')

    Create a box of semi-rigid benzenes, where each benzene has a
    unique rigid body ID and only the carbon portion is treated as rigid.

    >>> import mbuild as mb
    >>> from mbuild.utils.io import get_fn
    >>> benzene = mb.load(get_fn('benzene.mol2'))
    >>> benzene.name = 'Benzene'
    >>> filled = mb.fill_box(benzene,
    ...                      n_compounds=10,
    ...                      box=[0, 0, 0, 4, 4, 4])
    >>> filled.label_rigid_bodies(discrete_bodies='Benzene',
    ...                           rigid_particles='C')
    """
    # Normalize single names to one-element lists.
    if discrete_bodies is not None:
        if isinstance(discrete_bodies, string_types):
            discrete_bodies = [discrete_bodies]
    if rigid_particles is not None:
        if isinstance(rigid_particles, string_types):
            rigid_particles = [rigid_particles]

    if self.root.max_rigid_id is not None:
        # Continue numbering after the IDs that already exist.
        # NOTE(review): both placeholders are filled with `rigid_id`,
        # and the two concatenated literals lack a separating space
        # ("'rigid_id'starting") -- confirm the intended message.
        rigid_id = self.root.max_rigid_id + 1
        warn("{} rigid bodies already exist.  Incrementing 'rigid_id'"
             "starting from {}.".format(rigid_id, rigid_id))
    else:
        rigid_id = 0

    for successor in self.successors():
        if discrete_bodies and successor.name not in discrete_bodies:
            continue
        for particle in successor.particles():
            # With rigid_particles set, only matching leaf names are
            # labeled; otherwise every particle joins the body.
            if rigid_particles and particle.name not in rigid_particles:
                continue
            particle.rigid_id = rigid_id
        # One distinct ID per matched body; otherwise everything
        # shares rigid_id 0 (or the incremented starting value).
        if discrete_bodies:
            rigid_id += 1
python
{ "resource": "" }
q33352
Compound.unlabel_rigid_bodies
train
def unlabel_rigid_bodies(self):
    """Strip every rigid-body label from the Compound.

    Sets rigid_id to None on every particle and marks self and all
    direct children dirty so that ``contains_rigid`` is recomputed on
    its next access.
    """
    # Clear the labels first; the order of these steps is independent.
    for particle in self.particles():
        particle.rigid_id = None
    # Invalidate the cached contains_rigid answers.
    self._check_if_contains_rigid_bodies = True
    for child in self.children:
        child._check_if_contains_rigid_bodies = True
python
{ "resource": "" }
q33353
Compound._increment_rigid_ids
train
def _increment_rigid_ids(self, increment):
    """Shift every existing rigid body ID in this Compound upward.

    Adds *increment* to the rigid_id of each particle that already has
    one; particles without a rigid_id (None) are left untouched.

    Parameters
    ----------
    increment : int
        Amount added to each existing rigid_id.
    """
    for particle in self.particles():
        if particle.rigid_id is None:
            continue
        particle.rigid_id += increment
python
{ "resource": "" }
q33354
Compound._reorder_rigid_ids
train
def _reorder_rigid_ids(self):
    """Reorder rigid body IDs ensuring consecutiveness.

    Primarily used internally to ensure consecutive rigid_ids following
    removal of a Compound: when exactly one ID is missing from the
    0..max sequence, every ID above the gap is decremented by one.
    """
    max_rigid = self.max_rigid_id
    unique_rigid_ids = sorted(
        set([p.rigid_id for p in self.rigid_particles()]))
    n_unique_rigid = len(unique_rigid_ids)
    # NOTE(review): `if max_rigid` treats a maximum ID of 0 as "no
    # rigid bodies", and `if self.rigid_id` below skips rigid_id == 0;
    # presumably `is not None` was intended -- confirm.
    if max_rigid and n_unique_rigid != max_rigid + 1:
        # Gauss sum of the expected consecutive IDs minus the actual
        # sum identifies the single missing ID.  True division yields a
        # float here; the > comparisons against int IDs still work.
        missing_rigid_id = (
            unique_rigid_ids[-1] * (unique_rigid_ids[-1] + 1)) / 2 - sum(unique_rigid_ids)
        for successor in self.successors():
            if successor.rigid_id is not None:
                if successor.rigid_id > missing_rigid_id:
                    successor.rigid_id -= 1
        if self.rigid_id:
            if self.rigid_id > missing_rigid_id:
                self.rigid_id -= 1
python
{ "resource": "" }
q33355
Compound.add
train
def add(self, new_child, label=None, containment=True, replace=False,
        inherit_periodicity=True, reset_rigid_ids=True):
    """Add a part to the Compound.

    Note:
    This does not necessarily add the part to self.children but may
    instead be used to add a reference to the part to self.labels. See
    'containment' argument.

    Parameters
    ----------
    new_child : mb.Compound or list-like of mb.Compound
        The object(s) to be added to this Compound.
    label : str, optional
        A descriptive string for the part.
    containment : bool, optional, default=True
        Add the part to self.children.
    replace : bool, optional, default=False
        Replace the label if it already exists.
    inherit_periodicity : bool, optional, default=True
        Replace the periodicity of self with the periodicity of the
        Compound being added
    reset_rigid_ids : bool, optional, default=True
        If the Compound to be added contains rigid bodies, reset the
        rigid_ids such that values remain distinct from rigid_ids
        already present in `self`. Can be set to False if attempting
        to add Compounds to an existing rigid body.

    Raises
    ------
    ValueError
        If new_child is not a Compound.
    MBuildError
        If new_child already has a parent, or the label already exists
        and replace is False.
    """
    # Support batch add via lists, tuples and sets.
    # NOTE(review): collections.Iterable is deprecated (moved to
    # collections.abc in 3.3 and removed in Python 3.10).
    if (isinstance(new_child, collections.Iterable) and
            not isinstance(new_child, string_types)):
        for child in new_child:
            self.add(child, reset_rigid_ids=reset_rigid_ids)
        return

    if not isinstance(new_child, Compound):
        raise ValueError('Only objects that inherit from mbuild.Compound '
                         'can be added to Compounds. You tried to add '
                         '"{}".'.format(new_child))

    # Keep the incoming rigid IDs distinct from the ones already here,
    # then mark this Compound dirty so contains_rigid is recomputed.
    if new_child.contains_rigid or new_child.rigid_id is not None:
        if self.contains_rigid and reset_rigid_ids:
            new_child._increment_rigid_ids(increment=self.max_rigid_id + 1)
        self._check_if_contains_rigid_bodies = True

    # Gaining a child means self is no longer a leaf particle, so its
    # own rigid_id (a per-particle attribute) no longer applies.
    if self.rigid_id is not None:
        self.rigid_id = None

    # Create children and labels on the first add operation
    if self.children is None:
        self.children = OrderedSet()
    if self.labels is None:
        self.labels = OrderedDict()

    if containment:
        if new_child.parent is not None:
            raise MBuildError('Part {} already has a parent: {}'.format(
                new_child, new_child.parent))
        self.children.add(new_child)
        new_child.parent = self

        # Merge the child's bonds into the root graph; only the root
        # keeps a bond graph.
        # NOTE(review): `compose` is assumed to merge in place here --
        # confirm against the BondGraph implementation.
        if new_child.bond_graph is not None:
            if self.root.bond_graph is None:
                self.root.bond_graph = new_child.bond_graph
            else:
                self.root.bond_graph.compose(new_child.bond_graph)

            new_child.bond_graph = None

    # Add new_part to labels. Does not currently support batch add.
    if label is None:
        label = '{0}[$]'.format(new_child.__class__.__name__)

    # A trailing '[$]' requests auto-numbered labels: keep a list under
    # the bare name and index the label by its position in that list.
    if label.endswith('[$]'):
        label = label[:-3]
        if label not in self.labels:
            self.labels[label] = []
        label_pattern = label + '[{}]'

        count = len(self.labels[label])
        self.labels[label].append(new_child)
        label = label_pattern.format(count)

    if not replace and label in self.labels:
        raise MBuildError('Label "{0}" already exists in {1}.'.format(
            label, self))
    else:
        self.labels[label] = new_child
    new_child.referrers.add(self)

    if (inherit_periodicity and isinstance(new_child, Compound) and
            new_child.periodicity.any()):
        self.periodicity = new_child.periodicity
python
{ "resource": "" }
q33356
Compound.remove
train
def remove(self, objs_to_remove):
    """Remove children from the Compound.

    Cleanly disconnects the removed parts: their bonds are deleted from
    the root bond graph (leaving Ports in their place), labels and
    referrers pointing at them are dropped, and rigid body IDs are
    re-compacted afterwards.  Parts not found among this Compound's
    direct children are searched for recursively in sub-compounds.

    Parameters
    ----------
    objs_to_remove : mb.Compound or list of mb.Compound
        The Compound(s) to be removed from self
    """
    if not self.children:
        return

    # Normalize a single Compound into an iterable, then de-duplicate.
    if not hasattr(objs_to_remove, '__iter__'):
        objs_to_remove = [objs_to_remove]
    objs_to_remove = set(objs_to_remove)

    if len(objs_to_remove) == 0:
        return

    # Split into parts that are direct children (removed here) and
    # parts that must be looked for deeper in the hierarchy.
    remove_from_here = objs_to_remove.intersection(self.children)
    self.children -= remove_from_here
    yet_to_remove = objs_to_remove - remove_from_here

    # Recursively dismantle each removed part.
    # NOTE(review): this iterates `removed.children` while
    # `removed.remove(child)` mutates that same set -- confirm the
    # container tolerates concurrent modification.
    for removed in remove_from_here:
        for child in removed.children:
            removed.remove(child)

    for removed_part in remove_from_here:
        # Removing a rigid particle invalidates every ancestor's
        # cached contains_rigid answer.
        if removed_part.rigid_id is not None:
            for ancestor in removed_part.ancestors():
                ancestor._check_if_contains_rigid_bodies = True
        # Break all bonds involving the removed part; remove_bond also
        # leaves Ports behind on the surviving neighbors.
        if self.root.bond_graph and self.root.bond_graph.has_node(
                removed_part):
            for neighbor in self.root.bond_graph.neighbors(removed_part):
                self.root.remove_bond((removed_part, neighbor))
            self.root.bond_graph.remove_node(removed_part)
        self._remove_references(removed_part)

    # Remove the part recursively from sub-compounds.
    for child in self.children:
        child.remove(yet_to_remove)
        if child.contains_rigid:
            self.root._reorder_rigid_ids()
python
{ "resource": "" }
q33357
Compound._remove_references
train
def _remove_references(self, removed_part):
    """Remove labels pointing to this part and vice versa.

    Detaches *removed_part* from the hierarchy bookkeeping: clears its
    parent, deletes labels in other Compounds that reference it, and
    drops labels inside *removed_part* that point back into the
    remaining hierarchy.
    """
    removed_part.parent = None

    # Remove labels in the hierarchy pointing to this part.
    referrers_to_remove = set()
    for referrer in removed_part.referrers:
        # Referrers that are ancestors of the removed part are being
        # dismantled anyway, so only outside referrers are cleaned.
        if removed_part not in referrer.ancestors():
            for label, referred_part in list(referrer.labels.items()):
                if referred_part is removed_part:
                    del referrer.labels[label]
                    referrers_to_remove.add(referrer)
    removed_part.referrers -= referrers_to_remove

    # Remove labels in this part pointing into the hierarchy.
    labels_to_delete = []
    if isinstance(removed_part, Compound):
        for label, part in list(removed_part.labels.items()):
            # List-valued labels (auto-numbered groups) are recursed
            # into element by element.
            if not isinstance(part, Compound):
                for p in part:
                    self._remove_references(p)
            elif removed_part not in part.ancestors():
                # NOTE(review): set.discard never raises KeyError, so
                # the `else` branch always runs and the label is always
                # queued for deletion -- confirm whether `remove` (which
                # does raise) was intended here.
                try:
                    part.referrers.discard(removed_part)
                except KeyError:
                    pass
                else:
                    labels_to_delete.append(label)
    for label in labels_to_delete:
        removed_part.labels.pop(label, None)
python
{ "resource": "" }
q33358
Compound.referenced_ports
train
def referenced_ports(self):
    """Return all Ports referenced by this Compound.

    Returns
    -------
    list of mb.Compound
        Every value in ``self.labels`` that is a Port.
    """
    from mbuild.port import Port
    ports = []
    for referenced in self.labels.values():
        if isinstance(referenced, Port):
            ports.append(referenced)
    return ports
python
{ "resource": "" }
q33359
Compound.all_ports
train
def all_ports(self):
    """Return all Ports referenced by this Compound and its successors.

    Returns
    -------
    list of mb.Compound
        Every node in the containment hierarchy below self that is
        itself a Port.
    """
    from mbuild.port import Port
    found = []
    for node in self.successors():
        if isinstance(node, Port):
            found.append(node)
    return found
python
{ "resource": "" }
q33360
Compound.available_ports
train
def available_ports(self):
    """Return all unoccupied Ports referenced by this Compound.

    Returns
    -------
    list of mb.Compound
        Every value in ``self.labels`` that is a Port and is not
        currently in use.
    """
    from mbuild.port import Port
    open_ports = []
    for candidate in self.labels.values():
        if isinstance(candidate, Port) and not candidate.used:
            open_ports.append(candidate)
    return open_ports
python
{ "resource": "" }
q33361
Compound.bonds
train
def bonds(self):
    """Return an iterator over all bonds involving this Compound.

    Returns
    -------
    iterator of tuple of mb.Compound
        Each edge (bond) in the root's bond graph; restricted to this
        Compound's particles when self is not the root.  Empty when no
        bond graph exists.

    See Also
    --------
    bond_graph.edges_iter : Iterates over all edges in a BondGraph
    """
    graph = self.root.bond_graph
    if not graph:
        # No bonds anywhere in the hierarchy.
        return iter(())
    if self.root == self:
        return graph.edges_iter()
    # Sub-compound: only edges between this Compound's own particles.
    return graph.subgraph(self.particles()).edges_iter()
python
{ "resource": "" }
q33362
Compound.add_bond
train
def add_bond(self, particle_pair):
    """Add a bond between two Particles.

    Parameters
    ----------
    particle_pair : indexable object, length=2, dtype=mb.Compound
        The pair of Particles to add a bond between
    """
    root = self.root
    if root.bond_graph is None:
        # Lazily create the graph on the first bond.
        root.bond_graph = BondGraph()
    first, second = particle_pair[0], particle_pair[1]
    root.bond_graph.add_edge(first, second)
python
{ "resource": "" }
q33363
Compound.remove_bond
train
def remove_bond(self, particle_pair):
    """Deletes a bond between a pair of Particles

    If the two particles do not overlap, a Port is added to each
    particle's parent at half the former bond length, oriented along
    the broken bond, so that new bonds can later be formed in its
    place.

    Parameters
    ----------
    particle_pair : indexable object, length=2, dtype=mb.Compound
        The pair of Particles to remove the bond between
    """
    from mbuild.port import Port
    # Nothing to do when no graph exists or the bond is absent.
    if self.root.bond_graph is None or not self.root.bond_graph.has_edge(
            *particle_pair):
        warn("Bond between {} and {} doesn't exist!".format(*particle_pair))
        return
    self.root.bond_graph.remove_edge(*particle_pair)
    bond_vector = particle_pair[0].pos - particle_pair[1].pos
    # Coincident particles give no direction for the new Ports.
    if np.allclose(bond_vector, np.zeros(3)):
        warn("Particles {} and {} overlap! Ports will not be added."
             "".format(*particle_pair))
        return
    distance = np.linalg.norm(bond_vector)
    # Each port points away from its partner, half a bond length out.
    particle_pair[0].parent.add(Port(anchor=particle_pair[0],
                                     orientation=-bond_vector,
                                     separation=distance / 2), 'port[$]')
    particle_pair[1].parent.add(Port(anchor=particle_pair[1],
                                     orientation=bond_vector,
                                     separation=distance / 2), 'port[$]')
python
{ "resource": "" }
q33364
Compound.xyz
train
def xyz(self):
    """Return the coordinates of all particles in this Compound.

    Returns
    -------
    pos : np.ndarray, shape=(n, 3), dtype=float
        Positions of all particles (Ports excluded).
    """
    if not self.children:
        # A leaf particle: promote its single position to shape (1, 3).
        return np.expand_dims(self._pos, axis=0)
    flat = np.fromiter(
        itertools.chain.from_iterable(
            p.pos for p in self.particles()),
        dtype=float)
    return flat.reshape((-1, 3))
python
{ "resource": "" }
q33365
Compound.xyz_with_ports
train
def xyz_with_ports(self):
    """Return coordinates of all particles in this Compound, Ports included.

    Returns
    -------
    pos : np.ndarray, dtype=float
        Positions of all particles and ports.  Note that for a leaf
        particle this returns ``self._pos`` itself (shape (3,)) rather
        than a (1, 3) array.
    """
    if not self.children:
        return self._pos
    flat = np.fromiter(
        itertools.chain.from_iterable(
            p.pos for p in self.particles(include_ports=True)),
        dtype=float)
    return flat.reshape((-1, 3))
python
{ "resource": "" }
q33366
Compound.xyz
train
def xyz(self, arrnx3):
    """Set particle positions for this Compound, excluding the Ports.

    This function does not set the position of the ports.

    Parameters
    ----------
    arrnx3 : np.ndarray, shape=(n,3), dtype=float
        The new particle positions

    Raises
    ------
    ValueError
        If self is a leaf particle and more than one coordinate is
        supplied.
    """
    if self.children:
        # Assign row i of the array to the i-th particle.
        for particle, row in zip(
                self._particles(include_ports=False), arrnx3):
            particle.pos = row
        return
    if arrnx3.shape[0] != 1:
        raise ValueError(
            'Trying to set position of {} with more than one'
            'coordinate: {}'.format(
                self, arrnx3))
    self.pos = np.squeeze(arrnx3)
python
{ "resource": "" }
q33367
Compound.xyz_with_ports
train
def xyz_with_ports(self, arrnx3):
    """Set positions for this Compound's particles, Ports included.

    Parameters
    ----------
    arrnx3 : np.ndarray, shape=(n,3), dtype=float
        The new particle positions

    Raises
    ------
    ValueError
        If self is a leaf particle and more than one coordinate is
        supplied.
    """
    if self.children:
        # Assign row i of the array to the i-th particle or port.
        for particle, row in zip(
                self._particles(include_ports=True), arrnx3):
            particle.pos = row
        return
    if arrnx3.shape[0] != 1:
        raise ValueError(
            'Trying to set position of {} with more than one'
            'coordinate: {}'.format(
                self, arrnx3))
    self.pos = np.squeeze(arrnx3)
python
{ "resource": "" }
q33368
Compound.center
train
def center(self):
    """The cartesian center of the Compound based on its Particles.

    Returns
    -------
    np.ndarray, shape=(3,), dtype=float, or None
        Mean particle position, or None if any coordinate is
        non-finite (NaN or inf).
    """
    coords = self.xyz
    if not np.all(np.isfinite(coords)):
        return None
    return np.mean(coords, axis=0)
python
{ "resource": "" }
q33369
Compound.boundingbox
train
def boundingbox(self):
    """Compute the axis-aligned bounding box of the Compound.

    Returns
    -------
    mb.Box
        Box spanning the minimum and maximum particle coordinates
        along each axis.
    """
    coords = self.xyz
    return Box(mins=coords.min(axis=0), maxs=coords.max(axis=0))
python
{ "resource": "" }
q33370
Compound.min_periodic_distance
train
def min_periodic_distance(self, xyz0, xyz1):
    """Distance between two points under the minimum-image convention.

    Parameters
    ----------
    xyz0 : np.ndarray, shape=(3,), dtype=float
        Coordinates of first point
    xyz1 : np.ndarray, shape=(3,), dtype=float
        Coordinates of second point

    Returns
    -------
    float
        Euclidean distance between the points, with each component
        wrapped to the nearest periodic image of ``self.periodicity``.
    """
    delta = np.abs(xyz0 - xyz1)
    # Components longer than half the box wrap to the nearer image.
    wrapped = np.where(delta > 0.5 * self.periodicity,
                       self.periodicity - delta,
                       delta)
    return np.sqrt(np.sum(wrapped ** 2, axis=-1))
python
{ "resource": "" }
q33371
Compound.particles_in_range
train
def particles_in_range(
        self,
        compound,
        dmax,
        max_particles=20,
        particle_kdtree=None,
        particle_array=None):
    """Find particles within a specified range of another particle.

    Parameters
    ----------
    compound : mb.Compound
        Reference particle to find other particles in range of
    dmax : float
        Maximum distance from 'compound' to look for Particles
    max_particles : int, optional, default=20
        Maximum number of Particles to return
    particle_kdtree : mb.PeriodicCKDTree, optional
        KD-tree for looking up nearest neighbors. If not provided, a KD-
        tree will be generated from all Particles in self
    particle_array : np.ndarray, shape=(n,), dtype=mb.Compound, optional
        Array of possible particles to consider for return. If not
        provided, this defaults to all Particles in self

    Returns
    -------
    np.ndarray, shape=(n,), dtype=mb.Compound
        Particles in range of compound according to user-defined limits

    See Also
    --------
    periodic_kdtree.PerioidicCKDTree : mBuild implementation of kd-trees
    scipy.spatial.ckdtree : Further details on kd-trees
    """
    if particle_kdtree is None:
        # Build a tree over all particle positions, respecting the
        # Compound's periodic boundaries.
        particle_kdtree = PeriodicCKDTree(
            data=self.xyz, bounds=self.periodicity)
    _, idxs = particle_kdtree.query(
        compound.pos, k=max_particles, distance_upper_bound=dmax)
    # The query pads missing neighbors with an out-of-range index
    # (equal to the number of data points); drop those entries.
    idxs = idxs[idxs != self.n_particles]
    if particle_array is None:
        particle_array = np.array(list(self.particles()))
    return particle_array[idxs]
python
{ "resource": "" }
q33372
Compound.visualize
train
def visualize(self, show_ports=False):
    """Visualize the Compound using nglview.

    Allows for visualization of a Compound within a Jupyter Notebook.

    Parameters
    ----------
    show_ports : bool, optional, default=False
        Visualize Ports in addition to Particles
    """
    nglview = import_('nglview')
    from mdtraj.geometry.sasa import _ATOMIC_RADII
    if run_from_ipython():
        # NOTE(review): this permanently strips digits from and upper-cases
        # the particle names of self (visualization mutates the Compound).
        remove_digits = lambda x: ''.join(i for i in x if not i.isdigit()
                                          or i == '_')
        for particle in self.particles():
            particle.name = remove_digits(particle.name).upper()
            if not particle.name:
                particle.name = 'UNK'
        # Round-trip through a .mol2 file so nglview can load the structure.
        # NOTE(review): tmp_dir is never removed -- leaks one temp dir per call.
        tmp_dir = tempfile.mkdtemp()
        self.save(os.path.join(tmp_dir, 'tmp.mol2'),
                  show_ports=show_ports,
                  overwrite=True)
        widget = nglview.show_file(os.path.join(tmp_dir, 'tmp.mol2'))
        widget.clear()
        widget.add_ball_and_stick(cylinderOnly=True)
        # Size each element's spheres by its atomic radius; unknown names
        # fall back to a small grey sphere.
        elements = set([particle.name for particle in self.particles()])
        scale = 50.0
        for element in elements:
            try:
                widget.add_ball_and_stick('_{}'.format(
                    element.upper()),
                    aspect_ratio=_ATOMIC_RADII[element.title()]**1.5 * scale)
            except KeyError:
                ids = [str(i) for i, particle in enumerate(self.particles())
                       if particle.name == element]
                widget.add_ball_and_stick(
                    '@{}'.format(
                        ','.join(ids)),
                    aspect_ratio=0.17**1.5 * scale,
                    color='grey')
        if show_ports:
            widget.add_ball_and_stick('_VS',
                                      aspect_ratio=1.0,
                                      color='#991f00')
        return widget
    else:
        raise RuntimeError('Visualization is only supported in Jupyter '
                           'Notebooks.')
python
{ "resource": "" }
q33373
Compound.update_coordinates
train
def update_coordinates(self, filename, update_port_locations=True):
    """Update the coordinates of this Compound from a file.

    Parameters
    ----------
    filename : str
        Name of file from which to load coordinates. Supported file types
        are the same as those supported by load()
    update_port_locations : bool, optional, default=True
        Update the locations of Ports so that they are shifted along with
        their anchor particles.  Note: This conserves the location of
        Ports with respect to the anchor Particle, but does not conserve
        the orientation of Ports with respect to the molecule as a whole.

    See Also
    --------
    load : Load coordinates from a file
    """
    if not update_port_locations:
        self = load(filename, compound=self, coords_only=True)
        return
    # Remember the pre-load positions so Ports can be shifted by the same
    # displacement as their anchor particles.
    xyz_init = self.xyz
    self = load(filename, compound=self, coords_only=True)
    self._update_port_locations(xyz_init)
python
{ "resource": "" }
q33374
Compound._update_port_locations
train
def _update_port_locations(self, initial_coordinates):
    """Adjust port locations after particles have moved.

    Compares the locations of Particles between 'self' and an array of
    reference coordinates.  Shifts Ports in accordance with how far anchors
    have been moved.  This conserves the location of Ports with respect to
    their anchor Particles, but does not conserve the orientation of Ports
    with respect to the molecule as a whole.

    Parameters
    ----------
    initial_coordinates : np.ndarray, shape=(n, 3), dtype=float
        Reference coordinates to use for comparing how far anchor Particles
        have shifted.
    """
    current_particles = list(self.particles())
    for port in self.all_ports():
        anchor = port.anchor
        if anchor:
            # Move the port by the same displacement as its anchor.
            anchor_idx = current_particles.index(anchor)
            displacement = (current_particles[anchor_idx].pos
                            - initial_coordinates[anchor_idx])
            port.translate(displacement)
python
{ "resource": "" }
q33375
Compound._kick
train
def _kick(self):
    """Slightly adjust all coordinates in a Compound.

    Provides a slight adjustment to coordinates to kick them out of local
    energy minima.
    """
    initial_xyz = self.xyz
    for particle in self.particles():
        # Uniform random jitter in [-0.005, 0.005) nm on each axis.
        particle.pos += (np.random.rand(3,) - 0.5) / 100
    # Keep Ports attached to their (now perturbed) anchors.
    self._update_port_locations(initial_xyz)
python
{ "resource": "" }
q33376
Compound.save
train
def save(self, filename, show_ports=False, forcefield_name=None,
         forcefield_files=None, forcefield_debug=False, box=None,
         overwrite=False, residues=None, references_file=None,
         combining_rule='lorentz', foyerkwargs=None, **kwargs):
    """Save the Compound to a file.

    Parameters
    ----------
    filename : str
        Filesystem path in which to save the trajectory. The extension or
        prefix will be parsed and control the format. Supported extensions:
        'hoomdxml', 'gsd', 'gro', 'top', 'lammps', 'lmp'
    show_ports : bool, optional, default=False
        Save ports contained within the compound.
    forcefield_files : str, optional, default=None
        Apply a forcefield to the output file using a forcefield provided
        by the `foyer` package.
    forcefield_name : str, optional, default=None
        Apply a named forcefield to the output file using the `foyer`
        package, e.g. 'oplsaa'. Forcefields listed here:
        https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields
    forcefield_debug : bool, optional, default=False
        Choose level of verbosity when applying a forcefield through
        `foyer`. Specifically, when missing atom types in the forcefield
        xml file, determine if the warning is condensed or verbose.
    box : mb.Box, optional, default=self.boundingbox (with buffer)
        Box information to be written to the output file. If 'None', a
        bounding box is used with 0.25nm buffers at each face to avoid
        overlapping atoms.
    overwrite : bool, optional, default=False
        Overwrite if the filename already exists
    residues : str of list of str
        Labels of residues in the Compound. Residues are assigned by
        checking against Compound.name.
    references_file : str, optional, default=None
        Specify a filename to write references for the forcefield that is
        to be applied. References are written in BiBTeX format.
    combining_rule : str, optional, default='lorentz'
        Specify the combining rule for nonbonded interactions. Only
        relevant when the `foyer` package is used to apply a forcefield.
        Valid options are 'lorentz' and 'geometric', specifying
        Lorentz-Berthelot and geometric combining rules respectively.
    foyerkwargs : dict, optional, default=None
        Keyword arguments forwarded to ``foyer.Forcefield.apply``.
        (Was previously a mutable ``{}`` default, which is shared across
        calls; ``None`` is used as the sentinel instead.)

    Other Parameters
    ----------------
    ref_distance : float, optional, default=1.0
        Normalization factor used when saving to .gsd and .hoomdxml
        formats for converting distance values to reduced units.
    ref_energy : float, optional, default=1.0
        Normalization factor used when saving to .gsd and .hoomdxml
        formats for converting energy values to reduced units.
    ref_mass : float, optional, default=1.0
        Normalization factor used when saving to .gsd and .hoomdxml
        formats for converting mass values to reduced units.
    atom_style : str, default='full'
        Defines the style of atoms to be saved in a LAMMPS data file.
        Supported styles: 'full', 'atomic', 'charge', 'molecular'.
        See http://lammps.sandia.gov/doc/atom_style.html for details.

    See Also
    --------
    formats.gsdwrite.write_gsd : Write to GSD format
    formats.hoomdxml.write_hoomdxml : Write to Hoomd XML format
    formats.lammpsdata.write_lammpsdata : Write to LAMMPS data format
    """
    # BUGFIX: 'foyerkwargs={}' was a mutable default argument shared
    # between calls; use None as sentinel instead.
    if foyerkwargs is None:
        foyerkwargs = {}

    extension = os.path.splitext(filename)[-1]
    if extension == '.xyz':
        traj = self.to_trajectory(show_ports=show_ports)
        traj.save(filename)
        return

    # Savers supported by mbuild.formats; other extensions fall through
    # to ParmEd's own writers.
    savers = {'.hoomdxml': write_hoomdxml,
              '.gsd': write_gsd,
              '.lammps': write_lammpsdata,
              '.lmp': write_lammpsdata}
    try:
        saver = savers[extension]
    except KeyError:
        saver = None

    if os.path.exists(filename) and not overwrite:
        raise IOError('{0} exists; not overwriting'.format(filename))

    structure = self.to_parmed(box=box, residues=residues,
                               show_ports=show_ports)
    # Apply a force field with foyer if specified
    if forcefield_name or forcefield_files:
        foyer = import_('foyer')
        ff = foyer.Forcefield(forcefield_files=forcefield_files,
                              name=forcefield_name, debug=forcefield_debug)
        structure = ff.apply(structure, references_file=references_file,
                             **foyerkwargs)
        structure.combining_rule = combining_rule

    total_charge = sum([atom.charge for atom in structure])
    if round(total_charge, 4) != 0.0:
        warn('System is not charge neutral. Total charge is {}.'
             ''.format(total_charge))

    # Provide a warning if rigid_ids are not sequential from 0
    if self.contains_rigid:
        unique_rigid_ids = sorted(set([
            p.rigid_id for p in self.rigid_particles()]))
        if max(unique_rigid_ids) != len(unique_rigid_ids) - 1:
            warn("Unique rigid body IDs are not sequential starting from zero.")

    if saver:  # mBuild supported saver.
        if extension in ['.gsd', '.hoomdxml']:
            kwargs['rigid_bodies'] = [
                p.rigid_id for p in self.particles()]
        saver(filename=filename, structure=structure, **kwargs)
    else:  # ParmEd supported saver.
        structure.save(filename, overwrite=overwrite, **kwargs)
python
{ "resource": "" }
q33377
Compound.translate
train
def translate(self, by):
    """Translate the Compound by a vector.

    Parameters
    ----------
    by : np.ndarray, shape=(3,), dtype=float
        Displacement applied to every particle and port.
    """
    self.xyz_with_ports = _translate(self.xyz_with_ports, by)
python
{ "resource": "" }
q33378
Compound.rotate
train
def rotate(self, theta, around):
    """Rotate Compound around an arbitrary vector.

    Parameters
    ----------
    theta : float
        The angle by which to rotate the Compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the Compound.
    """
    self.xyz_with_ports = _rotate(self.xyz_with_ports, theta, around)
python
{ "resource": "" }
q33379
Compound.spin
train
def spin(self, theta, around):
    """Rotate Compound in place around an arbitrary vector.

    Parameters
    ----------
    theta : float
        The angle by which to rotate the Compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the Compound.
    """
    axis = np.asarray(around).reshape(3)
    # Rotate about the compound's center: move to origin, spin, move back.
    pivot = self.center
    self.translate(-pivot)
    self.rotate(theta, axis)
    self.translate(pivot)
python
{ "resource": "" }
q33380
Compound.from_trajectory
train
def from_trajectory(self, traj, frame=-1, coords_only=False):
    """Extract atoms and bonds from a md.Trajectory.

    Will create sub-compounds for every chain if there is more than one
    and sub-sub-compounds for every residue.

    Parameters
    ----------
    traj : mdtraj.Trajectory
        The trajectory to load.
    frame : int, optional, default=-1 (last)
        The frame to take coordinates from.
    coords_only : bool, optional, default=False
        Only read coordinate information
    """
    if coords_only:
        if traj.n_atoms != self.n_particles:
            raise ValueError('Number of atoms in {traj} does not match'
                             ' {self}'.format(**locals()))
        atoms_particles = zip(traj.topology.atoms,
                              self.particles(include_ports=False))
        if None in self._particles(include_ports=False):
            raise ValueError('Some particles are None')
        # Overwrite existing particle positions in order; topology untouched.
        for mdtraj_atom, particle in atoms_particles:
            particle.pos = traj.xyz[frame, mdtraj_atom.index]
        return

    atom_mapping = dict()
    for chain in traj.topology.chains:
        # Only introduce a chain sub-compound when there are several chains;
        # otherwise atoms are added directly to self.
        if traj.topology.n_chains > 1:
            chain_compound = Compound()
            self.add(chain_compound, 'chain[$]')
        else:
            chain_compound = self
        for res in chain.residues:
            for atom in res.atoms:
                new_atom = Particle(name=str(atom.name),
                                    pos=traj.xyz[frame, atom.index])
                chain_compound.add(
                    new_atom, label='{0}[$]'.format(
                        atom.name))
                atom_mapping[atom] = new_atom

    # Recreate bonds between the newly created Particles.
    for mdtraj_atom1, mdtraj_atom2 in traj.topology.bonds:
        atom1 = atom_mapping[mdtraj_atom1]
        atom2 = atom_mapping[mdtraj_atom2]
        self.add_bond((atom1, atom2))

    # Take periodicity from the trajectory's (first frame) unit cell,
    # defaulting to a non-periodic box of zeros.
    if np.any(traj.unitcell_lengths) and np.any(traj.unitcell_lengths[0]):
        self.periodicity = traj.unitcell_lengths[0]
    else:
        self.periodicity = np.array([0., 0., 0.])
python
{ "resource": "" }
q33381
Compound.to_trajectory
train
def to_trajectory(self, show_ports=False, chains=None,
                  residues=None, box=None):
    """Convert to an md.Trajectory and flatten the compound.

    Parameters
    ----------
    show_ports : bool, optional, default=False
        Include all port atoms when converting to trajectory.
    chains : mb.Compound or list of mb.Compound
        Chain types to add to the topology
    residues : str of list of str
        Labels of residues in the Compound. Residues are assigned by
        checking against Compound.name.
    box : mb.Box, optional, default=self.boundingbox (with buffer)
        Box information to be used when converting to a `Trajectory`.
        If 'None', a bounding box is used with a 0.5nm buffer in each
        dimension to avoid overlapping atoms, unless `self.periodicity`
        is not None, in which case those values are used for the box
        lengths.

    Returns
    -------
    trajectory : md.Trajectory

    See also
    --------
    _to_topology
    """
    atom_list = [particle for particle in self.particles(show_ports)]

    top = self._to_topology(atom_list, chains, residues)

    # Coordinates: a single frame with one row per atom.
    xyz = np.ndarray(shape=(1, top.n_atoms, 3), dtype='float')
    for idx, atom in enumerate(atom_list):
        xyz[0, idx] = atom.pos

    # Unitcell information.
    unitcell_angles = [90.0, 90.0, 90.0]
    if box is None:
        unitcell_lengths = np.empty(3)
        for dim, val in enumerate(self.periodicity):
            if val:
                # Periodic dimension: use the declared box length.
                unitcell_lengths[dim] = val
            else:
                # Aperiodic dimension: bounding box plus a 0.5 nm buffer.
                unitcell_lengths[dim] = self.boundingbox.lengths[dim] + 0.5
    else:
        unitcell_lengths = box.lengths
        unitcell_angles = box.angles

    return md.Trajectory(xyz, top, unitcell_lengths=unitcell_lengths,
                         unitcell_angles=unitcell_angles)
python
{ "resource": "" }
q33382
Compound._to_topology
train
def _to_topology(self, atom_list, chains=None, residues=None):
    """Create a mdtraj.Topology from a Compound.

    Parameters
    ----------
    atom_list : list of mb.Compound
        Atoms to include in the topology
    chains : mb.Compound or list of mb.Compound
        Chain types to add to the topology
    residues : str of list of str
        Labels of residues in the Compound. Residues are assigned by
        checking against Compound.name.

    Returns
    -------
    top : mdtraj.Topology

    See Also
    --------
    mdtraj.Topology : Details on the mdtraj Topology object
    """
    from mdtraj.core.topology import Topology

    # Normalize chains/residues to tuples of names for fast membership tests.
    if isinstance(chains, string_types):
        chains = [chains]
    if isinstance(chains, (list, set)):
        chains = tuple(chains)

    if isinstance(residues, string_types):
        residues = [residues]
    if isinstance(residues, (list, set)):
        residues = tuple(residues)
    top = Topology()
    atom_mapping = {}

    # Atoms not matched to any user-specified chain/residue land here.
    default_chain = top.add_chain()
    default_residue = top.add_residue('RES', default_chain)

    compound_residue_map = dict()
    atom_residue_map = dict()
    compound_chain_map = dict()
    atom_chain_map = dict()

    for atom in atom_list:
        # Chains: match the atom itself or its closest matching ancestor.
        if chains:
            if atom.name in chains:
                current_chain = top.add_chain()
                compound_chain_map[atom] = current_chain
            else:
                for parent in atom.ancestors():
                    if chains and parent.name in chains:
                        if parent not in compound_chain_map:
                            current_chain = top.add_chain()
                            compound_chain_map[parent] = current_chain
                            current_residue = top.add_residue(
                                'RES', current_chain)
                        break
                else:
                    current_chain = default_chain
        else:
            current_chain = default_chain
        atom_chain_map[atom] = current_chain

        # Residues: same matching scheme as chains.
        if residues:
            if atom.name in residues:
                current_residue = top.add_residue(atom.name, current_chain)
                compound_residue_map[atom] = current_residue
            else:
                for parent in atom.ancestors():
                    if residues and parent.name in residues:
                        if parent not in compound_residue_map:
                            current_residue = top.add_residue(
                                parent.name, current_chain)
                            compound_residue_map[parent] = current_residue
                        break
                else:
                    current_residue = default_residue
        else:
            if chains:
                try:  # Grab the default residue from the custom chain.
                    current_residue = next(current_chain.residues)
                except StopIteration:  # Add the residue to the current chain
                    current_residue = top.add_residue('RES', current_chain)
            else:  # Grab the default chain's default residue
                current_residue = default_residue
        atom_residue_map[atom] = current_residue

        # Add the actual atoms; unknown names fall back to the 'VS'
        # (virtual site) element.
        try:
            elem = get_by_symbol(atom.name)
        except KeyError:
            elem = get_by_symbol("VS")
        at = top.add_atom(atom.name, elem, atom_residue_map[atom])
        at.charge = atom.charge
        atom_mapping[atom] = at

    # Remove empty default residues/chains.
    # NOTE(review): reaches into mdtraj's private _chains/_residues lists.
    chains_to_remove = [
        chain for chain in top.chains if chain.n_atoms == 0]
    residues_to_remove = [res for res in top.residues if res.n_atoms == 0]
    for chain in chains_to_remove:
        top._chains.remove(chain)
    for res in residues_to_remove:
        for chain in top.chains:
            try:
                chain._residues.remove(res)
            except ValueError:  # Already gone.
                pass

    for atom1, atom2 in self.bonds():
        # Ensure that both atoms are part of the compound. This becomes an
        # issue if you try to convert a sub-compound to a topology which is
        # bonded to a different subcompound.
        if all(a in atom_mapping.keys() for a in [atom1, atom2]):
            top.add_bond(atom_mapping[atom1], atom_mapping[atom2])
    return top
python
{ "resource": "" }
q33383
Compound.from_parmed
train
def from_parmed(self, structure, coords_only=False):
    """Extract atoms and bonds from a pmd.Structure.

    Will create sub-compounds for every chain if there is more than one
    and sub-sub-compounds for every residue.

    Parameters
    ----------
    structure : pmd.Structure
        The structure to load.
    coords_only : bool
        Set preexisting atoms in compound to coordinates given by structure.
    """
    if coords_only:
        if len(structure.atoms) != self.n_particles:
            raise ValueError(
                'Number of atoms in {structure} does not match'
                ' {self}'.format(
                    **locals()))
        atoms_particles = zip(structure.atoms,
                              self.particles(include_ports=False))
        if None in self._particles(include_ports=False):
            raise ValueError('Some particles are None')
        # ParmEd coordinates are in Angstroms; mBuild uses nm.
        for parmed_atom, particle in atoms_particles:
            particle.pos = np.array([parmed_atom.xx,
                                     parmed_atom.xy,
                                     parmed_atom.xz]) / 10
        return

    atom_mapping = dict()
    # NOTE(review): chain_id is never reassigned, so every chain
    # sub-compound is added with label None -- confirm this is intended.
    chain_id = None
    chains = defaultdict(list)
    for residue in structure.residues:
        chains[residue.chain].append(residue)

    for chain, residues in chains.items():
        if len(chains) > 1:
            chain_compound = Compound()
            self.add(chain_compound, chain_id)
        else:
            chain_compound = self
        for residue in residues:
            for atom in residue.atoms:
                # Angstrom -> nm conversion.
                pos = np.array([atom.xx, atom.xy, atom.xz]) / 10
                new_atom = Particle(name=str(atom.name), pos=pos)
                chain_compound.add(
                    new_atom, label='{0}[$]'.format(
                        atom.name))
                atom_mapping[atom] = new_atom

    for bond in structure.bonds:
        atom1 = atom_mapping[bond.atom1]
        atom2 = atom_mapping[bond.atom2]
        self.add_bond((atom1, atom2))

    if structure.box is not None:
        # Convert from A to nm
        self.periodicity = 0.1 * structure.box[0:3]
    else:
        self.periodicity = np.array([0., 0., 0.])
python
{ "resource": "" }
q33384
Compound.to_networkx
train
def to_networkx(self, names_only=False):
    """Create a NetworkX graph representing the hierarchy of a Compound.

    Parameters
    ----------
    names_only : bool, optional, default=False
        Store only the names of the compounds in the graph. When set to
        False, the default behavior, the nodes are the compounds
        themselves.

    Returns
    -------
    G : networkx.DiGraph
    """
    nx = import_('networkx')

    # Seed the node list with the root (name or object), then let the
    # recursive walk fill in the rest of the hierarchy.
    if names_only:
        nodes = [self.name]
    else:
        nodes = [self]
    edges = []
    nodes, edges = self._iterate_children(nodes, edges,
                                          names_only=names_only)

    graph = nx.DiGraph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)
    return graph
python
{ "resource": "" }
q33385
Compound.to_intermol
train
def to_intermol(self, molecule_types=None):
    """Create an InterMol system from a Compound.

    Parameters
    ----------
    molecule_types : list or tuple of subclasses of Compound

    Returns
    -------
    intermol_system : intermol.system.System
    """
    from intermol.atom import Atom as InterMolAtom
    from intermol.molecule import Molecule
    from intermol.system import System
    import simtk.unit as u

    if isinstance(molecule_types, list):
        molecule_types = tuple(molecule_types)
    elif molecule_types is None:
        molecule_types = (type(self),)
    intermol_system = System()

    last_molecule_compound = None
    for atom_index, atom in enumerate(self.particles()):
        for parent in atom.ancestors():
            # Don't want inheritance via isinstance().
            if type(parent) in molecule_types:
                # Check if we have encountered this molecule type before.
                if parent.name not in intermol_system.molecule_types:
                    self._add_intermol_molecule_type(
                        intermol_system, parent)
                # A new ancestor compound marks the start of a new molecule.
                if parent != last_molecule_compound:
                    last_molecule_compound = parent
                    last_molecule = Molecule(name=parent.name)
                    intermol_system.add_molecule(last_molecule)
                break
        else:
            # Should never happen if molecule_types only contains
            # type(self)
            raise ValueError(
                'Found an atom {} that is not part of any of '
                'the specified molecule types {}'.format(
                    atom, molecule_types))

        # Add the actual intermol atoms (1-based indexing).
        intermol_atom = InterMolAtom(atom_index + 1, name=atom.name,
                                     residue_index=1, residue_name='RES')
        intermol_atom.position = atom.pos * u.nanometers
        last_molecule.add_atom(intermol_atom)
    return intermol_system
python
{ "resource": "" }
q33386
Compound._add_intermol_molecule_type
train
def _add_intermol_molecule_type(intermol_system, parent):
    """Create a molecule type for the parent and add bonds. """
    from intermol.moleculetype import MoleculeType
    from intermol.forces.bond import Bond as InterMolBond

    molecule_type = MoleculeType(name=parent.name)
    intermol_system.add_molecule_type(molecule_type)

    # InterMol expects 1-based atom indices within a molecule type.
    for atom_index, parent_atom in enumerate(parent.particles(), start=1):
        parent_atom.index = atom_index

    for atom1, atom2 in parent.bonds():
        molecule_type.bonds.add(InterMolBond(atom1.index, atom2.index))
python
{ "resource": "" }
q33387
assert_port_exists
train
def assert_port_exists(port_name, compound):
    """Ensure that a Port label exists in a Compound. """
    if port_name in compound.labels:
        return True
    # Build a helpful error listing only the labels that are Ports.
    from mbuild.port import Port
    available_ports = [name for name in compound.labels
                       if isinstance(compound.labels[name], Port)]
    compound_name = compound.__class__.__name__
    raise ValueError("No port named '{port_name}' in {compound_name}'s"
                     " labels. Labeled Ports in {compound_name} are:"
                     " {available_ports}".format(**locals()))
python
{ "resource": "" }
q33388
SilicaInterface._cleave_interface
train
def _cleave_interface(self, bulk_silica, tile_x, tile_y, thickness):
    """Carve interface from bulk silica.

    Also includes a buffer of O's above and below the surface to
    ensure the interface is coated.

    Parameters
    ----------
    bulk_silica : mb.Compound
        Bulk silica structure to replicate and carve from.
    tile_x, tile_y : int
        Number of periodic replicas in x and y.
    thickness : float
        Target slab thickness (same length units as particle positions).
    """
    O_buffer = self._O_buffer
    # Enough z-replicas to cover the slab plus O buffers on both faces.
    tile_z = int(math.ceil((thickness + 2*O_buffer) / bulk_silica.periodicity[2]))
    bulk = mb.recipes.TiledCompound(bulk_silica, n_tiles=(tile_x, tile_y, tile_z))

    # The slab is periodic in x/y only; z becomes the surface normal.
    interface = mb.Compound(periodicity=(bulk.periodicity[0],
                                         bulk.periodicity[1],
                                         0.0))

    # Keep Si strictly inside the slab; keep O slightly beyond so the
    # surfaces stay oxygen-coated.
    for i, particle in enumerate(bulk.particles()):
        if ((particle.name == 'Si' and
                O_buffer < particle.pos[2] < (thickness + O_buffer)) or
                (particle.name == 'O' and
                    particle.pos[2] < (thickness + 2*O_buffer))):
            interface_particle = mb.Compound(name=particle.name,
                                             pos=particle.pos)
            interface.add(interface_particle, particle.name + "_{}".format(i))
    self.add(interface)
python
{ "resource": "" }
q33389
SilicaInterface._strip_stray_atoms
train
def _strip_stray_atoms(self):
    """Remove stray atoms and surface pieces. """
    # Keep only atoms belonging to the largest connected component of
    # the bond graph; everything else is a detached fragment.
    components = self.bond_graph.connected_components()
    major_component = max(components, key=len)
    strays = [atom for atom in self.particles()
              if atom not in major_component]
    for atom in strays:
        self.remove(atom)
python
{ "resource": "" }
q33390
SilicaInterface._bridge_dangling_Os
train
def _bridge_dangling_Os(self, oh_density, thickness):
    """Form Si-O-Si bridges to yield desired density of reactive surface sites.

    References
    ----------
    .. [1] Hartkamp, R., Siboulet, B., Dufreche, J.-F., Boasne, B.
           "Ion-specific adsorption and electroosmosis in charged
           amorphous porous silica." (2015) Phys. Chem. Chem. Phys.
           17, 24683-24695
    """
    area = self.periodicity[0] * self.periodicity[1]
    # Desired number of remaining dangling O's (reactive sites).
    target = int(oh_density * area)

    # Singly-bonded surface oxygens above the slab thickness.
    dangling_Os = [atom for atom in self.particles()
                   if atom.name == 'O' and
                   atom.pos[2] > thickness and
                   len(self.bond_graph.neighbors(atom)) == 1]

    # Each bridge consumes two dangling O's (one is removed outright).
    n_bridges = int((len(dangling_Os) - target) / 2)

    for _ in range(n_bridges):
        bridged = False
        # NOTE(review): if no eligible O pair within 0.45 remains, this
        # loop never terminates -- confirm inputs always allow n_bridges
        # bridges. Also assumes bond_graph.neighbors() returns an
        # indexable sequence (networkx 1.x style) -- verify.
        while not bridged:
            O1 = random.choice(dangling_Os)
            Si1 = self.bond_graph.neighbors(O1)[0]
            for O2 in dangling_Os:
                if O2 == O1:
                    continue
                Si2 = self.bond_graph.neighbors(O2)[0]
                # Skip pairs on the same Si or on already-connected Si's.
                if Si1 == Si2:
                    continue
                if any(neigh in self.bond_graph.neighbors(Si2)
                        for neigh in self.bond_graph.neighbors(Si1)):
                    continue
                r = self.min_periodic_distance(Si1.pos, Si2.pos)
                if r < 0.45:
                    # Bond O1 to both silicons and delete the redundant O2.
                    bridged = True
                    self.add_bond((O1, Si2))
                    dangling_Os.remove(O1)
                    dangling_Os.remove(O2)
                    self.remove(O2)
                    break
python
{ "resource": "" }
q33391
SilicaInterface._identify_surface_sites
train
def _identify_surface_sites(self, thickness):
    """Label surface sites and add ports above them. """
    for atom in self.particles():
        # Only singly-bonded atoms can be surface sites.
        if len(self.bond_graph.neighbors(atom)) != 1:
            continue
        if atom.name == 'O' and atom.pos[2] > thickness:
            atom.name = 'OS'
            # Attach a port pointing up, 0.1 nm above the oxygen.
            port = mb.Port(anchor=atom)
            port.spin(np.pi/2, [1, 0, 0])
            port.translate(np.array([0.0, 0.0, 0.1]))
            self.add(port, "port_{}".format(len(self.referenced_ports())))
python
{ "resource": "" }
q33392
fill_region
train
def fill_region(compound, n_compounds, region, overlap=0.2,
                seed=12345, edge=0.2, fix_orientation=False, temp_file=None):
    """Fill a region of a box with a compound using packmol.

    Parameters
    ----------
    compound : mb.Compound or list of mb.Compound
        Compound or list of compounds to be put in region.
    n_compounds : int or list of int
        Number of compounds to be put in region.
    region : mb.Box or list of mb.Box
        Region to be filled by compounds.
    overlap : float, units nm, default=0.2
        Minimum separation between atoms of different molecules.
    seed : int, default=12345
        Random seed to be passed to PACKMOL.
    edge : float, units nm, default=0.2
        Buffer at the edge of the region to not place molecules. This is
        necessary in some systems because PACKMOL does not account for
        periodic boundary conditions in its optimization.
    fix_orientation : bool or list of bools
        Specify that compounds should not be rotated when filling the box,
        default=False.
    temp_file : str, default=None
        File name to write PACKMOL's raw output to.

    Returns
    -------
    filled : mb.Compound

    If using mulitple regions and compounds, the nth value in each list are
    used in order. For example, if the third compound will be put in the
    third region using the third value in n_compounds.
    """
    _check_packmol(PACKMOL)

    # Normalize scalar arguments to parallel lists.
    if not isinstance(compound, (list, set)):
        compound = [compound]
    if not isinstance(n_compounds, (list, set)):
        n_compounds = [n_compounds]
    if not isinstance(fix_orientation, (list, set)):
        fix_orientation = [fix_orientation]*len(compound)

    if compound is not None and n_compounds is not None:
        if len(compound) != len(n_compounds):
            msg = ("`compound` and `n_compounds` must be of equal length.")
            raise ValueError(msg)
    if compound is not None:
        if len(compound) != len(fix_orientation):
            msg = ("`compound`, `n_compounds`, and `fix_orientation` must be of equal length.")
            raise ValueError(msg)

    # See if region is a single region or list
    if isinstance(region, Box):  # Cannot iterate over boxes
        region = [region]
    elif not any(isinstance(reg, (list, set, Box)) for reg in region):
        region = [region]
    region = [_validate_box(reg) for reg in region]

    # In angstroms for packmol.
    overlap *= 10

    # Build the input file and call packmol.
    filled_xyz = _new_xyz_file()

    # List to hold file handles for the temporary compounds
    compound_xyz_list = list()
    try:
        # One PACKMOL "structure" section per (compound, count, region) triple.
        input_text = PACKMOL_HEADER.format(overlap, filled_xyz.name, seed)
        for comp, m_compounds, reg, rotate in zip(compound, n_compounds,
                                                  region, fix_orientation):
            m_compounds = int(m_compounds)

            compound_xyz = _new_xyz_file()
            compound_xyz_list.append(compound_xyz)

            comp.save(compound_xyz.name, overwrite=True)
            # nm -> Angstrom for PACKMOL.
            reg_mins = reg.mins * 10
            reg_maxs = reg.maxs * 10
            reg_maxs -= edge * 10  # Apply edge buffer
            input_text += PACKMOL_BOX.format(compound_xyz.name, m_compounds,
                                             reg_mins[0], reg_mins[1],
                                             reg_mins[2], reg_maxs[0],
                                             reg_maxs[1], reg_maxs[2],
                                             PACKMOL_CONSTRAIN if rotate else "")

        _run_packmol(input_text, filled_xyz, temp_file)

        # Create the topology and update the coordinates.
        filled = Compound()
        filled = _create_topology(filled, compound, n_compounds)
        filled.update_coordinates(filled_xyz.name)
    finally:
        # Always clean up the temporary xyz files.
        for file_handle in compound_xyz_list:
            file_handle.close()
            os.unlink(file_handle.name)
        filled_xyz.close()
        os.unlink(filled_xyz.name)
    return filled
python
{ "resource": "" }
q33393
solvate
train
def solvate(solute, solvent, n_solvent, box, overlap=0.2,
            seed=12345, edge=0.2, fix_orientation=False, temp_file=None):
    """Solvate a compound in a box of solvent using packmol.

    Parameters
    ----------
    solute : mb.Compound
        Compound to be placed in a box and solvated.
    solvent : mb.Compound
        Compound to solvate the box.
    n_solvent : int
        Number of solvents to be put in box.
    box : mb.Box
        Box to be filled by compounds.
    overlap : float, units nm, default=0.2
        Minimum separation between atoms of different molecules.
    seed : int, default=12345
        Random seed to be passed to PACKMOL.
    edge : float, units nm, default=0.2
        Buffer at the edge of the box to not place molecules. This is
        necessary in some systems because PACKMOL does not account for
        periodic boundary conditions in its optimization.
    fix_orientation : bool
        Specify if solvent should not be rotated when filling box,
        default=False.
    temp_file : str, default=None
        File name to write PACKMOL's raw output to.

    Returns
    -------
    solvated : mb.Compound

    Raises
    ------
    ValueError
        If `solvent` and `n_solvent` have different lengths.
    """
    _check_packmol(PACKMOL)
    box = _validate_box(box)

    # Normalize scalar arguments to parallel lists.
    if not isinstance(solvent, (list, set)):
        solvent = [solvent]
    if not isinstance(n_solvent, (list, set)):
        n_solvent = [n_solvent]
    if not isinstance(fix_orientation, (list, set)):
        fix_orientation = [fix_orientation] * len(solvent)

    if len(solvent) != len(n_solvent):
        # BUGFIX: message previously said "`n_solvent` and `n_solvent`".
        msg = ("`solvent` and `n_solvent` must be of equal length.")
        raise ValueError(msg)

    # In angstroms for packmol.
    box_mins = box.mins * 10
    box_maxs = box.maxs * 10
    overlap *= 10
    # Pin the solute at the geometric center of the box.
    center_solute = (box_maxs + box_mins) / 2

    # Apply edge buffer
    box_maxs -= edge * 10

    # Build the input file for each compound and call packmol.
    solvated_xyz = _new_xyz_file()
    solute_xyz = _new_xyz_file()

    # generate list of temp files for the solvents
    solvent_xyz_list = list()
    try:
        solute.save(solute_xyz.name, overwrite=True)
        input_text = (PACKMOL_HEADER.format(overlap, solvated_xyz.name, seed) +
                      PACKMOL_SOLUTE.format(solute_xyz.name, *center_solute))

        for solv, m_solvent, rotate in zip(solvent, n_solvent, fix_orientation):
            m_solvent = int(m_solvent)

            solvent_xyz = _new_xyz_file()
            solvent_xyz_list.append(solvent_xyz)

            solv.save(solvent_xyz.name, overwrite=True)
            input_text += PACKMOL_BOX.format(solvent_xyz.name, m_solvent,
                                             box_mins[0], box_mins[1],
                                             box_mins[2], box_maxs[0],
                                             box_maxs[1], box_maxs[2],
                                             PACKMOL_CONSTRAIN if rotate else "")
        _run_packmol(input_text, solvated_xyz, temp_file)

        # Create the topology and update the coordinates.
        solvated = Compound()
        solvated.add(solute)
        solvated = _create_topology(solvated, solvent, n_solvent)
        solvated.update_coordinates(solvated_xyz.name)
    finally:
        # Always clean up the temporary xyz files.
        for file_handle in solvent_xyz_list:
            file_handle.close()
            os.unlink(file_handle.name)
        solvated_xyz.close()
        solute_xyz.close()
        os.unlink(solvated_xyz.name)
        os.unlink(solute_xyz.name)
    return solvated
python
{ "resource": "" }
q33394
_create_topology
train
def _create_topology(container, comp_to_add, n_compounds):
    """Return updated mBuild compound with new coordinates.

    Parameters
    ----------
    container : mb.Compound, required
        Compound containing the updated system generated by PACKMOL.
    comp_to_add : mb.Compound or list of mb.Compounds, required
        Compound(s) to add to the container.
    n_compounds : int or list of int, required
        Amount of each comp_to_add to place in the container.

    Returns
    -------
    container : mb.Compound
        Compound with added compounds from PACKMOL.
    """
    # Add n fresh clones of each compound so no instance is shared.
    for comp, m_compound in zip(comp_to_add, n_compounds):
        for _ in range(m_compound):
            container.add(clone(comp))
    return container
python
{ "resource": "" }
q33395
_write_pair_information
train
def _write_pair_information(gsd_file, structure):
    """Write the special pairs in the system.

    Special (1-4) pairs are derived from each atom's dihedral partners.

    Parameters
    ----------
    gsd_file :
        The file object of the GSD file being written
    structure : parmed.Structure
        Parmed structure object holding system information
    """
    pair_types = []
    pair_typeid = []
    pairs = []
    for ai in structure.atoms:
        for aj in ai.dihedral_partners:
            # make sure we don't double add
            if ai.idx > aj.idx:
                # Canonical pair-type name: the two atom types sorted.
                ps = '-'.join(sorted([ai.type, aj.type], key=natural_sort))
                if ps not in pair_types:
                    pair_types.append(ps)
                pair_typeid.append(pair_types.index(ps))
                pairs.append((ai.idx, aj.idx))
    gsd_file.pairs.types = pair_types
    gsd_file.pairs.typeid = pair_typeid
    gsd_file.pairs.group = pairs
    gsd_file.pairs.N = len(pairs)
python
{ "resource": "" }
q33396
_write_dihedral_information
train
def _write_dihedral_information(gsd_file, structure):
    """Write the dihedrals in the system.

    Parameters
    ----------
    gsd_file :
        The file object of the GSD file being written
    structure : parmed.Structure
        Parmed structure object holding system information
    """
    def _dihedral_type(dihedral):
        # Canonical type name: orient the quadruplet so the two central
        # atom types are in natural-sort order, making A-B-C-D and
        # D-C-B-A map to the same type string.
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        if [t2, t3] == sorted([t2, t3], key=natural_sort):
            return '-'.join((t1, t2, t3, t4))
        return '-'.join((t4, t3, t2, t1))

    gsd_file.dihedrals.N = len(structure.rb_torsions)

    # Compute each torsion's canonical type once and reuse it below
    # (avoids recomputing it in a second pass over the torsions).
    dihedral_types = [_dihedral_type(d) for d in structure.rb_torsions]

    unique_dihedral_types = sorted(set(dihedral_types), key=natural_sort)
    gsd_file.dihedrals.types = unique_dihedral_types

    # Dict lookup instead of list.index for O(1) typeid resolution.
    type_index = {t: i for i, t in enumerate(unique_dihedral_types)}
    gsd_file.dihedrals.typeid = [type_index[t] for t in dihedral_types]
    gsd_file.dihedrals.group = [
        (d.atom1.idx, d.atom2.idx, d.atom3.idx, d.atom4.idx)
        for d in structure.rb_torsions]
python
{ "resource": "" }
q33397
import_
train
def import_(module):
    """Import a module, and issue a nice message to stderr if the module
    isn't installed.

    Parameters
    ----------
    module : str
        The module you'd like to import, as a string

    Returns
    -------
    module : {module, object}
        The module object

    Raises
    ------
    DelayImportError
        If the module cannot be imported; a user-facing install hint is
        printed to stderr first.

    Examples
    --------
    >>> # the following two lines are equivalent. the difference is that the
    >>> # second will check for an ImportError and print you a very nice
    >>> # user-facing message about what's wrong (where you can install the
    >>> # module from, etc) if the import fails
    >>> import tables
    >>> tables = import_('tables')
    """
    try:
        return importlib.import_module(module)
    except ImportError:
        try:
            message = MESSAGES[module]
        except KeyError:
            # Generic fallback; {filename} and {line_number} are filled in
            # below from the caller's frame so the message points at the
            # offending line (the previous hard-coded "(unknown)" dropped
            # the filename entirely).
            message = ('The code at {filename}:{line_number} requires the '
                       + module + ' package')

        # Walk one frame up to identify the caller of import_().
        frame, filename, line_number, function_name, lines, index = \
            inspect.getouterframes(inspect.currentframe())[1]

        m = message.format(filename=os.path.basename(filename),
                           line_number=line_number)
        m = textwrap.dedent(m)

        # Red '#' bar as wide as the longest message line.
        bar = ('\033[91m' +
               '#' * max(len(line) for line in m.split(os.linesep)) +
               '\033[0m')
        print('', file=sys.stderr)
        print(bar, file=sys.stderr)
        print(m, file=sys.stderr)
        print(bar, file=sys.stderr)
        raise DelayImportError(m)
python
{ "resource": "" }
q33398
get_fn
train
def get_fn(name):
    """Get the full path to one of the reference files shipped for utils.

    In the source distribution, these files are in ``mbuild/utils/reference``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.

    Parameters
    ----------
    name : str
        Name of the file to load (with respect to the reference/ folder).

    Returns
    -------
    fn : str
        Full path to the requested reference file.

    Raises
    ------
    FileNotFoundError
        If no reference file of that name exists. FileNotFoundError
        subclasses IOError, so callers catching IOError keep working.
    """
    fn = resource_filename('mbuild', os.path.join('utils', 'reference', name))
    if not os.path.exists(fn):
        raise FileNotFoundError('Sorry! {} does not exist.'.format(fn))
    return fn
python
{ "resource": "" }
q33399
angle
train
def angle(u, v, w=None):
    """Return the angle in radians between two vectors.

    If ``w`` is given, the three arguments are treated as points and the
    angle at vertex ``v`` (between the rays v->u and v->w) is returned.
    """
    if w is not None:
        # Convert the three points into the two edge vectors at v.
        u, v = u - v, w - v
    cos_theta = np.dot(u, v) / norm(u) / norm(v)
    # Clip guards against round-off pushing |cos| marginally past 1.
    return np.arccos(np.clip(cos_theta, -1, 1))
python
{ "resource": "" }