code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
async def run_checks(self):
    """Yield health-check results for the FSM, this object, and the middleware layer."""
    # FSM checks first.
    async for result in self.fsm.health_check():
        yield result
    # Then this object's own checks.
    async for result in self.self_check():
        yield result
    # MiddlewareManager.health_check() is a plain (synchronous) iterable.
    for result in MiddlewareManager.health_check():
        yield result
Run checks on itself and on the FSM
def accepts(*checkers_args, **checkers_kws):
    """
    Create a decorator for validating function parameters.

    Parameters
    ----------
    checkers_args: positional args
        Functions to apply to the inputs of the decorated function.
        The position of the argument is assumed to match the position
        of the function in the decorator.
    checkers_kws: keyword args
        Keyword pairs in the form (arg: function) to apply to the
        inputs of the decorated function.

    Example
    -------
    @accepts(df=df_checker)
    def do_something_with_df(df, args*, kw**):
        print(df.head())
    """
    # NOTE(review): relies on the third-party ``decorator`` package being
    # imported at module level.
    @decorator
    def run_checkers(func, *args, **kwargs):
        # Map the actual call's arguments to the decorated function's
        # parameter names.
        all_args = inspect.getcallargs(func, *args, **kwargs)
        if checkers_args:
            for idx, checker_function in enumerate(checkers_args):
                if callable(checker_function):
                    # NOTE(review): indexes directly into *args, so positional
                    # checkers only see positionally-passed arguments — an
                    # argument passed by keyword would raise IndexError or
                    # check the wrong value. TODO confirm intended.
                    result = checker_function(args[idx])
        if checkers_kws:
            for key in checkers_kws.keys():
                if key not in all_args:
                    raise ValueError('Argument specified in @accepts is not found in decorated function')
                else:
                    df = all_args[key]
                    # The checker's return value is discarded; checkers are
                    # presumably expected to raise on invalid input — verify.
                    result = checkers_kws[key](df)
        return func(*args, **kwargs)
    return run_checkers
Create a decorator for validating function parameters. Parameters ---------- checkers_args: positional args Functions to apply to the inputs of the decorated function. The position of the argument is assumed to match the position of the function in the decorator. checkers_kws: keyword args Keyword pairs in the form (arg: function) to apply to the inputs of the decorated function. Example ------- @accepts(df=df_checker) def do_something_with_df(df, args*, kw**): print(df.head())
def priority(self):
    """Return the sort priority of this Schema (used to order mapping keys).

    :rtype: int
    """
    # Marker schemas carry their priority on the marker class itself.
    if self.compiled_type == const.COMPILED_TYPE.MARKER:
        return self.compiled.priority
    # All other schema kinds use the static priority table.
    return const.compiled_type_priorities[self.compiled_type]
Get priority for this Schema. Used to sort mapping keys :rtype: int
def write_xyz(self, *args, **kwargs):
    """Deprecated alias for :meth:`~chemcoord.Cartesian.to_xyz`."""
    with warnings.catch_warnings():
        # Force the DeprecationWarning to be shown every call.
        warnings.simplefilter("always")
        warnings.warn('Will be removed in the future. Please use to_xyz().',
                      DeprecationWarning)
        return self.to_xyz(*args, **kwargs)
Deprecated, use :meth:`~chemcoord.Cartesian.to_xyz`
def add_root_book(self, book_id): """Adds a root book. arg: book_id (osid.id.Id): the ``Id`` of a book raise: AlreadyExists - ``book_id`` is already in hierarchy raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.add_root_bin_template if self._catalog_session is not None: return self._catalog_session.add_root_catalog(catalog_id=book_id) return self._hierarchy_session.add_root(id_=book_id)
Adds a root book. arg: book_id (osid.id.Id): the ``Id`` of a book raise: AlreadyExists - ``book_id`` is already in hierarchy raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def clacks_overhead(fn):
    """Django view decorator that adds the ``X-Clacks-Overhead`` header.

    Usage::

        @clacks_overhead
        def my_view(request):
            return my_response
    """
    @wraps(fn)
    def _wrapped(*args, **kw):
        resp = fn(*args, **kw)
        resp['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
        return resp
    return _wrapped
A Django view decorator that will add the `X-Clacks-Overhead` header. Usage: @clacks_overhead def my_view(request): return my_response
def FindTypeInfo(self, name):
    """Search for a type_info instance which describes this key."""
    descriptor = self.type_infos.get(name)
    if descriptor is None:
        # Unknown keys are assumed to hold plain strings.
        descriptor = type_info.String(name=name, default="")
    return descriptor
Search for a type_info instance which describes this key.
def get_content_type(self):
    """Returns the Content Type to serve from either the extension or
    the Accept headers. Uses the :attr:`EXTENSION_MAP` list for all the
    configured MIME types.
    """
    # An explicit extension in the URL path wins outright.
    extension = self.path_params.get('_extension')
    for ext, mime in self.EXTENSION_MAP:
        if ext == extension:
            return mime
    # Else: use the Accept headers
    # The response now varies by Accept, so record that for caches.
    if self.response.vary is None:
        self.response.vary = ['Accept']
    else:
        self.response.vary.append('Accept')
    types = [mime for ext, mime in self.EXTENSION_MAP]
    ct = self.request.accept.best_match(types)
    # No best match found. The specification allows us to either return a
    # 406 or just use another format in this case.
    # We pick the default format, though that may become a configurable
    # behavior in the future.
    if not ct:
        ct = types[0]
    return ct
Returns the Content Type to serve from either the extension or the Accept headers. Uses the :attr:`EXTENSION_MAP` list for all the configured MIME types.
def visitPlusCardinality(self, ctx: ShExDocParser.PlusCardinalityContext):
    """Handle the '+' cardinality: one or more occurrences."""
    # '+' means at least one, with no upper bound (-1 == unbounded).
    self.expression.min = 1
    self.expression.max = -1
'+'
def visit_reference(self, node: docutils.nodes.reference) -> None: """Called for "reference" nodes.""" # if len(node.children) != 1 or not isinstance(node.children[0], docutils.nodes.Text) \ # or not all(_ in node.attributes for _ in ('name', 'refuri')): # return path = pathlib.Path(node.attributes['refuri']) try: if path.is_absolute(): return resolved_path = path.resolve() except FileNotFoundError: # in resolve(), prior to Python 3.6 return # except OSError: # in is_absolute() and resolve(), on URLs in Windows # return try: resolved_path.relative_to(HERE) except ValueError: return if not path.is_file(): return assert node.attributes['name'] == node.children[0].astext() self.references.append(node)
Called for "reference" nodes.
def blend_mode(self):
    """BlendMode: The blend mode used for drawing operations."""
    # SDL writes the current mode into an int out-parameter.
    out = ffi.new('int *')
    lib.SDL_GetTextureBlendMode(self._ptr, out)
    return BlendMode(out[0])
BlendMode: The blend mode used for drawing operations.
def tee(iterable, n=2):
    """Return n independent iterators from a single iterable.

    Once tee() has made a split, the original iterable should not be
    used anywhere else; otherwise, the iterable could get advanced
    without the tee objects being informed.

    This itertool may require significant auxiliary storage (depending
    on how much temporary data needs to be stored). In general, if one
    iterator uses most or all of the data before another iterator
    starts, it is faster to use list() instead of tee().
    """
    # Fix: the original used ``tee`` as the loop variable, shadowing this
    # function's own name inside its body.
    siblings = tuple(AsyncTeeIterable(iterable) for _ in range(n))
    for sibling in siblings:
        # Each tee keeps a reference to the whole family so consumed items
        # can be propagated to the others.
        sibling._siblings = siblings
    return siblings
Return n independent iterators from a single iterable. Once tee() has made a split, the original iterable should not be used anywhere else; otherwise, the iterable could get advanced without the tee objects being informed. This itertool may require significant auxiliary storage (depending on how much temporary data needs to be stored). In general, if one iterator uses most or all of the data before another iterator starts, it is faster to use list() instead of tee().
def process_config(raw_path, cache_dir, cache_file, **kwargs):
    """
    Read a build configuration and create it, storing the result in a
    build cache.

    Arguments
    raw_path -- path to a build configuration
    cache_dir -- the directory where cache should be written
    cache_file -- The filename to write the cache.  This will live
                  inside cache_dir.
    **kwargs -- additional arguments used by some modifiers
    """
    config = _create_cache(raw_path, cache_dir, cache_file)
    # Let every registered modifier adjust the raw configuration in turn.
    for modifier in _CONFIG_MODIFIERS:
        modifier(config, **kwargs)
    # pylint: disable=protected-access
    cache = devpipeline_configure.cache._CachedConfig(
        config, os.path.join(cache_dir, cache_file)
    )
    # Post-process the cached config and persist it to disk.
    _handle_value_modifiers(cache)
    _add_package_options(cache)
    _write_config(cache, cache_dir)
    return cache
Read a build configuration and create it, storing the result in a build cache. Arguments raw_path -- path to a build configuration cache_dir -- the directory where cache should be written cache_file -- The filename to write the cache. This will live inside cache_dir. **kwargs -- additional arguments used by some modifiers
def talk_back(self, message):
    """that's what she said: Tells you some things she actually said. :)"""
    quote = self.get_quote()
    if not quote:
        return
    self.reply("Actually, she said things like this: \n%s" % quote)
that's what she said: Tells you some things she actually said. :)
def get_average_along_axis(self, ind):
    """
    Get the averaged total of the volumetric data along an axis direction.
    For example, useful for visualizing Hartree Potentials from a LOCPOT
    file.

    Args:
        ind (int): Index of axis (0, 1 or 2).

    Returns:
        Average total along axis
    """
    grid = self.data["total"]
    ng = self.dim
    # Sum over the two perpendicular axes, then normalize by their sizes.
    other_axes = tuple(axis for axis in range(3) if axis != ind)
    total = np.sum(grid, axis=other_axes)
    return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
Get the averaged total of the volumetric data a certain axis direction. For example, useful for visualizing Hartree Potentials from a LOCPOT file. Args: ind (int): Index of axis. Returns: Average total along axis
def converge(self, playbook=None, **kwargs):
    """
    Executes ``ansible-playbook`` against the converge playbook unless
    another one is given, and returns the output as a string.

    :param playbook: An optional string containing an absolute path to a
        playbook.
    :param kwargs: An optional keyword arguments.
    :return: str
    """
    target = self.playbooks.converge if playbook is None else playbook
    return self._get_ansible_playbook(target, **kwargs).execute()
Executes ``ansible-playbook`` against the converge playbook unless specified otherwise and returns a string. :param playbook: An optional string containing an absolute path to a playbook. :param kwargs: An optional keyword arguments. :return: str
def randpath(self):
    """-> a random URI-like #str path"""
    # 0-3 random segments, each 3-10 chars drawn from the keyspace.
    segments = (
        gen_rand_str(3, 10, use=self.random, keyspace=list(self.keyspace))
        for _ in range(self.random.randint(0, 3))
    )
    return '/'.join(segments)
-> a random URI-like #str path
def _resolve_hostname(name):
    """Returns resolved hostname using the ssh config"""
    if env.ssh_config is None:
        return name
    # Only consult the ssh config when there is no node file for the raw
    # name already.
    if not os.path.exists(os.path.join("nodes", name + ".json")):
        resolved = env.ssh_config.lookup(name)['hostname']
        # Switch to the resolved name only if a node file exists for it.
        if os.path.exists(os.path.join("nodes", resolved + ".json")):
            name = resolved
    return name
Returns resolved hostname using the ssh config
def _consolidate_binds(local_binds, remote_binds):
    """
    Pad ``local_binds`` with wildcard defaults until it matches
    ``remote_binds`` in length, leaving paramiko to decide in which local
    port each tunnel will be open.
    """
    missing = len(remote_binds) - len(local_binds)
    if missing < 0:
        raise ValueError('Too many local bind addresses '
                         '(local_bind_addresses > remote_bind_addresses)')
    local_binds.extend(('0.0.0.0', 0) for _ in range(missing))
    return local_binds
Fill local_binds with defaults when no value/s were specified, leaving paramiko to decide in which local port the tunnel will be open
def fmt_row(self, columns, dimensions, row, **settings):
    """
    Format single table row.

    :param columns: column descriptors, one per cell
    :param dimensions: per-column sizing information (parallel to columns)
    :param row: sequence of cell values (parallel to columns)
    :param settings: formatting settings; the text-formatting and
        border-style/formatting entries are consumed here
    :return: the row rendered as one string, wrapped and joined with
        vertical border characters
    """
    cells = []
    i = 0
    # Render each cell with its matching dimension and column descriptor.
    for column in columns:
        cells.append(self.fmt_cell(
            row[i],
            dimensions[i],
            column,
            **settings[self.SETTING_TEXT_FORMATING]
        )
        )
        i += 1
    # The same vertical border character is computed for the left edge,
    # the cell separators, and the right edge.
    return self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) + \
        self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]).join(cells) + \
        self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING])
Format single table row.
def _setup_directories(self):
    """
    Creates data directory structure.

    * Raises a ``DirectorySetupFail`` exception if an error occurs while
      creating directories.
    * Returns True on success.
    """
    dirs = [self._data_dir]
    dirs += [os.path.join(self._data_dir, name) for name in self.DATA_SUBDIRS]
    for path in dirs:
        if not os.path.isdir(path):
            try:
                os.makedirs(path)  # recursive mkdir
                # Fix: the original used 0755 (Python 2 octal literal),
                # which is a SyntaxError on Python 3; 0o755 == rwxr-xr-x.
                os.chmod(path, 0o755)
            except OSError:
                raise errors.DirectorySetupFail()
    return True
Creates data directory structure. * Raises a ``DirectorySetupFail`` exception if error occurs while creating directories.
def module_settings(self):
    """
    Get Module settings via GET to the /settings/modules interface.

    :Returns: (dict) Module settings as shown `here
        <https://cloud.knuverse.com/docs/api/#api-Module_Settings-Get_the_module_settings>`_.
    """
    resp = self._get(url.settings_modules)
    self._check_response(resp, 200)
    return self._create_response(resp)
Get Module settings. Uses GET to /settings/modules interface. :Returns: (dict) Module settings as shown `here <https://cloud.knuverse.com/docs/api/#api-Module_Settings-Get_the_module_settings>`_.
def _to_dict(self, serialize=False):
    """
    Return a dict representation of this object.

    Works by copying ``self.__dict__`` and removing everything that
    should not be serialized.  With ``serialize=True`` the result is
    shaped for sending to Zendesk (only dirty or always-dirty attributes
    survive); otherwise it is a plain nested-dict view of the object.
    Note: the elif chain below is order-sensitive.
    """
    copy_dict = self.__dict__.copy()
    for key, value in vars(self).items():
        # We want to send all ids to Zendesk always
        if serialize and key == 'id':
            continue
        # If this is a Zenpy object, convert it to a dict.
        if not serialize and isinstance(value, BaseObject):
            copy_dict[key] = copy_dict.pop(key).to_dict()
        # This object has a flag indicating it has been dirtied, so we want to send it off.
        elif serialize and getattr(value, '_dirty', False):
            continue
        # Here we have an attribute that should always be sent to Zendesk.
        elif serialize and key in self._always_dirty:
            continue
        # These are for internal tracking, so just delete.
        elif key in ('api', '_dirty_attributes', '_always_dirty', '_dirty_callback', '_dirty'):
            del copy_dict[key]
        # If the attribute has not been modified, do not send it.
        elif serialize and key not in self._dirty_attributes:
            del copy_dict[key]
        # Some reserved words are prefixed with an underscore, remove it here.
        elif key.startswith('_'):
            copy_dict[key[1:]] = copy_dict[key]
            del copy_dict[key]
    return copy_dict
This method works by copying self.__dict__, and removing everything that should not be serialized.
def keys(self):
    """Return a list of all app_names."""
    # Idiomatic list comprehension instead of a manual append loop.
    return [app_name for app_name, _ in self.items()]
return a list of all app_names
def run(self, depth=None):
    """
    Checks that the paths in the specified path group stay the same
    over the next `depth` bytes.

    The path group should have a "left" and a "right" stash, each with
    a single path.

    NOTE(review): the source text was collapsed onto single lines; the
    statement grouping below (notably what lives inside the while loop
    and the ``if depth is not None`` block) is reconstructed — confirm
    against the original formatting.
    """
    #pg_history = [ ]
    if len(self.simgr.right) != 1 or len(self.simgr.left) != 1:
        self._report_incongruency("Single path in pg.left and pg.right required.")
        return False
    # Cap unicorn stepping to the requested depth on both sides.
    if "UNICORN" in self.simgr.one_right.options and depth is not None:
        self.simgr.one_right.unicorn.max_steps = depth
    if "UNICORN" in self.simgr.one_left.options and depth is not None:
        self.simgr.one_left.unicorn.max_steps = depth
    l.debug("Performing initial path comparison.")
    if not self.compare_paths(self.simgr.left[0], self.simgr.right[0]):
        self._report_incongruency("Initial path comparison check failed.")
        return False
    while len(self.simgr.left) > 0 and len(self.simgr.right) > 0:
        if depth is not None:
            self._update_progress(100. * float(self.simgr.one_left.history.block_count) / depth)
        if len(self.simgr.deadended) != 0:
            self._report_incongruency("Unexpected deadended paths before step.")
            return False
        if len(self.simgr.right) == 0 and len(self.simgr.left) == 0:
            l.debug("All done!")
            return True
        if len(self.simgr.right) != 1 or len(self.simgr.left) != 1:
            self._report_incongruency("Different numbers of paths in left and right stash..")
            return False
        # do a step
        l.debug(
            "Stepping right path with weighted length %d/%d",
            self.simgr.right[0].history.block_count,
            depth
        )
        self.prev_pg = self.simgr.copy() #pylint:disable=unused-variable
        self.simgr.step(stash='right')
        CongruencyCheck._sync_steps(self.simgr)
        if len(self.simgr.errored) != 0:
            self._report_incongruency("Unexpected errored paths.")
            return False
        try:
            if not self.compare_path_group(self.simgr) and self._validate_incongruency():
                self._report_incongruency("Path group comparison failed.")
                return False
        except AngrIncongruencyError:
            # Only propagate when the incongruency survives validation.
            if self._validate_incongruency():
                raise
        if depth is not None:
            # Retire paths that have reached the requested depth.
            self.simgr.drop(stash='left', filter_func=lambda p: p.history.block_count >= depth)
            self.simgr.drop(stash='right', filter_func=lambda p: p.history.block_count >= depth)
        # Keep both sides in a deterministic, matching order.
        self.simgr.right.sort(key=lambda p: p.addr)
        self.simgr.left.sort(key=lambda p: p.addr)
        self.simgr.stashed_right[:] = self.simgr.stashed_right[::-1]
        self.simgr.stashed_left[:] = self.simgr.stashed_left[::-1]
        self.simgr.move('stashed_right', 'right')
        self.simgr.move('stashed_left', 'left')
        # Compare one path pair at a time; stash the rest.
        if len(self.simgr.left) > 1:
            self.simgr.split(from_stash='left', limit=1, to_stash='stashed_left')
            self.simgr.split(from_stash='right', limit=1, to_stash='stashed_right')
Checks that the paths in the specified path group stay the same over the next `depth` bytes. The path group should have a "left" and a "right" stash, each with a single path.
def Terminate(self, status=None):
    """Terminates this flow."""
    # Best-effort cleanup of any queued flow state.
    try:
        self.queue_manager.DestroyFlowStates(self.session_id)
    except queue_manager.MoreDataException:
        pass
    # This flow might already not be running.
    if not self.IsRunning():
        return
    self._SendTerminationMessage(status=status)
    # Mark as terminated and persist the flow object.
    self.context.state = rdf_flow_runner.FlowContext.State.TERMINATED
    self.flow_obj.Flush()
Terminates this flow.
def pull_tasks(self, kill_event):
    """Pulls tasks from the incoming tasks 0mq pipe onto the internal
    pending task queue.

    Parameters:
    -----------
    kill_event : threading.Event
          Event to let the thread know when it is time to die.

    NOTE(review): the source text was collapsed onto single lines; the
    statement grouping in the while loop below is reconstructed —
    confirm against the original formatting.
    """
    logger.info("[TASK PULL THREAD] starting")
    poller = zmq.Poller()
    poller.register(self.task_incoming, zmq.POLLIN)
    # Send a registration message
    msg = self.create_reg_message()
    logger.debug("Sending registration message: {}".format(msg))
    self.task_incoming.send(msg)
    last_beat = time.time()
    last_interchange_contact = time.time()
    task_recv_counter = 0
    poll_timer = 1
    while not kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)
        ready_worker_count = self.ready_worker_queue.qsize()
        pending_task_count = self.pending_task_queue.qsize()
        logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count, pending_task_count))
        # Periodic heartbeat to the interchange.
        if time.time() > last_beat + self.heartbeat_period:
            self.heartbeat()
            last_beat = time.time()
        # Request more work only when workers are idle and the queue has room.
        if pending_task_count < self.max_queue_size and ready_worker_count > 0:
            logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count))
            msg = ((ready_worker_count).to_bytes(4, "little"))
            self.task_incoming.send(msg)
        socks = dict(poller.poll(timeout=poll_timer))
        if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:
            _, pkl_msg = self.task_incoming.recv_multipart()
            tasks = pickle.loads(pkl_msg)
            last_interchange_contact = time.time()
            if tasks == 'STOP':
                logger.critical("[TASK_PULL_THREAD] Received stop request")
                kill_event.set()
                break
            elif tasks == HEARTBEAT_CODE:
                logger.debug("Got heartbeat from interchange")
            else:
                # Reset timer on receiving message
                poll_timer = 1
                task_recv_counter += len(tasks)
                logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks], task_recv_counter))
                for task in tasks:
                    self.pending_task_queue.put(task)
        else:
            logger.debug("[TASK_PULL_THREAD] No incoming tasks")
            # Limit poll duration to heartbeat_period
            # heartbeat_period is in s vs poll_timer in ms
            poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)
            # Only check if no messages were received.
            if time.time() > last_interchange_contact + self.heartbeat_threshold:
                logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
                kill_event.set()
                logger.critical("[TASK_PULL_THREAD] Exiting")
                break
Pulls tasks from the incoming tasks 0mq pipe onto the internal pending task queue Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die.
def attribute_node(self, name, ns_uri=None):
    """
    :param string name: the name of the attribute to return.
    :param ns_uri: a URI defining a namespace constraint on the attribute.
    :type ns_uri: string or None

    :return: this element's attribute matching ``name`` (and ``ns_uri``)
        as an :class:`Attribute` node.
    """
    impl_attr = self.adapter.get_node_attribute_node(
        self.impl_node, name, ns_uri)
    return self.adapter.wrap_node(
        impl_attr, self.adapter.impl_document, self.adapter)
:param string name: the name of the attribute to return. :param ns_uri: a URI defining a namespace constraint on the attribute. :type ns_uri: string or None :return: this element's attributes that match ``ns_uri`` as :class:`Attribute` nodes.
def align(s1, s2, gap=' ', eq=operator.eq):
    '''aligns two strings

    >>> print(*align('pharmacy', 'farmácia', gap='_'), sep='\\n')
    pharmac_y
    _farmácia
    >>> print(*align('advantage', 'vantagem', gap='_'), sep='\\n')
    advantage_
    __vantagem
    '''
    # Build the edit-distance table, one lazily-extended row at a time.
    m, n = len(s1), len(s2)
    dp = []
    cur = list(range(n + 1))    # first row is 0, 1, 2, ..., n
    dp.append(list(cur))
    for i in range(m):
        diag = i
        cur[0] = i + 1
        for j in range(n):
            cost = 0 if eq(s1[i], s2[j]) else 1
            diag, cur[j + 1] = cur[j + 1], min(diag + cost, cur[j] + 1, cur[j + 1] + 1)
        dp.append(list(cur))
    # Trace the best alignment back from cell [m][n] toward [0][0].
    a1, a2 = '', ''
    i, j = m, n
    while i != 0 and j != 0:
        _, i, j, a1, a2 = min(
            (dp[i - 1][j - 1], i - 1, j - 1, s1[i - 1] + a1, s2[j - 1] + a2),
            (dp[i - 1][j], i - 1, j, s1[i - 1] + a1, gap + a2),
            (dp[i][j - 1], i, j - 1, gap + a1, s2[j - 1] + a2)
        )
    # Pad whichever prefix remains unmatched.
    if i != 0:
        a1 = s1[:i] + a1
        a2 = gap * i + a2
    if j != 0:
        a1 = gap * j + a1
        a2 = s2[:j] + a2
    return a1, a2
aligns two strings >>> print(*align('pharmacy', 'farmácia', gap='_'), sep='\\n') pharmac_y _farmácia >>> print(*align('advantage', 'vantagem', gap='_'), sep='\\n') advantage_ __vantagem
def _output(self):
    """Write this image as a PDF XObject into the session stream.

    Fix: the original compared strings with ``is`` (object identity),
    which only works by CPython interning accident and raises a
    SyntaxWarning on modern Python; ``==`` is the correct comparison.
    """
    self.session._out('<</Type /XObject')
    self.session._out('/Subtype /Image')
    self.session._out('/Width %s' % self.width)
    self.session._out('/Height %s' % self.height)
    if self.colorspace == 'Indexed':
        self.session._out('/ColorSpace [/Indexed /DeviceRGB %s %s 0 R' % (self.pal, self.number + 1))
    else:
        self.session._out('/ColorSpace /%s' % self.colorspace)
    if self.colorspace == 'DeviceCMYK':
        self.session._out('/Decode [1 0 1 0 1 0 1 0]')
    self.session._out('/BitsPerComponent %s' % self.bits_per_component)
    if self.filter:
        self.session._out('/Filter /%s' % self.filter)
    if self.decode:
        self.session._out('/DecodeParms << %s >>' % self.decode)
    if self.transparent:
        self.session._out('/Mask [%s]' % self.transparent_string)
    if self.soft_mask:
        self.session._out('/SMask %s 0 R' % (self.number + 1))
    self.session._out('/Length %s >>' % self.size)
    self.session._put_stream(self.image_data)
    self.session._out('endobj')
    # Indexed images carry a palette object immediately after.
    if self.colorspace == 'Indexed':
        self.session._out('<<%s /Length %s >>' % (self.palette_filter, self.palette_length))
        self.session._put_stream(self.palette)
        self.session._out('endobj')
    # A soft mask that is itself an image gets emitted as the next object.
    if isinstance(self.soft_mask, PDFImage):
        obj = self.session._add_object()
        self.soft_mask._set_number(obj.id)
        self.soft_mask._output()
Prompts the creating of image objects.
def get_template_options():
    """
    Returns a list of all templates that can be used for CMS pages.

    The paths that are returned are relative to TURRENTINE_TEMPLATE_ROOT.
    """
    template_root = turrentine_settings.TURRENTINE_TEMPLATE_ROOT
    turrentine_dir = turrentine_settings.TURRENTINE_TEMPLATE_SUBDIR
    return [
        os.path.relpath(os.path.join(parent, file_name), template_root)
        for parent, _dirs, files in os.walk(turrentine_dir)
        for file_name in files
    ]
Returns a list of all templates that can be used for CMS pages. The paths that are returned are relative to TURRENTINE_TEMPLATE_ROOT.
def id(self) -> typing.Union[str, None]:
    """Identifier for the project, or ``None`` when no project is set."""
    if not self._project:
        return None
    return self._project.id
Identifier for the project.
def get_branch_sha(profile, name):
    """Get the SHA a branch's HEAD points to.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        name
            The name of the branch.

    Returns:
        The requested SHA.
    """
    branch_data = refs.get_ref(profile, "heads/" + name)
    return branch_data.get("head").get("sha")
Get the SHA a branch's HEAD points to. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch. Returns: The requested SHA.
def _handle_xmpp_message(self, xmpp_message: BeautifulSoup):
    """Dispatch an incoming XMPP 'message' stanza.

    In the case of Kik this is the actual stanza we receive when someone
    sends us a message (whether groupchat or not), starts typing, stops
    typing, reads our message, etc.

    Examples: http://slixmpp.readthedocs.io/api/stanza/message.html

    :param xmpp_message: The XMPP 'message' element we received
    """
    if 'xmlns' in xmpp_message.attrs:
        self._handle_xmlns(xmpp_message['xmlns'], xmpp_message)
        return
    if xmpp_message['type'] == 'receipt':
        if xmpp_message.g:
            self.callback.on_group_receipts_received(chatting.IncomingGroupReceiptsEvent(xmpp_message))
        else:
            self.xml_namespace_handlers['jabber:client'].handle(xmpp_message)
        return
    # iPads send messages without xmlns; fall back to jabber:client handling.
    self.xml_namespace_handlers['jabber:client'].handle(xmpp_message)
an XMPP 'message' in the case of Kik is the actual stanza we receive when someone sends us a message (whether groupchat or not), starts typing, stops typing, reads our message, etc. Examples: http://slixmpp.readthedocs.io/api/stanza/message.html :param xmpp_message: The XMPP 'message' element we received
def hil_actuator_controls_encode(self, time_usec, controls, mode, flags):
    '''
    Sent from autopilot to simulation. Hardware in the loop control
    outputs (replacement for HIL_CONTROLS).

    time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    controls  : Control outputs -1 .. 1. Channel assignment depends on the simulated hardware. (float)
    mode      : System mode (MAV_MODE), includes arming state. (uint8_t)
    flags     : Flags as bitfield, reserved for future use. (uint64_t)
    '''
    # Thin wrapper: build the message object with the given fields.
    return MAVLink_hil_actuator_controls_message(time_usec, controls, mode, flags)
Sent from autopilot to simulation. Hardware in the loop control outputs (replacement for HIL_CONTROLS) time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) controls : Control outputs -1 .. 1. Channel assignment depends on the simulated hardware. (float) mode : System mode (MAV_MODE), includes arming state. (uint8_t) flags : Flags as bitfield, reserved for future use. (uint64_t)
def copy(self):
    '''Copy the container, giving the copy an invalidated clone of the condition.'''
    duplicate = super(Conditional, self).copy()
    cloned_condition = self._condition.copy()
    # The clone must not be considered valid for this container.
    cloned_condition.invalidate(self)
    duplicate._condition = cloned_condition
    return duplicate
Copy the container, put an invalidated copy of the condition in the new container
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # Every requested stddev type must be supported by this GMPE.
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)
    # Coefficients for the requested intensity measure type.
    C = self.COEFFS[imt]
    mag = self._convert_magnitude(rup.mag)
    # Functional form: constant + linear and quadratic magnitude terms,
    # plus a magnitude-dependent geometric-attenuation term in ln(rjb + c).
    mean = (
        C['c1'] + C['c2'] * mag + C['c10'] * (mag - 6) ** 2 +
        (C['c6'] + C['c7'] * mag) * np.log(dists.rjb + np.exp(C['c4']))
    )
    mean = clip_mean(imt, mean)
    stddevs = self._compute_stddevs(C, dists.rjb.size, stddev_types)
    return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def show_hydrophobic(self):
    """Visualizes hydrophobic contacts."""
    group = self.getPseudoBondGroup(
        "Hydrophobic Interactions-%i" % self.tid,
        associateWith=[self.model])
    # Render hydrophobic contacts as gray dashed lines.
    group.lineType = self.chimera.Dash
    group.lineWidth = 3
    group.color = self.colorbyname('gray')
    for pair in self.plcomplex.hydrophobic_contacts.pairs_ids:
        self.bs_res_ids.append(pair[0])
Visualizes hydrophobic contacts.
def getbool(value):
    """
    Returns a boolean from any of a range of values.  Returns None for
    unrecognized values.  Numbers other than 0 and 1 are considered
    unrecognized.

    >>> getbool(True)
    True
    >>> getbool(1)
    True
    >>> getbool('1')
    True
    >>> getbool('t')
    True
    >>> getbool(2)
    >>> getbool(0)
    False
    >>> getbool(False)
    False
    >>> getbool('n')
    False
    """
    text = str(value).lower()
    if text in ('1', 't', 'true', 'y', 'yes'):
        return True
    if text in ('0', 'f', 'false', 'n', 'no'):
        return False
    return None
Returns a boolean from any of a range of values. Returns None for unrecognized values. Numbers other than 0 and 1 are considered unrecognized. >>> getbool(True) True >>> getbool(1) True >>> getbool('1') True >>> getbool('t') True >>> getbool(2) >>> getbool(0) False >>> getbool(False) False >>> getbool('n') False
def _read_from_folder(self, dirname):
    """
    Internal folder reader.

    :type dirname: str
    :param dirname: Folder to read from.
    """
    templates = _par_read(dirname=dirname, compressed=False)
    t_files = glob.glob(dirname + os.sep + '*.ms')
    tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
    if len(tribe_cat_file) != 0:
        tribe_cat = read_events(tribe_cat_file[0])
    else:
        tribe_cat = Catalog()
    previous_template_names = [t.name for t in self.templates]
    # Fix: iterate over a snapshot — the original removed items from
    # ``templates`` while iterating it, which silently skips the element
    # following each removal.
    for template in list(templates):
        if template.name in previous_template_names:
            # Don't read in for templates that we already have.
            continue
        # Attach the catalog event tagged with this template's name.
        for event in tribe_cat:
            for comment in event.comments:
                if comment.text == 'eqcorrscan_template_' + template.name:
                    template.event = event
        t_file = [t for t in t_files
                  if t.split(os.sep)[-1] == template.name + '.ms']
        if len(t_file) == 0:
            print('No waveform for template: ' + template.name)
            templates.remove(template)
            continue
        elif len(t_file) > 1:
            print('Multiple waveforms found, using: ' + t_file[0])
        template.st = read(t_file[0])
    self.templates.extend(templates)
    return
Internal folder reader. :type dirname: str :param dirname: Folder to read from.
def get_share_properties(self, share_name, timeout=None):
    '''
    Returns all user-defined metadata and system properties for the
    specified share. The data returned does not include the share's
    list of files or directories.

    :param str share_name: Name of existing share.
    :param int timeout: The timeout parameter is expressed in seconds.
    :return: A Share that exposes properties and metadata.
    :rtype: :class:`.Share`
    '''
    _validate_not_none('share_name', share_name)
    # Build the GET /<share>?restype=share request.
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(share_name)
    request.query = [
        ('restype', 'share'),
        ('timeout', _int_to_str(timeout)),
    ]
    return _parse_share(share_name, self._perform_request(request))
Returns all user-defined metadata and system properties for the specified share. The data returned does not include the shares's list of files or directories. :param str share_name: Name of existing share. :param int timeout: The timeout parameter is expressed in seconds. :return: A Share that exposes properties and metadata. :rtype: :class:`.Share`
def __replace_names(sentence, counts):
    """
    Find and replace all instances of ``#NAME`` in *sentence*.

    Each occurrence is replaced one at a time so that every placeholder
    can receive a different generated name.

    :param sentence: template string possibly containing ``#NAME``
        placeholders; may be ``None``.
    :param counts: passed through to ``__get_name`` when generating
        each replacement name.
    :return: the sentence with all ``#NAME`` placeholders replaced, or
        the input unchanged if it is ``None``.
    """
    if sentence is None:
        return sentence
    # The original duplicated this loop's exit condition in an inner
    # `if`; a single `while` with a membership test is equivalent.
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
Let's find and replace all instances of #NAME :param sentence: :param counts:
def encode(password, algorithm, salt, iterations):
    """
    Encode a password with PBKDF2-HMAC.

    :param password: plain-text password
    :param algorithm: algorithm label embedded in the result string
    :param salt: salt string
    :param iterations: number of PBKDF2 iterations
    :return: PBKDF2 hashed password string of the form
        ``algorithm$iterations$salt$base64hash``
    """
    # `digest()` supplies the underlying hash; derive the raw key bytes.
    derived = hashlib.pbkdf2_hmac(
        digest().name, password.encode(), salt.encode(), iterations)
    b64_hash = base64.b64encode(derived).decode('ascii').strip()
    return "%s$%d$%s$%s" % (algorithm, iterations, salt, b64_hash)
Encode a Password :param password: Password :param algorithm :param salt: Salt :param iterations: iterations :return: PBKDF2 hashed Password
def drop(connection, skip):
    """Drop all data for every manager (except the skipped ones).

    Iterates the managers produced by ``_iterate_managers`` and calls
    ``drop_all`` on each, echoing progress with click.

    :param connection: connection specifier forwarded to ``_iterate_managers``
    :param skip: managers to skip, forwarded to ``_iterate_managers``
    """
    # The index from _iterate_managers is unused here.
    for _, name, manager in _iterate_managers(connection, skip):
        click.secho(f'dropping {name}', fg='cyan', bold=True)
        manager.drop_all()
Drop all.
def ReadMostRecentClientGraphSeries(self, client_label, report_type):
    """See db.Database.

    Returns the graph series with the latest timestamp for the given
    client label and report type, or ``None`` when there are none.
    """
    series_with_timestamps = self.ReadAllClientGraphSeries(
        client_label, report_type)
    if not series_with_timestamps:
        return None
    # max() over (timestamp, series) pairs picks the latest entry
    # without sorting the whole mapping (the original sorted then
    # took the last element).
    _, latest_series = max(iteritems(series_with_timestamps))
    return latest_series
See db.Database.
def absorptionCoefficient_Doppler(Components=None,SourceTables=None,partitionFunction=PYTIPS,
                                  Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
                                  IntensityThreshold=DefaultIntensityThreshold,
                                  OmegaWingHW=DefaultOmegaWingHW,
                                  ParameterBindings=DefaultParameterBindings,
                                  EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
                                  GammaL='dummy', HITRAN_units=True, LineShift=True,
                                  File=None, Format=None, OmegaGrid=None,
                                  WavenumberRange=None,WavenumberStep=None,WavenumberWing=None,
                                  WavenumberWingHW=None,WavenumberGrid=None):
    """
    INPUT PARAMETERS:
        Components:  list of tuples [(M,I,D)], where
                        M - HITRAN molecule number,
                        I - HITRAN isotopologue number,
                        D - abundance (optional)
        SourceTables:  list of tables from which to calculate cross-section (optional)
        partitionFunction:  pointer to partition function (default is PYTIPS) (optional)
        Environment:  dictionary containing thermodynamic parameters.
                        'p' - pressure in atmospheres,
                        'T' - temperature in Kelvin
                        Default={'p':1.,'T':296.}
        WavenumberRange:  wavenumber range to consider.
        WavenumberStep:   wavenumber step to consider.
        WavenumberWing:   absolute wing for calculating a lineshape (in cm-1)
        WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
        IntensityThreshold:  threshold for intensities
        GammaL:  specifies broadening parameter ('gamma_air' or 'gamma_self')
        HITRAN_units:  use cm2/molecule (True) or cm-1 (False) for absorption coefficient
        File:    write output to file (if specified)
        Format:  c-format of file output (accounts for significant digits in WavenumberStep)
    OUTPUT PARAMETERS:
        Wavenum: wavenumber grid with respect to parameters OmegaRange and OmegaStep
        Xsect: absorption coefficient calculated on the grid
    ---
    DESCRIPTION:
        Calculate absorption coefficient using Doppler (Gauss) profile.
        Absorption coefficient is calculated at arbitrary temperature and pressure.
        User can vary a wide range of parameters to control a process of calculation.
        The choice of these parameters depends on properties of a particular linelist.
        Default values are a sort of guess which give a decent precision (on average)
        for a reasonable amount of cpu time. To increase calculation accuracy,
        user should use a trial and error method.
    ---
    EXAMPLE OF USAGE:
        nu,coef = absorptionCoefficient_Doppler(((2,1),),'co2',WavenumberStep=0.01,
                                                HITRAN_units=False,GammaL='gamma_self')
    ---
    """
    # New-style "Wavenumber*" keyword aliases take precedence over the
    # legacy "Omega*" names when supplied.
    if WavenumberRange:  OmegaRange=WavenumberRange
    if WavenumberStep:   OmegaStep=WavenumberStep
    if WavenumberWing:   OmegaWing=WavenumberWing
    if WavenumberWingHW: OmegaWingHW=WavenumberWingHW
    if WavenumberGrid:   OmegaGrid=WavenumberGrid

    # "bug" with 1-element list
    Components = listOfTuples(Components)
    SourceTables = listOfTuples(SourceTables)

    # determine final input values
    Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
    IntensityThreshold,Format = \
       getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
                                OmegaStep,OmegaWing,IntensityThreshold,Format)

    # special for Doppler case: set OmegaStep to a smaller value
    if not OmegaStep: OmegaStep = 0.001

    # warn user about too large omega step
    if OmegaStep>0.005: warn('Big wavenumber step: possible accuracy decline')

    # get uniform linespace for cross-section
    #number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
    #Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
    if OmegaGrid is not None:
        # caller-supplied grid: only make sure it is sorted
        Omegas = npsort(OmegaGrid)
    else:
        #Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
        Omegas = arange_(OmegaRange[0],OmegaRange[1],OmegaStep) # fix
    number_of_points = len(Omegas)
    Xsect = zeros(number_of_points)

    # reference temperature and pressure
    Tref = __FloatType__(296.) # K
    pref = __FloatType__(1.) # atm

    # actual temperature and pressure
    T = Environment['T'] # K
    p = Environment['p'] # atm

    # create dictionary from Components:
    # (M,I) -> requested abundance; the natural abundance is kept
    # separately for rescaling line intensities in the accumulation step
    ABUNDANCES = {}
    NATURAL_ABUNDANCES = {}
    for Component in Components:
        M = Component[0]
        I = Component[1]
        if len(Component) >= 3:
            ni = Component[2]
        else:
            try:
                ni = ISO[(M,I)][ISO_INDEX['abundance']]
            except KeyError:
                raise Exception('cannot find component M,I = %d,%d.' % (M,I))
        ABUNDANCES[(M,I)] = ni
        NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]

    # precalculation of volume concentration
    if HITRAN_units:
        factor = __FloatType__(1.0)
    else:
        factor = volumeConcentration(p,T)

    # SourceTables contain multiple tables
    for TableName in SourceTables:

        # get line centers
        nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']

        # loop through line centers (single stream)
        for RowID in range(nline):

            # get basic line parameters (lower level)
            LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
            LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
            LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
            MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
            IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
            if LineShift:
                Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
            else:
                Shift0DB = 0

            # filter by molecule and isotopologue
            if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue

            # partition functions for T and Tref
            # TODO: optimize
            SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
            SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)

            # get all environment dependences from voigt parameters

            # intensity
            LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
                                                            LowerStateEnergyDB,LineCenterDB)

            # FILTER by LineIntensity: compare it with IntensityThreshold
            # TODO: apply wing narrowing instead of filtering, this would be more appropriate
            if LineIntensity < IntensityThreshold: continue

            # doppler broadening coefficient (GammaD)
            #GammaDDB = cSqrtLn2*LineCenterDB/cc*sqrt(2*cBolts*T/molecularMass(MoleculeNumberDB,IsoNumberDB))
            #GammaD = EnvironmentDependency_GammaD(GammaDDB,T,Tref)
            #print(GammaD)

            cMassMol = 1.66053873e-27
            #cSqrt2Ln2 = 1.1774100225
            fSqrtMass = sqrt(molecularMass(MoleculeNumberDB,IsoNumberDB))
            #fSqrtMass = sqrt(32831.2508809)
            cc_ = 2.99792458e8
            cBolts_ = 1.3806503e-23
            #cBolts_ = 1.3806488E-23
            GammaD = (cSqrt2Ln2/cc_)*sqrt(cBolts_/cMassMol)*sqrt(T) * LineCenterDB/fSqrtMass

            #GammaD = 4.30140e-7*LineCenterDB*sqrt(T/molecularMass(MoleculeNumberDB,IsoNumberDB))

            #cc_ = 2.99792458e8 # 2.99792458e10 # 2.99792458e8
            #cBolts_ = 1.3806503e-23 #1.3806488E-16 # 1.380648813E-16 # 1.3806503e-23 # 1.3806488E-23
            #GammaD = sqrt(log(2))*LineCenterDB*sqrt(2*cBolts_*T/(cMassMol*molecularMass(MoleculeNumberDB,IsoNumberDB)*cc_**2))
            #print(GammaD)

            # get final wing of the line according to GammaD, OmegaWingHW and OmegaWing
            # XXX min or max?
            OmegaWingF = max(OmegaWing,OmegaWingHW*GammaD)

            # shift coefficient
            Shift0 = Shift0DB*p/pref

            # XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version

            #PROFILE_VOIGT(sg0,GamD,Gam0,sg)
            #      sg0 : Unperturbed line position in cm-1 (Input).
            #      GamD : Doppler HWHM in cm-1 (Input)
            #      Gam0 : Speed-averaged line-width in cm-1 (Input).
            #      sg : Current WaveNumber of the Computation in cm-1 (Input).

            # XXX time?
            # restrict lineshape evaluation to the window around the line center
            BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
            BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
            lineshape_vals = PROFILE_DOPPLER(LineCenterDB+Shift0,GammaD,Omegas[BoundIndexLower:BoundIndexUpper])
            #lineshape_vals = PROFILE_VOIGT(LineCenterDB,GammaD,cZero,Omegas[BoundIndexLower:BoundIndexUpper])[0]
            #Xsect[BoundIndexLower:BoundIndexUpper] += lineshape_vals # DEBUG
            # accumulate, rescaling intensity from natural to requested abundance
            Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      LineIntensity * lineshape_vals

    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
INPUT PARAMETERS: Components: list of tuples [(M,I,D)], where M - HITRAN molecule number, I - HITRAN isotopologue number, D - abundance (optional) SourceTables: list of tables from which to calculate cross-section (optional) partitionFunction: pointer to partition function (default is PYTIPS) (optional) Environment: dictionary containing thermodynamic parameters. 'p' - pressure in atmospheres, 'T' - temperature in Kelvin Default={'p':1.,'T':296.} WavenumberRange: wavenumber range to consider. WavenumberStep: wavenumber step to consider. WavenumberWing: absolute wing for calculating a lineshape (in cm-1) WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths) IntensityThreshold: threshold for intensities GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self') HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient File: write output to file (if specified) Format: c-format of file output (accounts for significant digits in WavenumberStep) OUTPUT PARAMETERS: Wavenum: wavenumber grid with respect to parameters OmegaRange and OmegaStep Xsect: absorption coefficient calculated on the grid --- DESCRIPTION: Calculate absorption coefficient using Doppler (Gauss) profile. Absorption coefficient is calculated at arbitrary temperature and pressure. User can vary a wide range of parameters to control a process of calculation. The choise of these parameters depends on properties of a particular linelist. Default values are a sort of guess which give a decent precision (on average) for a reasonable amount of cpu time. To increase calculation accuracy, user should use a trial and error method. --- EXAMPLE OF USAGE: nu,coef = absorptionCoefficient_Doppler(((2,1),),'co2',WavenumberStep=0.01, HITRAN_units=False,GammaL='gamma_self') ---
def join(L, keycols=None, nullvals=None, renamer=None,
         returnrenaming=False, Names=None):
    """
    Combine two or more numpy ndarray with structured dtype on common
    key column(s).

    Merge a list (or dictionary) of numpy ndarray with structured dtype,
    given by `L`, on key columns listed in `keycols`.

    This function is actually a wrapper for
    :func:`tabular.spreadsheet.strictjoin`.

    The ``strictjoin`` function has a few restrictions, and this ``join``
    function will try to ensure that they are satisfied:

    * each element of `keycol` must be a valid column name in `X` and
      each array in `L`, and all of the same data-type.

    * for each column `col` in `keycols`, and each array `A` in `L`, the
      values in `A[col]` must be unique -- and same for `X[col]`.
      (Actually this uniqueness doesn't have to hold for the first
      tabarray in L, that is, L[0], but must for all the subsequent ones.)

    * the *non*-key-column column names in each of the arrays must be
      disjoint from each other -- or disjoint after a renaming (see below).

    An error will be thrown if these conditions are not met.

    If you don't provide a value of `keycols`, the algorithm will attempt
    to infer which columns should be used by trying to find the largest
    set of common column names that contain unique values in each array
    and have the same data type. An error will be thrown if no such
    inference can be made.

    *Renaming of overlapping columns*

    If the non-keycol column names of the arrays overlap, ``join`` will
    by default attempt to rename the columns by using a simple convention:

    * If `L` is a list, it will append the number in the list to the key
      associated with the array.

    * If `L` is a dictionary, the algorithm will append the string
      representation of the key associated with an array to the
      overlapping columns from that array.

    You can override the default renaming scheme using the `renamer`
    parameter.

    *Nullvalues for keycolumn differences*

    If there are regions of the keycolumns that are not overlapping
    between merged arrays, `join` will fill in the relevant entries with
    null values chosen by default:

    * '0' for integer columns
    * '0.0' for float columns
    * the empty character ('') for string columns.

    **Parameters**

        **L** : list or dictionary

            Numpy recarrays to merge. If `L` is a dictionary, the keys
            name each numpy recarray, and the corresponding values are
            the actual numpy recarrays.

        **keycols** : list of strings

            List of the names of the key columns along which to do the
            merging.

        **nullvals** : function, optional

            A function that returns a null value for a numpy format
            descriptor string, e.g. ``'<i4'`` or ``'|S5'``. See the
            default function for further documentation:
            :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`

        **renamer** : function, optional

            A function for renaming overlapping non-key column names
            among the numpy recarrays to merge. See the default function
            for further documentation:
            :func:`tabular.spreadsheet.DEFAULT_RENAMER`

        **returnrenaming** : Boolean, optional

            Whether to return the result of the `renamer` function. See
            the default function for further documentation:
            :func:`tabular.spreadsheet.DEFAULT_RENAMER`

        **Names** : list of strings

            If `L` is a list, then names for elements of `L` can be
            specified with `Names` (without losing the ordering as you
            would if you did it with a dictionary). `len(L)` must equal
            `len(Names)`.

    **Returns**

        **result** : numpy ndarray with structured dtype

            Result of the join, e.g. the result of merging the input
            numpy arrays defined in `L` on the key columns listed in
            `keycols`.

        **renaming** : dictionary of dictionaries, optional

            The result returned by the `renamer` function. Returned only
            if `returnrenaming == True`. See the default function for
            further documentation:
            :func:`tabular.spreadsheet.DEFAULT_RENAMER`

    **See Also:**

        :func:`tabular.spreadsheet.strictjoin`
    """
    if isinstance(L, dict):
        Names = L.keys()
        LL = L.values()
    else:
        # `is None` instead of `== None`: identity test is the idiom and
        # avoids surprising __eq__ overloads.
        if Names is None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    if not keycols:
        # Infer keycols: common names -> same dtype -> unique values.
        keycols = utils.listintersection([a.dtype.names for a in LL])
        if len(keycols) == 0:
            raise ValueError('No common column names found.')
        keycols = [l for l in keycols
                   if all([a.dtype[l] == LL[0].dtype[l] for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with identical datatypes found.')
        keycols = [l for l in keycols
                   if all([isunique(a[keycols]) for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with unique value sets in all arrays to be '
                             'merged, were found.')
        else:
            print('Inferring keycols to be:', keycols)
    elif isinstance(keycols, str):
        # allow a comma-separated string of column names
        keycols = [l.strip() for l in keycols.split(',')]
    commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
    renaming = {}
    if len(commons) > 0:
        # BUGFIX: this was a Python 2 print *statement*
        # (`print 'common attributes...'`) -- a syntax error under
        # Python 3 and inconsistent with every other print() call here.
        print('common attributes, forcing a renaming ...')
        if renamer is None:
            print('Using default renamer ...')
            renamer = DEFAULT_RENAMER
        renaming = renamer(L, Names=Names)
        if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
            print('Renaming from specified renamer is not in correct format,'
                  'using default renamer instead ...')
            renaming = DEFAULT_RENAMER(L, Names=Names)
        NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
                     for l in ll.dtype.names]
                    for (k, ll) in zip(Names, LL)]
        if set(Commons(NewNames)).difference(keycols):
            raise ValueError('Renaming convention failed to produce '
                             'separated names.')
    Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
    if returnrenaming:
        return [Result, renaming]
    else:
        if renaming:
            print('There was a nontrivial renaming, to get it set '
                  '"returnrenaming = True" in keyword to join function.')
        return Result
Combine two or more numpy ndarray with structured dtype on common key column(s). Merge a list (or dictionary) of numpy ndarray with structured dtype, given by `L`, on key columns listed in `keycols`. This function is actually a wrapper for :func:`tabular.spreadsheet.strictjoin`. The ``strictjoin`` function has a few restrictions, and this ``join`` function will try to ensure that they are satisfied: * each element of `keycol` must be a valid column name in `X` and each array in `L`, and all of the same data-type. * for each column `col` in `keycols`, and each array `A` in `L`, the values in `A[col]` must be unique, -- and same for `X[col]`. (Actually this uniqueness doesn't have to hold for the first tabarray in L, that is, L[0], but must for all the subsequent ones.) * the *non*-key-column column names in each of the arrays must be disjoint from each other -- or disjoint after a renaming (see below). An error will be thrown if these conditions are not met. If you don't provide a value of `keycols`, the algorithm will attempt to infer which columns should be used by trying to find the largest set of common column names that contain unique values in each array and have the same data type. An error will be thrown if no such inference can be made. *Renaming of overlapping columns* If the non-keycol column names of the arrays overlap, ``join`` will by default attempt to rename the columns by using a simple convention: * If `L` is a list, it will append the number in the list to the key associated with the array. * If `L` is a dictionary, the algorithm will append the string representation of the key associated with an array to the overlapping columns from that array. You can override the default renaming scheme using the `renamer` parameter. 
*Nullvalues for keycolumn differences* If there are regions of the keycolumns that are not overlapping between merged arrays, `join` will fill in the relevant entries with null values chosen by default: * '0' for integer columns * '0.0' for float columns * the empty character ('') for string columns. **Parameters** **L** : list or dictionary Numpy recarrays to merge. If `L` is a dictionary, the keys name each numpy recarray, and the corresponding values are the actual numpy recarrays. **keycols** : list of strings List of the names of the key columns along which to do the merging. **nullvals** : function, optional A function that returns a null value for a numpy format descriptor string, e.g. ``'<i4'`` or ``'|S5'``. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT` **renamer** : function, optional A function for renaming overlapping non-key column names among the numpy recarrays to merge. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **returnrenaming** : Boolean, optional Whether to return the result of the `renamer` function. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **Names**: list of strings: If `L` is a list, than names for elements of `L` can be specified with `Names` (without losing the ordering as you would if you did it with a dictionary). `len(L)` must equal `len(Names)` **Returns** **result** : numpy ndarray with structured dtype Result of the join, e.g. the result of merging the input numpy arrays defined in `L` on the key columns listed in `keycols`. **renaming** : dictionary of dictionaries, optional The result returned by the `renamer` function. Returned only if `returnrenaming == True`. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **See Also:** :func:`tabular.spreadsheet.strictjoin`
def cleanup(self):
    """
    Clean up my temporary files.

    Deletes every path recorded in ``self.to_delete`` and resets the
    list so a second call is a no-op.
    """
    # A plain loop, not all([...]): we want the deletion side effect,
    # not an aggregate boolean built from a throwaway list.
    for path in self.to_delete:
        delete_file_or_tree(path)
    self.to_delete = []
Clean up my temporary files.
def success_count(self):
    """
    Amount of passed test cases in this list.

    :return: integer count of results in ``self.data`` whose
        ``success`` attribute is truthy
    """
    # sum over a generator: no pointless enumerate() and no
    # intermediate list as in the original.
    return sum(1 for result in self.data if result.success)
Amount of passed test cases in this list. :return: integer
def _addDPFilesToOldEntry(self, *files):
    """Callback to add data products corresponding to *files* to the
    entry currently shown in the view-entry dialog."""
    # quiet flag is always true
    pairs = [(filename, True) for filename in files]
    products = self.purrer.makeDataProducts(pairs, unbanish=True, unignore=True)
    self.view_entry_dialog.addDataProducts(products)
callback to add DPs corresponding to files.
def spike_times(signal, threshold, fs, absval=True):
    """Detect spikes from a given signal

    :param signal: Spike trace recording (vector)
    :type signal: numpy array
    :param threshold: Threshold value to determine spikes
    :type threshold: float
    :param fs: sampling frequency, used to convert sample indices to
        seconds (samples/second)
    :param absval: Whether to apply absolute value to signal before thresholding
    :type absval: bool
    :returns: list(float) of spike times in seconds

    For every continuous set of points over given threshold, returns the
    time of the maximum"""
    times = []
    if absval:
        # rectify so negative-going deflections also cross the threshold
        signal = np.abs(signal)
    # indices of all samples above threshold
    over, = np.where(signal>threshold)
    # positions where consecutive over-threshold indices are not adjacent,
    # i.e. boundaries between separate supra-threshold segments
    segments, = np.where(np.diff(over) > 1)
    if len(over) > 1:
        if len(segments) == 0:
            # one single continuous segment spans all over-threshold points
            segments = [0, len(over)-1]
        else:
            # add end points to sections for looping
            if segments[0] != 0:
                segments = np.insert(segments, [0], [0])
            else:
                # first point is a singleton spike: record it directly
                times.append(float(over[0])/fs)
                if 1 not in segments:
                    # make sure that first point is in there
                    segments[0] = 1
            if segments[-1] != len(over)-1:
                segments = np.insert(segments, [len(segments)], [len(over)-1])
            else:
                # last point is a singleton spike: record it directly
                times.append(float(over[-1])/fs)
        for iseg in range(1,len(segments)):
            if segments[iseg] - segments[iseg-1] == 1:
                # only single point over threshold
                idx = over[segments[iseg]]
            else:
                # NOTE(review): segments[0] is decremented on *every*
                # multi-point segment, not just once -- looks suspicious;
                # confirm against the reference implementation before
                # changing, as downstream indexing depends on it.
                segments[0] = segments[0]-1
                # find maximum of continuous set over max
                idx = over[segments[iseg-1]+1] + np.argmax(signal[over[segments[iseg-1]+1]:over[segments[iseg]]])
            times.append(float(idx)/fs)
    elif len(over) == 1:
        # exactly one sample above threshold
        times.append(float(over[0])/fs)
    if len(times)>0:
        # refractory() presumably enforces a minimum inter-spike
        # interval -- TODO confirm; it is defined elsewhere in this module
        return refractory(times)
    else:
        return times
Detect spikes from a given signal :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :param absval: Whether to apply absolute value to signal before thresholding :type absval: bool :returns: list(float) of spike times in seconds For every continuous set of points over given threshold, returns the time of the maximum
def _onError(self, error):
    """
    Stop the observer, log the failure, then restart it.

    Stopping before logging prevents an infinite ping-pong game of
    exceptions (the error handler itself triggering more errors).
    """
    self.stop()
    message = "Unhandled error logging exception to %s" % (self.airbrakeURL,)
    self._logModule.err(error, message)
    self.start()
Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions.
def from_inline(cls: Type[IdentityType], version: int, currency: str, inline: str) -> IdentityType:
    """
    Return Identity instance from inline Identity string

    :param version: Document version number
    :param currency: Name of the currency
    :param inline: Inline string of the Identity
    :return: the parsed Identity
    """
    parsed = Identity.re_inline.match(inline)
    if parsed is None:
        raise MalformedDocumentError("Inline self certification")
    # groups: pubkey, signature, block-uid timestamp, uid
    pubkey, signature, raw_ts, uid = parsed.group(1, 2, 3, 4)
    timestamp = BlockUID.from_str(raw_ts)
    return cls(version, currency, pubkey, uid, timestamp, signature)
Return Identity instance from inline Identity string :param version: Document version number :param currency: Name of the currency :param inline: Inline string of the Identity :return:
def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None,
                  order=None, family=None, genus=None, strict=False, verbose=False,
                  offset=None, limit=100, **kwargs):
    '''
    Lookup names in the GBIF backbone taxonomy.

    :param name: [str] Full scientific name potentially with authorship (required)
    :param rank: [str] The rank given as our rank enum. (optional)
    :param kingdom: [str] If provided default matching will also try to match
        against this if no direct match is found for the name alone. (optional)
    :param phylum: [str] Same fallback behaviour as ``kingdom``. (optional)
    :param clazz: [str] Sent to the API as ``class``; same fallback behaviour
        as ``kingdom``. (optional)
    :param order: [str] Same fallback behaviour as ``kingdom``. (optional)
    :param family: [str] Same fallback behaviour as ``kingdom``. (optional)
    :param genus: [str] Same fallback behaviour as ``kingdom``. (optional)
    :param strict: [bool] If True it (fuzzy) matches only the given name, but
        never a taxon in the upper classification (optional)
    :param verbose: [bool] If True show alternative matches considered which
        had been rejected.
    :param offset: [int] Record to start at. Default: ``0``
    :param limit: [int] Number of results to return. Default: ``100``

    A list for a single taxon with many slots (with ``verbose=False`` -
    default), or a list of length two, first element for the suggested taxon
    match, and a data.frame with alternative name suggestions resulting from
    fuzzy matching (with ``verbose=True``). If you don't get a match GBIF
    gives back a list of length 3 with slots synonym, confidence, and
    ``matchType='NONE'``.

    reference: http://www.gbif.org/developer/species#searching

    Usage::

        from pygbif import species
        species.name_backbone(name='Helianthus annuus', kingdom='plants')
        species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
        species.name_backbone(name='Poa', rank='genus', family='Poaceae')

        # Verbose - gives back alternatives
        species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)

        # Strictness
        species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
        species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)

        # Non-existent name
        species.name_backbone(name='Aso')

        # Multiple equal matches
        species.name_backbone(name='Oenante')
    '''
    url = gbif_baseurl + 'species/match'
    # note: the `clazz` parameter is transmitted under the key 'class'
    # (reserved word in Python)
    args = {
        'name': name,
        'rank': rank,
        'kingdom': kingdom,
        'phylum': phylum,
        'class': clazz,
        'order': order,
        'family': family,
        'genus': genus,
        'strict': strict,
        'verbose': verbose,
        'offset': offset,
        'limit': limit,
    }
    return gbif_GET(url, args, **kwargs)
Lookup names in the GBIF backbone taxonomy. :param name: [str] Full scientific name potentially with authorship (required) :param rank: [str] The rank given as our rank enum. (optional) :param kingdom: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param phylum: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param class: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param order: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param family: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param genus: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param strict: [bool] If True it (fuzzy) matches only the given name, but never a taxon in the upper classification (optional) :param verbose: [bool] If True show alternative matches considered which had been rejected. :param offset: [int] Record to start at. Default: ``0`` :param limit: [int] Number of results to return. Default: ``100`` A list for a single taxon with many slots (with ``verbose=False`` - default), or a list of length two, first element for the suggested taxon match, and a data.frame with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``). If you don't get a match GBIF gives back a list of length 3 with slots synonym, confidence, and ``matchType='NONE'``. 
reference: http://www.gbif.org/developer/species#searching Usage:: from pygbif import species species.name_backbone(name='Helianthus annuus', kingdom='plants') species.name_backbone(name='Helianthus', rank='genus', kingdom='plants') species.name_backbone(name='Poa', rank='genus', family='Poaceae') # Verbose - gives back alternatives species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True) # Strictness species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False) species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True) # Non-existent name species.name_backbone(name='Aso') # Multiple equal matches species.name_backbone(name='Oenante')
def configure(root_directory, build_path, cmake_command, only_show):
    """
    Main configure function.

    Checks that CMake is installed, resolves the build directory
    (defaulting to ``<root_directory>/build``), appends it to the CMake
    command, prints the final command, and either stops there
    (``only_show``) or runs CMake.
    """
    fallback_build_path = os.path.join(root_directory, 'build')

    # check that CMake is available, if not stop
    check_cmake_exists('cmake')

    # deal with build path
    if build_path is None:
        build_path = fallback_build_path
    if not only_show:
        setup_build_path(build_path)

    cmake_command += ' -B' + build_path

    print('{0}\n'.format(cmake_command))
    if only_show:
        sys.exit(0)

    run_cmake(cmake_command, build_path, fallback_build_path)
Main configure function.
def get_proficiency_admin_session(self, proxy):
    """Gets the ``OsidSession`` associated with the proficiency
    administration service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: a ``ProficiencyAdminSession``
    :rtype: ``osid.learning.ProficiencyAdminSession``
    :raise: ``NullArgument`` -- ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_proficiency_admin()`` is ``true``.*
    """
    # Guard: the service must advertise support for this session type.
    if not self.supports_proficiency_admin():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.ProficiencyAdminSession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
Gets the ``OsidSession`` associated with the proficiency administration service. :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: a ``ProficiencyAdminSession`` :rtype: ``osid.learning.ProficiencyAdminSession`` :raise: ``NullArgument`` -- ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_proficiency_admin()`` is ``true``.*
def score_frequency_grid(self, f0, df, N):
    """Compute the score on a regular frequency grid.

    The grid is ``freq = f0 + df * arange(N)`` -- ordinary frequencies,
    not angular frequencies. Some models can compute results faster when
    given the grid description rather than an explicit frequency array.

    Parameters
    ----------
    f0, df, N : (float, float, int)
        parameters describing the frequency grid

    Returns
    -------
    score : ndarray
        the length-N array giving the score at each frequency
    """
    # Delegate to the model-specific implementation.
    return self._score_frequency_grid(f0, df, N)
Compute the score on a frequency grid. Some models can compute results faster if the inputs are passed in this manner. Parameters ---------- f0, df, N : (float, float, int) parameters describing the frequency grid freq = f0 + df * arange(N) Note that these are frequencies, not angular frequencies. Returns ------- score : ndarray the length-N array giving the score at each frequency
def orify(event, changed_callback):
    '''
    Override ``set`` and ``clear`` methods on event to call specified callback
    function after performing default behaviour.

    Parameters
    ----------
    event : threading.Event-like object
        Event whose ``set``/``clear`` methods are patched in place.  The
        originals are stashed on ``event._set``/``event._clear``.
    changed_callback : callable
        Stored as ``event.changed``; presumably invoked by the module-level
        ``or_set``/``or_clear`` helpers after the default behaviour -- TODO
        confirm against those helpers.
    '''
    event.changed = changed_callback
    if not hasattr(event, '_set'):
        # `set`/`clear` methods have not been overridden on event yet.
        # Override methods to call `changed_callback` after performing default
        # action.  The `_set` attribute doubles as the "already patched" flag,
        # so calling orify twice only replaces the callback.
        event._set = event.set
        event._clear = event.clear
        event.set = lambda: or_set(event)
        event.clear = lambda: or_clear(event)
Override ``set`` and ``clear`` methods on event to call specified callback function after performing default behaviour. Parameters ----------
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
    """Read one dictionary page and return its decoded values.

    The raw page bytes are decoded with the plain encoding; if the column's
    schema element declares a converted_type, the values are converted
    before being returned.
    """
    page_bytes = _read_page(file_obj, page_header, column_metadata)
    stream = io.BytesIO(page_bytes)
    decoded = encoding.read_plain(
        stream,
        column_metadata.type,
        page_header.dictionary_page_header.num_values
    )
    # Apply the logical-type conversion once, if the schema declares one.
    schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
    if schema_element.converted_type is None:
        return decoded
    return convert_column(decoded, schema_element)
Read a page containing dictionary data. Consumes data using the plain encoding and returns an array of values.
def group_add(self, name, restrict, repos, lces=None, assets=None,
              queries=None, policies=None, dashboards=None,
              credentials=None, description=''):
    '''group_add name, restrict, repos

    Create a new group.

    :param name: display name of the group
    :param restrict: iterable of asset ids the group is restricted to
        (sent as ``definingAssets``)
    :param repos: iterable of repository ids the group may access
    :param lces, assets, queries, policies, dashboards, credentials:
        optional iterables of object ids granted to the group
        (default: none)
    :param description: optional free-text description
    :return: result of the underlying ``group::add`` API call
    '''
    # FIX: the optional id lists used mutable default arguments ([]),
    # which are shared across calls; use None sentinels instead.
    def _ids(values):
        # Normalize an optional iterable of ids to the API's wire format.
        return [{'id': i} for i in (values or [])]

    return self.raw_query('group', 'add', data={
        'lces': _ids(lces),
        'assets': _ids(assets),
        'queries': _ids(queries),
        'policies': _ids(policies),
        'dashboardTabs': _ids(dashboards),
        'credentials': _ids(credentials),
        'repositories': _ids(repos),
        'definingAssets': _ids(restrict),
        'name': name,
        'description': description,
        'users': [],
        'context': ''
    })
group_add name, restrict, repos
def init_word_db(cls, name, text):
    """Build and store a word-triplet frequency database under *name*.

    The text is tokenized on spaces (newlines normalized first); for every
    consecutive triple (w1, w2, w3) the pair (w1, w2) maps to the list of
    observed followers w3.  The result is cached in ``cls._dbs[name]``.
    """
    # Normalize line breaks to spaces and tokenize.
    flattened = text.replace('\n', ' ').replace('\r', ' ')
    words = [token.strip() for token in flattened.split(' ') if token.strip()]
    assert len(words) > 2, \
        'Database text sources must contain 3 or more words.'

    # Frequency table: (w1, w2) -> list of words seen following that pair.
    freqs = {}
    for w1, w2, w3 in zip(words, words[1:], words[2:]):
        freqs.setdefault((w1, w2), []).append(w3)

    # Store the database so it can be used
    cls._dbs[name] = {
        'freqs': freqs,
        'words': words,
        'word_count': len(words) - 2
    }
Initialize a database of words for the maker with the given name
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    # pylint: disable=too-many-arguments
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for specification of input and result values.

    Implements the following equations:

    Equation (8) on p. 203 for the bedrock ground motion:

    ``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - lnR - c4*R + ln(ε_br)``

    Equation (9) on p. 207 gives the site amplification factor:

    ``ln(F_s) = a1*y_br + a2 + ln(δ_site)``

    Equation (10) on p. 207 for the ground motion at a given site:

    ``y_site = y_br*F_s``

    Equation (11) on p. 207 for total standard error at a given site:

    ``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
    """
    # obtain coefficients for required intensity measure type
    # (copy so the site-specific updates below do not mutate the table)
    coeffs = self.COEFFS_BEDROCK[imt].copy()

    # obtain site-class specific coefficients
    a_1, a_2, sigma_site = self._get_site_coeffs(sites, imt)
    coeffs.update({'a1': a_1, 'a2': a_2, 'sigma_site': sigma_site})

    # compute bedrock motion, equation (8)
    ln_mean = (self._compute_magnitude_terms(rup, coeffs) +
               self._compute_distance_terms(dists, coeffs))

    # adjust for site class, equation (10)
    ln_mean += self._compute_site_amplification(ln_mean, coeffs)

    # No need to convert to g since "In [equation (8)], y_br = (SA/g)"

    ln_stddevs = self._get_stddevs(coeffs, stddev_types)

    # single stddev array is wrapped in a list to match the
    # (mean, [stddevs...]) contract of the base class
    return ln_mean, [ln_stddevs]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for specification of input and result values. Implements the following equations: Equation (8) on p. 203 for the bedrock ground motion: ``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - lnR - c4*R + ln(ε_br)`` Equation (9) on p. 207 gives the site amplification factor: ``ln(F_s) = a1*y_br + a2 + ln(δ_site)`` Equation (10) on p. 207 for the ground motion at a given site: ``y_site = y_br*F_s`` Equation (11) on p. 207 for total standard error at a given site: ``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
def uploads(self, option):
    """
    Set whether to filter by a user's uploads list.
    Options available are user.ONLY, user.NOT, and None;
    default is None.
    """
    merged = join_params(self.parameters, {"uploads": option})
    # Return a new instance of the same class with the merged parameters.
    return self.__class__(**merged)
Set whether to filter by a user's uploads list. Options available are user.ONLY, user.NOT, and None; default is None.
def rgb_color_list_to_hex(color_list):
    """
    Convert a list of RGBa colors to a list of hexadecimal color codes.

    Parameters
    ----------
    color_list : list
        the list of RGBa colors; each color is a sequence of floats in
        [0, 1], alpha (if present) is ignored

    Returns
    -------
    color_list_hex : list
        '#RRGGBB' strings, one per input color
    """
    hex_codes = []
    for color in color_list:
        # Scale the first three channels from [0, 1] to [0, 255],
        # truncating toward zero exactly like int() does.
        red, green, blue = (int(channel * 255) for channel in color[0:3])
        hex_codes.append('#{:02X}{:02X}{:02X}'.format(red, green, blue))
    return hex_codes
Convert a list of RGBa colors to a list of hexadecimal color codes. Parameters ---------- color_list : list the list of RGBa colors Returns ------- color_list_hex : list
def process_scheduled_consumption(self, token):
    """Mark a scheduled consumption request as completed.

    :type token: RequestToken
    :param token: The token associated to the consumption request
        that is used to identify the request.
    """
    entry = self._tokens_to_scheduled_consumption.pop(token)
    # Subtract this request's wait contribution, clamping at zero.
    remaining = self._total_wait - entry['time_to_consume']
    self._total_wait = remaining if remaining > 0 else 0
Processes a scheduled consumption request that has completed :type token: RequestToken :param token: The token associated to the consumption request that is used to identify the request.
def make_get_request(url, params, headers, connection):
    """
    Helper function that makes an HTTP GET request to the given firebase
    endpoint.  Timeout is 60 seconds.

    `url`: The full URL of the firebase endpoint (DSN appended.)
    `params`: Python dict that is appended to the URL like a querystring.
    `headers`: Python dict. HTTP request headers.
    `connection`: Predefined HTTP connection instance. If not given, it
    is supplied by the `decorators.http_connection` function.

    The return value is a Python dict deserialized by the JSON decoder.
    However, if the status code is not 2xx or 403, a requests.HTTPError
    is raised.

    connection = connection_pool.get_available_connection()
    response = make_get_request('http://firebase.localhost/users',
                                {'print': 'silent'},
                                {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
    response => {'1': 'John Doe', '2': 'Jane Doe'}
    """
    response = connection.get(url, params=params, headers=headers,
                              timeout=connection.timeout)
    if response.ok or response.status_code == 403:
        # 403 bodies carry useful detail, so they are decoded, not raised.
        return response.json() if response.content else None
    response.raise_for_status()
Helper function that makes an HTTP GET request to the given firebase endpoint. Timeout is 60 seconds. `url`: The full URL of the firebase endpoint (DSN appended.) `params`: Python dict that is appended to the URL like a querystring. `headers`: Python dict. HTTP request headers. `connection`: Predefined HTTP connection instance. If not given, it is supplied by the `decorators.http_connection` function. The return value is a Python dict deserialized by the JSON decoder. However, if the status code is not 2xx or 403, a requests.HTTPError is raised. connection = connection_pool.get_available_connection() response = make_get_request('http://firebase.localhost/users', {'print': 'silent'}, {'X_FIREBASE_SOMETHING': 'Hi'}, connection) response => {'1': 'John Doe', '2': 'Jane Doe'}
def create_empty_resource(self, name):
    """Create an empty (length-0) resource.

    See DAVResource.create_empty_resource()
    """
    assert "/" not in name
    if self.provider.readonly:
        raise DAVError(HTTP_FORBIDDEN)
    member_path = util.join_uri(self.path, name)
    file_path = self.provider._loc_to_file_path(member_path, self.environ)
    # Touch the file on disk, creating a zero-length resource.
    with open(file_path, "wb"):
        pass
    return self.provider.get_resource_inst(member_path, self.environ)
Create an empty (length-0) resource. See DAVResource.create_empty_resource()
def update(self, uid):
    '''
    Update the information of the post identified by ``uid``.

    Returns False if the post is of a different kind than this handler
    handles or if the mandatory primary category ('gcat0') is missing;
    otherwise persists the change, refreshes category/label relations and
    redirects to the post page.
    '''
    postinfo = MPost.get_by_uid(uid)
    # Refuse to edit posts belonging to a different handler kind.
    if postinfo.kind == self.kind:
        pass
    else:
        return False
    post_data, ext_dic = self.fetch_post_data()
    # 'gcat0' (primary category) is mandatory for an update.
    if 'gcat0' in post_data:
        pass
    else:
        return False
    if 'valid' in post_data:
        post_data['valid'] = int(post_data['valid'])
    else:
        post_data['valid'] = postinfo.valid
    ext_dic['def_uid'] = str(uid)
    cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
    cnt_new = post_data['cnt_md'].strip()
    # Only snapshot the previous revision when the content actually changed.
    if cnt_old == cnt_new:
        pass
    else:
        MPostHist.create_post_history(postinfo)
    MPost.modify_meta(uid, post_data, extinfo=ext_dic)
    self._add_download_entity(ext_dic)
    # self.update_tag(uid=uid)
    update_category(uid, post_data)
    update_label(uid, post_data)
    # self.update_label(uid)
    logger.info('post kind:' + self.kind)
    # cele_gen_whoosh.delay()
    # Rebuild the search index asynchronously on the IO loop.
    tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
    self.redirect('/{0}/{1}'.format(router_post[postinfo.kind], uid))
Update the information of the post identified by the given uid.
def add_castle(self, position):
    """
    Adds kingside and queenside castling moves if legal

    :type: position: Board
    """
    # Castling is impossible once the king has moved or while in check.
    if self.has_moved or self.in_check(position):
        return
    # Back rank: 0 for white, 7 for black.
    if self.color == color.white:
        rook_rank = 0
    else:
        rook_rank = 7
    # Map each castle type to the rook's starting file and the direction
    # the king travels (right for kingside, left for queenside).
    castle_type = {
        notation_const.KING_SIDE_CASTLE: {
            "rook_file": 7,
            "direction": lambda king_square, times: king_square.shift_right(times)
        },
        notation_const.QUEEN_SIDE_CASTLE: {
            "rook_file": 0,
            "direction": lambda king_square, times: king_square.shift_left(times)
        }
    }
    for castle_key in castle_type:
        castle_dict = castle_type[castle_key]
        castle_rook = position.piece_at_square(Location(rook_rank, castle_dict["rook_file"]))
        # The rook must be eligible, and the squares between the king and
        # its destination must be empty and not under attack.
        if self._rook_legal_for_castle(castle_rook) and \
                self._empty_not_in_check(position, castle_dict["direction"]):
            # The king always moves two squares toward the rook.
            yield self.create_move(castle_dict["direction"](self.location, 2), castle_key)
Adds kingside and queenside castling moves if legal :type: position: Board
def read_stdout(self):
    """
    Reads the standard output of the QEMU process.
    Only use when the process has been stopped or has crashed.
    """
    if not self._stdout_file:
        return ""
    try:
        with open(self._stdout_file, "rb") as handle:
            # Replace undecodable bytes rather than failing on binary noise.
            return handle.read().decode("utf-8", errors="replace")
    except OSError as exc:
        log.warning("Could not read {}: {}".format(self._stdout_file, exc))
        return ""
Reads the standard output of the QEMU process. Only use when the process has been stopped or has crashed.
def load_libs(self, scripts_paths):
    """
    Load script files into the context.\
    This can be thought as the HTML script tag.\
    The files content must be utf-8 encoded.

    This is a shortcut for reading the files\
    and pass the content to :py:func:`run_script`

    :param list scripts_paths: Script file paths.
    :raises OSError: If there was an error\
    manipulating the files. This should not\
    normally be caught
    :raises V8Error: if there was\
    an error running the JS script
    """
    for script_path in scripts_paths:
        source = _read_file(script_path)
        self.run_script(source, identifier=script_path)
Load script files into the context.\ This can be thought as the HTML script tag.\ The files content must be utf-8 encoded. This is a shortcut for reading the files\ and pass the content to :py:func:`run_script` :param list scripts_paths: Script file paths. :raises OSError: If there was an error\ manipulating the files. This should not\ normally be caught :raises V8Error: if there was\ an error running the JS script
def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True):
    # type: (Optional[Configuration], Optional[str], int, bool) -> Tuple[Dict,List]
    """
    Get tags cleanup dictionaries

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter).
        keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5.
        failchained (bool): Fail if chained rules found. Defaults to True.

    Returns:
        Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
    """
    # Both results are cached on the Tags class; the spreadsheet is only
    # downloaded and validated on the first call.
    if not Tags._tags_dict:
        if configuration is None:
            configuration = Configuration.read()
        with Download(full_agent=configuration.get_user_agent()) as downloader:
            if url is None:
                url = configuration['tags_cleanup_url']
            Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn)
        keys = Tags._tags_dict.keys()
        chainerror = False
        # Detect "chained" rules: tag A maps to tag B while B itself is
        # remapped elsewhere (and does not map back to itself).
        for i, tag in enumerate(keys):
            whattodo = Tags._tags_dict[tag]
            action = whattodo[u'action']
            final_tags = whattodo[u'final tags (semicolon separated)']
            for final_tag in final_tags.split(';'):
                if final_tag in keys:
                    index = list(keys).index(final_tag)
                    if index != i:
                        whattodo2 = Tags._tags_dict[final_tag]
                        action2 = whattodo2[u'action']
                        if action2 != 'OK' and action2 != 'Other':
                            final_tags2 = whattodo2[u'final tags (semicolon separated)']
                            if final_tag not in final_tags2.split(';'):
                                chainerror = True
                                if failchained:
                                    logger.error('Chained rules: %s (%s -> %s) | %s (%s -> %s)' %
                                                 (action, tag, final_tags, action2, final_tag, final_tags2))
        if failchained and chainerror:
            raise ChainRuleError('Chained rules for tags detected!')
        # Tags containing '*' are matched as wildcards by callers.
        Tags._wildcard_tags = list()
        for tag in Tags._tags_dict:
            if '*' in tag:
                Tags._wildcard_tags.append(tag)
    return Tags._tags_dict, Tags._wildcard_tags
Get tags cleanup dictionaries Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter). keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5. failchained (bool): Fail if chained rules found. Defaults to True. Returns: Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
def get_rmse(self, data_x=None, data_y=None):
    """
    Get Root Mean Square Error using self.bestfit_func

    args:
        data_x: array_like, default=self.args["x"]
            x values at which the best-fit function is evaluated
        data_y: array_like, default=self.args["y"]
            observed y values compared against the fit

    raises:
        ValueError: if data_x and data_y have different lengths

    returns:
        float, sqrt(mean((bestfit_func(data_x) - data_y) ** 2))
    """
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    # Evaluate the fitted model at the sample points and compare.
    rmse_y = self.bestfit_func(data_x)
    return np.sqrt(np.mean((rmse_y - data_y) ** 2))
Get Root Mean Square Error using self.bestfit_func args: data_x: array_like, default=self.args["x"] x values at which the best-fit function is evaluated data_y: array_like, default=self.args["y"] observed y values compared against the fit raises: ValueError: if data_x and data_y have different lengths
def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that asynchronously merges multiple sources together.
    Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe like object (twisted Deferred iterable of items)
    conf : unused

    Keyword arguments
    -----------------
    _OTHER1 : asyncPipe like object
    _OTHER2 : etc.

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
    """
    # NOTE(review): written in Twisted inlineCallbacks style; the decorator
    # is presumably applied where this operator is registered -- confirm,
    # since `yield`/`returnValue` only work under @inlineCallbacks.
    _input = yield _INPUT
    # The extra _OTHER* inputs travel in **kwargs and are merged here.
    _OUTPUT = get_output(_input, **kwargs)
    returnValue(_OUTPUT)
An operator that asynchronously merges multiple sources together. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : asyncPipe like object (twisted Deferred iterable of items) conf : unused Keyword arguments ----------------- _OTHER1 : asyncPipe like object _OTHER2 : etc. Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of items
def send(self, command, message=None):
    '''Send a command over the socket with the message length encoded'''
    if message:
        # Frame: command, newline separator, then length-prefixed payload.
        joined = command + constants.NL + util.pack(message)
    else:
        joined = command + constants.NL
    if self._blocking:
        for sock in self.socket():
            sock.sendall(joined)
    else:
        # Non-blocking mode: queue the frame for a later flush.
        self._pending.append(joined)
Send a command over the socket with the message length encoded
def _validate_arguments(self): """ Validates the command line arguments passed to the CLI Derived classes that override need to call this method before validating their arguments """ if self._email is None: self.set_error_message("E-mail for the account not provided") return False if self._api_token is None: self.set_error_message("API Token for the account not provided") return False return True
Validates the command line arguments passed to the CLI Derived classes that override need to call this method before validating their arguments
def dates(self, start, end):
    '''Internal function which perform pre-conditioning on dates:

    :keyword start: start date.
    :keyword end: end date.

    This function makes sure the *start* and *end* date are consistent.
    It *never fails* and always return a two-element tuple
    containing *start*, *end* with *start* less or equal *end*
    and *end* never after today.
    There should be no reason to override this function.'''
    td = date.today()
    # Clamp end to today; fall back to today when missing/unparsable.
    end = safetodate(end) or td
    end = end if end <= td else td
    start = safetodate(start)
    # Default window: `months_history` months back from end
    # (30.4 = average days per month).
    if not start or start > end:
        start = end - timedelta(days=int(round(30.4* settings.months_history)))
    return start,end
Internal function which perform pre-conditioning on dates: :keyword start: start date. :keyword end: end date. This function makes sure the *start* and *end* date are consistent. It *never fails* and always return a two-element tuple containing *start*, *end* with *start* less or equal *end* and *end* never after today. There should be no reason to override this function.
def round_controlled(cycled_iterable, rounds=1):
    """Return after <rounds> passes through a cycled iterable."""
    # A "round" is delimited by re-encountering the first item seen.
    first_seen = None
    completed_rounds = 0
    for current in cycled_iterable:
        if first_seen is None:
            first_seen = current
        elif current == first_seen:
            completed_rounds += 1
            if completed_rounds == rounds:
                return
        yield current
Return after <rounds> passes through a cycled iterable.
def get_factory_kwargs(self):
    """
    Returns the keyword arguments for calling the formset factory
    """
    factory_kwargs = {
        'can_delete': self.can_delete,
        'extra': self.extra,
        'exclude': self.exclude,
        'fields': self.fields,
        'formfield_callback': self.formfield_callback,
        'fk_name': self.fk_name,
    }
    # Optional overrides are only forwarded when actually configured.
    if self.formset_class:
        factory_kwargs['formset'] = self.formset_class
    if self.child_form:
        factory_kwargs['form'] = self.child_form
    return factory_kwargs
Returns the keyword arguments for calling the formset factory
def user_exists(name, host='localhost', **kwargs):
    """
    Check if a MySQL user exists.

    NOTE(review): ``name`` and ``host`` are interpolated directly into the
    SQL text -- safe only for trusted input; confirm callers never pass
    user-controlled values.
    """
    # Suppress fabric's command echo/output and tolerate a failing query.
    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        res = query("""
        use mysql;
        SELECT COUNT(*) FROM user
            WHERE User = '%(name)s' AND Host = '%(host)s';
        """ % {
            'name': name,
            'host': host,
        }, **kwargs)
    # True only when the query ran and matched exactly one row.
    return res.succeeded and (int(res) == 1)
Check if a MySQL user exists.
def offer_trades(self, offer_id, cursor=None, order='asc', limit=10):
    """This endpoint represents all trades for a given offer.

    `GET /offers/{offer_id}/trades{?cursor,limit,order}
    <https://www.stellar.org/developers/horizon/reference/endpoints/trades-for-offer.html>`_

    :param int offer_id: The offer ID to get trades on.
    :param int cursor: A paging token, specifying where to start returning records from.
    :param str order: The order in which to return rows, "asc" or "desc".
    :param int limit: Maximum number of records to return.

    :return: A list of effects on the given operation.
    :rtype: dict
    """
    endpoint = '/offers/{offer_id}/trades'.format(offer_id=offer_id)
    query_params = self.__query_params(cursor=cursor, order=order, limit=limit)
    return self.query(endpoint, query_params)
This endpoint represents all trades for a given offer. `GET /offers/{offer_id}/trades{?cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/trades-for-offer.html>`_ :param int offer_id: The offer ID to get trades on. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :return: A list of effects on the given operation. :rtype: dict
def is_fully_verified(self):
    """
    Determine if this Job is fully verified based on the state of its Errors.

    An Error (TextLogError or FailureLine) is considered Verified once its
    related TextLogErrorMetadata has best_is_verified set to True.  A Job is
    then considered Verified once all its Errors TextLogErrorMetadata
    instances are set to True.
    """
    pending = TextLogError.objects.filter(
        _metadata__best_is_verified=False,
        step__job=self).count()
    if pending:
        logger.error("Job %r has unverified TextLogErrors", self)
        return False
    logger.info("Job %r is fully verified", self)
    return True
Determine if this Job is fully verified based on the state of its Errors. An Error (TextLogError or FailureLine) is considered Verified once its related TextLogErrorMetadata has best_is_verified set to True. A Job is then considered Verified once all its Errors TextLogErrorMetadata instances are set to True.
def size(self):
    """Total number of coefficients in the ScalarCoefs structure.

    Example::

        >>> sz = c.size
        >>> N = c.nmax + 1
        >>> L = N + c.mmax * (2 * N - c.mmax - 1)
        >>> assert sz == L
    """
    order_count = self.nmax + 1
    total = order_count + self.mmax * (2 * order_count - self.mmax - 1)
    # Sanity check against the backing coefficient vector.
    assert total == len(self._vec)
    return total
Total number of coefficients in the ScalarCoefs structure. Example:: >>> sz = c.size >>> N = c.nmax + 1 >>> L = N+ c.mmax * (2 * N - c.mmax - 1); >>> assert sz == L
def getObjectId(self): """ Return the object id for this master, for associating state with the master. @returns: ID, via Deferred """ # try to get the cached value if self._object_id is not None: return defer.succeed(self._object_id) # failing that, get it from the DB; multiple calls to this function # at the same time will not hurt d = self.db.state.getObjectId(self.name, "buildbot.master.BuildMaster") @d.addCallback def keep(id): self._object_id = id return id return d
Return the object id for this master, for associating state with the master. @returns: ID, via Deferred
def get_dependencies_from_wheel_cache(ireq):
    """Retrieves dependencies for the given install requirement from the wheel cache.

    :param ireq: A single InstallRequirement
    :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
    :return: A set of dependency lines for generating new InstallRequirements.
    :rtype: set(str) or None
    """
    # Only pinned, non-editable requirements can be resolved from the cache.
    if ireq.editable or not is_pinned_requirement(ireq):
        return None
    cached = WHEEL_CACHE.get(ireq.link, name_from_req(ireq.req))
    if not cached:
        return None
    cached = set(cached)
    # Populate the dependency cache as a side effect, mirroring hits.
    if not DEPENDENCY_CACHE.get(ireq):
        DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in cached]
    return cached
Retrieves dependencies for the given install requirement from the wheel cache. :param ireq: A single InstallRequirement :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
def current_index(self):
    """Get the currently selected index in the parent table view."""
    parent = self._parent
    # Map the proxy-model index back to the source model's coordinates.
    return parent.proxy_model.mapToSource(parent.currentIndex())
Get the currently selected index in the parent table view.
def loadFile(self, fileName):
    """
    Load and display the PDF file specified by ``fileName``.

    If the file does not exist, a message is logged, ``self.fileName`` is
    cleared and the method returns.  Otherwise every page is rendered to
    an image and stacked vertically inside the applet's scroll area.
    """
    # Test if the file exists.
    if not QtCore.QFile(fileName).exists():
        # BUGFIX: the message previously interpolated self.qteAppletID()
        # instead of the missing file's name.
        msg = "File <b>{}</b> does not exist".format(fileName)
        self.qteLogger.info(msg)
        self.fileName = None
        return

    # Store the file name and load the PDF document with the
    # Poppler library.
    self.fileName = fileName
    doc = popplerqt4.Poppler.Document.load(fileName)

    # Enable antialiasing to improve the readability of the fonts.
    doc.setRenderHint(popplerqt4.Poppler.Document.Antialiasing)
    doc.setRenderHint(popplerqt4.Poppler.Document.TextAntialiasing)

    # Convert each page to an image, then install that image as the
    # pixmap of a QLabel, and finally insert that QLabel into a
    # vertical layout.
    hbox = QtGui.QVBoxLayout()
    for ii in range(doc.numPages()):
        pdf_img = doc.page(ii).renderToImage()
        pdf_label = self.qteAddWidget(QtGui.QLabel())
        pdf_label.setPixmap(QtGui.QPixmap.fromImage(pdf_img))
        hbox.addWidget(pdf_label)

    # Use an auxiliary widget to hold that layout and then place
    # that auxiliary widget into a QScrollView. The auxiliary
    # widget is necessary because QScrollArea can only display a
    # single widget at once.
    tmp = self.qteAddWidget(QtGui.QWidget(self))
    tmp.setLayout(hbox)
    self.qteScroll.setWidget(tmp)
Load and display the PDF file specified by ``fileName``.
def _is_master_running(self):
    '''
    Perform a lightweight check to see if the master daemon is running

    Note, this will return an invalid success if the master crashed or
    was not shut down cleanly.
    '''
    # Windows doesn't have IPC. Assume the master is running.
    # At worse, it will error 500.
    if salt.utils.platform.is_windows():
        return True
    # The IPC socket file name depends on the transport in use.
    ipc_file = 'publish_pull.ipc' if self.opts['transport'] == 'tcp' else 'workers.ipc'
    return os.path.exists(os.path.join(self.opts['sock_dir'], ipc_file))
Perform a lightweight check to see if the master daemon is running Note, this will return an invalid success if the master crashed or was not shut down cleanly.
def robot_files(self):
    '''Return a list of all folders, and test suite files (.txt, .robot)
    '''
    found = []
    for entry in os.listdir(self.path):
        full_path = os.path.join(self.path, entry)
        # Directories are always included; files only when they look like
        # suite files and are not package initializers.
        if os.path.isdir(full_path):
            found.append(RobotFactory(full_path, parent=self))
        elif (entry.endswith((".txt", ".robot"))
                and entry not in ("__init__.txt", "__init__.robot")):
            found.append(RobotFactory(full_path, parent=self))
    return found
Return a list of all folders, and test suite files (.txt, .robot)
def bbox(self):
    """
    The minimal `~photutils.aperture.BoundingBox` for the cutout
    region with respect to the original (large) image.
    """
    # slices[0] is the y (row) slice and slices[1] the x (column) slice;
    # BoundingBox expects (ixmin, ixmax, iymin, iymax).
    return BoundingBox(self.slices[1].start, self.slices[1].stop,
                       self.slices[0].start, self.slices[0].stop)
The minimal `~photutils.aperture.BoundingBox` for the cutout region with respect to the original (large) image.
def maverage(size):
    """
    Moving average

    This is the only strategy that uses a ``collections.deque`` object
    instead of a ZFilter instance. Fast, but without extra capabilites such
    as a frequency response plotting method.

    Parameters
    ----------
    size :
        Data block window size. Should be an integer.

    Returns
    -------
    A callable that accepts two parameters: a signal ``sig`` and the
    starting memory element ``zero`` that behaves like the
    ``LinearFilter.__call__`` arguments. The output from that callable is
    a Stream instance, and has no decimation applied.

    See Also
    --------
    envelope :
        Signal envelope (time domain) strategies.
    """
    # Precompute the reciprocal so the loop multiplies instead of divides.
    size_inv = 1. / size

    @tostream
    def maverage_filter(sig, zero=0.):
        # Window pre-filled with `zero` contributions so early outputs ramp
        # smoothly from the initial memory value.  (xrange: Python 2 file.)
        data = deque((zero * size_inv for _ in xrange(size)), maxlen=size)
        mean_value = zero
        for el in sig:
            # Running-sum update: drop the oldest contribution, add the new.
            mean_value -= data.popleft()
            new_value = el * size_inv
            data.append(new_value)
            mean_value += new_value
            yield mean_value

    return maverage_filter
Moving average This is the only strategy that uses a ``collections.deque`` object instead of a ZFilter instance. Fast, but without extra capabilites such as a frequency response plotting method. Parameters ---------- size : Data block window size. Should be an integer. Returns ------- A callable that accepts two parameters: a signal ``sig`` and the starting memory element ``zero`` that behaves like the ``LinearFilter.__call__`` arguments. The output from that callable is a Stream instance, and has no decimation applied. See Also -------- envelope : Signal envelope (time domain) strategies.
def null_advance_strain(self, blocksize):
    """ Advance and insert zeros

    Parameters
    ----------
    blocksize: int
        The number of seconds to attempt to read from the channel
    """
    sample_step = int(blocksize * self.sample_rate)
    # Zeroed region spans the new block plus the corrupted edges.
    csize = sample_step + self.corruption * 2
    # Shift the ring buffer left by one block, discarding the oldest data.
    self.strain.roll(-sample_step)

    # We should roll this off at some point too...
    self.strain[len(self.strain) - csize + self.corruption:] = 0
    self.strain.start_time += blocksize

    # The next time we need strain will need to be tapered
    self.taper_immediate_strain = True
Advance and insert zeros Parameters ---------- blocksize: int The number of seconds to attempt to read from the channel
def __populate_symbols(self):
    """Get a list of the symbols present in the bfd to populate our
    internal list.

    Fills ``self._symbols`` keyed by effective symbol address
    (section vma + symbol value).  Symbols without a resolvable
    section are skipped.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")
    try:
        symbols = _bfd.get_symbols(self._ptr)

        # Temporary dictionary ordered by section index. This is necessary
        # because the symbolic information return the section index it belongs
        # to.
        sections = {}
        for section in self.sections:
            sections[self.sections[section].index] = self.sections[section]

        for symbol in symbols:
            # Extract each field for further processing.
            symbol_section_index = symbol[0]
            symbol_name = symbol[1]
            symbol_value = symbol[2]
            symbol_flags = symbol[3]

            # Keep only the flag constants present in the raw flag bitmask.
            symbol_flags = tuple(
                [f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f]
            )

            # Create a new symbol instance to hold symbolic information.
            new_symbol = Symbol(
                sections.get(symbol_section_index, None),
                symbol_name,
                symbol_value,
                symbol_flags)

            if new_symbol.section is None:
                continue

            # Get the effective address of the current symbol.
            symbol_address = new_symbol.section.vma + new_symbol.value

            #if new_symbol.flags in \
            #    [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]:
            #    symbol_address = new_symbol.section.vma + new_symbol.value
            #else:
            #    # TODO: Enhance this!
            #    # Discard any other symbol information.
            #    continue

            self._symbols[symbol_address] = new_symbol

        del sections
    # Python 2 except syntax; NOTE(review): "ifnormation" typo lives in a
    # user-visible message -- fix in a behavior-changing pass, not here.
    except BfdSectionException, err:
        raise BfdException("Exception on symbolic ifnormation parsing.")
Get a list of the symbols present in the bfd to populate our internal list.
def calc_regenerated(self, lastvotetime):
    '''
    Uses math formula to calculate the amount of steem power that
    would have been regenerated given a certain datetime object

    :param lastvotetime: timestamp string in ``%Y-%m-%dT%H:%M:%S``
        format, interpreted as UTC.
    '''
    # Elapsed time since the last vote, recombined into total seconds
    # from the timedelta's days/seconds split.
    delta = datetime.utcnow() - datetime.strptime(lastvotetime,'%Y-%m-%dT%H:%M:%S')
    td = delta.days
    ts = delta.seconds
    tt = (td * 86400) + ts
    # 10000 units regenerate per 5 days of elapsed time.
    # NOTE(review): under Python 2 this is integer division -- confirm
    # whether truncation is intended.
    return tt * 10000 / 86400 / 5
Uses math formula to calculate the amount of steem power that would have been regenerated given a certain datetime object
def generate_maximum_validator(maximum, exclusiveMaximum=False, **kwargs):
    """
    Generator function returning a callable for maximum value validation.

    :param maximum: upper bound forwarded to ``validate_maximum``.
    :param exclusiveMaximum: when True, the bound itself is disallowed.
    :param kwargs: other schema keywords; intentionally ignored here.
    :return: a callable partial of ``validate_maximum``.
    """
    return functools.partial(validate_maximum, maximum=maximum, is_exclusive=exclusiveMaximum)
Generator function returning a callable for maximum value validation.
def parse_compound_list(path, compounds):
    """Parse a structured list of compounds as obtained from a YAML file

    Yields CompoundEntries. Path can be given as a string or a context.
    """
    context = FilePathContext(path)

    for compound_def in compounds:
        # A plain entry is parsed directly; an 'include' entry pulls in a
        # whole external compound file resolved relative to this context.
        if 'include' not in compound_def:
            yield parse_compound(compound_def, context)
            continue
        file_format = compound_def.get('format')
        include_context = context.resolve(compound_def['include'])
        for included in parse_compound_file(include_context, file_format):
            yield included
Parse a structured list of compounds as obtained from a YAML file Yields CompoundEntries. Path can be given as a string or a context.
def geo2apex(self, glat, glon, height):
    """Converts geodetic to modified apex coordinates.

    Parameters
    ==========
    glat : array_like
        Geodetic latitude
    glon : array_like
        Geodetic longitude
    height : array_like
        Altitude in km

    Returns
    =======
    alat : ndarray or float
        Modified apex latitude
    alon : ndarray or float
        Modified apex longitude

    """
    checked_lat = helpers.checklat(glat, name='glat')

    alat, alon = self._geo2apex(checked_lat, glon, height)

    # -9999 is the sentinel value used for undefined apex latitudes
    # (apex height below the reference height).
    if np.any(np.float64(alat) == -9999):
        warnings.warn('Apex latitude set to -9999 where undefined '
                      '(apex height may be < reference height)')

    # The wrapped routine may hand back object-dtype arrays; coerce the
    # result to float64 before returning.
    return np.float64(alat), np.float64(alon)
def create_item(self, item):
    """
    Create a new item in D4S2 service for item at the specified destination.
    :param item: D4S2Item data to use for creating a D4S2 item
    :return: requests.Response containing the successful result
    """
    payload = {
        'project_id': item.project_id,
        'from_user_id': item.from_user_id,
        'to_user_id': item.to_user_id,
        'role': item.auth_role,
        'user_message': item.user_message,
    }
    # Share users are optional; omit the key entirely when none were given.
    if item.share_user_ids:
        payload['share_user_ids'] = item.share_user_ids

    resp = requests.post(self.make_url(item.destination),
                         headers=self.json_headers,
                         data=json.dumps(payload))
    self.check_response(resp)
    return resp
def update_expression_list(self):
    """Extract a flat list of expressions from the nested dictionary of expressions.

    Flattens ``self.expressions`` (a dict of function-name -> dict of
    expression-type -> expression, where derivative entries are nested one
    level deeper by derivative type) into three parallel attributes:
    ``expression_list`` (the expressions themselves), ``expression_keys``
    (the dictionary key path for each expression) and ``expression_order``
    (a coarse ordering weight used to sort 'function' code first and
    higher-order derivatives later, ahead of common subexpression
    elimination).
    """
    self.expression_list = []       # code arrives in dictionary, but is passed in this list
    self.expression_keys = []       # Keep track of the dictionary keys.
    self.expression_order = []      # Ordering weights for cse (may be unnecessary).
    for fname, fexpressions in self.expressions.items():
        # NOTE(review): loop variable 'type' shadows the builtin; kept as-is.
        for type, texpressions in fexpressions.items():
            if type == 'function':
                # Plain function code: highest priority (weight 1).
                self.expression_list.append(texpressions)
                self.expression_keys.append([fname, type])
                self.expression_order.append(1)
            elif type[-10:] == 'derivative':
                # Derivative entries are nested one level deeper, keyed by
                # the variable the derivative is taken with respect to.
                for dtype, expression in texpressions.items():
                    self.expression_list.append(expression)
                    self.expression_keys.append([fname, type, dtype])
                    # The prefix before 'derivative' encodes the order:
                    # 'first_'/'' -> 3, 'second_' -> 4, 'third_' -> 5.
                    if type[:-10] == 'first_' or type[:-10] == '':
                        self.expression_order.append(3) #sym.count_ops(self.expressions[type][dtype]))
                    elif type[:-10] == 'second_':
                        self.expression_order.append(4) #sym.count_ops(self.expressions[type][dtype]))
                    elif type[:-10] == 'third_':
                        self.expression_order.append(5) #sym.count_ops(self.expressions[type][dtype]))
            else:
                # Any other expression type gets intermediate weight 2.
                self.expression_list.append(fexpressions[type])
                self.expression_keys.append([fname, type])
                self.expression_order.append(2)

    # This step may be unnecessary.
    # Not 100% sure if the sub expression elimination is order sensitive. This step orders the list with the 'function' code first and derivatives after.
    # NOTE(review): equal weights make sorted() compare the expressions
    # themselves as tie-breakers — confirm the expression objects support
    # ordering before touching this line.
    self.expression_order, self.expression_list, self.expression_keys = zip(*sorted(zip(self.expression_order, self.expression_list, self.expression_keys)))
def readDOE(serialize_output=True):
    """
    Read csv files of DOE reference buildings.

    Sheet 1 = BuildingSummary
    Sheet 2 = ZoneSummary
    Sheet 3 = LocationSummary
    Sheet 4 = Schedules
    Note BLD8 & 10 = school

    Then make matrix of ref data as nested nested lists [16, 3, 16]:
        matrix refDOE = Building objs
        matrix Schedule = SchDef objs
        matrix refBEM (16,3,16) = BEMDef
    where [16,3,16] is Type = 1-16, Era = 1-3, climate zone = 1-16,
    i.e. Type: FullServiceRestaurant, Era: Pre80, Zone: 6A Minneapolis.

    When serialize_output is True the three matrices are also pickled to
    resources/refdata/readDOE.pkl.

    Returns (refDOE, refBEM, Schedule).
    """

    # Nested, nested lists of Building, SchDef, BEMDef objects
    refDOE = [[[None]*16 for k_ in range(3)] for j_ in range(16)]     #refDOE(16,3,16) = Building
    Schedule = [[[None]*16 for k_ in range(3)] for j_ in range(16)]   #Schedule (16,3,16) = SchDef
    refBEM = [[[None]*16 for k_ in range(3)] for j_ in range(16)]     #refBEM (16,3,16) = BEMDef

    # Purpose: Loop through every DOE reference csv and extract building data
    # Nested loop = 16 types, 3 era, 16 zones = time complexity O(n*m*k) = 768
    for i in range(16):
        # i = 16 types of buildings
        #print "\tType: {} @i={}".format(BLDTYPE[i], i)

        # Read building summary (Sheet 1)
        file_doe_name_bld = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_BuildingSummary.csv".format(i+1))
        list_doe1 = read_csv(file_doe_name_bld)
        # listof(listof 3 era values); str2fl presumably converts strings to floats — confirm
        nFloor = str2fl(list_doe1[3][3:6])      # Number of Floors, this will be list of floats and str if "basement"
        glazing = str2fl(list_doe1[4][3:6])     # [?] Total
        hCeiling = str2fl(list_doe1[5][3:6])    # [m] Ceiling height
        ver2hor = str2fl(list_doe1[7][3:6])     # Wall to Skin Ratio
        AreaRoof = str2fl(list_doe1[8][3:6])    # [m2] Gross Dimensions - Total area

        # Read zone summary (Sheet 2)
        file_doe_name_zone = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_ZoneSummary.csv".format(i+1))
        list_doe2 = read_csv(file_doe_name_zone)
        # listof(listof 3 eras)
        AreaFloor = str2fl([list_doe2[2][5],list_doe2[3][5],list_doe2[4][5]])       # [m2]
        Volume = str2fl([list_doe2[2][6],list_doe2[3][6],list_doe2[4][6]])          # [m3]
        AreaWall = str2fl([list_doe2[2][8],list_doe2[3][8],list_doe2[4][8]])        # [m2]
        AreaWindow = str2fl([list_doe2[2][9],list_doe2[3][9],list_doe2[4][9]])      # [m2]
        Occupant = str2fl([list_doe2[2][11],list_doe2[3][11],list_doe2[4][11]])     # Number of People
        Light = str2fl([list_doe2[2][12],list_doe2[3][12],list_doe2[4][12]])        # [W/m2]
        Elec = str2fl([list_doe2[2][13],list_doe2[3][13],list_doe2[4][13]])         # [W/m2] Electric Plug and Process
        Gas = str2fl([list_doe2[2][14],list_doe2[3][14],list_doe2[4][14]])          # [W/m2] Gas Plug and Process
        SHW = str2fl([list_doe2[2][15],list_doe2[3][15],list_doe2[4][15]])          # [Litres/hr] Peak Service Hot Water
        Vent = str2fl([list_doe2[2][17],list_doe2[3][17],list_doe2[4][17]])         # [L/s/m2] Ventilation
        Infil = str2fl([list_doe2[2][20],list_doe2[3][20],list_doe2[4][20]])        # Air Changes Per Hour (ACH) Infiltration

        # Read location summary (Sheet 3)
        file_doe_name_location = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_LocationSummary.csv".format(i+1))
        list_doe3 = read_csv(file_doe_name_location)
        # (listof (listof 3 eras (listof 16 climate types)))
        TypeWall = [list_doe3[3][4:20],list_doe3[14][4:20],list_doe3[25][4:20]]             # Construction type
        RvalWall = str2fl([list_doe3[4][4:20],list_doe3[15][4:20],list_doe3[26][4:20]])     # [m2*K/W] R-value
        TypeRoof = [list_doe3[5][4:20],list_doe3[16][4:20],list_doe3[27][4:20]]             # Construction type
        RvalRoof = str2fl([list_doe3[6][4:20],list_doe3[17][4:20],list_doe3[28][4:20]])     # [m2*K/W] R-value
        Uwindow = str2fl([list_doe3[7][4:20],list_doe3[18][4:20],list_doe3[29][4:20]])      # [W/m2*K] U-factor
        SHGC = str2fl([list_doe3[8][4:20],list_doe3[19][4:20],list_doe3[30][4:20]])         # [-] coefficient
        HVAC = str2fl([list_doe3[9][4:20],list_doe3[20][4:20],list_doe3[31][4:20]])         # [kW] Air Conditioning
        HEAT = str2fl([list_doe3[10][4:20],list_doe3[21][4:20],list_doe3[32][4:20]])        # [kW] Heating
        COP = str2fl([list_doe3[11][4:20],list_doe3[22][4:20],list_doe3[33][4:20]])         # [-] Air Conditioning COP
        EffHeat = str2fl([list_doe3[12][4:20],list_doe3[23][4:20],list_doe3[34][4:20]])     # [%] Heating Efficiency
        FanFlow = str2fl([list_doe3[13][4:20],list_doe3[24][4:20],list_doe3[35][4:20]])     # [m3/s] Fan Max Flow Rate

        # Read Schedules (Sheet 4)
        file_doe_name_schedules = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_Schedules.csv".format(i+1))
        list_doe4 = read_csv(file_doe_name_schedules)
        # listof(listof weekday, sat, sun (list of 24 fractions)))
        SchEquip = str2fl([list_doe4[1][6:30],list_doe4[2][6:30],list_doe4[3][6:30]])       # Equipment Schedule 24 hrs
        SchLight = str2fl([list_doe4[4][6:30],list_doe4[5][6:30],list_doe4[6][6:30]])       # Light Schedule 24 hrs; Wkday=Sat=Sun=Hol
        SchOcc = str2fl([list_doe4[7][6:30],list_doe4[8][6:30],list_doe4[9][6:30]])         # Occupancy Schedule 24 hrs
        SetCool = str2fl([list_doe4[10][6:30],list_doe4[11][6:30],list_doe4[12][6:30]])     # Cooling Setpoint Schedule 24 hrs
        SetHeat = str2fl([list_doe4[13][6:30],list_doe4[14][6:30],list_doe4[15][6:30]])     # Heating Setpoint Schedule 24 hrs; summer design
        SchGas = str2fl([list_doe4[16][6:30],list_doe4[17][6:30],list_doe4[18][6:30]])      # Gas Equipment Schedule 24 hrs; wkday=sat
        SchSWH = str2fl([list_doe4[19][6:30],list_doe4[20][6:30],list_doe4[21][6:30]])      # Solar Water Heating Schedule 24 hrs; wkday=summerdesign, sat=winterdesgin

        for j in range(3):
            # j = 3 built eras
            #print"\tEra: {} @j={}".format(BUILTERA[j], j)

            for k in range(16):
                # k = 16 climate zones
                #print "\tClimate zone: {} @k={}".format(ZONETYPE[k], k)

                B = Building(
                    hCeiling[j],                        # floorHeight by era
                    1,                                  # intHeatNight
                    1,                                  # intHeatDay
                    0.1,                                # intHeatFRad
                    0.1,                                # intHeatFLat
                    Infil[j],                           # infil (ACH) by era
                    Vent[j]/1000.,                      # vent (m^3/s/m^2) by era, converted from liters
                    glazing[j],                         # glazing ratio by era
                    Uwindow[j][k],                      # uValue by era, by climate type
                    SHGC[j][k],                         # SHGC, by era, by climate type
                    'AIR',                              # cooling condensation system type: AIR, WATER
                    COP[j][k],                          # cop by era, climate type
                    297,                                # coolSetpointDay = 24 C
                    297,                                # coolSetpointNight
                    293,                                # heatSetpointDay = 20 C
                    293,                                # heatSetpointNight
                    (HVAC[j][k]*1000.0)/AreaFloor[j],   # coolCap converted to W/m2 by era, climate type
                    EffHeat[j][k],                      # heatEff by era, climate type
                    293)                                # initialTemp at 20 C

                # Not defined in the constructor
                B.heatCap = (HEAT[j][k]*1000.0)/AreaFloor[j]    # heating Capacity converted to W/m2 by era, climate type
                B.Type = BLDTYPE[i]
                B.Era = BUILTERA[j]
                B.Zone = ZONETYPE[k]
                refDOE[i][j][k] = B

                # Define wall, mass(floor), roof
                # Reference from E+ for conductivity, thickness (reference below)

                # Material: (thermalCond, volHeat = specific heat * density)
                Concrete = Material (1.311, 836.8 * 2240,"Concrete")
                Insulation = Material (0.049, 836.8 * 265.0, "Insulation")
                Gypsum = Material (0.16, 830.0 * 784.9, "Gypsum")
                Wood = Material (0.11, 1210.0 * 544.62, "Wood")
                Stucco = Material(0.6918, 837.0 * 1858.0, "Stucco")

                # Wall (1 in stucco, concrete, insulation, gypsum)
                # Check TypWall by era, by climate
                if TypeWall[j][k] == "MassWall":
                    # Construct wall based on R value of Wall from refDOE and properties defined above
                    # 1" stucco, 8" concrete, tbd insulation, 1/2" gypsum
                    Rbase = 0.271087                        # R val based on stucco, concrete, gypsum
                    Rins = RvalWall[j][k] - Rbase           # find insulation value
                    D_ins = Rins * Insulation.thermalCond   # depth of ins from m2*K/W * W/m*K = m
                    if D_ins > 0.01:
                        thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,D_ins,0.0127]
                        layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Insulation,Gypsum]
                    else:
                        # if it's less then 1 cm don't include in layers
                        thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,0.0127]
                        layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Gypsum]

                    wall = Element(0.08,0.92,thickness,layers,0.,293.,0.,"MassWall")

                    # If mass wall, assume mass floor (4" concrete)
                    # Mass (assume 4" concrete);
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.054,0.054]
                    concrete = Material (1.31, 2240.0*836.8)
                    mass = Element(alb,emis,thickness,[concrete,concrete],0,293,1,"MassFloor")

                elif TypeWall[j][k] == "WoodFrame":
                    # 0.01m wood siding, tbd insulation, 1/2" gypsum
                    Rbase = 0.170284091                     # based on wood siding, gypsum
                    Rins = RvalWall[j][k] - Rbase
                    D_ins = Rins * Insulation.thermalCond   # depth of insulation

                    if D_ins > 0.01:
                        thickness = [0.01,D_ins,0.0127]
                        layers = [Wood,Insulation,Gypsum]
                    else:
                        thickness = [0.01,0.0127]
                        layers = [Wood,Gypsum]

                    wall = Element(0.22,0.92,thickness,layers,0.,293.,0.,"WoodFrameWall")

                    # If wood frame wall, assume wooden floor
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.05,0.05]
                    wood = Material(1.31, 2240.0*836.8)
                    mass = Element(alb,emis,thickness,[wood,wood],0.,293.,1.,"WoodFloor")

                elif TypeWall[j][k] == "SteelFrame":
                    # 1" stucco, 8" concrete, tbd insulation, 1/2" gypsum
                    Rbase = 0.271087                        # based on stucco, concrete, gypsum
                    Rins = RvalWall[j][k] - Rbase
                    D_ins = Rins * Insulation.thermalCond
                    if D_ins > 0.01:
                        thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,D_ins,0.0127]
                        layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Insulation,Gypsum]
                    else:
                        # If insulation is too thin, assume no insulation
                        thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,0.0127]
                        layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Gypsum]
                    wall = Element(0.15,0.92,thickness,layers,0.,293.,0.,"SteelFrame")

                    # If mass wall, assume mass floor
                    # Mass (assume 4" concrete),
                    alb = 0.2
                    emis = 0.93
                    thickness = [0.05,0.05]
                    mass = Element(alb,emis,thickness,[Concrete,Concrete],0.,293.,1.,"MassFloor")

                elif TypeWall[j][k] == "MetalWall":
                    # metal siding, insulation, 1/2" gypsum
                    alb = 0.2
                    emis = 0.9
                    D_ins = max((RvalWall[j][k] * Insulation.thermalCond)/2, 0.01)  # use derived insul thickness or 0.01 based on max
                    thickness = [D_ins,D_ins,0.0127]
                    materials = [Insulation,Insulation,Gypsum]
                    wall = Element(alb,emis,thickness,materials,0,293,0,"MetalWall")

                    # Mass (assume 4" concrete);
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.05, 0.05]
                    concrete = Material(1.31, 2240.0*836.8)
                    mass = Element(alb,emis,thickness,[concrete,concrete],0.,293.,1.,"MassFloor")

                # Roof
                if TypeRoof[j][k] == "IEAD":
                    # IEAD = Insulation Entirely Above Deck -> membrane, insulation, decking
                    alb = 0.2
                    emis = 0.93
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01);
                    roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"IEAD")

                elif TypeRoof[j][k] == "Attic":
                    # membrane, insulation, decking
                    alb = 0.2
                    emis = 0.9
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01)
                    roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"Attic")

                elif TypeRoof[j][k] == "MetalRoof":
                    # membrane, insulation, decking
                    alb = 0.2
                    emis = 0.9
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01)
                    roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"MetalRoof")

                # Define building energy model, set fraction of the urban floor space of this typology to zero
                refBEM[i][j][k] = BEMDef(B, mass, wall, roof, 0.0)
                refBEM[i][j][k].building.FanMax = FanFlow[j][k]     # max fan flow rate (m^3/s) per DOE

                Schedule[i][j][k] = SchDef()

                Schedule[i][j][k].Elec = SchEquip   # 3x24 matrix of schedule for fraction electricity (WD,Sat,Sun)
                Schedule[i][j][k].Light = SchLight  # 3x24 matrix of schedule for fraction light (WD,Sat,Sun)
                Schedule[i][j][k].Gas = SchGas      # 3x24 matrix of schedule for fraction gas (WD,Sat,Sun)
                Schedule[i][j][k].Occ = SchOcc      # 3x24 matrix of schedule for fraction occupancy (WD,Sat,Sun)
                Schedule[i][j][k].Cool = SetCool    # 3x24 matrix of schedule for fraction cooling temp (WD,Sat,Sun)
                Schedule[i][j][k].Heat = SetHeat    # 3x24 matrix of schedule for fraction heating temp (WD,Sat,Sun)
                Schedule[i][j][k].SWH = SchSWH      # 3x24 matrix of schedule for fraction SWH (WD,Sat,Sun)

                Schedule[i][j][k].Qelec = Elec[j]                   # W/m^2 (max) for electrical plug process
                Schedule[i][j][k].Qlight = Light[j]                 # W/m^2 (max) for light
                Schedule[i][j][k].Nocc = Occupant[j]/AreaFloor[j]   # Person/m^2
                Schedule[i][j][k].Qgas = Gas[j]                     # W/m^2 (max) for gas
                Schedule[i][j][k].Vent = Vent[j]/1000.0             # m^3/m^2 per person
                Schedule[i][j][k].Vswh = SHW[j]/AreaFloor[j]        # litres per hour per m^2 of floor

    # if not test serialize refDOE,refBEM,Schedule and store in resources
    if serialize_output:

        # create a binary file for serialized obj
        pkl_file_path = os.path.join(DIR_CURR,'refdata','readDOE.pkl')
        pickle_readDOE = open(pkl_file_path, 'wb')

        # dump in ../resources
        # Pickle objects, protocol 1 b/c binary file
        pickle.dump(refDOE, pickle_readDOE,1)
        pickle.dump(refBEM, pickle_readDOE,1)
        pickle.dump(Schedule, pickle_readDOE,1)

        pickle_readDOE.close()

    return refDOE, refBEM, Schedule