code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def disable(self):
    """Disable the button; the colour reflects expert vs non-expert mode."""
    w.ActButton.disable(self)
    globs = get_root(self).globals
    # Expert mode keeps the active colour; normal mode shows the disabled one.
    colour_key = 'start' if self._expert else 'startD'
    self.config(bg=globs.COL[colour_key])
Disable the button, if in non-expert mode.
def discussion_is_still_open(self, discussion_type, auto_close_after):
    """
    Check whether a type of discussion is still open after a certain
    number of days following publication.
    """
    enabled = getattr(self, discussion_type)
    has_auto_close = isinstance(auto_close_after, int) and auto_close_after >= 0
    if enabled and has_auto_close:
        opened_on = self.start_publication or self.publication_date
        age_in_days = (timezone.now() - opened_on).days
        return age_in_days < auto_close_after
    return enabled
Checks if a type of discussion is still open after a certain number of days.
def _check_stream_timeout(started, timeout): """Check if the timeout has been reached and raise a `StopIteration` if so. """ if timeout: elapsed = datetime.datetime.utcnow() - started if elapsed.seconds > timeout: raise StopIteration
Check if the timeout has been reached and raise a `StopIteration` if so.
def write_to_file(path, contents, file_type='text'):
    """Write ``contents`` to ``path``, optionally formatting them first.

    Args:
        path (str): destination file path.
        contents (str, object, or bytes): data to write.
        file_type (str, optional): ``text`` or ``binary`` (contents written
            unchanged) or ``json`` (contents formatted first). Defaults to
            ``text``.

    Raises:
        ScriptWorkerException: with an unknown ``file_type``.
        TypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON
            serializable.
    """
    FILE_TYPES = ('json', 'text', 'binary')
    if file_type not in FILE_TYPES:
        raise ScriptWorkerException("Unknown file_type {} not in {}!".format(file_type, FILE_TYPES))
    if file_type == 'json':
        contents = format_json(contents)
    # Binary data is written raw; everything else goes through text mode.
    if file_type == 'binary':
        with open(path, 'wb') as out:
            out.write(contents)
    else:
        with open(path, 'w') as out:
            print(contents, file=out, end="")
Write ``contents`` to ``path`` with optional formatting. Small helper function to write ``contents`` to ``file`` with optional formatting. Args: path (str): the path to write to contents (str, object, or bytes): the contents to write to the file file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary`` (contents are unchanged) or ``json`` (contents are formatted). Defaults to ``text``. Raises: ScriptWorkerException: with an unknown ``file_type`` TypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON serializable
def _save_db():
    """Dump the module-level script db to JSON at `datapath`."""
    import json
    from pyci.utility import json_serial
    vms("Serializing DB to JSON in {}".format(datapath))
    # json_serial handles the non-JSON-native values stored in the db.
    with open(datapath, 'w') as outfile:
        json.dump(db, outfile, default=json_serial)
Serializes the contents of the script db to JSON.
def insertFile(self, qInserts=False):
    """
    API to insert a list of file into DBS in DBS. Up to 10 files can be inserted in one request.

    :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
    :type qInserts: bool
    :param filesList: List of dictionaries containing following information
    :type filesList: list of dicts
    :key logical_file_name: File to be inserted (str) (Required)
    :key is_file_valid: (optional, default = 1): (bool)
    :key block: required: /a/b/c#d (str)
    :key dataset: required: /a/b/c (str)
    :key file_type: (optional, default = EDM) one of the predefined types, (str)
    :key check_sum: (optional, default = '-1') (str)
    :key event_count: (optional, default = -1) (int)
    :key file_size: (optional, default = -1.) (float)
    :key adler32: (optional, default = '') (str)
    :key md5: (optional, default = '') (str)
    :key auto_cross_section: (optional, default = -1.) (float)
    :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
    :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
    """
    # The flag may arrive from the HTTP layer as the string 'False'.
    if qInserts in (False, 'False'): qInserts=False
    try:
        body = request.body.read()
        indata = cjson.decode(body)["files"]
        if not isinstance(indata, (list, dict)):
            dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \
                                "insertFile expects input as list or dirc")
        businput = []
        # A single dict is accepted and wrapped as a one-element list.
        if isinstance(indata, dict):
            indata = [indata]
        indata = validateJSONInputNoCopy("files", indata)
        for f in indata:
            # Fill in server-side defaults (timestamps, creator identity)
            # for any fields the client did not supply.
            f.update({
                 #"dataset":f["dataset"],
                 "creation_date": f.get("creation_date", dbsUtils().getTime()),
                 "create_by" : dbsUtils().getCreateBy(),
                 "last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
                 "last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
                 "file_lumi_list":f.get("file_lumi_list", []),
                 "file_parent_list":f.get("file_parent_list", []),
                 # NOTE(review): reads input key "assoc_list", but the
                 # documented input key is "file_assoc_list" -- confirm
                 # which key clients actually send.
                 "file_assoc_list":f.get("assoc_list", []),
                 "file_output_config_list":f.get("file_output_config_list", [])})
            businput.append(f)
        self.dbsFile.insertFile(businput, qInserts)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        # Propagate HTTP errors unchanged so the framework renders them.
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
API to insert a list of file into DBS in DBS. Up to 10 files can be inserted in one request. :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes. :type qInserts: bool :param filesList: List of dictionaries containing following information :type filesList: list of dicts :key logical_file_name: File to be inserted (str) (Required) :key is_file_valid: (optional, default = 1): (bool) :key block: required: /a/b/c#d (str) :key dataset: required: /a/b/c (str) :key file_type: (optional, default = EDM) one of the predefined types, (str) :key check_sum: (optional, default = '-1') (str) :key event_count: (optional, default = -1) (int) :key file_size: (optional, default = -1.) (float) :key adler32: (optional, default = '') (str) :key md5: (optional, default = '') (str) :key auto_cross_section: (optional, default = -1.) (float) :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....] :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
async def _create_upstream_applications(self):
    """Instantiate each configured upstream application and schedule its task."""
    loop = asyncio.get_event_loop()
    for stream_name, application_cls in self.applications.items():
        application = application_cls(self.scope)
        queue = asyncio.Queue()
        self.application_streams[stream_name] = queue
        # dispatch_downstream takes the (sic) `steam_name` keyword.
        downstream = partial(self.dispatch_downstream, steam_name=stream_name)
        self.application_futures[stream_name] = loop.create_task(
            application(queue.get, downstream)
        )
Create the upstream applications.
def asBinary(self):
    """Get |ASN.1| value as a text string of bits."""
    # Strip the '0b' prefix, then left-pad with zeros to the value's bit length.
    bits = binary.bin(self._value)[2:]
    return bits.rjust(len(self._value), '0')
Get |ASN.1| value as a text string of bits.
def _make_methods():
    "Automagically generates methods based on the API endpoints"
    for endpoint, url in PokeAPI().get_endpoints().items():
        lines = [
            "\t@BaseAPI._memoize\n",
            "\tdef get_{0}(self, id_or_name='', limit=None,".format(endpoint.replace('-', '_')) + ' offset=None):\n',
            "\t\tparams = self._parse_params(locals().copy(), ['id_or_name'])\n",
            "\t\tquery_string = '{0}/'\n".format(url.split('/')[-2]),
            "\t\tquery_string += str(id_or_name) + '?'\n",
            '\t\tquery_string += params\n',
            '\t\treturn self._get(query_string)\n',
        ]
        print(''.join(lines))
Automagically generates methods based on the API endpoints
def _get_annual_data(self, p_p_id):
    """Fetch the 'courant' block of the annual consumption data."""
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
    try:
        response = yield from self._session.get(PROFILE_URL,
                                                params=params,
                                                timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecAnnualError("Can not get annual data")
    try:
        # The server labels the body text/json rather than application/json.
        payload = yield from response.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not payload.get('success'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    results = payload.get('results')
    if not results:
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if 'courant' not in results[0]:
        raise PyHydroQuebecAnnualError("Could not get annual data")
    return results[0]['courant']
Get annual data.
def CrossEntropyFlat(*args, axis:int=-1, **kwargs):
    "Same as `nn.CrossEntropyLoss`, but flattens input and target."
    loss_cls = nn.CrossEntropyLoss
    return FlattenedLoss(loss_cls, *args, axis=axis, **kwargs)
Same as `nn.CrossEntropyLoss`, but flattens input and target.
def typeset(self, container, text_align, last_line=False):
    """Typeset the line in `container` below its current cursor position.

    Advances the container's cursor to below the descender of this line.

    `justification` and `line_spacing` are passed on from the paragraph
    style. `last_descender` is the previous line's descender, used in the
    vertical positioning of this line. Finally, `last_line` specifies
    whether this is the last line of the paragraph.

    Returns the line's descender size."""
    document = container.document

    # drop spaces (and empty spans) at the end of the line
    while len(self) > 0:
        last_span = self[-1]
        if last_span and last_span.ends_with_space:
            self.cursor -= last_span.space.width
            self.pop()
        else:
            break
    else:   # abort if the line is empty
        return

    # Give each span a chance to do bookkeeping before glyphs are placed.
    for glyph_span in self:
        glyph_span.span.before_placing(container)

    # horizontal displacement
    left = self.indent

    # Tabs and the last line of a justified paragraph fall back to
    # left alignment.
    if self._has_tab or text_align == TextAlign.JUSTIFY and last_line:
        text_align = 'left'
    extra_space = self.width - self.cursor
    if text_align == TextAlign.JUSTIFY:
        # TODO: padding added to spaces should be prop. to font size
        nr_spaces = sum(glyph_span.number_of_spaces for glyph_span in self)
        if nr_spaces > 0:
            # Distribute the leftover width evenly over all spaces.
            add_to_spaces = extra_space / nr_spaces
            for glyph_span in self:
                if glyph_span.number_of_spaces > 0:
                    glyph_span.space.width += add_to_spaces
    elif text_align == TextAlign.CENTER:
        left += extra_space / 2.0
    elif text_align == TextAlign.RIGHT:
        left += extra_space

    canvas = container.canvas
    cursor = container.cursor
    current_annotation = AnnotationState(container)
    for span, glyph_and_widths in group_spans(self):
        try:
            width = canvas.show_glyphs(left, cursor, span, glyph_and_widths,
                                       container)
        except InlineFlowableException:
            # Span is an inline flowable rather than text; place its
            # virtual container manually and use the span's own width.
            ascender = span.ascender(document)
            if ascender > 0:
                top = cursor - ascender
            else:
                inline_height = span.virtual_container.height
                top = cursor - span.descender(document) - inline_height
            span.virtual_container.place_at(container, left, top)
            width = span.width
        current_annotation.update(span, left, width)
        left += width
    current_annotation.place_if_any()
Typeset the line in `container` below its current cursor position. Advances the container's cursor to below the descender of this line. `justification` and `line_spacing` are passed on from the paragraph style. `last_descender` is the previous line's descender, used in the vertical positioning of this line. Finally, `last_line` specifies whether this is the last line of the paragraph. Returns the line's descender size.
def _create_identity(id_type=None, username=None, password=None, tenant_id=None,
        tenant_name=None, api_key=None, verify_ssl=None, return_context=False):
    """
    Create an instance of the current identity_class.

    By default the instance is assigned to the module-level name 'identity'.
    If 'return_context' is True, the module-level 'identity' is untouched
    and the instance is returned instead.
    """
    cls = _import_identity(id_type) if id_type else settings.get("identity_class")
    if not cls:
        raise exc.IdentityClassNotDefined("No identity class has "
                "been defined for the current environment.")
    if verify_ssl is None:
        # Fall back to the environment-wide setting.
        verify_ssl = get_setting("verify_ssl")
    context = cls(username=username, password=password, tenant_id=tenant_id,
            tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)
    if return_context:
        return context
    global identity
    identity = context
Creates an instance of the current identity_class and assigns it to the module-level name 'identity' by default. If 'return_context' is True, the module-level 'identity' is untouched, and instead the instance is returned.
def _babi_parser(tmp_dir, babi_task_id, subset, dataset_split, joint_training=True):
    """Parsing the bAbi dataset (train and test).

    Args:
        tmp_dir: temp directory to download and extract the dataset
        babi_task_id: babi task id
        subset: babi subset
        dataset_split: dataset split (train or eval)
        joint_training: if training the model on all tasks.

    Returns:
        babi_instances: set of training examples, each a dict containing a
            story, a question and an answer.
        babi_lines: all the texts in the data separated based on their
            appearance in the stories, questions, or answers.
    """

    def _data_file(mode, task_id):
        """Generates the path to the data file for the given mode(train/test).

        Args:
            mode: either train or test for bAbi dataset
            task_id: babi task id

        Returns:
            data file path
        """
        file_name = (_TASKS[task_id] + "_{}.txt")
        return os.path.join(_DIR_NAME, subset, file_name.format(mode))

    def _all_task_raw_data_generator(tmp_dir, data_file, dataset_split):
        """Prepares raw data for all tasks together.

        Concatenates every per-task file for the split into one file.

        Args:
            tmp_dir: temp directory
            data_file: data file
            dataset_split: dataset split
        """
        tf.logging.info("Preparing dataset of all task together")
        globe_name = ("*_{}.txt")
        mode_name = "test"
        if dataset_split == problem.DatasetSplit.TRAIN:
            mode_name = "train"
        files_name = os.path.join(
            tmp_dir, _DIR_NAME, subset, globe_name.format(mode_name))
        with tf.gfile.GFile(data_file, "wb") as outfile:
            for filename in tf.gfile.Glob(files_name):
                if filename == data_file:
                    # don"t want to copy the output into the output
                    continue
                with tf.gfile.GFile(filename, "rb") as readfile:
                    shutil.copyfileobj(readfile, outfile)

    def _parse_answer(answer):
        # Comma-separated answers ("lists-sets" / "path finding" tasks) are
        # collapsed into a single token.
        if (joint_training or babi_task_id in ["qa8", "qa19", "qa0"
                                              ]):  # "lists-sets" or "path finding"
            return "".join([d for d in answer.split(",")])  # as a single token!
        else:
            return answer

    if dataset_split == problem.DatasetSplit.TRAIN:
        # "qa0" is the joint file covering all tasks.
        babi_train_task_id = "qa0" if joint_training else babi_task_id
        data_file = os.path.join(tmp_dir, _data_file("train", babi_train_task_id))
    else:
        data_file = os.path.join(tmp_dir, _data_file("test", babi_task_id))

    if ((babi_task_id == "qa0" or joint_training) and
            not tf.gfile.Exists(os.path.join(tmp_dir, data_file))):
        _all_task_raw_data_generator(tmp_dir, data_file, dataset_split)

    tf.logging.info("Parsing %s into training/testing instances...", data_file)

    babi_instances = []
    with tf.gfile.GFile(data_file, mode="r") as f:
        story = []
        for line in f:
            # Each line starts with its in-story line number.
            line_num, line = line.strip().split(" ", 1)
            if int(line_num) == 1:
                story = []
            if "\t" in line:
                # Question lines are "<question>\t<answer>\t<supporting facts>".
                question, answer, _ = line.split("\t")
                question = _normalize_string(question)
                substories = [s for s in story if s]
                answer = _parse_answer(answer)
                instance = {
                    FeatureNames.STORY: substories,
                    FeatureNames.QUESTION: question,
                    FeatureNames.ANSWER: answer
                }
                babi_instances.append(instance)
                # Keep story indices aligned after a question line.
                story.append("")
            else:
                story.append(_normalize_string(line))

    return babi_instances
Parsing the bAbi dataset (train and test). Args: tmp_dir: temp directory to download and extract the dataset babi_task_id: babi task id subset: babi subset dataset_split: dataset split (train or eval) joint_training: if training the model on all tasks. Returns: babi_instances: set of training examples, each a dict containing a story, a question and an answer. babi_lines: all the texts in the data separated based on their appearance in the stories, questions, or answers.
def mmPrettyPrintTraces(traces, breakOnResets=None):
    """
    Returns pretty-printed table of traces.

    @param traces (list) Traces to print in table
    @param breakOnResets (BoolsTrace) Trace of resets to break table on

    @return (string) Pretty-printed table of traces.
    """
    assert len(traces) > 0, "No traces found"
    headers = ["#"] + [trace.prettyPrintTitle() for trace in traces]
    table = PrettyTable(headers)
    for rowIndex in xrange(len(traces[0].data)):
        if breakOnResets and breakOnResets.data[rowIndex]:
            # Visual separator row at each reset.
            table.add_row(["<reset>"] * len(headers))
        row = [rowIndex] + [trace.prettyPrintDatum(trace.data[rowIndex])
                            for trace in traces]
        table.add_row(row)
    return table.get_string().encode("utf-8")
Returns pretty-printed table of traces. @param traces (list) Traces to print in table @param breakOnResets (BoolsTrace) Trace of resets to break table on @return (string) Pretty-printed table of traces.
def get_jobs(plugin_name, verify_job=True, conn=None):
    """
    :param plugin_name: <str>
    :param verify_job: <bool>
    :param conn: <connection> or <NoneType>
    :return: <generator> yields <dict>
    """
    cursor = _jobs_cursor(plugin_name).run(conn)
    for job_doc in cursor:
        if verify_job and not verify(job_doc, Job()):
            # skip invalid jobs... warn?
            continue
        yield job_doc
:param plugin_name: <str> :param verify_job: <bool> :param conn: <connection> or <NoneType> :return: <generator> yields <dict>
def shapefile(self, file):
    """Reproject the shapefile's extent to WGS84 and store it on ``self``.

    Sets ``self.bbox`` to 'xmin,ymin,xmax,ymax' (EPSG:4326) and resets
    ``self.query``; exits the process if the file cannot be opened.

    :param file: path to an ESRI Shapefile
    """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dataset = driver.Open(file)
    # Guard clause replaces the original if/else nesting.
    if dataset is None:
        exit(" shapefile not found. Please verify your path to the shapefile")
    layer = dataset.GetLayer()
    # Source projection taken from the first feature's geometry
    # (the original also fetched layer.GetSpatialRef() but immediately
    # overwrote it -- dead store removed).
    feature = layer.GetNextFeature()
    geom = feature.GetGeometryRef()
    spatialRef = geom.GetSpatialReference()
    # Target: WGS84
    outSpatialRef = osr.SpatialReference()
    outSpatialRef.ImportFromEPSG(4326)
    coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)
    # GetEnvelope() returns (xmin, xmax, ymin, ymax).
    env = geom.GetEnvelope()
    pointMAX = ogr.Geometry(ogr.wkbPoint)
    pointMAX.AddPoint(env[1], env[3])
    pointMAX.Transform(coordTrans)
    pointMIN = ogr.Geometry(ogr.wkbPoint)
    pointMIN.AddPoint(env[0], env[2])
    pointMIN.Transform(coordTrans)
    self.bbox = ','.join(str(v) for v in (pointMIN.GetPoint()[0],
                                          pointMIN.GetPoint()[1],
                                          pointMAX.GetPoint()[0],
                                          pointMAX.GetPoint()[1]))
    self.query = None
reprojette en WGS84 et recupere l'extend
def commit(self): """ Insert the specified text in all selected lines, always at the same column position. """ # Get the number of lines and columns in the last line. last_line, last_col = self.qteWidget.getNumLinesAndColumns() # If this is the first ever call to this undo/redo element # then backup the current cursor position because # it will be required for the redo operation. if self.cursorPos is None: if qteKilledTextFromRectangle is None: return self.insertedText = list(qteKilledTextFromRectangle) self.cursorPos = self.qteWidget.getCursorPosition() else: self.qteWidget.setCursorPosition(*self.cursorPos) # Insert the killed strings into their respective lines. col = self.cursorPos[1] for ii, text in enumerate(self.insertedText): line = ii + self.cursorPos[0] self.baseClass.insertAt(text, line, col)
Insert the specified text in all selected lines, always at the same column position.
def profile_slope(self, kwargs_lens_list, lens_model_internal_bool=None, num_points=10):
    """
    computes the logarithmic power-law slope of a profile

    :param kwargs_lens_list: lens model keyword argument list
    :param lens_model_internal_bool: bool list, indicate which part of the model to consider
    :param num_points: number of estimates around the Einstein radius
    :return:
    """
    theta_E = self.effective_einstein_radius(kwargs_lens_list)
    center_x = kwargs_lens_list[0]['center_x']
    center_y = kwargs_lens_list[0]['center_y']
    dr = 0.01
    # Sample two rings: one at the Einstein radius, one slightly outside.
    x_inner, y_inner = util.points_on_circle(theta_E, num_points)
    x_outer, y_outer = util.points_on_circle(theta_E + dr, num_points)
    if lens_model_internal_bool is None:
        lens_model_internal_bool = [True] * len(kwargs_lens_list)

    def _deflection_norm(xs, ys):
        # Magnitude of the deflection angle at the given offsets from center.
        ax, ay = self._lensModel.alpha(center_x + xs, center_y + ys,
                                       kwargs_lens_list, k=lens_model_internal_bool)
        return np.sqrt(ax ** 2 + ay ** 2)

    alpha_inner = _deflection_norm(x_inner, y_inner)
    alpha_outer = _deflection_norm(x_outer, y_outer)
    # Finite-difference estimate of d(log alpha)/d(log r), averaged.
    slope = np.mean(np.log(alpha_outer / alpha_inner) /
                    np.log((theta_E + dr) / theta_E))
    return 2 - slope
computes the logarithmic power-law slope of a profile :param kwargs_lens_list: lens model keyword argument list :param lens_model_internal_bool: bool list, indicate which part of the model to consider :param num_points: number of estimates around the Einstein radius :return:
def validate(self):
    """
    Verify that the contents of the OpaqueObject are valid.

    Raises:
        TypeError: if the types of any OpaqueObject attributes are invalid.
    """
    if not isinstance(self.value, bytes):
        raise TypeError("opaque value must be bytes")
    if not isinstance(self.opaque_type, enums.OpaqueDataType):
        raise TypeError("opaque data type must be an OpaqueDataType "
                        "enumeration")
    for index, name in enumerate(self.names):
        if not isinstance(name, six.string_types):
            position = "({0} in list)".format(index)
            raise TypeError("opaque data name {0} must be a string".format(
                position))
Verify that the contents of the OpaqueObject are valid. Raises: TypeError: if the types of any OpaqueObject attributes are invalid.
def role_list(endpoint_id):
    """
    Executor for `globus access endpoint-role-list`
    """
    client = get_client()
    roles = client.endpoint_role_list(endpoint_id)
    # Lazily resolve identity principals to usernames.
    identity_principals = (r["principal"] for r in roles
                           if r["principal_type"] == "identity")
    resolved_ids = LazyIdentityMap(identity_principals)

    def principal_str(role):
        principal = role["principal"]
        kind = role["principal_type"]
        if kind == "identity":
            return resolved_ids.get(principal) or principal
        if kind == "group":
            return (u"https://app.globus.org/groups/{}").format(principal)
        return principal

    formatted_print(
        roles,
        fields=[
            ("Principal Type", "principal_type"),
            ("Role ID", "id"),
            ("Principal", principal_str),
            ("Role", "role"),
        ],
    )
Executor for `globus access endpoint-role-list`
def make_url(self, returnURL, paymentReason, pipelineName, transactionAmount, **params):
    """
    Generate the URL with the signature required for a transaction.

    :param returnURL: URL the buyer is sent back to after the pipeline
    :param paymentReason: description shown to the buyer
    :param pipelineName: name of the co-branded UI pipeline
    :param transactionAmount: amount of the transaction
    :param params: additional pipeline parameters
    :return: the signed co-branded-UI URL (str)
    """
    # use the sandbox authorization endpoint if we're using the
    # sandbox for API calls.
    endpoint_host = 'authorize.payments.amazon.com'
    if 'sandbox' in self.host:
        endpoint_host = 'authorize.payments-sandbox.amazon.com'
    base = "/cobranded-ui/actions/start"

    params['callerKey'] = str(self.aws_access_key_id)
    params['returnURL'] = str(returnURL)
    params['paymentReason'] = str(paymentReason)
    params['pipelineName'] = pipelineName
    params['transactionAmount'] = transactionAmount
    params["signatureMethod"] = 'HmacSHA256'
    params["signatureVersion"] = '2'

    # Fix: dict.has_key() is deprecated; membership test is equivalent.
    if 'callerReference' not in params:
        params['callerReference'] = str(uuid.uuid4())

    def sorted_query(p):
        # Deterministic, URL-encoded query string (no leading '&'); this
        # replaces the two duplicated build-then-strip loops.
        return '&'.join("%s=%s" % (k, urllib.quote(p[k], '~'))
                        for k in sorted(p.keys()))

    # Sign the canonical request, then append the signature to the params.
    canonical = '\n'.join(['GET', str(endpoint_host).lower(), base,
                           sorted_query(params)])
    params["signature"] = self._auth_handler.sign_string(canonical)

    return "https://%s%s?%s" % (endpoint_host, base, sorted_query(params))
Generate the URL with the signature required for a transaction
def add_index(self, attribute, ordered=False):
    """
    Adds an index to this map for the specified entries so that queries can run faster.

    Example:
        Let's say your map values are Employee objects.

        >>> class Employee(IdentifiedDataSerializable):
        >>>     active = False
        >>>     age = None
        >>>     name = None
        >>>     # other fields and methods

        If you query your values mostly based on age and active fields,
        you should consider indexing these.

        >>> map = self.client.get_map("employees")
        >>> map.add_index("age", True)      # ordered, since we have ranged queries for this field
        >>> map.add_index("active", False)  # not ordered, because a boolean field cannot have a range

    :param attribute: (str), index attribute of the value.
    :param ordered: (bool), for ordering the index or not (optional).
    """
    return self._encode_invoke(map_add_index_codec,
                               attribute=attribute,
                               ordered=ordered)
Adds an index to this map for the specified entries so that queries can run faster. Example: Let's say your map values are Employee objects. >>> class Employee(IdentifiedDataSerializable): >>> active = False >>> age = None >>> name = None >>> #other fields >>> >>> #methods If you query your values mostly based on age and active fields, you should consider indexing these. >>> map = self.client.get_map("employees") >>> map.add_index("age", True) #ordered, since we have ranged queries for this field >>> map.add_index("active", False) #not ordered, because a boolean field cannot have a range :param attribute: (str), index attribute of the value. :param ordered: (bool), for ordering the index or not (optional).
def rebin_scale(a, scale=1):
    """Scale an array to a new shape (each side multiplied by `scale`)."""
    scaled_shape = tuple(side * scale for side in a.shape)
    return rebin(a, scaled_shape)
Scale an array to a new shape.
def isscalar(cls, dataset, dim):
    """
    Tests if dimension is scalar in each subpath.
    """
    if not dataset.data:
        return True
    template = cls._inner_dataset_template(dataset)
    # Evaluate every subpath (no short-circuit, matching the original).
    flags = []
    for subpath in dataset.data:
        template.data = subpath
        flags.append(template.interface.isscalar(template, dim))
    return all(flags)
Tests if dimension is scalar in each subpath.
def decode(self, s):
    """
    Decode special characters encodings found in string I{s}.
    @param s: A string to decode.
    @type s: str
    @return: The decoded string.
    @rtype: str
    """
    # Only strings containing '&' can hold encoded entities.
    if not (isinstance(s, basestring) and '&' in s):
        return s
    for encoded, plain in self.decodings:
        s = s.replace(encoded, plain)
    return s
Decode special characters encodings found in string I{s}. @param s: A string to decode. @type s: str @return: The decoded string. @rtype: str
def tag(self, *tag, **kwtags):
    """Tag a Property instance with metadata dictionary"""
    if tag:
        # A single positional dict is merged; anything else is an error.
        if len(tag) == 1 and isinstance(tag[0], dict):
            self._meta.update(tag[0])
        else:
            raise TypeError('Tags must be provided as key-word arguments or '
                            'a dictionary')
    self._meta.update(kwtags)
    return self
Tag a Property instance with metadata dictionary
def close(self):
    """Close the file, and for mode "w" and "a" write the ending records."""
    if self.fp is None:
        return

    if self.mode in ("w", "a") and self._didModify: # write ending records
        count = 0
        pos1 = self.fp.tell()
        for zinfo in self.filelist:         # write central directory
            count = count + 1
            dt = zinfo.date_time
            # Pack the timestamp into MS-DOS date/time bit fields.
            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
            extra = []
            # Sizes/offsets beyond the classic 32-bit limits move into a
            # ZIP64 extra field; the fixed header gets 0xffffffff sentinels.
            if zinfo.file_size > ZIP64_LIMIT \
                    or zinfo.compress_size > ZIP64_LIMIT:
                extra.append(zinfo.file_size)
                extra.append(zinfo.compress_size)
                file_size = 0xffffffff
                compress_size = 0xffffffff
            else:
                file_size = zinfo.file_size
                compress_size = zinfo.compress_size

            if zinfo.header_offset > ZIP64_LIMIT:
                extra.append(zinfo.header_offset)
                header_offset = 0xffffffff
            else:
                header_offset = zinfo.header_offset

            extra_data = zinfo.extra
            if extra:
                # Append a ZIP64 field to the extra's
                extra_data = struct.pack(
                        '<HH' + 'Q'*len(extra),
                        1, 8*len(extra), *extra) + extra_data
                # ZIP64 requires at least version 4.5 to extract/create.
                extract_version = max(45, zinfo.extract_version)
                create_version = max(45, zinfo.create_version)
            else:
                extract_version = zinfo.extract_version
                create_version = zinfo.create_version

            try:
                filename, flag_bits = zinfo._encodeFilenameFlags()
                centdir = struct.pack(structCentralDir,
                 stringCentralDir, create_version,
                 zinfo.create_system, extract_version, zinfo.reserved,
                 flag_bits, zinfo.compress_type, dostime, dosdate,
                 zinfo.CRC, compress_size, file_size,
                 len(filename), len(extra_data), len(zinfo.comment),
                 0, zinfo.internal_attr, zinfo.external_attr,
                 header_offset)
            except DeprecationWarning:
                # Dump the offending record for debugging, then re-raise.
                print((structCentralDir, stringCentralDir, create_version,
                 zinfo.create_system, extract_version, zinfo.reserved,
                 zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                 zinfo.CRC, compress_size, file_size,
                 len(zinfo.filename), len(extra_data), len(zinfo.comment),
                 0, zinfo.internal_attr, zinfo.external_attr,
                 header_offset), file=sys.stderr)
                raise
            self.fp.write(centdir)
            self.fp.write(filename)
            self.fp.write(extra_data)
            self.fp.write(zinfo.comment)

        pos2 = self.fp.tell()
        # Write end-of-zip-archive record
        centDirCount = count
        centDirSize = pos2 - pos1
        centDirOffset = pos1
        if (centDirCount >= ZIP_FILECOUNT_LIMIT or
            centDirOffset > ZIP64_LIMIT or
            centDirSize > ZIP64_LIMIT):
            # Need to write the ZIP64 end-of-archive records
            zip64endrec = struct.pack(
                    structEndArchive64, stringEndArchive64,
                    44, 45, 45, 0, 0, centDirCount, centDirCount,
                    centDirSize, centDirOffset)
            self.fp.write(zip64endrec)

            zip64locrec = struct.pack(
                    structEndArchive64Locator,
                    stringEndArchive64Locator, 0, pos2, 1)
            self.fp.write(zip64locrec)
            # Clamp the classic end record's fields to their field widths.
            centDirCount = min(centDirCount, 0xFFFF)
            centDirSize = min(centDirSize, 0xFFFFFFFF)
            centDirOffset = min(centDirOffset, 0xFFFFFFFF)

        # check for valid comment length
        if len(self.comment) >= ZIP_MAX_COMMENT:
            if self.debug > 0:
                # NOTE(review): `msg` is assigned but never emitted -- the
                # truncation warning is silently dropped; confirm whether a
                # print/log call went missing here.
                msg = 'Archive comment is too long; truncating to %d bytes' \
                      % ZIP_MAX_COMMENT
            self.comment = self.comment[:ZIP_MAX_COMMENT]

        endrec = struct.pack(structEndArchive, stringEndArchive,
                             0, 0, centDirCount, centDirCount,
                             centDirSize, centDirOffset, len(self.comment))
        self.fp.write(endrec)
        self.fp.write(self.comment)
        self.fp.flush()

    if not self._filePassed:
        self.fp.close()
    self.fp = None
Close the file, and for mode "w" and "a" write the ending records.
def posterior(self, x, sigma=1.):
    """Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed"""
    prior_prec = 1. / self.scale**2
    data_prec = x.size / sigma**2
    # Conjugate normal update: precisions add; mean is precision-weighted.
    post_var = 1. / (prior_prec + data_prec)
    post_mean = post_var * (prior_prec * self.loc + data_prec * x.mean())
    return Normal(loc=post_mean, scale=np.sqrt(post_var))
Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed
def set_filter(self, filter):
    """Sets the filter to be used for resizing when using this pattern.

    See :ref:`FILTER` for details on each filter.

    Note that you might want to control filtering even when you do not
    have an explicit :class:`Pattern`, (for example when using
    :meth:`Context.set_source_surface`). In these cases, it is convenient
    to use :meth:`Context.get_source` to get access to the pattern that
    cairo creates implicitly. For example::

        context.get_source().set_filter(cairocffi.FILTER_NEAREST)
    """
    # Delegate to the C library, then translate any error status
    # into a Python exception.
    cairo.cairo_pattern_set_filter(self._pointer, filter)
    self._check_status()
Sets the filter to be used for resizing when using this pattern. See :ref:`FILTER` for details on each filter. Note that you might want to control filtering even when you do not have an explicit :class:`Pattern`, (for example when using :meth:`Context.set_source_surface`). In these cases, it is convenient to use :meth:`Context.get_source` to get access to the pattern that cairo creates implicitly. For example:: context.get_source().set_filter(cairocffi.FILTER_NEAREST)
def phase(args):
    """
    %prog phase genbankfiles

    Input has to be gb file. Search the `KEYWORDS` section to look for PHASE.
    Also look for "chromosome" and "clone" in the definition line.
    """
    parser = OptionParser(phase.__doc__)
    parser.set_outfile()
    opts, args = parser.parse_args(args)

    if len(args) < 1:
        sys.exit(not parser.print_help())

    out = must_open(opts.outfile, "w")
    for gbfile in args:
        for record in SeqIO.parse(gbfile, "gb"):
            bac_phase, keywords = get_phase(record)
            chr, clone = get_clone(record)
            fields = (record.id, str(bac_phase), ";".join(keywords),
                      chr, clone)
            print("\t".join(fields), file=out)
%prog phase genbankfiles Input has to be gb file. Search the `KEYWORDS` section to look for PHASE. Also look for "chromosome" and "clone" in the definition line.
def transformChildrenToNative(self):
    """
    Recursively replace every child with its native representation.

    Keys are visited in sorted order so that dependencies are handled
    first (e.g. vtimezone before vevent).
    """
    for key in self.sortChildKeys():
        for entry in self.contents[key]:
            native = entry.transformToNative()
            native.transformChildrenToNative()
Recursively replace children with their native representation. Sort to get dependency order right, like vtimezone before vevent.
def convertDict2Attrs(self, *args, **kwargs):
    """The trick for iterable Mambu Objects comes here:

    You iterate over each element of the responded List from Mambu,
    and create a Mambu Client object for each one, initializing them
    one at a time, and changing the attrs attribute (which just holds
    a list of plain dictionaries) with a MambuClient just created.

    .. todo:: pass a valid (perhaps default) urlfunc, and its
              corresponding id to entid to each MambuClient, telling
              MambuStruct not to connect() by default. It's desirable
              to connect at any other further moment to refresh some
              element in the list.
    """
    for n,c in enumerate(self.attrs):
        # ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
        try:
            params = self.params
        except AttributeError as aerr:
            # no request params stored on this object; use an empty set
            params = {}
        # NOTE(review): this mutates the shared ``kwargs`` on every pass;
        # presumably harmless because ``params`` is identical each time,
        # but worth confirming.
        kwargs.update(params)
        try:
            client = self.mambuclientclass(urlfunc=None, entid=None, *args, **kwargs)
        except AttributeError as ae:
            # first use: fall back to the default client class
            self.mambuclientclass = MambuClient
            client = self.mambuclientclass(urlfunc=None, entid=None, *args, **kwargs)

        # urlfunc=None means the client did not connect(); populate it
        # from the plain dict instead.
        client.init(c, *args, **kwargs)
        self.attrs[n] = client
The trick for iterable Mambu Objects comes here: You iterate over each element of the responded List from Mambu, and create a Mambu Client object for each one, initializing them one at a time, and changing the attrs attribute (which just holds a list of plain dictionaries) with a MambuClient just created. .. todo:: pass a valid (perhaps default) urlfunc, and its corresponding id to entid to each MambuClient, telling MambuStruct not to connect() by default. It's desirable to connect at any other further moment to refresh some element in the list.
def get_package_version(self, feed, group_id, artifact_id, version, show_deleted=None):
    """GetPackageVersion.
    [Preview API] Get information about a package version.
    :param str feed: Name or ID of the feed.
    :param str group_id: Group ID of the package.
    :param str artifact_id: Artifact ID of the package.
    :param str version: Version of the package.
    :param bool show_deleted: True to show information for deleted packages.
    :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
    """
    # Serialize each non-None argument into its URL route slot.
    route_values = {}
    for route_key, param_name, value in (
            ('feed', 'feed', feed),
            ('groupId', 'group_id', group_id),
            ('artifactId', 'artifact_id', artifact_id),
            ('version', 'version', version)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    query_parameters = {}
    if show_deleted is not None:
        query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
    response = self._send(http_method='GET',
                          location_id='180ed967-377a-4112-986b-607adb14ded4',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('Package', response)
GetPackageVersion. [Preview API] Get information about a package version. :param str feed: Name or ID of the feed. :param str group_id: Group ID of the package. :param str artifact_id: Artifact ID of the package. :param str version: Version of the package. :param bool show_deleted: True to show information for deleted packages. :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
def get_title(self, index):
    """Return the stored title of a container page, or None if absent.

    Parameters
    ----------
    index : int
        Index of the container page
    """
    # Titles come from JSON, whose dictionary keys are strings.
    key = unicode_type(int(index))
    return self._titles.get(key)
Gets the title of a container pages. Parameters ---------- index : int Index of the container page
def _get_labels(self, y):
    """
    Construct pylearn2 dataset labels.

    Parameters
    ----------
    y : array_like, optional
        Labels.
    """
    labels = np.asarray(y)
    # promote a flat label vector to a single-column 2D array
    if labels.ndim == 1:
        return labels.reshape((labels.size, 1))
    assert labels.ndim == 2
    return labels
Construct pylearn2 dataset labels. Parameters ---------- y : array_like, optional Labels.
def get_online_date(self, **kwargs):
    """Get the online date from the meta creation date.

    Looks for a ``metadataCreationDate`` qualifier and reformats its
    content (matched by META_CREATION_DATE_REGEX) as ``M/D/YYYY``.

    :keyword qualifier: element qualifier; only 'metadataCreationDate'
        is handled.
    :keyword content: the date string to parse.
    :returns: the formatted date string, or None when the element is not
        a metadata-creation-date or its content does not match the
        expected pattern.
    """
    qualifier = kwargs.get('qualifier', '')
    content = kwargs.get('content', '')
    # Handle meta-creation-date element.
    if qualifier != 'metadataCreationDate':
        return None
    date_match = META_CREATION_DATE_REGEX.match(content)
    if date_match is None:
        # Previously this raised AttributeError on malformed content;
        # treat an unparseable date as "no online date" instead.
        return None
    (year, month, day) = date_match.groups('')
    # Create the date.
    creation_date = datetime.date(int(year), int(month), int(day))
    return '%s/%s/%s' % (
        format_date_string(creation_date.month),
        format_date_string(creation_date.day),
        creation_date.year,
    )
Get the online date from the meta creation date.
def pst_prior(pst,logger=None, filename=None, **kwargs):
    """ helper to plot prior parameter histograms implied by
    parameter bounds. Saves a multipage pdf named <case>.prior.pdf

    Parameters
    ----------
    pst : pyemu.Pst
    logger : pyemu.Logger
    filename : str
        PDF filename to save plots to. If None, return figs without saving.
        Default is None.
    kwargs : dict
        accepts 'grouper' as dict to group parameters on to a single axis
        (use parameter groups if not passed), 'unique_only' to only show
        unique mean-stdev combinations within a given group

    Returns
    -------
    None

    TODO
    ----
    external parcov, unique mean-std pairs

    """
    # NOTE(review): relies on module-level plotting globals (figsize, nr,
    # nc, abet) and the helper get_page_axes() defined elsewhere in this
    # module.
    if logger is None:
        logger=Logger('Default_Loggger.log',echo=False)
    logger.log("plot pst_prior")
    par = pst.parameter_data

    if "parcov_filename" in pst.pestpp_options:
        logger.warn("ignoring parcov_filename, using parameter bounds for prior cov")
    # paired logger.log(...) calls presumably start/stop a timed section --
    # confirm against pyemu.Logger semantics
    logger.log("loading cov from parameter data")
    cov = pyemu.Cov.from_parameter_data(pst)
    logger.log("loading cov from parameter data")

    # mean values: parval1, log10-transformed for log-transformed parameters
    logger.log("building mean parameter values")
    li = par.partrans.loc[cov.names] == "log"
    mean = par.parval1.loc[cov.names]
    info = par.loc[cov.names,:].copy()
    info.loc[:,"mean"] = mean
    info.loc[li,"mean"] = mean[li].apply(np.log10)
    logger.log("building mean parameter values")

    # stdevs from the diagonal of the prior covariance
    logger.log("building stdev parameter values")
    if cov.isdiagonal:
        std = cov.x.flatten()
    else:
        std = np.diag(cov.x)
    std = np.sqrt(std)
    info.loc[:,"prior_std"] = std
    logger.log("building stdev parameter values")

    if std.shape != mean.shape:
        logger.lraise("mean.shape {0} != std.shape {1}".
                      format(mean.shape,std.shape))

    if "grouper" in kwargs:
        raise NotImplementedError()
        #check for consistency here
    else:
        # default grouping: one axis per parameter group, adjustable
        # parameters only
        par_adj = par.loc[par.partrans.apply(lambda x: x in ["log","none"]),:]
        grouper = par_adj.groupby(par_adj.pargp).groups
        #grouper = par.groupby(par.pargp).groups

    if len(grouper) == 0:
        raise Exception("no adustable parameters to plot")
    fig = plt.figure(figsize=figsize)
    if "fig_title" in kwargs:
        plt.figtext(0.5,0.5,kwargs["fig_title"])
    else:
        plt.figtext(0.5,0.5,"pyemu.Pst.plot(kind='prior')\nfrom pest control file '{0}'\n at {1}"
                    .format(pst.filename,str(datetime.now())),ha="center")
    figs = []
    ax_count = 0
    grps_names = list(grouper.keys())
    grps_names.sort()
    for g in grps_names:
        names = grouper[g]
        logger.log("plotting priors for {0}".
                   format(','.join(list(names))))
        # start a new page when the current one is full (nr x nc axes)
        if ax_count % (nr * nc) == 0:
            plt.tight_layout()
            #pdf.savefig()
            #plt.close(fig)
            figs.append(fig)
            fig = plt.figure(figsize=figsize)
            axes = get_page_axes()
            ax_count = 0

        islog = False
        # NOTE(review): value_counts() is taken over ALL parameters, not
        # just this group's -- confirm whether the mixed-partrans warning
        # is intended to be per-group
        vc = info.partrans.value_counts()
        if vc.shape[0] > 1:
            logger.warn("mixed partrans for group {0}".format(g))
        elif "log" in vc.index:
            islog = True
        ax = axes[ax_count]
        if "unique_only" in kwargs and kwargs["unique_only"]:
            # only draw one density per distinct (mean, std) pair
            ms = info.loc[names,:].apply(lambda x: (x["mean"],x["prior_std"]),axis=1).unique()
            for (m,s) in ms:
                x, y = gaussian_distribution(m, s)
                ax.fill_between(x, 0, y, facecolor='0.5', alpha=0.5,
                                edgecolor="none")
        else:
            for m,s in zip(info.loc[names,'mean'],info.loc[names,'prior_std']):
                x,y = gaussian_distribution(m,s)
                ax.fill_between(x,0,y,facecolor='0.5',alpha=0.5,
                                edgecolor="none")
        ax.set_title("{0}) group:{1}, {2} parameters".
                     format(abet[ax_count],g,names.shape[0]),loc="left")
        ax.set_yticks([])
        if islog:
            ax.set_xlabel("$log_{10}$ parameter value",labelpad=0.1)
        else:
            ax.set_xlabel("parameter value", labelpad=0.1)
        logger.log("plotting priors for {0}".
                   format(','.join(list(names))))
        ax_count += 1

    # blank out any unused axes on the final page
    for a in range(ax_count,nr*nc):
        axes[a].set_axis_off()
        axes[a].set_yticks([])
        axes[a].set_xticks([])

    plt.tight_layout()
    #pdf.savefig()
    #plt.close(fig)
    figs.append(fig)
    if filename is not None:
        with PdfPages(filename) as pdf:
            plt.tight_layout()
            pdf.savefig(fig)
            plt.close(fig)
        logger.log("plot pst_prior")
    else:
        logger.log("plot pst_prior")
        return figs
helper to plot prior parameter histograms implied by parameter bounds. Saves a multipage pdf named <case>.prior.pdf Parameters ---------- pst : pyemu.Pst logger : pyemu.Logger filename : str PDF filename to save plots to. If None, return figs without saving. Default is None. kwargs : dict accepts 'grouper' as dict to group parameters on to a single axis (use parameter groups if not passed), 'unique_only' to only show unique mean-stdev combinations within a given group Returns ------- None TODO ---- external parcov, unique mean-std pairs
def install_sql_hook():
    """If installed this causes Django's queries to be captured."""
    # Django moved this module from ``util`` to ``utils``; try the newer
    # path first and fall back for old versions.
    try:
        from django.db.backends.utils import CursorWrapper
    except ImportError:
        from django.db.backends.util import CursorWrapper
    try:
        # keep references to the unpatched methods so the wrappers can
        # delegate to them
        real_execute = CursorWrapper.execute
        real_executemany = CursorWrapper.executemany
    except AttributeError:
        # XXX(mitsuhiko): On some very old django versions (<1.6) this
        # trickery would have to look different but I can't be bothered.
        return

    def record_many_sql(vendor, alias, start, sql, param_list):
        # record one breadcrumb per parameter set of an executemany call
        duration = time.time() - start
        for params in param_list:
            record_sql(vendor, alias, start, duration, sql, params)

    def execute(self, sql, params=None):
        # time the query and record it even when it raises
        start = time.time()
        try:
            return real_execute(self, sql, params)
        finally:
            record_sql(self.db.vendor, getattr(self.db, 'alias', None),
                       start, time.time() - start, sql, params)

    def executemany(self, sql, param_list):
        start = time.time()
        try:
            return real_executemany(self, sql, param_list)
        finally:
            record_many_sql(self.db.vendor, getattr(self.db, 'alias', None),
                            start, sql, param_list)

    # monkey-patch the cursor wrapper and silence Django's own SQL logger
    # to avoid duplicate breadcrumbs
    CursorWrapper.execute = execute
    CursorWrapper.executemany = executemany
    breadcrumbs.ignore_logger('django.db.backends')
If installed this causes Django's queries to be captured.
def cas2mach(Vcas, H):
    """Convert calibrated airspeed to Mach number at altitude ``H``."""
    # CAS -> TAS -> Mach, chaining the existing conversion helpers.
    return tas2mach(cas2tas(Vcas, H), H)
Calibrated Airspeed to Mach number
def __create(self, client_id, cc_number, cvv, expiration_month,
             expiration_year, user_name, email, address, **kwargs):
    """Call documentation: `/credit_card/create
    <https://www.wepay.com/developer/reference/credit_card#create>`_, plus
    extra keyword parameter:

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`
    """
    params = dict(
        client_id=client_id,
        cc_number=cc_number,
        cvv=cvv,
        expiration_month=expiration_month,
        expiration_year=expiration_year,
        user_name=user_name,
        email=email,
        address=address,
    )
    return self.make_call(self.__create, params, kwargs)
Call documentation: `/credit_card/create <https://www.wepay.com/developer/reference/credit_card#create>`_, plus extra keyword parameter: :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
def combo_exhaustive_label_definition_check(self,
                                            ontology: pd.DataFrame,
                                            label_predicate:str,
                                            definition_predicates:str,
                                            diff = True) -> List[List[dict]]:
    ''' Combo of label & definition exhaustive check out of convenience

    Args:
        ontology: pandas DataFrame created from an ontology where the
            colnames are predicates and if classes exist it is also thrown
            into a the colnames.
        label_predicate: usually in qname form and is the colname of the
            DataFrame for the label
        definition_predicates: colnames holding definition values
        diff: complete exhaustive diff if between curie matches... will
            take FOREVER if there are a lot -> n^2

    Returns:
        inside: entities that are inside of InterLex
        outside: entities NOT in InterLex
        diff (optional): List[List[dict]]... so complicated but useful
            diff between matches only
    '''
    inside, outside = [], []
    # itertuples() yields (Index, col1, col2, ...); rebuild a dict keyed
    # by column name for each row
    header = ['Index'] + list(ontology.columns)
    for row in ontology.itertuples():
        row = {header[i]:val for i, val in enumerate(row)}
        label_obj = row[label_predicate]
        if isinstance(label_obj, list):
            # a multi-label cell is ambiguous; abort the whole run
            if len(label_obj) != 1:
                exit('Need to have only 1 label in the cell from the onotology.')
            else:
                label_obj = label_obj[0]
        entity_label = self.local_degrade(label_obj)

        # match by label against the InterLex label index
        label_search_results = self.label2rows.get(entity_label)
        label_ilx_rows = label_search_results if label_search_results else []

        # match by each definition predicate against the definition index
        definition_ilx_rows = []
        for definition_predicate in definition_predicates:
            definition_objs = row[definition_predicate]
            if not definition_objs:
                continue
            definition_objs = [definition_objs] if not isinstance(definition_objs, list) else definition_objs
            for definition_obj in definition_objs:
                definition_obj = self.local_degrade(definition_obj)
                definition_search_results = self.definition2rows.get(definition_obj)
                if definition_search_results:
                    definition_ilx_rows.extend(definition_search_results)

        # deduplicate the combined matches (rows are plain dicts, so
        # round-trip through hashable tuples of items)
        ilx_rows = [dict(t) for t in {tuple(d.items()) for d in (label_ilx_rows + definition_ilx_rows)}]
        if ilx_rows:
            inside.append({
                'external_ontology_row': row,
                'ilx_rows': ilx_rows,
            })
        else:
            outside.append(row)
    if diff:
        # exhaustive pairwise diff of the matches (quadratic)
        diff = self.__exhaustive_diff(inside)
        return inside, outside, diff
    return inside, outside
Combo of label & definition exhaustive check out of convenience Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. label_predicate: usually in qname form and is the colname of the DataFrame for the label diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but useful diff between matches only
def check(self, item_id):
    """Check whether the analysis for ``item_id`` has finished.

    :type  item_id: int
    :param item_id: task_id to check.

    :rtype:  bool
    :return: True if a report is ready, False otherwise.
    """
    response = self._request("tasks/view/{id}".format(id=item_id))

    # 404 almost certainly means an unknown task id
    if response.status_code == 404:
        return False

    try:
        payload = json.loads(response.content.decode('utf-8'))
        if payload['task']["status"] in ('completed', 'reported'):
            return True
    except ValueError as e:
        raise sandboxapi.SandboxError(e)

    return False
Check if an analysis is complete :type item_id: int :param item_id: task_id to check. :rtype: bool :return: Boolean indicating if a report is done or not.
def make_chunks_from_unused(self,length,trig_overlap,play=0,min_length=0,
  sl=0,excl_play=0,pad_data=0):
  """
  Create an extra chunk that uses up the unused data in the science segment.
  @param length: length of chunk in seconds.
  @param trig_overlap: length of time start generating triggers before the
  start of the unused data.
  @param play:
  - 1 : only generate chunks that overlap with S2 playground data.
  - 2 : as 1 plus compute trig start and end times to coincide
  with the start/end of the playground
  @param min_length: the unused data must be greater than min_length to make a
  chunk.
  @param sl: slide by sl seconds before determining playground data.
  @param excl_play: exclude the first excl_play second from the start and end
  of the chunk when computing if the chunk overlaps with playground.
  @param pad_data: exclude the first and last pad_data seconds of the segment
  when generating chunks
  """
  # NOTE(review): 729273613 appears to be the GPS start of the S2
  # playground epoch and 6370/600 its repeat interval/duration -- confirm
  # against the playground definition used elsewhere in this package.
  for seg in self.__sci_segs:
    # if there is unused data longer than the minimum chunk length
    if seg.unused() > min_length:
      end = seg.end() - pad_data
      start = end - length
      # only keep the chunk if playground filtering is off, or the
      # (slid, exclusion-trimmed) chunk overlaps a playground interval
      if (not play) or (play and (((end-sl-excl_play-729273613)%6370) <
        (600+length-2*excl_play))):
        trig_start = end - seg.unused() - trig_overlap
        if (play == 2):
          # calculate the start of the playground preceeding the chunk end
          play_start = 729273613 + 6370 * \
           math.floor((end-sl-excl_play-729273613) / 6370)
          play_end = play_start + 600
          trig_end = 0
          if ( (play_end - 6370) > start ):
            print "Two playground segments in this chunk"
            print "  Code to handle this case has not been implemented"
            sys.exit(1)
          else:
            # clip the trigger window to the playground boundaries
            if play_start > trig_start:
              trig_start = int(play_start)
            if (play_end < end):
              trig_end = int(play_end)
            if (trig_end == 0) or (trig_end > trig_start):
              seg.add_chunk(start, end, trig_start, trig_end)
        else:
          seg.add_chunk(start, end, trig_start)
      # the leftover data is consumed whether or not a chunk was kept
      seg.set_unused(0)
Create an extra chunk that uses up the unused data in the science segment. @param length: length of chunk in seconds. @param trig_overlap: length of time start generating triggers before the start of the unused data. @param play: - 1 : only generate chunks that overlap with S2 playground data. - 2 : as 1 plus compute trig start and end times to coincide with the start/end of the playground @param min_length: the unused data must be greater than min_length to make a chunk. @param sl: slide by sl seconds before determining playground data. @param excl_play: exclude the first excl_play second from the start and end of the chunk when computing if the chunk overlaps with playground. @param pad_data: exclude the first and last pad_data seconds of the segment when generating chunks
def show_user(self, login=None, envs=[], query='/users/'):
    """
    `login` - Login or username of user

    Show user in specified environments.

    Note: the environments iterated come from the CLI args
    (``self.args.envs``); the ``envs`` parameter is kept only for
    backward compatibility.
    """
    juicer.utils.Log.log_debug("Show User: %s", login)

    # The loop iterates self.args.envs, so the separator logic below must
    # use the same list.  (Previously it compared against the `envs`
    # parameter, which defaults to empty, so the blank-line separator
    # between environments was never printed.)
    env_list = self.args.envs

    # keep track of which iteration of environment we're in
    count = 0

    for env in env_list:
        count += 1

        juicer.utils.Log.log_info("%s:", env)

        if not juicer.utils.user_exists_p(login, self.connectors[env]):
            juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!",
                                      (login, env))
            continue
        else:
            url = "%s%s/" % (query, login)
            _r = self.connectors[env].get(url)
            if _r.status_code == Constants.PULP_GET_OK:
                user = juicer.utils.load_json_str(_r.content)
                juicer.utils.Log.log_info("Login: %s" % user['login'])
                juicer.utils.Log.log_info("Name: %s" % user['name'])
                juicer.utils.Log.log_info("Roles: %s" % ', '.join(user['roles']))
                if count < len(env_list):
                    # just want a new line between environments
                    juicer.utils.Log.log_info("")
            else:
                _r.raise_for_status()
    return True
`login` - Login or username of user Show user in specified environments
def safe_makedirs(path):
    """Safe makedirs.

    Works in a multithreaded scenario: if another thread creates the
    directory between the existence check and makedirs, the resulting
    OSError is swallowed as long as the directory exists afterwards.
    """
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError:
        # lost the race: fine if someone else created it, otherwise re-raise
        if not os.path.exists(path):
            raise
Safe makedirs. Works in a multithreaded scenario.
def condition_details_has_owner(condition_details, owner):
    """Check whether the public key of *owner* appears in the condition
    details as an Ed25519Fulfillment.public_key.

    Args:
        condition_details (dict): dict with condition details
        owner (str): base58 public key of owner

    Returns:
        bool: True if the public key is found in the condition details,
              False otherwise
    """
    if 'subconditions' in condition_details:
        # recurse into the nested list of subconditions
        if condition_details_has_owner(condition_details['subconditions'], owner):
            return True
    elif isinstance(condition_details, list):
        return any(condition_details_has_owner(sub, owner)
                   for sub in condition_details)
    elif 'public_key' in condition_details \
            and owner == condition_details['public_key']:
        return True
    return False
Check if the public_key of owner is in the condition details as an Ed25519Fulfillment.public_key Args: condition_details (dict): dict with condition details owner (str): base58 public key of owner Returns: bool: True if the public key is found in the condition details, False otherwise
def cold_spell_days(tas, thresh='-10 degC', window=5, freq='AS-JUL'):
    r"""Cold spell days.

    The number of days that are part of a cold spell: ``window`` or more
    consecutive days with mean daily temperature below ``thresh``.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature [℃] or [K]
    thresh : str
      Threshold temperature below which a cold spell begins [℃] or [K].
      Default : '-10 degC'
    window : int
      Minimum number of days with temperature below threshold to qualify
      as a cold spell.
    freq : str, optional
      Resampling frequency

    Returns
    -------
    xarray.DataArray
      Cold spell days.

    Notes
    -----
    Let :math:`T_i` be the mean daily temperature on day :math:`i`, the
    number of cold spell days during period :math:`\phi` is given by

    .. math::

       \sum_{i \in \phi} \prod_{j=i}^{i+5} [T_j < thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
    """
    threshold = utils.convert_units_to(thresh, tas)
    below = tas < threshold
    # count, within each resampling period, days belonging to runs of at
    # least `window` consecutive cold days
    return below.resample(time=freq).apply(rl.windowed_run_count,
                                           window=window, dim='time')
r"""Cold spell days The number of days that are part of a cold spell, defined as five or more consecutive days with mean daily temperature below a threshold in °C. Parameters ---------- tas : xarrray.DataArray Mean daily temperature [℃] or [K] thresh : str Threshold temperature below which a cold spell begins [℃] or [K]. Default : '-10 degC' window : int Minimum number of days with temperature below threshold to qualify as a cold spell. freq : str, optional Resampling frequency Returns ------- xarray.DataArray Cold spell days. Notes ----- Let :math:`T_i` be the mean daily temperature on day :math:`i`, the number of cold spell days during period :math:`\phi` is given by .. math:: \sum_{i \in \phi} \prod_{j=i}^{i+5} [T_j < thresh] where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
def get_gender(self, name, country=None):
    """Returns best gender for the given name and country pair.

    :param name: given name to look up (lowercased unless case_sensitive).
    :param country: optional country name; must be one of
        ``self.__class__.COUNTRIES`` if given.
    :raises NoCountryError: when ``country`` is not a known country.
    """
    if not self.case_sensitive:
        name = name.lower()
    if name not in self.names:
        return self.unknown_value
    elif not country:
        def counter(country_values):
            # Materialize to a list: under Python 3, ``map`` returns an
            # iterator and calling ``len()`` on it raised TypeError.
            values = [ord(ch) for ch in country_values.replace(" ", "")]
            # decode base-36-ish frequency digits: 'A'.. -> 10.., '0'.. -> 0..
            return (len(values),
                    sum(v - 55 if v > 64 else v - 48 for v in values))
        return self._most_popular_gender(name, counter)
    elif country in self.__class__.COUNTRIES:
        index = self.__class__.COUNTRIES.index(country)
        counter = lambda e: (ord(e[index])-32, 0)
        return self._most_popular_gender(name, counter)
    else:
        raise NoCountryError("No such country: %s" % country)
Returns best gender for the given name and country pair
def p_statement_list_1(self, p):
    '''statement_list : statement SEMICOLON statement_list'''
    # The docstring above is a PLY (yacc) grammar production, not
    # documentation -- ply.yacc parses it to build the parser tables, so
    # it must not be reworded.
    # Reuse the node built for the tail of the list and prepend this
    # statement's node to its children.
    p[0] = p[3]
    if p[1] is not None:
        # a None statement is skipped -- presumably produced by an empty
        # statement rule (stray semicolon); confirm against the grammar
        p[0].children.insert(0, p[1])
statement_list : statement SEMICOLON statement_list
def unhandle(self, handler):
    """
    Unregister *handler* so its callback no longer receives this event.
    Raises ValueError if the handler was never registered.
    """
    with self._hlock:
        if handler not in self._handler_list:
            raise ValueError("Handler is not handling this event, so cannot unhandle it.")
        self._handler_list.remove(handler)
    return self
unregister handler (removing callback function)
def average_repetitions(df, keys_mean):
    """average duplicate measurements. This requires that IDs and norrec
    labels were assigned using the *assign_norrec_to_df* function.

    Parameters
    ----------
    df
        DataFrame
    keys_mean: list
        list of keys to average. For all other keys the first entry will be
        used.
    """
    if 'norrec' not in df.columns:
        raise Exception(
            'The "norrec" column is required for this function to work!'
        )

    # remember the column order so it can be restored after groupby/agg
    original_columns = list(df.columns.values)

    # keep the first entry for every column that is not averaged
    keys_keep = list(set(df.columns.tolist()) - set(keys_mean))
    agg_dict = {column: _first for column in keys_keep}
    agg_dict.update({column: np.mean for column in keys_mean})
    for special in ('id', 'timestep', 'frequency', 'norrec'):
        agg_dict.pop(special, None)

    # average over duplicate measurements, grouped by whichever of the
    # extra dimensions are present
    group_dims = [dim for dim in ('id', 'norrec', 'frequency', 'timestep')
                  if dim in df.columns]
    df = df.groupby(group_dims).agg(agg_dict)
    df.reset_index(inplace=True)
    return df[original_columns]
average duplicate measurements. This requires that IDs and norrec labels were assigned using the *assign_norrec_to_df* function. Parameters ---------- df DataFrame keys_mean: list list of keys to average. For all other keys the first entry will be used.
def from_json_format(conf):
    '''Convert fields of parsed json dictionary to python format'''
    # permission fields arrive as octal strings (e.g. "755"); store ints
    for key in ('fmode', 'dmode'):
        if key in conf:
            conf[key] = int(conf[key], 8)
Convert fields of parsed json dictionary to python format
def insert_object(self, db_object):
    """Create a new (active) entry in the database.

    Parameters
    ----------
    db_object : (Sub-class of)ObjectHandle
    """
    # serialize via to_dict(), flag as active, and store
    record = self.to_dict(db_object)
    record['active'] = True
    self.collection.insert_one(record)
Create new entry in the database. Parameters ---------- db_object : (Sub-class of)ObjectHandle
def _copy_files(source, target):
    """
    Copy every regular file in *source* into *target*, creating *target*
    if necessary.  Subdirectories are ignored (non-recursive).
    """
    entries = listdir(source)
    if not exists(target):
        makedirs(target)
    for entry in entries:
        candidate = join(source, entry)
        if isfile(candidate):
            shutil.copy(candidate, target)
Copy all the files in source directory to target. Ignores subdirectories.
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action/command are combined into the ``api_action`` request parameter
    (e.g. ``linode.list``).  The module-level ``LASTCALL`` timestamp is
    used for crude rate limiting: if the previous call happened within
    the same second, sleep for the configured ``ratelimit_sleep``.
    '''
    global LASTCALL

    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # inject credentials and the API action unless the caller provided them
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # rate limiting: LASTCALL holds the epoch second of the previous call
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    # api_key / rootPass are masked in salt's debug logging
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # Linode reports errors in-band via ERRORARRAY
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # auth failures abort immediately with a clearer message
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
Make a web call to the Linode API.
def bind(function, *args, **kwargs):
    """
    Wraps the given function such that when it is called, the given arguments
    are passed in addition to the connection argument.

    :type  function: function
    :param function: The function that's ought to be wrapped.
    :type  args: list
    :param args: Passed on to the called function.
    :type  kwargs: dict
    :param kwargs: Passed on to the called function.
    :rtype:  function
    :return: The wrapped function.
    """
    def decorated(*inner_args, **inner_kwargs):
        # Merge into a fresh dict on every call.  The previous
        # implementation did ``kwargs.update(inner_kwargs)``, mutating the
        # closed-over kwargs so keyword overrides from one call leaked
        # into every subsequent call of the bound function.
        merged = dict(kwargs)
        merged.update(inner_kwargs)
        return function(*(inner_args + args), **merged)

    copy_labels(function, decorated)
    return decorated
Wraps the given function such that when it is called, the given arguments are passed in addition to the connection argument. :type function: function :param function: The function that's ought to be wrapped. :type args: list :param args: Passed on to the called function. :type kwargs: dict :param kwargs: Passed on to the called function. :rtype: function :return: The wrapped function.
def clear(self):
    # type: (ORMTask) -> None
    """Delete all objects created by this task.

    Marks the task incomplete, deletes every row of each class listed in
    ``self.object_classes``, then closes the session.
    """
    # flag the task as not-yet-complete before removing its outputs
    self.mark_incomplete()
    for cls in self.object_classes:
        self.session.query(cls).delete()
    self.close_session()
Delete all objects created by this task. Iterate over `self.object_classes` and delete all objects of the listed classes.
def _group_until_different(items: Iterable[TIn],
                           key: Callable[[TIn], TKey],
                           value=lambda e: e):
    """Groups runs of items that are identical according to a keying function.

    Args:
        items: The items to group.
        key: If two adjacent items produce the same output from this
            function, they will be grouped.
        value: Maps each item into a value to put in the group. Defaults to
            the item itself.

    Yields:
        Tuples containing the group key and item values.
    """
    runs = groupby(items, key)
    return ((group_key, list(map(value, run))) for group_key, run in runs)
Groups runs of items that are identical according to a keying function. Args: items: The items to group. key: If two adjacent items produce the same output from this function, they will be grouped. value: Maps each item into a value to put in the group. Defaults to the item itself. Examples: _group_until_different(range(11), key=is_prime) yields (False, [0, 1]) (True, [2, 3]) (False, [4]) (True, [5]) (False, [6]) (True, [7]) (False, [8, 9, 10]) Yields: Tuples containing the group key and item values.
def get_instance(self, payload):
    """
    Build an instance of ShortCodeInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.short_code.ShortCodeInstance
    :rtype: twilio.rest.api.v2010.account.short_code.ShortCodeInstance
    """
    account_sid = self._solution['account_sid']
    return ShortCodeInstance(self._version, payload, account_sid=account_sid)
Build an instance of ShortCodeInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.short_code.ShortCodeInstance :rtype: twilio.rest.api.v2010.account.short_code.ShortCodeInstance
def to_bytes(value):
    """Converts bytes, unicode, and C char arrays to bytes.

    Unicode strings are encoded to UTF-8.
    """
    # order matters: text first, then cffi char arrays, then raw bytes
    if isinstance(value, text_type):
        return value.encode('utf-8')
    if isinstance(value, ffi.CData):
        return ffi.string(value)
    if isinstance(value, binary_type):
        return value
    raise ValueError('Value must be text, bytes, or char[]')
Converts bytes, unicode, and C char arrays to bytes. Unicode strings are encoded to UTF-8.
def get_current_channel(self):
    """Get the current tv channel."""
    self.request(EP_GET_CURRENT_CHANNEL)
    response = self.last_response
    if response is None:
        return {}
    return response.get('payload')
Get the current tv channel.
def unwatch(value):
    """Return the :class:`Spectator` of a :class:`Watchable` instance."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    result = watcher(value)
    # Detach the spectator from the instance; best-effort, since the
    # attribute may not exist (e.g. a class-level spectator).
    try:
        del value._instance_spectator
    except Exception:
        pass
    return result
Return the :class:`Spectator` of a :class:`Watchable` instance.
def load_template_source(self, *ka):
    """
    Backward compatible method for Django < 2.0.
    """
    template_name = ka[0]
    for origin in self.get_template_sources(template_name):
        try:
            contents = self.get_contents(origin)
        except TemplateDoesNotExist:
            continue
        return contents, origin.name
    raise TemplateDoesNotExist(template_name)
Backward compatible method for Django < 2.0.
def getChildren(self, name=None, ns=None):
    """
    Get a list of children by (optional) name and/or (optional) namespace.
    @param name: The name of a child element (may contain a prefix).
    @type name: basestring
    @param ns: An optional namespace used to match the child.
    @type ns: (I{prefix}, I{name})
    @return: The list of matching children.
    @rtype: [L{Element},...]
    """
    if ns is None:
        # With no namespace and no name there is nothing to match on.
        if name is None:
            return self.children
        # Resolve a prefixed name ("p:tag") into a namespace tuple.
        prefix, name = splitPrefix(name)
        if prefix is not None:
            ns = self.resolvePrefix(prefix)
    matches = []
    for child in self.children:
        if child.match(name, ns):
            matches.append(child)
    return matches
Get a list of children by (optional) name and/or (optional) namespace. @param name: The name of a child element (may contain a prefix). @type name: basestring @param ns: An optional namespace used to match the child. @type ns: (I{prefix}, I{name}) @return: The list of matching children. @rtype: [L{Element},...]
def _server_response_handler(self, response: Dict[str, Any]):
    """Handle server status codes in the 100-199 range.

    Code 100 means authentication succeeded; the pending login future is
    resolved with the response. Code 101 is a pong reply to a ping.

    Parameters:
        response: the decoded response payload (a dict).

    Return:
        (bool): True once the response has been handled without error.
    """
    code = response.get("CODE")
    if code == 100:
        if self.debug:
            print("auth succeed")
        self._login_fut.set_result(response)
    # BUG FIX: the original used a second independent `if` and only the
    # code-101 branch returned True, so a successful auth (code 100)
    # returned None even though the docstring promises True on success.
    elif code == 101:
        if self.debug:
            print('pong')
    return True
处理100~199段状态码,针对不同的服务响应进行操作. Parameters: (response): - 响应的python字典形式数据 Return: (bool): - 准确地说没有错误就会返回True
def make_repr(inst, attrs):
    # type: (object, Sequence[str]) -> str
    """Create a repr from an instance of a class

    Args:
        inst: The class instance we are generating a repr of
        attrs: The attributes that should appear in the repr
    """
    # Only attributes that actually exist on the instance are shown.
    parts = []
    for attr in attrs:
        if hasattr(inst, attr):
            parts.append("%s=%r" % (attr, getattr(inst, attr)))
    return "%s(%s)" % (inst.__class__.__name__, ", ".join(parts))
Create a repr from an instance of a class Args: inst: The class instance we are generating a repr of attrs: The attributes that should appear in the repr
def invalidate_cache(self, klass, instance=None, extra=None,
                     force_all=False):
    """
    Use this method to invalidate keys related to a particular model or
    instance. Invalidating a cache is really just incrementing the version
    for the right key(s).

    :param klass: The model class you are invalidating. If the given \
    class was not registered with this group no action will be taken.
    :param instance: The instance you want to use with the registered\
    instance_values. Usually the instance that was just saved. \
    Defaults to None.
    :param extra: A list of extra values that you would like incremented \
    in addition to what was registered for this model.
    :param force_all: Ignore all registered values and provided \
    arguments and increment the major version for this group.
    """
    values = self._get_cache_extras(klass, instance=instance,
                                    extra=extra, force_all=force_all)
    # A sentinel meaning "everything": bump the major version instead of
    # any per-value versions.
    if values == CacheConfig.ALL:
        self._increment_version()
        return
    if not values:
        return
    for value in values:
        self._increment_version(extra=value)
Use this method to invalidate keys related to a particular model or instance. Invalidating a cache is really just incrementing the version for the right key(s). :param klass: The model class you are invalidating. If the given \ class was not registered with this group no action will be taken. :param instance: The instance you want to use with the registered\ instance_values. Usually the instance that was just saved. \ Defaults to None. :param extra: A list of extra values that you would like incremented \ in addition to what was registered for this model. :param force_all: Ignore all registered values and provided \ arguments and increment the major version for this group.
def enable_host_event_handler(self, host):
    """Enable event handlers for a host

    Format of the line that triggers function call::

        ENABLE_HOST_EVENT_HANDLER;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    # Nothing to do when event handlers are already enabled.
    if host.event_handler_enabled:
        return
    host.modified_attributes |= \
        DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
    host.event_handler_enabled = True
    self.send_an_element(host.get_update_status_brok())
Enable event handlers for a host Format of the line that triggers function call:: ENABLE_HOST_EVENT_HANDLER;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None
def activate():
    """
    Activates the version specified in ``env.project_version`` if it is
    different from the current active version.

    An active version is just the version that is symlinked.
    """
    # NOTE: this is Python 2 code (print statements, the `<>` operator).
    # Path of the deployed environment for the version being activated.
    env_path = '/'.join([deployment_root(),'env',env.project_fullname])
    if not exists(env_path):
        print env.host,"ERROR: The version",env.project_version,"does not exist at"
        print env_path
        sys.exit(1)
    active = active_version()
    servers = webserver_list()
    # Stop webservers while switching versions (also done when patching).
    if env.patch or active <> env.project_fullname:
        for s in servers:
            stop_webserver(s)
    if not env.patch and active <> env.project_fullname:
        if env.verbosity:
            print env.host, "ACTIVATING version", env_path
        if not env.nomigration:
            sync_db()
        #south migration
        if 'south' in env.INSTALLED_APPS and not env.nomigration and not env.manualmigration:
            migration()
        if env.manualmigration or env.MANUAL_MIGRATION:
            manual_migration()
        #activate sites
        # Site config files are named "<domain_with_underscores>-<version>.conf".
        activate_sites = [''.join([d.name.replace('.','_'),'-',env.project_version,'.conf']) for d in domain_sites()]
        if 'apache2' in get_packages():
            site_paths = ['/etc/apache2','/etc/nginx']
        else:
            site_paths = ['/etc/nginx']
        #disable existing sites
        for path in site_paths:
            for site in _ls_sites('/'.join([path,'sites-enabled'])):
                if site not in activate_sites:
                    sudo("rm %s/sites-enabled/%s"% (path,site))
        #activate new sites
        for path in site_paths:
            for site in activate_sites:
                if not exists('/'.join([path,'sites-enabled',site])):
                    sudo("chmod 644 %s" % '/'.join([path,'sites-available',site]))
                    sudo("ln -s %s/sites-available/%s %s/sites-enabled/%s"% (path,site,path,site))
                    if env.verbosity:
                        print " * enabled", "%s/sites-enabled/%s"% (path,site)
        #delete existing symlink
        ln_path = '/'.join([deployment_root(),'env',env.project_name])
        run('rm -f '+ln_path)
        #run post deploy hooks
        post_exec_hook('post_deploy')
        #activate
        run('ln -s %s %s'% (env_path,ln_path))
        if env.verbosity:
            print env.host,env.project_fullname, "ACTIVATED"
    else:
        if env.verbosity and not env.patch:
            print env.project_fullname,"is the active version"
    # Restart the webservers that were stopped above.
    if env.patch or active <> env.project_fullname:
        for s in servers:
            start_webserver(s)
        print
    return
Activates the version specified in ``env.project_version`` if it is different from the current active version. An active version is just the version that is symlinked.
def header(msg, *args, **kwargs):
    '''Display an header'''
    decorated = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
    echo(decorated, *args, **kwargs)
Display an header
def setup_zmq(self):
    """Set up a PUSH and a PULL socket. The PUSH socket will push out
    requests to the workers. The PULL socket will receive responses from
    the workers and reply through the server socket."""
    context = zmq.Context()
    self.context = context
    self.push = context.socket(zmq.PUSH)
    self.push_port = self.push.bind_to_random_port("tcp://%s" % self.host)

    # Start a listener for the pull socket, then yield once so the
    # spawned greenthread gets a chance to run.
    eventlet.spawn(self.zmq_pull)
    eventlet.sleep(0)
Set up a PUSH and a PULL socket. The PUSH socket will push out requests to the workers. The PULL socket will receive responses from the workers and reply through the server socket.
def annotate(row, ax, x='x', y='y', text='name', xytext=(7, -5), textcoords='offset points', **kwargs):
    """Add a text label to the plot of a DataFrame indicated by the provided axis (ax).

    Each of ``x``, ``y`` and ``text`` may either name a column/key present in
    ``row`` (in which case the value is looked up) or be a literal value.

    Returns the resolved label text.

    Reference:
      https://stackoverflow.com/a/40979683/623735
    """
    # Resolve each argument: prefer a lookup in `row`, fall back to a literal.
    text = row[text] if text in row else str(text)
    x = row[x] if x in row else float(x)
    y = row[y] if y in row else float(y)
    # BUG FIX: the original re-indexed `row` with the already-resolved values
    # (`row[x]` after `x` had been replaced by the coordinate itself) and
    # returned `row[text]`, raising KeyError / returning the wrong value.
    ax.annotate(text, (x, y), xytext=xytext, textcoords=textcoords, **kwargs)
    return text
Add a text label to the plot of a DataFrame indicated by the provided axis (ax). Reference: https://stackoverflow.com/a/40979683/623735
def bytes2guid(s):
    """Converts a serialized GUID to a text GUID"""
    assert isinstance(s, bytes)
    # The first three fields are stored little-endian ...
    data1, data2, data3 = struct.unpack("<IHH", s[:8])
    # ... while the last two are big-endian; the 6-byte node field is
    # widened to 8 bytes so it can be read as an unsigned long long.
    data4, node = struct.unpack(">HQ", s[8:10] + b"\x00\x00" + s[10:])
    return "%08X-%04X-%04X-%04X-%012X" % (data1, data2, data3, data4, node)
Converts a serialized GUID to a text GUID
def ListComp(xp, fp, it, test=None):
    """A list comprehension of the form [xp for fp in it if test].

    If test is None, the "if test" part is omitted.
    """
    # Normalize whitespace prefixes on the supplied nodes so the rendered
    # source reads "[xp for fp in it if test]".
    xp.prefix = u""
    fp.prefix = u" "
    it.prefix = u" "
    for_leaf = Leaf(token.NAME, u"for")
    for_leaf.prefix = u" "
    in_leaf = Leaf(token.NAME, u"in")
    in_leaf.prefix = u" "
    inner_args = [for_leaf, fp, in_leaf, it]
    if test:
        test.prefix = u" "
        if_leaf = Leaf(token.NAME, u"if")
        if_leaf.prefix = u" "
        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
    # NOTE(review): the leaves carry "[" / "]" text under the LBRACE/RBRACE
    # token types — presumably a lib2to3 convention for building a list atom;
    # confirm against the fixer_util helpers before changing.
    return Node(syms.atom,
                [Leaf(token.LBRACE, u"["),
                 inner,
                 Leaf(token.RBRACE, u"]")])
A list comprehension of the form [xp for fp in it if test]. If test is None, the "if test" part is omitted.
def GetCallingModuleObjectAndName():
    """Returns the module that's calling into this module.

    We generally use this function to get the name of the module calling a
    DEFINE_foo... function.

    Returns:
      The module object that called into this one.

    Raises:
      AssertionError: if no calling module could be identified.
    """
    # xrange keeps the walk lazy on Python 2; range is lazy on Python 3.
    range_func = range if sys.version_info[0] >= 3 else xrange
    # Walk outward through the call stack, one frame at a time, until we
    # find a frame belonging to a module that has not disclaimed interest.
    for depth in range_func(1, sys.getrecursionlimit()):
        # sys._getframe is the right thing to use here, as it's the best
        # way to walk up the call stack.
        globals_for_frame = sys._getframe(depth).f_globals  # pylint: disable=protected-access
        module, module_name = GetModuleObjectAndName(globals_for_frame)
        if id(module) not in disclaim_module_ids and module_name is not None:
            return _ModuleObjectAndName(module, module_name)
    raise AssertionError('No module was found')
Returns the module that's calling into this module. We generally use this function to get the name of the module calling a DEFINE_foo... function. Returns: The module object that called into this one. Raises: AssertionError: if no calling module could be identified.
def extractSNPs(snpsToExtract, options):
    """Extract markers using Plink.

    :param snpsToExtract: the name of the file containing markers to extract.
    :param options: the options

    :type snpsToExtract: str
    :type options: argparse.Namespace

    :returns: the prefix of the output files.
    """
    outPrefix = options.out + ".pruned_data"
    plinkCommand = [
        "plink", "--noweb",
        "--bfile", options.bfile,
        "--extract", snpsToExtract,
        "--make-bed",
        "--out", outPrefix,
    ]
    runCommand(plinkCommand)
    return outPrefix
Extract markers using Plink. :param snpsToExtract: the name of the file containing markers to extract. :param options: the options :type snpsToExtract: str :type options: argparse.Namespace :returns: the prefix of the output files.
def refresh(self, row=None):
    """Refresh widget"""
    # Selection-dependent widgets are only usable with a current item.
    has_selection = self.listwidget.currentItem() is not None
    for widget in self.selection_widgets:
        widget.setEnabled(has_selection)
    not_empty = self.listwidget.count() > 0
    if self.sync_button is not None:
        self.sync_button.setEnabled(not_empty)
Refresh widget
def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
    """Translate method's return value for stripping off success flag.

    There are a lot of methods which return a "success" boolean and have
    several out arguments. Translate such a method to return the out
    arguments on success and None on failure.
    """
    @wraps(method)
    def wrapped(*args, **kwargs):
        ret = method(*args, **kwargs)
        if not ret[0]:
            # Failure path: raise if an exception type was configured,
            # otherwise return the configured fallback value.
            if exc_type:
                raise exc_type(exc_str or 'call failed')
            return fail_ret
        # Unwrap a single out argument; keep multiple ones as a sequence.
        return ret[1] if len(ret) == 2 else ret[1:]
    return wrapped
Translate method's return value for stripping off success flag. There are a lot of methods which return a "success" boolean and have several out arguments. Translate such a method to return the out arguments on success and None on failure.
def get(self): """Return form result""" # It is import to avoid accessing Qt C++ object as it has probably # already been destroyed, due to the Qt.WA_DeleteOnClose attribute if self.outfile: if self.result in ['list', 'dict', 'OrderedDict']: fd = open(self.outfile + '.py', 'w') fd.write(str(self.data)) elif self.result == 'JSON': fd = open(self.outfile + '.json', 'w') data = json.loads(self.data, object_pairs_hook=OrderedDict) json.dump(data, fd) elif self.result == 'XML': fd = open(self.outfile + '.xml', 'w') root = ET.fromstring(self.data) tree = ET.ElementTree(root) tree.write(fd, encoding='UTF-8') fd.close() else: return self.data
Return form result
def strace(device, trace_address, breakpoint_address):
    """Implements simple trace using the STrace API.

    Args:
      device (str): the device to connect to
      trace_address (int): address to begin tracing from
      breakpoint_address (int): address to breakpoint at

    Returns:
      ``None``
    """
    jlink = pylink.JLink()
    jlink.open()

    # Do the initial connection sequence.
    jlink.power_on()
    jlink.set_tif(pylink.JLinkInterfaces.SWD)
    jlink.connect(device)
    jlink.reset()

    # Clear any breakpoints that may exist as of now.
    jlink.breakpoint_clear_all()

    # Start the simple trace.
    op = pylink.JLinkStraceOperation.TRACE_START
    jlink.strace_clear_all()
    jlink.strace_start()

    # Set the breakpoint and trace events, then restart the CPU so that it
    # will execute.  (The returned handles are not used afterwards.)
    bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True)
    trhandle = jlink.strace_code_fetch_event(op, address=trace_address)
    jlink.restart()
    time.sleep(1)

    # Run until the CPU halts due to the breakpoint being hit.
    # NOTE: this is a busy-wait with no timeout; it spins until the
    # target halts.
    while True:
        if jlink.halted():
            break

    # Print out all instructions that were captured by the trace,
    # draining the trace buffer one instruction at a time.
    while True:
        instructions = jlink.strace_read(1)
        if len(instructions) == 0:
            break

        instruction = instructions[0]
        print(jlink.disassemble_instruction(instruction))

    jlink.power_off()
    jlink.close()
Implements simple trace using the STrace API. Args: device (str): the device to connect to trace_address (int): address to begin tracing from breakpoint_address (int): address to breakpoint at Returns: ``None``
def normpdf(x, mu, sigma):
    """
    Describes the relative likelihood that a real-valued random variable X
    will take on a given value.

    http://en.wikipedia.org/wiki/Probability_density_function
    """
    # abs(sigma) makes the density symmetric in the sign of sigma.
    sd = abs(sigma)
    z = (x - mu) / sd
    return math.exp(-z * z / 2) / (math.sqrt(2 * pi) * sd)
Describes the relative likelihood that a real-valued random variable X will take on a given value. http://en.wikipedia.org/wiki/Probability_density_function
def __find_incongruities(self, op, index):
    """
    Private method. Finds gaps and overlaps in a striplog. Called by
    find_gaps() and find_overlaps().

    Args:
        op (operator): ``operator.gt`` or ``operator.lt``
        index (bool): If ``True``, returns indices of intervals with
            gaps after them.

    Returns:
        Striplog: A striplog of all the gaps. A sort of anti-striplog.
    """
    # A single interval cannot have a gap or an overlap.
    if len(self) == 1:
        return

    hits = []       # indices of intervals followed by a gap/overlap
    intervals = []  # the gap/overlap intervals themselves

    # Depth-ordered logs compare each base against the next top;
    # elevation-ordered logs compare the other way around.
    if self.order == 'depth':
        one, two = 'base', 'top'
    else:
        one, two = 'top', 'base'

    for i, iv in enumerate(self[:-1]):
        next_iv = self[i+1]
        if op(getattr(iv, one), getattr(next_iv, two)):
            hits.append(i)

            top = getattr(iv, one)
            base = getattr(next_iv, two)
            iv_gap = Interval(top, base)
            intervals.append(iv_gap)

    # NOTE(review): when nothing is found, all paths fall through and
    # return None (not an empty list/Striplog) — callers must handle that.
    if index and hits:
        return hits
    elif intervals:
        return Striplog(intervals)
    else:
        return
Private method. Finds gaps and overlaps in a striplog. Called by find_gaps() and find_overlaps(). Args: op (operator): ``operator.gt`` or ``operator.lt`` index (bool): If ``True``, returns indices of intervals with gaps after them. Returns: Striplog: A striplog of all the gaps. A sort of anti-striplog.
def __process_results(results):
    """Processes the result from __query to get valid json from every entry.

    :param results: Results from __query
    :type results: str
    :returns: python list of dictionaries containing the relevant results.
    :rtype: list
    """
    if 'no match' in results and 'returning 0 elements' in results:
        return []

    result_list = []
    # The first and last blank-line-separated paragraphs are comments
    # emitted by the tool, so they are dropped.
    for paragraph in results.split(sep='\n\n')[1:-1]:
        entry = {}
        for line in paragraph.split('\n'):
            if not line:
                continue
            key, val = line.split(': ')
            entry[key.replace('-', '')] = val.strip(' ')
        result_list.append(entry)
    return result_list
Processes the result from __query to get valid json from every entry. :param results: Results from __query :type results: str :returns: python list of dictionaries containing the relevant results. :rtype: list
def all(cls, domain=None):
    """
    Return all sites
    @param domain: The domain to filter by
    @type domain: Domain
    @rtype: list of Site
    """
    Site = cls
    query = Session.query(Site)

    if domain:
        # BUG FIX: Query.filter() is generative and returns a *new* Query;
        # the original discarded the return value, so the domain filter
        # was silently ignored and all sites were returned.
        query = query.filter(Site.domain == domain)

    return query.all()
Return all sites @param domain: The domain to filter by @type domain: Domain @rtype: list of Site
def compute_samples_displays(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
) -> study.ComputeDisplaysResult:
    """Computes SamplesDisplays in the supplied Circuit or Schedule.

    Args:
        program: The circuit or schedule to simulate.
        param_resolver: Parameters to run with the program.

    Returns:
        ComputeDisplaysResult for the simulation.
    """
    # Delegate to the sweep variant with a single resolver and unwrap
    # the single result.
    resolver = study.ParamResolver(param_resolver)
    results = self.compute_samples_displays_sweep(program, resolver)
    return results[0]
Computes SamplesDisplays in the supplied Circuit or Schedule. Args: program: The circuit or schedule to simulate. param_resolver: Parameters to run with the program. Returns: ComputeDisplaysResult for the simulation.
def print_version(self, file=None):
    """
    Outputs version information to the file if specified, or to
    the io_manager's stdout if available, or to sys.stdout.
    """
    optparse.OptionParser.print_version(self, file)
    # BUG FIX: when `file` is None, optparse writes to sys.stdout and the
    # original `file.flush()` raised AttributeError on None; flush the
    # stream that actually received the output.
    if file is None:
        sys.stdout.flush()
    else:
        file.flush()
Outputs version information to the file if specified, or to the io_manager's stdout if available, or to sys.stdout.
def set_autoindent(self, value=None):
    """Set the autoindent flag, checking for readline support.

    If called with no arguments, it acts as a toggle."""
    # Enabling (or toggling) requires readline; without it, force off.
    if value != 0 and not self.has_readline:
        if os.name == 'posix':
            warn("The auto-indent feature requires the readline library")
        self.autoindent = 0
        return
    if value is None:
        self.autoindent = not self.autoindent
    else:
        self.autoindent = value
Set the autoindent flag, checking for readline support. If called with no arguments, it acts as a toggle.
def distinct(self, *args, **_filter):
    """Return all the unique (distinct) values for the given ``columns``.
    ::

        # returns only one row per year, ignoring the rest
        table.distinct('year')
        # works with multiple columns, too
        table.distinct('year', 'country')
        # you can also combine this with a filter
        table.distinct('year', country='China')
    """
    # A table that does not exist yields an empty iterator, not an error.
    if not self.exists:
        return iter([])

    columns = []
    clauses = []
    for column in args:
        # Raw SQLAlchemy clause elements act as extra filters; plain
        # strings are treated as column names to select.
        if isinstance(column, ClauseElement):
            clauses.append(column)
        else:
            if not self.has_column(column):
                raise DatasetException("No such column: %s" % column)
            columns.append(self.table.c[column])

    # Combine keyword filters and clause-element filters into one WHERE.
    clause = self._args_to_clause(_filter, clauses=clauses)

    if not len(columns):
        return iter([])

    q = expression.select(columns,
                          distinct=True,
                          whereclause=clause,
                          order_by=[c.asc() for c in columns])
    return self.db.query(q)
Return all the unique (distinct) values for the given ``columns``. :: # returns only one row per year, ignoring the rest table.distinct('year') # works with multiple columns, too table.distinct('year', 'country') # you can also combine this with a filter table.distinct('year', country='China')
def list(payment):
    """
    List all the refunds for a payment.

    :param payment: The payment object or the payment id
    :type payment: resources.Payment|string
    :return: A collection of refunds
    :rtype resources.APIResourceCollection
    """
    # Accept either a Payment object or a bare id string.
    payment_id = payment.id if isinstance(payment, resources.Payment) else payment
    url = routes.url(routes.REFUND_RESOURCE, payment_id=payment_id)
    response, _ = HttpClient().get(url)
    return resources.APIResourceCollection(resources.Refund, **response)
List all the refunds for a payment. :param payment: The payment object or the payment id :type payment: resources.Payment|string :return: A collection of refunds :rtype resources.APIResourceCollection
def make_model(self, add_indra_json=True):
    """Assemble the CX network from the collected INDRA Statements.

    This method assembles a CX network from the set of INDRA Statements.
    The assembled network is set as the assembler's cx argument.

    Parameters
    ----------
    add_indra_json : Optional[bool]
        If True, the INDRA Statement JSON annotation is added to each edge
        in the network. Default: True

    Returns
    -------
    cx_str : str
        The json serialized CX model.
    """
    self.add_indra_json = add_indra_json

    # Dispatch each statement to the handler for its type.
    for stmt in self.statements:
        # NOTE(review): the first two checks are independent `if`s while
        # the rest chain off the second via `elif` — presumably harmless
        # because the statement classes are disjoint, but worth confirming.
        if isinstance(stmt, Modification):
            self._add_modification(stmt)
        if isinstance(stmt, SelfModification):
            self._add_self_modification(stmt)
        elif isinstance(stmt, RegulateActivity) or \
            isinstance(stmt, RegulateAmount):
            self._add_regulation(stmt)
        elif isinstance(stmt, Complex):
            self._add_complex(stmt)
        elif isinstance(stmt, Gef):
            self._add_gef(stmt)
        elif isinstance(stmt, Gap):
            self._add_gap(stmt)
        elif isinstance(stmt, Influence):
            self._add_influence(stmt)

    # Attach network-level metadata before serializing.
    network_description = ''
    self.cx['networkAttributes'].append({'n': 'name',
                                         'v': self.network_name})
    self.cx['networkAttributes'].append({'n': 'description',
                                         'v': network_description})
    cx_str = self.print_cx()
    return cx_str
Assemble the CX network from the collected INDRA Statements. This method assembles a CX network from the set of INDRA Statements. The assembled network is set as the assembler's cx argument. Parameters ---------- add_indra_json : Optional[bool] If True, the INDRA Statement JSON annotation is added to each edge in the network. Default: True Returns ------- cx_str : str The json serialized CX model.
def make_dot(self, filename_or_stream, auts):
    """Create a graphviz .dot representation of the automaton.

    ``filename_or_stream`` may be a path (a file is opened for writing)
    or an already-open writable stream. ``auts`` is an iterable of
    automata, each rendered as its own digraph.
    """
    if isinstance(filename_or_stream, str):
        # BUG FIX: the Python 2 ``file(...)`` builtin no longer exists;
        # use ``open`` to create the output stream.
        stream = open(filename_or_stream, 'w')
    else:
        stream = filename_or_stream
    dot = DotFile(stream)
    for aut in auts:
        dot.start(aut.name)
        dot.node('shape=Mrecord width=1.5')
        for st in aut.states:
            # Node label: state name plus its entering/leaving actions.
            label = st.name
            if st.entering:
                label += '|%s' % '\\l'.join(str(e) for e in st.entering)
            if st.leaving:
                label += '|%s' % '\\l'.join(str(l) for l in st.leaving)
            label = '{%s}' % label
            dot.state(st.name, label=label)
        for st in aut.states:
            for tr in st.transitions:
                dot.transition(tr.s_from.name, tr.s_to.name, tr.when)
        dot.end()
    dot.finish()
Create a graphviz .dot representation of the automaton.
def main():
    """
    NAME
        convert_samples.py

    DESCRIPTION
        takes an er_samples or magic_measurements format file and creates an orient.txt template

    SYNTAX
        convert_samples.py [command line options]

    OPTIONS
        -f FILE: specify input file, default is er_samples.txt
        -F FILE: specify output file, default is: orient_LOCATION.txt

    INPUT FORMAT
        er_samples.txt or magic_measurements format file

    OUTPUT
        orient.txt format file
    """
    #
    # initialize variables
    #
    version_num=pmag.get_version()  # NOTE(review): unused afterwards
    orient_file,samp_file = "orient","er_samples.txt"
    args=sys.argv
    dir_path,out_path='.','.'
    default_outfile = True
    #
    # parse command line options
    #
    if '-WD' in args:
        ind=args.index('-WD')
        dir_path=args[ind+1]
    if '-OD' in args:
        ind=args.index('-OD')
        out_path=args[ind+1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-F" in args:
        ind=args.index("-F")
        orient_file=sys.argv[ind+1]
        default_outfile = False
    if "-f" in args:
        ind=args.index("-f")
        samp_file=sys.argv[ind+1]
    orient_file=out_path+'/'+orient_file
    samp_file=dir_path+'/'+samp_file
    #
    # read in file to convert
    #
    ErSamples=[]
    Required=['sample_class','sample_type','sample_lithology','lat','long']
    Samps,file_type=pmag.magic_read(samp_file)
    Locs=[]
    # Column mapping: OrKeys are the orient.txt headers, SampKeys the
    # corresponding source columns (set below per input format).
    OrKeys=['sample_name','site_name','mag_azimuth','field_dip','sample_class','sample_type','sample_lithology','lat','long','stratigraphic_height','method_codes','site_description']
    print("file_type", file_type) # LJ
    if file_type.lower()=='er_samples':
        SampKeys=['er_sample_name','er_site_name','sample_azimuth','sample_dip','sample_class','sample_type','sample_lithology','sample_lat','sample_lon','sample_height','magic_method_codes','er_sample_description']
    elif file_type.lower()=='magic_measurements':
        SampKeys=['er_sample_name','er_site_name']
    else:
        # NOTE(review): execution continues after this message with
        # SampKeys unbound, so an unknown format raises NameError later;
        # presumably an exit was intended here — confirm.
        print('wrong file format; must be er_samples or magic_measurements only')
    for samp in Samps:
        if samp['er_location_name'] not in Locs:Locs.append(samp['er_location_name']) # get all the location names
    # Write one orient file per location.
    for location_name in Locs:
        loc_samps=pmag.get_dictitem(Samps,'er_location_name',location_name,'T')
        OrOut=[]
        for samp in loc_samps:
            if samp['er_sample_name'] not in ErSamples:
                ErSamples.append(samp['er_sample_name'])
                OrRec={}
                if 'sample_date' in list(samp.keys()) and samp['sample_date'].strip()!="":
                    date=samp['sample_date'].split(':')
                    OrRec['date']=date[1]+'/'+date[2]+'/'+date[0][2:4]
                for i in range(len(SampKeys)):
                    if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]
                for key in Required:
                    if key not in list(OrRec.keys()):OrRec[key]="" # fill in blank required keys
                OrOut.append(OrRec)
        loc=location_name.replace(" ","_")
        if default_outfile:
            outfile=orient_file+'_'+loc+'.txt'
        else:
            outfile=orient_file
        pmag.magic_write(outfile,OrOut,location_name)
        print("Data saved in: ", outfile)
NAME convert_samples.py DESCRIPTION takes an er_samples or magic_measurements format file and creates an orient.txt template SYNTAX convert_samples.py [command line options] OPTIONS -f FILE: specify input file, default is er_samples.txt -F FILE: specify output file, default is: orient_LOCATION.txt INPUT FORMAT er_samples.txt or magic_measurements format file OUTPUT orient.txt format file
def get_query_parameters(args, cell_body, date_time=None):
    """Extract query parameters from cell body if provided
    Also validates the cell body schema using jsonschema to catch errors before sending the http
    request. This validation isn't complete, however; it does not validate recursive schemas,
    but it acts as a good filter against most simple schemas

    Args:
      args: arguments passed to the magic cell
      cell_body: body of the magic cell
      date_time: The timestamp at which the date-time related parameters need to be resolved.
          Defaults to the current time.

    Returns:
      Validated object containing query parameters
    """
    # BUG FIX: the original default was `datetime.datetime.now()` in the
    # signature, which is evaluated once at import time, so every later
    # call silently resolved date-time parameters against a stale
    # timestamp. Resolve the default at call time instead.
    if date_time is None:
        date_time = datetime.datetime.now()

    env = google.datalab.utils.commands.notebook_environment()
    config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)
    sql = args['query']
    if sql is None:
        raise Exception('Cannot extract query parameters in non-query cell')

    # Validate query_params
    if config:
        jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)

    config = config or {}
    config_parameters = config.get('parameters', [])
    return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time)
Extract query parameters from cell body if provided Also validates the cell body schema using jsonschema to catch errors before sending the http request. This validation isn't complete, however; it does not validate recursive schemas, but it acts as a good filter against most simple schemas Args: args: arguments passed to the magic cell cell_body: body of the magic cell date_time: The timestamp at which the date-time related parameters need to be resolved. Returns: Validated object containing query parameters
def get_shortlink(self, shortlink_id_or_url):
    """Retrieve registered shortlink info

    Arguments:
        shortlink_id_or_url:
            Shortlink id or url, assigned by mCASH
    """
    url = shortlink_id_or_url
    # A bare id (no scheme) is expanded to the full shortlink endpoint.
    if "://" not in url:
        url = self.merchant_api_base_url + '/shortlink/' + url + '/'
    return self.do_req('GET', url).json()
Retrieve registered shortlink info Arguments: shortlink_id_or_url: Shortlink id or url, assigned by mCASH
def from_timestamp_pb(cls, stamp):
    """Parse RFC 3339-compliant timestamp, preserving nanoseconds.

    Args:
        stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message

    Returns:
        :class:`DatetimeWithNanoseconds`: an instance matching the timestamp message
    """
    # BUG FIX: the original computed ``int(stamp.seconds * 1e6)``, which
    # routes the value through a float and can lose precision for
    # timestamps far from the epoch; keep the arithmetic in integers.
    microseconds = int(stamp.seconds) * 10**6
    bare = from_microseconds(microseconds)
    return cls(
        bare.year,
        bare.month,
        bare.day,
        bare.hour,
        bare.minute,
        bare.second,
        nanosecond=stamp.nanos,
        tzinfo=pytz.UTC,
    )
Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp message
def get_module_uuid(plpy, moduleid):
    """Retrieve page uuid from legacy moduleid."""
    plan = plpy.prepare("SELECT uuid FROM modules WHERE moduleid = $1;",
                        ('text',))
    result = plpy.execute(plan, (moduleid,), 1)
    if not result:
        # No matching module: implicit None in the original, explicit here.
        return None
    return result[0]['uuid']
Retrieve page uuid from legacy moduleid.
def apply_async(self, args, kwargs, **options):
    """
    Put this task on the Celery queue as a singleton. Only one of this type
    of task with its distinguishing args/kwargs will be allowed on the
    queue at a time. Subsequent duplicate tasks called while this task is
    still running will just latch on to the results of the running task by
    synchronizing the task uuid. Additionally, identical task calls will
    return those results for the next ``cache_duration`` seconds.
    """
    self._validate_required_class_vars()

    cache_key = self._get_cache_key(**kwargs)

    # Check for an already-computed and cached result
    task_id = self.cache.get(cache_key)  # Check for the cached result
    if task_id:
        # We've already built this result, just latch on to the task that
        # did the work
        logging.info(
            'Found existing cached and completed task: %s', task_id)
        return self.AsyncResult(task_id)

    # Check for an in-progress equivalent task to avoid duplicating work
    task_id = self.cache.get('herd:%s' % cache_key)
    if task_id:
        logging.info('Found existing in-progress task: %s', task_id)
        return self.AsyncResult(task_id)

    # It's not cached and it's not already running. Use an atomic lock to
    # start the task, ensuring there isn't a race condition that could
    # result in multiple identical tasks being fired at once.
    with self.cache.lock('lock:%s' % cache_key):
        task_meta = super(JobtasticTask, self).apply_async(
            args,
            kwargs,
            **options
        )
        logging.info('Current status: %s', task_meta.status)
        if task_meta.status in (PROGRESS, PENDING):
            # Mark this task id as the "herd leader" so duplicate calls
            # latch onto it instead of enqueuing their own copy.
            self.cache.set(
                'herd:%s' % cache_key,
                task_meta.task_id,
                timeout=self.herd_avoidance_timeout)
            logging.info(
                'Setting herd-avoidance cache for task: %s', cache_key)

    return task_meta
Put this task on the Celery queue as a singleton. Only one of this type of task with its distinguishing args/kwargs will be allowed on the queue at a time. Subsequent duplicate tasks called while this task is still running will just latch on to the results of the running task by synchronizing the task uuid. Additionally, identical task calls will return those results for the next ``cache_duration`` seconds.
def _dbsetup(self):
    """ Create/open local SQLite database """
    self._dbconn = sqlite3.connect(self._db_file)

    # Create the two working tables in one pass.
    # ('ord' is used because 'order' is a reserved word.)
    for ddl in (
        '''CREATE TABLE multiplicons (id, genome_x, list_x, parent, genome_y, list_y, level, number_of_anchorpoints, profile_length, begin_x, end_x, begin_y, end_y, is_redundant)''',
        '''CREATE TABLE segments (id, multiplicon, genome, list, first, last, ord)''',
    ):
        self._dbconn.execute(ddl)
    self._dbconn.commit()
Create/open local SQLite database
def compute_alignments(self, prev_state, precomputed_values, mask=None):
    """
    Compute the alignment weights based on the previous state.
    """
    # Project the previous decoder state through the alignment matrix Wa.
    WaSp = T.dot(prev_state, self.Wa)
    # Precomputed encoder-side projection (Ua * H).
    UaH = precomputed_values
    # For test time the UaH will be (time, output_dim)
    if UaH.ndim == 2:
        preact = WaSp[:, None, :] + UaH[None, :, :]
    else:
        preact = WaSp[:, None, :] + UaH
    act = T.activate(preact, 'tanh')
    align_scores = T.dot(act, self.Va)  # ~ (batch, time)
    if mask:
        # Push masked-out positions strongly negative before the softmax
        # so their weights become ~0.
        # NOTE(review): `if mask` relies on the truthiness of the mask
        # object — presumably mask is either None or a symbolic variable
        # (always truthy); confirm it is never a plain array.
        mask = (1 - mask) * -99.00
        if align_scores.ndim == 3:
            align_scores += mask[None, :]
        else:
            align_scores += mask
    # Normalize scores into alignment weights over the time axis.
    align_weights = T.nnet.softmax(align_scores)
    return align_weights
Compute the alignment weights based on the previous state.