Columns: code (string, length 75–104k) · docstring (string, length 1–46.9k)
def _split_stock_code(self, code):
    # Do not use the built-in split function in Python: it cannot handle
    # some stock strings correctly, for instance US..DJI, where the dot
    # itself is part of the original code.
    stock_str = str(code)
    split_loc = stock_str.find(".")
    if 0 <= split_loc < len(stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP:
        market_str = stock_str[0:split_loc]
        partial_stock_str = stock_str[split_loc + 1:]
        return RET_OK, (market_str, partial_stock_str)
    else:
        error_str = ERROR_STR_PREFIX + "format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)" % stock_str
        return RET_ERROR, error_str
Do not use the built-in split function in Python: it cannot handle some stock strings correctly, for instance US..DJI, where the dot itself is part of the original code.
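A minimal standalone sketch of the same splitting logic (a hypothetical MKT_MAP and plain return values stand in for the API's constants), showing why find-based splitting preserves the inner dot of US..DJI:

MKT_MAP = {"US": 1, "HK": 2, "SZ": 3}  # hypothetical stand-in

def split_stock_code(code):
    stock_str = str(code)
    split_loc = stock_str.find(".")  # first dot only, unlike str.split(".")
    if 0 <= split_loc < len(stock_str) - 1 and stock_str[:split_loc] in MKT_MAP:
        return stock_str[:split_loc], stock_str[split_loc + 1:]
    raise ValueError("format of %s is wrong" % stock_str)

print(split_stock_code("US..DJI"))   # ('US', '.DJI') -- inner dot preserved
print(split_stock_code("HK.00700"))  # ('HK', '00700')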
def get_variable_for_feature(self, feature_key, variable_key):
    """ Get the variable with the given variable key for the given feature.

    Args:
      feature_key: The key of the feature for which we are getting the variable.
      variable_key: The key of the variable we are getting.

    Returns:
      Variable with the given key in the given variation.
    """
    feature = self.feature_key_map.get(feature_key)
    if not feature:
        self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key)
        return None

    if variable_key not in feature.variables:
        self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key)
        return None

    return feature.variables.get(variable_key)
Get the variable with the given variable key for the given feature. Args: feature_key: The key of the feature for which we are getting the variable. variable_key: The key of the variable we are getting. Returns: Variable with the given key in the given variation.
def get_institute_usage(institute, start, end):
    """Return a tuple of cpu hours and number of jobs for an institute
    for a given period.

    Keyword arguments:
    institute -- the institute
    start -- start date
    end -- end date
    """
    try:
        cache = InstituteCache.objects.get(
            institute=institute, date=datetime.date.today(),
            start=start, end=end)
        return cache.cpu_time, cache.no_jobs
    except InstituteCache.DoesNotExist:
        return 0, 0
Return a tuple of cpu hours and number of jobs for an institute for a given period Keyword arguments: institute -- the institute start -- start date end -- end date
def itruediv(a, b):
    "Same as a /= b."
    # Coerce an integer operand to float so true division is performed even
    # under Python 2 semantics (the old `long` type folds into `int` on Python 3).
    if isinstance(a, int):
        a = float(a)
    a /= b
    return a
Same as a /= b.
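A quick check, assuming the itruediv above: the promotion to float only matters under Python 2, where int /= int would floor; on Python 3 the result is the same either way.

x = itruediv(7, 2)
print(x, type(x))  # 3.5 <class 'float'>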
def filename(self, value):
    """Deprecated, use `source` instead."""
    warnings.warn(
        "The 'filename' attribute will be removed in future versions. "
        "Use 'source' instead.",
        DeprecationWarning,
        stacklevel=2
    )
    self.source = value
Deprecated, use `source` instead.
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
    """Writes a buffer into memory. This routine assumes that memory has
    already been erased.
    """
    xfer_count = 0
    xfer_bytes = 0
    xfer_total = len(buf)
    xfer_base = addr

    while xfer_bytes < xfer_total:
        if __verbose and xfer_count % 512 == 0:
            print("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes,
                                                xfer_bytes // 1024,
                                                xfer_total // 1024))
        if progress and xfer_count % 2 == 0:
            progress(progress_addr, xfer_base + xfer_bytes - progress_addr,
                     progress_size)

        # Set mem write address
        set_address(xfer_base + xfer_bytes)

        # Send DNLOAD with fw data
        chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
        __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE,
                            buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT)

        # Execute last command
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
            raise Exception("DFU: write memory failed")

        # Check command state
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
            raise Exception("DFU: write memory failed")

        xfer_count += 1
        xfer_bytes += chunk
Writes a buffer into memory. This routine assumes that memory has already been erased.
def from_yaml(self, node):
    '''Implements a !from_yaml constructor with the following syntax:
        !from_yaml filename key

    Arguments:
        filename:   Filename of external YAML document from which to load,
                    relative to the current YAML file.
        key:        Key from external YAML document to return,
                    using a dot-separated syntax for nested keys.

    Examples:
        !from_yaml external.yml pop
        !from_yaml external.yml foo.bar.pop
        !from_yaml "another file.yml" "foo bar.snap crackle.pop"
    '''

    # Load the content from the node, as a scalar
    content = self.construct_scalar(node)

    # Split on unquoted spaces
    try:
        parts = shlex.split(content)
    except UnicodeEncodeError:
        raise yaml.YAMLError('Non-ASCII arguments to !from_yaml are unsupported')

    if len(parts) != 2:
        raise yaml.YAMLError('Two arguments expected to !from_yaml')
    filename, key = parts

    # path is relative to the current YAML document
    path = os.path.join(self._root, filename)

    # Load the other YAML document
    with open(path, 'r') as f:
        doc = yaml.load(f, self.__class__)

    # Retrieve the key
    try:
        cur = doc
        for k in key.split('.'):
            cur = cur[k]
    except KeyError:
        raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
    return cur
Implements a !from_yaml constructor with the following syntax: !from_yaml filename key Arguments: filename: Filename of external YAML document from which to load, relative to the current YAML file. key: Key from external YAML document to return, using a dot-separated syntax for nested keys. Examples: !from_yaml external.yml pop !from_yaml external.yml foo.bar.pop !from_yaml "another file.yml" "foo bar.snap crackle.pop"
def state(self) -> SessionState:
    """The state of the managed Spark session."""
    if self.session_id is None:
        raise ValueError("session not yet started")
    session = self.client.get_session(self.session_id)
    if session is None:
        raise ValueError("session not found - it may have been shut down")
    return session.state
The state of the managed Spark session.
def set_xticklabels_position(self, position):
    """Specify the position of the axis tick labels.

    This is generally only useful for multiplots containing only one row.
    This can be used to e.g. alternatively draw the tick labels on the
    bottom or the top of the subplot.

    :param position: 'top' or 'bottom' to specify the position of the
        tick labels.
    """
    pgfplots_translation = {'top': 'right', 'bottom': 'left'}
    fixed_position = pgfplots_translation[position]
    self.xticklabel_pos = fixed_position
Specify the position of the axis tick labels. This is generally only useful for multiplots containing only one row. This can be used to e.g. alternatively draw the tick labels on the bottom or the top of the subplot. :param position: 'top' or 'bottom' to specify the position of the tick labels.
def sample_outcomes(probs, n):
    """
    For a discrete probability distribution ``probs`` with outcomes
    0, 1, ..., k-1 draw ``n`` random samples.

    :param list probs: A list of probabilities.
    :param Number n: The number of random samples to draw.
    :return: An array of samples drawn from distribution probs over 0, ..., len(probs) - 1
    :rtype: numpy.ndarray
    """
    dist = np.cumsum(probs)
    rs = np.random.rand(n)
    return np.array([(np.where(r < dist)[0][0]) for r in rs])
For a discrete probability distribution ``probs`` with outcomes 0, 1, ..., k-1 draw ``n`` random samples. :param list probs: A list of probabilities. :param Number n: The number of random samples to draw. :return: An array of samples drawn from distribution probs over 0, ..., len(probs) - 1 :rtype: numpy.ndarray
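A small sanity check, assuming the sample_outcomes above: empirical frequencies should approach the distribution as n grows. The per-sample np.where scan could equivalently be replaced by the vectorized np.searchsorted(dist, rs, side='right'):

import numpy as np

np.random.seed(0)
samples = sample_outcomes([0.2, 0.5, 0.3], 10000)
print(np.bincount(samples) / 10000.0)  # roughly [0.2, 0.5, 0.3]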
def twoDimensionalHistogram(title, title_x, title_y,
                            z, bins_x, bins_y,
                            lim_x=None, lim_y=None,
                            vmin=None, vmax=None):
    """
    Create a two-dimensional histogram plot or binned map.

    If using the outputs of np.histogram2d, remember to transpose the
    histogram.
    """
    plt.figure()

    mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)

    if vmin is not None and vmin == vmax:
        plt.pcolor(mesh_x, mesh_y, z)
    else:
        plt.pcolor(mesh_x, mesh_y, z, vmin=vmin, vmax=vmax)
    plt.xlabel(title_x)
    plt.ylabel(title_y)
    plt.title(title)
    plt.colorbar()

    if lim_x:
        plt.xlim(lim_x[0], lim_x[1])
    if lim_y:
        plt.ylim(lim_y[0], lim_y[1])
Create a two-dimensional histogram plot or binned map. If using the outputs of np.histogram2d, remember to transpose the histogram.
def _fw_rule_update(self, drvr_name, data):
    """Firewall Rule update routine.

    Function to decode the updated rules and call routines that
    in turn call the device routines to update rules.
    """
    LOG.debug("FW Update %s", data)
    tenant_id = data.get('firewall_rule').get('tenant_id')
    fw_rule = data.get('firewall_rule')
    rule = self._fw_rule_decode_store(data)
    rule_id = fw_rule.get('id')
    if tenant_id not in self.fwid_attr or not (
            self.fwid_attr[tenant_id].is_rule_present(rule_id)):
        LOG.error("Incorrect update info for tenant %s", tenant_id)
        return
    self.fwid_attr[tenant_id].rule_update(rule_id, rule)
    self._check_update_fw(tenant_id, drvr_name)
Firewall Rule update routine. Function to decode the updated rules and call routines that in turn call the device routines to update rules.
def sync(self):
    """Keep the repository in sync.

    This method will synchronize the repository with its 'origin',
    fetching newest objects and updating references. It uses low level
    commands which allow to keep track of which things have changed in
    the repository.

    The method also returns a list of hashes related to the new commits
    fetched during the process.

    :returns: list of new commits

    :raises RepositoryError: when an error occurs synchronizing the
        repository
    """
    pack_name, refs = self._fetch_pack()

    if pack_name:
        commits = self._read_commits_from_pack(pack_name)
    else:
        commits = []
        logger.debug("Git repository %s (%s) does not have any new object",
                     self.uri, self.dirpath)

    self._update_references(refs)

    logger.debug("Git repository %s (%s) is synced",
                 self.uri, self.dirpath)

    return commits
Keep the repository in sync. This method will synchronize the repository with its 'origin', fetching newest objects and updating references. It uses low level commands which allow to keep track of which things have changed in the repository. The method also returns a list of hashes related to the new commits fetched during the process. :returns: list of new commits :raises RepositoryError: when an error occurs synchronizing the repository
def predict(self, dataset, output_type='class', batch_size=64):
    """
    Return predictions for ``dataset``, using the trained logistic
    regression model. Predictions can be generated as class labels,
    probabilities that the target value is True, or margins (i.e. the
    distance of the observations from the hyperplane separating the
    classes). `probability_vector` returns a vector of probabilities by
    each class.

    For each new example in ``dataset``, the margin---also known as the
    linear predictor---is the inner product of the example and the model
    coefficients. The probability is obtained by passing the margin
    through the logistic function. Predicted classes are obtained by
    thresholding the predicted probabilities at 0.5. If you would like to
    threshold predictions at a different probability level, you can use
    the Turi Create evaluation toolkit.

    Parameters
    ----------
    dataset : SFrame | SArray | turicreate.Image
        The images to be classified.
        If dataset is an SFrame, it must have columns with the same names
        as the features used for model training, but does not require a
        target column. Additional columns are ignored.

    output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional
        Form of the predictions which are one of:

        - 'probability': Prediction probability associated with the True
          class (not applicable for multi-class classification)
        - 'probability_vector': Prediction probability associated with each
          class as a vector. The probability of the first class (sorted
          alphanumerically by name of the class in the training set) is in
          position 0 of the vector, the second in position 1 and so on.
        - 'class': Class prediction. For multi-class classification, this
          returns the class with maximum probability.

    batch_size : int, optional
        If you are getting memory errors, try decreasing this value. If
        you have a powerful computer, increasing this value may improve
        performance.

    Returns
    -------
    out : SArray
        An SArray with model predictions. If `dataset` is a single image,
        the return value will be a single prediction.

    See Also
    --------
    create, evaluate, classify

    Examples
    --------
    >>> probability_predictions = model.predict(data, output_type='probability')
    >>> margin_predictions = model.predict(data, output_type='margin')
    >>> class_predictions = model.predict(data, output_type='class')
    """
    if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
        raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
    if batch_size < 1:
        raise ValueError("'batch_size' must be greater than or equal to 1")

    dataset, unpack = self._canonize_input(dataset)
    extracted_features = self._extract_features(dataset, batch_size=batch_size)
    return unpack(self.classifier.predict(extracted_features, output_type=output_type))
Return predictions for ``dataset``, using the trained logistic regression model. Predictions can be generated as class labels, probabilities that the target value is True, or margins (i.e. the distance of the observations from the hyperplane separating the classes). `probability_vector` returns a vector of probabilities by each class. For each new example in ``dataset``, the margin---also known as the linear predictor---is the inner product of the example and the model coefficients. The probability is obtained by passing the margin through the logistic function. Predicted classes are obtained by thresholding the predicted probabilities at 0.5. If you would like to threshold predictions at a different probability level, you can use the Turi Create evaluation toolkit. Parameters ---------- dataset : SFrame | SArray | turicreate.Image The images to be classified. If dataset is an SFrame, it must have columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional Form of the predictions which are one of: - 'probability': Prediction probability associated with the True class (not applicable for multi-class classification) - 'probability_vector': Prediction probability associated with each class as a vector. The probability of the first class (sorted alphanumerically by name of the class in the training set) is in position 0 of the vector, the second in position 1 and so on. - 'class': Class prediction. For multi-class classification, this returns the class with maximum probability. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SArray An SArray with model predictions. If `dataset` is a single image, the return value will be a single prediction. See Also ---------- create, evaluate, classify Examples ---------- >>> probability_predictions = model.predict(data, output_type='probability') >>> margin_predictions = model.predict(data, output_type='margin') >>> class_predictions = model.predict(data, output_type='class')
def insert(self, index, option):
    """
    Insert a new `option` in the ButtonGroup at `index`.

    :param int index: The index of where to insert the option.

    :param string/List option: The option to append to the ButtonGroup.
        If a 2D list is specified, the first element is the text, the
        second is the value.
    """
    self._options.insert(index, self._parse_option(option))
    self._refresh_options()
    self.resize(self._width, self._height)
Insert a new `option` in the ButtonGroup at `index`. :param int index: The index of where to insert the option. :param string/List option: The option to append to the ButtonGroup. If a 2D list is specified, the first element is the text, the second is the value.
def Generate(self, items, token=None):
    """Generates archive from a given collection.

    Iterates the collection and generates an archive by yielding contents
    of every referenced AFF4Stream.

    Args:
      items: Iterable of rdf_client_fs.StatEntry objects
      token: User's ACLToken.

    Yields:
      Binary chunks comprising the generated archive.
    """
    del token  # unused, to be removed with AFF4 code

    client_ids = set()
    for item_batch in collection.Batch(items, self.BATCH_SIZE):

        client_paths = set()
        for item in item_batch:
            try:
                client_path = flow_export.CollectionItemToClientPath(
                    item, self.client_id)
            except flow_export.ItemNotExportableError:
                continue

            if not self.predicate(client_path):
                self.ignored_files.add(client_path)
                self.processed_files.add(client_path)
                continue

            client_ids.add(client_path.client_id)
            client_paths.add(client_path)

        for chunk in file_store.StreamFilesChunks(client_paths):
            self.processed_files.add(chunk.client_path)
            for output in self._WriteFileChunk(chunk=chunk):
                yield output

        self.processed_files |= client_paths - (
            self.ignored_files | self.archived_files)

    if client_ids:
        for client_id, client_info in iteritems(
                data_store.REL_DB.MultiReadClientFullInfo(client_ids)):
            client = api_client.ApiClient().InitFromClientInfo(client_info)
            for chunk in self._GenerateClientInfo(client_id, client):
                yield chunk

    for chunk in self._GenerateDescription():
        yield chunk

    yield self.archive_generator.Close()
Generates archive from a given collection. Iterates the collection and generates an archive by yielding contents of every referenced AFF4Stream. Args: items: Iterable of rdf_client_fs.StatEntry objects token: User's ACLToken. Yields: Binary chunks comprising the generated archive.
def _handleEsc(self):
    """ Handler for ESC keypresses """
    if self._typingSms:
        self.serial.write(self.ESC_CHARACTER)
        self._typingSms = False
        self.inputBuffer = []
        self.cursorPos = 0
Handler for ESC keypresses
def get_taskruns(project_id, limit=100, offset=0, last_id=None):
    """Return a list of task runs for a given project ID.

    :param project_id: PYBOSSA Project ID
    :type project_id: integer
    :param limit: Number of returned items, default 100
    :type limit: integer
    :param offset: Offset for the query, default 0
    :type offset: integer
    :param last_id: id of the last taskrun, used for pagination.
        If provided, offset is ignored
    :type last_id: integer
    :rtype: list
    :returns: A list of task runs for the given project ID
    """
    if last_id is not None:
        params = dict(limit=limit, last_id=last_id)
    else:
        params = dict(limit=limit, offset=offset)
        print(OFFSET_WARNING)
    params['project_id'] = project_id
    res = _pybossa_req('get', 'taskrun', params=params)
    if isinstance(res, list):
        return [TaskRun(taskrun) for taskrun in res]
    else:
        raise TypeError
Return a list of task runs for a given project ID. :param project_id: PYBOSSA Project ID :type project_id: integer :param limit: Number of returned items, default 100 :type limit: integer :param offset: Offset for the query, default 0 :type offset: integer :param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored :type last_id: integer :rtype: list :returns: A list of task runs for the given project ID
def load_child_sections_for_section(context, section, count=None):
    '''
    Returns all child sections.

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    '''
    page = section.get_main_language_page()
    locale = context.get('locale_code')

    qs = SectionPage.objects.child_of(page).filter(
        language__is_main_language=True)

    if not locale:
        return qs[:count]

    return get_pages(context, qs, locale)
Returns all child sections If the `locale_code` in the context is not the main language, it will return the translations of the live articles.
def get_attributes(self):
    """
    Used by the uni_form_tags to get helper attributes
    """
    items = {}
    items['form_method'] = self.form_method.strip()
    items['form_tag'] = self.form_tag
    items['form_style'] = self.form_style.strip()

    if self.form_action:
        items['form_action'] = self.form_action.strip()
    if self.form_id:
        items['id'] = self.form_id.strip()
    if self.form_class:
        items['class'] = self.form_class.strip()
    if self.inputs:
        items['inputs'] = self.inputs
    if self.form_error_title:
        items['form_error_title'] = self.form_error_title.strip()
    if self.formset_error_title:
        items['formset_error_title'] = self.formset_error_title.strip()

    return items
Used by the uni_form_tags to get helper attributes
def pages(self, limit=0):
    """Return iterator for pages"""
    if limit > 0:
        self.iterator.limit = limit
    return self.iterator
Return iterator for pages
def parse_metadata(metadata_obj: Metadata, metadata_dictionary: dict) -> None:
    """ Adds to a Metadata object any DublinCore or dts:Extensions object
    found in the given dictionary

    :param metadata_obj:
    :param metadata_dictionary:
    """
    for key, value_set in metadata_dictionary.get(
            "https://w3id.org/dts/api#dublincore", [{}])[0].items():
        term = URIRef(key)
        for value_dict in value_set:
            metadata_obj.add(term, *dict_to_literal(value_dict))
    for key, value_set in metadata_dictionary.get(
            "https://w3id.org/dts/api#extensions", [{}])[0].items():
        term = URIRef(key)
        for value_dict in value_set:
            metadata_obj.add(term, *dict_to_literal(value_dict))
Adds to a Metadata object any DublinCore or dts:Extensions object found in the given dictionary :param metadata_obj: :param metadata_dictionary:
def get_command_line(self):
    """Returns the command line for the job."""
    # In Python 2 the command line is unicode, which needs to be converted
    # to string before pickling; in Python 3 it is bytes, which can be
    # unpickled directly.
    return loads(self.command_line) if isinstance(self.command_line, bytes) \
        else loads(self.command_line.encode())
Returns the command line for the job.
def fresh_jwt_required(fn):
    """
    A decorator to protect a Flask endpoint.

    If you decorate an endpoint with this, it will ensure that the
    requester has a valid and fresh access token before allowing the
    endpoint to be called.

    See also: :func:`~flask_jwt_extended.jwt_required`
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_fresh_jwt_in_request()
        return fn(*args, **kwargs)
    return wrapper
A decorator to protect a Flask endpoint. If you decorate an endpoint with this, it will ensure that the requester has a valid and fresh access token before allowing the endpoint to be called. See also: :func:`~flask_jwt_extended.jwt_required`
def draw_quadmesh(data, obj):
    """Returns the PGFPlots code for a graphics environment holding a
    rendering of the object.
    """
    content = []

    # Generate file name for current object
    filename, rel_filepath = files.new_filename(data, "img", ".png")

    # Get the dpi for rendering and store the original dpi of the figure
    dpi = data["dpi"]
    fig_dpi = obj.figure.get_dpi()
    obj.figure.set_dpi(dpi)

    # Render the object and save as png file
    from matplotlib.backends.backend_agg import RendererAgg

    cbox = obj.get_clip_box()
    width = int(round(cbox.extents[2]))
    height = int(round(cbox.extents[3]))
    ren = RendererAgg(width, height, dpi)
    obj.draw(ren)

    # Generate an image from the render buffer
    image = Image.frombuffer(
        "RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1
    )

    # Crop the image to the actual content (removing the regions otherwise
    # used for axes, etc.)
    # 'image.crop' expects the crop box to specify the left, upper, right, and
    # lower pixel. 'cbox.extents' gives the left, lower, right, and upper
    # pixel.
    box = (
        int(round(cbox.extents[0])),
        0,
        int(round(cbox.extents[2])),
        int(round(cbox.extents[3] - cbox.extents[1])),
    )
    cropped = image.crop(box)
    cropped.save(filename)

    # Restore the original dpi of the figure
    obj.figure.set_dpi(fig_dpi)

    # write the corresponding information to the TikZ file
    extent = obj.axes.get_xlim() + obj.axes.get_ylim()

    # Explicitly use \pgfimage as includegraphics command, as the default
    # \includegraphics fails unexpectedly in some cases
    ff = data["float format"]
    content.append(
        (
            "\\addplot graphics [includegraphics cmd=\\pgfimage,"
            "xmin=" + ff + ", xmax=" + ff + ", "
            "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
        ).format(*(extent + (rel_filepath,)))
    )
    return data, content
Returns the PGFPlots code for a graphics environment holding a rendering of the object.
def on_btn_upload(self, event):
    """
    Try to run upload_magic.
    Open validation mode if the upload file has problems.
    """
    if not self.check_for_uncombined_files():
        return
    outstring = "upload_magic.py"
    print("-I- running python script:\n %s" % (outstring))
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()
    self.contribution.tables['measurements'].add_measurement_names()
    if self.data_model_num == 3:
        res, error_message, has_problems, all_failing_items = ipmag.upload_magic(
            concat=False, dir_path=self.WD, vocab=self.contribution.vocab,
            contribution=self.contribution)
    if self.data_model_num == 2:
        res, error_message, errors = ipmag.upload_magic2(
            dir_path=self.WD, data_model=self.er_magic.data_model)
    del wait
    if res:
        text = "You are ready to upload!\n{} was generated in {}".format(
            os.path.split(res)[1], os.path.split(res)[0])
        dlg = pw.ChooseOne(self, "Go to MagIC for uploading", "Not ready yet",
                           text, "Saved")
    else:
        text = ("There were some problems with the creation of your upload "
                "file.\nError message: {}\nSee Terminal/message window for "
                "details").format(error_message)
        dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
    dlg.Centre()
    result = dlg.ShowModal()
    if result == wx.ID_OK:
        dlg.Destroy()
    if result == wx.ID_YES:
        pw.on_database_upload(None)
    if self.data_model_num == 3:
        if not res:
            from programs import magic_gui
            self.Disable()
            self.Hide()
            self.magic_gui_frame = magic_gui.MainFrame(
                self.WD, dmodel=self.data_model, title="Validations",
                contribution=self.contribution)
            self.magic_gui_frame.validation_mode = ['specimens']
            self.magic_gui_frame.failing_items = all_failing_items
            self.magic_gui_frame.change_dir_button.Disable()
            self.magic_gui_frame.Centre()
            self.magic_gui_frame.Show()
            self.magic_gui_frame.highlight_problems(has_problems)
            # change name of upload button to 'exit validation mode'
            self.magic_gui_frame.bSizer2.GetStaticBox().SetLabel('return to main GUI')
            self.magic_gui_frame.btn_upload.SetLabel("exit validation mode")
            # bind that button to quitting magic gui and re-enabling Pmag GUI
            self.magic_gui_frame.Bind(wx.EVT_BUTTON, self.on_end_validation,
                                      self.magic_gui_frame.btn_upload)
            # do binding so that closing/quitting re-opens the main frame
            self.magic_gui_frame.Bind(wx.EVT_CLOSE, self.on_end_validation)
            # this makes it work with only the validation window open
            self.magic_gui_frame.Bind(
                wx.EVT_MENU,
                lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
                self.magic_gui_frame.menubar.file_quit)
            # this makes it work if an additional grid is open
            self.Bind(
                wx.EVT_MENU,
                lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
                self.magic_gui_frame.menubar.file_quit)
Try to run upload_magic. Open validation mode if the upload file has problems.
def eqstr(a, b):
    """
    Determine whether two strings are equivalent.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eqstr_c.html

    :param a: Arbitrary character string.
    :type a: str
    :param b: Arbitrary character string.
    :type b: str
    :return: True if A and B are equivalent.
    :rtype: bool
    """
    return bool(libspice.eqstr_c(stypes.stringToCharP(a),
                                 stypes.stringToCharP(b)))
Determine whether two strings are equivalent. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eqstr_c.html :param a: Arbitrary character string. :type a: str :param b: Arbitrary character string. :type b: str :return: True if A and B are equivalent. :rtype: bool
def kwargs(self):
    """combine GET and POST params to be passed to the controller"""
    kwargs = dict(self.query_kwargs)
    kwargs.update(self.body_kwargs)
    return kwargs
combine GET and POST params to be passed to the controller
def run_mainloop_with(self, target):
    """Start the OS's main loop to process asynchronous BLE events and
    then run the specified target function in a background thread.

    Target function should be a function that takes no parameters and
    optionally returns an integer response code. When the target function
    stops executing or returns with a value then the main loop will be
    stopped and the program will exit with the returned code.

    Note that an OS main loop is required to process asynchronous BLE
    events and this function is provided as a convenience for writing
    simple tools and scripts that don't need to be full-blown GUI
    applications. If you are writing a GUI application that has a main
    loop (a GTK glib main loop on Linux, or a Cocoa main loop on OSX)
    then you don't need to call this function.
    """
    # Spin up a background thread to run the target code.
    self._user_thread = threading.Thread(target=self._user_thread_main,
                                         args=(target,))
    self._user_thread.daemon = True  # Don't let the user thread block exit.
    self._user_thread.start()

    # Spin up a GLib main loop in the main thread to process async BLE events.
    self._gobject_mainloop = GObject.MainLoop()
    try:
        self._gobject_mainloop.run()  # Doesn't return until the mainloop ends.
    except KeyboardInterrupt:
        self._gobject_mainloop.quit()
        sys.exit(0)

    # Main loop finished.  Check if an exception occurred and throw it,
    # otherwise return the status code from the user code.
    if self._exception is not None:
        # Rethrow exception with its original stack trace following advice from:
        # http://nedbatchelder.com/blog/200711/rethrowing_exceptions_in_python.html
        raise_(self._exception[1], None, self._exception[2])
    else:
        sys.exit(self._return_code)
Start the OS's main loop to process asynchronous BLE events and then run the specified target function in a background thread. Target function should be a function that takes no parameters and optionally returns an integer response code. When the target function stops executing or returns with a value then the main loop will be stopped and the program will exit with the returned code. Note that an OS main loop is required to process asynchronous BLE events and this function is provided as a convenience for writing simple tools and scripts that don't need to be full-blown GUI applications. If you are writing a GUI application that has a main loop (a GTK glib main loop on Linux, or a Cocoa main loop on OSX) then you don't need to call this function.
def update_alias(self, addressid, data):
    """Update alias address"""
    return self.api_call(
        ENDPOINTS['aliases']['update'],
        dict(addressid=addressid),
        body=data)
Update alias address
def police_priority_map_conform_map_pri6_conform(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(
        config, "police-priority-map",
        xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    name_key.text = kwargs.pop('name')
    conform = ET.SubElement(police_priority_map, "conform")
    map_pri6_conform = ET.SubElement(conform, "map-pri6-conform")
    map_pri6_conform.text = kwargs.pop('map_pri6_conform')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def importable(obj):
    """Check if an object can be serialised as a qualified name. This is done
    by checking that a ``look_up(object_name(obj))`` gives back the same
    object.

    .. |importable| replace:: :py:func:`importable`"""
    try:
        return look_up(object_name(obj)) is obj
    except (AttributeError, TypeError, ImportError):
        return False
Check if an object can be serialised as a qualified name. This is done by checking that a ``look_up(object_name(obj))`` gives back the same object. .. |importable| replace:: :py:func:`importable`
def cleanup(self):
    '''Stop background thread and clean up resources'''
    self._processing_stop = True
    self._wakeup_processing_thread()
    self._processing_stopped_event.wait(3)
Stop background thread and clean up resources
def add_tree(self, tree, parent=None):
    """
    Add `tree` into database.

    Args:
        tree (obj): :class:`.Tree` instance.
        parent (ref, default None): Reference to parent tree. This is
               used for all sub-trees in recursive call.
    """
    if tree.path in self.path_db:
        self.remove_tree_by_path(tree.path)

    # index all indexable attributes
    for index in tree.indexes:
        if not getattr(tree, index):
            continue

        self._add_to(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )

    if parent:
        self._add_to(self.parent_db, tree.path, parent)

    # make sure, that all sub-trees starts with path of parent tree
    for sub_tree in tree.sub_trees:
        assert sub_tree.path.startswith(tree.path)

    for sub_tree in tree.sub_trees:
        self.add_tree(sub_tree, parent=tree)
Add `tree` into database. Args: tree (obj): :class:`.Tree` instance. parent (ref, default None): Reference to parent tree. This is used for all sub-trees in recursive call.
def file_finder(dirname="."):
    """
    Find the files in ``dirname`` under Mercurial version control
    according to the setuptools spec (see
    http://peak.telecommunity.com/DevCenter/setuptools#adding-support-for-other-revision-control-systems
    ).
    """
    import distutils.log
    dirname = dirname or '.'
    try:
        valid_mgrs = managers.RepoManager.get_valid_managers(dirname)
        valid_mgrs = managers.RepoManager.existing_only(valid_mgrs)
        for mgr in valid_mgrs:
            try:
                return mgr.find_all_files()
            except Exception:
                e = sys.exc_info()[1]
                distutils.log.warn(
                    "hgtools.%s could not find files: %s", mgr, e)
    except Exception:
        e = sys.exc_info()[1]
        distutils.log.warn(
            "Unexpected error finding valid managers in "
            "hgtools.file_finder_plugin: %s", e)
    return []
Find the files in ``dirname`` under Mercurial version control according to the setuptools spec (see http://peak.telecommunity.com/DevCenter/setuptools#adding-support-for-other-revision-control-systems ).
def get_dimension_index(self, dimension):
    """Get the index of the requested dimension.

    Args:
        dimension: Dimension to look up by name or by index

    Returns:
        Integer index of the requested dimension
    """
    if isinstance(dimension, int):
        if (dimension < (self.ndims + len(self.vdims)) or
                dimension < len(self.dimensions())):
            return dimension
        else:
            raise IndexError('Dimension index out of bounds')
    dim = dimension_name(dimension)
    try:
        dimensions = self.kdims + self.vdims
        return [i for i, d in enumerate(dimensions) if d == dim][0]
    except IndexError:
        raise Exception("Dimension %s not found in %s." %
                        (dim, self.__class__.__name__))
Get the index of the requested dimension. Args: dimension: Dimension to look up by name or by index Returns: Integer index of the requested dimension
def query(self, u, v):
    """:returns: the lowest common ancestor of u and v
    :complexity: O(log n)
    """
    # -- assume w.l.o.g. that v is not higher than u in the tree
    if self.level[u] > self.level[v]:
        u, v = v, u
    # -- put v at the same level as u
    depth = len(self.anc)
    for k in range(depth - 1, -1, -1):
        if self.level[u] <= self.level[v] - (1 << k):
            v = self.anc[k][v]
    assert self.level[u] == self.level[v]
    if u == v:
        return u
    # -- climb until the lowest common ancestor
    for k in range(depth - 1, -1, -1):
        if self.anc[k][u] != self.anc[k][v]:
            u = self.anc[k][u]
            v = self.anc[k][v]
    assert self.anc[0][u] == self.anc[0][v]
    return self.anc[0][u]
:returns: the lowest common ancestor of u and v :complexity: O(log n)
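For context, a minimal sketch of how the `anc` table used above is typically built by binary lifting; `parent` is assumed to map each node to its parent index (the root being its own parent) and `level` holds each node's depth:

def build_lca_tables(parent, level):
    n = len(parent)
    depth = max(1, max(level).bit_length())
    anc = [parent[:]]                  # anc[0][v] = parent of v
    for k in range(1, depth):
        prev = anc[-1]
        anc.append([prev[prev[v]] for v in range(n)])  # 2^k-th ancestor
    return anc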
def write(grp, out_path):
    """ Write a GRP to a text file.

    Args:
        grp (list): GRP object to write to new-line delimited text file
        out_path (string): output path

    Returns:
        None
    """
    with open(out_path, "w") as f:
        for x in grp:
            f.write(str(x) + "\n")
Write a GRP to a text file. Args: grp (list): GRP object to write to new-line delimited text file out_path (string): output path Returns: None
def description_from_content(self):
    """
    Returns the first block or sentence of the first content-like field.
    """
    description = ""
    # Use the first RichTextField, or TextField if none found.
    for field_type in (RichTextField, models.TextField):
        if not description:
            for field in self._meta.fields:
                if (isinstance(field, field_type) and
                        field.name != "description"):
                    description = getattr(self, field.name)
                    if description:
                        from yacms.core.templatetags.yacms_tags \
                            import richtext_filters
                        description = richtext_filters(description)
                        break
    # Fall back to the title if description couldn't be determined.
    if not description:
        description = str(self)
    # Strip everything after the first block or sentence.
    ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>",
            "\n", ". ", "! ", "? ")
    for end in ends:
        pos = description.lower().find(end)
        if pos > -1:
            description = TagCloser(description[:pos]).html
            break
    else:
        description = truncatewords_html(description, 100)
    try:
        description = unicode(description)
    except NameError:
        pass  # Python 3.
    return description
Returns the first block or sentence of the first content-like field.
def get_local_references(tb, max_string_length=1000):
    """
    Find the values of the local variables within the traceback scope.

    :param tb: traceback
    :return: list of tuples containing (variable name, value)
    """
    if 'self' in tb.tb_frame.f_locals:
        _locals = [('self', repr(tb.tb_frame.f_locals['self']))]
    else:
        _locals = []
    for k, v in tb.tb_frame.f_locals.items():  # iteritems() on Python 2
        if k == 'self':
            continue
        try:
            vstr = format_reference(v, max_string_length=max_string_length)
            _locals.append((k, vstr))
        except TypeError:
            pass
    return _locals
Find the values of the local variables within the traceback scope. :param tb: traceback :return: list of tuples containing (variable name, value)
def compress_json(data):
    """Take a Python data object. Convert to JSON and compress using lzstring"""
    json_string = json.dumps(data).encode('utf-8', 'ignore').decode('utf-8')
    # JSON.parse() doesn't handle `NaN`, but it does handle `null`.
    json_string = json_string.replace('NaN', 'null')
    x = lzstring.LZString()
    return x.compressToBase64(json_string)
Take a Python data object. Convert to JSON and compress using lzstring
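A round-trip sketch, assuming the compress_json above and the lzstring package (pip install lzstring):

import json
import lzstring

compressed = compress_json({"value": float("nan"), "ok": 1})
restored = lzstring.LZString().decompressFromBase64(compressed)
print(json.loads(restored))  # {'value': None, 'ok': 1} -- NaN became null

Note that the blanket str.replace would also mangle any string value containing the literal text "NaN".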
def elementInActiveFormattingElements(self, name):
    """Check if an element exists between the end of the active
    formatting elements and the last marker. If it does, return it, else
    return false"""
    for item in self.activeFormattingElements[::-1]:
        # Check for Marker first because if it's a Marker it doesn't have a
        # name attribute.
        if item == Marker:
            break
        elif item.name == name:
            return item
    return False
Check if an element exists between the end of the active formatting elements and the last marker. If it does, return it, else return false
def subpt(method, target, et, abcorr, obsrvr):
    """
    Deprecated: This routine has been superseded by the CSPICE routine
    :func:`subpnt`. This routine is supported for purposes of backward
    compatibility only.

    Compute the rectangular coordinates of the sub-observer point on a
    target body at a particular epoch, optionally corrected for planetary
    (light time) and stellar aberration. Return these coordinates
    expressed in the body-fixed frame associated with the target body.
    Also, return the observer's altitude above the target body.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpt_c.html

    :param method: Computation method.
    :type method: str
    :param target: Name of target body.
    :type target: str
    :param et: Epoch in ephemeris seconds past J2000 TDB.
    :type et: Union[float,Iterable[float]]
    :param abcorr: Aberration correction.
    :type abcorr: str
    :param obsrvr: Name of observing body.
    :type obsrvr: str
    :return:
            Sub-observer point on the target body,
            Altitude of the observer above the target body.
    :rtype: tuple
    """
    method = stypes.stringToCharP(method)
    target = stypes.stringToCharP(target)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    spoint = stypes.emptyDoubleVector(3)
    alt = ctypes.c_double()
    if hasattr(et, "__iter__"):
        points = []
        alts = []
        for t in et:
            libspice.subpt_c(method, target, ctypes.c_double(t), abcorr,
                             obsrvr, spoint, ctypes.byref(alt))
            checkForSpiceError(None)
            points.append(stypes.cVectorToPython(spoint))
            alts.append(alt.value)
        return points, alts
    else:
        et = ctypes.c_double(et)
        libspice.subpt_c(method, target, et, abcorr, obsrvr, spoint,
                         ctypes.byref(alt))
        return stypes.cVectorToPython(spoint), alt.value
Deprecated: This routine has been superseded by the CSPICE routine :func:`subpnt`. This routine is supported for purposes of backward compatibility only. Compute the rectangular coordinates of the sub-observer point on a target body at a particular epoch, optionally corrected for planetary (light time) and stellar aberration. Return these coordinates expressed in the body-fixed frame associated with the target body. Also, return the observer's altitude above the target body. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpt_c.html :param method: Computation method. :type method: str :param target: Name of target body. :type target: str :param et: Epoch in ephemeris seconds past J2000 TDB. :type et: Union[float,Iterable[float]] :param abcorr: Aberration correction. :type abcorr: str :param obsrvr: Name of observing body. :type obsrvr: str :return: Sub-observer point on the target body, Altitude of the observer above the target body. :rtype: tuple
def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
    """Performs a GET request with the metadata headers.

    Args:
      metadata_url: string, the URL to perform a GET request on.
      params: dictionary, the query parameters in the GET request.
      timeout: int, timeout in seconds for metadata requests.

    Returns:
      HTTP response from the GET request.

    Raises:
      urlerror.HTTPError: raises when the GET request fails.
    """
    headers = {'Metadata-Flavor': 'Google'}
    params = urlparse.urlencode(params or {})
    url = '%s?%s' % (metadata_url, params)
    request = urlrequest.Request(url, headers=headers)
    request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
    timeout = timeout or self.timeout
    return request_opener.open(request, timeout=timeout * 1.1)
Performs a GET request with the metadata headers. Args: metadata_url: string, the URL to perform a GET request on. params: dictionary, the query parameters in the GET request. timeout: int, timeout in seconds for metadata requests. Returns: HTTP response from the GET request. Raises: urlerror.HTTPError: raises when the GET request fails.
def AgregarAjusteFisico(self, cantidad, cantidad_cabezas=None,
                        cantidad_kg_vivo=None, **kwargs):
    "Add fields to the item detail for a physical adjustment"
    d = {'cantidad': cantidad,
         'cantidadCabezas': cantidad_cabezas,
         'cantidadKgVivo': cantidad_kg_vivo,
         }
    item_liq = self.solicitud['itemDetalleAjusteLiquidacion'][-1]
    item_liq['ajusteFisico'] = d
    return True
Add fields to the item detail for a physical adjustment
def add_comment(self, comment, metadata=""):
    """
    Add a canned comment

    :type comment: str
    :param comment: New canned comment

    :type metadata: str
    :param metadata: Optional metadata

    :rtype: dict
    :return: A dictionary containing the canned comment description
    """
    data = {
        'comment': comment,
        'metadata': metadata
    }
    return self.post('createComment', data)
Add a canned comment :type comment: str :param comment: New canned comment :type metadata: str :param metadata: Optional metadata :rtype: dict :return: A dictionary containing the canned comment description
def get_jamo_class(jamo):
    """Determine if a jamo character is a lead, vowel, or tail.
    Integers and U+11xx characters are valid arguments. HCJ consonants are
    not valid here.

    get_jamo_class should return the class ["lead" | "vowel" | "tail"] of a
    given character or integer.

    Note: jamo class directly corresponds to the Unicode 7.0 specification,
    thus includes filler characters as having a class.
    """
    # TODO: Perhaps raise a separate error for U+3xxx jamo.
    if jamo in JAMO_LEADS or jamo == chr(0x115F):
        return "lead"
    if jamo in JAMO_VOWELS or jamo == chr(0x1160) or \
            0x314F <= ord(jamo) <= 0x3163:
        return "vowel"
    if jamo in JAMO_TAILS:
        return "tail"
    else:
        raise InvalidJamoError("Invalid or classless jamo argument.", jamo)
Determine if a jamo character is a lead, vowel, or tail. Integers and U+11xx characters are valid arguments. HCJ consonants are not valid here. get_jamo_class should return the class ["lead" | "vowel" | "tail"] of a given character or integer. Note: jamo class directly corresponds to the Unicode 7.0 specification, thus includes filler characters as having a class.
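Usage sketch, assuming JAMO_LEADS/JAMO_VOWELS/JAMO_TAILS cover the U+1100 jamo block as in the jamo package:

print(get_jamo_class(chr(0x110B)))  # 'lead'  (U+110B, ieung)
print(get_jamo_class(chr(0x1161)))  # 'vowel' (U+1161, a)
print(get_jamo_class(chr(0x11AB)))  # 'tail'  (U+11AB, nieun)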
def style(self, index, *args):
    """
    Add style to your axis, one at a time. args are of the form::

        <axis color>,<font size>,<alignment>,<drawing control>,<tick mark color>

    APIPARAM: chxs
    """
    args = color_args(args, 0)
    self.data['styles'].append(
        ','.join([str(index)] + list(map(str, args)))
    )
    return self.parent
Add style to your axis, one at a time args are of the form:: <axis color>, <font size>, <alignment>, <drawing control>, <tick mark color> APIPARAM: chxs
def push_update(self, params, values):
    """
    Perform a parameter update and keep track of the change on the state.
    Same call structure as :func:`peri.states.States.update`
    """
    curr = self.get_values(params)
    self.stack.append((params, curr))
    self.update(params, values)
Perform a parameter update and keep track of the change on the state. Same call structure as :func:`peri.states.States.update`
def attach_template(self, _template, _key, **unbound_var_values):
    """Attaches the template to this such that _key=this layer.

    Note: names were chosen to avoid conflicts with any likely
    unbound_var keys.

    Args:
      _template: The template to construct.
      _key: The key that this layer should replace.
      **unbound_var_values: The values for the unbound_vars.

    Returns:
      A new layer with operation applied.

    Raises:
      ValueError: If _key is specified twice or there is a problem computing
        the template.
    """
    if _key in unbound_var_values:
        raise ValueError('%s specified twice.' % _key)
    unbound_var_values[_key] = self
    return _template.as_layer().construct(**unbound_var_values)
Attaches the template to this such that _key=this layer. Note: names were chosen to avoid conflicts with any likely unbound_var keys. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template.
def download(self, field):
    """Download a file.

    :param field: file field to download
    :type field: string
    :rtype: a file handle
    """
    if not field.startswith('output'):
        raise ValueError("Only processor results (output.* fields) can be downloaded")
    if field not in self.annotation:
        raise ValueError("Download field {} does not exist".format(field))
    ann = self.annotation[field]
    if ann['type'] != 'basic:file:':
        raise ValueError("Only basic:file: field can be downloaded")
    return next(self.gencloud.download([self.id], field))
Download a file. :param field: file field to download :type field: string :rtype: a file handle
def dump_object(self, obj):
    """
    Called to encode unrecognized object.

    :param object obj: the object to encode
    :return: the encoded object
    :raises TypeError: when `obj` cannot be encoded

    This method is passed as the ``default`` keyword parameter
    to :func:`json.dumps`. It provides default representations for
    a number of Python language/standard library types.

    +----------------------------+---------------------------------------+
    | Python Type                | String Format                         |
    +----------------------------+---------------------------------------+
    | :class:`bytes`,            | Base64 encoded string.                |
    | :class:`bytearray`,        |                                       |
    | :class:`memoryview`        |                                       |
    +----------------------------+---------------------------------------+
    | :class:`datetime.datetime` | ISO8601 formatted timestamp in the    |
    |                            | extended format including separators, |
    |                            | milliseconds, and the timezone        |
    |                            | designator.                           |
    +----------------------------+---------------------------------------+
    | :class:`uuid.UUID`         | Same as ``str(value)``                |
    +----------------------------+---------------------------------------+
    """
    if isinstance(obj, uuid.UUID):
        return str(obj)
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    if isinstance(obj, (bytes, bytearray, memoryview)):
        return base64.b64encode(obj).decode('ASCII')
    raise TypeError('{!r} is not JSON serializable'.format(obj))
Called to encode unrecognized object. :param object obj: the object to encode :return: the encoded object :raises TypeError: when `obj` cannot be encoded This method is passed as the ``default`` keyword parameter to :func:`json.dumps`. It provides default representations for a number of Python language/standard library types. +----------------------------+---------------------------------------+ | Python Type | String Format | +----------------------------+---------------------------------------+ | :class:`bytes`, | Base64 encoded string. | | :class:`bytearray`, | | | :class:`memoryview` | | +----------------------------+---------------------------------------+ | :class:`datetime.datetime` | ISO8601 formatted timestamp in the | | | extended format including separators, | | | milliseconds, and the timezone | | | designator. | +----------------------------+---------------------------------------+ | :class:`uuid.UUID` | Same as ``str(value)`` | +----------------------------+---------------------------------------+
def _cache_from_source(path: str) -> str:
    """Return the path to the cached file for the given path. The original
    path does not have to exist."""
    cache_path, cache_file = os.path.split(importlib.util.cache_from_source(path))
    filename, _ = os.path.splitext(cache_file)
    return os.path.join(cache_path, filename + ".lpyc")
Return the path to the cached file for the given path. The original path does not have to exist.
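An illustrative call, assuming the _cache_from_source above; the exact __pycache__ layout and magic tag depend on the interpreter (CPython 3.7 shown):

print(_cache_from_source("/proj/core.lpy"))
# /proj/__pycache__/core.cpython-37.lpyc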
def regexp_filter(self_or_cls, pattern):
    """
    Builds a parameter filter using the supplied pattern (may be a
    general Python regular expression)
    """
    def inner_filter(name, p):
        name_match = re.search(pattern, name)
        if name_match is not None:
            return True
        doc_match = re.search(pattern, p.doc)
        if doc_match is not None:
            return True
        return False
    return inner_filter
Builds a parameter filter using the supplied pattern (may be a general Python regular expression)
def _get_baremetal_switches(self, port):
    """Get switch ip addresses from baremetal transaction.

    This method is used to extract switch information
    from the transaction where VNIC_TYPE is baremetal.

    :param port: Received port transaction
    :returns: list of all switches
    :returns: list of only switches which are active
    """
    all_switches = set()
    active_switches = set()
    all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
    for link_info in all_link_info:
        switch_info = self._get_baremetal_switch_info(link_info)
        if not switch_info:
            continue
        switch_ip = switch_info['switch_ip']

        # If not for Nexus
        if not self._switch_defined(switch_ip):
            continue
        all_switches.add(switch_ip)
        if self.is_switch_active(switch_ip):
            active_switches.add(switch_ip)

    return list(all_switches), list(active_switches)
Get switch ip addresses from baremetal transaction. This method is used to extract switch information from the transaction where VNIC_TYPE is baremetal. :param port: Received port transaction :returns: list of all switches :returns: list of only switches which are active
def GTax(x, ax):
    """
    Compute transpose of gradient of `x` along axis `ax`.

    Parameters
    ----------
    x : array_like
      Input array
    ax : int
      Axis on which gradient transpose is to be computed

    Returns
    -------
    xg : ndarray
      Output array
    """
    slc0 = (slice(None),) * ax
    xg = np.roll(x, 1, axis=ax) - x
    xg[slc0 + (slice(0, 1),)] = -x[slc0 + (slice(0, 1),)]
    xg[slc0 + (slice(-1, None),)] = x[slc0 + (slice(-2, -1),)]
    return xg
Compute transpose of gradient of `x` along axis `ax`. Parameters ---------- x : array_like Input array ax : int Axis on which gradient transpose is to be computed Returns ------- xg : ndarray Output array
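A numerical check that GTax really is the adjoint of the forward difference, assuming the forward gradient G pairs x[i+1] - x[i] with a zero in the last slot, so that <G x, y> == <x, GTax y>:

import numpy as np

def GAx(x, ax):  # forward gradient assumed to pair with GTax above
    xg = np.roll(x, -1, axis=ax) - x
    slc = (slice(None),) * ax
    xg[slc + (slice(-1, None),)] = 0.0
    return xg

np.random.seed(0)
x, y = np.random.randn(6, 5), np.random.randn(6, 5)
print(np.allclose(np.sum(GAx(x, 0) * y), np.sum(x * GTax(y, 0))))  # True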
def slug(request, url):
    """Look up a page by url (which is a tree of slugs)"""
    page = None
    if url:
        for slug in url.split('/'):
            if not slug:
                continue
            try:
                page = Page.objects.get(slug=slug, parent=page)
            except Page.DoesNotExist:
                raise Http404
    else:
        try:
            page = Page.objects.get(slug='index', parent=None)
        except Page.DoesNotExist:
            return TemplateView.as_view(
                template_name='wafer/index.html')(request)
    if 'edit' in request.GET:
        if not request.user.has_perm('pages.change_page'):
            raise PermissionDenied
        return EditPage.as_view()(request, pk=page.id)
    if 'compare' in request.GET:
        if not request.user.has_perm('pages.change_page'):
            raise PermissionDenied
        return ComparePage.as_view()(request, pk=page.id)
    return ShowPage.as_view()(request, pk=page.id)
Look up a page by url (which is a tree of slugs)
def complete_dict(self, values_dict):
    """
    Keys of nested dictionaries can be arbitrary objects.
    """
    if self.orientation != "rows":
        values_dict = transpose_nested_dictionary(values_dict)

    row_keys, column_keys = collect_nested_keys(values_dict)
    if self.verbose:
        print("[SimilarityWeightedAveraging] # rows = %d" % (len(row_keys)))
        print("[SimilarityWeightedAveraging] # columns = %d" % (len(column_keys)))

    similarities, overlaps, weights = \
        self.jacard_similarity_from_nested_dicts(values_dict)
    if self.verbose:
        print(
            "[SimilarityWeightedAveraging] Computed %d similarities between rows" % (
                len(similarities),))

    column_to_row_values = reverse_lookup_from_nested_dict(values_dict)

    result = defaultdict(dict)
    exponent = self.similarity_exponent
    shrinkage_coef = self.shrinkage_coef
    for i, row_key in enumerate(row_keys):
        for column_key, value_triplets in column_to_row_values.items():
            total = 0
            denom = shrinkage_coef
            for (other_row_key, y) in value_triplets:
                sample_weight = 1.0
                sim = similarities.get((row_key, other_row_key), 0)
                combined_weight = sim ** exponent
                combined_weight *= sample_weight
                total += combined_weight * y
                denom += combined_weight
            if denom > shrinkage_coef:
                result[row_key][column_key] = total / denom

    if self.orientation != "rows":
        result = transpose_nested_dictionary(result)
    return result
Keys of nested dictionaries can be arbitrary objects.
def _indent(text, level=1):
    '''Does a proper indenting for Sphinx rst'''
    prefix = ' ' * (4 * level)

    def prefixed_lines():
        for line in text.splitlines(True):
            yield (prefix + line if line.strip() else line)

    return ''.join(prefixed_lines())
Does a proper indenting for Sphinx rst
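Example, assuming the _indent above; blank lines are left unindented so rst block structure is preserved:

print(_indent("directive body\n\nmore text"))
#     directive body
# (blank line kept bare)
#     more text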
def callable_check(func, arg_count=1, arg_value=None, allow_none=False):
    """Check whether func is callable, with the given number of positional
    arguments. Raises ValueError if the check fails."""
    if func is None:
        if not allow_none:
            raise ValueError('callable cannot be None')
    elif not arg_checker(func, *[arg_value for _ in range(arg_count)]):
        raise ValueError('callable %s invalid (for %d arguments)'
                         % (func, arg_count))
Check whether func is callable, with the given number of positional arguments. Raises ValueError if the check fails.
def read_padding(fp, size, divisor=2):
    """
    Read padding bytes for the given byte size.

    :param fp: file-like object
    :param size: size in bytes of the data just read
    :param divisor: divisor of the byte alignment
    :return: read byte size
    """
    remainder = size % divisor
    if remainder:
        return fp.read(divisor - remainder)
    return b''
Read padding bytes for the given byte size. :param fp: file-like object :param size: size in bytes of the data just read :param divisor: divisor of the byte alignment :return: read byte size
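A self-contained example, assuming the read_padding above:

import io

fp = io.BytesIO(b"\x00" * 16)
fp.seek(5)
print(read_padding(fp, 5))     # b'\x00' -- one byte to reach 2-byte alignment
print(read_padding(fp, 6))     # b''     -- already aligned
print(read_padding(fp, 5, 4))  # b'\x00\x00\x00' -- 4-byte alignment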
def cost(self, tileStorage=0, fileStorage=0, featureStorage=0,
         generatedTileCount=0, loadedTileCount=0, enrichVariableCount=0,
         enrichReportCount=0, serviceAreaCount=0, geocodeCount=0):
    """
    returns the cost values for a given portal

    Inputs:
       tileStorage - int - number of tiles to store in MBs
       fileStorage - int - size of file to store in MBs
       featureStorage - int - size in MBs
       generatedTileCount - int - number of tiles to generate on site
       loadedTileCount - int - cost to host a certain number of tiles
       enrichVariableCount - int - cost to enrich data
       enrichReportCount - int - cost to generate an enrichment report
       serviceAreaCount - int - cost to generate x number of service areas
       geocodeCount - int - cost to generate x number of addresses
    """
    params = {
        "f": "json",
        "tileStorage": tileStorage,
        "fileStorage": fileStorage,
        "featureStorage": featureStorage,
        "generatedTileCount": generatedTileCount,
        "loadedTileCount": loadedTileCount,
        "enrichVariableCount": enrichVariableCount,
        "enrichReportCount": enrichReportCount,
        "serviceAreaCount": serviceAreaCount,
        "geocodeCount": geocodeCount
    }
    url = self._url + "/cost"
    return self._post(url=url, param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
returns the cost values for a given portal Inputs: tileStorage - int - number of tiles to store in MBs fileStorage - int - size of file to store in MBs featureStorage - int - size in MBs generatedTileCount - int - number of tiles to generate on site loadedTileCount - int - cost to host a certain number of tiles enrichVariableCount - int - cost to enrich data enrichReportCount - int - cost to generate an enrichment report serviceAreaCount - int - cost to generate x number of service areas geocodeCount - int - cost to generate x number of addresses
def main():
    """MAIN"""
    config = {
        "api": {
            "services": [
                {
                    "name": "my_api",
                    "testkey": "testval",
                },
            ],
            "calls": {
                "hello_world": {
                    "delay": 5,
                    "priority": 1,
                    "arguments": None,
                },
                "marco": {
                    "delay": 1,
                    "priority": 1,
                },
                "pollo": {
                    "delay": 1,
                    "priority": 1,
                },
            }
        }
    }
    app = AppBuilder([MyAPI], Strategy(Print()), AppConf(config))
    app.run()
MAIN
def percent_point(self, y, V):
    """Compute the inverse of conditional cumulative distribution :math:`C(u|v)^{-1}`

    Args:
        y: `np.ndarray` value of :math:`C(u|v)`.
        V: `np.ndarray` given value of v.
    """
    self.check_fit()

    if self.theta < 0:
        return V

    else:
        a = np.power(y, self.theta / (-1 - self.theta))
        b = np.power(V, self.theta)
        u = np.power((a + b - 1) / b, -1 / self.theta)
        return u
Compute the inverse of conditional cumulative distribution :math:`C(u|v)^{-1}`

Args:
    y: `np.ndarray` value of :math:`C(u|v)`.
    V: `np.ndarray` given value of v.
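Spelled out, the non-negative-theta branch above computes what reads as a Clayton-style inverse conditional CDF; this identification is inferred from the arithmetic rather than stated by the source, using (a + b - 1) / b = (a - 1) V^{-theta} + 1:

u \;=\; \Bigl[\bigl(y^{-\theta/(1+\theta)} - 1\bigr)\,V^{-\theta} + 1\Bigr]^{-1/\theta}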
def match_box_with_gt(self, boxes, iou_threshold): """ Args: boxes: Nx4 Returns: BoxProposals """ if self.is_training: with tf.name_scope('match_box_with_gt_{}'.format(iou_threshold)): iou = pairwise_iou(boxes, self.gt_boxes) # NxM max_iou_per_box = tf.reduce_max(iou, axis=1) # N best_iou_ind = tf.argmax(iou, axis=1) # N labels_per_box = tf.gather(self.gt_labels, best_iou_ind) fg_mask = max_iou_per_box >= iou_threshold fg_inds_wrt_gt = tf.boolean_mask(best_iou_ind, fg_mask) labels_per_box = tf.stop_gradient(labels_per_box * tf.cast(fg_mask, tf.int64)) return BoxProposals(boxes, labels_per_box, fg_inds_wrt_gt) else: return BoxProposals(boxes)
Args: boxes: Nx4 Returns: BoxProposals
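pairwise_iou is defined elsewhere in that codebase; as a reference point, here is a plain-numpy sketch of what a pairwise IoU between two box sets computes, assuming boxes in [x1, y1, x2, y2] layout (the helper name and the layout are assumptions for illustration):

import numpy as np

def pairwise_iou_np(boxes_a, boxes_b):
    # intersection corners, broadcast to an N x M grid
    x1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    y1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    x2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    y2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)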
def custom_callback(self, view_func): """ Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server. """ @wraps(view_func) def decorated(*args, **kwargs): plainreturn, data = self._process_callback('custom') if plainreturn: return data else: return view_func(data, *args, **kwargs) self._custom_callback = decorated return decorated
Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server.
def metainfo_to_protobuf(self) -> bytes: '''encode card_transfer info to protobuf''' card = cardtransferproto() card.version = self.version card.amount.extend(self.amount) card.number_of_decimals = self.number_of_decimals if self.asset_specific_data: if not isinstance(self.asset_specific_data, bytes): card.asset_specific_data = self.asset_specific_data.encode() else: card.asset_specific_data = self.asset_specific_data if card.ByteSize() > net_query(self.network).op_return_max_bytes: raise OverSizeOPReturn(''' Metainfo size exceeds maximum of {max} bytes supported by this network.''' .format(max=net_query(self.network) .op_return_max_bytes)) return card.SerializeToString()
encode card_transfer info to protobuf
def _get_resource_params(self, resource, for_update=False): """Get dictionary containing all parameters for the given resource. When getting params for a coordinate frame update, only name and description are returned because they are the only fields that can be updated. Args: resource (intern.resource.boss.resource.BossResource): A sub-class whose parameters will be extracted into a dictionary. for_update (bool): True if params will be used for an update. Returns: (dictionary): A dictionary containing the resource's parameters as required by the Boss API. Raises: TypeError if resource is not a supported class. """ if isinstance(resource, CollectionResource): return self._get_collection_params(resource) if isinstance(resource, ExperimentResource): return self._get_experiment_params(resource, for_update) if isinstance(resource, CoordinateFrameResource): return self._get_coordinate_params(resource, for_update) if isinstance(resource, ChannelResource): return self._get_channel_params(resource, for_update) raise TypeError('resource is not supported type.')
Get dictionary containing all parameters for the given resource. When getting params for a coordinate frame update, only name and description are returned because they are the only fields that can be updated. Args: resource (intern.resource.boss.resource.BossResource): A sub-class whose parameters will be extracted into a dictionary. for_update (bool): True if params will be used for an update. Returns: (dictionary): A dictionary containing the resource's parameters as required by the Boss API. Raises: TypeError if resource is not a supported class.
def acquire(self, blocking=True, timeout=None):
    """Attempt to acquire this lock.

    If the optional argument "blocking" is True and "timeout" is None,
    this method blocks until it successfully acquires the lock.  If
    "blocking" is False, it returns immediately if the lock could not
    be acquired.  Otherwise, it blocks for at most "timeout" seconds
    trying to acquire the lock.

    In all cases, this method returns True if the lock was successfully
    acquired and False otherwise.
    """
    if timeout is None:
        return self.__lock.acquire(blocking)
    else:
        # Simulated timeout using progressively longer sleeps.
        # This is the same timeout scheme used in the stdlib Condition
        # class.  If there's lots of contention on the lock then there's
        # a good chance you won't get it; but then again, Python doesn't
        # guarantee fairness anyway.  We hope that platform-specific
        # extensions can provide a better mechanism.
        endtime = _time() + timeout
        delay = 0.0005
        while not self.__lock.acquire(False):
            remaining = endtime - _time()
            if remaining <= 0:
                return False
            delay = min(delay*2, remaining, 0.05)
            _sleep(delay)
        return True
Attempt to acquire this lock.

If the optional argument "blocking" is True and "timeout" is None,
this method blocks until it successfully acquires the lock.  If
"blocking" is False, it returns immediately if the lock could not
be acquired.  Otherwise, it blocks for at most "timeout" seconds
trying to acquire the lock.

In all cases, this method returns True if the lock was successfully
acquired and False otherwise.
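The same doubling-sleep scheme works outside the class; a minimal standalone sketch, where try_acquire is a hypothetical zero-argument callable that returns True when a non-blocking acquire succeeds:

import time

def acquire_with_timeout(try_acquire, timeout):
    endtime = time.time() + timeout
    delay = 0.0005
    while not try_acquire():
        remaining = endtime - time.time()
        if remaining <= 0:
            return False
        # cap the sleep at 50 ms and never sleep past the deadline
        delay = min(delay * 2, remaining, 0.05)
        time.sleep(delay)
    return True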
def get_groupname(taskfileinfo):
    """Return a suitable name for a groupname for the given taskfileinfo.

    :param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: the group name
    :rtype: str
    :raises: None
    """
    element = taskfileinfo.task.element
    name = element.name
    return name + "_grp"
Return a suitable name for a groupname for the given taskfileinfo.

:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: the group name
:rtype: str
:raises: None
def __get_live_version(self): """ Get a live version string using versiontools """ try: import versiontools except ImportError: return None else: return str(versiontools.Version.from_expression(self.name))
Get a live version string using versiontools
def add_nodes(network_id, nodes,**kwargs): """ Add nodes to network """ start_time = datetime.datetime.now() names=[] # used to check uniqueness of node name for n_i in nodes: if n_i.name in names: raise HydraError("Duplicate Node Name: %s"%(n_i.name)) names.append(n_i.name) user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) _add_nodes_to_database(net_i, nodes) net_i.project_id=net_i.project_id db.DBSession.flush() node_s = db.DBSession.query(Node).filter(Node.network_id==network_id).all() #Maps temporary node_ids to real node_ids node_id_map = dict() iface_nodes = dict() for n_i in node_s: iface_nodes[n_i.name] = n_i for node in nodes: node_id_map[node.id] = iface_nodes[node.name] _bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes) log.info("Nodes added in %s", get_timing(start_time)) return node_s
Add nodes to network
def get_voms_proxy_user():
    """
    Returns the owner of the voms proxy.
    """
    out = _voms_proxy_info(["--identity"])[1].strip()
    try:
        # re.match returns None when nothing matches, so .group raises
        # AttributeError in that case
        return re.match(r".*\/CN\=([^\/]+).*", out).group(1)
    except AttributeError:
        raise Exception("no valid identity found in voms proxy: {}".format(out))
Returns the owner of the voms proxy.
def encode(self, word, max_length=4, zero_pad=True): """Return the Phonex code for a word. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The Phonex value Examples -------- >>> pe = Phonex() >>> pe.encode('Christopher') 'C623' >>> pe.encode('Niall') 'N400' >>> pe.encode('Schmidt') 'S253' >>> pe.encode('Smith') 'S530' """ name = unicode_normalize('NFKD', text_type(word.upper())) name = name.replace('ß', 'SS') # Clamp max_length to [4, 64] if max_length != -1: max_length = min(max(4, max_length), 64) else: max_length = 64 name_code = last = '' # Deletions effected by replacing with next letter which # will be ignored due to duplicate handling of Soundex code. # This is faster than 'moving' all subsequent letters. # Remove any trailing Ss while name[-1:] == 'S': name = name[:-1] # Phonetic equivalents of first 2 characters # Works since duplicate letters are ignored if name[:2] == 'KN': name = 'N' + name[2:] # KN.. == N.. elif name[:2] == 'PH': name = 'F' + name[2:] # PH.. == F.. (H ignored anyway) elif name[:2] == 'WR': name = 'R' + name[2:] # WR.. == R.. if name: # Special case, ignore H first letter (subsequent Hs ignored # anyway) # Works since duplicate letters are ignored if name[0] == 'H': name = name[1:] if name: # Phonetic equivalents of first character if name[0] in self._uc_vy_set: name = 'A' + name[1:] elif name[0] in {'B', 'P'}: name = 'B' + name[1:] elif name[0] in {'V', 'F'}: name = 'F' + name[1:] elif name[0] in {'C', 'K', 'Q'}: name = 'C' + name[1:] elif name[0] in {'G', 'J'}: name = 'G' + name[1:] elif name[0] in {'S', 'Z'}: name = 'S' + name[1:] name_code = last = name[0] # Modified Soundex code for i in range(1, len(name)): code = '0' if name[i] in {'B', 'F', 'P', 'V'}: code = '1' elif name[i] in {'C', 'G', 'J', 'K', 'Q', 'S', 'X', 'Z'}: code = '2' elif name[i] in {'D', 'T'}: if name[i + 1 : i + 2] != 'C': code = '3' elif name[i] == 'L': if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len( name ): code = '4' elif name[i] in {'M', 'N'}: if name[i + 1 : i + 2] in {'D', 'G'}: name = name[: i + 1] + name[i] + name[i + 2 :] code = '5' elif name[i] == 'R': if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len( name ): code = '6' if code != last and code != '0' and i != 0: name_code += code last = name_code[-1] if zero_pad: name_code += '0' * max_length if not name_code: name_code = '0' return name_code[:max_length]
Return the Phonex code for a word. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The Phonex value Examples -------- >>> pe = Phonex() >>> pe.encode('Christopher') 'C623' >>> pe.encode('Niall') 'N400' >>> pe.encode('Schmidt') 'S253' >>> pe.encode('Smith') 'S530'
def _get_adjustment(mag, year, mmin, completeness_year, t_f, mag_inc=0.1):
    '''
    If the magnitude is greater than the minimum in the completeness table
    and the year is greater than the corresponding completeness year then
    return the Weichert factor

    :param float mag:
        Magnitude of an earthquake

    :param float year:
        Year of earthquake

    :param float mmin:
        Minimum magnitude of the completeness table

    :param np.ndarray completeness_year:
        Completeness year for each magnitude bin

    :param float t_f:
        Weichert adjustment factor

    :param float mag_inc:
        Magnitude increment

    :returns:
        Weichert adjustment factor if event is in complete part of catalogue
        (False otherwise)
    '''
    if len(completeness_year) == 1:
        if (mag >= mmin) and (year >= completeness_year[0]):
            # No adjustment needed - event weight == 1
            return 1.0
        else:
            # Event should not be counted
            return False

    kval = int(((mag - mmin) / mag_inc)) + 1

    if (kval >= 1) and (year >= completeness_year[kval - 1]):
        return t_f
    else:
        return False
If the magnitude is greater than the minimum in the completeness table and the year is greater than the corresponding completeness year then return the Weichert factor

:param float mag:
    Magnitude of an earthquake
:param float year:
    Year of earthquake
:param float mmin:
    Minimum magnitude of the completeness table
:param np.ndarray completeness_year:
    Completeness year for each magnitude bin
:param float t_f:
    Weichert adjustment factor
:param float mag_inc:
    Magnitude increment
:returns:
    Weichert adjustment factor if event is in complete part of catalogue
    (False otherwise)
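A worked instance of the bin lookup, with hypothetical numbers: three completeness years covering magnitude bins starting at 4.0, 4.1 and 4.2.

mmin, mag_inc = 4.0, 0.1
completeness_year = [1990.0, 1970.0, 1930.0]
mag, year = 4.15, 1975.0
kval = int((mag - mmin) / mag_inc) + 1           # int(1.499...) + 1 == 2
complete = year >= completeness_year[kval - 1]   # 1975 >= 1970 -> True
# the event falls in the complete part of the catalogue, so t_f is returned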
def decouple(fn): """ Inverse operation of couple. Create two functions of one argument and one return from a function that takes two arguments and has two returns Examples -------- >>> h = lambda x: (2*x**3, 6*x**2) >>> f, g = decouple(h) >>> f(5) 250 >>> g(5) 150 """ def fst(*args, **kwargs): return fn(*args, **kwargs)[0] def snd(*args, **kwargs): return fn(*args, **kwargs)[1] return fst, snd
Inverse operation of couple. Create two functions of one argument and one return from a function that takes two arguments and has two returns Examples -------- >>> h = lambda x: (2*x**3, 6*x**2) >>> f, g = decouple(h) >>> f(5) 250 >>> g(5) 150
def getCenter(self): ''' Gets the center coords of the View @author: U{Dean Morin <https://github.com/deanmorin>} ''' (left, top), (right, bottom) = self.getCoords() x = left + (right - left) / 2 y = top + (bottom - top) / 2 return (x, y)
Gets the center coords of the View @author: U{Dean Morin <https://github.com/deanmorin>}
def indices_within_times(times, start, end):
    """ Return an index array into times that lie within the durations defined by start end arrays

    Parameters
    ----------
    times: numpy.ndarray
        Array of times
    start: numpy.ndarray
        Array of duration start times
    end: numpy.ndarray
        Array of duration end times

    Returns
    -------
    indices: numpy.ndarray
        Array of indices into times
    """
    # coalesce the start/end segments
    start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())

    tsort = times.argsort()
    times_sorted = times[tsort]
    left = numpy.searchsorted(times_sorted, start)
    right = numpy.searchsorted(times_sorted, end)

    if len(left) == 0:
        return numpy.array([], dtype=numpy.uint32)

    # passing a bare generator to hstack is deprecated in recent numpy,
    # so build the list of index ranges explicitly
    return tsort[numpy.hstack([numpy.r_[s:e] for s, e in zip(left, right)])]
Return an index array into times that lie within the durations defined by start end arrays Parameters ---------- times: numpy.ndarray Array of times start: numpy.ndarray Array of duration start times end: numpy.ndarray Array of duration end times Returns ------- indices: numpy.ndarray Array of indices into times
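With already-disjoint segments the core of the lookup is two searchsorted calls; a self-contained sketch on hypothetical data, skipping the coalescing step:

import numpy

times = numpy.array([1.0, 3.0, 4.0, 7.0, 9.0])
start = numpy.array([2.0, 8.0])
end = numpy.array([5.0, 10.0])
tsort = times.argsort()
left = numpy.searchsorted(times[tsort], start)    # array([1, 4])
right = numpy.searchsorted(times[tsort], end)     # array([3, 5])
idx = tsort[numpy.hstack([numpy.r_[s:e] for s, e in zip(left, right)])]
# idx -> [1, 2, 4], the positions of 3.0, 4.0 and 9.0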
def update_continuously(records, update_interval=600): """Update `records` every `update_interval` seconds""" while True: for record in records: try: record.update() except (ApiError, RequestException): pass time.sleep(update_interval)
Update `records` every `update_interval` seconds
def simple_search(self, *keywords):
    """
    Perform a simple search for case insensitive substring matches.

    :param keywords: The string(s) to search for.
    :returns: The matched password entries (a list).

    Only passwords whose names match *all* of the given
    keywords are returned.
    """
    matches = []
    keywords = [kw.lower() for kw in keywords]
    logger.verbose(
        "Performing simple search on %s (%s) ..",
        pluralize(len(keywords), "keyword"),
        concatenate(map(repr, keywords)),
    )
    for entry in self.filtered_entries:
        normalized = entry.name.lower()
        if all(kw in normalized for kw in keywords):
            matches.append(entry)
    logger.log(
        logging.INFO if matches else logging.VERBOSE,
        "Matched %s using simple search.",
        pluralize(len(matches), "password"),
    )
    return matches
Perform a simple search for case insensitive substring matches.

:param keywords: The string(s) to search for.
:returns: The matched password entries (a list).

Only passwords whose names match *all* of the given
keywords are returned.
def get_name(tags_or_instance_or_id):
    """Helper utility to extract name out of tags dictionary or instance.
        [{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'

     Assert fails if there's more than one name.
     Returns '' if there's less than one name.
    """

    ec2 = get_ec2_resource()
    if hasattr(tags_or_instance_or_id, 'tags'):
        tags = tags_or_instance_or_id.tags
    elif isinstance(tags_or_instance_or_id, str):
        tags = ec2.Instance(tags_or_instance_or_id).tags
    elif tags_or_instance_or_id is None:
        return EMPTY_NAME
    else:
        assert isinstance(tags_or_instance_or_id,
                          Iterable), "expected iterable of tags"
        tags = tags_or_instance_or_id

    if not tags:
        return EMPTY_NAME
    names = [entry['Value'] for entry in tags if entry['Key'] == 'Name']
    if not names:
        return ''
    if len(names) > 1:
        assert False, "have more than one name: " + str(names)
    return names[0]
Helper utility to extract name out of tags dictionary or instance.
[{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'

Assert fails if there's more than one name.
Returns '' if there's less than one name.
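The tag-extraction step in isolation, with hypothetical tags:

tags = [{'Key': 'Name', 'Value': 'nexus'}, {'Key': 'env', 'Value': 'prod'}]
names = [entry['Value'] for entry in tags if entry['Key'] == 'Name']
assert names == ['nexus']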
def zip_html(self): """ Compress HTML documentation into a zip file. """ zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') if os.path.exists(zip_fname): os.remove(zip_fname) dirname = os.path.join(BUILD_PATH, 'html') fnames = os.listdir(dirname) os.chdir(dirname) self._run_os('zip', zip_fname, '-r', '-q', *fnames)
Compress HTML documentation into a zip file.
def get_prep_value(self, value): """Returns field's value prepared for saving into a database.""" if isinstance(value, LocalizedValue): prep_value = LocalizedValue() for k, v in value.__dict__.items(): if v is None: prep_value.set(k, '') else: # Need to convert File objects provided via a form to # unicode for database insertion prep_value.set(k, six.text_type(v)) return super().get_prep_value(prep_value) return super().get_prep_value(value)
Returns field's value prepared for saving into a database.
def _import_next_layer(self, proto, length, error=False): """Import next layer extractor. Positional arguments: * proto -- str, next layer protocol name * length -- int, valid (not padding) length Keyword arguments: * error -- bool, if function call on error Returns: * bool -- flag if extraction of next layer succeeded * Info -- info of next layer * ProtoChain -- protocol chain of next layer * str -- alias of next layer Protocols: * Ethernet (data link layer) * IPv4 (internet layer) * IPv6 (internet layer) """ if proto == 1: from pcapkit.protocols.link import Ethernet as Protocol elif proto == 228: from pcapkit.protocols.internet import IPv4 as Protocol elif proto == 229: from pcapkit.protocols.internet import IPv6 as Protocol else: from pcapkit.protocols.raw import Raw as Protocol next_ = Protocol(self._file, length, error=error, layer=self._exlayer, protocol=self._exproto) return next_
Import next layer extractor. Positional arguments: * proto -- str, next layer protocol name * length -- int, valid (not padding) length Keyword arguments: * error -- bool, if function call on error Returns: * bool -- flag if extraction of next layer succeeded * Info -- info of next layer * ProtoChain -- protocol chain of next layer * str -- alias of next layer Protocols: * Ethernet (data link layer) * IPv4 (internet layer) * IPv6 (internet layer)
def get_jobs(self): """ Retrieve all jobs""" url_jenkins = urijoin(self.base_url, "api", "json") response = self.fetch(url_jenkins) return response.text
Retrieve all jobs
def _recv_flow(self, method_frame): ''' Receive a flow control command from the broker ''' self.channel._active = method_frame.args.read_bit() args = Writer() args.write_bit(self.channel.active) self.send_frame(MethodFrame(self.channel_id, 20, 21, args)) if self._flow_control_cb is not None: self._flow_control_cb()
Receive a flow control command from the broker
def invalid_example_number(region_code): """Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in. """ if not _is_valid_region_code(region_code): return None # We start off with a valid fixed-line number since every country # supports this. Alternatively we could start with a different number # type, since fixed-line numbers typically have a wide breadth of valid # number lengths and we may have to make it very short before we get an # invalid number. metadata = PhoneMetadata.metadata_for_region(region_code.upper()) desc = _number_desc_by_type(metadata, PhoneNumberType.FIXED_LINE) if desc is None or desc.example_number is None: # This shouldn't happen; we have a test for this. return None # pragma no cover example_number = desc.example_number # Try and make the number invalid. We do this by changing the length. We # try reducing the length of the number, since currently no region has a # number that is the same length as MIN_LENGTH_FOR_NSN. This is probably # quicker than making the number longer, which is another # alternative. We could also use the possible number pattern to extract # the possible lengths of the number to make this faster, but this # method is only for unit-testing so simplicity is preferred to # performance. We don't want to return a number that can't be parsed, # so we check the number is long enough. We try all possible lengths # because phone number plans often have overlapping prefixes so the # number 123456 might be valid as a fixed-line number, and 12345 as a # mobile number. It would be faster to loop in a different order, but we # prefer numbers that look closer to real numbers (and it gives us a # variety of different lengths for the resulting phone numbers - # otherwise they would all be MIN_LENGTH_FOR_NSN digits long.) phone_number_length = len(example_number) - 1 while phone_number_length >= _MIN_LENGTH_FOR_NSN: number_to_try = example_number[:phone_number_length] try: possibly_valid_number = parse(number_to_try, region_code) if not is_valid_number(possibly_valid_number): return possibly_valid_number except NumberParseException: # pragma no cover # Shouldn't happen: we have already checked the length, we know # example numbers have only valid digits, and we know the region # code is fine. pass phone_number_length -= 1 # We have a test to check that this doesn't happen for any of our # supported regions. return None
Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in.
def refresh(path=None): """Convenience method for setting the git executable path.""" global GIT_OK GIT_OK = False if not Git.refresh(path=path): return if not FetchInfo.refresh(): return GIT_OK = True
Convenience method for setting the git executable path.
def from_value(self, value): """ Function infers TDS type from Python value. :param value: value from which to infer TDS type :return: An instance of subclass of :class:`BaseType` """ if value is None: sql_type = NVarCharType(size=1) else: sql_type = self._from_class_value(value, type(value)) return sql_type
Function infers TDS type from Python value. :param value: value from which to infer TDS type :return: An instance of subclass of :class:`BaseType`
def create(cls, issue_id, *, properties=None, auto_commit=False):
    """Creates a new Issue object with the properties provided

    Attributes:
        issue_id (str): Unique identifier for the issue object
        properties (dict): Dictionary of properties for the issue object.
        auto_commit (bool): Automatically commit the session after adding the issue
    """
    if cls.get(issue_id):
        raise IssueException('Issue {} already exists'.format(issue_id))

    res = Issue()
    res.issue_id = issue_id
    res.issue_type_id = IssueType.get(cls.issue_type).issue_type_id

    if properties:
        for name, value in properties.items():
            prop = IssueProperty()
            prop.issue_id = res.issue_id
            prop.name = name
            prop.value = value.isoformat() if type(value) == datetime else value
            res.properties.append(prop)
            db.session.add(prop)

    db.session.add(res)
    if auto_commit:
        db.session.commit()

    return cls.get(res.issue_id)
Creates a new Issue object with the properties provided

Attributes:
    issue_id (str): Unique identifier for the issue object
    properties (dict): Dictionary of properties for the issue object.
    auto_commit (bool): Automatically commit the session after adding the issue
def set_image(self):
    """This code must be in its own method since the fetch functions need
    credits to be set. m2m fields are not yet set at the end of either the
    save method or post_save signal."""
    if not self.image:
        scrape_image(self)

    # If still no image then use first contributor image
    if not self.image:
        contributors = self.get_primary_contributors()
        if contributors:
            self.image = contributors[0].image
            self.save(set_image=False)

    # If still no image then default
    if not self.image:
        filename = settings.STATIC_ROOT + 'music/images/default.png'
        if os.path.exists(filename):
            image = File(
                open(filename, 'rb')
            )
            image.name = 'default.png'
            self.image = image
            self.save(set_image=False)
This code must be in its own method since the fetch functions need credits to be set. m2m fields are not yet set at the end of either the save method or post_save signal.
def clear_output(self, stdout=True, stderr=True, other=True): """Clear the output of the cell receiving output.""" if stdout: print('\033[2K\r', file=io.stdout, end='') io.stdout.flush() if stderr: print('\033[2K\r', file=io.stderr, end='') io.stderr.flush()
Clear the output of the cell receiving output.
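The escape sequence does the work here: '\033[2K' erases the current terminal line and '\r' returns the cursor to column 0. A standalone sketch against a regular stdout:

import sys
import time

sys.stdout.write("working...")
sys.stdout.flush()
time.sleep(0.5)
print('\033[2K\r', end='')   # the "working..." line is now blank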
def setencoding(): """Set the string encoding used by the Unicode implementation. The default is 'ascii', but if you're willing to experiment, you can change this.""" encoding = "ascii" # Default value set by _PyUnicode_Init() if 0: # Enable to support locale aware default string encodings. import locale loc = locale.getdefaultlocale() if loc[1]: encoding = loc[1] if 0: # Enable to switch off string to Unicode coercion and implicit # Unicode to string conversion. encoding = "undefined" if encoding != "ascii": # On Non-Unicode builds this will raise an AttributeError... sys.setdefaultencoding(encoding)
Set the string encoding used by the Unicode implementation. The default is 'ascii', but if you're willing to experiment, you can change this.
def __minimum_noiseless_description_length(self, clusters, centers):
    """!
    @brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.

    @param[in] clusters (list): Clusters for which splitting criterion should be calculated.
    @param[in] centers (list): Centers of the clusters.

    @return (double) Returns splitting criterion in line with bayesian information criterion.
            Low value of splitting criterion means that current structure is much better.

    @see __bayesian_information_criterion(clusters, centers)

    """

    scores = float('inf')

    W = 0.0
    K = len(clusters)
    N = 0.0

    sigma_sqrt = 0.0

    alpha = 0.9
    betta = 0.9

    for index_cluster in range(0, len(clusters), 1):
        Ni = len(clusters[index_cluster])
        if Ni == 0:
            return float('inf')

        Wi = 0.0
        for index_object in clusters[index_cluster]:
            # euclidean_distance_square should be used in line with the paper,
            # but in this case results are very poor, therefore the square
            # root (euclidean_distance) is used to improve them.
            Wi += euclidean_distance(self.__pointer_data[index_object], centers[index_cluster])

        sigma_sqrt += Wi
        W += Wi / Ni
        N += Ni

    if N - K > 0:
        sigma_sqrt /= (N - K)
        sigma = sigma_sqrt ** 0.5

        Kw = (1.0 - K / N) * sigma_sqrt
        Ks = ( 2.0 * alpha * sigma / (N ** 0.5) ) * ( (alpha ** 2.0) * sigma_sqrt / N + W - Kw / 2.0 ) ** 0.5

        scores = sigma_sqrt * (2 * K)**0.5 * ((2 * K)**0.5 + betta) / N + W - sigma_sqrt + Ks + 2 * alpha**0.5 * sigma_sqrt / N

    return scores
!
@brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.

@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.

@return (double) Returns splitting criterion in line with bayesian information criterion.
        Low value of splitting criterion means that current structure is much better.

@see __bayesian_information_criterion(clusters, centers)
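Read directly off the arithmetic above (with W_k the summed point-to-center distance of cluster k, N_k its size, sigma^2 the pooled sum divided by N - K, and W the sum of per-cluster means), the returned score is:

\sigma^2 = \frac{1}{N-K}\sum_{k=1}^{K} W_k,\qquad
W = \sum_{k=1}^{K}\frac{W_k}{N_k},\qquad
K_w = \Bigl(1-\frac{K}{N}\Bigr)\sigma^2,

K_s = \frac{2\alpha\sigma}{\sqrt{N}}
      \sqrt{\frac{\alpha^2\sigma^2}{N} + W - \frac{K_w}{2}},\qquad
J = \frac{\sqrt{2K}\,\bigl(\sqrt{2K}+\beta\bigr)}{N}\,\sigma^2
    + W - \sigma^2 + K_s + \frac{2\sqrt{\alpha}\,\sigma^2}{N}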
def outline_segments(self, mask_background=False): """ Outline the labeled segments. The "outlines" represent the pixels *just inside* the segments, leaving the background pixels unmodified. Parameters ---------- mask_background : bool, optional Set to `True` to mask the background pixels (labels = 0) in the returned image. This is useful for overplotting the segment outlines on an image. The default is `False`. Returns ------- boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray` An image with the same shape of the segmentation image containing only the outlines of the labeled segments. The pixel values in the outlines correspond to the labels in the segmentation image. If ``mask_background`` is `True`, then a `~numpy.ma.MaskedArray` is returned. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 0, 0, 0, 0, 0]]) >>> segm.outline_segments() array([[0, 0, 0, 0, 0, 0], [0, 2, 2, 2, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0]]) """ from scipy.ndimage import grey_erosion, grey_dilation # mode='constant' ensures outline is included on the image borders selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) eroded = grey_erosion(self.data, footprint=selem, mode='constant', cval=0.) dilated = grey_dilation(self.data, footprint=selem, mode='constant', cval=0.) outlines = ((dilated != eroded) & (self.data != 0)).astype(int) outlines *= self.data if mask_background: outlines = np.ma.masked_where(outlines == 0, outlines) return outlines
Outline the labeled segments. The "outlines" represent the pixels *just inside* the segments, leaving the background pixels unmodified. Parameters ---------- mask_background : bool, optional Set to `True` to mask the background pixels (labels = 0) in the returned image. This is useful for overplotting the segment outlines on an image. The default is `False`. Returns ------- boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray` An image with the same shape of the segmentation image containing only the outlines of the labeled segments. The pixel values in the outlines correspond to the labels in the segmentation image. If ``mask_background`` is `True`, then a `~numpy.ma.MaskedArray` is returned. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 0, 0, 0, 0, 0]]) >>> segm.outline_segments() array([[0, 0, 0, 0, 0, 0], [0, 2, 2, 2, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0]])
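The erosion/dilation trick stands on its own; a toy reproduction of the docstring example using only scipy.ndimage:

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

data = np.zeros((6, 6), dtype=int)
data[1:5, 1:5] = 2
selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
eroded = grey_erosion(data, footprint=selem, mode='constant', cval=0.)
dilated = grey_dilation(data, footprint=selem, mode='constant', cval=0.)
outlines = ((dilated != eroded) & (data != 0)).astype(int) * data
# only the ring of 2s at the segment border survives; the 2x2 interior is zeroed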
def _uri(self, url): """Returns request absolute URI""" if url and not url.startswith('/'): # Then this must be a proxy request. return url uri = "{0}://{1}{2}{3}".format( self._protocol, self.real_connection.host, self._port_postfix(), url, ) return uri
Returns request absolute URI
async def founder(self, root): """Regional Founder. Returned even if the nation has ceased to exist. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region is Game-Created and doesn't have a founder. """ nation = root.find('FOUNDER').text if nation == '0': return None return aionationstates.Nation(nation)
Regional Founder. Returned even if the nation has ceased to exist. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region is Game-Created and doesn't have a founder.
def set_sqlite_pragmas(self): """ Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None """ def _pragmas_on_connect(dbapi_con, con_record): dbapi_con.execute("PRAGMA journal_mode = WAL;") event.listen(self.engine, "connect", _pragmas_on_connect)
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None
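The same listener pattern in a free-standing form; the database file name is hypothetical, and every new DBAPI connection the engine opens gets the pragma applied:

from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///example.db")

def _pragmas_on_connect(dbapi_con, con_record):
    dbapi_con.execute("PRAGMA journal_mode = WAL;")

event.listen(engine, "connect", _pragmas_on_connect)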
def _determineVolumeSize(self, source_type, source_uuid): """ Determine the minimum size the volume needs to be for the source. Returns the size in GiB. """ nova = self.novaclient if source_type == 'image': # The size returned for an image is in bytes. Round up to the next # integer GiB. image = nova.images.get(source_uuid) if hasattr(image, 'OS-EXT-IMG-SIZE:size'): size = getattr(image, 'OS-EXT-IMG-SIZE:size') size_gb = int(math.ceil(size / 1024.0**3)) return size_gb elif source_type == 'volume': # Volumes are easy because they are already in GiB. volume = nova.volumes.get(source_uuid) return volume.size elif source_type == 'snapshot': snap = nova.volume_snapshots.get(source_uuid) return snap.size else: unknown_source = ("The source type '%s' for UUID '%s' is" " unknown" % (source_type, source_uuid)) raise ValueError(unknown_source)
Determine the minimum size the volume needs to be for the source. Returns the size in GiB.
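The rounding in the image branch in one line, with a hypothetical 1.5 GiB image:

import math

size = int(1.5 * 1024 ** 3)                   # bytes reported by the image API
size_gb = int(math.ceil(size / 1024.0 ** 3))  # rounds up to 2 GiB
assert size_gb == 2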
def experiment_data(self, commit=None, must_contain_results=False): """ :param commit: the commit that all the experiments should have happened or None to include all :type commit: str :param must_contain_results: include only tags that contain results :type must_contain_results: bool :return: all the experiment data :rtype: dict """ results = {} for tag in self.__repository.tags: if not tag.name.startswith(self.__tag_prefix): continue data = json.loads(tag.tag.message) if "results" not in data and must_contain_results: continue if commit is not None and tag.tag.object.hexsha != name_to_object(self.__repository, commit).hexsha: continue results[tag.name] = data return results
:param commit: the commit that all the experiments should have happened or None to include all :type commit: str :param must_contain_results: include only tags that contain results :type must_contain_results: bool :return: all the experiment data :rtype: dict