content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import click
import sys


def find_modules(device_path, bundles_list):
    """
    Extracts metadata from the connected device and available bundles and
    returns this as a list of Module instances representing the modules on
    the device.

    :param str device_path: The path to the connected board.
    :param Bundle bundles_list: List of supported bundles as Bundle objects.
    :return: A list of Module instances describing the current state of the
      modules on the connected device.
    """
    # pylint: disable=broad-except,too-many-locals
    try:
        # NOTE(review): get_device_versions / get_bundle_versions / Module /
        # logger are defined elsewhere in this module.
        device_modules = get_device_versions(device_path)
        bundle_modules = get_bundle_versions(bundles_list)
        result = []
        # Only modules present on the device AND in a bundle are reported.
        for name, device_metadata in device_modules.items():
            if name in bundle_modules:
                # "path" and "mpy" are required keys; a missing one raises
                # KeyError, which is caught by the broad handler below.
                path = device_metadata["path"]
                bundle_metadata = bundle_modules[name]
                repo = bundle_metadata.get("__repo__")
                bundle = bundle_metadata.get("bundle")
                device_version = device_metadata.get("__version__")
                bundle_version = bundle_metadata.get("__version__")
                mpy = device_metadata["mpy"]
                compatibility = device_metadata.get("compatibility", (None, None))
                result.append(
                    Module(
                        path,
                        repo,
                        device_version,
                        bundle_version,
                        mpy,
                        bundle,
                        compatibility,
                    )
                )
        return result
    except Exception as ex:
        # If it's not possible to get the device and bundle metadata, bail out
        # with a friendly message and indication of what's gone wrong.
        logger.exception(ex)
        click.echo("There was a problem: {}".format(ex))
        sys.exit(1)
    # pylint: enable=broad-except,too-many-locals
dc51d1ca622d33c721785a3a3f8bb30b7a3b7833
3,627,900
from typing import OrderedDict


def sort_ordered_games_list(ordered_games_lists):
    """Reverse the group order and sort each group's games by title.

    Groups appear in reverse of their original insertion order; within each
    group, games are ordered alphabetically by their ``title`` attribute.
    """
    return OrderedDict(
        (group_name,
         OrderedDict(sorted(group_games.items(), key=lambda kv: kv[1].title)))
        for group_name, group_games in reversed(ordered_games_lists.items())
    )
bc1b40884ad450d8a2b447c2628bfe77cd2797fa
3,627,901
import random


def get_challenge_id() -> int:
    """
    Get the challenge ID, an integer in the range
    0 to NUMBER_OF_CHALLENGE_VARIANTS - 1 (inclusive).
    """
    # NOTE(review): reseeds the module-global random generator; any other
    # user of `random` in the process is affected by this call.
    random_seed = get_random_seed()
    random.seed(random_seed)
    # randint's upper bound is inclusive, hence the - 1.
    variant_number = random.randint(0, NUMBER_OF_CHALLENGE_VARIANTS - 1)
    return variant_number
ec5e5c817841e30e6fc38129d405f0b5424c04e8
3,627,902
def _conv1(in_channels, out_channels, stride=1, bias=False):
    """Build a 1x1 (point-wise) convolution layer."""
    return Conv1d(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        bias=bias,
    )
82e218f5700ad2d63d0820410707d5a69b6b0ff6
3,627,903
def mixer_b16_224_in21k(pretrained=False, **kwargs):
    """Mixer-B/16 224x224 with ImageNet-21k pretrained weights.

    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision'
    - https://arxiv.org/abs/2105.01601
    """
    # dict(..., **kwargs) intentionally raises TypeError if a caller tries to
    # override one of the fixed architecture parameters.
    model_args = dict(patch_size=16, num_blocks=12, hidden_dim=768, **kwargs)
    return _create_mixer('mixer_b16_224_in21k', pretrained=pretrained,
                         **model_args)
05fc49bd21cfa37108dcad9f99c81c4ff3eac521
3,627,904
def service_proxy_settings(private_base_url):
    """Proxy settings whose api_backend points at echo-api's '/service1'."""
    backend_url = f"{private_base_url('echo_api')}/service1"
    return rawobj.Proxy(backend_url)
11e23d2d1ed2cb32ccdd64c3917ae1fd409774c6
3,627,905
import time
import os
import subprocess


def get_map_mrr(qids, predictions, labels, device=0, keep_results=False):
    """
    Get the map and mrr using the trec_eval utility.
    qids, predictions, labels should have the same length. device is not a
    required parameter, it is only used to prevent potential naming conflicts
    when you are calling this concurrently from different threads of execution.
    :param qids: query ids of predictions and labels
    :param predictions: iterable of predictions made by the models
    :param labels: iterable of labels of the dataset
    :param device: device (GPU index or -1 for CPU) for identification purposes only
    :param keep_results: keep the temporary qrel/results files instead of
        deleting them
    :return: tuple ``(mean_average_precision, mean_reciprocal_rank)``
    """
    # NOTE(review): time.time() is called twice, so the qrel and results
    # filenames may carry slightly different timestamps; both variables are
    # used consistently below, so this is cosmetic only.
    qrel_fname = 'trecqa_{}_{}.qrel'.format(time.time(), device)
    results_fname = 'trecqa_{}_{}.results'.format(time.time(), device)
    qrel_template = '{qid} 0 {docno} {rel}\n'
    results_template = '{qid} 0 {docno} 0 {sim} castor-model\n'
    # Write the ground truth (qrel) and system output (results) in TREC format.
    with open(qrel_fname, 'w') as f1, open(results_fname, 'w') as f2:
        # docno is simply the row index within this evaluation batch.
        docnos = range(len(qids))
        for qid, docno, predicted, actual in zip(qids, docnos, predictions, labels):
            f1.write(qrel_template.format(qid=qid, docno=docno, rel=actual))
            f2.write(results_template.format(qid=qid, docno=docno, sim=predicted))
    # The bundled trec_eval binary is expected next to this source file.
    trec_eval_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  'trec_eval-9.0.5/trec_eval')
    trec_out = subprocess.check_output([trec_eval_path, '-m', 'map', '-m',
                                        'recip_rank', qrel_fname, results_fname])
    # trec_eval prints one metric per line as "<metric>\tall\t<value>".
    trec_out_lines = str(trec_out, 'utf-8').split('\n')
    mean_average_precision = float(trec_out_lines[0].split('\t')[-1])
    mean_reciprocal_rank = float(trec_out_lines[1].split('\t')[-1])
    if keep_results:
        print("Saving prediction file to {}".format(results_fname))
        print("Saving qrel file to {}".format(qrel_fname))
    else:
        os.remove(results_fname)
        os.remove(qrel_fname)
    return mean_average_precision, mean_reciprocal_rank
0ddc68fd12afebd2954e0c71e1bc58ffd0f6a1bf
3,627,906
from pathlib import Path
from typing import Union
from typing import List


def glob_suffixes(root_path: Path, suffixes: Union[List[str], str]) -> List[Path]:
    """Recursively collect files under *root_path* whose suffix matches.

    A single suffix string is accepted and treated as a one-element list.
    The result is sorted for deterministic ordering.
    """
    if isinstance(suffixes, str):
        suffixes = [suffixes]
    matching = (entry for entry in root_path.glob("**/*")
                if file_suffix_in(entry, suffixes))
    return sorted(matching)
51847ffcc125d4307f83938e31d6cd5f049598d5
3,627,907
from niworkflows.interfaces import SimpleBeforeAfter
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.images import extract_wm


def init_sdc_unwarp_report_wf(name='sdc_unwarp_report_wf', forcedsyn=False):
    """
    Save a reportlet showing how SDC unwarping performed.

    This workflow generates and saves a reportlet showing the effect of fieldmap
    unwarping a BOLD image.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
            wf = init_sdc_unwarp_report_wf()

    Parameters
    ----------
    name : str, optional
        Workflow name (default: ``sdc_unwarp_report_wf``)
    forcedsyn : bool, optional
        Whether SyN-SDC was forced.

    Inputs
    ------
    in_pre
        Reference image, before unwarping
    in_post
        Reference image, after unwarping
    in_seg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    in_xfm
        Affine transform from T1 space to BOLD space (ITK format)

    """
    DEFAULT_MEMORY_MIN_GB = 0.01
    workflow = pe.Workflow(name=name)

    # Single entry point carrying the four expected inputs.
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_pre', 'in_post', 'in_seg', 'in_xfm']), name='inputnode')

    # Resample the anatomical segmentation into BOLD space (nearest-label).
    map_seg = pe.Node(ApplyTransforms(
        dimension=3, float=True, interpolation='MultiLabel'),
        name='map_seg', mem_gb=0.3)

    # Extract the white-matter contour used as an overlay in the reportlet.
    sel_wm = pe.Node(niu.Function(function=extract_wm), name='sel_wm',
                     mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Before/after flicker figure comparing pre- vs post-unwarping references.
    bold_rpt = pe.Node(SimpleBeforeAfter(), name='bold_rpt', mem_gb=0.1)
    ds_report_sdc = pe.Node(
        DerivativesDataSink(desc='sdc' if not forcedsyn else 'forcedsyn',
                            suffix='bold'), name='ds_report_sdc',
        mem_gb=DEFAULT_MEMORY_MIN_GB, run_without_submitting=True
    )

    workflow.connect([
        (inputnode, bold_rpt, [('in_post', 'after'),
                               ('in_pre', 'before')]),
        (bold_rpt, ds_report_sdc, [('out_report', 'in_file')]),
        (inputnode, map_seg, [('in_post', 'reference_image'),
                              ('in_seg', 'input_image'),
                              ('in_xfm', 'transforms')]),
        (map_seg, sel_wm, [('output_image', 'in_seg')]),
        (sel_wm, bold_rpt, [('out', 'wm_seg')]),
    ])

    return workflow
097d576e3df8f3c11aade2b00362ba14ce4ec32a
3,627,908
def maxabs_normalization(X, *Xtest):
    """Scale features into the [-1, 1] range.

    Each column is divided by its maximum absolute value computed on X.
    Any test arrays are scaled with the statistics computed on X.

    Parameters
    ----------
    X : ndarray, shape (m, n)
        input features (one row per feature vector).
    Xtest : ndarray, shape (mtest, n) or None
        zero or more arrays of test features (one row per feature vector).

    Returns
    -------
    ndarray, shape (m, n)
        normalized features.
    ndarray, shape (mtest, n)
        normalized test features (one for each array in Xtest).
    """
    _check_all_same_size(X, *Xtest)
    # Clamp to 1e-15 so an all-zero column cannot cause division by zero.
    scale = np.maximum(np.abs(X).max(0), 1e-15)
    scaled_train = X / scale
    if not Xtest:
        return scaled_train
    scaled_tests = tuple(arr / scale for arr in Xtest)
    return (scaled_train,) + scaled_tests
f73e689d97cb5aea59dd7340375c95c00dae4843
3,627,909
def authenticate_user(email, password):
    """Authenticate a user by email and password.

    :param email: address used to look the account up.
    :param password: clear-text password to verify.
    :return: the user object on success, ``False`` when the account does not
        exist or the password does not match.
    """
    user = get_user(email)
    if not user:
        return False
    # Verify the password against the stored hash. (The original code held
    # this explanation in a bare string literal, which is a no-op expression
    # statement, not a comment.)
    if not verify_password(password, user.hashed_password):
        return False
    return user
307cc1b8fcb1b00d437a573f174e4b7a63bcde80
3,627,910
import os
import re


def new_files(filetype):
    """
    Find new files to download from the Census' FTP webpage.

    Parameters:
        filetype (character): The type of file you want to search for
            (.txt, .zip, ect.)

    Returns (list): List of files not found in local directory
    """
    pattern = pub_filename(filetype)
    pub_links = cps_ftp_links(filetype)
    # Set gives O(1) membership tests when filtering below.
    present = set(os.listdir())

    pub_files = {}
    for link in pub_links:
        # Search once and reuse the match (the original searched twice).
        match = re.search(pattern, link)
        if match:
            pub_files[match.group(0)] = link

    # Keep only the links whose filename is not already on disk.
    return [link for fname, link in pub_files.items() if fname not in present]
e2ef5997629f4a05838daf3e368fd08b123922d2
3,627,911
def positive_integer(v):
    """Return True when *v* is both an integer and positive."""
    if not integer(v):
        return False
    return positive(v)
553f79b2b0991b0f9fcfbaec01d715c3f46fe9d2
3,627,912
def precipitation():
    """Get the date and corresponding precipitation level or a 404 if not."""
    # Twelve months back from the dataset's last observation date.
    cutoff_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    rows = (session.query(Measurement.date, Measurement.prcp)
            .filter(Measurement.date > cutoff_date)
            .order_by(Measurement.date)
            .all())
    # One {"Date": ..., "Prcp": ...} record per measurement row.
    all_prcp = [{"Date": date, "Prcp": prcp} for date, prcp in rows]
    return jsonify(all_prcp)
9bf72cb3f5ab0f2fc8e53fbfa6b2a5241ba2d374
3,627,913
def alias_setup(probs):
    """
    Build tables for the alias sampling method (Walker's alias method).

    :param probs: the probability distribution to build alias table

    Returns
    -------
    alias table ``J`` and probability table ``q`` for probs
    """
    K = len(probs)
    q = np.zeros(K)
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and is what np.int aliased.
    J = np.zeros(K, dtype=int)

    # Partition outcomes into those below/above the uniform probability 1/K.
    smaller = list()
    larger = list()
    for k, prob in enumerate(probs):
        q[k] = K * prob
        if q[k] < 1.0:
            smaller.append(k)
        else:
            larger.append(k)

    # Pair each under-full cell with an over-full one; the over-full cell
    # donates mass and may itself become under-full.
    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()
        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)

    return J, q
bc053c4fe35571a2cc2ca31230f3fa2293ad69b1
3,627,914
def sv_variant(institute_id, case_name, variant_id):
    """Display a specific structural variant."""
    return controllers.sv_variant(store, institute_id, case_name, variant_id)
4e837d7e71173e800e9b47e564ae554615838410
3,627,915
import torch


def box_yxyx_to_cxcywh(x):
    """Convert boxes from [y0, x0, y1, x1] to [center_x, center_y, w, h]."""
    y0, x0, y1, x1 = torch.split(x, 1, dim=-1)
    center_x = (x0 + x1) / 2
    center_y = (y0 + y1) / 2
    width = x1 - x0
    height = y1 - y0
    return torch.cat([center_x, center_y, width, height], dim=-1)
631e5015d6d74fab6cd334ee0db32242e0462cb6
3,627,916
from collections import defaultdict


def deserialize_attributes(data, sep, original_class=None, original_pk=None):
    """
    Deserialize the attributes from the format internally stored in the DB
    to the actual format (dictionaries, lists, integers, ...

    :param data: must be a dictionary of dictionaries. In the top-level
        dictionary, the key must be the key of the attribute. The value must
        be a dictionary with the following keys: datatype, tval, fval, ival,
        bval, dval. Other keys are ignored.
        NOTE that a type check is not performed! tval is expected to be a
        string, dval a date, etc.
    :param sep: a string, the separator between subfields (to separate the
        name of a dictionary from the keys it contains, for instance)
    :param original_class: if these elements come from a specific subclass
        of DbMultipleValueAttributeBaseClass, pass here the class (note: the
        class, not the instance!). This is used only in case the wrong number
        of elements is found in the raw data, to print a more meaningful
        message (if the class has a dbnode associated to it)
    :param original_pk: if the elements come from a specific subclass of
        DbMultipleValueAttributeBaseClass that has a dbnode associated to it,
        pass here the PK integer. This is used only in case the wrong number
        of elements is found in the raw data, to print a more meaningful
        message

    :return: a dictionary, where for each entry the corresponding value is
        returned, deserialized back to lists, dictionaries, etc.
        Example: if ``data = {'a': {'datatype': "list", "ival": 2, ...},
        'a.0': {'datatype': "int", "ival": 2, ...},
        'a.1': {'datatype': "txt", "tval": "yy"}}``,
        it will return ``{"a": [2, "yy"]}``
    """
    # NOTE(review): this code uses dict.iteritems() and is therefore
    # Python 2 only.
    # I group results by zero-level entity
    found_mainitems = {}
    found_subitems = defaultdict(dict)
    for mainkey, descriptiondict in data.iteritems():
        # Split "a.0" into prefix "a" and postfix "0"; a top-level key has
        # no separator, so thissep is the empty string (falsy).
        prefix, thissep, postfix = mainkey.partition(sep)
        if thissep:
            found_subitems[prefix][postfix] = {k: v for k, v
                                               in descriptiondict.iteritems()
                                               if k != "key"}
        else:
            mainitem = descriptiondict.copy()
            mainitem['key'] = prefix
            found_mainitems[prefix] = mainitem

    # There can be mainitems without subitems, but there should not be
    # subitems without mainitems.
    lone_subitems = set(found_subitems.keys()) - set(found_mainitems.keys())
    if lone_subitems:
        raise DeserializationException("Missing base keys for the following "
                                       "items: {}".format(",".join(lone_subitems)))

    # For each zero-level entity, I call the _deserialize_attribute function
    retval = {}
    for k, v in found_mainitems.iteritems():
        # Note: found_subitems[k] will return an empty dictionary if the
        # key does not exist, as it is a defaultdict
        retval[k] = _deserialize_attribute(mainitem=v,
                                           subitems=found_subitems[k],
                                           sep=sep,
                                           original_class=original_class,
                                           original_pk=original_pk)

    return retval
21fb26c20bb1b265eb7380ebf0f7536b928b9db7
3,627,917
from typing import Union
from typing import Dict
from typing import Optional


def get_config_float(
    current: Union[int, float], config: Dict[str, str], name: str
) -> Optional[float]:
    """
    Convenience function to get config values as float.

    :param current: current config value to use when one is not provided
    :param config: config to get values from
    :param name: name of config value to get
    :return: current config value when not provided, ``None`` when the stored
        value is an empty string, the parsed float otherwise
    """
    raw = config.get(name)
    if raw is None:
        return current
    if raw == "":
        return None
    return float(raw)
d2bb436c4b2b4aef35a8f46927bc9145ecfed04c
3,627,918
import argparse


def getargs():
    """Parse command-line arguments for the triplet-generation script."""
    arg_parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument('ori_lst', type=str,
                            help='original image list')
    arg_parser.add_argument('tri_lst', type=str,
                            help='triplet list')
    arg_parser.add_argument('num_tri', type=int, nargs='?', default=1000000,
                            help='num of triplets')
    arg_parser.add_argument("--log", type=str, default="INFO",
                            help="log level")
    return arg_parser.parse_args()
a84b53ebdb54e9170c4b87fd7cda85f47691c4a2
3,627,919
import os
import io
import json


def _collect_finish_info(container_dir):
    """Read exitinfo, aborted, oom and terminated files to check how container
    finished.

    :returns ``(exitinfo, aborted, oom, terminated)``:
        Returns a tuple of exitinfo, a ``(service_name,return_code,signal)``
        if present or None otherwise, aborted reason if present or None
        otherwise, oom (True if container was killed due to oom event or
        False otherwise), terminated (True if container was
        terminated/evicted, False otherwise).
    """
    # exitinfo: JSON file; missing file -> None, unparseable file -> {}.
    exitinfo = None
    exitinfo_file = os.path.join(container_dir, 'exitinfo')
    try:
        with io.open(exitinfo_file) as f:
            try:
                exitinfo = json.load(f)
            except ValueError:
                _LOGGER.warning('Invalid exitinfo file: %s', exitinfo_file)
                exitinfo = {}
    except IOError:
        _LOGGER.debug('exitinfo file does not exist: %s', exitinfo_file)

    # aborted: JSON file; missing file -> None, unparseable -> ABORTED_UNKNOWN.
    aborted = None
    aborted_file = os.path.join(container_dir, 'aborted')
    try:
        with io.open(aborted_file) as f:
            try:
                aborted = json.load(f)
            except ValueError:
                _LOGGER.warning('Invalid aborted file: %s', aborted_file)
                aborted = app_abort.ABORTED_UNKNOWN
    except IOError:
        _LOGGER.debug('aborted file does not exist: %s', aborted_file)

    # oom / terminated are plain marker files: existence is the signal.
    oom_file = os.path.join(container_dir, 'oom')
    oom = os.path.exists(oom_file)
    if not oom:
        _LOGGER.debug('oom file does not exist: %s', oom_file)

    terminated_file = os.path.join(container_dir, 'terminated')
    terminated = os.path.exists(terminated_file)
    if not terminated:
        _LOGGER.debug('terminated file does not exist: %s', terminated_file)

    return exitinfo, aborted, oom, terminated
7084bbb7cfd36d17b7db2a91ce4edcd313fb7d89
3,627,920
def getRatingDistributionOfAMovie(ratingRDD, movieID):
    """ Get the rating distribution of a specific movie
    Args:
        ratingRDD: a RDD containing tuples of (UserID, MovieID, Rating)
        movieID: the ID of a specific movie
    Returns:
        [(rating score, number of this rating score)]
    """
    matching = ratingRDD.filter(lambda row: row[1] == movieID)
    return matching.map(lambda row: (row[2], 1)).countByKey()
708c67e51d318b887deea1ec3ec4dc4a272e794e
3,627,921
import math


def decode_fsw(fsw_array):
    """Parse raw FSW words into a list of present fault names.

    Each fault group is extracted from its word in ``fsw_array`` with a mask
    and shift, then every set bit is looked up in the group's decode
    dictionary.

    :param fsw_array: sequence of raw FSW status words.
    :return: list of decoded fault-name strings.
    """
    faultGroup = [0] * NUM_OF_FAULTGROUPS
    faultGroup[FAULTGROUP_TRANSIENT] = 0
    faultGroup[FAULTGROUP_CRITICAL] = (
        (fsw_array[FSW_CRITICAL_FAULTS_INDEX] & FSW_CRITICAL_FAULTS_MASK)
        >> FSW_CRITICAL_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_COMM] = (
        (fsw_array[FSW_COMM_FAULTS_INDEX] & FSW_COMM_FAULTS_MASK)
        >> FSW_COMM_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_SENSORS] = (
        (fsw_array[FSW_SENSORS_FAULTS_INDEX] & FSW_SENSORS_FAULTS_MASK)
        >> FSW_SENSORS_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_IMU] = (
        (fsw_array[FSW_IMU_FAULTS_INDEX] & FSW_IMU_FAULTS_MASK)
        >> FSW_IMU_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_MD] = (
        (fsw_array[FSW_MD_FAULTS_INDEX] & FSW_MD_FAULTS_MASK)
        >> FSW_MD_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_ARCHITECTURE] = (
        (fsw_array[FSW_ARCH_FAULTS_INDEX] & FSW_ARCH_FAULTS_MASK)
        >> FSW_ARCH_FAULTS_SHIFT)
    faultGroup[FAULTGROUP_INTERNAL] = (
        (fsw_array[FSW_INTERNAL_FAULTS_INDEX] & FSW_INTERNAL_FAULTS_MASK)
        >> FSW_INTERNAL_FAULTS_SHIFT)

    # MCU specific faults get a special category because there is more
    # information. NOTE(review): this list is built but never used or
    # returned here — kept for parity with the original; confirm intent.
    mcu_specific_group = []
    mcu_specific_group.append(fsw_array[4])
    mcu_specific_group.append(fsw_array[5])
    mcu_specific_group.append(fsw_array[6])
    mcu_specific_group.append(fsw_array[7])

    # One (group index, decode dictionary) pair per fault group; this
    # replaces seven copy-pasted bit-scan loops.
    group_decoders = (
        (FAULTGROUP_CRITICAL, critical_fault_decode),
        (FAULTGROUP_COMM, comm_fault_decode),
        (FAULTGROUP_SENSORS, sensor_fault_decode),
        (FAULTGROUP_IMU, imu_fault_decode),
        (FAULTGROUP_MD, md_fault_decode),
        (FAULTGROUP_ARCHITECTURE, arch_fault_decode),
        (FAULTGROUP_INTERNAL, internal_fault_decode),
    )

    # Check every bit in each fault group and look up the fault name.
    # 1 << bit replaces int(math.pow(2, bit)): exact integer arithmetic
    # instead of a float round-trip.
    faults_present = []
    for group_index, decode_map in group_decoders:
        group_bits = faultGroup[group_index]
        for bit in range(32):
            single_bit = group_bits & (1 << bit)
            if single_bit:
                faults_present.append(decode_map[single_bit])

    return faults_present
421dfea23813e73d12918f361063614d3cbf33a8
3,627,922
def lower_allbutfirst_letter(mystring):
    """Upper-case the first character and lower-case all following ones.

    The empty string is returned unchanged (the previous version raised
    IndexError on it).
    """
    if not mystring:
        return mystring
    return mystring[0].upper() + mystring[1:].lower()
860d1449865790e15ccc840ee85ea366b2de5a64
3,627,923
def get_world_size():
    """Replace linklink.get_world_size.

    Prefers the world size published in the environment; falls back to the
    distributed backend; returns 1 when neither source is available.
    """
    try:
        world_size = get_world_size_from_env()
        if world_size is not None:
            return world_size
        # The original had an unreachable duplicate of this call after the
        # try/except; it has been removed.
        return dist.get_world_size()
    except Exception:  # noqa  # backend may be uninitialised: single process
        return 1
2e308370e42d9bc847488efb8c17e46344060839
3,627,924
def orginal(S, R, RT, nNodes=20550):
    """Reconstruct full flow-field data from POD-reduced variables.

    (Name kept misspelled for backward compatibility with existing callers.)

    Parameters
    ----------
    S : array
        Reduced-variable snapshots, one column per snapshot.
    R : array
        Basis functions (e.g. from a singular value decomposition).
    RT : array
        The transpose of ``R``.
    nNodes : integer, optional
        Number of nodes of each solution; each reconstructed snapshot has
        ``2 * nNodes`` entries.

    Returns
    -------
    orginal : array, shape (2 * nNodes, N)
        The data reconstructed from the reduced variables.
    """
    N = S.shape[1]
    orginal = np.zeros([2 * nNodes, N])
    # Reduce then expand all snapshots with two matrix products instead of a
    # per-column Python loop; assigning into the pre-allocated array keeps
    # the original's implicit shape check against 2 * nNodes.
    orginal[:, :] = R @ (RT @ S)
    return orginal
1fa4bd17afdc83d8209cd1b97082594d89382d98
3,627,925
import random


def strtest(aString):
    """Return the characters of *aString* shuffled into a random order."""
    shuffled_chars = random.sample(aString, len(aString))
    return "".join(shuffled_chars)
28bb6ed6b9f3a10ea19fbebb8ef60091123b0fd5
3,627,926
import urllib


def build_url(base_url=DEFAULT_BASE_URL, command=None):
    """Append a command (if it exists) to a base URL.

    Args:
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is
            http://127.0.0.1:1234 and the latest version of the CyREST API
            supported by this version of py4cytoscape.
        command (str): the command (if any) to append to the base_url

    Returns:
        str: URL composed of base and URL-encoded command

    Raises:
        none

    Examples:
        >>> build_url()
        'http://127.0.0.1:1234/v1'
        >>> build_url('collections/1043355/tables/default')
        'http://127.0.0.1:1234/v1/collections/1043355/tables/default'
    """
    if command is None:
        return base_url
    return base_url + "/" + urllib.parse.quote(command)
5dc322f459bdf8d4f58ab7c8da78dbcf40e2696f
3,627,927
from typing import Optional


def get_first_bonding_box(boxes: BoundingBoxes) -> Optional[BoundingBox]:
    """
    Return the first detected bounding box belonging to the trash class.

    :param boxes: the list of detected bounding boxes
    :return: the first box whose class is a trash class, or None if there
        is no such box
    """
    return next((box for box in boxes if box.Class in trash_classes), None)
3c9244983af82287f6044579d813ea792c53f7cb
3,627,928
def svn_diff_contains_diffs(*args):
    """svn_diff_contains_diffs(svn_diff_t diff) -> svn_boolean_t"""
    # SWIG-generated thin wrapper: forwards directly to the C extension.
    return _diff.svn_diff_contains_diffs(*args)
76a47b063dcf14088bb05d46fb1d8cf3346c5401
3,627,929
def init_shared_manager(items):
    """Register a proxy for every class in *items*, then start and return a
    SyncManager."""
    for item_cls in items:
        SyncManager.register(item_cls.__name__, item_cls, create_proxy(item_cls))
    manager = SyncManager()
    manager.start()
    return manager
62a4ce5b2bf5eb1b178104ced772ec08ad9778c9
3,627,930
def cnn2d(image: np.ndarray, filters: np.ndarray):
    """Naive (loop-based) 2-D convolution.

    Args:
        image: (hi, wi, cin).
        filters: (hf, wf, cin, cout).

    Returns:
        (hi, wi, cout)
    """
    filter_len, image_padded = pad_image(filters, image)
    height, width = image.shape[0], image.shape[1]
    n_out_channels = filters.shape[-1]
    out = np.zeros([height, width, n_out_channels])
    for row in range(height):
        for col in range(width):
            # The 2-D slice of the padded 3-D image keeps all input channels.
            patch = image_padded[row: row + filter_len,
                                 col: col + filter_len]
            for ch in range(n_out_channels):
                out[row, col, ch] += (filters[:, :, :, ch] * patch).sum()
    return out
5b3710736912fe0d7ee6b815fd8fa2e7cac013a4
3,627,931
def mongo_uses_error_check(store):
    """
    Does mongo use the error check as a separate message?
    """
    if not hasattr(store, 'modulestores'):
        return False
    # Mixed stores delegate to each contained store.
    return any(mongo_uses_error_check(sub) for sub in store.modulestores)
52d4a5135531ff18b0e19ac7aa91a453a2e736f1
3,627,932
def bootstrap_consensus(msa, times, tree_constructor, consensus):
    """Consensus tree of a series of bootstrap trees for a multiple sequence
    alignment.

    :Parameters:
        msa : MultipleSeqAlignment
            Multiple sequence alignment to generate replicates.
        times : int
            Number of bootstrap times.
        tree_constructor : TreeConstructor
            Tree constructor to be used to build trees.
        consensus : function
            Consensus method in this module: ``strict_consensus``,
            ``majority_consensus``, ``adam_consensus``.
    """
    replicate_trees = list(bootstrap_trees(msa, times, tree_constructor))
    return consensus(replicate_trees)
ee5aa0f4a2457a55ad9b975606c39a88f721d0cc
3,627,933
def get_snapshot_seconds():
    """Returns the amount of time in seconds between snapshots of a fuzzer's
    corpus during an experiment."""
    # Configurable via SNAPSHOT_PERIOD; otherwise the module default applies.
    return environment.get('SNAPSHOT_PERIOD', DEFAULT_SNAPSHOT_SECONDS)
fc6d1d940b64c69e202ba2e8ac32d45681c24c19
3,627,934
import random
import re
import collections


def realize_question(dialog, template, filter_objs):
    """Samples attributes for template using filtered objects.

    In addition, creates scene graph for the new information added.

    Args:
        scene: Current scene graph
        template: Text template to use to generate questions
        filter_objs: Set of objects satisfying constraints of current template

    Returns:
        sample: Contains the text realization and scene graph
    """
    # Number of inputs.
    n_inputs = template.get('inputs', 0)
    # Sample a text template.
    text_sample = random.choice(template['text'])
    text_sample_index = template['text'].index(text_sample)

    # Extract attribute tags and get them into groups.
    # NOTE(review): pattern is not a raw string; fine here, but r'...' would
    # be the conventional spelling.
    tags = re.findall('(<[\d\w]*>)', text_sample)
    tag_groups = collections.defaultdict(list)
    for tag in tags:
        group_id = get_tag_group(tag)
        tag_groups[group_id].append(tag)

    # Sample a random element from filtered.
    arg_sample = random.choice(filter_objs)

    # Remove tags from text not allowed by filter_objs.
    for arg_ind in range(n_inputs):
        obj_sample = arg_sample['objects'][arg_ind]
        avail_attrs = obj_sample['optional'] + obj_sample['required']

        # Iterate a reversed copy so removal during iteration is safe.
        for ii in tag_groups[arg_ind][::-1]:
            if mapping(ii) not in avail_attrs:
                tag_groups[arg_ind].remove(ii)
                text_sample = replace_attribute(text_sample, ii, arg_sample, True)

        # Assert that all required attributes are present as tags.
        for attribute in obj_sample['required']:
            required_tag = inv_mapping(attribute, arg_ind)
            # Make an exception for <R> and <P>
            if required_tag == '<R>' and '<P>' in tag_groups[arg_ind]:
                continue
            assert required_tag in tag_groups[arg_ind], \
                'A required attribute is missing in template!'

        # Start compiling tags to keep.
        tags_to_keep = [inv_mapping(ii, arg_ind) for ii in obj_sample['required']]

        # Filter out those not present in text template.
        optional_tags = [inv_mapping(ii, arg_ind) for ii in obj_sample['optional']]
        optional_tags = [ii for ii in optional_tags if ii in tag_groups[arg_ind]]

        # If tags_to_keep is empty, sample from optional with (1:70, 2:25, 3:5).
        if len(optional_tags) > 0:
            if len(tags_to_keep) > 0:
                n_tags_sample = [0, 1, 2]
            else:
                n_tags_sample = [1, 2, 3]
            n_sample = np.random.choice(n_tags_sample, 1,
                                        p=gvars.METAINFO['probabilities'],
                                        replace=False)
            # Lower cap at the length of optional.
            n_sample = min(n_sample[0], len(optional_tags))
            if n_sample > 0:
                tags_to_keep += random.sample(optional_tags, n_sample)

        # Now create a dictionary of placeholders with actual attribute values.
        for tag in tag_groups[arg_ind]:
            remove = tag not in tags_to_keep
            text_sample = replace_attribute(text_sample, tag, arg_sample, remove)

    # Record info and merge scene graphs.
    dialog_datum = {'question': text_sample,
                    'answer': arg_sample['answer'],
                    'template': template['label']}
    # Store the template metadata (minus the bulky text list) plus the index
    # of the chosen text variant.
    dialog['template_info'].append(template.copy())
    del dialog['template_info'][-1]['text']
    dialog['template_info'][-1]['index'] = text_sample_index
    dialog['dialog'].append(dialog_datum)
    graph_item = arg_sample['graph']

    # If mergeable, add it to the objects list.
    dialog['graph'] = utils.merge_update_scene_graph(dialog['graph'], graph_item)

    # If there are volatile objects in the graph item, remove them.
    for obj in graph_item['objects'][::-1]:
        if obj.get('volatile', False):
            graph_item['objects'].remove(obj)
    dialog['graph']['history'].append(graph_item)
    return dialog
512c92025ee6ac0b93ac49be9a3957df9b8f241e
3,627,935
def coroutine(func):
    """
    _coroutine_

    Decorator that primes a coroutine: calling the decorated generator
    function creates the generator and advances it to the first ``yield``
    so that callers can ``send()`` to it immediately.
    """
    # Local import keeps this block self-contained within the snippet.
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        next(cr)  # advance to the first yield
        return cr
    return start
f096958d45cb391e0f12e5bfd162e5250085bb52
3,627,936
import os
import csv


def getCountry(isoFile, tzdir, verbose):
    """Get the dictionary containing the iso3166 country code to name
    conversion

    Args:
        isoFile (string): file name of the iso3166.tab file
        tzdir (string): path holding the zoneFile
        verbose (string): verbosity level

    Returns:
        dict: dictionary containing the conversion from iso3166 country
        code to country name
    """
    fpath = os.path.join(tzdir, isoFile)
    pout("processing {path}".format(path=fpath), verbose, Level.DEBUG)
    with open(fpath, encoding="utf-8") as fp:
        # Skip comment lines before handing the stream to the CSV reader.
        data_lines = (line for line in fp if line[0] != '#')
        reader = csv.reader(data_lines, delimiter='\t')
        return {row[0]: row[1] for row in reader}
ce1f7aca4e4f4bf5f9e7515b5f99cd7cce189645
3,627,937
from typing import Union
from typing import Optional
from typing import Tuple


def image_to_tensor(
        image: Union[PILImage, np.ndarray, str],
        roi: Optional[Rect] = None,
        output_size: Optional[Tuple[int, int]] = None,
        keep_aspect_ratio: bool = False,
        output_range: Tuple[float, float] = (0., 1.),
        flip_horizontal: bool = False
) -> ImageTensor:
    """Load an image into an array and return data, image size, and padding.

    This function combines the mediapipe calculator-nodes ImageToTensor,
    ImageCropping, and ImageTransformation into one function.

    Args:
        image (Image|ndarray|str): Input image; preferably RGB, but will be
            converted if necessary; loaded from file if a string is given

        roi (Rect|None): Location within the image where to convert; can be
            `None`, in which case the entire image is converted. Rotation is
            supported.

        output_size (tuple|None): Tuple of `(width, height)` describing the
            output tensor size; defaults to ROI if `None`.

        keep_aspect_ratio (bool): `False` (default) will scale the image to
            the output size; `True` will keep the ROI aspect ratio and apply
            letterboxing.

        output_range (tuple): Tuple of `(min_val, max_val)` containing the
            minimum and maximum value of the output tensor.
            Defaults to (0, 1).

        flip_horizontal (bool): Flip the resulting image horizontally if set
            to `True`. Default: `False`

    Returns:
        (ImageTensor) Tensor data, padding for reversing letterboxing and
        original image dimensions.
    """
    img = _normalize_image(image)
    image_size = img.size
    if roi is None:
        # Default ROI: the full image (normalized center/extent).
        roi = Rect(0.5, 0.5, 1.0, 1.0, rotation=0.0, normalized=True)
    roi = roi.scaled(image_size)
    if output_size is None:
        output_size = (int(roi.size[0]), int(roi.size[1]))
    # With letterboxing, transform at ROI resolution first and resize later.
    width, height = (roi.size if keep_aspect_ratio    # type: ignore[misc]
                     else output_size)
    src_points = roi.points()
    dst_points = [(0., 0.), (width, 0.), (width, height), (0., height)]
    # Map the (possibly rotated) ROI quadrilateral onto an axis-aligned
    # rectangle via a perspective transform.
    coeffs = _perspective_transform_coeff(src_points, dst_points)
    roi_image = img.transform(size=(width, height), method=Image.PERSPECTIVE,
                              data=coeffs, resample=Image.LINEAR)
    # free some memory - we don't need the temporary image anymore
    if img != image:
        img.close()
    pad_x, pad_y = 0., 0.
    if keep_aspect_ratio:
        # perform letterboxing if required
        out_aspect = output_size[1] / output_size[0]  # type: ignore[index]
        roi_aspect = roi.height / roi.width
        new_width, new_height = int(roi.width), int(roi.height)
        if out_aspect > roi_aspect:
            new_height = int(roi.width * out_aspect)
            pad_y = (1 - roi_aspect / out_aspect) / 2
        else:
            new_width = int(roi.height / out_aspect)
            pad_x = (1 - out_aspect / roi_aspect) / 2
        if new_width != int(roi.width) or new_height != int(roi.height):
            pad_h, pad_v = int(pad_x * new_width), int(pad_y * new_height)
            # EXTENT with negative offsets pads the image symmetrically.
            roi_image = roi_image.transform(
                size=(new_width, new_height), method=Image.EXTENT,
                data=(-pad_h, -pad_v, new_width - pad_h, new_height - pad_v))
        roi_image = roi_image.resize(output_size, resample=Image.BILINEAR)
    if flip_horizontal:
        roi_image = roi_image.transpose(method=Image.FLIP_LEFT_RIGHT)
    # finally, apply value range transform (linear remap from [0, 255])
    min_val, max_val = output_range
    tensor_data = np.asarray(roi_image, dtype=np.float32)
    tensor_data *= (max_val - min_val) / 255
    tensor_data += min_val
    return ImageTensor(tensor_data,
                       padding=(pad_x, pad_y, pad_x, pad_y),
                       original_size=image_size)
cadbe61514321c2bac9ba3d96d2f3243fdfcc9b0
3,627,938
from io import StringIO
import sys


def get_policy_map(policy, world_shape, mode='human'):
    """
    Generates a visualization grid from the policy to be able to print which
    action is most likely from every state.

    :param policy: 2-D array of per-state action probabilities
        (states x 4 actions).
    :param world_shape: grid shape passed to ``reshape_as_griduniverse``.
    :param mode: 'ansi' writes to a StringIO buffer, anything else to stdout.
    :return: tuple of (flat per-state arrow strings, per-state probability
        grid reshaped to ``world_shape``).
    """
    # FIX: the original array was missing a comma after u'\u2190', which
    # concatenated it with u'\u2194'; the '<U1' dtype then truncated the
    # pair to a single character, leaving only five arrows and dropping
    # the left-right arrow entirely.
    unicode_arrows = np.array([u'\u2191', u'\u2192', u'\u2193', u'\u2190',  # up, right, down, left
                               u'\u2194', u'\u2195'], dtype='<U1')          # left-right, up-down
    policy_arrows_map = np.empty(policy.shape[0], dtype='<U4')
    for state in np.nditer(np.arange(policy.shape[0])):
        # find index of actions where the probability is > 0
        # todo could be cleaner
        optimal_actions = np.where(np.around(policy[state], 8) > np.around(np.float64(0), 8))[0]
        # match actions to unicode values of the arrows to be displayed
        for action in optimal_actions:
            policy_arrows_map[state] = np.core.defchararray.add(policy_arrows_map[state],
                                                                unicode_arrows[action])
    # NOTE: the structured dtype assumes exactly four actions per state.
    policy_probabilities = np.fromiter((policy[state]
                                        for state in np.nditer(np.arange(policy.shape[0]))),
                                       dtype='float64, float64, float64, float64')
    outfile = StringIO() if mode == 'ansi' else sys.stdout
    for row in reshape_as_griduniverse(policy_arrows_map, world_shape):
        for state in row:
            outfile.write((state + u' '))
        outfile.write('\n')
    outfile.write('\n')
    return policy_arrows_map, reshape_as_griduniverse(policy_probabilities, world_shape)
5db1f6a97cca7d90d709e8a2bcdf655e4d5ecfb1
3,627,939
def _setup_request(bucket_acl=None, object_acl=None):
    """
    add a foo key, and specified key and bucket acls to a
    (new or existing) bucket.

    :param bucket_acl: canned ACL string applied to the bucket, or None
        to leave the bucket ACL untouched.
    :param object_acl: canned ACL string applied to the 'foo' key, or
        None to leave the object ACL untouched.
    :return: tuple of (bucket, key) for the created fixture.
    """
    # _create_keys (defined elsewhere in this file) creates/returns a
    # bucket pre-populated with the listed keys.
    bucket = _create_keys(keys=['foo'])
    key = bucket.get_key('foo')

    if bucket_acl is not None:
        bucket.set_acl(bucket_acl)
    if object_acl is not None:
        key.set_acl(object_acl)

    return (bucket, key)
be9d09c6ddadaa6a55d84e3e8385bf46aa06efc9
3,627,940
def _some_lt(t1: 'Tensor', t2: 'Tensor', only_value: bool) -> bool:
    """Return True when at least one element of t1.data is < the
    corresponding element of t2.data.

    :param t1: left-hand tensor
    :param t2: right-hand tensor
    :param only_value: must be True; any other mode is not implemented
    :return: whether any element-wise comparison holds
    """
    count_below = np.sum(t1.data < t2.data)
    if not only_value:
        raise NotImplementedError
    return count_below > 0
8b816eeb57a41611275c7dc2f4d018134e387a01
3,627,941
from typing import Optional


def get_folders(parent_id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFoldersResult:
    """
    Retrieve information about a set of folders based on a parent ID. See the
    [REST API](https://cloud.google.com/resource-manager/reference/rest/v3/folders/list)
    for more details.

    ## Example Usage
    ### Searching For Folders At The Root Of An Org

    ```python
    import pulumi
    import pulumi_gcp as gcp

    my_org_folders = gcp.organizations.get_folders(parent_id=f"organizations/{var['organization_id']}")
    first_folder = gcp.organizations.get_folder(folder=my_org_folders.folders[0].name)
    ```


    :param str parent_id: A string parent as defined in the [REST API](https://cloud.google.com/resource-manager/reference/rest/v3/folders/list#query-parameters).
    """
    # Provider-generated invoke wrapper: marshal args, fill in default
    # invoke options, then dispatch to the pulumi runtime.
    __args__ = dict()
    __args__['parentId'] = parent_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the provider plugin version when the caller did not specify one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('gcp:organizations/getFolders:getFolders', __args__, opts=opts, typ=GetFoldersResult).value

    return AwaitableGetFoldersResult(
        folders=__ret__.folders,
        id=__ret__.id,
        parent_id=__ret__.parent_id)
e507c2d36b20997859db55bce9a7f555d6ac122f
3,627,942
def bedLine(chrom, chromStart, chromEnd, name, score=None, strand=None,
            thickStart=None, thickEnd=None, itemRgb=None, blockCount=None,
            blockSizes=None, blockStarts=None):
    """ Given the fields, create a bed line string.

    The first four fields are always emitted; when ``score`` is given,
    every remaining field must also be supplied and all twelve fields
    are emitted.
    """
    line = '%s %d %d %s' % (chrom, chromStart, chromEnd, name)
    if score is None:
        return line
    # score implies the full 12-column record; no field may be missing.
    extras = (strand, thickStart, thickEnd, itemRgb,
              blockCount, blockSizes, blockStarts)
    for value in extras:
        assert(value is not None)
    line += ' %d %s %d %d %s %d %s %s' % (score, strand, thickStart, thickEnd,
                                          itemRgb, blockCount, blockSizes,
                                          blockStarts)
    return line
dd294d5d31ea3a2f7beb8a11a7ec705eb10cf1a4
3,627,943
import pickle


def tuning(sortedfile):
    """ Fit r = a0 + a1*f1.

    Regresses, per unit and per time bin, the trial-averaged response
    against the f1 stimulus values found in the pickled sorted-trials
    file.

    :param sortedfile: path to a pickle containing (t, sorted_trials),
        where sorted_trials maps f1 -> (units x time) array.
    :return: (units, a1s, pvals) where a1s/pvals are (units x time)
        arrays of regression slopes and p-values.

    NOTE(review): this is Python-2-only code (xrange, itervalues, text-mode
    pickle read) -- porting to Python 3 needs 'rb' mode and dict.values().
    """
    # Load sorted trials
    with open(sortedfile) as f:
        t, sorted_trials = pickle.load(f)

    # Get f1s
    f1s = sorted(sorted_trials.keys())

    # Active units
    units = get_active_units(sorted_trials)
    # NOTE(review): the next line immediately overwrites the previous
    # assignment, so get_active_units() is effectively dead code here --
    # all units are regressed, not just the active ones. Confirm intent.
    units = range(next(sorted_trials.itervalues()).shape[0])

    # Gather data in convenient form: (units x time x f1s)
    data = np.zeros((len(units), len(t), len(f1s)))
    for k, f1 in enumerate(f1s):
        data[:,:,k] = sorted_trials[f1][units,:]

    # Regress the response at each (unit, time bin) against f1.
    a1s = np.zeros((len(units), len(t)))
    pvals = np.zeros_like(a1s)
    for i in xrange(len(units)):
        for j in xrange(len(t)):
            slope, intercept, rval, pval, stderr = stats.linregress(f1s, data[i,j])
            a1s[i,j] = slope
            pvals[i,j] = pval

    return units, a1s, pvals
1c5c71c8ef2ce04088f164befb619ccb03eb2079
3,627,944
def balance_conversion_constraint_rule(backend_model, loc_tech, timestep):
    """
    Balance energy carrier consumption and production

    .. container:: scrolling-wrapper

        .. math::

            -1 * \\boldsymbol{carrier_{con}}(loc::tech::carrier, timestep)
            \\times \\eta_{energy}(loc::tech, timestep) =
            \\boldsymbol{carrier_{prod}}(loc::tech::carrier, timestep)
            \\times \\eta_{energy}(loc::tech, timestep)
            \\quad \\forall loc::tech \\in locs::techs_{conversion},
            \\forall timestep \\in timesteps
    """
    lookup = backend_model.__calliope_model_data["data"]["lookup_loc_techs_conversion"]
    carrier_out = lookup[("out", loc_tech)]
    carrier_in = lookup[("in", loc_tech)]

    efficiency = get_param(backend_model, "energy_eff", (loc_tech, timestep))

    produced = backend_model.carrier_prod[carrier_out, timestep]
    consumed = backend_model.carrier_con[carrier_in, timestep]
    # Production equals (negated) consumption scaled by the conversion
    # efficiency; carrier_con is negative by convention.
    return produced == -1 * consumed * efficiency
b10fa6203eb8ae0dc5054a98bce2d4ed4412a586
3,627,945
def follow(id):
    """Follow a user

    Makes the authenticated user follow the user with the given id.
    Aborts with 404 when the target user does not exist and with 409
    (conflict) when the relationship already exists. Returns an empty
    JSON object on success.
    """
    user = token_auth.current_user()
    # `or abort(404)` works because abort() raises instead of returning;
    # the right-hand side only runs when the lookup comes back falsy.
    followed_user = db.session.get(User, id) or abort(404)
    if user.is_following(followed_user):
        # Already following: report a conflict instead of silently succeeding.
        abort(409)
    user.follow(followed_user)
    db.session.commit()
    return {}
e88f1968f41f819bae4baecab006889f2d42eb0d
3,627,946
def get_rules():
    """
    Get the virtual server rules

    CLI Example:

    .. code-block:: bash

        salt '*' lvs.get_rules
    """
    # -S prints the rule set, -n suppresses host/port name resolution.
    command = "{} -S -n".format(__detect_os())
    return __salt__["cmd.run"](command, python_shell=False)
76d7e4fcb1e2fc30769011fa110cc8c98532a5ba
3,627,947
def all_user_tickets(uid, conference):
    """
    Cache-friendly version of user_tickets: returns a list of
    (ticket_id, fare_type, fare_code, complete) tuples, one per ticket
    associated to the user.
    """
    tickets = _user_ticket(User.objects.get(id=uid), conference)
    return [
        (ticket.id, ticket.fare.ticket_type, ticket.fare.code, _ticket_complete(ticket))
        for ticket in tickets
    ]
9fedff351ce896dfe6a431813c60ba7b68b4dff5
3,627,948
from typing import List


def get_titles(url: str = URL) -> List[str]:
    """Return the title of every entry in the feed at *url*."""
    return [entry.title for entry in _feed(url).entries]
325d431fdb350188077156e6a9541c21ce73986a
3,627,949
def fatorial(num):
    """
    Calcula a fatorial (computes num! iteratively).

    :param num: non-negative integer; values <= 1 yield 1
    :return: the factorial of num
    """
    resultado = 1
    # Multiplying from 2 upward; an empty range leaves the result at 1.
    for fator in range(2, num + 1):
        resultado *= fator
    return resultado
181ea2bde3acef3f6ff4311054fb209edfad6160
3,627,950
def indent_code(*code, indent: int = 1) -> str:
    """Indent multiple lines (`*code`) by the given amount, then join on newlines."""
    indented = [indent_str(line, indent, end="") for line in code]
    return "\n".join(indented) + "\n"
d78fac12e726638321799142cbd68b326ebc02f0
3,627,951
import torch


def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute the accuracy of a model on a dataset using a GPU.

    Defined in :numref:`sec_lenet`

    :param net: the model (an nn.Module or a callable)
    :param data_iter: iterable of (X, y) mini-batches
    :param device: device to evaluate on; defaults to the device of the
        model's first parameter
    :return: fraction of correctly classified samples
    """
    if isinstance(net, nn.Module):
        net.eval()  # set the model to evaluation mode
        if not device:
            device = next(iter(net.parameters())).device
    # metric[0]: number of correct predictions, metric[1]: total predictions
    metric = d2l.Accumulator(2)

    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):
                # Required for BERT fine-tuning (introduced later)
                X = [x.to(device) for x in X]
            else:
                X = X.to(device)
            y = y.to(device)
            metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
84d78795e541b5d60c8338356952c47786a35722
3,627,952
def get_rdns_from_ip(ip):
    """Basic get RDNS via gethostbyaddr.

    :param ip: IP address to lookup
    :type ip: str
    :return: Returns `hostname` if found, or empty string '' otherwise
    :rtype: str
    """
    try:
        coro = resolver.gethostbyaddr(ip)
        result = loop.run_until_complete(coro)
        return result.name
    # FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # catching Exception keeps the best-effort fallback while letting
    # control-flow exceptions propagate.
    except Exception:
        return ""
bf7927d97767d7ca6091a2318a8dc879ec2cda01
3,627,953
def GePacketOut(egress_port, mcast, padding):
    """ Generate packet_out packet with bytearray format.

    Packs egress_port (9 bits), mcast (16 bits) and padding (7 bits)
    into a 32-bit big-endian header returned as 4 bytes.
    """
    bits = "{0:09b}".format(egress_port) \
         + "{0:016b}".format(mcast) \
         + "{0:07b}".format(padding)
    # Slice the 32-bit string into four octets.
    return bytearray(int(bits[k:k + 8], 2) for k in range(0, 32, 8))
c56abb84ec067cf8abb8e69d82c1342c4f20e0e9
3,627,954
def damage_per_max_ammo(weapon_dic: dict, damage_range_arr: np.ndarray) -> np.ndarray:
    """
    Calculate the damage per max ammo at varying distances. This assumes you
    were to fire all ammo in the gun and in reserve at a set distance.

    Parameters
    ----------
    weapon_dic : dict
        Dict of specific weapon stats.
    damage_range_arr : np.ndarray
        The damage at varying distances.

    Returns
    -------
    np.ndarray
    """
    # Number of bursts available from the full ammo pool, scaled by the
    # per-distance damage values.
    shots = np.divide(weapon_dic['Ammo Capacity'], weapon_dic['Burst'])
    return np.multiply(shots, damage_range_arr)
8724a1bbe408a58399e6547a63b3e80c5ecf7eda
3,627,955
import requests
from bs4 import BeautifulSoup
import warnings


def get_PDB_summary(PDB_id, verbose=False):
    """
    Similar info to get_meta, but for a PDB entry - returns info about when
    it was registered etc.

    Parameters
    ----------
    PDB_id : str
        The PDB identifier to query.
    verbose : Bool, optional, default: False
        Flag to turn on verbose output

    Returns
    -------
    dict of entry attributes on success; False for an unrecognised status;
    None when the HTTP request does not return 200 (a warning is issued).

    NOTE(review): the legacy rcsb.org REST endpoints used here have been
    retired by RCSB -- confirm these URLs still resolve.

    See Also
    --------
    *:func:`~pyresid.get_meta`
    """
    status = get_PDB_status(PDB_id=PDB_id, verbose=verbose)

    if status == "CURRENT":
        url = "http://www.rcsb.org/pdb/rest/describePDB?structureId=" + PDB_id
    elif status == "UNRELEASED":
        url = "http://www.rcsb.org//pdb/rest/getUnreleased?structureId=" + PDB_id
    else:
        print("Do not recognise status", status)
        return False

    r = requests.get(url)

    if r.status_code == 200:
        soup = BeautifulSoup(r.text, "lxml-xml")

        if status == "CURRENT":
            # Start from the <PDB> element's attributes, then fold each
            # child element's attrs into lists keyed by the child tag name.
            attrs_dict = soup.find("PDB").attrs
            if hasattr(soup.find("PDB"), "contents"):
                contents = [i for i in soup.find("PDB").contents if i != "\n"]
                for item in contents:
                    if item.name not in attrs_dict:
                        attrs_dict[item.name] = []
                    if "pdbId" in item.attrs:
                        attrs_dict[item.name].append(item.attrs["pdbId"])
                    else:
                        attrs_dict[item.name].append(item.attrs)
            else:
                if verbose:
                    print("No contents")
        elif status == "UNRELEASED":
            # Unreleased entries use a <record> element; child attrs are
            # flattened directly into the top-level dict.
            attrs_dict = soup.find("record").attrs
            if hasattr(soup.find("record"), "contents"):
                contents = [i for i in soup.find("record").contents if i != "\n"]
                for item in contents:
                    if verbose:
                        print(item.name)
                    if len(item.attrs) > 0:
                        for key in item.attrs:
                            attrs_dict[key] = item.attrs[key]
                    if len(item.contents) > 0:
                        attrs_dict[item.name] = item.contents
            else:
                if verbose:
                    print("No contents")

        return attrs_dict
    else:
        warnings.warn("request to " + str(PDB_id) + " has failed to return 200, and has returned " + str(r.status_code))
        # Falls through returning None on a non-200 response.
        pass
0fb0c1a2623656c0dc18067b3f52f7ee67d303a0
3,627,956
import os
import shutil


def copy_dir(src: str, dest: str) -> bool:
    """Recursively copy a directory.

    Any existing destination tree is removed first.

    Args:
        src: The source directory.
        dest: The destination directory.

    Returns:
        Indicate if the copy was success or not.
    """
    # Clear out any previous copy so copytree can create `dest` fresh.
    if os.path.exists(dest):
        shutil.rmtree(dest, ignore_errors=True)

    try:
        shutil.copytree(src, dest)
    # shutil.Error (same-dir / per-file failures) subclasses OSError,
    # which also covers a missing source directory.
    except (shutil.Error, OSError) as err:
        get_default_logger('Util').error('Directory not copied. Error: %s', str(err))
        return False
    return True
a8969138fc1c042a9ea7da8f3e452ced4d8168b2
3,627,957
import requests
import json


def distance_from_user(beer):
    """
    This method is used to calculate distace from user to each store that
    carry specific beer that user seached for and return list of
    (beer, store, distance) tuples sorted by distance from the user.

    NOTE(review): freegeoip.net has been shut down -- this geolocation
    request will fail; confirm a replacement service before relying on it.
    """
    # user_lat = 40.8200471
    # user_lon = -73.9514611
    # Geolocate the caller from their public IP.
    send_url = 'http://freegeoip.net/json'
    req = requests.get(send_url)
    j = json.loads(req.text)
    user_lat = j['latitude']
    user_lon = j['longitude']
    distances = []
    # `distance` is a helper defined elsewhere in this module.
    for store in beer.stores:
        dist = distance(user_lat, user_lon, store.lat, store.lon)
        distances.append((beer, store, dist))
    # Sort by the distance element (index 2) of each tuple.
    sorted_distances = sorted(distances, key=itemgetter(2))
    return sorted_distances
67b0de0751a2dfd2f440bcb0595c31be3c644026
3,627,958
from mesh.Triangulation import TrivialSystem, ComposedSystem, State


def meshes2system(meshes):
    """ recursively generate system.

    A mesh entry carrying a 'triangulation' in its 'system' becomes a
    TrivialSystem leaf; otherwise each sub-entry is recursed into and
    wrapped in a ComposedSystem.
    """
    root_id = list(meshes.keys())[0]
    node = meshes[root_id]
    state = State(translation=node['translate'],
                  rotation=node['rotate'],
                  velocity=node['velocity'],
                  angular=node['angular'])
    system = node['system']
    if 'triangulation' in system:
        return TrivialSystem(coordinates=system['coordinates'],
                             velocities=system['velocities'],
                             triangulation=system['triangulation'],
                             state=state,
                             id=root_id)
    children = [meshes2system({name: system[name]}) for name in system]
    return ComposedSystem(state=state, subsystems=children, id=root_id)
7a6dc5a7f41735a37e54f6444148141085d9fece
3,627,959
def cmp_public_numbers(pn1, pn2):
    """
    Compare 2 sets of public numbers. These is a way to compare
    2 public RSA keys. If the sets are the same then the keys are the same.

    :param pn1: The set of values belonging to the 1st key
    :param pn2: The set of values belonging to the 2nd key
    :return: True is the sets are the same otherwise False.
    """
    # Two RSA public keys are identical iff both the modulus (n) and the
    # public exponent (e) match; a single expression replaces the nested ifs.
    return pn1.n == pn2.n and pn1.e == pn2.e
a91a7204412d07808dbd6d5040f6df8baa576417
3,627,960
import os


def authenticate():
    """Shows basic usage of the Gmail API.
    Lists the user's Gmail labels.

    Runs the Google OAuth installed-app flow (if no cached token exists)
    and returns an authorized Gmail API service object.

    NOTE(review): relies on module-level `token_path`, `credentials_path`
    and `SCOPES` being defined elsewhere in this file.
    """
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists(token_path):
        creds = Credentials.from_authorized_user_file(token_path, SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Silent refresh using the stored refresh token.
            creds.refresh(Request())
        else:
            # Interactive browser-based consent flow.
            flow = InstalledAppFlow.from_client_secrets_file(
                credentials_path, SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(token_path, 'w') as token:
            token.write(creds.to_json())
    return build('gmail', 'v1', credentials=creds)
f99305aa81033310fb078b1531bfcc5770947add
3,627,961
def phred(vals):
    """ apply the phred scale to the vals provided

    :param vals: array-like of probabilities in [0, 1)
    :return: phred-scaled values, -10 * log10(1 - vals)
    """
    # FIX: the original had a second, unreachable `return` using
    # np.ma.log10(...).filled(-3); that dead line is removed. If masking
    # of vals == 1 is ever needed, reinstate the masked variant instead.
    return -10*np.log10(1-vals)
c09b38a5f736ddef994ea1eff8f0cc614362f3d3
3,627,962
def __get_useconds_of(stage_idx, block_idx, event_acc, wanted, block_prefix=''):
    """ gets useconds of chan/kernel of specific stage_idx & block_idx,
    from tensorboard event_acc

    :param wanted: 'chan' or 'kernel'
    """
    prefix = block_prefix + 'use_%s_' % wanted
    collected = []
    index = 0
    # Probe consecutive summary tags until one is missing.
    while True:
        value = get_usecond(prefix + str(index), stage_idx, block_idx, event_acc)
        if value is None:
            return collected
        collected.append(value)
        index += 1
1fed126224649e368694d63013643cf2fb5f4aaa
3,627,963
def create_menu(*args):
    """
    create_menu(name, label, menupath=None) -> bool
    Create a menu with the given name, label and optional position,
    either in the menubar, or as a submenu. If 'menupath' is non-NULL, it
    provides information about where the menu should be positioned. First,
    IDA will try and resolve the corresponding menu by its name. If such an
    existing menu is found and is present in the menubar, then the new menu
    will be inserted in the menubar before it. Otherwise, IDA will try to
    resolve 'menupath' as it would for 'attach_action_to_menu()' and, if
    found, add the new menu like so:

    // The new 'My menu' submenu will appear in the 'Comments' submenu
    // before the 'Enter comment..." command
    create_menu("(...)", "My menu", "Edit/Comments/Enter comment...");

    or

    // The new 'My menu' submenu will appear at the end of the
    // 'Comments' submenu.
    create_menu("(...)", "My menu", "Edit/Comments/");

    If the above fails, the new menu will be appended to the menubar.

    @param name: name of menu (must be unique) (C++: const char *)
    @param label: label of menu (C++: const char *)
    @param menupath: where should the menu be inserted (C++: const char *)
    @return: success
    """
    # Thin SWIG passthrough: all work happens in the native IDA kernel call.
    return _ida_kernwin.create_menu(*args)
0c3cb60c1193b422b11ca1ac92f7124f05086a69
3,627,964
def parse_proxy_url(purl):
    """Adapted from UStreamTV plugin (ustreamtv.py).

    Turns a proxy URL string into the websocket-client option dict
    (scheme, host, optional port and credentials). Returns an empty dict
    for a falsy input.
    """
    options = {}
    if not purl:
        return options
    parsed = urlparse(purl)
    options['proxy_type'] = parsed.scheme
    options['http_proxy_host'] = parsed.hostname
    if parsed.port:
        options['http_proxy_port'] = parsed.port
    if parsed.username:
        # Credentials arrive percent-encoded in the URL.
        password = unquote_plus(parsed.password or "")
        options['http_proxy_auth'] = (unquote_plus(parsed.username), password)
    return options
94b903cc3199b34c61f0b86c16a75bd503f13325
3,627,965
def cma(data):
    """
    Cumulative Moving Average

    The first two output positions are NaN (not enough history); from
    index 2 onward each value is the mean of data[0..i].

    :type data: np.ndarray
    :rtype: np.ndarray
    """
    size = len(data)
    out = np.full(size, np.nan)
    # FIX: the original unconditionally wrote last_sum[1], raising
    # IndexError for inputs of length < 2; short inputs now return the
    # all-NaN prefix instead.
    if size < 2:
        return out
    running = sum(data[:2])
    for i in range(2, size):
        running += data[i]
        out[i] = running / (i + 1)
    return out
caeb4d7b30d8cc5a079532aac817bbc49ef85746
3,627,966
from pycalphad import __version__ as pycalphad_version
from typing import OrderedDict


def starting_point(conditions, state_variables, phase_records, grid):
    """
    Find a starting point for the solution using a sample of the system energy surface.

    Parameters
    ----------
    conditions : OrderedDict
        Mapping of StateVariable to array of condition values.
    state_variables : list
        A list of the state variables (e.g., N, P, T) used in this calculation.
    phase_records : dict
        Mapping of phase names (strings) to PhaseRecords.
    grid : Dataset
        A sample of the energy surface of the system. The sample should at least
        cover the same state variable space as specified in the conditions.

    Returns
    -------
    Dataset
    """
    global_min_enabled = global_min_is_possible(conditions, state_variables)

    active_phases = sorted(phase_records.keys())
    # Ensure that '_FAKE_' will fit in the phase name array
    max_phase_name_len = max(max([len(x) for x in active_phases]), 6)
    maximum_internal_dof = max(prx.phase_dof for prx in phase_records.values())
    nonvacant_elements = phase_records[active_phases[0]].nonvacant_elements

    coord_dict = OrderedDict([(str(key), value) for key, value in conditions.items()])
    grid_shape = tuple(len(x) for x in coord_dict.values())
    # +1 is to accommodate the degenerate degree of freedom at the invariant reactions
    coord_dict['vertex'] = np.arange(len(nonvacant_elements) + 1)
    coord_dict['component'] = nonvacant_elements
    conds_as_strings = [str(k) for k in conditions.keys()]
    specified_elements = set()
    for i in conditions.keys():
        # Assume that a condition specifying a species contributes to constraining it
        if not hasattr(i, 'species'):
            continue
        specified_elements |= set(i.species.constituents.keys()) - {'VA'}
    dependent_comp = set(nonvacant_elements) - specified_elements
    if len(dependent_comp) != 1:
        raise ValueError('Number of dependent components different from one')
    # Pre-allocate every output variable of the result dataset; values are
    # uninitialized (np.empty) and filled in by lower_convex_hull below.
    ds_vars = {'NP':     (conds_as_strings + ['vertex'], np.empty(grid_shape + (len(nonvacant_elements)+1,))),
               'GM':     (conds_as_strings, np.empty(grid_shape)),
               'MU':     (conds_as_strings + ['component'], np.empty(grid_shape + (len(nonvacant_elements),))),
               'X':      (conds_as_strings + ['vertex', 'component'], np.empty(grid_shape + (len(nonvacant_elements)+1, len(nonvacant_elements),))),
               'Y':      (conds_as_strings + ['vertex', 'internal_dof'], np.empty(grid_shape + (len(nonvacant_elements)+1, maximum_internal_dof,))),
               'Phase':  (conds_as_strings + ['vertex'], np.empty(grid_shape + (len(nonvacant_elements)+1,), dtype='U%s' % max_phase_name_len)),
               'points': (conds_as_strings + ['vertex'], np.empty(grid_shape + (len(nonvacant_elements)+1,), dtype=np.int32))
               }
    # If we have free state variables, they will also be data variables / output variables
    free_statevars = sorted(set(state_variables) - set(conditions.keys()))
    for f_sv in free_statevars:
        ds_vars.update({str(f_sv): (conds_as_strings, np.empty(grid_shape))})

    result = LightDataset(ds_vars, coords=coord_dict, attrs={'engine': 'pycalphad %s' % pycalphad_version})
    if global_min_enabled:
        # Seed the equilibrium solve with the lower convex hull of the
        # sampled energy surface.
        result = lower_convex_hull(grid, state_variables, result)
    else:
        raise NotImplementedError('Conditions not yet supported')

    return result
59537ea36fa7b73e250ccbcc660f1363213109bc
3,627,967
def intersects(a0, a1, b0, b1):
    """
    Checks whether two line segments, each defined by two end points,
    will intersect.
    """
    a_vertical = a0[0] == a1[0]
    b_vertical = b0[0] == b1[0]

    if a_vertical:
        if b_vertical:
            # Both vertical: must share x and overlap in y at one of b's ends.
            return (a0[0] == b0[0]) and (in_range(b0[1], a0[1], a1[1]) or in_range(b1[1], a0[1], a1[1]))
        # Only the first segment is vertical: evaluate b's line at a's x.
        y_at_x = apply_eqn(get_eqn(b0, b1), a0[0])
        return in_range(y_at_x, a0[1], a1[1])

    if b_vertical:
        # Only the second segment is vertical.
        y_at_x = apply_eqn(get_eqn(a0, a1), b0[0])
        return in_range(y_at_x, b0[1], b1[1])

    line_a = get_eqn(a0, a1)
    line_b = get_eqn(b0, b1)

    if line_a[0] == line_b[0]:
        # Parallel: only collinear segments with overlapping x spans intersect.
        if line_a[1] != line_b[1]:
            return False
        return in_range(a0[0], b0[0], b1[0]) or in_range(a1[0], b0[0], b1[0])

    # General case: intersect the two lines, then confirm the point lies
    # within both segments' bounding ranges.
    point = intersection(line_a, line_b)
    return (in_range(point[0], a0[0], a1[0]) and in_range(point[0], b0[0], b1[0])
            and in_range(point[1], a0[1], a1[1]) and in_range(point[1], b0[1], b1[1]))
626a682e24358243faa43c18fefec2c7c874f10d
3,627,968
def getAccident(id=None):
    """ return the Accident object or None if not exist
    return a list of all Accident if no id passed.
    return one object if filtered by 'id'"""
    # Falsy id (None/0) means "list everything".
    if not id:
        return Accident.query.all()
    return Accident.query.get(id)
798b4cb51d7d15400b376ca13b77c17bb0da9568
3,627,969
def ranges(int_list):
    """ Given a sorted list of integers function will return an array of
    strings that represent the consecutive ranges ("begin->end").

    Runs of length 1 are omitted, matching the original behaviour.

    FIX: the original used `begin == 0` as an "unset" sentinel, which broke
    any input containing 0 (e.g. [0, 1, 2] lost its first element); a None
    sentinel is used instead.
    """
    result = []
    begin = end = None
    for value in int_list:
        if begin is None:
            # First element starts the first run.
            begin = end = value
        elif value - 1 == end:
            # Still in sequence: extend the current run.
            end = value
        else:
            # Sequence broken: flush the finished run (if longer than one)
            # and start a new one at the current element.
            if begin != end:
                result.append("{0}->{1}".format(begin, end))
            begin = end = value
    # Flush the trailing run.
    if begin is not None and begin != end:
        result.append("{0}->{1}".format(begin, end))
    return result
cc6aab9442a6f6986acccb1fa46cd61ff1e4ba07
3,627,970
def format_data(x_data=None, y_data=None):
    """
    =============================================================================
     Function converts a list of separate x and y coordinates to a format
     suitable for plotting in ReportLab

     Arguments:
     x_data    -    a list of x coordinates (or any object that can be indexed)
     y_data    -    a list of y coordinates (or any object that can be indexed)

     Returns:
     A tuple of tuples containing the paired data

     Raises:
     ValueError   -  if either input is missing or the lengths differ.
                     (Previously enforced with `assert`, which is silently
                     stripped when Python runs with -O.)

     Notes:
     The output from this function should be in a list for plotting in
     ReportLab. Multiple items in the list represent multiple data sets
    -----------------------------------------------------------------------------
    """
    if x_data is None or y_data is None:
        raise ValueError("both x_data and y_data must be provided")
    if len(x_data) != len(y_data):
        raise ValueError("x_data and y_data must have the same length")

    # zip pairs the coordinates; tuple() freezes the result.
    return tuple(zip(x_data, y_data))
34ac418f38194644f9372f20d47535424c7bfb52
3,627,971
import math


def gaslib_to_network_data(network_file, scenario_file, contract_aux_elements=True, debug=False):
    """Read a gaslib instance from files and create network data.

    The function returns data that can be passed immediately to the
    constructor of Network.

    Parameters
    ----------
    network_file : str
        The filename of the gaslib XML network file

    scenario_file : str
        The filename of the gaslib XML scenario file to be used as demands

    contract_aux_elements : boolean, default=True
        If set to True, auxiliary components (i.e. components that are not
        pipes, such as valves and compressors) are contracted.
        If set to False these components are modeled as pipes with very
        small resistances

    debug : boolean, default=False
        If set to True, some debug information are printed

    Returns
    -------
    edge_data : tuple
        The network edge data
    node_data : tuple
        The network node data
    cost_data : tuple
        The network polynomial cost data
    demand_data : tuple
        The network demand data
    """
    # Read the demand from the scenario file
    scn_tree = etree.parse(scenario_file)
    scn_root = scn_tree.getroot()
    demand = read_gaslib_scn(scn_root)

    # Read the network XML file
    net_tree = etree.parse(network_file)
    root = net_tree.getroot()
    nodes = read_gaslib_nodes(root)

    # Compute average node values for node properties.
    # Needed because some "global" properties such as gas temperature are
    # needed in the computation of the beta coefficients. The average value
    # of all nodes is used for these computations.
    avg_n_vals = dict()
    for n in nodes:
        for p in nodes[n]['p']:
            avg_n_vals[p] = avg_n_vals.get(p, []) + [nodes[n]['p'][p]]
    for p in avg_n_vals:
        vals = avg_n_vals.get(p, [])
        if len(vals) != 0:
            avg_n_vals[p] = sum(vals) / len(vals)

    if debug:
        print("\n== GLOBAL NODE VALUES (AVG) ==")
        for p in avg_n_vals:
            print("{:30s}: {:s}".format(str(p), str(avg_n_vals[p])))

    # Read pipe data from the XML file
    edges = read_gaslib_pipes(root)

    # Compute the minimal beta of all edges
    # (used as beta for auxiliary elements, if contract_aux_elements == False)
    min_beta = math.inf
    for e in edges:
        edges[e]['beta'] = calc_beta(edges[e], nodes, avg_n_vals)
        min_beta = min(min_beta, edges[e]['beta'])
    if debug:
        print('MIN BETA', min_beta)

    # Read further components (non-pipe edges) from XML
    aux_elements = read_gaslib_aux_elements(root)

    # Handle the auxiliary components (mutates nodes/edges/demand in place)
    contract_gaslib_instance(nodes, edges, demand, aux_elements,
                             contract_aux_elements, min_beta, debug)

    return raw_data_to_network_data(nodes, edges, demand)
3979d4345eb01f8fc49ade176e8b2303f98b8c72
3,627,972
import json
import os
import sys


def write_plugins_index(file_name, plugins):
    """
    Writes the list of (name, version, description) of the plugins given
    into the index file in JSON format.

    Returns True if the file was actually updated, or False if it was already
    up-to-date.
    """
    entries = [
        {"name": name, "version": version, "description": description}
        for (name, version, description) in plugins
    ]
    # separators is given to avoid trailing whitespaces; see docs
    contents = json.dumps(entries, indent=2, separators=(",", ": "), sort_keys=True)

    current_contents = ""
    if os.path.isfile(file_name):
        if sys.version_info < (3,):
            mode = "rU"
        else:
            # universal newlines is enabled by default, and specifying it
            # will cause deprecation warnings
            mode = "r"
        with open(file_name, mode) as f:
            current_contents = f.read()

    if contents.strip() == current_contents.strip():
        return False
    with open(file_name, "w") as f:
        f.write(contents + "\n")
    return True
52d110bb0e90c661f95b144fa95d6143d7e719a7
3,627,973
from typing import List
from typing import Counter


def explicit_endorsements(user: domain.User) -> List[domain.Category]:
    """
    Load endorsed categories for a user.

    These are endorsements (including auto-endorsements) that have been
    explicitly commemorated.

    Parameters
    ----------
    user : :class:`.domain.User`

    Returns
    -------
    list
        Each item is a :class:`.domain.Category` for which the user is
        explicitly endorsed.

    """
    with util.transaction() as session:
        # Only valid endorsements count; .all() materializes the rows
        # before the transaction context exits.
        data: List[DBEndorsement] = (
            session.query(
                DBEndorsement.archive,
                DBEndorsement.subject_class,
                DBEndorsement.point_value,
            )
            .filter(DBEndorsement.endorsee_id == user.user_id)
            .filter(DBEndorsement.flag_valid == 1)
            .all()
        )
    # Sum point values per category; categories with a non-positive total
    # (falsy points) are dropped from the result.
    pooled: Counter = Counter()
    for archive, subject, points in data:
        pooled[domain.Category(archive, subject)] += points
    return [category for category, points in pooled.items() if points]
1e70415443a6f9a0bd27a8e2bbac3998dab090b5
3,627,974
def detec_apache_root(binPath):
    """Derive the Apache install root by parsing HTTPD_ROOT out of
    `apachectl -V` output (original docstring: 根据apachectl -V
    获得apache的安装路径)."""
    shell_cmd = binPath + """ -V | grep -i "HTTPD_ROOT" | awk -F '[="]' '{print $3}'"""
    result = commands.getoutput(shell_cmd)
    return result
dfa07a0311dbc3cc9ee74425c103ed94d768da83
3,627,975
def hindu_zodiac(tee):
    """Return the zodiacal sign of the sun, as integer in range 1..12,
    at moment tee."""
    # Each sign spans 30 degrees of solar longitude; quotient gives the
    # zero-based sign index.
    longitude = float(hindu_solar_longitude(tee))
    return quotient(longitude, deg(30)) + 1
87c293c20ee0880ac844e27000e2f8f774b2bbb1
3,627,976
def SearchRelativeLongitude(body, targetRelLon, startTime):
    """Searches for when the Earth and another planet are separated by a certain ecliptic longitude.

    A relative longitude is the angle between two bodies measured in the plane
    of the Earth's orbit (the ecliptic plane), ignoring their distance above or
    below that plane. Given a planet other than the Earth in `body` and a time
    to start the search in `startTime`, this function searches for the next
    time that the relative longitude measured from the planet to the Earth is
    `targetRelLon`.

    Events defined in terms of relative longitude:

    - 0 degrees, inner planet (Mercury, Venus): *inferior conjunction* —
      the planet is lost in the Sun's glare (barring a rare transit).
    - 0 degrees, outer planet: *opposition* — the planet is closest to the
      Earth and visible most of the night; the best time to observe it.
    - 180 degrees: *superior conjunction* — the planet is on the opposite
      side of the Sun from the Earth and very difficult to see.

    Parameters
    ----------
    body : Body
        A planet other than the Earth. If `body` is not a planet, or if it
        is `Body.Earth`, an error occurs.
    targetRelLon : float
        The desired relative longitude, expressed in degrees.
        Must be in the range [0, 360).
    startTime : Time
        The date and time at which to begin the search.

    Returns
    -------
    Time
        The date and time of the relative longitude event.
    """
    if body == Body.Earth:
        raise EarthNotAllowedError()
    if body == Body.Moon or body == Body.Sun:
        raise InvalidBodyError()
    period = _SynodicPeriod(body)
    # Superior planets drift one way relative to the Earth, inferior
    # planets the other.
    direction = +1 if _IsSuperiorPlanet(body) else -1
    # The offset is a negative number of degrees "behind" the target
    # relative longitude; force a forward-in-time search.
    offset = _rlon_offset(body, startTime, direction, targetRelLon)
    if offset > 0.0:
        offset -= 360.0
    time = startTime
    for _ in range(100):
        # Convert the angular shortfall into a time step via the synodic period.
        step_days = (-offset / 360.0) * period
        time = time.AddDays(step_days)
        if abs(step_days) * _SECONDS_PER_DAY < 1.0:
            return time
        previous = offset
        offset = _rlon_offset(body, time, direction, targetRelLon)
        if abs(previous) < 30.0 and previous != offset:
            # Improve convergence for Mercury/Mars (eccentric orbits) by
            # adjusting the synodic period to better match the variable
            # speed of both planets in this part of their orbits.
            ratio = previous / (previous - offset)
            if 0.5 < ratio < 2.0:
                period *= ratio
    raise NoConvergeError()
effce7c99297e183182b8142e0dc037bb9c924da
3,627,977
import time


def _fitFunc2(x, *pfit, verbose=True, follow=[], errs=None):
    """
    Residual function in the shape expected by scipy's curve_fit.

    Rebuilds the full parameter dict from the fitted values in `pfit`
    (keyed by the global `pfitKeys`) plus the fixed values in the global
    `pfix`, then evaluates the global model `_func(x, params)`.

    Side effects: increments the global call counter `Ncalls` and, at most
    once every 5 seconds when `verbose`, prints a progress line with the
    reduced chi2 and the values of any parameters listed in `follow`.

    NOTE(review): `follow=[]` is a mutable default — harmless here since it
    is only read, but fragile. `errs` is accepted but unused in this body.
    """
    global pfitKeys, pfix, _func, Ncalls, verboseTime
    Ncalls +=1
    params = {}
    # -- build dic from parameters to fit and their values:
    for i,k in enumerate(pfitKeys):
        params[k]=pfit[i]
    # -- complete with the non fitted parameters:
    for k in pfix:
        params[k]=pfix[k]
    res = _func(x, params)
    # Rate-limited progress report: at most one line every 5 seconds.
    if verbose and time.time()>(verboseTime+5):
        verboseTime = time.time()
        print('[dpfit]', time.asctime(), '%5d'%Ncalls,end='')
        try:
            chi2=np.sum(res**2)/(len(res)-len(pfit)+1.0)
            print('CHI2: %6.4e'%chi2,end='')
        except:
            # res is a ragged list of elements (mix of scalars and
            # sequences): accumulate chi2 element by element and flatten
            # res as we go.
            # NOTE(review): this bare `except` also hides unrelated errors,
            # and the flattened `res` is only produced when this verbose
            # branch fires, so the return shape can depend on timing —
            # confirm this is intended.
            chi2 = 0
            N = 0
            res2 = []
            for r in res:
                if np.isscalar(r):
                    chi2 += r**2
                    N+=1
                    res2.append(r)
                else:
                    chi2 += np.sum(np.array(r)**2)
                    N+=len(r)
                    res2.extend(list(r))
            res = res2
            print('CHI2: %6.4e'%(chi2/float(N-len(pfit)+1)), end=' ')
        if follow is None:
            print('')
        else:
            # Only report follow-parameters that actually exist in params.
            _follow = list(filter(lambda x: x in params.keys(), follow))
            print(' '.join([k+'='+'%5.2e'%params[k] for k in _follow]))
    return res
1f61a3746761d47ffa43dcfdb370b756df9733b8
3,627,978
import io
import numpy


def sendForward(cwt,solver):
    """Use this function to communicate data between nodes.

    On the node master, first persists the current results via
    ``io.sweptWrite``; then every node shifts the shared array forward by
    ``splitx`` columns, sending its trailing slab to the next node in the
    ring and receiving the matching slab from the previous node.

    NOTE(review): ``io`` here must be a project module (the stdlib ``io``
    has no ``sweptWrite``) — confirm which module this import resolves to.
    NOTE(review): assumes ``clusterComm.Sendrecv`` returns the received
    object (mpi4py-style) — confirm against the communicator in use.
    """
    if solver.nodeMasterBool:
        # Only the node master writes results to storage.
        cwt = io.sweptWrite(cwt,solver)
    # Copy out the trailing splitx-wide slab to exchange with neighbors.
    buff = numpy.copy(solver.sharedArray[:,:,-solver.splitx:,:])
    buffer = solver.clusterComm.Sendrecv(sendobj=buff,dest=solver.neighbors[1],source=solver.neighbors[0])
    solver.clusterComm.Barrier() #Barrier to make sure all buffers are copied before writing
    solver.sharedArray[:,:,solver.splitx:,:] = solver.sharedArray[:,:,:-solver.splitx,:] #Shift solver.sharedArray data forward by solver.splitx
    solver.sharedArray[:,:,:solver.splitx,:] = buffer[:,:,:,:]
    solver.nodeComm.Barrier() #Wait for copy before calculating again
    return cwt
b47c1a2db202a2bfead6f76e2e2c8b9eb03dc50a
3,627,979
import re


def get_parameters(img_path_complete):
    """
    Get the parameters of an hyper-spectral image from its header file.

    :param img_path_complete: complete path of the bil file to get the
        parameters. Ex. <path>/img.bil (the header suffix ``dcube_h`` is
        appended to locate the header file).
    :return: list of totals ``[lines, samples, bands]``, or None when the
        header does not contain the expected fields.
    """
    file_info = img_path_complete + dcube_h  # header of the datacube image
    # Context manager guarantees the handle is closed even if reading fails.
    with open(file_info, "rt") as in_file:
        info_file_content = in_file.read()

    # regex for: lines, samples, bands
    regex = re.compile(r'.*lines\s*=\s*(\d+)\n.*'
                       r'.*samples\s*=\s*(\d+)\n.*'
                       r'.*bands\s*=\s*(\d+)\n.*', re.I)
    params = regex.findall(info_file_content)
    # BUG FIX: findall() returns a (possibly empty) list, never None, so the
    # original `params is not None` check always passed and `params[0]`
    # raised IndexError on a header without matches. Test truthiness instead.
    if params:
        lines = int(params[0][0])
        samples = int(params[0][1])
        bands = int(params[0][2])
        return [lines, samples, bands]
    else:
        return None
26fb411d737b259801681fb7152d7570583df969
3,627,980
def phone_move_handler(pid):
    """
    @api {post} /v1/asset/phone/move/{int:id} 流转 资产设备
    @apiName MovePhone
    @apiGroup 项目
    @apiDescription 流转 资产设备
    @apiParam {int} id
    @apiParam {int} borrow_id 流转人 ID
    @apiParamExample {json} Request-Example:
    {
        "borrow_id": 2
    }
    @apiSuccessExample {json} Success-Response:
     HTTP/1.1 200 OK
     {
        "code": 0,
        "data": [],
        "message": "ok"
     }
    """
    # Guard clause: only admins or users allowed by the device's current
    # status may move it.
    if not (is_admin() or PhoneBusiness.can_move_status(pid)):
        return json_detail_render(403)
    borrow_id = parse_json_form('phone_move')
    ret, msg = PhoneBusiness.move_to_user(pid, borrow_id)
    return json_detail_render(ret, [], msg)
fd6e36849f9461544144bea5f60fcc8a174c54c0
3,627,981
def extractLargestRegion(actor):
    """Keep only the largest connected part of a mesh and discard all the
    smaller pieces.

    .. hint:: |largestregion.py|_
    """
    # Configure a connectivity filter to keep only the biggest region,
    # ignoring scalar values when deciding connectivity.
    connectivity = vtk.vtkConnectivityFilter()
    connectivity.SetExtractionModeToLargestRegion()
    connectivity.ScalarConnectivityOff()
    connectivity.SetInputData(actor.GetMapper().GetInput())
    connectivity.Update()
    largest = Actor(connectivity.GetOutput())
    # Carry the visual properties of the input actor over to the result.
    prop = vtk.vtkProperty()
    prop.DeepCopy(actor.GetProperty())
    largest.SetProperty(prop)
    return largest
d481cdd5975eb9c8d7e835d24c49deb9a0a5961a
3,627,982
import zlib


def query_stock_concept(code="", date=""):
    """Query the concept-sector classification of a stock.

    :param code: stock code, e.g. ``sh.600000``; empty (default) queries all.
    :param date: query date, ``XXXX-XX-XX`` format; empty (default) for latest.
    :return: an ``rs.ResultData`` carrying either the result rows or an
        error code/message.
    """
    data = rs.ResultData()
    # Normalise a None code to the empty string ("all stocks").
    if code is None or code == "":
        code = ""
    if code != "" and code is not None:
        if len(code) != cons.STOCK_CODE_LENGTH:
            print("股票代码应为" + str(cons.STOCK_CODE_LENGTH) + "位,请检查。格式示例:sh.600000。")
            data.error_msg = "股票代码应为" + str(cons.STOCK_CODE_LENGTH) + "位,请检查。格式示例:sh.600000。"
            data.error_code = cons.BSERR_PARAM_ERR
            return data
        code = code.lower()
        # Accept the legacy "600000.sh" style and convert it to "sh.600000".
        if (code.endswith("sh") or code.endswith("sz")):
            code = code[7:9].lower() + "." + code[0:6]
    if date is None or date == "":
        date = ""
    else:
        if strUtil.is_valid_date(date):
            pass
        else:
            print("日期格式不正确,请修改。")
            data.error_code = cons.BSERR_DATE_ERR
            data.error_msg = "日期格式不正确,请修改。"
            return data
    # BUG FIX: the original called getattr(conx, "user_id") once *before*
    # the try block, so a missing login raised AttributeError instead of
    # returning the BSERR_NO_LOGIN result the except handler was written for.
    try:
        user_id = getattr(conx, "user_id")
    except Exception:
        print("you don't login.")
        data.error_code = cons.BSERR_NO_LOGIN
        data.error_msg = "you don't login."
        return data
    # Assemble the request: method name, user, page info, then arguments.
    param = "query_stock_concept," + str(user_id) + ",1," + \
            str(cons.BAOSTOCK_PER_PAGE_COUNT) + \
            "," + str(code) + "," + str(date)
    msg_body = strUtil.organize_msg_body(param)
    msg_header = msgheader.to_message_header(
        cons.MESSAGE_TYPE_QUERYSTOCKCONCEPT_REQUEST, len(msg_body))
    data.msg_type = cons.MESSAGE_TYPE_QUERYSTOCKCONCEPT_REQUEST
    data.msg_body = msg_body
    head_body = msg_header + msg_body
    # CRC32 checksum of header+body is appended to protect the payload.
    crc32str = zlib.crc32(bytes(head_body, encoding='utf-8'))
    receive_data = sock.send_msg(head_body + cons.MESSAGE_SPLIT + str(crc32str))
    if receive_data is None or receive_data.strip() == "":
        data.error_code = cons.BSERR_RECVSOCK_FAIL
        data.error_msg = "网络接收错误。"
        return data
    # Response layout: fixed-length header, body, one trailing checksum char.
    msg_header = receive_data[0:cons.MESSAGE_HEADER_LENGTH]
    msg_body = receive_data[cons.MESSAGE_HEADER_LENGTH:-1]
    header_arr = msg_header.split(cons.MESSAGE_SPLIT)
    body_arr = msg_body.split(cons.MESSAGE_SPLIT)
    data.msg_body_length = header_arr[2]
    data.error_code = body_arr[0]
    data.error_msg = body_arr[1]
    if cons.BSERR_SUCCESS == data.error_code:
        # Only populate the payload fields on a successful response.
        data.method = body_arr[2]
        data.user_id = body_arr[3]
        data.cur_page_num = body_arr[4]
        data.per_page_count = body_arr[5]
        data.setData(body_arr[6])
        data.code = body_arr[7]
        data.date = body_arr[8]
        data.setFields(body_arr[9])
    return data
4ca53f065564fd78855e94a89d43f4590c51e89a
3,627,983
def checkForVideoRetainment(op, graph, frm, to):
    """
    Confirm video channel is retained in the resulting media file.

    :param op:
    :param graph:
    :param frm:
    :param to:
    :return: (Severity, message) tuple on mismatch, otherwise None
    @type op: Operation
    @type graph: ImageGraph
    @type frm: str
    @type to: str
    """
    _, source_file = graph.get_image(frm)
    _, dest_file = graph.get_image(to)
    # Count the video streams on both sides of the edge.
    source_streams = get_meta_from_video(source_file, show_streams=True, media_types=['video'])[0]
    dest_streams = get_meta_from_video(dest_file, show_streams=True, media_types=['video'])[0]
    if len(source_streams) != len(dest_streams):
        # NOTE(review): message text looks copied from an audio check even
        # though video streams are compared here — confirm intended wording.
        return (Severity.ERROR, 'Video is missing from audio sample')
    return None
d04296adc58611798007066e26cf1a784949520c
3,627,984
def get_console_scripts(entry_points):
    """pygradle's 'entrypoints' are misnamed: they really mean 'consolescripts'"""
    # Nothing to extract from an empty/None value.
    if not entry_points:
        return None
    # Dict form: the scripts are simply under the "console_scripts" key.
    if isinstance(entry_points, dict):
        return entry_points.get("console_scripts")
    # List form: scan INI-style lines, collecting everything inside the
    # [console_scripts] section.
    if isinstance(entry_points, list):
        scripts = []
        inside_section = False
        for raw_line in entry_points:
            text = raw_line.strip()
            if text and text.startswith("["):
                # Entering a new section; track whether it is console_scripts.
                inside_section = "console_scripts" in text
                continue
            if inside_section:
                scripts.append(text)
        return scripts
    # String form: split into lines and reuse the list handling above.
    return get_console_scripts(entry_points.split("\n"))
4ca1f6bb50959570c1c6d28312aabb939fe9daf8
3,627,985
def create_deepcopied_groupby_dict(orig_df, obs_id_col):
    """
    Will create a dictionary where each key corresponds to a unique value in
    `orig_df[obs_id_col]` and each value corresponds to all of the rows of
    `orig_df` where `orig_df[obs_id_col] == key`.

    Parameters
    ----------
    orig_df : pandas DataFrame.
        Should be long-format dataframe containing the data used to estimate
        the desired choice model.
    obs_id_col : str.
        Should be a column name within `orig_df`. Should denote the original
        observation id column.

    Returns
    -------
    groupby_dict : dict.
        Each key will be a unique value in `orig_df[obs_id_col]` and each
        value will be the rows of `orig_df` where
        `orig_df[obs_id_col] == key`.
    """
    obs_ids = orig_df[obs_id_col].values
    # One deep-copied sub-frame per unique observation id, so mutating a
    # returned frame cannot touch the original dataframe.
    return {
        obs_id: orig_df.loc[obs_ids == obs_id].copy(deep=True)
        for obs_id in np.unique(obs_ids)
    }
5af41d6410adf643ccd7f5f2072a7e6539609ccb
3,627,986
from typing import Optional


def _create_configuration(
    user_agent: Optional[str] = None,
    user_agent_config_yaml: Optional[str] = None,
    user_agent_lookup: Optional[str] = None,
    hdx_url: Optional[str] = None,
    hdx_site: Optional[str] = None,
    hdx_read_only: bool = False,
    hdx_key: Optional[str] = None,
    hdx_config_json: Optional[str] = None,
    hdx_config_yaml: Optional[str] = None,
    project_config_json: Optional[str] = None,
    project_config_yaml: Optional[str] = None,
    hdx_base_config_json: Optional[str] = None,
    hdx_base_config_yaml: Optional[str] = None,
) -> str:
    """
    Create HDX configuration

    Args:
        user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not.
        user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
        hdx_url (Optional[str]): HDX url to use. Overrides hdx_site.
        hdx_site (Optional[str]): HDX site to use eg. prod, test.
        hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False.
        hdx_key (Optional[str]): Your HDX key. Ignored if hdx_read_only = True.
        hdx_config_json (Optional[str]): Path to JSON HDX configuration OR
        hdx_config_yaml (Optional[str]): Path to YAML HDX configuration
        project_config_json (Optional[str]): Path to JSON Project configuration OR
        project_config_yaml (Optional[str]): Path to YAML Project configuration
        hdx_base_config_json (Optional[str]): Path to JSON HDX base configuration OR
        hdx_base_config_yaml (Optional[str]): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml.

    Returns:
        str: HDX site url

    """
    # locals() at this point contains exactly the function's parameters, so
    # they can be forwarded wholesale as keyword arguments. Any local
    # variable introduced before this line would leak into the call — keep
    # this capture as the first statement of the body.
    arguments = locals()
    return Configuration._create(**arguments)
4ef7a985b8507e3e710a0465101d5db01c1329ed
3,627,987
import os
import sys
import importlib


def import_path(fullpath):
    """
    Import a file with full path specification. Allows one to import from
    anywhere, something __import__ does not do.

    :param fullpath: absolute or relative path to a ``.py`` file.
    :return: the imported (and freshly reloaded) module object.
    """
    path, filename = os.path.split(fullpath)
    filename, ext = os.path.splitext(filename)
    sys.path.append(path)
    try:
        module = __import__(filename)
        # BUG FIX: the builtin `reload` was removed in Python 3; use
        # importlib.reload instead (and rebind, since it returns the
        # module). Reloading guards against a stale cached module.
        module = importlib.reload(module)
    finally:
        # Always undo the sys.path modification, even if the import fails.
        del sys.path[-1]
    return module
e9cb5365434f9fa82c121fbeb7a264f703fb86f8
3,627,988
def batch_autocorr(data, lag, starts, ends, threshold, backoffset=0):
    """
    Calculate autocorrelation for batch (many time series at once)
    :param data: Time series, shape [n_pages, n_days]
    :param lag: Autocorrelation lag
    :param starts: Start index for each series
    :param ends: End index for each series
    :param threshold: Minimum support (ratio of time series length to lag) to calculate meaningful autocorrelation.
    :param backoffset: Offset from the series end, days.
    :return: autocorrelation, shape [n_series]. If series is too short (support less than threshold),
    autocorrelation value is NaN
    """
    n_series = data.shape[0]
    n_days = data.shape[1]
    max_end = n_days - backoffset
    corr = np.empty(n_series, dtype=np.float64)
    support = np.empty(n_series, dtype=np.float64)
    for i in range(n_series):
        series = data[i]
        end = min(ends[i], max_end)
        real_len = end - starts[i]
        # Support = effective series length relative to the requested lag.
        support[i] = real_len / lag
        if support[i] > threshold:
            series = series[starts[i]:end]
            c_365 = single_autocorr(series, lag)
            c_364 = single_autocorr(series, lag - 1)
            c_366 = single_autocorr(series, lag + 1)
            # Average value between exact lag and two nearest neighborhoods
            # for smoothness
            corr[i] = 0.5 * c_365 + 0.25 * c_364 + 0.25 * c_366
        else:
            # BUG FIX: the np.NaN alias was removed in NumPy 2.0; np.nan is
            # the canonical spelling and identical in all NumPy versions.
            corr[i] = np.nan
    return corr
8c6b9cdb3a62a4e8d1bd613414bda54f3fa75c9a
3,627,989
import yaml


def merge_yaml(y1, y2):
    """
    Merge two yaml HOT into one

    The parameters, resources and outputs sections are merged; on key
    clashes within a section, values from ``y2`` win.

    :param y1: the first yaml (base document)
    :param y2: the second yaml (overlay document)
    :return y: merged yaml
    """
    # SECURITY/BUG FIX: yaml.load() without an explicit Loader is deprecated
    # and can construct arbitrary Python objects from crafted tags;
    # safe_load is the standard choice for plain data documents such as
    # HOT templates.
    d1 = yaml.safe_load(y1)
    d2 = yaml.safe_load(y2)
    for key in ('parameters', 'resources', 'outputs'):
        if key in d2:
            # setdefault guards against a KeyError when y1 lacks a section
            # that y2 provides.
            d1.setdefault(key, {}).update(d2[key])
    return yaml.dump(d1, default_flow_style=False)
08351fcbd6ba5d5350b166224a33d558df6c8010
3,627,990
def infix(token_list):
    """
    Parses Infix notation and returns the equivalent RPN form
    (Pseudocode used from: https://en.wikipedia.org/wiki/Shunting-yard_algorithm)

    Parameters
    ==========

    token_list : list
        The list of infix tokens

    Returns
    --------

    output : list
        This is the output queue where the final answer is arranged
    """
    # Output Queue
    output = []
    # Operators Stack
    operators = []
    processed_tokens = 0
    # Iterating through each token in the token_list
    for token in token_list:
        if is_numeric_token(token):
            # Operands go straight to the output queue.
            enqueue(token, output)
            processed_tokens += 1
        elif is_function(token):
            # Functions wait on the operator stack.
            push(token, operators)
            processed_tokens += 1
        elif token in lexer.OPS:
            # Pop operators to the output while:
            #   1. the operator stack is non-empty, and
            #   2. the stack top out-ranks the current token
            #      (NOTE(review): this assumes a *smaller* OPS_RANKING
            #      value means higher precedence — confirm against the
            #      ranking table), or
            #   3. the rankings tie and the token is left-associative
            #      (so e.g. 1+1+1 groups left-to-right), and
            #   4. the stack top is not a "(" group boundary.
            while (
                (len(operators) != 0)
                and (
                    (OPS_RANKING[peek(operators)] < OPS_RANKING[token])
                    or (
                        OPS_RANKING[peek(operators)] == OPS_RANKING[token]
                        and OPS_ASSOSIATIVITY[token] == "l"
                    )
                )
                and (peek(operators) != "(")
            ):
                # Move the higher-precedence operator to the output queue.
                enqueue(pop(operators), output)
                processed_tokens += 1
            push(token, operators)
        # "(" never reaches the output; it only marks a group start on the
        # operator stack, but it still counts as a processed token.
        if token == "(":
            push(token, operators)
            processed_tokens += 1
        elif token == ")":
            # End of a group: flush the operator sitting on top (if any)
            # before discarding the matching "(".
            if peek(operators) != "(" and len(operators) != 0:
                enqueue(pop(operators), output)
                processed_tokens += 1
            # Drop the "(" itself; parentheses never appear in the output.
            if peek(operators) == "(" and len(operators) != 0:
                pop(operators)
                processed_tokens += 1
        # Once all input tokens are accounted for, drain the remaining
        # operators (except parentheses) into the output queue.
        # NOTE(review): if an unmatched "(" is still on the stack here this
        # loop never pops it and spins forever — confirm inputs are
        # validated for balanced parentheses upstream.
        if len(token_list) - 1 == processed_tokens:
            while len(operators) != 0:
                if peek(operators) != "(":
                    enqueue(pop(operators), output)
    return output
97fc5c75b173aeed42c9383625037374caddf261
3,627,991
import os


def download(isamAppliance, filename, id=None, comment=None, check_mode=False, force=False):
    """
    Download one snapshot file to a zip file.
    Multiple file download is now supported. Simply pass a list of id.
    For backwards compatibility the id parameter and old behaviour is
    checked at the beginning.

    :param isamAppliance: appliance connection object providing the REST helpers.
    :param filename: local path to write the downloaded zip to.
    :param id: a single snapshot id, or a list of snapshot ids.
    :param comment: if given (and `id` does not resolve), look up snapshot
        ids by comment via search().
    :param check_mode: when True, report without actually downloading.
    :param force: when True, download even if `filename` already exists.
    """
    ids = []
    download_flag = False
    if (isinstance(id, list)):
        # Keep only the ids that actually exist on the appliance.
        for i in id:
            if _check(isamAppliance, id=i) is True:
                download_flag = True
                ids.append(i)
    elif (_check(isamAppliance, id=id) is True):
        # Single-id variant (backwards compatibility).
        download_flag = True
        ids.append(id)
    elif (comment is not None):
        # Fall back to locating snapshot ids by their comment text.
        ret_obj = search(isamAppliance, comment=comment)
        if ret_obj != {} and ret_obj['data'] != {}:
            download_flag = True
            # NOTE(review): assumes search() returns the id list in 'data'
            # and that the ids are strings (",".join(ids) below) — confirm.
            ids = ret_obj['data']
    logger.info("Downloading the following list of IDs: {}".format(ids))
    if force is True or (
            os.path.exists(filename) is False and download_flag is True):  # Don't overwrite if not forced to
        if check_mode is False:  # We are in check_mode but would try to download named ids
            # Download all ids known so far
            return isamAppliance.invoke_get_file("Downloading multiple snapshots",
                                                 "/snapshots/download?record_ids=" + ",".join(ids),
                                                 filename)
    return isamAppliance.create_return_object()
0e58fa59515b8b7f943f5b7e17d5daaa810251e9
3,627,992
from typing import Union
from typing import Tuple
from typing import Dict


def pre_process_steps(
    draw, return_kwargs: bool = False
) -> Union[
    st.SearchStrategy[pre_process_step_pb2.PreProcessStep],
    st.SearchStrategy[Tuple[pre_process_step_pb2.PreProcessStep, Dict]],
]:
    """Returns a SearchStrategy for a pre_process_step plus maybe the kwargs.

    `draw` is the Hypothesis draw function — presumably this function is
    wrapped with `@st.composite` where it is defined/registered; confirm.
    When `return_kwargs` is True, the strategy also yields the kwargs dict
    used to build the message (useful for round-trip assertions).
    """
    kwargs: Dict = {}
    # Every step carries a stage; draw it unconditionally.
    kwargs["stage"] = draw(stages())

    descript = pre_process_step_pb2.PreProcessStep.DESCRIPTOR
    # Choose which oneof variant to generate from the proto descriptor, so
    # new variants added to the proto surface here as a ValueError below.
    step_type_str = draw(
        st.sampled_from(
            [f.name for f in descript.oneofs_by_name["pre_process_step"].fields]
        )
    )
    if step_type_str == "mfcc":
        kwargs["mfcc"] = draw(_mfccs())
    elif step_type_str == "standardize":
        kwargs["standardize"] = draw(_standardizes())
    elif step_type_str == "context_frames":
        kwargs["context_frames"] = draw(_context_frames())
    else:
        raise ValueError(f"unknown pre_process_step type {step_type_str}")

    # initialise return
    # Sanity-check that every expected proto field got a value first.
    all_fields_set(pre_process_step_pb2.PreProcessStep, kwargs)
    step = pre_process_step_pb2.PreProcessStep(**kwargs)
    if not return_kwargs:
        return step
    return step, kwargs
d13cf4343abd670ffd0be4ec83db36483fd761ee
3,627,993
def _decode(hdf5_handle):
    """
    Construct the object stored at the given HDF5 location.

    Dispatches on which marker key is present in the group, checked in
    priority order.
    """
    dispatch = (
        ('symmetries', _decode_symgroup),
        ('rotation_matrix', _decode_symop),
        ('matrix', _decode_repr),
        ('0', _decode_iterable),
    )
    for marker, decoder in dispatch:
        if marker in hdf5_handle:
            return decoder(hdf5_handle)
    raise ValueError('File structure not understood.')
ea9420799b8abe435ce7d5d98dd156356d790e1f
3,627,994
def r_network():
    """Loads network from the R library tmlenet for comparison.

    Reads tests/tmlenet_r_data.csv, builds a directed graph whose edges come
    from the space-separated NETID list of each row, and attaches the
    W1/A/Y columns as node attributes W/A/Y.
    """
    df = pd.read_csv("tests/tmlenet_r_data.csv")
    # IDs look like "X123": strip the leading character and keep the int.
    df['IDs'] = df['IDs'].str[1:].astype(int)
    df['NETID_split'] = df['Net_str'].str.split()
    G = nx.DiGraph()
    G.add_nodes_from(df['IDs'])
    for i, c in zip(df['IDs'], df['NETID_split']):
        # str.split() leaves NaN (a float) for missing values, so only rows
        # with a real list contribute edges.
        if type(c) is list:
            for j in c:
                G.add_edge(i, int(j[1:]))

    # Adding attributes
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24
    # (AttributeError on modern installs); np.int was simply an alias of
    # the builtin int, which behaves identically here.
    for node in G.nodes():
        G.nodes[node]['W'] = int(df.loc[df['IDs'] == node, 'W1'])
        G.nodes[node]['A'] = int(df.loc[df['IDs'] == node, 'A'])
        G.nodes[node]['Y'] = int(df.loc[df['IDs'] == node, 'Y'])
    return G
5dc728cef2118c981b78da29e73308fbc8ce8cab
3,627,995
def _legend_with_triplot_fix(ax: plt.Axes, **kwargs):
    """Add legend for triplot with fix that avoids duplicate labels.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matplotlib axes to apply legend to.
    **kwargs
        These parameters are passed to :func:`matplotlib.pyplot.legend`.

    Returns
    -------
    matplotlib.legend.Legend
    """
    # triplot registers one handle per triangle edge set; drop duplicates
    # before building the legend.
    deduplicated = _deduplicate_labels(ax.get_legend_handles_labels())
    return ax.legend(*deduplicated, **kwargs)
565fb3937aa0d1deb8f07d0f632354135282389c
3,627,996
from typing import Dict
from typing import Any


def gjson_from_tasks(tasks: Dict[TileIdx_txy, Any],
                     grid_info: Dict[TileIdx_xy, Any]) -> Dict[str, Dict[str, Any]]:
    """
    Group tasks by time period and compute GeoJSON describing every tile
    covered by each time period.

    Returns time_period -> GeoJSON mapping

    Each feature in GeoJSON describes one tile and has following properties

    .total      -- number of datasets
    .days       -- number of days with at least one observation
    .utc_offset -- utc_offset used to compute day boundaries
    """
    def _get(idx):
        # A task index is (time, x, y); the trailing (x, y) pair keys grid_info.
        xy_idx = idx[-2:]
        # Deep-copy so the per-period property updates below never mutate
        # the shared grid_info geometry.
        geo = deepcopy(grid_info[xy_idx])
        dss = tasks[idx]
        # Shift dataset timestamps by the tile's utc offset before binning
        # them into calendar days.
        utc_offset = timedelta(hours=geo['properties']['utc_offset'])
        ndays = len(set((ds.time+utc_offset).date() for ds in dss))
        geo['properties']['total'] = len(dss)
        geo['properties']['days'] = ndays
        return geo

    def process(idxs):
        # One FeatureCollection per time period, one feature per tile.
        return dict(type='FeatureCollection',
                    features=[_get(idx) for idx in idxs])

    # toolz.first(idx) is the time-period component of each task index.
    return {t: process(idxs)
            for t, idxs in toolz.groupby(toolz.first, sorted(tasks)).items()}
bb82ec50d79e7425db83ce6b55b7a089b0456cec
3,627,997
import logging


def getLogLevelNumber(loglevelname):
    """Parses log level name into log level number.

    The lookup is case-insensitive, so both "INFO" and "info" resolve to
    logging.INFO. Returns int value of log level. On failure, DEBUG level
    value is returned."""
    # Level names are uppercase attributes of the logging module; normalise
    # the input so lowercase names resolve instead of silently falling back
    # to DEBUG.
    number = getattr(logging, loglevelname.upper(), None)
    if not isinstance(number, int):
        module_logger.debug("failed to parse log level name '{name}'".format(name = loglevelname))
        number = logging.DEBUG
    return number
61eb798cd760437249a8dfc5b0bfd8b0397ff7f7
3,627,998
def arcz_to_arcs(arcz):
    """Convert a compact textual representation of arcs to a list of pairs.

    The text is space-separated pairs of characters: period is -1, 1-9 map
    to 1-9, and A-Z map to 10 through 36. A minus sign may precede either
    character of a pair to negate its value. The returned list is sorted
    regardless of the order of the input pairs.

    ".1 12 2." --> [(-1,1), (1,2), (2,-1)]
    "-11 12 2-5" --> [(-1,1), (1,2), (2,-5)]
    """
    arcs = []
    for pair in arcz.split():
        if len(pair) == 2:
            # Plain two-character pair: both endpoints positive.
            sign_a = sign_b = 1
            a, b = pair
        else:
            assert len(pair) == 3
            if pair[0] == '-':
                # Leading minus negates the first endpoint.
                _, a, b = pair
                sign_a, sign_b = -1, 1
            else:
                # Middle minus negates the second endpoint.
                assert pair[1] == '-'
                a, _, b = pair
                sign_a, sign_b = 1, -1
        arcs.append((sign_a * _arcz_map[a], sign_b * _arcz_map[b]))
    return sorted(arcs)
80f37daaa57f7b7ae1a89385a22df8e17c3bf46a
3,627,999