content (string, lengths 35 to 762k)
sha1 (string, lengths 40 to 40)
id (int64, 0 to 3.66M)
import numpy as np
import tensorflow as tf


def create_discriminator_inputs(images, conditional_vectors):
    """Builds the discriminator input (image + conditional image).

    Args:
        images: input images
        conditional_vectors: conditional vectors

    Returns:
        Tensor combining the images with the conditional images, shaped (B, H, W, A + C), where
        B: batch size, images.shape[0]
        H: image height, images.shape[1]
        W: image width, images.shape[2]
        A: number of image channels, images.shape[3]
        C: dimensionality of the conditional vector
    """
    # Without eager mode, tensor.numpy() and slicing are hard to use.
    tf.config.experimental_run_functions_eagerly(True)
    conditional_images = np.zeros(
        (images.shape[0], images.shape[1], images.shape[2], conditional_vectors.shape[-1]),
        dtype='float32')
    if tf.is_tensor(conditional_vectors):
        conditional_vectors = conditional_vectors.numpy()
    conditional_images[:, ] = conditional_vectors.reshape(
        (images.shape[0], 1, 1, conditional_vectors.shape[-1]))
    return tf.concat([images, conditional_images], axis=-1)
75a14931106c05dd4007a0963c4152f0b54b04d5
3,635,700
def add_to_master_list(single_list, master_list):
    """This function appends items in a list to the master list.

    :param single_list: List of dictionaries from the paginated query
    :type single_list: list
    :param master_list: Master list of dictionaries containing group information
    :type master_list: list
    :returns: The master list with the appended data
    """
    for list_item in single_list:
        master_list.append(list_item)
    return master_list
4b4e122e334624626c7db4f09278b44b8b141504
3,635,701
from typing import Optional


def weight_by_attr(
    attr: str, prev_edge: Optional[models.Edge], edge: models.Edge
) -> float:
    """
    Generic weight function to retrieve a value from an edge.
    """
    return getattr(edge, attr)
292ab3d8cd551122eb57663bdc20f0aed288dd43
3,635,702
from typing import Union
import warnings


def check_if_porous(structure: Structure, threshold: float = 2.4) -> Union[bool, None]:
    """Runs zeo++ to check if structure is porous according to the CoRE-MOF definition
    (PLD > 2.4, https://pubs.acs.org/doi/10.1021/acs.jced.9b00835)

    Args:
        structure (Structure): MOF structure to check
        threshold (float, optional): Threshold on the sphere diameter in Angstrom.
            Defaults to 2.4.

    Returns:
        bool: True if porous.
    """
    if is_tool("network"):
        zeopp_results = run_zeopp(structure)
        if zeopp_results["lifs"] >= threshold:
            return True
        return False
    warnings.warn(NO_ZEOPP_WARNING)
    return None
a6af20bcb3273b4d516309fbe156b997db7e30dd
3,635,703
import os


def load_vlay(  # load a layer from a file
        fp,
        providerLib='ogr',
        logger=mod_logger):
    """ what are we using this for? see instance method """
    log = logger.getChild('load_vlay')
    assert os.path.exists(fp), 'requested file does not exist: %s' % fp

    basefn = os.path.splitext(os.path.split(fp)[1])[0]

    # Import a Raster Layer
    vlay_raw = QgsVectorLayer(fp, basefn, providerLib)

    # check if this is valid
    if not vlay_raw.isValid():
        log.error('loaded vlay \'%s\' is not valid. \n \n did you initialize?' % vlay_raw.name())
        raise Error('vlay loading produced an invalid layer')

    # check if it has geometry
    if vlay_raw.wkbType() == 100:
        log.error('loaded vlay has NoGeometry')
        raise Error('no geo')

    #==========================================================================
    # report
    #==========================================================================
    vlay = vlay_raw
    dp = vlay.dataProvider()

    log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n    %s'
             % (vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()),
                dp.featureCount(), fp))

    return vlay
b3794e219dcf3c580dbe8123cfca2c75d5af9e57
3,635,704
def order_node_list(tree):
    """
    Sorts a list of node dicts from a LightGBM instance.
    Key `tree_structure` is specific for LightGBM.

    Parameters
    ----------
    tree : list,
        Unsorted list of node dicts

    Returns
    -------
    ordered_node_list : list,
        Ordered list of node dicts compatible with `GbmModel`
    """
    node = []
    node.append(tree['tree_structure'])
    ordered_node_list = []
    add_next_nodes(ordered_node_list, node)
    return ordered_node_list
812dcaeb96e4c0a55dece5e678fd86d27f42ddf0
3,635,705
import logging
import traceback
import json


def teardown_request_wrap(exception):
    """ Prints tracebacks and handles bugs """
    if exception:
        logging.error(traceback.format_exc())
        return json.dumps({"result": None, 'error': {'message': 'Invalid request'}, 'id': 1})
eaf4a6ebc75a5166704794b0c028a4410f21cfdc
3,635,706
import logging


def sanitize_parameters(func):
    """Sets any queryparams in the kwargs"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            logging.info(f'[middleware] [sanitizer] args: {args}')
            myargs = dict(request.args)
            # Exclude params like loggedUser here
            sanitized_args = remove_keys(['loggedUser'], myargs)
            kwargs['params'] = sanitized_args
        except GeostoreNotFound:
            return error(status=404, detail='body params not found')
        return func(*args, **kwargs)
    return wrapper
b76d4361e73671130b03463ac74144a3a2111bef
3,635,707
def build_render_setup(cfg):
    """Build information struct about the rendering backend from a configuration.

    This performs type conversion to the expected types. Paths contained in cfg
    are expected to be already expanded. That is, it should not contain global
    variables or other system dependent abbreviations.

    Args:
        cfg (dict): dictionary with Dataset configuration

    Returns:
        dict

    Raises:
        None
    """
    render_setup = dict()
    render_setup['backend'] = str(cfg['backend'])
    if render_setup['backend'] == 'blender-cycles':
        render_setup['samples'] = float(cfg['samples'])
        render_setup['integrator'] = str(cfg['integrator'])
        render_setup['denoising'] = bool(cfg['denoising'])
        try:
            render_setup['allow_occlusions'] = bool(cfg['allow_occlusions'])
            render_setup['motion_blur'] = bool(cfg['motion_blur'])
        except KeyError:
            render_setup['allow_occlusions'] = ''
            logger.warn('Dataset does not contain occlusions/blur info. It might be an old dataset version.')
    else:
        logger.warn('Loading dataset which has not been rendered with ABR')
    return render_setup
e7631ccbda98a99d1db961b3bbcbcf36d1b7b3ad
3,635,708
def generate_coupled_image_from_self(img, out_img, noise_amp=10):
    """ Generates an input image for siam by concatenating an image with a transformed version of itself """

    def __synthesize_prev_img(in_img, noise_amp=10):
        """Synthesizes previous frame by transforming the input image

        Args:
            in_img (str): input image path
            noise_amp (int, optional): Defaults to 10.

        Returns:
            2-D ndarray: the synthesized previous image
        """
        data = tifffile.imread(in_img)
        image = data
        modes_x, modes_y = 10, 4
        amp = 1
        amps_x, amps_y = np.random.random_sample(modes_x) * amp, np.random.random_sample(modes_y) * amp

        def func(xy):
            return (xy[0] + np.sum(amps_y * np.sin(modes_y * 2 * np.pi * xy[0] / image.shape[0])),
                    xy[1] + np.sum(amps_x * np.sin(modes_x * 2 * np.pi * xy[1] / image.shape[1])))

        out = geometric_transform(image, func)
        noise = np.random.normal(0, noise_amp, size=image.shape)
        out = out + noise
        out[out < 0] = 0
        out[out > 255] = 255
        return out

    curr_frame = tifffile.imread(img)
    synthesized_previous_frame = __synthesize_prev_img(img, noise_amp)
    if curr_frame is None:
        raise IOError  # tiff file not found
    out = np.concatenate((synthesized_previous_frame, curr_frame), axis=1).astype(np.uint8)
    cv2.imwrite(filename=out_img, img=out)
778de50045b2b8932453c61863923bb9d2127ad6
3,635,709
def pca_preprocess(df, pca_components):
    """Preprocess the given dataframe using PCA"""
    # Drop rows
    df.dropna(axis=0, inplace=True)

    # Separate features and targets
    X = df.drop('ASPFWR5', axis=1)
    y = df['ASPFWR5']

    # Dimensionality reduction with principal component analysis
    X = StandardScaler().fit_transform(X)
    X = PCA(n_components=pca_components).fit_transform(X)

    # Set index to datetime
    df = pd.DataFrame(X, index=y.index)

    # Merge X and y
    df['ASPFWR5'] = y
    return df
b09506efb52502aacbb66a9b80a6d2abe55f84d9
3,635,710
from datetime import datetime


def add_nonce(func):
    """Helper function which adds a nonce to the kwargs dict"""
    @wraps(func)
    def inner(*args, **kwargs):
        if "nonce" not in kwargs:
            kwargs["nonce"] = int(datetime.utcnow().timestamp() * 1000)
        return func(*args, **kwargs)
    return inner
9138066e65416dab677c42ac8a6106f4dc421832
3,635,711
from typing import Dict
from typing import Any
import os
import toml


def build_train_dict(config_file: str, task: str) -> Dict[str, Any]:
    """
    Read the configuration file given by the user.
    If it is a TOML file, ensures that the format corresponds to the one in resources.

    Args:
        config_file: path to a configuration file (JSON or TOML).
        task: task learnt by the network (example: classification, regression, reconstruction...).

    Returns:
        dictionary of values ready to use for the MapsManager
    """
    if config_file is None:
        # read default values
        clinicadl_root_dir = os.path.abspath(os.path.join(__file__, "../.."))
        config_path = os.path.join(
            clinicadl_root_dir,
            "resources",
            "config",
            "train_config.toml",
        )
        config_dict = toml.load(config_path)
        config_dict = remove_unused_tasks(config_dict, task)

        train_dict = dict()
        # Fill train_dict from TOML files arguments
        for config_section in config_dict:
            for key in config_dict[config_section]:
                train_dict[key] = config_dict[config_section][key]

    elif config_file.endswith(".toml"):
        user_dict = toml.load(config_file)
        if "Random_Search" in user_dict:
            del user_dict["Random_Search"]

        # read default values
        clinicadl_root_dir = os.path.abspath(os.path.join(__file__, "../.."))
        config_path = os.path.join(
            clinicadl_root_dir,
            "resources",
            "config",
            "train_config.toml",
        )
        config_dict = toml.load(config_path)
        # Check that TOML file has the same format as the one in
        # clinicadl/resources/config/train_config.toml
        if user_dict is not None:
            for section_name in user_dict:
                if section_name not in config_dict:
                    raise ClinicaDLConfigurationError(
                        f"{section_name} section is not valid in TOML configuration file. "
                        f"Please see the documentation to see the list of options in TOML configuration file."
                    )
                for key in user_dict[section_name]:
                    if key not in config_dict[section_name]:
                        raise ClinicaDLConfigurationError(
                            f"{key} option in {section_name} is not valid in TOML configuration file. "
                            f"Please see the documentation to see the list of options in TOML configuration file."
                        )
                    config_dict[section_name][key] = user_dict[section_name][key]

        train_dict = dict()

        # task dependent
        config_dict = remove_unused_tasks(config_dict, task)

        # Fill train_dict from TOML files arguments
        for config_section in config_dict:
            for key in config_dict[config_section]:
                train_dict[key] = config_dict[config_section][key]

    elif config_file.endswith(".json"):
        train_dict = read_json(config_file)
    else:
        raise ClinicaDLConfigurationError(
            f"config_file {config_file} should be a TOML or a JSON file."
        )

    return train_dict
cffccce5763596323e49a2633c70eebdbb96a1e8
3,635,712
def morse_encode(string):
    """Converts a string to morse code"""
    words = [morse_encode_word(word) for word in string.split(' ')]
    return ' '.join(words)
aea0ffc0172096f8507c16ee5f7fc9e75f36c596
3,635,713
import glob
import os


def examples():
    """Load example paths."""
    return [(loader(path), path)
            for path in glob.glob(os.path.join(RESOURCE_DIR, "examples", "*.json"))]
2e4aefd01f783d94d30418ccd302c63deead2022
3,635,714
def TIMES_cleanup(file, Model_Module):
    """Cleans data generated by Oasis TIMES and returns a dataframe with the DTXSID of the
    parent compound and InChI key of each metabolite.

    The Model_Module argument should be a string to designate the model used for metabolism
    (e.g., TIMES_RatLiver S9, TIMES_RatInVivo).
    """
    df = []
    df = pd.read_csv(file, delimiter="\t", usecols=['Chem. Name', 'Smiles'])  # Reads 'Chem. Name' and 'Smiles' columns from the tab-delimited TIMES file
    df = df.rename(columns={'Chem. Name': 'DTXSID'})  # Renames 'Chem. Name' to 'DTXSID'
    df = df[:-1]  # Remove empty bottom row
    df = df[1:]  # Remove empty top row
    df['Smiles'] = df['Smiles'].str.replace('{', '[').str.replace('}', ']')  # Replaces curly brackets with normal brackets in SMILES strings
    df[Model_Module] = 1  # Adds Times_(Model_Module) to designate the model generating the metabolite
    df['DTXSID'].replace({' ': num.NaN}, inplace=True)  # Cleans DTXSID to list NaN in empty rows
    df['Metabolite_INCHIKEY'] = num.NaN  # Initializes column for metabolite InChI key
    metabList = df.Smiles[df['DTXSID'].isnull()]  # Establishes boolean list to designate indices with metabolite SMILES
    df['Metabolite_INCHIKEY'] = SMILES_to_InchiKey(metabList)  # Converts metabolite SMILES to InChI keys
    df['DTXSID'] = df['DTXSID'].fillna(method='ffill')  # Fills empty spaces with DTXSID; empty spaces are filled with the preceding DTXSID
    df = df[df['Metabolite_INCHIKEY'].notnull()]  # Removes all parent entries, which are represented as nulls in the metabolite INCHIKEY list
    df = df.drop_duplicates()
    df['Clean_SMILES'] = clean_SMILES(df['Smiles'])
    return df[['DTXSID', 'Metabolite_INCHIKEY', 'Clean_SMILES', Model_Module]]
ae5d566ad606ea74d7209b8304693238c71981e0
3,635,715
import random


def generate_key():
    """Generate a key for our cipher"""
    shuffled = sorted(chars, key=lambda k: random.random())
    return dict(zip(chars, shuffled))
dc0cc2c5ac063f0b0e5f7b53445a43680d34be8f
3,635,716
import os
import re


def get_sdkconfig_value(sdkconfig_file, key):
    """
    Return the value of given key from sdkconfig_file.
    If sdkconfig_file does not exist or the option is not present, returns None.
    """
    assert key.startswith('CONFIG_')
    if not os.path.exists(sdkconfig_file):
        return None
    # keep track of the last seen value for the given key
    value = None
    # if the value is quoted, this excludes the quotes from the value
    pattern = re.compile(r"^{}=\"?([^\"]*)\"?$".format(key))
    with open(sdkconfig_file, 'r') as f:
        for line in f:
            match = re.match(pattern, line)
            if match:
                value = match.group(1)
    return value
d8f11dec3406d5fc166883d99bc3f42ca4eb6483
3,635,717
def unmatched(match):
    """Return unmatched part of re.Match object."""
    start, end = match.span(0)
    return match.string[:start] + match.string[end:]
6d34396c2d3c957d55dbef16c2673bb7f571205c
3,635,718
def cubicgw(ipparams, width, etc=[]):
    """
    This function fits the variation in Gaussian-measured PRF half-widths using a 2D cubic.

    Parameters
    ----------
    x1: linear coefficient in x
    x2: quadratic coefficient in x
    x3: cubic coefficient in x
    y1: linear coefficient in y
    y2: quadratic coefficient in y
    y3: cubic coefficient in y
    c : constant

    Returns
    -------
    returns the flux values for the intra-pixel model

    Revisions
    ---------
    2018-11-16  Kevin Stevenson, STScI  kbs@stsci.edu
                Original version
    """
    x1 = ipparams[0]
    x2 = ipparams[1]
    x3 = ipparams[2]
    y1 = ipparams[3]
    y2 = ipparams[4]
    y3 = ipparams[5]
    c = ipparams[6]
    s0 = ipparams[7]
    sy, sx = width

    return x1*(sx-s0) + x2*(sx-s0)**2 + x3*(sx-s0)**3 + y1*(sy-s0) + y2*(sy-s0)**2 + y3*(sy-s0)**3 + c
334be9d8dc8baaddf122243e4f19d681efc707cf
3,635,719
import argparse


def _make_parser():
    """
    Generates argument parser with all necessary parameters.

    :returns script's arguments (host, port, index, type, id, searchserver, server, stdin, pipeline)
    :rtype argparse.ArgumentParser
    """
    p = argparse.ArgumentParser(description=__doc__,
                                formatter_class=argparse.RawDescriptionHelpFormatter)  # noqa
    inputgroup = p.add_mutually_exclusive_group(required=True)
    inputgroup.add_argument('-server', type=str,  # noqa
                            help="use http://host:port/index/type. "
                                 "Defines the Elasticsearch node and its index "
                                 "for the input data")
    inputgroup.add_argument('-stdin', action="store_true",  # noqa
                            help="get data from stdin. Might be used with -pipeline.")
    p.add_argument('-pipeline', action="store_true",
                   help="output every record (even if not enriched) "
                        "to put this script into a pipeline")
    p.add_argument('-searchserver', type=str,
                   help="use http://host:port/index/type "
                        "to provide a local Elasticsearch instance "
                        "with entityfacts in the specified index")
    p.add_argument('-ignhub', action="store_true",
                   help="ignore hub.culturegraph.org. Here a local "
                        "searchserver must be provided.")
    return p
30e4daf3ef65684c57f918716bcfd61eebdefffe
3,635,720
def main():
    """
    This function displays all the gui elements of the music recommender system

    Parameters:
        -

    Returns:
        df_list (DataFrame): the list of input audio entered by the user
    """
    fileslist = get_static_store()
    folderPath = col1.text_input('Enter folder path:')

    # Declaring the cm variable by the
    # color palette from seaborn
    cm = sns.light_palette("blue", as_cmap=True)

    if folderPath:
        filename = file_selector(folderPath)
        if not filename in fileslist.values():
            fileslist[filename] = filename
    else:
        fileslist.clear()  # Hack to clear list if the user clears the cache and reloads the page
        col1.info("Select an audio file")
        df_list = pd.DataFrame(columns=['Title'])

    # clear list
    if col1.button("Clear music list"):
        fileslist.clear()
        df_list = list(fileslist.keys())

    # show list
    if col1.checkbox("Show music list?", True):
        # transform list into dataframe for ease of use
        df_list = pd.DataFrame(columns=['Title'])
        df_list['Title'] = list(fileslist.keys())

        # color palette from seaborn
        cm = sns.light_palette("green", as_cmap=True)
        col1.dataframe(df_list.style.background_gradient(cmap=cm).set_precision(2))

    return df_list
dcde78271a7358848b0947d2ad0674d15fcf9ae1
3,635,721
def get_columns_by_type(df, req_type):
    """ get columns by type of data frame

    Parameters:
        df : data frame
        req_type : type of column like categorical, integer,

    Returns:
        df: Pandas data frame
    """
    g = df.columns.to_series().groupby(df.dtypes).groups
    type_dict = {k.name: v for k, v in g.items()}
    return type_dict.get(req_type)
aeedea92fbfb720ca6e7a9cd9920827a6ad8c6b0
3,635,722
def get_total(lines):
    """
    This function takes in a list of lines and returns a single float value
    that is the total of a particular variable for a given year and tech.

    Parameters:
    -----------
    lines : list
        This is a list of datalines that we want to total.

    Returns:
    --------
    total : float
        This is the sum total from the data lines.
    """
    total = 0.0
    for line in lines:
        data_sep = line.split()
        total += float(data_sep[0])
    return total
284f8061f3659999ae7e4df104c86d0077b384da
3,635,723
def get_ipv6_by_ids(ip_ids):
    """Get Many Ipv6."""
    networks = list()
    for ip_id in ip_ids:
        networks.append(get_ipv6_by_id(ip_id))
    return networks
29511fca93063921ace5019225c03ede518b4c0d
3,635,724
def box(t, t_start, t_stop):
    """Box-shape (Theta-function)

    The shape is 0 before `t_start` and after `t_stop` and 1 elsewhere.

    Args:
        t (float): Time point or time grid
        t_start (float): First value of `t` for which the box has value 1
        t_stop (float): Last value of `t` for which the box has value 1

    Note:
        You may use :class:`numpy.vectorize`, :func:`functools.partial`, or
        :func:`qutip_callback`, cf. :func:`flattop`.
    """
    if t < t_start:
        return 0.0
    if t > t_stop:
        return 0.0
    return 1.0
8f4f0e57323f38c9cfa57b1661c597b756e8c4e7
3,635,725
from datetime import datetime
import os
import multiprocessing


def updateBaselines(product, date: datetime, n_workers=20, block_scale_factor=1, time=False) -> dict:
    """Updates anomaly baselines

    ***

    Parameters
    ----------
    product:str
    date:datetime
    n_workers:int
    block_scale_factor:int
    time:bool

    Returns
    -------
    Dictionary with the following key/value pairs:
        product:str
            Product name
        paths:tuple
            Tuple of filepaths of the anomalybaseline files that were updated
    """
    startTime = datetime.now()
    # create dict of anomaly baseline folders for each baseline type
    baseline_locations = {anomaly_type: os.path.join(BASELINE_DIR, product, anomaly_type)
                          for anomaly_type in ["mean_5year", "median_5year", 'mean_10year', 'median_10year']}
    # get list of input data files
    input_paths = _listFiles(product, date)
    # check to make sure we got at least 10
    if len(input_paths) < 10:
        raise UnavailableError(f"Only {len(input_paths)} input image paths found")
    # get raster metadata and dimensions
    with rasterio.open(input_paths[0]) as tempmeta:
        metaprofile = tempmeta.profile
        width = tempmeta.width
        height = tempmeta.height
    # add BIGTIFF where necessary
    if product in NDVI_PRODUCTS:
        metaprofile['BIGTIFF'] = 'YES'
    # set output filenames
    output_date = _getMatchingBaselineDate(product, date)
    mean_5yr_name = os.path.join(baseline_locations["mean_5year"], f"{product}.{output_date}.anomaly_mean_5year.tif")
    median_5yr_name = os.path.join(baseline_locations["median_5year"], f"{product}.{output_date}.anomaly_median_5year.tif")
    mean_10yr_name = os.path.join(baseline_locations["mean_10year"], f"{product}.{output_date}.anomaly_mean_10year.tif")
    median_10yr_name = os.path.join(baseline_locations["median_10year"], f"{product}.{output_date}.anomaly_median_10year.tif")
    # open output handles
    log.debug("Opening handles")
    mean_5yr_handle = rasterio.open(mean_5yr_name, 'w', **metaprofile)
    median_5yr_handle = rasterio.open(median_5yr_name, 'w', **metaprofile)
    mean_10yr_handle = rasterio.open(mean_10yr_name, 'w', **metaprofile)
    median_10yr_handle = rasterio.open(median_10yr_name, 'w', **metaprofile)
    # set block size and get windows
    blocksize = metaprofile['blockxsize'] * int(block_scale_factor)
    windows = getWindows(width, height, blocksize)
    # use windows to create parallel args
    parallel_args = [(w, input_paths, metaprofile['dtype']) for w in windows]
    # do multiprocessing
    p = multiprocessing.Pool(n_workers)
    for win, values in p.imap(_mp_worker, parallel_args):
        mean_5yr_handle.write(values['mean_5year'], window=win, indexes=1)
        median_5yr_handle.write(values['median_5year'], window=win, indexes=1)
        mean_10yr_handle.write(values['mean_10year'], window=win, indexes=1)
        median_10yr_handle.write(values['median_10year'], window=win, indexes=1)
    ## close pool
    p.close()
    p.join()
    ## close handles
    mean_5yr_handle.close()
    median_5yr_handle.close()
    mean_10yr_handle.close()
    median_10yr_handle.close()
    # cloud-optimize new anomalies
    log.debug("Converting baselines to cloud-optimized geotiffs and ingesting to S3")
    # cloud-optimize outputs
    output_paths = (mean_5yr_name, median_5yr_name, mean_10yr_name, median_10yr_name)
    p = multiprocessing.Pool(len(output_paths))
    p.imap(cloud_optimize_inPlace, output_paths)
    ## close pool
    p.close()
    p.join()
    # if time==True, log total time for anomaly generation
    endTime = datetime.now()
    if time:
        log.info(f"Finished in {endTime-startTime}")
    # return dict
    return {'product': product, 'paths': output_paths}
d731ca3f8af79b279a5f995f51ef110c4952c453
3,635,726
import os
import collections
import re


def analyze_integration_target_dependencies(integration_targets):  # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[str]]
    """Analyze the given list of integration test targets and return a dictionary expressing target names and the target names which depend on them."""
    real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'

    role_targets = [target for target in integration_targets if target.type == 'role']
    hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)

    dependencies = collections.defaultdict(set)

    # handle setup dependencies
    for target in integration_targets:
        for setup_target_name in target.setup_always + target.setup_once:
            dependencies[setup_target_name].add(target.name)

    # handle target dependencies
    for target in integration_targets:
        for need_target in target.needs_target:
            dependencies[need_target].add(target.name)

    # handle symlink dependencies between targets
    # this use case is supported, but discouraged
    for target in integration_targets:
        for path in data_context().content.walk_files(target.path):
            if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
                continue

            real_link_path = os.path.realpath(path)

            if not real_link_path.startswith(real_target_root):
                continue

            link_target = real_link_path[len(real_target_root):].split('/')[0]

            if link_target == target.name:
                continue

            dependencies[link_target].add(target.name)

    # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
    # script based targets are scanned as they may execute a playbook with role dependencies
    for target in integration_targets:
        meta_dir = os.path.join(target.path, 'meta')

        if not os.path.isdir(meta_dir):
            continue

        meta_paths = data_context().content.get_files(meta_dir)

        for meta_path in meta_paths:
            if os.path.exists(meta_path):
                # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
                try:
                    meta_lines = read_text_file(meta_path).splitlines()
                except UnicodeDecodeError:
                    continue

                for meta_line in meta_lines:
                    if re.search(r'^ *#.*$', meta_line):
                        continue

                    if not meta_line.strip():
                        continue

                    for hidden_target_name in hidden_role_target_names:
                        if hidden_target_name in meta_line:
                            dependencies[hidden_target_name].add(target.name)

    while True:
        changes = 0

        for dummy, dependent_target_names in dependencies.items():
            for dependent_target_name in list(dependent_target_names):
                new_target_names = dependencies.get(dependent_target_name)

                if new_target_names:
                    for new_target_name in new_target_names:
                        if new_target_name not in dependent_target_names:
                            dependent_target_names.add(new_target_name)
                            changes += 1

        if not changes:
            break

    for target_name in sorted(dependencies):
        consumers = dependencies[target_name]

        if not consumers:
            continue

        display.info('%s:' % target_name, verbosity=4)

        for consumer in sorted(consumers):
            display.info('  %s' % consumer, verbosity=4)

    return dependencies
affbfb081bbbd4d40a2c1dd21f047d139b54dc77
3,635,727
import numpy


def newton_cotes(order, domain=(0, 1), growth=False, segments=1):
    """
    Generate the abscissas and weights in Newton-Cotes quadrature.

    Newton-Cotes quadrature are a group of formulas for numerical integration
    based on evaluating the integrand at equally spaced points.

    Args:
        order (int, numpy.ndarray):
            Quadrature order.
        domain (:func:`chaospy.Distribution`, :class:`numpy.ndarray`):
            Either distribution or bounding of interval to integrate over.
        growth (bool):
            If True sets the growth rule for the quadrature rule to only
            include orders that enhances nested samples.
        segments (int):
            Split intervals into N subintervals and create a patched
            quadrature based on the segmented quadrature. Can not be lower
            than `order`. If 0 is provided, default to square root of `order`.
            Nested samples only exist when the number of segments are fixed.

    Returns:
        (numpy.ndarray, numpy.ndarray):
            abscissas:
                The quadrature points for where to evaluate the model function
                with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
                number of samples.
            weights:
                The quadrature weights with ``weights.shape == (N,)``.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.newton_cotes(4)
        >>> abscissas.round(4)
        array([[0.  , 0.25, 0.5 , 0.75, 1.  ]])
        >>> weights.round(4)
        array([0.0778, 0.3556, 0.1333, 0.3556, 0.0778])
        >>> abscissas, weights = chaospy.quadrature.newton_cotes(4, segments=2)
        >>> abscissas.round(4)
        array([[0.  , 0.25, 0.5 , 0.75, 1.  ]])
        >>> weights.round(4)
        array([0.0833, 0.3333, 0.1667, 0.3333, 0.0833])
    """
    order = numpy.asarray(order)
    order = numpy.where(growth, numpy.where(order, 2**order, 0), order)
    return hypercube_quadrature(
        _newton_cotes,
        order=order,
        domain=domain,
        segments=segments,
    )
72c4afcd7dce50752f349556356db000addba649
3,635,728
def transpose(a, axes=None):
    """transpose(a, axes=None) returns array with dimensions permuted
    according to axes.  If axes is None (default) returns array with
    dimensions reversed.
    """
    # if axes is None:  # this test has been moved into multiarray.transpose
    #     axes = arange(len(array(a).shape))[::-1]
    return multiarray.transpose(a, axes)
80fd37c9ab9e48d9bddc95eb8ae32f6d48250b6a
3,635,729
def find_next_prime(N: int) -> int:
    """Find next prime >= N

    Parameters
    ----------
    N : int
        Starting point to find the next prime >= N.

    Returns
    -------
    int
        the next prime found after the number N
    """
    def is_prime(n):
        if n % 2 == 0:
            return False
        i = 3
        while i * i <= n:
            if n % i != 0:
                i += 2
            else:
                return False
        return True

    if N < 3:
        return 2
    if N % 2 == 0:
        N += 1
    for n in range(N, 2 * N, 2):
        if is_prime(n):
            return n
8648b3583e84a520eca0435cf6ebeb5a939af2fd
3,635,730
def in_16(library, session, space, offset, extended=False):
    """Reads a 16-bit value from the specified memory space and offset.

    Corresponds to viIn16* function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param extended: Use 64 bits offset independent of the platform.
    :return: Data read from memory, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    value_16 = ViUInt16()
    if extended:
        ret = library.viIn16Ex(session, space, offset, byref(value_16))
    else:
        ret = library.viIn16(session, space, offset, byref(value_16))
    return value_16.value, ret
af7f28001faed46e52af0645462cd429e5ca7eb8
3,635,731
from typing import Pattern


def match_head(subject, pattern):
    """Checks if the head of subject matches the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    pattern_head = get_head(pattern)
    if pattern_head is None:
        return True
    if issubclass(pattern_head, OneIdentityOperation):
        return True
    subject_head = get_head(subject)
    assert subject_head is not None
    return issubclass(subject_head, pattern_head)
cd1b418635dd9a974a0ca4643641ad97add0ed7d
3,635,732
def update(number):
    """
    update() : Update document in Firestore collection with request body.
    Ensure you pass a custom ID as part of json body in post request,
    e.g. json={'id': '1', 'title': 'Write a blog post today'}
    """
    try:
        todo_ref = user_ref.document(number).collection("todos")
        id = request.json['id']
        todo_ref.document(id).update(request.json)
        all_todos = [doc.to_dict() for doc in todo_ref.stream()]
        return jsonify(all_todos), 200
    except Exception as e:
        return f"An Error Occurred: {e}"
acb36cb6bcd066af635c97030bb9d843159869b0
3,635,733
import json
import time


def sfn_result(session, arn, wait=10):
    """Get the results of a StepFunction execution

    Args:
        session (Session): Boto3 session
        arn (string): ARN of the execution to get the results of
        wait (int): Seconds to wait between polling

    Returns:
        dict|None: Dict of Json data or None if there was an error getting the failure output
    """
    client = session.client('stepfunctions')
    while True:
        resp = client.describe_execution(executionArn=arn)
        if resp['status'] != 'RUNNING':
            if 'output' in resp:
                return json.loads(resp['output'])
            else:
                resp = client.get_execution_history(executionArn=arn, reverseOrder=True)
                event = resp['events'][0]
                for key in ['Failed', 'Aborted', 'TimedOut']:
                    key = 'execution{}EventDetails'.format(key)
                    if key in event:
                        return event[key]
                return None
        else:
            time.sleep(wait)
ba8a80e81aa5929360d5c9f63fb7dff5ebaf91f3
3,635,734
def forum_latest_user_posts(parser, token):
    """
    {% forum_latest_user_posts user [number] as [context_var] %}
    """
    bits = token.contents.split()
    if len(bits) not in (2, 3, 5):
        raise TemplateSyntaxError('%s tag requires one, two or four arguments' % bits[0])
    if bits[3] != 'as':
        raise TemplateSyntaxError("Second argument to %s tag must be 'as'" % bits[0])
    if not bits[2]:
        bits[2] = 5  # Default number of items
    if not bits[3]:
        bits[4] = 'latest_user_posts'
    return ForumLatestUserPostsNode(bits[1], bits[2], bits[4])
3138f7f43a7cc2b45d7d05ba82cc74bb512dcc29
3,635,735
import argparse


def parse_args(args):
    """Parse command line arguments.
    """
    parser = argparse.ArgumentParser(description='YouTube Subscription Search')
    parser.add_argument(
        '-s', '--secrets-file',
        default='client_id.json',
        help='Client secret file. See README.md on how to get this file.')
    parser.add_argument(
        '-p', '--set-playlist',
        action='store_true',
        help='Set the playlist to save videos to.')
    parser.add_argument(
        '-P', '--just-set-playlist',
        action='store_true',
        help='Just set playlist to save videos to and exit.')
    parser.add_argument(
        '-r', '--refresh-subscriptions',
        action='store_true',
        help='Force a refresh of subscriptions, and search subs.')
    parser.add_argument(
        '-R', '--just-refresh-subscriptions',
        action='store_true',
        help='Refresh subscriptions, and do not search subs.')
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Verbose output')
    parser.add_argument(
        '-d', '--debug',
        action='store_true',
        help='Debug output')
    return parser.parse_args(args)
8d1bacab9754ada84fe0a3da0b229e6ab30e3550
3,635,736
def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0, mode='train', **kwargs):
    """Pure transformer-style multi-headed attention.

    Args:
        x: inputs ((q, k, v), mask)
        params: parameters (none)
        num_heads: int: number of attention heads
        dropout: float: dropout rate
        mode: str: 'train' or 'eval'
        **kwargs: other arguments including the rng

    Returns:
        Pure Multi-headed attention result, and the mask.
    """
    del params
    rng = kwargs.get('rng', None)
    (q, k, v), mask = x
    feature_depth = q.shape[-1]
    assert feature_depth % num_heads == 0
    head_depth = feature_depth // num_heads
    nbatch = np.shape(q)[0]

    # nbatch, seqlen, feature_depth --> nbatch, num_heads, seqlen, head_depth
    def SplitHeads(x):
        return np.transpose(
            np.reshape(x, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))

    # nbatch, num_heads, seqlen, head_depth --> nbatch, seqlen, feature_depth
    def JoinHeads(x):  # pylint: disable=invalid-name
        return np.reshape(
            np.transpose(x, (0, 2, 1, 3)), (nbatch, -1, num_heads * head_depth))

    # Split heads, dot-product attention, rejoin heads.
    res = JoinHeads(
        DotProductAttention(
            SplitHeads(q), SplitHeads(k), SplitHeads(v),
            mask, dropout=dropout, mode=mode, rng=rng))
    return res, mask  # Keep the mask.
32fb6aee5c82b6eaa5aae4cab3b98fb0b5cc423b
3,635,737
def validate_options(options):
    """
    Validate the options and return bool.

    :param options: options to validate
    :type options: dict
    :rtype: bool
    """
    pywikibot.log('Options:')
    notice_keys = [
        'email_subject',
        'email_subject2',
        'email_text',
        'email_text2',
        'note_summary',
        'note_summary2',
        'note_text',
        'note_text2',
    ]
    required_keys = notice_keys + ['date', 'exclusions']
    has_keys = []
    result = True
    for key, value in options.items():
        pywikibot.log(f'-{key} = {value}')
        if key in required_keys:
            has_keys.append(key)
        if key == 'date':
            if not isinstance(value, date):
                result = False
        elif key == 'exclusions':
            if isinstance(value, str):
                options[key] = [value]
            elif not isinstance(value, list):
                result = False
            else:
                for item in value:
                    if not isinstance(item, str):
                        result = False
        elif key == 'max_attempts':
            try:
                options[key] = int(value)
            except ValueError:
                result = False
        elif key in notice_keys:
            if not isinstance(value, str):
                result = False
        else:
            result = False
        pywikibot.log(f'\u2192{key} = {options[key]}')
    if sorted(has_keys) != sorted(required_keys):
        result = False
    return result
dade1084873dc9eec95a3be364560d115bbb670c
3,635,738
import requests
import os


def verify() -> bool:
    """Verify access to the NFVIS Device."""
    print("==> Verifying access to the NFVIS Device Environment.")
    nip, url, login, password = nvfis_getgcred()
    s = requests.Session()
    s.auth = (login, password)
    s.headers = ({'Content-type': 'application/vnd.yang.data+json',
                  'Accept': 'application/vnd.yang.data+json'})
    s.verify = False

    # Test: Is device pingable
    response = os.system(
        "ping -c 2 {} >> nfvis_tests.txt".format(nip)
    )
    # and then check the response...
    if response == 0:
        pingstatus = "Ping Success"
        print("    " + pingstatus)
    else:
        pingstatus = "Ping Failed"
        print("    " + pingstatus)
        return False

    # Test: Is the REST API running
    r = nfv_get_networks_configuration(s, url)
    if r.status_code == 200:
        print("    REST API Success")
    else:
        print("    REST API Failed")
        return False

    print("Tests complete.\n")
    return True
4d446ced8208314f51a98d2cb3eab90812cbd1cf
3,635,739
from typing import List
from typing import Dict
from typing import Any
import logging


def main(
    domain: InnerEnv,
    planner: planning_types.Planner,
    belief: belief_types.Belief,
    runs: int,
    logging_level: str,
) -> List[Dict[str, Any]]:
    """plan online function of online planning

    Handles calling :func:`episode` ::

        for r in runs:
            rewards = episode(domain, planner, belief_updat)

    In the episode actions are taken in ``domain`` according to the
    ``planner``, which uses a belief maintained by ``belief``.

    Returns a one-dimensional list of dictionaries. This is a flat
    concatenation of the results returned by :func:`episode`. Each entry
    (dict) has a key "run" that indicates the number of the run in which it
    was produced.

    :param domain:
    :param planner:
    :param belief:
    :param runs:
    :param logging_level:
    :return: flat concatenation of the results of each episode
    """
    utils.set_logging_options(logging_level)
    logger = logging.getLogger("plan-online")

    logger.info("starting %s run(s)", runs)

    output: List[Dict[str, Any]] = []

    for run in range(runs):

        belief.distribution = domain.functional_reset

        episode_output = episode(
            planner=planner,
            belief=belief,
            domain=domain,
        )

        # here we explicitly add the information of which run the result was
        # generated in to each entry in the results
        for o in episode_output:
            o["run"] = run

        # extend -- flat concatenation -- of our results
        output.extend(episode_output)

        logger.info(
            "run %s/%s terminated: r(%s)",
            run + 1,
            runs,
            utils.discounted_return([t["reward"] for t in episode_output], 0.95),
        )

    return output
e45d8aa43933bd85779d48572e4db327003887ec
3,635,740
import os


def convert_to_target(filepath, data_ann_path_out, target_map):
    """ Saves the image as a .png in the data path """
    if not os.path.isfile(filepath):
        print("No such file found: ", filepath)
        return False

    ann_file = os.path.basename(filepath)

    # open the image
    mask = target_map.read(filepath)
    source_labels = set(np.unique(mask))

    num_added = 0
    new_mask = np.ones_like(mask) * target_map.UNLABELLED
    for cat_id in source_labels:
        # skip the category if not in the mapping
        if not cat_id in target_map.MAP:
            continue
        cat_mask = mask == cat_id
        new_mask[cat_mask] = target_map.MAP[cat_id]
        num_added += 1

    if num_added == 0:
        return False

    im = Image.fromarray(new_mask).convert("L")
    im.save(os.path.join(data_ann_path_out, ann_file))
    return True
6633c311de2b4fda12ae2fabca1bee43c8b78b1b
3,635,741
from typing import Union
from typing import Any


def format_color(
        color: Union[ColorInputType, Any],
        warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
    """
    Format color from string, int, or tuple to tuple type.

    Available formats:
    - Color name str: name of the color to use, e.g. ``"red"`` (all the supported name
      strings can be found in the colordict module, see
      https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
    - HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa
      are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha)
      value defaults to ``0xFF`` if not provided
    - Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are
      2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha)
      value defaults to ``0xFF`` if not provided
    - int: int value of the color to use, using hex numbers can make this parameter more
      readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in
      the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha) value is not
      optional for the int format and must be provided
    - tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B,
      and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha) value
      defaults to ``255`` (opaque) if not provided

    :param color: Color to format. If format is valid returns the same input value
    :param warn_if_invalid: If ``True`` warns if the color is invalid
    :return: Color in (r, g, b, a) format
    """
    if not isinstance(color, ColorInputInstance):
        return color
    if not isinstance(color, pygame.Color):
        try:
            if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:
                if PYGAME_V2:
                    for j in color:
                        if not isinstance(j, int):
                            raise ValueError('color cannot contain floating point values')
                c = pygame.Color(*color)
            else:
                c = pygame.Color(color)
        except ValueError:
            if warn_if_invalid:
                warn(f'invalid color value "{color}"')
            else:
                raise
            return color
    else:
        c = color
    return c.r, c.g, c.b, c.a
e4b5413ce96824e7e4990d9e78ec36ad1690a400
3,635,742
from typing import Optional


def is_yaml_requested(
    content_type: str = None,
    proto: ExtendedProto = None,
    path_suffix: Optional[str] = None,
) -> bool:
    """Checks whether YAML is requested by the user, depending on params."""
    is_yaml = False
    if content_type is not None:
        is_yaml = ("yaml" in content_type) or ("yml" in content_type)
    if proto is not None:
        is_yaml = is_yaml or (proto == "yaml")
    if path_suffix is not None:
        is_yaml = is_yaml or (path_suffix in [".yaml", ".yml"])
    return is_yaml
93ace7639b00430d7f3731a0a54792037edff4cc
3,635,743
def _pqs_in_range(dehn_pq_limit, num_cusps):
    """
    Return an iterator. This iterator, at each step, returns a tuple.
    The contents of this tuple are num_cusps other tuples, and each of these
    is of the form (p,q), where 0 <= p <= dehn_pq_limit,
    -dehn_pq_limit <= q <= dehn_pq_limit, and gcd(p,q) <= 1.

    Ex: pqs_in_range(3, 2) returns
    ((-3, 1), (-3, 1)),
    ((-3, 1), (-3, 2)),
    ((-3, 1), (-2, 1)),
    ((-3, 1), (-2, 3)),
    ((-3, 1), (-1, 0)),
    ...
    ((3, 2), (2, 1))
    ((3, 2), (2, 3))
    ((3, 2), (3, 1))
    ((3, 2), (3, 2))
    """
    pqs = list()
    for p in range(-1 * dehn_pq_limit, dehn_pq_limit + 1):
        for q in range(0, dehn_pq_limit + 1):
            if abs(gcd(p, q)) <= 1:
                pqs.append((p, q))

    # pqs_mult = [ pqs, pqs, pqs... ]
    # because we wish to return pqs x pqs x pqs ... x pqs
    pqs_mult = list()
    for i in range(0, num_cusps):
        pqs_mult.append(pqs)

    return product(*pqs_mult)
fe28e43823c6b2510ed80294d4b7ed4bed02ed54
3,635,744
def _units_defaults(calendar, has_year_zero=None):
    """
    Set calendar specific default units as 'days since reference_date'

    Day 0 of *excel* and *excel1900* starts at 1899-12-31 00:00:00.
    Day 0 of *excel1904* starts at 1903-12-31 00:00:00.
    Decimal calendars *decimal*, *decimal360*, *decimal365*, and *decimal366*
    do not need units so 0001-01-01 00:00:00 is taken.
    Day 0 of *julian*, *gregorian* and *standard* starts at
    -4713-01-01 12:00:00 if not has_year_zero, and at -4712-01-01 12:00:00
    if has_year_zero.
    Day 0 of *proleptic_gregorian* starts at -4714-11-24 12:00:00 if not
    has_year_zero, and at -4713-11-24 12:00:00 if has_year_zero.
    Day 0 of *360_day*, *365_day*, *366_day*, *all_leap*, and *noleap*
    starts at 0000-01-01 12:00:00.

    Parameters
    ----------
    calendar : str
        One of the supported calendar names in *_cfcalendars* and
        *_noncfcalendars*
    has_year_zero : bool, optional
        Astronomical year numbering is used, i.e. year zero exists, if True
        and possible for the given *calendar*. If *None* (default),
        calendar-specific defaults are assumed.

    Returns
    -------
    str
        'days since reference_date' with calendar-specific reference_date

    Examples
    --------
    >>> print(_units_defaults('Excel'))
    'days since 1899-12-31 00:00:00'

    """
    calendar = calendar.lower()
    if has_year_zero is None:
        has_year_zero = _year_zero_defaults(calendar)
    if calendar in ['standard', 'gregorian', 'julian']:
        if has_year_zero:
            return 'days since -4712-01-01 12:00:00'
        else:
            return 'days since -4713-01-01 12:00:00'
    elif calendar in ['proleptic_gregorian']:
        if has_year_zero:
            return 'days since -4713-11-24 12:00:00'
        else:
            return 'days since -4714-11-24 12:00:00'
    elif calendar in _idealized_cfcalendars:
        return 'days since 0000-01-01 12:00:00'
    elif calendar in ['excel', 'excel1900']:
        return 'days since 1899-12-31 00:00:00'
    elif calendar in ['excel1904']:
        return 'days since 1903-12-31 00:00:00'
    elif calendar in _decimalcalendars:
        return 'days since 0001-01-01 00:00:00'
    else:
        raise ValueError(f'Unknown calendar: {calendar}')
06b7cbc78ad49bdfc24324249c89c49cc7a63723
3,635,745
def submit_a_feed(request):
    """
    Let a user add a custom feed source.
    """
    feed_url = request.POST.get('url', '').strip()[:1024]
    user = get_login_user(request)

    if feed_url:
        host = get_host_name(feed_url)

        if host in settings.ALLOWED_HOSTS:
            rsp = add_self_feed(feed_url)
        elif settings.QNMLGB_HOST in host:
            rsp = add_qnmlgb_feed(feed_url)
        else:
            # Distinguish podcasts from plain RSS
            feed_obj = feedparser.parse(feed_url)

            if is_podcast_feed(feed_obj):
                rsp = add_postcast_feed(feed_obj)
            else:
                rsp = add_atom_feed(feed_obj)

        if rsp:
            logger.warning(f"New feed source submitted: `{feed_url}")

            set_active_site(rsp['site'])

            # Logged-in users are subscribed automatically
            if user:
                add_user_sub_feeds(user.oauth_id, [rsp['site'], ])

            # if rsp.get('creator') == 'user':
            #     # Only newly added plain RSS triggers the async update task
            #     django_rq.enqueue(update_sites_async, [rsp['site'], ], result_ttl=1, ttl=3600, failure_ttl=3600)

            return JsonResponse(rsp)
        else:
            logger.warning(f"RSS parsing failed: `{feed_url}")

    return HttpResponseNotFound("Param Error")
bf9d4abc850c8012e7c5f56a18df6880b0ea5b04
3,635,746
def check_existing_credendtials(account_Name):
    """
    Function that checks if a Credentials exists with that account name and returns a Boolean
    """
    return Credentials.credential_exist(account_Name)
31a0edad670b15c9e6e45175c24a55705e9eac4c
3,635,747
import os


def hash_paths(paths, log_interval):
    """Returns a map of the base64 hash to the filename for all paths in path."""
    output = {}
    count_since_log = 0
    for path in paths:
        output[hash_file(path)] = os.path.basename(path)
        count_since_log += 1
        if count_since_log >= log_interval:
            print("Hashed {} of {} files: {}".format(len(output), len(paths), os.path.basename(path)))
            count_since_log = 0
    return output
97d210473344dbd8612389366e7648ef125c3fc5
3,635,748
def scmplx(p, a, b):
    """ p is a string designating a type, either scalar_f or scalar_d. """
    if p == 'scalar_f':
        return vsip_cmplx_f(a, b)
    elif p == 'scalar_d':
        return vsip_cmplx_d(a, b)
    else:
        assert False, 'Type %s not defined for cmplx.' % p
56773eaded2b676c09cdd3b93ef320d9e8a615b3
3,635,749
import json


def ips_description(request):
    """See :class:`bgpranking.api.get_ips_descs`"""
    asn = request.get('asn')
    block = request.get('block')
    if asn is None or block is None:
        return json.dumps({})
    return json.dumps(bgpranking.get_ips_descs(asn, block,
                                               request.get('date'),
                                               request.get('sources')))
47318917517cd519e646e477cd933bd639aa4ceb
3,635,750
def handle_msg(msg: dict) -> list:
    """ Handler for message request object. Logs message and returns list of responses."""
    msg_alert(msg['From'], msg['Body'])
    msg, lol = parse_msg(msg)
    if lol is not None:
        resp = lol
    elif lol is None:
        resp = get_response(msg)
    log_msg = [
        {'From': msg['From']},
        {'Message': msg['Body']},
    ]
    return resp
d3f751dacf2594ae1aa691c4d4f9e58ee41b4f44
3,635,751
def create_app(register_blueprints=True):
    """Function to instantiate, configure, and return a flask app"""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object('app.default_config')  # default config
    # app.config.from_pyfile('application.cfg.py')  # server config file, do not include in versioning
    db.init_app(app)
    api = Api(app)
    api.add_resource(UserList, '/api/users')
    if register_blueprints:
        register_blueprints_on_app(app)
    return app
459c776e713f6e4c4157d9599a625235565c50c8
3,635,752
def RoleAdmin():
    """Super administrator"""
    return 1
78a4fce55fa0fb331c0274c23213ae72afe7184f
3,635,753
import pyproj
from pyproj.exceptions import DataDirError


def _get_proj_info():
    """Information on system PROJ

    Returns
    -------
    proj_info: dict
        system PROJ information
    """
    try:
        data_dir = pyproj.datadir.get_data_dir()
    except DataDirError:
        data_dir = None

    blob = [
        ("pyproj", pyproj.__version__),
        ("PROJ", pyproj.proj_version_str),
        ("data dir", data_dir),
    ]

    return dict(blob)
4e6d7b3f1375f32a5fe4dd106b8e9ac79f29912f
3,635,754
def run_program(intcodes):
    """run intcodes, which are stored as a dict of step: intcode pairs"""
    pc = 0
    last = len(intcodes) - 1
    while pc <= last:
        if intcodes[pc] == 1:
            # add
            if pc + 3 > last:
                raise Exception("out of opcodes")
            arg1 = intcodes[pc + 1]
            arg2 = intcodes[pc + 2]
            dest = intcodes[pc + 3]
            intcodes[dest] = intcodes[arg1] + intcodes[arg2]
            pc += 4
        elif intcodes[pc] == 2:
            # multiply
            if pc + 3 > last:
                raise Exception("out of opcodes")
            arg1 = intcodes[pc + 1]
            arg2 = intcodes[pc + 2]
            dest = intcodes[pc + 3]
            intcodes[dest] = intcodes[arg1] * intcodes[arg2]
            pc += 4
        elif intcodes[pc] == 99:
            # end program
            return intcodes
        else:
            # invalid
            raise Exception("invalid opcode: {}".format(intcodes[pc]))

    # should never reach this point (only if end is reached before program
    # stop instruction)
    raise Exception("ran out of intcodes before program stop reached")
e87343483abddffd9508be6da7814abcbcd59a79
3,635,755
import torch as T


def concat(lst, cat_symb=None, append_to_end=False):
    """Concatenates `lst` of Tensors, optionally with a join symbol.

    Args:
        lst: list of Tensors to concatenate.
        cat_symb: concatenation symbol.
        append_to_end: if set to ``True``, it will add the `cat_symb` to the end of the
            concatenated sequence.

    Returns:
        cat_tens (Tensor): concatenated tensor where sub-tensors are separated by 'cat_symb'.
    """
    assert isinstance(lst, list)
    if cat_symb is not None:
        new_lst = []
        if not isinstance(cat_symb, list):
            cat_symb = [cat_symb]
        cat_symb = T.tensor(cat_symb).to(lst[0].device)
        for indx, e in enumerate(lst):
            new_lst.append(e)
            if indx == len(lst) - 1 and not append_to_end:
                continue
            new_lst.append(cat_symb)
    else:
        new_lst = lst
    cat_tens = T.cat(new_lst)
    return cat_tens
c8a17b7c44abd3f41ca57097782a2707ba9aaa63
3,635,756
def find_mcs(mols):
    """Function to count the number of molecules making it to the end of the test"""
    out_mols = ROMol_Vect()
    while mols.hasNext():
        molobj = mols.next()
        rdmol, molobj = get_or_create_rdmol(molobj)
        # Add this mol to that vector
        out_mols.add(rdmol)
    # Now find the MCS
    mcs = RDKFuncs.findMCS(out_mols)  # ,True,1,60,False,False,False,False,AtomComparator.AtomCompareElements,BondComparator.BondCompareAny)
    # Now just return the count
    return mcs.getSmartsString()
b1ca9cba06187918559bd5ce6b13319b793c4fc6
3,635,757
def to_numpy(tensor):
    """Convert 3-D torch tensor to a 3-D numpy array.

    Args:
        tensor: Tensor to be converted.
    """
    return tensor.transpose(0, 1).transpose(1, 2).clone().numpy()
034e016caccdf18e8e33e476673884e2354e21c7
3,635,758
def calib_constants(det, exp=None, ctype='pedestals', run=None, time_sec=None, vers=None, url=cc.URL):
    """Returns calibration constants and document with metadata for specified parameters.

    To get meaningful constants, at least a few parameters must be specified, e.g.:
    - det, ctype, time_sec
    - det, ctype, version
    - det, exp, ctype, run
    - det, exp, ctype, time_sec
    - det, exp, ctype, run, version
    etc...
    """
    db_det, db_exp, colname, query = dbnames_collection_query(det, exp, ctype, run, time_sec, vers)
    logger.debug('get_constants: %s %s %s %s' % (db_det, db_exp, colname, str(query)))
    dbname = db_det if exp is None else db_exp
    doc = find_doc(dbname, colname, query, url)
    if doc is None:
        logger.warning('document is not available for query: %s' % str(query))
        return (None, None)
    return (get_data_for_doc(dbname, colname, doc, url), doc)
bb84690d11747c5bcc408ae7ccef05b62e0267ba
3,635,759
import time


def is_cluster_healthy(admin, zk, retries=10, retry_wait=30):
    """Return true if cluster is healthy."""
    retries_left = retries
    while retries_left:
        md = _request_meta(admin)
        if md is not None and not _unhealthy(md, zk):
            logger.info("Cluster is healthy!")
            return True
        else:
            logger.warning(
                "Cluster is not healthy, retries left/total = %d/%d.",
                retries_left,
                retries,
            )
            time.sleep(retry_wait)
            retries_left = retries_left - 1
    logger.error("Cluster was found to be un-healthy after multiple retries.")
    return False
2995067e30664a616cc48409b6597bf1a80f0067
3,635,760
def load_data(loc):
    """ Load in the csv file """
    df = pd.read_csv(loc, engine="python", encoding="utf-8")
    df = df.fillna("")
    df = np.asarray(df)
    return df
b59cc344cdc2ad2805f7d237e22c65c8b2f7300c
3,635,761
import os
import os.path as osp


def listdir(folder, suffix):
    """ Output the path of files in the folder with specific suffix"""
    list_path = []
    for root, _, files in os.walk(folder, followlinks=True):
        for f in files:
            if f.endswith(suffix):
                list_path.append(osp.join(root, f))
    return list_path
6762a7fb0a8531af0de2aa9c3306c85b0306d820
3,635,762
def clif_deps_to_cclibs(labels):
    """Gets the cc_library name for each label, as a list."""
    return [_clif_to_lib(name, PYCLIF_CC_LIB_SUFFIX) for name in labels]
aeb85cd716282099b9efc36efd6dcd6cd49413ba
3,635,763
def _get_cache_filename(year=2020):
    """Returns the `Path` to the COBS data file for a given year."""
    return CACHEDIR / f'cobs{year}.feather'
70f989165b6e3e10604a468d06f5f565e499ab28
3,635,764
def get_critical_hours_end(critical_ffmc: float, solar_noon_ffmc: float, critical_hour_start: float):
    """ Returns the hour of day (on 24H clock) at which the hourly FFMC drops below the
    threshold of critical_ffmc. Should only be called if critical_hour_start is not None.
    """
    if critical_hour_start is None:
        return None
    if critical_hour_start < 13:
        # if critical_hour_start is in the morning, we know that based on the diurnal curve,
        # the critical hour is going to extend into the afternoon, so set clock_time to then
        clock_time = 14.0
    else:
        clock_time = critical_hour_start + 1.0
    # increase time in increments of 1 hour
    while get_afternoon_overnight_diurnal_ffmc(clock_time, solar_noon_ffmc) >= critical_ffmc:
        clock_time += 1.0
        if clock_time >= 32:  # break if clock_time is now 08:00 of the next day
            break
    # subtract the hour that caused FFMC to drop below critical_ffmc
    clock_time -= 1.0
    if clock_time >= 24.0:
        clock_time = clock_time - 24.0
    return clock_time
f50bfca5769bbe36d597ad1ff42d73c8aa4b4bae
3,635,765
def dolpc(x, model_order=8):
    """
    Function dolpc computes the autoregressive model from spectral magnitude samples.

    @param x: Critical band filters.
    @param model_order: Order of model. Default is 8.

    @returns: Autoregressive model from spectral magnitude samples.
    """
    num_bands, num_frames = x.shape

    # Calculate autocorrelation
    R = np.zeros((2 * (num_bands - 1), num_frames))
    R[0:num_bands, :] = x
    for i in range(num_bands - 1):
        R[i + num_bands - 1, :] = x[num_bands - (i + 1), :]

    r = fft.ifft(R.T).real.T
    r = r[0:num_bands, :]

    y = np.ones((num_frames, model_order + 1))
    e = np.zeros((num_frames, 1))

    # Find LPC coeffs by durbin
    if model_order == 0:
        for i in range(num_frames):
            _, e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity=True)
            e[i, 0] = e_tmp
    else:
        for i in range(num_frames):
            y_tmp, e_tmp, _ = spectrum.LEVINSON(r[:, i], model_order, allow_singularity=True)
            y[i, 1:model_order + 1] = y_tmp
            e[i, 0] = e_tmp

    # Normalize each poly by gain.
    y = np.divide(y.T, np.add(np.tile(e.T, (model_order + 1, 1)), 1e-8))

    return y
81e008bd00fd5f8efa3f55df1e310eb52727aa85
3,635,766
def filter_domains(domains, by="evalue", coverage_pct=0.5, tolerance_pct=0.1):
    """Filter overlapping Domain objects and test adjacency rules.

    Adjacency rules are tested again here, in case they are missed within overlap groups.
    For example, the NRPS-para261 domain is not always entirely contained by a condensation
    domain, so should be caught by this pass.

    Parameters:
        domains (list): Domain instances to be filtered
        by (str): Metric used to choose representative domain hit (def. 'evalue')
        coverage_pct (float): Conserved domain coverage percentage threshold
        tolerance_pct (float): CD length tolerance percentage threshold
    Returns:
        list: Domain objects remaining after filtering
    """
    domains = [
        choose_representative_domain(group, by)
        for group in group_overlapping_hits(domains)
    ]
    i, total = 1, len(domains)
    while i < total:
        if i + 1 == total:
            break
        previous, current = domains[i - 1: i + 1]
        # When domains are likely together, e.g. two small C domain hits right next
        # to each other or multiple Methyltransf_X domains, extend its border
        if previous.type == current.type and is_fragmented_domain(
            previous, current, coverage_pct, tolerance_pct
        ):
            previous.end = current.end
            del domains[i]
            continue
        i += 1
    return domains
b980c69bf3309628cbaf3d91c59b77b33b3be4e2
3,635,767
def _replicate_and_maybe_restore_latest_checkpoint(
        unreplicated_optimizer_state,
        unreplicated_params,
        unreplicated_batch_stats,
        unreplicated_training_metrics_grabber,
        train_dir,
        use_deprecated_checkpointing):
    """Restore from the latest checkpoint, if it exists."""
    uninitialized_global_step = -1
    unreplicated_checkpoint_state = dict(
        params=unreplicated_params,
        optimizer_state=unreplicated_optimizer_state,
        batch_stats=unreplicated_batch_stats,
        training_metrics_grabber=unreplicated_training_metrics_grabber,
        global_step=uninitialized_global_step,
        preemption_count=0,
        sum_train_cost=0.0)
    latest = checkpoint.load_latest_checkpoint(
        train_dir,
        target=unreplicated_checkpoint_state,
        recents_filename='latest',
        use_deprecated_checkpointing=use_deprecated_checkpointing)
    found_checkpoint = (
        latest and latest['global_step'] != uninitialized_global_step)

    optimizer_state = jax_utils.replicate(unreplicated_optimizer_state)
    params = jax_utils.replicate(unreplicated_params)
    batch_stats = jax_utils.replicate(unreplicated_batch_stats)
    training_metrics_grabber = jax_utils.replicate(
        unreplicated_training_metrics_grabber)

    if not found_checkpoint:
        return (
            optimizer_state,
            params,
            batch_stats,
            training_metrics_grabber,
            0,  # global_step
            0.0,  # sum_train_cost
            0,  # preemption_count
            False)  # is_restored

    pytree_dict, extra_state = restore_checkpoint(
        latest,
        pytree_keys=[
            'optimizer_state',
            'params',
            'batch_stats',
            'training_metrics_grabber',
        ],
        use_deprecated_checkpointing=use_deprecated_checkpointing)
    return (
        pytree_dict['optimizer_state'],
        pytree_dict['params'],
        pytree_dict['batch_stats'],
        pytree_dict['training_metrics_grabber'],
        extra_state['global_step'],
        extra_state['sum_train_cost'],
        extra_state['preemption_count'],
        True)
a49b0b74d4a1fdd6ed8cae715249ab8febd7c352
3,635,768
def minkowskiSum(obj1, obj2):
    """ Minkowski sum of two polygon objects

    Args:
        obj1, obj2: (n,2) array of corner points
    Return:
        poly: (n,2) array of Minkowski-sum polygon vertices centered at (0, 0)
        bound: dict with keys 'max' and 'min' holding the maximum ([max_x, max_y])
               and minimum ([min_x, min_y]) signed distances from the polygon
               vertices to its center, as reported by scipy's ConvexHull
    """
    # assert obj1.ndim == 2 and obj1.shape[1] == 2
    # assert obj2.ndim == 2 and obj2.shape[1] == 2

    poly = np.array([], dtype=float).reshape(0, 2)
    for p1 in obj1:
        for p2 in obj2:
            poly = np.vstack([poly, np.array([p1+p2])])

    # Center the summed vertices on the origin before taking the convex hull.
    poly00 = poly - np.mean(obj1, axis=0) - np.mean(obj2, axis=0)
    hull = ConvexHull(poly00)
    poly = poly00[hull.vertices]
    bound = {'max': hull.max_bound, 'min': hull.min_bound}

    # assert poly.ndim == 2 and poly.shape[1] == 2
    return poly, bound
7000676601c40c7e32961f26a12c2c79c2c592bd
3,635,769
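A toy call of minkowskiSum above, assuming numpy and scipy.spatial.ConvexHull are imported as the function expects; the two polygons are arbitrary shapes made up for illustration.

import numpy as np

square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
triangle = np.array([[0., 0.], [2., 0.], [1., 1.]])

poly, bound = minkowskiSum(square, triangle)
print(poly)                         # convex-hull vertices of the sum, centred on the origin
print(bound['min'], bound['max'])   # per-axis extents reported by ConvexHull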
def get_rotated_image_from_contour(img, contour, rotation=90):
    """
    Returns a rotated version of img based on cv2.minAreaRect of contour.

    The first side (i.e. the edge from the left-most to the top-most point) is
    always reported as "width" by minAreaRect. If width > height, we know the
    sheet is rotated to the right, so we swap width and height and shift the
    angle by 90 degrees in order to get the correct coordinates from
    cv2.boxPoints.
    """
    rotated_rect = cv2.minAreaRect(contour)

    # Get the center x,y and width and height.
    x_center = int(rotated_rect[0][0])
    y_center = int(rotated_rect[0][1])

    width = int(rotated_rect[1][0])
    height = int(rotated_rect[1][1])
    angle_degrees = rotated_rect[2]

    if width > height:
        width, height = height, width
        angle_degrees = 90 + angle_degrees

    # Reassign rotated rect with updated values
    rotated_rect = ((x_center, y_center), (width, height), angle_degrees)

    # Find the 4 (x,y) coordinates for the rotated rectangle, order: bl, tl, tr, br
    rect_box_points = cv2.boxPoints(rotated_rect)

    img_debug_contour = img.copy()
    cv2.drawContours(img_debug_contour, [contour], 0, (0, 0, 255), 3)
    show_window('biggest_contour', img_debug_contour)

    img_debug = img.copy()
    cv2.drawContours(img_debug, [np.int0(rect_box_points)], 0, (0, 255, 255), 3)
    # show_window('min_area_rect_original_image', img_debug)

    # Prepare for rotation transformation
    src_pts = rect_box_points.astype("float32")
    dst_pts = np.array([
        [0, height-1],  # Bottom Left
        [0, 0],         # Top Left
        [width-1, 0],   # Top Right
    ], dtype="float32")

    # Affine rotation transformation
    ROTATION_MAT = cv2.getAffineTransform(src_pts[:3], dst_pts)
    rotated = cv2.warpAffine(
        img, ROTATION_MAT, (width, height))

    return ndimage.rotate(rotated, rotation)
5351303dbc9d786b32c1760ccadfce81e1174b70
3,635,770
def fix_dataset_dims(d):
    """Given one of the dataset files provided by the organizers, fix its
    dimensions so it's easier to concatenate and use with xr.open_mfdataset.

    Arguments:
        d: xr.Dataset. The dataset you get when you open one of the provided files.
    """
    month = int(d.forecast_time[0].dt.month)
    day = int(d.forecast_time[0].dt.day)
    label = f"{month:02}{day:02}"

    new_d = d.expand_dims("forecast_monthday").assign_coords(
        forecast_monthday=xr.DataArray([label], dims="forecast_monthday")
    )
    new_d = new_d.assign_coords(forecast_year=new_d.forecast_time.dt.year).swap_dims(
        forecast_time="forecast_year"
    )

    # Reorder the dimensions to something that is more intuitive (according to me).
    dims = set(new_d.dims)
    dims.difference_update(
        ("forecast_monthday", "forecast_year", "latitude", "longitude")
    )

    new_d = new_d.transpose(
        "forecast_year", "forecast_monthday", *dims, "latitude", "longitude"
    )

    # new_d = new_d.chunk(chunks="auto")

    return new_d
323aa2c89cfcf124e06d9efa97c4d61775680bdf
3,635,771
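A small synthetic input for fix_dataset_dims above; the variable name and grid are invented purely to show the reshaping, and only xarray, pandas and numpy are assumed.

import numpy as np
import pandas as pd
import xarray as xr

times = pd.to_datetime(["2000-01-02", "2001-01-02", "2002-01-02"])
d = xr.Dataset(
    {"t2m": (("forecast_time", "latitude", "longitude"), np.zeros((3, 2, 2)))},
    coords={"forecast_time": times, "latitude": [0.0, 1.0], "longitude": [0.0, 1.0]},
)

fixed = fix_dataset_dims(d)
print(fixed["t2m"].dims)  # ('forecast_year', 'forecast_monthday', 'latitude', 'longitude')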
from lxml import etree


def prettyprint_xml(element):
    """
    A rough and dirty way to prettyprint an Element with indentation.

    :param lxml.etree._Element element: The Element or ElementTree to format.

    :rtype: str
    :returns: A prettyprinted representation of the element.
    """
    return etree.tostring(element, pretty_print=True).decode('utf-8')
58749d409c3735b021045ba614888858d12b6651
3,635,772
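For example, with lxml available:

from lxml import etree

element = etree.fromstring("<root><child name='a'/><child name='b'/></root>")
print(prettyprint_xml(element))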
import sys def load_vimba_lib(vimba_project: str): """ Load shared library shipped with the Vimba installation Arguments: vimba_project - Library name without prefix or extension Return: CDLL or WinDLL Handle on loaded library Raises: VimbaSystemError if given library could not be loaded. """ platform_handlers = { 'linux': _load_under_linux, 'win32': _load_under_windows } if sys.platform not in platform_handlers: msg = 'Abort. Unsupported Platform ({}) detected.' raise VimbaSystemError(msg.format(sys.platform)) return platform_handlers[sys.platform](vimba_project)
dd27ced38906f9922594035564e8b66c007e3d34
3,635,773
def decorator(IterativeReconAlg, name=None, docstring=None):
    """
    Calls run_main_iter when parameters are given to it.

    :param IterativeReconAlg: obj, class instance of IterativeReconAlg
    :param name: str for name of func
    :param docstring: str other documentation that may need to be included from external source.
    :return: func

    Examples
    --------
    >>> import tigre
    >>> from tigre.demos.Test_data.data_loader import load_head_phantom
    >>> geo = tigre.geometry_default(high_quality=False)
    >>> src = load_head_phantom(number_of_voxels=geo.nVoxel)
    >>> angles = np.linspace(0,2*np.pi,100)
    >>> proj = Ax(src,geo,angles)
    >>> iterativereconalg = decorator(IterativeReconAlg)
    >>> output = iterativereconalg(proj,geo,angles, niter=50)
    """

    def iterativereconalg(proj, geo, angles, niter, **kwargs):
        alg = IterativeReconAlg(proj, geo, angles, niter, **kwargs)
        if name is not None:
            alg.name = name
        alg.run_main_iter()
        if alg.computel2:
            return alg.getres(), alg.geterrors()
        else:
            return alg.getres()

    if docstring is not None:
        setattr(
            iterativereconalg,
            '__doc__',
            docstring +
            IterativeReconAlg.__doc__)
    else:
        setattr(iterativereconalg, '__doc__', IterativeReconAlg.__doc__)
    if name is not None:
        setattr(iterativereconalg, '__name__', name)
    return iterativereconalg
0c7224ea3d58c367d8b7519f7f8ba4d68c00076e
3,635,774
from typing import Any import json def read_json_file(filepath: str) -> Any: """Read JSON from a file. Args: filepath (str): Path to file Returns: Any: The parsed JSON """ with open(filepath, 'r') as json_file: data = json.load(json_file) return data
b4b492aa796b55b81dc8f6a8b91713fe1f00ecd4
3,635,775
def penalized_loss(loss_func, model, inputs, targets, output_regularization, l2_regularization = 0.0, use_dnn = False): """Computes penalized loss with L2 regularization and output penalty. Args: loss_func: Loss function. model: Neural network model. inputs: Input values to be fed into the model for computing predictions. targets: Target values containing either real values or binary labels. output_regularization: Coefficient for feature output penalty. l2_regularization: Coefficient for L2 regularization. use_dnn: Whether using DNN or not when computing L2 regularization. Returns: The penalized loss. """ loss = loss_func(model, inputs, targets) reg_loss = 0.0 if output_regularization > 0: reg_loss += output_regularization * feature_output_regularization( model, inputs) if l2_regularization > 0: num_networks = 1 if use_dnn else len(model.feature_nns) reg_loss += l2_regularization * weight_decay( model, num_networks=num_networks) return loss + reg_loss
c1e8403b274cef6d37ed419b8ba7ed2dc0e30845
3,635,776
import glob from typing import Optional from pathlib import Path import json import os def load_corpus(corpus_id: str, download_if_missing=False) -> Optional[list]: """Loads a corpus that has previously been downloaded Parameters ---------- corpus_id: str The id of the corpus to load. download_if_missing: bool, False If the corpus is not present on disk, should Canary attempt to download it? Returns ------- Union[list, None] if a corpus can be loaded, a list of relevant dataset files will be returned. Otherwise nothing will be returned. Raises ------- UserWarning A warning is raised if the requested corpus cannot be found. """ allowed_values = [x.stem for x in Path(CANARY_CORPORA_LOCATION).iterdir() if x.is_dir()] with open(f"{CANARY_ROOT_DIR}/_data/corpora.json") as corpora: corpora = json.load(corpora) corpora_ids = [corpus['id'] for corpus in corpora] allowed_values += corpora_ids if corpus_id not in allowed_values: raise ValueError(f"Incorrect corpus id supplied. Allowed values are: {allowed_values}") # Corpus id should now be valid and will be in corpora root if downloaded corpus_location = Path(CANARY_CORPORA_LOCATION) / corpus_id if os.path.isdir(corpus_location) is False and download_if_missing is True: download_corpus(corpus_id) return load_corpus(corpus_id) if os.path.isdir(corpus_location) is False and download_if_missing is False: raise UserWarning("It appears the requested corpus has not been downloaded and is not present on disk. " "Have you downloaded it? You can set download_if_missing to True and the " "corpus will be downloaded. Alternatively, use the function download_corpus.") return glob.glob(f"{corpus_location}/*")
02798717b05a45d4bd0fb6d48525073a56ddffbb
3,635,777
def tx_deserialize( tx_hex ): """ Given a serialized transaction, return its inputs, outputs, locktime, and version Each input will have: * txid: string * vout: int * [optional] sequence: int * [optional] scriptSig: {"asm": ..., "hex": ...} Each output will have: * value: Decimal (in BTC) * script_hex: string Return tx, formatted as {'locktime': ..., 'version': ..., 'vin': ..., 'vout': ...} """ tx = bitcoin.deserialize( tx_hex ) inputs = tx["ins"] outputs = tx["outs"] ret_inputs = [] ret_outputs = [] for inp in inputs: ret_inp = { "txid": inp["outpoint"]["hash"], "vout": int(inp["outpoint"]["index"]), } if "sequence" in inp: ret_inp["sequence"] = int(inp["sequence"]) if "script" in inp: ret_inp["scriptSig"] = { "asm": tx_script_to_asm(inp['script']), "hex": inp["script"] } ret_inputs.append( ret_inp ) for i in xrange(0, len(outputs)): out = outputs[i] assert len(out['script']) > 0, "Invalid transaction scriptpubkey:\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True) assert out['value'] < 1000 * (10**8), "High transaction value\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True) ret_out = { "n": i, "value": Decimal(out["value"]) / 10**8, "scriptPubKey": { "hex": out["script"], "asm": tx_script_to_asm(out['script']) }, # compat with pybitcoin "script_hex": out["script"] } ret_outputs.append( ret_out ) ret = { "txid": bitcoin.txhash(tx_hex), "hex": tx_hex, "size": len(tx_hex) / 2, "locktime": tx['locktime'], "version": tx['version'], "vin": ret_inputs, "vout": ret_outputs } return ret
dfd97a8493430ea6600d8c45c0c0b5ea81cc803e
3,635,778
import time


def wait_for_result(polling_function, polling_config):
    """
        wait_for_result will periodically run `polling_function`
        using the parameters described in `polling_config` and return the
        output of the polling function.

        Args:
            polling_config (PollingConfig): The parameters to use to poll
                the db.
            polling_function (Callable[[], (bool, Any)]): The function being
                polled. The function takes no arguments and must return a
                status which indicates if the function was successful or
                not, as well as some return value.

        Returns:
            Any: The output of the polling function, if it is successful,
                None otherwise.
    """
    if polling_config.polling_interval == 0:
        iterations = 1
    else:
        iterations = int(polling_config.timeout // polling_config.polling_interval) + 1

    for _ in range(iterations):
        (status, result) = polling_function()

        if status:
            return result

        time.sleep(polling_config.polling_interval)

    if polling_config.strict:
        assert False

    return None
663f23b3134dabcf3cc3c2f72db33d09ca480555
3,635,779
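A sketch of calling wait_for_result above; PollingConfig is stood in for by a hypothetical namedtuple carrying only the attributes the function reads (timeout, polling_interval, strict), and the polling function is a dummy that succeeds on its third attempt.

from collections import namedtuple
from itertools import count

FakePollingConfig = namedtuple("FakePollingConfig", "timeout polling_interval strict")

attempts = count(1)

def poll_db():
    # Pretend the value becomes available on the third poll.
    n = next(attempts)
    return (n >= 3, {"rows": 42} if n >= 3 else None)

config = FakePollingConfig(timeout=1.0, polling_interval=0.1, strict=True)
print(wait_for_result(poll_db, config))  # {'rows': 42}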
def regrid(idx):
    """
    Decorator factory to compute a model on a constant grid, then interpolate.
    This is to be used for reconvolution fits when the independent axis isn't
    evenly spaced.

    This function returns a decorator. You should call the result of this
    function with the model to regrid. The constant grid is built from the
    smallest spacing found in the input axis and extended by one full axis
    range on either side; the model is evaluated on that grid and then
    interpolated back onto the original points.

    Parameters
    ----------
    idx : int
        Index of variable to regrid in client function.

    Returns
    -------
    regridder : decorator

    Example
    -------
    ```
    def model(x, amp, tau, t0, sig):
        # Convolution assumes constant grid spacing.
        return convolve(step(x)*exp_decay(x, amp, tau), gauss_kernel(x, t0, sig))
    deco = regrid(1)
    regridded = deco(model)
    # Or, on a single line
    regridded = regrid(1)(model) # compute on first axis
    # Or, during definition
    @regrid(1)
    def model(x, *args):
        ...
    ```
    """
    #logger.debug("Applying 'regrid' decorator")
    def _regrid(func, *args, **kw):
        #logger.debug("Regridding func {}".format(func.__name__))
        x = args[idx]
        #print("regridding...")
        mn, mx = np.min(x), np.max(x)
        extension=1
        margin = (mx-mn)*extension
        dx = np.abs(np.min(x[1:]-x[:-1]))
        #print("regrid args", args)
        #print("regrid kw", kw)
        #print("regrid func", func)
        grid = np.arange(mn-margin, mx+margin+dx, dx)
        args = list(args)
        args[idx] = grid
        y = func(*args, **kw)
        #print("y", y)
        intrp = interp1d(grid, y, kind=3, copy=False, assume_sorted=True)
        return intrp(x)
    return decorator(_regrid)
c0d3ef6b5f32545a004fcbaebc585ca2e6e1d984
3,635,780
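A hedged usage sketch for regrid above, assuming numpy, scipy's interp1d and the decorator package are imported as the closure expects; the decorated model is an invented exponential decay whose time axis (argument index 1) is deliberately unevenly spaced.

import numpy as np

@regrid(1)
def decay(amp, t, tau):
    # Plain model; under the decorator it is evaluated on a uniform grid
    # and interpolated back onto the uneven axis.
    return amp * np.exp(-t / tau)

t_uneven = np.concatenate([np.linspace(0.0, 2.0, 11), np.linspace(2.5, 10.0, 16)])
y = decay(2.0, t_uneven, 3.0)
print(y.shape)  # (27,)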
from typing import Optional


def create_random_bytes(
    min_length: Optional[int] = None, max_length: Optional[int] = None, lower_case: bool = False
) -> bytes:
    """Generates random bytes given the length constraints"""
    if min_length is None:
        min_length = 0
    if max_length is None:
        max_length = min_length + 1 * 2

    length = randint(min_length, max_length)
    result = hexlify(urandom(length))

    if lower_case:
        result = result.lower()

    if max_length and len(result) > max_length:
        end = randint(min_length or 0, max_length)
        return result[0:end]

    return result
1e71debc3a495d2291a7989fe92c0f3712556baa
3,635,781
def calculate_v_correction(df, photopic_response):
    """
    Closure to calculate the v correction factor from a dataframe
    """
    # Get angles from column names first
    try:
        angles = df.drop(["0_deg", "wavelength"], axis=1).columns.to_numpy(float)
    except:
        angles = df.drop(["wavelength"], axis=1).columns.to_numpy(float)

    def calculate_vfactor(column):
        """
        Function to calculate the vfactor
        """
        return sum(column * photopic_response["photopic_response"].to_numpy()) / sum(
            df["0.0"] * photopic_response["photopic_response"].to_numpy()
        )

    try:
        v_factor = df.drop(["0_deg", "wavelength"], axis=1).apply(calculate_vfactor)
    except:
        v_factor = df.drop(["wavelength"], axis=1).apply(calculate_vfactor)

    # It is now important to only integrate from 0 to 90° and not the entire spectrum
    # It is probably smarter to pull this up at some point but this works.
    relevant_v_factor = v_factor.loc[
        np.logical_and(
            np.array(v_factor.index).astype(float) >= 0,
            np.array(v_factor.index).astype(float) <= 90,
        )
    ]

    relevant_angles = np.array(
        v_factor.loc[
            np.logical_and(
                np.array(v_factor.index).astype(float) >= 0,
                np.array(v_factor.index).astype(float) <= 90,
            )
        ].index
    ).astype(float)

    return np.sum(
        relevant_v_factor
        * np.sin(np.deg2rad(relevant_angles))
        * np.deg2rad(np.diff(relevant_angles)[0])
    )
f9768d204813a89df6f246864ed669c8b8b305cf
3,635,782
def wrap_statement(token_str): """ Wraps a long string of space-separated tokens or a list of tokens. """ if isinstance(token_str, list): token_str = ' '.join(token_str) wrap_ind = '\n' + INDENT * 4 return wrap_ind.join(gtextWrapper.wrap(token_str))
447b74a2d33d6a053791c35112d43e356194f575
3,635,783
def mlas_packb(B, K, N, transb_size, transb=True):
    """Pre-pack B matrix if it is constant for mlas_matmul, C = A * B^T.
    It only supports float32 datatype.

    Parameters
    ----------
    B : tvm.te.Tensor
        The second input of mlas_matmul.
    K : int
        The number of columns of A.
    N : int
        The number of columns of output C.
    transb_size : int
        The size (in bytes) of the output pre-packed B matrix.
    transb : bool
        Whether the B matrix is transposed.

    Returns
    -------
    PackedB: tvm.te.Tensor
        The pre-packed B matrix.
    """
    return te.extern(
        (transb_size),
        [B],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.mlas.gemm_packb",
            N,
            K,
            K if transb else N,
            transb,
            ins[0],
            outs[0],
        ),
        name="PackedB",
    )
c232e0f00b008c044c9843db90058425f6050cd3
3,635,784
def file_version_summary(list_of_files): """ Given the result of list_file_versions, returns a list of all file versions, with "+" for upload and "-" for hide, looking like this: ['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg'] """ return [('+ ' if (f['action'] == 'upload') else '- ') + f['fileName'] for f in list_of_files]
8ca8e75c3395ea13c6db54149b12e62f07aefc13
3,635,785
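For instance, with a hand-written stand-in for the list_file_versions output:

files = [
    {"action": "upload", "fileName": "photos/a.jpg"},
    {"action": "hide", "fileName": "photos/b.jpg"},
    {"action": "upload", "fileName": "photos/c.jpg"},
]
print(file_version_summary(files))  # ['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']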
def make_players(data, what_to_replace_null_data_with):
    """
    1. feature selection
    2. replacing null values
    :param data:
    :param what_to_replace_null_data_with: accepted values: "1", "mean", "median"
    :return: players
    """
    # Work on a copy so we never write back into the caller's dataframe.
    players = data[["Overall", "Potential", "Position", "Skill Moves", "Crossing", "Finishing", "HeadingAccuracy",
                    "ShortPassing", "Volleys", "Dribbling", "Curve", "FKAccuracy", "LongPassing", "BallControl",
                    "Acceleration", "SprintSpeed", "Agility", "Reactions", "Balance", "ShotPower", "Jumping",
                    "Stamina", "Strength", "LongShots", "Aggression", "Interceptions", "Positioning", "Vision",
                    "Penalties", "Composure", "Marking", "StandingTackle", "SlidingTackle", "GKDiving", "GKHandling",
                    "GKKicking", "GKPositioning", "GKReflexes"]].copy()

    for col in players:
        if col != "Position":
            # Assign back instead of fillna(inplace=True) on a column slice, which
            # can silently fail to update the dataframe (SettingWithCopyWarning).
            if what_to_replace_null_data_with == "1":
                players[col] = players[col].fillna(1)
            elif what_to_replace_null_data_with == "mean":
                players[col] = players[col].fillna(players[col].mean())
            elif what_to_replace_null_data_with == "median":
                players[col] = players[col].fillna(players[col].median())
            else:
                raise ValueError("Invalid value for second parameter")

    # drop 60 NA positions from dataframe
    players = players.dropna()
    return players
081e563f475e7e05caf3761954646b8a35ec8e54
3,635,786
def pwm_to_duty_cycle(pulsewidth_micros, pwm_params): """Converts a pwm signal (measured in microseconds) to a corresponding duty cycle on the gpio pwm pin Parameters ---------- pulsewidth_micros : float Width of the pwm signal in microseconds pwm_params : PWMParams PWMParams object Returns ------- float PWM duty cycle corresponding to the pulse width """ return int(pulsewidth_micros / 1e6 * pwm_params.freq * pwm_params.range)
e627b84bf7e01f3d4dcb98ec94271cd34249fb23
3,635,787
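A quick check of pwm_to_duty_cycle above, using a hypothetical stand-in for PWMParams (the function only reads its freq and range attributes); the numbers are typical hobby-servo values, not taken from the original project.

from collections import namedtuple

FakePWMParams = namedtuple("FakePWMParams", "freq range")
params = FakePWMParams(freq=250, range=4096)  # 250 Hz PWM with a 12-bit range

# 1500 us pulse -> 1500e-6 * 250 * 4096 = 1536 counts
print(pwm_to_duty_cycle(1500, params))  # 1536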
import json def update_plugin_packages_in_kv(rid, runit): """Update the plugin packages for this unit in the kv store. It returns a tuple of 'install_packages' and 'purge_packages' that are different from that which was previously stored. :param rid: The relation_id of the unit :type rid: str :param runit: The unit name of the unit :type runit: str :returns: tuple of (added, removed) packages. :rtype: Tuple[List[Str],List[str]] """ current = get_plugin_packages_from_kv(rid, runit) rdata = relation_get(unit=runit, rid=rid) install_packages_json = rdata.get("install-packages", "[]") install_packages = json.loads(install_packages_json) conflicting_packages_json = rdata.get("conflicting-packages", "[]") conflicting_packages = json.loads(conflicting_packages_json) removed = list( (set(current['install_packages']) - set(install_packages)) | (set(conflicting_packages) - set(current['conflicting_packages']))) added = list( (set(install_packages) - set(current['install_packages'])) | (set(current['conflicting_packages']) - set(conflicting_packages))) store_plugin_packages_in_kv( rid, runit, conflicting_packages, install_packages) return (added, removed)
66d342e014f738e178629973b81c7a5c8d68dd41
3,635,788
def get_a_record(dns_name, zone_name):
  """Lookup an 'A' record with the supplied name.

  Args:
    dns_name: DNS name of the resource.
    zone_name: Cloud DNS managed zone name.

  Returns:
    The first A record for the DNS resource. None if not found
  """
  rr_set_response = api.CLIENTS.dns.resourceRecordSets().list(
      managedZone=zone_name,
      project=zones.CONFIG.managed_zone_project,
      name=dns_name,
      type='A').execute()
  # There should only be one with this name.
  rr_set = rr_set_response.get('rrsets', [])
  if len(rr_set) >= 1:
    return rr_set[0]
  else:
    return None
c982fdb603d1c6accb7c205d64fc57a783559979
3,635,789
def get_file_obj(fname, mode='r', encoding=None):
    """
    Light wrapper to handle strings and let files (anything else) pass through.

    It also handles '.gz' files.

    Parameters
    ----------
    fname: string or file-like object
        File to open / forward
    mode: string
        Argument passed to the 'open' or 'gzip.open' function
    encoding: string
        For Python 3 only, specify the encoding of the file

    Returns
    -------
    A file-like object that is always a context-manager. If the `fname` was
    already a file-like object, the returned context manager *will not
    close the file*.
    """

    if _is_string_like(fname):
        return _open(fname, mode, encoding)

    try:
        # Make sure the object has the required read/write methods
        if 'r' in mode:
            assert hasattr(fname, 'read')
        if 'w' in mode or 'a' in mode:
            assert hasattr(fname, 'write')
    except AssertionError:  # pragma: no cover
        raise ValueError('fname must be a string or a file-like object')

    return EmptyContextManager(fname)
c8a24ef76869be8f743a7ddb7e66bf6ea4f0edf1
3,635,790
def decode(codes, alphabet):
    """ Converts one-hot encoding indices to a string.

    Parameters
    ----------
    codes : torch.Tensor
        Sequence of one-hot encoding indices (anything indexable whose
        elements cast to int).
    alphabet : Alphabet
        Matches one-hot encodings to letters.

    Returns
    -------
    str
        Decoded string, with each index replaced by its letter in `alphabet`.
    """
    s = list(map(lambda x: alphabet[int(x)], codes))
    return ''.join(s)
79ff69034293a8fb7d005ec89c98ae5e7535e487
3,635,791
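A toy call of decode above; plain integer indices are used in place of a tensor (the function only needs int() and indexing), and the three-letter alphabet is made up rather than the real Alphabet object.

codes = [0, 2, 1, 1]
alphabet = "ARN"  # hypothetical: index 0 -> 'A', 1 -> 'R', 2 -> 'N'
print(decode(codes, alphabet))  # 'ANRR'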
import logging
def GetSuites(milo_client, waterfall, builder_name, build_number):
  """Gets a set of suite ids for a given build from Milo.

  Args:
    milo_client: MiloClient object.
    waterfall: Buildbot waterfall.
    builder_name: Buildbot builder name.
    build_number: Buildbot build number.

  Returns:
    A set of suite ids.
  """
  buildinfo = milo_client.BuildInfoGetBuildbot(waterfall, builder_name,
                                               build_number)

  suite_ids = set()
  for step in buildinfo['steps']:
    for link in buildinfo['steps'][step].get('otherLinks', []):
      if link.get('label') == 'Link to suite':
        url = link.get('url')
        m = SUITE_RE.search(url)
        if m:
          suite_ids.add(m.group(1))
        else:
          logging.error('Unable to parse suite link for %s: %s' %
                        (buildinfo['steps'][step]['name'], url))
  return suite_ids
266ef9d5042d247b01d0f0819d8222104a591960
3,635,792
import re
def getNormform_space(synonym):
    """Lowercase the synonym and replace every non-alphanumeric character with a space."""
    return re.sub("[^a-z0-9]", " ", synonym.lower())
5e03a89ca25cb5b4ae9a76ef9fb44c213a043cbd
3,635,793
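For example:

print(getNormform_space("Alpha-1 Antitrypsin"))  # 'alpha 1 antitrypsin'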
def electrolyte_conductivity_PeymanMPM(c_e, T): """ Conductivity of LiPF6 in EC:DMC as a function of ion concentration. The original data is from [1]. The fit is from Dualfoil [2]. References ---------- .. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal properties of non-aqueous electrolyte solutions for rechargeable lithium batteries. Journal of power sources 81 (1999): 859-862. .. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html Parameters ---------- c_e: :class:`pybamm.Symbol` Dimensional electrolyte concentration T: :class:`pybamm.Symbol` Dimensional temperature Returns ------- :class:`pybamm.Symbol` Electrolyte conductivity """ sigma_e = 1.3 E_k_e = 34700 arrhenius = exp(E_k_e / constants.R * (1 / 298.15 - 1 / T)) return sigma_e * arrhenius
61c5a0f8a8b514607d6829fabd784ab620e4bdf8
3,635,794
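A quick sanity check, assuming pybamm is installed and that exp and constants are the pybamm symbols imported at module level (as PyBaMM parameter modules normally do): at the 298.15 K reference temperature the Arrhenius factor is 1, so the expression evaluates to the base conductivity of 1.3 S/m.

import pybamm

c_e = pybamm.Scalar(1000)   # mol/m^3; unused by this particular fit
T = pybamm.Scalar(298.15)   # K
print(electrolyte_conductivity_PeymanMPM(c_e, T).evaluate())  # ~1.3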
import re def rename_leaves_taxids(tree): """ Rename the leaf nodes with just the NCBI taxonomy ID if we have it :param tree: the tree to rename :return: the tree with renamed leaves """ for n in tree.get_leaves(): m = re.search(r'\[(\d+)\]', n.name) if m: n.name = m.groups()[0] return tree
26b55177b1e9372ff58f3a79ab703c639661551c
3,635,795
def hshift(x, shifts=0): """shift batch of images horizontally""" return paddle.roll(x, int(shifts*x.shape[3]), axis=3)
176336fb7953197697b123041183798bf445b43f
3,635,796
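For example, assuming PaddlePaddle is installed; the tensor is a dummy NCHW batch and shifts is a fraction of the image width.

import paddle

x = paddle.arange(16, dtype="float32").reshape([1, 1, 2, 8])  # N, C, H, W
rolled = hshift(x, shifts=0.25)  # rolls each row by int(0.25 * 8) = 2 pixels
print(rolled.shape)  # [1, 1, 2, 8]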
def getFirstCatalogKeyPath(cataloglist, keypath, default = None): """ Get the value of the keypath in the first catalog containing it. """ for name in cataloglist: catalog = getCatalog(name) if catalog is not None: value = valueForKeyPath(catalog, keypath) if value is not None: return value return default
9e5a999e3bf9ae2e24c56c4e4fc1d6a8bf3e0095
3,635,797
import argparse
import os
from datetime import datetime, timedelta
def load_context():
    """Load and parse command line arguments and create runtime context.

    Parse command line arguments and create runtime context. Also set any logging parameters passed in
    (just to file for the moment).

    Returns:
        context: runtime parameters
    """
    context = None
    try:
        parser = argparse.ArgumentParser(description='scrape applied-for jobs from recruiter web pages')
        parser.add_argument('-s', '--source', default=os.getcwd(), help='Source folder')
        parser.add_argument('-c', '--cutoff',
                            default=JobSite.convert_from_datetime(datetime.today() - timedelta(weeks=2)),
                            help='Cutoff date')
        parser.add_argument('-o', '--output', default='jobs_applied_for.csv', help='CSV output file')
        context = parser.parse_args()
        context.cutoff = JobSite.convert_to_datetime(context.cutoff)
    except Exception as e:
        print(e)
        raise e
    return context
14174c40cff4f8b1ed8660992534527ec1990fb9
3,635,798
def HT_DCPHASE(ds, count): """Hilbert Transform - Dominant Cycle Phase""" return call_talib_with_ds(ds, count, talib.HT_DCPHASE)
bb1f98e8adc8f90f2f35418b7598bec574f014ae
3,635,799