content
stringlengths
22
815k
id
int64
0
4.91M
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline.

    dets is an (N, 5) array of [x1, y1, x2, y2, score] rows; boxes whose
    IoU with a higher-scoring kept box exceeds ``thresh`` are suppressed.
    Returns the indices of the kept boxes, highest score first.
    """
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    # +1 for inclusive pixel coordinates, matching the classic NMS baseline.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Intersection rectangle between the best box and all remaining boxes.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (areas[best] + areas[rest] - inter)
        order = rest[np.where(iou <= thresh)[0]]
    return keep
36,200
def photo_handler(message):
    """ Handler for a photo content type. """
    # Thin dispatch wrapper: all real work happens in process_photo
    # (defined elsewhere in the project).
    process_photo(message)
36,201
def _list_news():
    """Yield the stored paths of all news items, newest key first.

    Walks ``<PATH>/<project>/news/<user>/`` and, for every file found,
    yields the joined path minus its extension.  Entries are emitted in
    reverse-sorted order of their original filenames.  Raises ``NotFound``
    when the news folder does not exist.
    """
    root = os.path.join(PATH, g.project_name, "news")
    if not os.path.isdir(root):
        raise NotFound
    entries = {}
    for user in os.listdir(root):
        user_dir = os.path.join(root, user)
        for name in os.listdir(user_dir):
            stem = os.path.splitext(name)[0]
            # Later users overwrite earlier ones on duplicate filenames,
            # matching the original dict-based behaviour.
            entries[name] = os.path.join(root, user, stem)
    for name in sorted(entries, reverse=True):
        yield entries[name]
36,202
def _split_iterators(iterator, n=None): """Split itererator of tuples into multiple iterators. :param iterator: Iterator to be split. :param n: Amount of iterators it will be split in. toolz.peak can be used to determine this value, but that is not lazy. This is basically the same as x, y, z = zip(*a), however, this function is lazy. """ #if n is None: # item, iterator = cytoolz.peek(iterator) # n = len(item) iterators = itertools.tee(iterator, n) #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators)) # Above does not work?! out = list() out.append(s[0] for s in iterators[0]) out.append(s[1] for s in iterators[1]) out.append(s[2] for s in iterators[2]) iterators = out return iterators
36,203
def get_league_listing(**kwargs):
    """ Get a list of leagues """
    # Delegates straight to the project's Steam/Dota web-API wrapper;
    # kwargs are forwarded unchanged to the request.
    return make_request("GetLeaguelisting", **kwargs)
36,204
async def perhaps_this_is_it(
    disc_channel: disnake.TextChannel = commands.Param(lambda i: i.channel),
    large: int = commands.Param(0, large=True),
) -> PerhapsThis:
    """This description should not be shown

    Parameters
    ----------
    disc_channel: A channel which should default to the current one - uses the id
    large: A large number which defaults to 0 - divided by 2
    """
    # Package the channel id and the halved value in the project's
    # PerhapsThis result type.  NOTE(review): large / 2 yields a float even
    # though the parameter is annotated int — confirm that is intended.
    return PerhapsThis(disc_channel.id, large / 2)
36,205
def enable() -> None:
    """
    Enable automatic garbage collection.
    """
    # Typeshed-style stub: the real implementation is provided elsewhere.
    ...
36,206
def main(argv=None):
    """script main.

    Computes size/distance/overlap statistics for gff/gtf/bed intervals and
    writes histogram, summary-stat and raw-value output files.

    parses command line options in sys.argv, unless *argv* is given.
    """
    if argv is None:
        argv = sys.argv

    parser = E.ArgumentParser(description=__doc__)

    parser.add_argument("--version", action='version', version="1.0")

    parser.add_argument("-b", "--bin-size", dest="bin_size", type=str,
                        help="bin size.")

    parser.add_argument("--min-value", dest="min_value", type=float,
                        help="minimum value for histogram.")

    parser.add_argument(
        "--max-value", dest="max_value", type=float,
        help="maximum value for histogram.")

    parser.add_argument(
        "--no-empty-bins", dest="no_empty_bins", action="store_true",
        help="do not display empty bins.")

    parser.add_argument(
        "--with-empty-bins", dest="no_empty_bins", action="store_false",
        help="display empty bins.")

    parser.add_argument(
        "--ignore-out-of-range", dest="ignore_out_of_range",
        action="store_true",
        help="ignore values that are out of range (as opposed to truncating "
        "them to range border.")

    parser.add_argument("--missing-value", dest="missing_value", type=str,
                        help="entry for missing values .")

    parser.add_argument("--use-dynamic-bins", dest="dynamic_bins",
                        action="store_true",
                        help="each value constitutes its own bin.")

    parser.add_argument("--format", dest="format", type=str,
                        choices=("gff", "gtf", "bed"),
                        help="input file format .")

    parser.add_argument("--method", dest="methods", type=str,
                        action="append",
                        choices=("all", "hist", "stats", "overlaps", "values"),
                        help="methods to apply .")

    parser.add_argument("--output-section", dest="output_section", type=str,
                        choices=("all", "size", "distance"),
                        help="data to compute .")

    parser.set_defaults(
        no_empty_bins=True,
        bin_size=None,
        dynamic_bins=False,
        ignore_out_of_range=False,
        min_value=None,
        max_value=None,
        nonull=None,
        missing_value="na",
        output_filename_pattern="%s",
        methods=[],
        output_section="all",
        format="gff",
    )

    (args) = E.start(parser, add_output_options=True)

    # "all" is shorthand for the three aggregate methods.
    if "all" in args.methods:
        args.methods = ("hist", "stats", "overlaps")

    if not args.output_filename_pattern:
        args.output_filename_pattern = "%s"

    if len(args.methods) == 0:
        raise ValueError(
            "please provide counting method using --method option")

    if args.format in ("gff", "gtf"):
        gffs = GTF.iterator(args.stdin)
    elif args.format == "bed":
        gffs = Bed.iterator(args.stdin)

    values_between = []
    values_within = []
    values_overlaps = []

    if "overlaps" in args.methods:
        if not args.output_filename_pattern:
            args.output_filename_pattern = "%s"
        outfile_overlaps = E.open_output_file("overlaps")
    else:
        outfile_overlaps = None

    last = None
    ninput, noverlaps = 0, 0
    # Single pass over the (assumed position-sorted) intervals, collecting
    # per-interval sizes, gaps to the previous interval, and overlaps.
    for this in gffs:
        ninput += 1
        values_within.append(this.end - this.start)

        if last and last.contig == this.contig:
            if this.start < last.end:
                # Overlap with the previous interval on the same contig.
                noverlaps += 1
                if outfile_overlaps:
                    outfile_overlaps.write("%s\t%s\n" % (str(last), str(this)))
                values_overlaps.append(
                    min(this.end, last.end) - max(last.start, this.start))
                # Only advance `last` if this interval extends further right.
                if this.end > last.end:
                    last = this
                continue
            else:
                values_between.append(this.start - last.end)
                # if this.start - last.end < 10:
                #     print str(last)
                #     print str(this)
                #     print "=="
                values_overlaps.append(0)

        last = this

    if "hist" in args.methods:
        outfile = E.open_output_file("hist")
        h_within = Histogram.Calculate(
            values_within,
            no_empty_bins=args.no_empty_bins,
            increment=args.bin_size,
            min_value=args.min_value,
            max_value=args.max_value,
            dynamic_bins=args.dynamic_bins,
            ignore_out_of_range=args.ignore_out_of_range)

        h_between = Histogram.Calculate(
            values_between,
            no_empty_bins=args.no_empty_bins,
            increment=args.bin_size,
            min_value=args.min_value,
            max_value=args.max_value,
            dynamic_bins=args.dynamic_bins,
            ignore_out_of_range=args.ignore_out_of_range)

        if "all" == args.output_section:
            outfile.write("residues\tsize\tdistance\n")
            combined_histogram = Histogram.Combine(
                [h_within, h_between], missing_value=args.missing_value)
            Histogram.Write(outfile, combined_histogram, nonull=args.nonull)
        elif args.output_section == "size":
            outfile.write("residues\tsize\n")
            Histogram.Write(outfile, h_within, nonull=args.nonull)
        elif args.output_section == "distance":
            outfile.write("residues\tdistance\n")
            Histogram.Write(outfile, h_between, nonull=args.nonull)

        outfile.close()

    if "stats" in args.methods:
        outfile = E.open_output_file("stats")
        outfile.write("data\t%s\n" % Stats.Summary().getHeader())
        if args.output_section in ("size", "all"):
            outfile.write("size\t%s\n" % str(Stats.Summary(values_within)))
        if args.output_section in ("distance", "all"):
            outfile.write("distance\t%s\n" %
                          str(Stats.Summary(values_between)))
        outfile.close()

    if "values" in args.methods:
        outfile = E.open_output_file("distances")
        outfile.write("distance\n%s\n" % "\n".join(map(str, values_between)))
        outfile.close()
        outfile = E.open_output_file("sizes")
        outfile.write("size\n%s\n" % "\n".join(map(str, values_within)))
        outfile.close()
        outfile = E.open_output_file("overlaps")
        outfile.write("overlap\n%s\n" % "\n".join(map(str, values_overlaps)))
        outfile.close()

    E.info("ninput=%i, ndistance=%i, nsize=%i, noverlap=%i" %
           (ninput, len(values_between), len(values_within), noverlaps))

    E.stop()
36,207
def maybe_setup_moe_params(model_p: InstantiableParams):
    """Convert a FeedforwardLayer to a MoE Layer for StackedTransformer."""
    if model_p.cls == layers.StackedTransformerRepeated:
        # For the repeated variant, the transformer params live on the inner block.
        model_p = model_p.block

    if model_p.num_experts == 0:
        # Dense model: nothing to convert.
        return model_p

    ff_p = model_p.transformer_layer_params_tpl.tr_fflayer_tpl
    assert issubclass(ff_p.cls, layers.TransformerFeedForward)
    moe_p = model_p.moe_layer_tpl
    # Copy over the base params.
    base_layer.BaseLayer.copy_base_params(ff_p, moe_p)
    # Copy over the other params so the MoE layer mirrors the FF layer config.
    moe_p.name = ff_p.name
    moe_p.input_dims = ff_p.input_dims
    moe_p.hidden_dims = ff_p.hidden_dims
    moe_p.ln_tpl = ff_p.ln_tpl.Copy()
    moe_p.activation = ff_p.activation
    moe_p.relu_dropout_tpl = ff_p.relu_dropout_tpl.Copy()
    moe_p.relu_dropout_prob = ff_p.relu_dropout_prob
    moe_p.residual_dropout_tpl = ff_p.residual_dropout_tpl.Copy()
    moe_p.residual_dropout_prob = ff_p.residual_dropout_prob
    moe_p.add_skip_connection = ff_p.add_skip_connection
    moe_p.norm_policy = ff_p.norm_policy
    # NOTE(review): this branch falls off the end and returns None, while the
    # num_experts == 0 branch returns model_p.  Confirm callers rely only on
    # the in-place mutation of moe_p in the MoE case.
36,208
def verify_message(message):
    """Verify that a message is valid.

    A valid message looks like ``'daily-0400/20140207041736'``.  Returns the
    ``re.Match`` object (truthy) when the prefix matches, otherwise ``None``.
    """
    pattern = "^[a-z]+(-[0-9])?-([a-z]{3})?[0-9]+/[0-9]+"
    return re.match(pattern, message)
36,209
def get_all_unicode_chars():
    """Return a list of every Unicode character, U+0000 through U+10FFFF.

    The original implementation discovered the upper bound by appending
    ``chr(i)`` in an unbounded loop until ``ValueError`` was raised; the
    bound is simply ``sys.maxunicode`` (0x10FFFF), so iterate it directly.
    """
    return [chr(i) for i in range(0x110000)]
36,210
def displayString(q=1,d=1,ex=1,k=1,r=1,v="string"):
    """Assign a string value to a string identifier.

    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/displayString.html

    displayString is NOT undoable, queryable, and NOT editable.

    Allows you define a string in one location and then refer to it by its
    identifier in many other locations.  Formatted strings are also supported
    (NOTE however, this functionality is now provided in a more general
    fashion by the format command, use of format is recommended).  You may
    embed up to 3 special character sequences ^1s, ^2s, and ^3s to perform
    automatic string replacement.  The embedded characters will be replaced
    with the extra command arguments.  See example section for more detail.
    Note the extra command arguments do not need to be display string
    identifiers.

    Return Value:
        None.  In query mode, return type is based on queried flag.

    Flags:
        d : delete [boolean] []
            This flag is used to remove an identifer string.  The command
            will fail if the identifier does not exist.
        ex : exists [boolean] []
            Returns true or false depending upon whether the specified
            identifier exists.
        k : keys [boolean] ['query']
            List all displayString keys that match the identifier string.
            The identifier string may be a whole or partial key string.  The
            command will return a list of all identifier keys that contain
            this identifier string as a substring.
        r : replace [boolean] ['query']
            Since a displayString command will fail if it tries to assign a
            new value to an existing identifer, this flag is required to
            allow updates to the value of an already-existing identifier.
            If the identifier does not already exist, a new identifier is
            added as if the -replace flag were not present.
        v : value [string]
            The display string's value.  If you do not specify this flag
            when creating a display string then the value will be the same
            as the identifier.
    """
36,211
def get_east_asian_width_property(value, binary=False):
    """Get `EAST ASIAN WIDTH` property.

    Resolves ``value`` through the ``eastasianwidth`` alias table (keeping a
    leading ``^`` negation marker intact) and looks it up in the ASCII or
    full Unicode table depending on ``binary``.
    """
    table = unidata.ascii_east_asian_width if binary else unidata.unicode_east_asian_width
    aliases = unidata.unicode_alias['eastasianwidth']
    if value.startswith('^'):
        # Negated form: resolve the alias of the name after '^', keep the '^'.
        name = value[1:]
        key = '^' + aliases.get(name, name)
    else:
        key = aliases.get(value, value)
    return table[key]
36,212
def ModifyListRequest(instance_ref, args, req):
    """Parse arguments and construct list backups request.

    Sets the request parent to the instance's relative name and, when a
    database was supplied, narrows the request with a database filter.
    Returns the (mutated) request object.
    """
    parent = instance_ref.RelativeName()
    req.parent = parent
    if args.database:
        full_db_name = parent + '/databases/' + args.database
        req.filter = 'database="{}"'.format(full_db_name)
    return req
36,213
def output_encoding(outfile=None):
    """Determine the encoding to use for output written to `outfile` or stdout.

    Preference order: the target stream's own encoding, the real stdout's
    encoding, then the locale's preferred encoding.
    """
    target = sys.stdout if outfile is None else outfile
    for candidate in (
        getattr(target, "encoding", None),
        getattr(sys.__stdout__, "encoding", None),
    ):
        if candidate:
            return candidate
    return locale.getpreferredencoding()
36,214
def day_start(src_time):
    """Return the beginning (midnight) of the day of the specified datetime."""
    # timetuple()[:3] is (year, month, day); building a fresh naive datetime
    # discards the time-of-day components.
    return datetime(*src_time.timetuple()[:3])
36,215
def _construct_corrections_dict(file):
    """Construct a dictionary of corrections.

    Given the name of a .ifa corrections file, build a dictionary mapping
    wavelengths (as integers) to instrument-sensitivity corrections (as
    floats).  Intensity data should be divided by the correction value
    corresponding to the wavelength at which it was collected.
    """
    raw = resources.read_text(wrangling.fluorimeter.corrections, file)
    # Everything after the "[Data]" marker is tab-separated rows.
    body = raw[raw.find("[Data]") + 6:]
    corrections = {}
    for line in body.split("\n"):
        if line == "":
            continue
        wavelength, correction = [
            field.strip() for field in line.split("\t") if field != ""
        ]
        # The wavelength column carries a 3-character unit suffix; drop it.
        corrections[int(wavelength[:-3])] = float(correction)
    return corrections
36,216
def get_activation_func(activation_label):
    """ Returns the activation function given the label

    Args:
        activation_label: Name of the function

    Raises:
        ValueError: if the label is not one of sigmoid/identity/relu/tanh.
    """
    if activation_label == 'sigmoid':
        return tf.nn.sigmoid
    if activation_label == 'identity':
        return tf.identity
    if activation_label == 'relu':
        return tf.nn.relu
    if activation_label == 'tanh':
        return tf.nn.tanh
    raise ValueError('Unknown activation function %s' % activation_label)
36,217
def sim_share(
    df1,
    df2,
    group_pop_var1,
    total_pop_var1,
    group_pop_var2,
    total_pop_var2,
):
    """Simulate the spatial population distribution of a region using the CDF of a comparison region.

    For each spatial unit i in region 1, take the unit's percentile in the
    distribution, and swap the group share with the value of the corresponding
    percentile in region 2.  The share is the minority population of unit i
    divided by the regional total of the minority population.  This approach
    will shift the total population of each unit without changing the regional
    proportion of each group.

    Parameters
    ----------
    df1 : pandas.DataFrame or geopandas.GeoDataFrame
        dataframe for first dataset with columns holding group and total population counts
    df2 : pandas.DataFrame or geopandas.GeoDataFrame
        dataframe for second dataset with columns holding group and total population counts
    group_pop_var1 : str
        column holding population counts for group of interest on input df1
    total_pop_var1 : str
        column holding total population counts on input df1
    group_pop_var2 : str
        column holding population counts for group of interest on input df2
    total_pop_var2 : str
        column holding total population counts on input df2

    Returns
    -------
    two pandas.DataFrame
        dataframes with simulated population columns appended
    """
    df1, df2 = _prepare_comparative_data(
        df1, df2, group_pop_var1, group_pop_var2, total_pop_var1, total_pop_var2)

    # Complement counts: everyone in the unit NOT in the group of interest.
    df1["compl_pop_var"] = df1[total_pop_var1] - df1[group_pop_var1]
    df2["compl_pop_var"] = df2[total_pop_var2] - df2[group_pop_var2]

    # Each unit's share of the region-wide group (and complement) population.
    df1["share"] = (df1[group_pop_var1] / df1[group_pop_var1].sum()).fillna(0)
    df2["share"] = (df2[group_pop_var2] / df2[group_pop_var2].sum()).fillna(0)
    df1["compl_share"] = (df1["compl_pop_var"] / df1["compl_pop_var"].sum()).fillna(0)
    df2["compl_share"] = (df2["compl_pop_var"] / df2["compl_pop_var"].sum()).fillna(0)

    # Rescale due to possibility of the summation of the counterfactual share
    # values being greater or lower than 1.
    # CT stands for Correction Term.
    CT1_2_group = df1["share"].rank(pct=True).apply(df2["share"].quantile).sum()
    CT2_1_group = df2["share"].rank(pct=True).apply(df1["share"].quantile).sum()

    # Swap each unit's share with the value at the same percentile of the
    # other region's share distribution, normalised by the correction term.
    df1["counterfactual_group_pop"] = (
        df1["share"].rank(pct=True).apply(df2["share"].quantile)
        / CT1_2_group
        * df1[group_pop_var1].sum()
    )
    df2["counterfactual_group_pop"] = (
        df2["share"].rank(pct=True).apply(df1["share"].quantile)
        / CT2_1_group
        * df2[group_pop_var2].sum()
    )

    # Rescale due to possibility of the summation of the counterfactual share
    # values being greater or lower than 1.
    # CT stands for Correction Term.
    CT1_2_compl = (
        df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile).sum()
    )
    CT2_1_compl = (
        df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile).sum()
    )

    # Same percentile swap for the complement population.
    df1["counterfactual_compl_pop"] = (
        df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile)
        / CT1_2_compl
        * df1["compl_pop_var"].sum()
    )
    df2["counterfactual_compl_pop"] = (
        df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile)
        / CT2_1_compl
        * df2["compl_pop_var"].sum()
    )

    # Simulated totals are group + complement counterfactuals.
    df1["counterfactual_total_pop"] = (
        df1["counterfactual_group_pop"] + df1["counterfactual_compl_pop"]
    )
    df2["counterfactual_total_pop"] = (
        df2["counterfactual_group_pop"] + df2["counterfactual_compl_pop"]
    )
    return df1.fillna(0), df2.fillna(0)
36,218
def SetFileExecutable(path):
    """Sets the file's executable bit.

    Args:
        path: The file path.
    """
    # Preserve the existing mode bits; only the owner-execute bit is added.
    current_mode = os.stat(path).st_mode
    os.chmod(path, current_mode | stat.S_IXUSR)
36,219
def test_get_command_bad_id() -> None:
    """It should raise if a requested command ID isn't in state."""
    # Seed the view with a single known command...
    command = create_completed_command(command_id="command-id")
    subject = get_command_view(commands_by_id=[("command-id", command)])

    # ...then request an ID that was never added and expect the domain error.
    with pytest.raises(errors.CommandDoesNotExistError):
        subject.get("asdfghjkl")
36,220
def fixextensions(peeps, picmap, basedir="."):
    """Replace image names with ones that actually exist in picmap.

    The first row of ``peeps`` is treated as a header and copied through
    unchanged.  For every following row, the extension-less image name in
    column 2 is looked up in ``picmap``: found rows get the mapped filename,
    rows whose name is missing are dropped and their original index recorded.

    :param peeps: list of rows; ``row[2]`` holds an image filename for rows > 0.
    :param picmap: mapping of extension-less name -> actual existing filename.
    :param basedir: unused, kept for backward compatibility.
    :return: tuple ``(fixed, missing)`` — the filtered rows (copies) and the
        original indices of rows whose image was not found.
    """
    fixed = [peeps[0].copy()]
    missing = []
    for i in range(1, len(peeps)):
        name = peeps[i][2].split(".", 1)[0]
        if name in picmap:
            row = peeps[i].copy()
            # BUG FIX: the old code wrote to fixed[i][2], which raises
            # IndexError as soon as any earlier row was missing (fixed is
            # then shorter than i).  Always update the row just appended.
            row[2] = picmap[name]
            fixed.append(row)
        else:
            missing.append(i)
    return fixed, missing
36,221
def do_divide(data, interval):
    """Greedily find an "optimal" segmentation of ``interval``.

    Recursively splits the interval at the index chosen by ``divide_data``
    until the chi-square statistic of a segment drops below 15, at which
    point the segment is kept as-is.  Returns a list of intervals.
    """
    lo, hi = interval
    p_value, chi2, split_at = divide_data(data, lo, hi)
    if chi2 < 15:
        # Segment is homogeneous enough; stop splitting here.
        return [interval]
    return do_divide(data, [lo, split_at]) + do_divide(data, [split_at, hi])
36,222
def opened_bin_w_error(filename, mode="rb"):
    """Yield ``(file, error)`` for ``filename``, ensuring the file is closed.

    Yields ``(None, err)`` when opening fails with IOError; otherwise yields
    ``(f, None)`` and closes ``f`` when the generator resumes or is closed.
    """
    try:
        handle = open(filename, mode)
    except IOError as err:
        yield None, err
    else:
        # The with-block guarantees the handle is closed on resume/close,
        # mirroring the original try/finally.
        with handle:
            yield handle, None
36,223
def test_is_question():
    """Function does some tests to the is_question function"""
    # The helper must return a real bool even for non-question input.
    assert isinstance(is_question('lol'), bool)
    # A trailing question mark must be detected as a question.
    assert is_question('test?') == True
    # Sanity check: the helper is callable.
    assert callable(is_question)
36,224
def get_config(key_path='/'):
    """Return (sub-)configuration stored in config file.

    Note that values may differ from the current ``CONFIG`` variable if it
    was manipulated directly.

    Parameters
    ----------
    key_path : str, optional
        ``'/'``-separated path to sub-configuration.  Default is ``'/'``,
        which returns the full configuration dict.

    Returns
    -------
    sub_config
        (sub-)configuration, either a dict or a value
    """
    with open(CONFIG_FILENAME, 'r') as config_fp:
        node = json.load(config_fp)
    # Empty path segments (leading/trailing/double slashes) are skipped.
    for key in (k for k in key_path.split('/') if k != ''):
        node = node[key]
    return node
36,225
def find_cloudtrails(ocredentials, fRegion, fCloudTrailnames=None):
    """Find CloudTrail trails in an account/region.

    ocredentials is an object with the following structure:
        - ['AccessKeyId'] holds the AWS_ACCESS_KEY
        - ['SecretAccessKey'] holds the AWS_SECRET_ACCESS_KEY
        - ['SessionToken'] holds the AWS_SESSION_TOKEN
        - ['AccountNumber'] holds the account number
    fRegion = region
    fCloudTrailnames = list of CloudTrail names we're looking for
        (None returns all cloud trails)

    Returns a tuple ``(response, trailname)``.  For a full listing,
    ``response`` is the accumulated list of trail summaries (see the
    CloudTrail ``list_trails`` API).  For named lookups it is the
    ``describe_trails`` response of the first trail found, or an error
    string on failure.
    """
    import boto3
    import logging
    from botocore.exceptions import ClientError

    session_ct = boto3.Session(
        aws_access_key_id=ocredentials['AccessKeyId'],
        aws_secret_access_key=ocredentials['SecretAccessKey'],
        aws_session_token=ocredentials['SessionToken'],
        region_name=fRegion)
    client_ct = session_ct.client('cloudtrail')
    logging.info("Looking for CloudTrail trails in account %s from Region %s",
                 ocredentials['AccountNumber'], fRegion)

    if fCloudTrailnames is None:
        # They're really looking for a list of all trails.
        # BUG FIX: assign trailname BEFORE the try block; previously it was
        # unbound in the exception handler if the first API call raised.
        trailname = "Various"
        try:
            response = client_ct.list_trails()
            fullresponse = response['Trails']
            # BUG FIX: the old loop re-called list_trails() WITHOUT the
            # NextToken, fetching the first page forever; pass the token to
            # actually paginate.
            while 'NextToken' in response:
                response = client_ct.list_trails(NextToken=response['NextToken'])
                fullresponse.extend(response['Trails'])
        except ClientError as my_Error:
            if str(my_Error).find("InvalidTrailNameException") > 0:
                logging.error("Bad CloudTrail name provided")
            fullresponse = trailname + " didn't work. Try Again"
        return (fullresponse, trailname)
    else:
        # They've provided a list of trails and want specific info about
        # them; return the first one that can be described.
        response = None
        for trailname in fCloudTrailnames:
            try:
                response = client_ct.describe_trails(trailNameList=[trailname])
                if len(response['trailList']) > 0:
                    return (response, trailname)
            except ClientError as my_Error:
                if str(my_Error).find("InvalidTrailNameException") > 0:
                    logging.error("Bad CloudTrail name provided")
                response = trailname + " didn't work. Try Again"
        return (response, trailname)
36,226
def evaluate(sess, logits, loss, labels, img_name, dataset):
    """Evaluate the network over the whole validation dataset.

    Args:
        sess: TF session
        logits: network logits
        loss: loss tensor
        labels: ground-truth label tensor
        img_name: tensor holding the image-name prefix for each example
        dataset: dataset object providing num_examples()

    Returns:
        (mean loss, pixel accuracy, IoU accuracy, recall, precision)
    """
    # Confusion matrix accumulated across all examples (contiguous for the
    # cython helper below).
    conf_mat = np.ascontiguousarray(
        np.zeros((FLAGS.num_classes, FLAGS.num_classes), dtype=np.uint64))
    loss_avg = 0
    for i in trange(dataset.num_examples()):
        #for i in trange(100):
        out_logits, gt_labels, loss_val, img_prefix = sess.run(
            [logits, labels, loss, img_name])
        loss_avg += loss_val
        #net_labels = out_logits[0].argmax(2).astype(np.int32, copy=False)
        # Per-pixel predicted class = argmax over the channel axis.
        net_labels = out_logits[0].argmax(2).astype(np.int32)
        #gt_labels = gt_labels.astype(np.int32, copy=False)
        cylib.collect_confusion_matrix(net_labels.reshape(-1),
                                       gt_labels.reshape(-1), conf_mat)
        if FLAGS.draw_predictions:
            img_prefix = img_prefix[0].decode("utf-8")
            save_path = FLAGS.debug_dir + '/val/' + img_prefix + '.png'
            eval_helper.draw_output(net_labels, CityscapesDataset.CLASS_INFO,
                                    save_path)
    #print(q_size)
    #print(conf_mat)
    print('')
    pixel_acc, iou_acc, recall, precision, _ = eval_helper.compute_errors(
        conf_mat, 'Validation', CityscapesDataset.CLASS_INFO, verbose=True)
    return (loss_avg / dataset.num_examples(), pixel_acc, iou_acc, recall,
            precision)
36,227
def run_worker(queues):
    """Run service workers.

    Queue resolution order: explicit argument, then the
    RENKU_SVC_WORKER_QUEUES environment variable, then the project defaults.
    """
    from renku.service.jobs.queues import QUEUES
    from renku.service.worker import start_worker

    if not queues:
        env_value = os.getenv("RENKU_SVC_WORKER_QUEUES", "")
        queues = [
            name.strip()
            for name in env_value.strip().split(",")
            if name.strip()
        ]
    if not queues:
        queues = QUEUES
    start_worker(queues)
36,228
def _get_options():
    """Aggregate the sumo returner configs and return them as a list of dicts.

    Prefers the ``hubblestack:returner:sumo`` config tree; falls back to the
    individual legacy keys, returning None when even those are absent.
    """
    if __mods__['config.get']('hubblestack:returner:sumo'):
        raw_opts = __mods__['config.get']('hubblestack:returner:sumo')
        if not isinstance(raw_opts, list):
            raw_opts = [raw_opts]
        return [
            {'sumo_nebula_return': opt.get('sumo_nebula_return'),
             'proxy': opt.get('proxy', {}),
             'timeout': opt.get('timeout', 9.05)}
            for opt in raw_opts
        ]
    try:
        sumo_nebula_return = __mods__['config.get']('hubblestack:returner:sumo:sumo_nebula_return')
    except Exception:
        return None
    return [{
        'sumo_nebula_return': sumo_nebula_return,
        'proxy': __mods__['config.get']('hubblestack:nebula:returner:sumo:proxy', {}),
        'timeout': __mods__['config.get']('hubblestack:nebula:returner:sumo:timeout', 9.05),
    }]
36,229
def maybe_unzip(local_archive='\\'.join([os.getcwd(), 'data', 'dataset.zip']),
                unzip_path='\\'.join([os.getcwd(), 'data', 'unzipped_dataset'])):
    """Unzip the specified local zip archive to the specified folder.

    Skips extraction entirely when the destination folder already exists.

    Args:
        local_archive (:obj:`str`): the full path to the zip archive, including its name.
        unzip_path (:obj:`str`): the full path to the unzipped folder.
    """
    if os.path.exists(unzip_path):
        print('Archive already uncompressed!')
        return
    os.mkdir(unzip_path)
    with zipfile.ZipFile(local_archive, 'r') as archive:
        archive.extractall(unzip_path)
    print('Unzip complete.')
36,230
def convert_boolean(value: Any) -> Optional[bool]:
    """Convert a value from the ToonAPI to a boolean.

    None is passed through unchanged; any other value is coerced via bool().
    """
    return None if value is None else bool(value)
36,231
def Split4(thisBrep, cutters, normal, planView, intersectionTolerance, multiple=False):
    """Splits a Brep into pieces using a combination of curves, to be extruded, and Breps as cutters.

    Args:
        cutters (IEnumerable<GeometryBase>): The curves, surfaces, faces and Breps to be used as cutters.  Any other geometry is ignored.
        normal (Vector3d): A construction plane normal, used in deciding how to extrude a curve into a cutter.
        planView (bool): Set True if the assume view is a plan, or parallel projection, view.
        intersectionTolerance (double): The tolerance with which to compute intersections.
        multiple (bool): When True, all arguments are parallel sequences and a batch request is made.

    Returns:
        Brep[]: A new array of Breps.  This array can be empty.
    """
    url = "rhino/geometry/brep/split-brep_geometrybasearray_vector3d_bool_double"
    if multiple:
        # Batch mode: zip the parallel argument sequences into per-call tuples.
        url += "?multiple=true"
        args = list(zip(thisBrep, cutters, normal, planView, intersectionTolerance))
    else:
        args = [thisBrep, cutters, normal, planView, intersectionTolerance]
    raw = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(raw)
36,232
def generate_passwords_brute_force(state):
    """String-based brute-force password generation.

    Encodes a running counter in the module-level ``alphabet`` (radix
    ``base``), left-padded to the current length; when the last password of
    a length is produced, the length grows and the counter resets.

    :param state: ``[length, counter]`` pair, or None to start fresh.
    :return: ``(password, next_state)``
    """
    if state is None:
        state = [0, 0]
    length, counter = state
    # Convert the counter to the custom radix, most significant digit first.
    digits = []
    remaining = counter
    while remaining > 0:
        remaining, digit = divmod(remaining, base)
        digits.append(alphabet[digit])
    password = ''.join(reversed(digits))
    password = alphabet[0] * (length - len(password)) + password
    counter += 1
    if password == alphabet[-1] * length:
        # Exhausted all passwords of this length; move to the next length.
        length += 1
        counter = 0
    return password, [length, counter]
36,233
def main() -> None:
    """Start maruberu server."""
    # First pass picks up --conf from the command line without finalising.
    options.parse_command_line(final=False)
    if pathlib.Path(options.conf).is_file():
        options.parse_config_file(options.conf, final=False)
        # Command-line flags override values from the config file.
        options.parse_command_line()
    else:
        options.parse_command_line()
        logging.warning("conf '{}' is not found.".format(options.conf))
    cwd = pathlib.Path(__file__).resolve().parent
    # A ":/"-prefixed ring command is relative to this module's directory.
    if options.ring_command[:2] == ":/":
        options.ring_command = str(cwd / options.ring_command[2:])
    # Hash the plaintext admin password when no hash was supplied.
    if options.admin_password_hashed == "":
        options.admin_password_hashed = crypt.crypt(options.admin_password)
    try:
        pytz.timezone(options.timezone)
    except pytz.exceptions.UnknownTimeZoneError:
        logging.warning("Timezone '{}' is not found.\
 'Asia/Tokyo' will be used.".format(options.timezone))
        options.timezone = "Asia/Tokyo"
    settings = {
        "xsrf_cookies": True,
        "cookie_secret": options.cookie_secret,
        "static_path": pathlib.Path(__file__).parent / "static",
        "template_path": pathlib.Path(__file__).parent / "templates",
        "login_url": "/admin/login/",
        "autoescape": "xhtml_escape",
        "debug": options.debug,
    }
    env = get_env(options.env)
    app = web.Application([
        (r"/", IndexHandler, env),
        (r"/resource/([0-9a-f-]+)?/?", ResourceHandler, env),
        (r"/admin/?", AdminTokenHandler, env),
        (r"/admin/login/?", AdminLoginHandler, env),
        (r"/admin/logout/?", AdminLogoutHandler, env),
        (r"/static/(.*)", web.StaticFileHandler),
    ], **settings)
    server = httpserver.HTTPServer(app)
    server.listen(options.port)
    try:
        ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        # Unblock the bell worker so the process can shut down cleanly.
        ioloop.IOLoop.current().add_callback(
            get_env("ON_MEMORY")["bell"]._ring_queue.put, None)
36,234
def tparse(instring: str, lenout: int = _default_len_out) -> Tuple[float, str]:
    """
    Parse a time string and return seconds past the J2000 epoch
    on a formal calendar.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html

    :param instring: Input time string, UTC.
    :param lenout: Available space in output error message string.
    :return: Equivalent UTC seconds past J2000, Descriptive error message.
    """
    # Allocate the error-message buffer before converting lenout to a C int.
    errmsg = stypes.string_to_char_p(lenout)
    lenout_c = ctypes.c_int(lenout)
    instring_c = stypes.string_to_char_p(instring)
    sp2000 = ctypes.c_double()
    libspice.tparse_c(instring_c, lenout_c, ctypes.byref(sp2000), errmsg)
    return sp2000.value, stypes.to_python_string(errmsg)
36,235
def _init_density_est_data_stub(num_time_steps_block: int,
                                max_num_walkers: int,
                                cfc_spec: CFCSpec) -> DensityExecData:
    """Stub for the init_density_est_data function (p.d.f.).

    Placeholder only — concrete implementations are supplied elsewhere.
    Parameters mirror the real signature: the number of time steps per
    block, the walker capacity, and the CFC spec (project types).
    """
    pass
36,236
def release(ctx, version, skip_release_notes=False):
    """Tag a new release.

    Validates the working tree and release notes, switches to the release
    branch, rewrites version references across the website/manifests/binary,
    then commits and tags.  Nothing is pushed.
    """
    status = run("git status --porcelain", hide=True).stdout.strip()
    if status != "":
        raise Exit(message="git checkout not clean, cannot release")

    version = semver.parse_version_info(version)
    is_patch_release = version.patch != 0

    # Check that we have release notes for the desired version.
    run("git checkout main", echo=True)
    if not skip_release_notes:
        with open("website/content/release-notes/_index.md") as release_notes:
            if "## Version {}".format(version) not in release_notes.read():
                raise Exit(message="no release notes for v{}".format(version))

    # Move HEAD to the correct release branch - either a new one, or
    # an existing one.
    if is_patch_release:
        run("git checkout v{}.{}".format(version.major, version.minor),
            echo=True)
    else:
        run("git checkout -b v{}.{}".format(version.major, version.minor),
            echo=True)

    # Copy over release notes from main.
    if not skip_release_notes:
        run("git checkout main -- website/content/release-notes/_index.md",
            echo=True)

    # Update links on the website to point to files at the version
    # we're creating.
    if is_patch_release:
        previous_version = "v{}.{}.{}".format(version.major, version.minor,
                                              version.patch - 1)
    else:
        previous_version = "main"

    def _replace(pattern):
        # Swap references to the previous version for the new tag across
        # all website pages.
        oldpat = pattern.format(previous_version)
        newpat = pattern.format("v{}").format(version)
        run("perl -pi -e 's#{}#{}#g' website/content/*.md website/content/*/*.md".format(oldpat, newpat),
            echo=True)

    _replace("/google/metallb/{}")
    _replace("/google/metallb/tree/{}")
    _replace("/google/metallb/blob/{}")

    # Update the version listed on the website sidebar
    run("perl -pi -e 's/MetalLB .*/MetalLB v{}/g' website/content/_header.md".format(version),
        echo=True)

    # Update the manifests with the new version
    run("perl -pi -e 's,image: metallb/speaker:.*,image: metallb/speaker:v{},g' manifests/metallb.yaml".format(version),
        echo=True)
    run("perl -pi -e 's,image: metallb/controller:.*,image: metallb/controller:v{},g' manifests/metallb.yaml".format(version),
        echo=True)

    # Update the version in kustomize instructions
    #
    # TODO: Check if kustomize instructions really need the version in the
    # website or if there is a simpler way. For now, though, we just replace the
    # only page that mentions the version on release.
    run("perl -pi -e 's,github.com/metallb/metallb//manifests\?ref=.*,github.com/metallb/metallb//manifests\?ref=v{},g' website/content/installation/_index.md".format(version),
        echo=True)

    # Update the version embedded in the binary
    run("perl -pi -e 's/version\s+=.*/version = \"{}\"/g' internal/version/version.go".format(version),
        echo=True)
    run("gofmt -w internal/version/version.go", echo=True)

    run("git commit -a -m 'Automated update for release v{}'".format(version),
        echo=True)
    run("git tag v{} -m 'See the release notes for details:\n\nhttps://metallb.universe.tf/release-notes/#version-{}-{}-{}'".format(version, version.major, version.minor, version.patch),
        echo=True)
    run("git checkout main", echo=True)
36,237
def align_decision_ref(id_human, title): """ In German, decisions are either referred to as 'Beschluss' or 'Entscheidung'. This function shall align the term used in the title with the term used in id_human. """ if 'Beschluss' in title: return id_human return id_human.replace('Beschluss ', 'Entscheidung ')
36,238
def get_headers(soup): """get nutrient headers from the soup""" headers = {'captions': [], 'units': []} footer = soup.find('tfoot') for cell in footer.findAll('td', {'class': 'nutrient-column'}): div = cell.find('div') headers['units'].append(div.text) headers['captions'].append(div.previous_sibling.strip()) return headers
36,239
def table2rank(table, transpose=False, is_large_value_high_performance=True, add_averaged_rank=False): """ transform a performance value table to a rank table :param table: pandas DataFrame or numpy array, the table with performance values :param transpose: bool, whether to transpose table (default: False; the method is column and data set is row) :param is_large_value_high_performance: bool, whether a larger value has higher performance :param add_averaged_rank: bool, whether add averaged ranks after the last row/column :return: a rank table (numpy.array or pd.DataFrame) """ table = table.copy() if isinstance(table, pd.DataFrame): column_name = table.columns.values if table.iloc[:, 0].dtype == 'object': index_name = table.iloc[:, 0].values table = table.iloc[:, 1:] else: index_name = None data = table.values else: data = table if transpose: data = data.transpose() # rank each row rank_table = list() for row in data: if is_large_value_high_performance: index = np.argsort(-row) else: index = np.argsort(row) rank = np.zeros(len(index)) for i, value in enumerate(index): if i > 0: if row[value] == row[index[i - 1]]: rank[value] = i - 1 continue rank[value] = i rank += 1 rank_table.append(rank) rank_table = np.asarray(rank_table) if add_averaged_rank: averaged_rank = [np.mean(rank_table[:, i]) for i in range(rank_table.shape[1])] rank_table = np.concatenate([rank_table, np.asarray([averaged_rank])]) if transpose: rank_table = rank_table.transpose() if isinstance(table, pd.DataFrame): # reconstruct the pandas table if index_name is not None: if add_averaged_rank: if not transpose: index_name = np.concatenate([index_name, np.array(['AR'])]) else: column_name = np.concatenate([column_name, np.asarray(['AR'])]) rank_table = np.concatenate([index_name[:, np.newaxis], rank_table], axis=1) rank_table = pd.DataFrame(data=rank_table, columns=column_name) return rank_table
36,240
def test_device_section_method_failed(flask_app, db): # pylint: disable=unused-argument """ To verify that registration section method is working properly and response is correct""" headers = {'Content-Type': 'multipart/form-data'} rv = flask_app.get('{0}/{1}'.format(DEVICE_REGISTRATION_SECTION_API, 'abc'), headers=headers) data = json.loads(rv.data.decode('utf-8')) assert rv.status_code == 422 assert 'message' in data assert data['message'][0] == 'Registration Request not found.'
36,241
def F_z_i(z, t, r1, r2, A): """ Function F for Newton's method :param z: :param t: :param r1: :param r2: :param A: :return: F: function """ mu = mu_Earth C_z_i = c2(z) S_z_i = c3(z) y_z = r1 + r2 + A * (z * S_z_i - 1.0) / np.sqrt(C_z_i) F = (y_z / C_z_i) ** 1.5 * S_z_i + A * np.sqrt(np.abs(y_z)) - np.sqrt(mu) * t return F
36,242
def sigma_clip(array,nsigma=3.0,MAD=False): """This returns the n-sigma boundaries of an array, mainly used for scaling plots. Parameters ---------- array : list, np.ndarray The array from which the n-sigma boundaries are required. nsigma : int, float The number of sigma's away from the mean that need to be provided. MAD : bool Use the true standard deviation or MAD estimator of the standard deviation (works better in the presence of outliers). Returns ------- vmin,vmax : float The bottom and top n-sigma boundaries of the input array. """ from tayph.vartests import typetest import numpy as np typetest(array,[list,np.ndarray],'array in fun.sigma_clip()') typetest(nsigma,[int,float],'nsigma in fun.sigma_clip()') typetest(MAD,bool,'MAD in fun.sigma_clip()') m = np.nanmedian(array) if MAD: from astropy.stats import mad_std s = mad_std(array,ignore_nan=True) else: s = np.nanstd(array) vmin = m-nsigma*s vmax = m+nsigma*s return vmin,vmax
36,243
def main(): """example code lives in one function""" # grab a token token = oauth2_wrappers.gen_token() # set up our simple query string # # field names and matchTypes are documented at # https://app.swaggerhub.com/apis-docs/datafinnovation/clientapi/1.0/ query_dict = { "fields": { "companyname": { "type" : "matchquery", "matchType" : "startsWith", "value" : "ibm" } } } # and any query param q_params = {'maxresult' : 2} # the url stub we post to api_url = 'facts/formquery' # send off the request resp_data = oauth2_wrappers.df_post(api_url, token, query_dict, q_params) # and iterate over all the elasticsearch hits for ele in resp_data['hits']: print(ele)
36,244
def points_distance(xyz_1, xyz_2): """ :param xyz_1: :param xyz_2: :return: """ if len(xyz_1.shape) >= 2: distance = np.sqrt(np.sum((xyz_1 - xyz_2)**2, axis=1)) else: distance = np.sqrt(np.sum((xyz_1 - xyz_2)**2)) return distance
36,245
def test_fastparcel_transition_point_mixed(): """Test FastParcel._transition_point for mixed moist/dry descent.""" z_init = 3000*units.meter t_initial = -2*units.celsius q_initial = 0.004751707262581661*units.dimensionless l_initial = 2e-3*units.dimensionless rate = 0.5/units.km theta_e = sydneyfast.parcel_equivalent_potential_temperature( z_init, t_initial, q_initial, rate) total_water = sydneyfast.water_content( z_init, q_initial, l_initial, rate) actual_z, actual_t = sydneyfast._transition_point( z_init, t_initial, l_initial, theta_e, total_water) truth_z = 2391.6137533395254*units.meter truth_t = 273.44171068846475*units.kelvin assert_almost_equal(actual_z, truth_z, 3) assert_almost_equal(actual_t, truth_t, 3) assert not hasattr(actual_z, 'size') assert not hasattr(actual_t, 'size')
36,246
def ensure_society(sess: SQLASession, name: str, description: str, role_email: Optional[str] = None) -> Collect[Society]: """ Register or update a society in the database. For existing societies, this will synchronise member relations with the given list of admins. """ try: society = get_society(name, sess) except KeyError: res_record = yield from _create_society(sess, name, description, role_email) society = res_record.value else: yield _update_society(sess, society, description, role_email) return society
36,247
async def test_form_login_failed(hass): """Test we handle invalid auth error.""" result = await test_external_url_callback(hass) flow_id = result["flow_id"] with patch( "custom_components.tesla_custom.config_flow.TeslaAPI.connect", return_value={}, ): result = await hass.config_entries.flow.async_configure( flow_id=flow_id, user_input={}, ) # "type": RESULT_TYPE_ABORT, # "flow_id": flow_id, # "handler": handler, # "reason": reason, # "description_placeholders": description_placeholders, assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["flow_id"] == flow_id assert result["handler"] == DOMAIN assert result["reason"] == "login_failed" assert result["description_placeholders"] is None
36,248
def odd_numbers_list(n): """ Returns the list of n first odd numbers """ return [2 * k - 1 for k in range(1, n + 1)]
36,249
def domain_delete(domainName): # noqa: E501 """domain_delete Remove the domain # noqa: E501 :param domainName: :type domainName: str :rtype: DefaultMessage """ return 'do some magic!'
36,250
def ParseDate(s): """ ParseDate(s) -> datetime This function converts a string containing the subset of ISO8601 that can be represented with xs:dateTime into a datetime object. As such it's suitable for parsing Collada's <created> and <modified> elements. The date must be of the form '-'? yyyy '-' mm '-' dd 'T' hh ':' mm ':' ss ('.' s+)? (zzzzzz)? See http://www.w3.org/TR/xmlschema-2/#dateTime for more info on the various parts. return: A datetime or None if the string wasn't formatted correctly. """ # Split the date (yyyy-mm-dd) and time by the "T" in the middle parts = s.split("T") if len(parts) != 2: return None date = parts[0] time = parts[1] # Parse the yyyy-mm-dd part parts = date.split("-") yearMultiplier = 1 if date[0] == "-": yearMultiplier = -1 parts.remove(0) if len(parts) != 3: return None try: year = yearMultiplier * int(parts[0]) month = int(parts[1]) day = int(parts[2]) except ValueError: return None # Split the time and time zone by "Z", "+", or "-" timeZoneDelta = timedelta() timeZoneDeltaModifier = 1 parts = time.split("Z") if len(parts) > 1: if parts[1] != "": return None if len(parts) == 1: parts = time.split("+") if len(parts) == 1: parts = time.split("-") timeZoneDeltaModifier = -1 if len(parts) == 1: # Time zone not present return None time = parts[0] timeZone = parts[1] if timeZone != "": parts = timeZone.split(":") if len(parts) != 2: return None try: hours = int(parts[0]) minutes = int(parts[1]) except ValueError: return None timeZoneDelta = timeZoneDeltaModifier * timedelta(0, 0, 0, 0, minutes, hours) parts = time.split(":") if len(parts) != 3: return None try: hours = int(parts[0]) minutes = int(parts[1]) seconds = int(parts[2]) # We're losing the decimal portion here, but it probably doesn't matter except ValueError: return None return datetime(year, month, day, hours, minutes, seconds) - timeZoneDelta
36,251
def kinetic_energy(atoms): """ Returns the kinetic energy (Da*angs/ps^2) of the atoms. """ en = 0.0 for a in atoms: vel = v3.mag(a.vel) en += 0.5 * a.mass * vel * vel return en
36,252
async def cat(ctx): """gives you a cute cat image!""" embed = discord.Embed() catimg = await alex_api.cats() embed = discord.Embed(title= ('CUTY CATTY'),timestamp=datetime.datetime.utcnow(), color=discord.Color.green()) embed.set_footer(text=ctx.author.name , icon_url=ctx.author.avatar_url) embed.set_image(url=f"{catimg}") #embed.set_thumbnail(url="https://i.postimg.cc/mr4CGYXd/tenor.gif") #await ctx.send(catimg) await ctx.send(embed=embed)
36,253
def boxes_intersect(boxes, box): """Determine whether a box intersects with any of the boxes listed""" x1, y1, x2, y2 = box if in_box(boxes, x1, y1) \ or in_box(boxes, x1, y2) \ or in_box(boxes, x2, y1) \ or in_box(boxes, x2, y2): return True return False
36,254
def cumulative_mean_normalized_difference_function(df, n): """ Compute cumulative mean normalized difference function (CMND). :param df: Difference function :param n: length of data :return: cumulative mean normalized difference function :rtype: list """ # scipy method cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float) return np.insert(cmn_df, 0, 1)
36,255
def configure(app): """ init babel :param app: :return: """ babel.init_app(app)
36,256
def add_spaces_old(spaces: Iterable["zfit.Space"]): """Add two spaces and merge their limits if possible or return False. Args: spaces: Returns: Union[None, :py:class:`~zfit.Space`, bool]: Raises: LimitsIncompatibleError: if limits of the `spaces` cannot be merged because they overlap """ spaces = convert_to_container(spaces) if not all(isinstance(space, ZfitSpace) for space in spaces): raise TypeError("Cannot only add type ZfitSpace") if len(spaces) <= 1: raise ValueError("Need at least two spaces to be added.") # TODO: allow? usecase? obs = frozenset(frozenset(space.obs) for space in spaces) if len(obs) != 1: return False obs1 = spaces[0].obs spaces = [space.with_obs(obs=obs1) if not space.obs == obs1 else space for space in spaces] if limits_overlap(spaces=spaces, allow_exact_match=True): raise LimitsIncompatibleError("Limits of spaces overlap, cannot merge spaces.") lowers = [] uppers = [] for space in spaces: if not space.limits_are_set: continue for lower, upper in space: for other_lower, other_upper in zip(lowers, uppers): lower_same = np.allclose(lower, other_lower) upper_same = np.allclose(upper, other_upper) assert not lower_same ^ upper_same, "Bug, please report as issue. limits_overlap did not catch right." if lower_same and upper_same: break else: lowers.append(lower) uppers.append(upper) lowers = tuple(lowers) uppers = tuple(uppers) if len(lowers) == 0: limits = None else: limits = lowers, uppers new_space = zfit.Space(obs=spaces[0].obs, limits=limits) return new_space
36,257
def minimal_subject_transformer(index, minimal_subject, attributes, subject_types, subject_type_is, center, radius): """Construct the JSON object for a MinimalSubject.""" subdomain, type = minimal_subject.subdomain, minimal_subject.type # Gather all the attributes values = [None] # attributes are indexed starting from 1 for attribute in attributes: name = attribute.key().name() if name in subject_types[type].minimal_attribute_names: values.append(minimal_subject.get_value(name)) else: values.append(None) # Pack the results into an object suitable for JSON serialization. subject_jobject = { 'name': minimal_subject.name, 'type': subject_type_is[subdomain + ':' + type], 'values': values, } if (minimal_subject.has_value('location') and minimal_subject.get_value('location')): location = { 'lat': minimal_subject.get_value('location').lat, 'lon': minimal_subject.get_value('location').lon } if center: subject_jobject['distance_meters'] = distance(location, center) dist = subject_jobject.get('distance_meters') if center and (dist is None or dist > radius > 0): return None return subject_jobject
36,258
def _check_archive_dir(): """ Checks that the purported archive directory appears to contain a Vesper archive. """ _check_database() _check_preferences() _check_presets()
36,259
def beta_avg_inv_cdf(y, parameters, res=0.001): """ Compute the inverse cdf of the average of the k beta distributions. Parameters ---------- y : float A float between 0 and 1 (the range of the cdf) parameters : array of tuples Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution. res : float, optional (default=0.001) The precision of the convolution, measured as step size in the support. Returns ------- x : float the inverse cdf of y """ return brentq(lambda x: beta_avg_cdf([x], parameters, res)[0] - y, 0, 1)
36,260
def check_overwrite(file_path, overwrite=False): """ Defines the behavior for the overwrite argument for all output types. Arguments --------- file_path: str Exact file path overwrite: bool True: Structure files will be overwritten in the dir_path False: Exception will be raised if file already exists """ if os.path.exists(file_path): if overwrite == True: return else: raise Exception('Filepath {} already exists. ' 'If you want to overwite, use overwrite=True'. format(file_path))
36,261
def _aprime(pHI,pFA): """recursive private function for calculating A'""" pCR = 1 - pFA # use recursion to handle # cases below the diagonal defined by pHI == pFA if pFA > pHI: return 1 - _aprime(1-pHI ,1-pFA) # Pollack and Norman's (1964) A' measure # formula from Grier 1971 if pHI == 0 or pFA == 1: # in both of these cases pHI == pFA return .5 return .5 + (pHI - pFA)*(1 + pHI - pFA)/(4*pHI*(1 - pFA))
36,262
def requires_testing_data(func): """Skip testing data test.""" return _pytest_mark()(func)
36,263
def mfcc_derivative_loss(y, y_hat, derivative_op=None): """ Expects y/y_hat to be of shape batch_size x features_dim x time_steps (default=128) """ if derivative_op is None: derivative_op = delta_matrix() y_derivative = tf.matmul(y, derivative_op) y_hat_derivative = tf.matmul(y_hat, derivative_op) return tf.reduce_mean(tf.abs(y_derivative - y_hat_derivative))
36,264
def get_representative_terms( abilities_corpus: Iterable[str], skills_corpus: Iterable[str] ) -> Tuple[list, list]: """Return representative terms in order of importance from the abilities and skills corpora. Parameters ---------- abilities_corpus : Iterable[str] Iterable containing the ability descriptions skills_corpus : Iterable[str] Iterable containing the skills descriptions Returns ------- Tuple[list, list] The first element is a list of abilities terms and the second element is a list of skills terms """ # Intialize empty lists to store all the verbs extracted from the docs abilities_verbs = [] skills_verbs = [] # For each description, get the verbs and append them to the master list # TODO: Could refine by only retrieving verbs that occur at the start of the # description, i.e. only capture the main verb used in the skill/ability for doc in nlp.pipe(abilities_corpus): abilities_verbs.extend([token.lemma_ for token in utils.get_verbs(doc)]) for doc in nlp.pipe(skills_corpus): skills_verbs.extend([token.lemma_ for token in utils.get_verbs(doc)]) # Get counts for each verb; will be useful for ranking later abilities_verbs_counter = Counter(abilities_verbs) skills_verbs_counter = Counter(skills_verbs) # Compute the set difference and sort by term frequency # TODO: Could implement something more sophisticated/closer to TF-IDF that looks at # how often a term occurs in abilities vs. skills--impact would be to expand the # term list to include terms that occurred in both, but occurred much more # frequently in one than another unique_abilities_verbs = sorted( list(set(abilities_verbs).difference(skills_verbs)), key=lambda x: -abilities_verbs_counter[x], ) unique_skills_verbs = sorted( list(set(skills_verbs).difference(abilities_verbs)), key=lambda x: -skills_verbs_counter[x], ) return unique_abilities_verbs, unique_skills_verbs
36,265
def get_tags_from_playbook(playbook_file): """Get available tags from Ansible playbook""" tags = [] playbook_path = os.path.dirname(playbook_file) with open(playbook_file) as playbook_fp: playbook = yaml.safe_load(playbook_fp) for item in playbook: if 'import_playbook' in item: import_playbook = os.path.join(playbook_path, item['import_playbook']) imported_tags = get_tags_from_playbook(import_playbook) tags.extend(imported_tags) elif 'tags' in item: if isinstance(item['tags'], (list, )): tags.extend(item['tags']) else: tags.append(item['tags']) else: print(item) # Remove duplicates while maintaining order tags = list(OrderedDict.fromkeys(tags)) if tags.count('always') > 0: tags.remove('always') if len(tags) == 0: sys.stderr.write('%s has no tags\n' % playbook_file) return tags
36,266
def get_mprocess_names_type1() -> List[str]: """returns the list of valid MProcess names of type1. Returns ------- List[str] the list of valid MProcess names of type1. """ names = ( get_mprocess_names_type1_set_pure_state_vectors() + get_mprocess_names_type1_set_kraus_matrices() ) return names
36,267
def LoadAI(FileName): """LoadSC: This loads an IGOR binary file saved by LabView. Loads LabView Scope data from igor and extracts a bunch of interesting information (inf) from the data header""" IBWData = igor.LoadIBW(FileName); # I am going to store the experimental information in a dictionary AIdata = {"Note": IBWData["Note"], "Data": IBWData["Data"]}; return AIdata
36,268
def accept_message_request(request, user_id): """ Ajax call to accept a message request. """ sender = get_object_or_404(User, id=user_id) acceptor = request.user if sender in acceptor.profile.pending_list.all(): acceptor.profile.pending_list.remove(sender) acceptor.profile.contact_list.add(sender) sender.profile.contact_list.add(acceptor) Notification.objects.create(Actor=acceptor, Target=sender, notif_type='confirmed_msg_request') text = 'Added to contact list' else: text = 'Unexpected error!' return HttpResponse(text)
36,269
def cache_response(method, url, data, params, response): """ Caches the response in cache index """ logger.info("Caching response") response_json = response.__dict__ res = {} for key in ["content", "text", "json", "html"]: if key in response_json: res[key] = response_json[key] body = { "method": method, "url": url, "data": data, "params": params, "response": res } try: es.index(index="cache", body=body) except Exception as err: logger.error(err) return
36,270
def pytz_timezones_from_utc_offset(tz_offset, common_only=True): """ Determine timezone strings corresponding to the given timezone (UTC) offset Parameters ---------- tz_offset : int, or float Hours of offset from UTC common_only : bool Whether to only return common zone names (True) or all zone names (False) Returns ------- results : list List of Olson database timezone name strings Examples -------- obs.pytz_timezones_from_utc_offset(-7) obs.pytz_timezones_from_utc_offset(-7.62, common_only=False) """ #pick one of the timezone collections (All possible vs only the common zones) timezones = pytz.common_timezones if common_only else pytz.all_timezones # convert the float hours offset to a timedelta offset_days, offset_seconds = 0, int(tz_offset * 3600) if offset_seconds < 0: offset_days = -1 offset_seconds += 24 * 3600 desired_delta = dt.timedelta(offset_days, offset_seconds) # Loop through the timezones and find any with matching offsets null_delta = dt.timedelta(0, 0) results = [] for tz_name in timezones: tz = pytz.timezone(tz_name) non_dst_offset = getattr(tz, '_transition_info', [[null_delta]])[-1] if desired_delta == non_dst_offset[0]: results.append(tz_name) return results
36,271
def _sympysage_rf(self): """ EXAMPLES:: sage: from sympy import Symbol, rf sage: _ = var('x, y') sage: rfxy = rf(Symbol('x'), Symbol('y')) sage: assert rising_factorial(x,y)._sympy_() == rfxy.rewrite('gamma') sage: assert rising_factorial(x,y) == rfxy._sage_() """ from sage.arith.all import rising_factorial return rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
36,272
def validate_token_parameters(params): """Ensures token precence, token type, expiration and scope in params.""" if 'error' in params: raise_from_error(params.get('error'), params) if not 'access_token' in params: raise MissingTokenError(description="Missing access token parameter.") if not 'token_type' in params: if os.environ.get('OAUTHLIB_STRICT_TOKEN_TYPE'): raise MissingTokenTypeError() # If the issued access token scope is different from the one requested by # the client, the authorization server MUST include the "scope" response # parameter to inform the client of the actual scope granted. # https://tools.ietf.org/html/rfc6749#section-3.3 if params.scope_changed: message = 'Scope has changed from "{old}" to "{new}".'.format( old=params.old_scope, new=params.scope, ) scope_changed.send(message=message, old=params.old_scopes, new=params.scopes) if not os.environ.get('OAUTHLIB_RELAX_TOKEN_SCOPE', None): w = Warning(message) w.token = params w.old_scope = params.old_scopes w.new_scope = params.scopes raise w
36,273
def get_feature_gated_capabilities(request=None): """Return the capabilities gated behind enabled features. Args: request (django.http.HttpRequest, optional): The HTTP request from the client. Yields: tuple: A 3-tuple of the following: * The category of the capability (:py:class:`unicode`). * The capability name (:py:class:`unicode`). * Whether or not the capability is enabled (:py:class:`bool`). """ for category, caps in _feature_gated_capabilities.items(): for cap, required_feature in caps.items(): if required_feature.is_enabled(request=request): yield category, cap, True
36,274
def test_chip64_skip_next_if_unequal(): """ Tests the c64.skip_next_if_unequal() method, ensures that the code_ptr is appropriately modified. """ c64 = chip64.Chip64() c64.registers[0] = 0 c64.registers[1] = 1 c64.skip_next_if_unequal(0, 1) assert c64.code_ptr == 2 c64.reset() c64.registers[0] = 0 c64.registers[1] = 0 c64.skip_next_if_unequal(0, 1) assert c64.code_ptr != 2
36,275
def plotting_data_for_inspection(xdata,ydata,plot_title,plot_xlabel,plot_ylabel,filename_for_saving,folder_to_save, block_boolean): """ Plots data for user to look at within program parameters ---------- xdata,ydata: x and y data to be plotted plot_xlabel,plot_ylabel: label x and y axes in plot file_name_for_saving: string given for saving file for later referece block_boolean: True or False, tells if program waits for figure to close """ plot_figure, plot_axis = plt.subplots() plt.plot(xdata,ydata,color='blue') plt.xlabel(plot_xlabel) plt.ylabel(plot_ylabel) plt.suptitle(plot_title) plt.show(block=block_boolean) os.chdir(folder_to_save) plt.savefig(filename_for_saving) os.chdir('..') return plot_figure, plot_axis
36,276
def run_sax_on_sequences(rdd_sequences_data, paa, alphabet_size):
    """
    Perform the Symbolic Aggregate Approximation (SAX) on the data provided in **ts_data**

    :param rdd_sequences_data: rdd containing all sequences: returned by function *sliding_windows()*:
        *sequences_data* contain a list of all seq : tuple composed by: (key, sequence_list, seq_mean, seq_sd)
        - keys: an unique key for each seq
        - sequence_list: the normalized sequence as numpy array giving TS points: [ [t1, v1 ], ... , [tN, vN] ]
    :type rdd_sequences_data: RDD of list

    :param paa: number of letters in output word
    :type paa: int

    :param alphabet_size: number of characters in result word
    :type alphabet_size: int

    :return: the PAA result, the SAX breakpoints and the SAX string
    :rtype: SaxResult object

    Note that each letter have the same signification (same breakpoints between all the seq).

    :raise exception: IkatsException when an error occurred while processing the sax algorithm
    """
    # Fail fast on bad argument types/values before any Spark work starts.
    if type(rdd_sequences_data) is not pyspark.rdd.PipelinedRDD:
        msg = "Unexpected type : PipelinedRDD expected for rdd_sequences_data={}"
        raise IkatsException(msg.format(rdd_sequences_data))

    if type(alphabet_size) is not int or alphabet_size not in range(2, 27):
        msg = "Unexpected arg value : integer within [2,26] expected for alphabet_size={}"
        raise IkatsException(msg.format(alphabet_size))

    try:
        LOGGER.info('Starting run_sax_on_sequences ...')

        # Calculate the PAAs on all the sequences
        def _spark_internal(sequence, local_paa=paa):
            """
            Compute the PAA of each sequence *sequence*.
            """
            local_paa_seq = run_paa(ts_data=np.array(sequence), paa_size=local_paa).means
            # Trim a trailing extra mean if run_paa produced one too many.
            if len(local_paa_seq) != local_paa:
                local_paa_seq = local_paa_seq[: len(local_paa_seq) - 1]
            return local_paa_seq

        # INPUT : rdd_sequences_data = [(key, sequence_list, seq_mean, seq_sd),...]
        # OUTPUT : paa_seq = one sequence of all the paa concatenated (flatMap)
        # PROCESS : Run PAA on the TS data sequences
        paa_seq = rdd_sequences_data.sortByKey().flatMap(lambda x: _spark_internal(x[1]))
        # Note that *sortByKey()* is necessary for reproducible results

        # Once PAA calculated, then, find breakpoints and SAX words
        sax_result = SaxResult(paa=paa_seq, breakpoints=[], sax_word='')

        # Build the distribution breakpoints: need a flat list of paa
        # Note that this method is not sparkified => need to collect the paa data
        sax_result.build_breakpoints(alphabet_size)

        # Give the SAX result for all sequences (all timeseries)
        # Note that the concatenated entire sax word is collected.
        sax_result.build_sax_word()

        LOGGER.info("... ended run_sax_on_sequences.")
        return sax_result

    except Exception:
        # Any failure during the Spark pipeline is logged and re-wrapped in
        # the project-level IkatsException.
        LOGGER.error("... ended run_sax_on_sequences with error.")
        raise IkatsException("Failed execution: run_sax_on_sequences()")
36,277
def TC_analysis(EC_dict, target_substrate, filenamechoice, smilesFile, inchiFile):
    """This function carries out similarity indexing on the substrates of enzymes of interest.

    NOTE(review): this is Python 2 code (``dict.iteritems``, byte-encoded
    keys); it will not run unmodified under Python 3.

    Arguments:
    EC_dict -- dictionary of EC numbers with the relevant substrates
    target_substrate -- SMILES string of the substrate of interest
    filenamechoice -- base name (without extension) of the results text file
    smilesFile -- path to a JSON dump mapping substrate names to SMILES
    inchiFile -- path to a JSON dump mapping substrate names to InChI"""

    #Reading in the SMILES dump.
    smilesData = open(smilesFile)
    encodedSmilesDict = json.load(smilesData)
    smilesData.close()
    smilesDict = {k.encode('utf-8'): v.encode('utf-8') for k, v in encodedSmilesDict.iteritems()}

    # Same for the InChI dump, used as a fallback when no SMILES is known.
    inchiData = open(inchiFile)
    encodedInchiDict = json.load(inchiData)
    inchiData.close()
    inchiDict = {k.encode('utf-8'): v.encode('utf-8') for k, v in encodedInchiDict.iteritems()}

    # Fingerprint of the substrate of interest, compared against below.
    net_substrate = pybel.readstring('smi', target_substrate)
    target_fp = net_substrate.calcfp('FP4')

    #Setting master_list to hold all the tuples of (TC, substrate, EC number)
    master_list = []

    for k, v in EC_dict.iteritems():
        #TC_vals = []
        substrates_sub = v
        for entry in substrates_sub:
            # z records which encoding we found: 0 = SMILES, 1 = InChI.
            if entry in smilesDict:
                cString, z = smilesDict[str(entry)], 0
            elif entry in inchiDict:
                cString, z = inchiDict[str(entry)], 1
            else:
                continue
            #print str(entry)
            #Conversion to SMILES and TC calculation.
            # Unparseable structures get a sentinel TC of -1.
            if z == 0:
                try:
                    instring = pybel.readstring('smi', cString)
                    instring_fp = instring.calcfp('FP4')
                    TC = round((instring_fp | target_fp), 3)
                except IOError:
                    TC = float('-1')
            if z == 1:
                try:
                    instring = pybel.readstring('inchi', cString)
                    instring_fp = instring.calcfp('FP4')
                    TC = round((instring_fp | target_fp), 3)
                except IOError:
                    TC = float('-1')

            #Tuple creation in the order (TC, substrate, EC number). A list of tuples can be sorted by whichever
            # entry, the default being the first.
            #We leave the default setting so that it sorts by descending TC and prints the tuples in that order to a
            # text file for our viewing pleasure.
            set_info = (TC, entry, k)
            master_list.append(set_info)

    # Sort descending by TC (first tuple element) and write the report.
    final_list = sorted(master_list, reverse=True)
    stats_file = open(filenamechoice + '.txt', 'w')
    stats_file.write('BRENDA ENZYME SEARCH RESULTS\nRESULTS GIVEN AS:\nTC\tNATURAL SUBSTRATE\tEC NUMBER\n')
    for info in final_list:
        si, substr, ecno = info
        stats_file.write(str(si) + '\t' + substr + '\t' + ecno + '\n')
    stats_file.close()
36,278
def attack_speed(game: FireEmblemGame, unit: ActiveUnit, weapon: Union[ActiveWeapon, None]) -> int: """ Calculates and returns the unit's Attack Speed, based on the AS calculation method of the current game, the unit's stats, the given weapon's Weight. If the weapon is None, always returns the unit's base Spd stat :param game: game to determine method to use for calculating AS. :param unit: unit for which to calculate AS :param weapon: Weapon unit is assumed to be holding :return: Unit's functional Attack Speed """ method = game.attack_speed_method if method == AS_Methods.SPEED or (weapon is None and method != AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR_OVER_FIVE): return stats.calc_spd(unit) elif method == AS_Methods.SPEED_MINUS_WEIGHT: return stats.calc_spd(unit) - weapon.template.wt elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_CON: return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_con(unit), 0) elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_CON_BUT_NOT_FOR_MAGIC: if weapon.template.weapon_type in (WeaponType.TOME, WeaponType.FIRE, WeaponType.THUNDER, WeaponType.WIND, WeaponType.DARK, WeaponType.LIGHT, WeaponType.ANIMA, WeaponType.BLACK, WeaponType.WHITE): return stats.calc_spd(unit) - weapon.template.wt else: return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_con(unit), 0) elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR: return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_str(unit), 0) elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR_OVER_FIVE: # this is exclusively the calc method for Three Houses, which allows carried items to have weight # if the unit has an equipped item, count that too item_wt = 0 for item in unit.items.all(): if item.equipped: item_wt = item.template.wt weapon_wt = weapon.template.wt if weapon else 0 return stats.calc_spd(unit) - max(weapon_wt + item_wt - stats.calc_str(unit) // 5, 0) else: raise ValueError(f"Unrecognized AS calculation method '{method}' for game {game.name}")
36,279
def test_nr_cfg_inline_commands_plugin_scrapli():
    """
    Push two inline NTP config commands through ``nr.cfg`` using the scrapli
    plugin and verify each host reports a successful, non-empty change.

    Expected shape of the return, per host under ``nrp1``::

        scrapli_send_config:
            changed: True
            diff:
            exception: None
            failed: False
            result: <applied configuration text>
    """
    response = client.cmd(
        tgt="nrp1",
        fun="nr.cfg",
        arg=["ntp server 1.1.1.1", "ntp server 1.1.1.2"],
        kwarg={"plugin": "scrapli"},
        tgt_type="glob",
        timeout=60,
    )
    for host_name, host_result in response["nrp1"].items():
        task_result = host_result["scrapli_send_config"]
        assert task_result["changed"] is True
        assert task_result["exception"] is None
        assert task_result["failed"] is False
        assert isinstance(task_result["result"], str)
        assert len(task_result["result"]) > 0
36,280
def set_pow_ref_by_upstream_turbines_in_radius(
    df, df_upstream, turb_no, x_turbs,
        y_turbs, max_radius, include_itself=False):
    """Add a 'pow_ref' column to the dataframe.

    The new column is the mean of the pow_%03d columns of the turbines that
    are upstream AND lie within [max_radius] of turbine [turb_no]. This is a
    thin wrapper around _set_col_by_upstream_turbines_in_radius with the
    column names fixed to 'pow' -> 'pow_ref' and circular averaging disabled
    (power is a linear quantity).

    Args:
        df ([pd.DataFrame]): Dataframe with measurements (wd_%03d, ws_%03d,
            ti_%03d, pow_%03d, and possibly more).
        df_upstream ([pd.DataFrame]): Wind-direction ranges with their
            corresponding upstream turbines, e.g. from
            flasc.floris_tools.get_upstream_turbs_floris(...).
        turb_no ([int]): Turbine number the radius is measured from.
        x_turbs ([list, array]): x locations of the turbines.
        y_turbs ([list, array]): y locations of the turbines.
        max_radius ([float]): Upstream turbines farther than this radius are
            excluded from the average.
        include_itself (bool, optional): Whether turbine turb_no's own
            measurements contribute to the average. Defaults to False.

    Returns:
        df ([pd.DataFrame]): Input dataframe plus the 'pow_ref' column.
    """
    return _set_col_by_upstream_turbines_in_radius(
        col_out='pow_ref',
        col_prefix='pow',
        df=df,
        df_upstream=df_upstream,
        turb_no=turb_no,
        x_turbs=x_turbs,
        y_turbs=y_turbs,
        max_radius=max_radius,
        circular_mean=False,
        include_itself=include_itself,
    )
36,281
def load_block_production(config: ValidatorConfig, identity_account_pubkey: str):
    """Fetch block-production statistics for one validator identity.

    Wraps the ``getBlockProduction`` JSON-RPC call, see
    https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction

    :param config: validator configuration holding the RPC endpoint(s).
    :param identity_account_pubkey: validator identity account public key.
    :return: whatever smart_rpc_call yields ({} on failure, presumably).
    """
    request_params = [{'identity': identity_account_pubkey}]
    return smart_rpc_call(config, "getBlockProduction", request_params, {})
36,282
def get_object(dic, img_width, img_height, center, z, angle):
    """Drive the UR3 robot to pick up the detected object.

    Converts the object's pixel-space center into robot coordinates, then
    commands the move.

    Parameters
    ----------
    dic : dict
        Dictionary of positions.
    img_width : int
        Image width, in pixels.
    img_height : int
        Image height, in pixels.
    center : list
        Object's center, in pixels.
    z : float
        Object's height.
    angle : float
        Rotation angle of the object.
    """
    target_x, target_y = __calcul_positions(dic, img_width, img_height, center)
    __goto_object(dic, target_x, target_y, z, angle)
36,283
def run_doxygen(*, conf: DoxygenConfiguration, root_dir: Path) -> int:
    """Run Doxygen.

    Renders the configuration to ``doxygen.conf`` inside ``root_dir`` and
    invokes the ``doxygen`` executable from that directory.

    Parameters
    ----------
    conf
        A `DoxygenConfiguration` that configures the Doxygen build.
    root_dir
        Root directory of the Doxygen build; created if missing.

    Returns
    -------
    status
        Shell status code returned by the ``doxygen`` executable.
    """
    root_dir.mkdir(parents=True, exist_ok=True)
    with working_directory(root_dir):
        config_file = Path("doxygen.conf")
        config_file.write_text(conf.render())
        completed = subprocess.run(["doxygen", str(config_file)])
        return completed.returncode
36,284
def remove_metatlas_objects_by_list(object_list, field, filter_list):
    """Filter out objects whose attribute matches any filter string.

    inputs:
        object_list: iterable to be filtered by its attribute values
        field: name of the attribute to filter on
        filter_list: strings tested as substrings of the attribute value
    returns a filtered list of the objects with no match in filter_list
    """
    def attribute_value(obj):
        # Extract the attribute the filter strings are compared against.
        return getattr(obj, field)

    return filter_by_list(object_list, attribute_value, filter_list, include=False)
36,285
def test_remove_with_index():
    """Verify Remove(index) deletes exactly the item at that position."""
    # Arrange: three components; removing index 1 should leave #1 and #3.
    components = [
        KiCadComponent({"name": "TestComponent1"}),
        KiCadComponent({"name": "TestComponent2"}),
        KiCadComponent({"name": "TestComponent3"}),
    ]
    expected = [components[0], components[2]]

    # Act
    actual = KiCadComponentList()
    for component in components:
        actual.Add(component)
    actual.Remove(1)

    # Assert
    assert len(actual) == len(expected)
    for position in range(len(actual)):
        assert actual[position] == expected[position]
36,286
def sync_database_account_data_resources(
    neo4j_session: neo4j.Session,
    subscription_id: str,
    database_account_list: List[Dict],
    azure_update_tag: int,
) -> None:
    """Load the sub-resources embedded in each database account response.

    These are the resources returned inline with a Cosmos DB database
    account: CORS policy, failover policies, private endpoint connections,
    virtual network rules, and the write/read/associated locations. Each
    loader is invoked once per account, in a fixed order.
    """
    # Loaders are applied in this exact order for every account.
    loaders = (
        _load_cosmosdb_cors_policy,
        _load_cosmosdb_failover_policies,
        _load_cosmosdb_private_endpoint_connections,
        _load_cosmosdb_virtual_network_rules,
        _load_database_account_write_locations,
        _load_database_account_read_locations,
        _load_database_account_associated_locations,
    )
    for account in database_account_list:
        for loader in loaders:
            loader(neo4j_session, account, azure_update_tag)
36,287
async def subscriber_callback(protocol):
    """Receive one pose sample from the publisher and print the position.

    The payload is 12 float64 values: the first 3 are the position vector
    (meters), the remaining 9 are a row-major 3x3 rotation matrix.
    """
    payload = await protocol.recvfrom()
    values = np.frombuffer(payload, dtype=np.float64)
    position = values[:3].reshape(3, 1)
    rotation = values[3:].reshape(3, 3)
    print("Position in numpy [meter]")
    print(position)
36,288
def resnet_encoder(inputs, input_depth=16, block_type='wide', activation_fn=tf.nn.relu, is_training=True, reuse=None, outputs_collections=None, scope=None):
    """Defines an encoder network based on resnet blocks.

    Downsamples the input with a strided convolution, then a stack of
    resnet blocks (each stage halves the spatial size and doubles the
    depth), and finally flattens into a 2048-dim fully-connected code.

    :param inputs: input tensor; spatial size is read from dimension 1
        (assumes a square NHWC layout — TODO confirm).
    :param input_depth: channel depth after the initial convolution;
        doubled at every downsampling stage.
    :param block_type: only 'wide' (wide_resnet blocks) is implemented.
    :param activation_fn: nonlinearity used throughout the network.
    :param is_training: NOTE(review): accepted but never used in this body —
        batch norm training mode is presumably handled elsewhere; verify.
    :param reuse: variable-scope reuse flag.
    :param outputs_collections: collection(s) the output is registered in.
    :param scope: optional variable scope name.
    :return: the 2048-dim encoded output tensor.
    """
    if block_type == 'wide':
        resnet_block = wide_resnet
    else:
        raise NotImplementedError
    normalizer_fn = slim.batch_norm
    with tf.variable_scope(scope, [inputs], reuse=reuse) as sc:
        # Spatial input size; number of stages scales with log2 of it.
        size_in = inputs.get_shape().as_list()[1]
        num_stages = int(log2(size_in))
        # Initial convolution: stride-2 5x5 conv, no normalization yet.
        net = slim.conv2d(inputs, input_depth, kernel_size=5,
                          activation_fn=activation_fn,
                          padding='SAME',
                          weights_initializer=slim.initializers.variance_scaling_initializer(),
                          normalizer_fn=None,
                          stride=2, scope='conv_in')
        # Each stage: one downsampling block followed by one plain block,
        # with the channel depth doubling per stage.
        for i in range(1, num_stages - 2):
            current_depth = input_depth * 2**i
            net = resnet_block(net, current_depth, resample='down', activation_fn=activation_fn, scope='resnet%d_a' % i)
            net = resnet_block(net, current_depth, resample=None, activation_fn=activation_fn, scope='resnet%d_b' % i)
        # Reshaping into a 1D code
        net = slim.flatten(net, scope='flat')
        output = slim.fully_connected(net, 2048,
                                      activation_fn=activation_fn,
                                      normalizer_fn=normalizer_fn,
                                      scope='fc_enc1')
        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope,
                                                output)
36,289
def execute_stored_proc(cursor, sql):
    """Execute a stored procedure and fold its output into one result row.

    Parameters
    ----------
    cursor: `OracleCursor`
    sql: `str`
        stored-proc sql statement.

    Returns
    -------
    A single-element list of one 4-tuple whose last field is the
    procedure's output lines joined with newlines.
    """
    proc_name, proc_args = _sql_to_stored_proc_cursor_args(sql)
    output_lines = cursor.callproc(proc_name, parameters=proc_args)
    joined_status = '\n'.join(output_lines)
    return [(None, None, None, joined_status)]
36,290
def copy_dimensions(fi, fo, removedim=None, renamedim=None, changedim=None, adddim=None):
    """
    Create dimensions in output file from dimensions in input file.

    Parameters
    ----------
    fi : file_handle
        File handle of opened netcdf input file
    fo : file_handle
        File handle of opened netcdf output file
    removedim : list of str, optional
        Do not create dimensions given in *removedim* in output file.
    renamedim : dict, optional
        Rename dimensions in output file compared to input file.
        Dimension names in input file are given as dictionary keys,
        corresponding dimension names of output file are give as
        dictionary values.
    changedim : dict, optional
        Change the size of the output dimension compared to the input
        file. Dimension names are given as dictionary keys, corresponding
        dimension sizes are given as dictionary values.
    adddim : dict, optional
        Add dimension to output file. New dimension names are given as
        dictionary keys and new dimension sizes are given as dictionary
        values.

    Returns
    -------
    nothing
        The output file will have the altered and unaltered dimensions
        of the input file.

    Examples
    --------
    .. code-block:: python

       copy_dimensions(fi, fo,
                       removedim=['patch'],
                       renamedim={'x': 'lon', 'y': 'lat'},
                       changedim={'mland': 1})
    """
    # Use None sentinels instead of mutable default arguments; normalize
    # each optional argument to its previous default value.
    removedim = [] if removedim is None else _tolist(removedim)
    renamedim = {} if renamedim is None else renamedim
    changedim = {} if changedim is None else changedim
    adddim = {} if adddim is None else adddim
    for d in fi.dimensions.values():
        # remove dimension if in removedim
        if d.name not in removedim:
            # change dimension size if in changedim; an explicit size
            # override wins even over an unlimited input dimension
            if d.name in changedim:
                nd = changedim[d.name]
            elif d.isunlimited():
                nd = None  # None => unlimited dimension in netCDF4
            else:
                nd = len(d)
            # rename dimension if in renamedim
            oname = renamedim.get(d.name, d.name)
            # create dimension
            fo.createDimension(oname, nd)
    # add new dimensions that do not already exist in the output file
    for name, size in adddim.items():
        if name not in fo.dimensions:
            fo.createDimension(name, size)
    return
36,291
def no_order_func_nb(c: OrderContext, *args) -> Order:
    """Placeholder order function that returns no order.

    Always yields ``NoOrder`` regardless of the context or extra arguments.
    The ``_nb`` suffix presumably marks it as numba-compatible — TODO confirm.
    """
    return NoOrder
36,292
def test_print_version():
    """Verify the program exits cleanly and prints its CoreMS version."""
    exit_status, output = getstatusoutput(f'{prg} {directory}')
    assert exit_status == 0
    assert re.search("CoreMS version", output)
36,293
def test_async_add_opp_job_schedule_coroutinefunction(loop):
    """Coroutine jobs go through create_task, not call_soon or add_job."""
    opp = MagicMock(loop=MagicMock(wraps=loop))

    async def sample_coro():
        pass

    ha.OpenPeerPower.async_add_opp_job(opp, ha.OppJob(sample_coro))
    assert len(opp.loop.call_soon.mock_calls) == 0
    assert len(opp.loop.create_task.mock_calls) == 1
    assert len(opp.add_job.mock_calls) == 0
36,294
def get_queued():
    """
    Return one batch of notifications that are ready to send:

    - status is queued, AND
    - scheduled_time is in the past or unset (None).
    """
    due_filter = Q(scheduled_time__lte=now()) | Q(scheduled_time=None)
    ready = (
        PushNotification.objects
        .filter(status=STATUS.queued)
        .select_related('template')
        .filter(due_filter)
        .order_by(*get_sending_order())
    )
    return ready[:get_batch_size()]
36,295
def address_family(config):
    """Print a summary of one address-family BGP neighbor's configuration.

    ``check_options`` comes from ``neigh_options``; indices 0, 1 and 3 are
    presumably next-hop-self, route-reflector and activate flags — confirm
    against ``neigh_options``.
    """
    check_options = neigh_options(config)
    route_map_cfg = config.get('route-map', {})
    prefix_list_cfg = config.get('prefix-list', {})
    print(f"{'Neighbor: ':>20}{config.get('id', {}):<10}")
    print(f"{'Next-Hop-Self: ':>20}{check_options[0][0]}")
    print(f"{'Route-Reflector: ':>20}{check_options[1][0]}")
    print(f"{'Route-Map: ':>20}{route_map_cfg.get('route-map-name', 'None'):<15}Direction: {route_map_cfg.get('inout', 'None')}")
    print(f"{'Prefix-list: ':>20}{prefix_list_cfg.get('prefix-list-name', 'None'):<15}Direction: {prefix_list_cfg.get('inout', 'None')}")
    print(f"{'Activate: ':>20}{check_options[3][0]}\n")
36,296
def hue_of_color(color):
    """
    Return the HSV hue of an RGB color.

    :param color: RGB tuple with channels in 0-255.
    :return: Hue in the range 0.0-1.0.
    """
    red, green, blue = (channel / 255 for channel in color)
    hue, _saturation, _value = rgb_to_hsv(red, green, blue)
    return hue
36,297
def recent_tracks(user, api_key, page):
    """Fetch one page of `user`'s most recent tracks via the Last.fm API.

    Uses `api_key` for authentication; page size is taken from
    LastfmStats.plays_per_page. Returns the decoded JSON response.
    """
    url = api_url % (user, api_key, page, LastfmStats.plays_per_page)
    response = requests.get(url)
    return response.json()
36,298
def extract_hdf5_frames(hdf5_frames):
    """
    Extract video frames from an HDF5 dataset into a list of arrays.

    Frames are keyed by their stringified index ("0", "1", ...); each frame
    must contain exactly 120 rows, which are stacked into a numpy array.

    :param hdf5_frames: original video frames
    :return [frame] list of frames
    """
    extracted = []
    for frame_idx in range(len(hdf5_frames)):
        frame = hdf5_frames[str(frame_idx)]
        assert len(frame) == 120
        rows = [frame[row_idx] for row_idx in range(len(frame))]
        extracted.append(np.array(rows))
    return extracted
36,299