Dataset schema: content (string, lengths 22 to 815k) and id (int64, values 0 to 4.91M).
def plot_elite(d: dict, f, ylabel: str, title: str, save_path: str):
    """Create a plot of the given dictionary.
    Each value of d consists of a list of length 3 (min, avg, max)."""
    # Parse the values
    keys = sorted(d.keys())
    elite_data = [f(d[k]) for k in keys]

    # Create the plot
    plt.figure(figsize=(len(keys) / 2, 3))
    plt.plot(keys, elite_data)
    plt.title(title)
    plt.xlabel("Generations")
    plt.ylabel(ylabel)
    plt.ylim(0, max(elite_data) * 1.05)
    plt.grid()
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()
14,600
def new_oauth2ProviderLimited(pyramid_request):
    """this is used to build a new auth"""
    validatorHooks = CustomValidator_Hooks(pyramid_request)
    provider = oauth2_provider.OAuth2Provider(
        pyramid_request,
        validator_api_hooks=validatorHooks,
        validator_class=CustomValidator,
        server_class=WebApplicationServer,
    )
    return provider
14,601
def rand_color(red=(92, 220), green=(92, 220), blue=(92, 220)):
    """
    Random red, green, blue with the option to limit the ranges.
    The ranges are tuples 0..255.
    """
    r = rand_byte(red)
    g = rand_byte(green)
    b = rand_byte(blue)
    return f"#{r:02x}{g:02x}{b:02x}"
14,602
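A minimal usage sketch for the rand_color helper above. Its rand_byte dependency is not part of this row, so the stand-in below is an assumption: it simply draws a uniform integer from the given inclusive range.

import random

def rand_byte(rng=(0, 255)):
    # assumed stand-in for the rand_byte dependency used by rand_color
    return random.randint(rng[0], rng[1])

# keep the red channel bright, leave green and blue at their defaults
print(rand_color(red=(180, 220)))  # e.g. '#b470d2'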
def tile_to_html(tile, fig_size=None):
    """ Provide HTML string representation of Tile image."""
    import base64
    b64_img_html = '<img src="data:image/png;base64,{}" />'
    png_bits = tile_to_png(tile, fig_size=fig_size)
    b64_png = base64.b64encode(png_bits).decode('utf-8').replace('\n', '')
    return b64_img_html.format(b64_png)
14,603
def generate_buchwald_hartwig_rxns(df):
    """
    Converts the entries in the excel files from Sandfort et al. to reaction SMILES.
    """
    df = df.copy()
    fwd_template = '[F,Cl,Br,I]-[c;H0;D3;+0:1](:[c,n:2]):[c,n:3].[NH2;D1;+0:4]-[c:5]>>[c,n:2]:[c;H0;D3;+0:1](:[c,n:3])-[NH;D2;+0:4]-[c:5]'
    methylaniline = 'Cc1ccc(N)cc1'
    pd_catalyst = Chem.MolToSmiles(Chem.MolFromSmiles('O=S(=O)(O[Pd]1~[NH2]C2C=CC=CC=2C2C=CC=CC1=2)C(F)(F)F'))
    methylaniline_mol = Chem.MolFromSmiles(methylaniline)
    rxn = rdChemReactions.ReactionFromSmarts(fwd_template)
    products = []
    for i, row in df.iterrows():
        reacts = (Chem.MolFromSmiles(row['Aryl halide']), methylaniline_mol)
        rxn_products = rxn.RunReactants(reacts)
        rxn_products_smiles = set([Chem.MolToSmiles(mol[0]) for mol in rxn_products])
        assert len(rxn_products_smiles) == 1
        products.append(list(rxn_products_smiles)[0])
    df['product'] = products
    rxns = []
    can_smiles_dict = {}
    for i, row in df.iterrows():
        aryl_halide = canonicalize_with_dict(row['Aryl halide'], can_smiles_dict)
        can_smiles_dict[row['Aryl halide']] = aryl_halide
        ligand = canonicalize_with_dict(row['Ligand'], can_smiles_dict)
        can_smiles_dict[row['Ligand']] = ligand
        base = canonicalize_with_dict(row['Base'], can_smiles_dict)
        can_smiles_dict[row['Base']] = base
        additive = canonicalize_with_dict(row['Additive'], can_smiles_dict)
        can_smiles_dict[row['Additive']] = additive
        reactants = f"{aryl_halide}.{methylaniline}.{pd_catalyst}.{ligand}.{base}.{additive}"
        rxns.append(f"{reactants}>>{row['product']}")
    return rxns
14,604
def init_bold_std_trans_wf( mem_gb, omp_nthreads, spaces, name="bold_std_trans_wf", use_compression=True, use_fieldwarp=False, ): """ Sample fMRI into standard space with a single-step resampling of the original BOLD series. .. important:: This workflow provides two outputnodes. One output node (with name ``poutputnode``) will be parameterized in a Nipype sense (see `Nipype iterables <https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__), and a second node (``outputnode``) will collapse the parameterized outputs into synchronous lists of the output fields listed below. Workflow Graph .. workflow:: :graph2use: colored :simple_form: yes from niworkflows.utils.spaces import SpatialReferences from fprodents.workflows.bold.resampling import init_bold_std_trans_wf wf = init_bold_std_trans_wf( mem_gb=3, omp_nthreads=1, spaces=SpatialReferences( spaces=['MNI152Lin', ('MNIPediatricAsym', {'cohort': '6'})], checkpoint=True), ) Parameters ---------- mem_gb : :obj:`float` Size of BOLD file in GB omp_nthreads : :obj:`int` Maximum number of threads an individual process may use spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences` A container for storing, organizing, and parsing spatial normalizations. Composed of :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references. Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a dictionary with template specifications (e.g., a specification of ``{'resolution': 2}`` would lead to resampling on a 2mm resolution of the space). name : :obj:`str` Name of workflow (default: ``bold_std_trans_wf``) use_compression : :obj:`bool` Save registered BOLD series as ``.nii.gz`` use_fieldwarp : :obj:`bool` Include SDC warp in single-shot transform from BOLD to MNI Inputs ------ anat2std_xfm List of anatomical-to-standard space transforms generated during spatial normalization. bold_mask Skull-stripping mask of reference image bold_split Individual 3D volumes, not motion corrected fieldwarp a :abbr:`DFM (displacements field map)` in ITK format hmc_xforms List of affine transforms aligning each volume to ``ref_image`` in ITK format bold2anat Affine transform from ``ref_bold_brain`` to T1 space (ITK format) name_source BOLD series NIfTI file Used to recover original information lost during processing templates List of templates that were applied as targets during spatial normalization. Outputs ------- bold_std BOLD series, resampled to template space bold_std_ref Reference, contrast-enhanced summary of the BOLD series, resampled to template space bold_mask_std BOLD series mask in template space template Template identifiers synchronized correspondingly to previously described outputs. 
""" from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms from niworkflows.interfaces.itk import MultiApplyTransforms from niworkflows.interfaces.utility import KeySelect from niworkflows.interfaces.nibabel import GenerateSamplingReference from niworkflows.interfaces.nilearn import Merge from niworkflows.utils.spaces import format_reference workflow = Workflow(name=name) output_references = spaces.cached.get_spaces(nonstandard=False, dim=(3,)) std_vol_references = [ (s.fullname, s.spec) for s in spaces.references if s.standard and s.dim == 3 ] if len(output_references) == 1: workflow.__desc__ = """\ The BOLD time-series were resampled into standard space, generating a *preprocessed BOLD run in {tpl} space*. """.format( tpl=output_references[0] ) elif len(output_references) > 1: workflow.__desc__ = """\ The BOLD time-series were resampled into several standard spaces, correspondingly generating the following *spatially-normalized, preprocessed BOLD runs*: {tpl}. """.format( tpl=", ".join(output_references) ) inputnode = pe.Node( niu.IdentityInterface( fields=[ "anat2std_xfm", "bold_mask", "bold_split", "fieldwarp", "hmc_xforms", "bold2anat", "name_source", "templates", ] ), name="inputnode", ) iterablesource = pe.Node( niu.IdentityInterface(fields=["std_target"]), name="iterablesource" ) # Generate conversions for every template+spec at the input iterablesource.iterables = [("std_target", std_vol_references)] split_target = pe.Node( niu.Function( function=_split_spec, input_names=["in_target"], output_names=["space", "template", "spec"], ), run_without_submitting=True, name="split_target", ) select_std = pe.Node( KeySelect(fields=["anat2std_xfm"]), name="select_std", run_without_submitting=True, ) select_tpl = pe.Node( niu.Function(function=_select_template), name="select_tpl", run_without_submitting=True, ) gen_ref = pe.Node( GenerateSamplingReference(), name="gen_ref", mem_gb=0.3 ) # 256x256x256 * 64 / 8 ~ 150MB) mask_std_tfm = pe.Node( ApplyTransforms(interpolation="MultiLabel"), name="mask_std_tfm", mem_gb=1 ) ref_std_tfm = pe.Node( ApplyTransforms(interpolation="LanczosWindowedSinc"), name="ref_std_tfm", mem_gb=1 ) # Write corrected file in the designated output dir mask_merge_tfms = pe.Node( niu.Merge(2), name="mask_merge_tfms", run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB, ) nxforms = 3 + use_fieldwarp merge_xforms = pe.Node( niu.Merge(nxforms), name="merge_xforms", run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB, ) workflow.connect([(inputnode, merge_xforms, [("hmc_xforms", "in%d" % nxforms)])]) if use_fieldwarp: workflow.connect([(inputnode, merge_xforms, [("fieldwarp", "in3")])]) bold_to_std_transform = pe.Node( MultiApplyTransforms( interpolation="LanczosWindowedSinc", float=True, copy_dtype=True ), name="bold_to_std_transform", mem_gb=mem_gb * 3 * omp_nthreads, n_procs=omp_nthreads, ) merge = pe.Node(Merge(compress=use_compression), name="merge", mem_gb=mem_gb * 3) # fmt:off workflow.connect([ (iterablesource, split_target, [('std_target', 'in_target')]), (iterablesource, select_tpl, [('std_target', 'template')]), (inputnode, select_std, [('anat2std_xfm', 'anat2std_xfm'), ('templates', 'keys')]), (inputnode, mask_std_tfm, [('bold_mask', 'input_image')]), (inputnode, ref_std_tfm, [('bold_mask', 'input_image')]), (inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]), (inputnode, merge_xforms, [ (('bold2anat', _aslist), 'in2')]), (inputnode, merge, 
[('name_source', 'header_source')]), (inputnode, mask_merge_tfms, [(('bold2anat', _aslist), 'in2')]), (inputnode, bold_to_std_transform, [('bold_split', 'input_image')]), (split_target, select_std, [('space', 'key')]), (select_std, merge_xforms, [('anat2std_xfm', 'in1')]), (select_std, mask_merge_tfms, [('anat2std_xfm', 'in1')]), (split_target, gen_ref, [(('spec', _is_native), 'keep_native')]), (select_tpl, gen_ref, [('out', 'fixed_image')]), (merge_xforms, bold_to_std_transform, [('out', 'transforms')]), (gen_ref, bold_to_std_transform, [('out_file', 'reference_image')]), (gen_ref, mask_std_tfm, [('out_file', 'reference_image')]), (mask_merge_tfms, mask_std_tfm, [('out', 'transforms')]), (gen_ref, ref_std_tfm, [('out_file', 'reference_image')]), (mask_merge_tfms, ref_std_tfm, [('out', 'transforms')]), (bold_to_std_transform, merge, [('out_files', 'in_files')]), ]) # fmt:on output_names = [ "bold_mask_std", "bold_std", "bold_std_ref", "spatial_reference", "template", ] poutputnode = pe.Node( niu.IdentityInterface(fields=output_names), name="poutputnode" ) # fmt:off workflow.connect([ # Connecting outputnode (iterablesource, poutputnode, [ (('std_target', format_reference), 'spatial_reference')]), (merge, poutputnode, [('out_file', 'bold_std')]), (ref_std_tfm, poutputnode, [('output_image', 'bold_std_ref')]), (mask_std_tfm, poutputnode, [('output_image', 'bold_mask_std')]), (select_std, poutputnode, [('key', 'template')]), ]) # fmt:on # Connect parametric outputs to a Join outputnode outputnode = pe.JoinNode( niu.IdentityInterface(fields=output_names), name="outputnode", joinsource="iterablesource", ) # fmt:off workflow.connect([ (poutputnode, outputnode, [(f, f) for f in output_names]), ]) # fmt:on return workflow
14,605
def print_lambda_cost(args): """ Main function. :param args: script arguments. :return: None. """ regions = list_available_lambda_regions() progress_bar = progressbar.ProgressBar(max_value=len(regions)) lambdas_data = [] total_monthly_cost = 0 for region in progress_bar(regions): lambda_client = init_boto_client('lambda', region, args) cloudwatch_client = init_boto_client('cloudwatch', region, args) next_marker = None response = lambda_client.list_functions() while next_marker != '': next_marker = '' functions = response['Functions'] if not functions: continue for function_data in functions: sum_invocations = get_cloudwatch_metric( cloudwatch_client, 'Invocations', 'Sum', function_data['FunctionName'] ) avg_duration = get_cloudwatch_metric( cloudwatch_client, 'Duration', 'Average', function_data['FunctionName'] ) period_cost = calculate_cost( avg_duration, sum_invocations, function_data['MemorySize'] ) lambdas_data.append(( function_data['FunctionName'], region, function_data['MemorySize'], RESULT_NA if avg_duration == 0 else int(avg_duration), RESULT_NA if avg_duration == 0 else int(sum_invocations), RESULT_NA if avg_duration == 0 else '{0:.3f}'.format( period_cost ), RESULT_NA if avg_duration == 0 else '{0:.3f}'.format( period_cost * 30, ), )) total_monthly_cost += (period_cost * 30) # Verify if there is next marker. if 'NextMarker' in response: next_marker = response['NextMarker'] response = lambda_client.list_functions(Marker=next_marker) # Sort data by the cost. lambdas_data.sort( key=lambda x: 0 if x[5] == RESULT_NA else float(x[5]), reverse=True ) print_table_to_console(lambdas_data) print('Total monthly cost estimation: ${0:.3f}'.format(total_monthly_cost)) if not args.csv: return lambdas_data.insert(0, TABLE_HEADERS) with codecs.open(args.csv, 'w', encoding='utf-8') as output_file: for table_row in lambdas_data: output_file.writelines( '{0}\n'.format(','.join([str(x) for x in table_row])) )
14,606
def log_model_info(model):
    """Logs model info"""
    logger.info("Model:\n{}".format(model))
    logger.info("Params: {:,}".format(mu.params_count(model)))
    logger.info("Acts: {:,}".format(mu.acts_count(model)))
    logger.info("Flops: {:,}".format(mu.flops_count(model)))
14,607
def orbit_text(ax, radius, long_asc_node, inclination, longitude, text):
    """
    Position text using orbital coordinates.

    @param radius : radial distance of text
    @type radius : float (degrees)

    @param long_asc_node : longitude (deg) where vector crosses the orbit
    @type long_asc_node : float

    @param inclination : tilt of the inclined plane's axis
    @type inclination : float (degrees)

    @param longitude : longitude along the inclined plane from the intersection
    @type longitude : float (degrees)

    @param text :
    @type text : str
    """
    V = radius * unit_vector(long_asc_node, inclination, longitude)
    ax.text3D(V[0], V[1], V[2], text)
14,608
def _get_indent(node):
    """Determine the indentation level of ``node``."""
    indent = None
    while node:
        indent = find_first(node, TOKEN.INDENT)
        if indent is not None:
            indent = indent.value
            break
        node = node.parent
    return indent
14,609
def distancesarr(image_centroid, object_centroids):
    """gets the distances between image and objects"""
    distances = []
    j = 0
    for row in object_centroids:
        distance = centroid_distance(image_centroid, object_centroids, j)
        distances.append(distance)
        j += 1
    return distances
14,610
def obs_agent_has_neighbour(agent_id: int, factory: Factory) -> np.ndarray:
    """Does this agent have a neighbouring node?"""
    agent: Table = factory.tables[agent_id]
    return np.asarray(
        [
            agent.node.has_neighbour(Direction.up),
            agent.node.has_neighbour(Direction.right),
            agent.node.has_neighbour(Direction.down),
            agent.node.has_neighbour(Direction.left),
        ]
    )
14,611
def P_split_prob(b):
    """Returns the probability of b according to the P_split() distribution.
    """
    """n = b.length
    if n <= 2:
        p = 1.0
    else:
        k = 1  # if the tree is binary and n > 2 it must be splittable.
        #while k < n and not b.splittable(k):
        while not b.splittable(k):
            k += 1
        p = (1.0 / float(n)) * gP_split_prob(b, 0, k) * gP_split_prob(b, k, n)
    return p"""
    return gP_split_prob(b, b.start_index, b.start_index + b.length)
14,612
def t():
    """Or time(). Returns the number of seconds elapsed since the cartridge was run."""
    global begin
    return py_time.time() - begin
14,613
def data_provider(data_provider_function, verbose=True):
    """PHPUnit style data provider decorator"""
    def test_decorator(test_function):
        def new_test_function(self, *args):
            i = 0
            if verbose:
                print("\nTest class : " + get_class_that_defined_method(test_function))
                print("Test function: " + test_function.__name__)
            for data_set in data_provider_function():
                try:
                    if verbose:
                        print(" #" + str(i).rjust(2, '0') + ": ", end='')
                    test_function(self, *data_set)
                    i += 1
                except AssertionError:
                    if verbose:
                        print("Failed with data set #%d: " % i, end='', file=sys.stderr)
                        print(data_set, file=sys.stderr)
                    raise
                else:
                    if verbose:
                        print("passed")
            if verbose:
                print("----------------------------\n")
        return new_test_function
    return test_decorator
14,614
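A small usage sketch for the data_provider decorator above, in the unittest style it targets. verbose=False keeps the example self-contained, since the verbose path relies on a get_class_that_defined_method helper that is not shown in this row.

import unittest

def addition_cases():
    # each tuple is unpacked as the test method's positional arguments
    return [
        (1, 2, 3),
        (-1, 1, 0),
        (0, 0, 0),
    ]

class TestAddition(unittest.TestCase):
    @data_provider(addition_cases, verbose=False)
    def test_addition(self, a, b, expected):
        self.assertEqual(a + b, expected)

if __name__ == "__main__":
    unittest.main()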
def intersect_generators(genlist): """ Intersect generators listed in genlist. Yield items only if they are yielded by all generators in genlist. Threads (via ThreadedGenerator) are used in order to run generators in parallel, so that items can be yielded before generators are exhausted. Threads are stopped when they are either exhausted or Ctrl-C is pressed. Quitting before all generators are finished is attempted if there is no more chance of finding an item in all queues. @param genlist: list of page generators @type genlist: list """ # If any generator is empty, no pages are going to be returned for source in genlist: if not source: debug('At least one generator ({0!r}) is empty and execution was ' 'skipped immediately.'.format(source), 'intersect') return # Item is cached to check that it is found n_gen # times before being yielded. cache = collections.defaultdict(set) n_gen = len(genlist) # Class to keep track of alive threads. # Start new threads and remove completed threads. thrlist = ThreadList() for source in genlist: threaded_gen = ThreadedGenerator(name=repr(source), target=source) threaded_gen.daemon = True thrlist.append(threaded_gen) while True: # Get items from queues in a round-robin way. for t in thrlist: try: # TODO: evaluate if True and timeout is necessary. item = t.queue.get(True, 0.1) # Cache entry is a set of thread. # Duplicates from same thread are not counted twice. cache[item].add(t) if len(cache[item]) == n_gen: yield item # Remove item from cache. # No chance of seeing it again (see later: early stop). cache.pop(item) active = thrlist.active_count() max_cache = n_gen if cache.values(): max_cache = max(len(v) for v in cache.values()) # No. of active threads is not enough to reach n_gen. # We can quit even if some thread is still active. # There could be an item in all generators which has not yet # appeared from any generator. Only when we have lost one # generator, then we can bail out early based on seen items. if active < n_gen and n_gen - max_cache > active: thrlist.stop_all() return except Queue.Empty: pass except KeyboardInterrupt: thrlist.stop_all() finally: # All threads are done. if thrlist.active_count() == 0: return
14,615
def main():
    """Console script for ceda_intake."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', dest='test_mode', action='store_true',
                        help='Create small catalog in test mode')
    parser.add_argument('-p', '--project', type=str, required=True,
                        help='Project catalog to generate')
    args = parser.parse_args()

    make_intake_catalog(args.project, test_mode=args.test_mode)
    return 0
14,616
def check_combinations2_task(infiles, outfile, prefices, subpath, subdir):
    """
    Test combinations with k-tuple = 2
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
14,617
def convert_none(
    key: str, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False
) -> str:
    """Converts a null value into an XML element"""
    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr["type"] = get_xml_type(None)
    attrstring = make_attrstring(attr)
    return f"<{key}{attrstring}></{key}>"
14,618
def main():
    """To try the server."""
    try:
        server = HTTPServer(('', 10300), RequestHandler)
        print('Server started...')
        server.serve_forever()
    except KeyboardInterrupt:
        print('^C received, shutting down server.')
        server.socket.close()
14,619
def maybe_download_and_extract():
    """Download and extract model tar file.

    If the pretrained model we're using doesn't already exist, this function
    downloads it from the TensorFlow.org website and unpacks it into a directory.
    """
    dest_directory = FLAGS.model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' %
                             (filename,
                              float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
14,620
def is_title(ngram, factor=2.0):
    """
    Define the probability of a ngram to be a title.
    Factor is for the confidence coefficient max.
    """
    confidence = 1
    to_test = [n for n in ngram if n not in stop_words]
    for item in to_test:
        if item.istitle():
            confidence += factor / len(to_test)
            # print item, confidence
    return confidence
14,621
def img_to_b64(img_path):
    """Return the base64 encoding of an image file."""
    assert os.path.isfile(img_path)
    with open(img_path, 'rb') as f:
        img = f.read()
    b64 = base64.b64encode(img)
    return b64
14,622
def ul(microliters):
    """Unicode function name for creating microliter volumes"""
    if isinstance(microliters, str) and ':' in microliters:
        return Unit(microliters).to('microliter')
    return Unit(microliters, "microliter")
14,623
def edit_bbox(obj_to_edit, action): """action = `delete` `change_class:new_class_index` `resize_bbox:new_x_left:new_y_top:new_x_right:new_y_bottom` """ global tracker_dir, img_index, image_paths_list, current_img_path, width, height if "change_class" in action: new_class_index = int(action.split(":")[1]) elif "resize_bbox" in action: new_x_left = max(0, int(action.split(":")[1])) new_y_top = max(0, int(action.split(":")[2])) new_x_right = min(width, int(action.split(":")[3])) new_y_bottom = min(height, int(action.split(":")[4])) # 1. initialize bboxes_to_edit_dict # (we use a dict since a single label can be associated with multiple ones in videos) bboxes_to_edit_dict = {} current_img_path = image_paths_list[img_index] bboxes_to_edit_dict[current_img_path] = obj_to_edit # 2. add elements to bboxes_to_edit_dict """ If the bbox is in the json file then it was used by the video Tracker, hence, we must also edit the next predicted bboxes associated to the same `anchor_id`. """ # if `current_img_path` is a frame from a video is_from_video, video_name = is_frame_from_video(current_img_path) if is_from_video: # get json file corresponding to that video json_file_path = "{}.json".format(os.path.join(tracker_dir, video_name)) file_exists, json_file_data = get_json_file_data(json_file_path) # if json file exists if file_exists: # match obj_to_edit with the corresponding json object frame_data_dict = json_file_data["frame_data_dict"] json_object_list = get_json_file_object_list( current_img_path, frame_data_dict ) obj_matched = get_json_object_dict(obj_to_edit, json_object_list) # if match found if obj_matched is not None: # get this object's anchor_id anchor_id = obj_matched["anchor_id"] frame_path_list = get_next_frame_path_list(video_name, current_img_path) frame_path_list.insert(0, current_img_path) if "change_class" in action: # add also the previous frames prev_path_list = get_prev_frame_path_list( video_name, current_img_path ) frame_path_list = prev_path_list + frame_path_list # update json file if contain the same anchor_id for frame_path in frame_path_list: json_object_list = get_json_file_object_list( frame_path, frame_data_dict ) json_obj = get_json_file_object_by_id(json_object_list, anchor_id) if json_obj is not None: bboxes_to_edit_dict[frame_path] = [ json_obj["class_index"], json_obj["bbox"]["xmin"], json_obj["bbox"]["ymin"], json_obj["bbox"]["xmax"], json_obj["bbox"]["ymax"], ] # edit json file if "delete" in action: json_object_list.remove(json_obj) elif "change_class" in action: json_obj["class_index"] = new_class_index elif "resize_bbox" in action: json_obj["bbox"]["xmin"] = new_x_left json_obj["bbox"]["ymin"] = new_y_top json_obj["bbox"]["xmax"] = new_x_right json_obj["bbox"]["ymax"] = new_y_bottom else: break # save the edited data with open(json_file_path, "w") as outfile: json.dump(json_file_data, outfile, sort_keys=True, indent=4) # 3. 
loop through bboxes_to_edit_dict and edit the corresponding annotation files for path in bboxes_to_edit_dict: obj_to_edit = bboxes_to_edit_dict[path] class_index, xmin, ymin, xmax, ymax = map(int, obj_to_edit) for ann_path in get_annotation_paths(path, annotation_formats): if ".txt" in ann_path.name: # edit YOLO file with open(ann_path, "r") as old_file: lines = old_file.readlines() yolo_line = yolo_format( class_index, (xmin, ymin), (xmax, ymax), width, height ) # Idea: height and width ought to be stored ind = findIndex(obj_to_edit) i = 0 with open(ann_path, "w") as new_file: for line in lines: if i != ind: new_file.write(line) elif "change_class" in action: new_yolo_line = yolo_format( new_class_index, (xmin, ymin), (xmax, ymax), width, height, ) new_file.write(new_yolo_line + "\n") elif "resize_bbox" in action: new_yolo_line = yolo_format( class_index, (new_x_left, new_y_top), (new_x_right, new_y_bottom), width, height, ) new_file.write(new_yolo_line + "\n") i = i + 1 else: raise RuntimeError("Support for VOC discontinued.")
14,624
def read_glh(filename):
    """
    Read glitch parameters.

    Parameters
    ----------
    filename : str
        Name of file to read

    Returns
    -------
    glhParams : array
        Array of median glitch parameters
    glhCov : array
        Covariance matrix
    """
    # Extract glitch parameters
    glhFit = np.genfromtxt(filename, skip_header=3)
    glhParams = np.zeros(3)
    glhParams[0] = np.median(glhFit[:, 8])
    glhParams[1] = np.median(glhFit[:, 4])
    glhParams[2] = np.median(glhFit[:, 5])

    # Compute covariance matrix
    tmpFit = np.zeros((len(glhFit[:, 0]), 3))
    tmpFit[:, 0] = glhFit[:, 8]
    tmpFit[:, 1] = glhFit[:, 4]
    tmpFit[:, 2] = glhFit[:, 5]
    glhCov = MinCovDet().fit(tmpFit).covariance_
    # iglhCov = np.linalg.pinv(glhCov, rcond=1e-8)

    return glhParams, glhCov
14,625
def update_user_group(user_group_id, name, **options):
    """
    Update a user group

    :param user_group_id: The id of the user group to update
    :type user_group_id: str
    :param name: Name of the user group
    :type name: str, optional
    :param options: Generic advanced options dict, see online documentation
    :type options: dict, optional

    :return: The updated group
    :rtype: dict
    """
    uri = [USER_GROUPS_SUB_PATH, user_group_id]
    params = {"name": name}
    return _call_account_api("put", uri, params, **options)
14,626
def longitudinal_kmeans(X, n_clusters=5, var_reg=1e-3, fixed_clusters=True,
                        random_state=None):
    """Longitudinal K-Means Algorithm (Genolini and Falissard, 2010)"""
    n_time_steps, n_nodes, n_features = X.shape

    # vectorize latent positions across time
    X_vec = np.moveaxis(X, 0, -1).reshape(n_nodes, n_time_steps * n_features)

    # perform normal k-means on the vectorized features
    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state).fit(X_vec)

    # this method assigns a single cluster to each point across time.
    labels = kmeans.labels_.reshape(-1, 1)
    labels = np.hstack([labels] * n_time_steps).T

    # un-vectorize centers, shape (n_time_steps, n_centers, n_features)
    centers_vec = kmeans.cluster_centers_
    if fixed_clusters:
        centers = np.empty((n_clusters, n_features))
        for k in range(n_clusters):
            muk = centers_vec[k].reshape(-1, n_time_steps).T
            centers[k] = muk.mean(axis=0)  # average position overtime
    else:
        centers = np.empty((n_time_steps, n_clusters, n_features))
        for k in range(n_clusters):
            centers[:, k] = centers_vec[k].reshape(-1, n_time_steps).T

    # calculate cluster variances (assumed spherical and constant over-time)
    variances = np.zeros(n_clusters, dtype=np.float64)
    for k in range(n_clusters):
        for t in range(n_time_steps):
            variances[k] += np.var(X[t][labels[t] == k], axis=0).mean()
        variances[k] /= n_time_steps

    # clusters with a single data point will have zero-variance.
    # assign a fudge factor in this case
    variances[variances == 0.] = var_reg

    return centers, variances, labels
14,627
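A short, hedged example of calling longitudinal_kmeans above on synthetic trajectories. It assumes numpy and sklearn.cluster.KMeans are imported at module level, as the implementation requires; the data sizes are illustrative.

import numpy as np
from sklearn.cluster import KMeans  # required by longitudinal_kmeans

rng = np.random.default_rng(0)
n_time_steps, n_nodes, n_features, n_clusters = 4, 60, 2, 3
true_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, 5.0]])
true_labels = rng.integers(0, n_clusters, size=n_nodes)

# nodes jitter around a fixed cluster center at every time step
X = np.stack([
    true_centers[true_labels] + 0.3 * rng.normal(size=(n_nodes, n_features))
    for _ in range(n_time_steps)
])

centers, variances, labels = longitudinal_kmeans(X, n_clusters=n_clusters, random_state=0)
print(centers.shape)  # (3, 2) because fixed_clusters=True
print(labels.shape)   # (4, 60): one label per node, repeated across time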
def terminate(mgr, qname='input'):
    """DEPRECATED: Use TFNode class instead"""
    logging.info("terminate() invoked")
    mgr.set('state', 'terminating')

    # drop remaining items in the queue
    queue = mgr.get_queue(qname)
    count = 0
    done = False
    while not done:
        try:
            queue.get(block=True, timeout=5)
            queue.task_done()
            count += 1
        except Empty:
            logging.info("dropped {0} items from queue".format(count))
            done = True
14,628
def do_payment( checkout_data, # Dict[str, str] parsed_checkout, # Dict[str, str] enable_itn, # type: bool ): # type: (...) -> Dict[str, str] """ Common test helper: do a payment, and assert results. This takes a checkout's data and page parse (for session info and assertions). This will enable and verify ITN processing if `enable_itn` is true. Return the payment confirmation page's parse. """ def _post_payment(): # type: () -> requests.Response return post_sandbox_payment( parsed_checkout['session_type'], parsed_checkout['session_id'], parsed_checkout['payment_method'], ) if enable_itn: require_itn_configured() with itn_handler(ITN_HOST, ITN_PORT) as itn_queue: # type: Queue response = _post_payment() itn_data = itn_queue.get(timeout=2) else: response = _post_payment() parsed_payment = parse_payfast_page(response) assert { 'payment_summary': parsed_checkout['payment_summary'], 'notice': 'Your payment was successful\n' } == parsed_payment if enable_itn: # Check the ITN result. # Expect whitespace-stripped versions of the checkout data. expected = {name: value.strip(api.CHECKOUT_SIGNATURE_IGNORED_WHITESPACE) for (name, value) in checkout_data.items()} expected_amount_gross = '{:.2f}'.format(decimal.Decimal(checkout_data['amount'].strip())) expected_signature = api.itn_signature(itn_data) assert { 'm_payment_id': expected.get('m_payment_id', ''), 'pf_payment_id': itn_data.get('pf_payment_id', 'MISSING'), 'payment_status': 'COMPLETE', 'item_name': expected.get('item_name', 'MISSING'), 'item_description': expected.get('item_description', ''), 'amount_gross': expected_amount_gross, 'amount_fee': itn_data.get('amount_fee', 'MISSING'), 'amount_net': itn_data.get('amount_net', 'MISSING'), 'custom_str1': expected.get('custom_str1', ''), 'custom_str2': expected.get('custom_str2', ''), 'custom_str3': expected.get('custom_str3', ''), 'custom_str4': expected.get('custom_str4', ''), 'custom_str5': expected.get('custom_str5', ''), 'custom_int1': expected.get('custom_int1', ''), 'custom_int2': expected.get('custom_int2', ''), 'custom_int3': expected.get('custom_int3', ''), 'custom_int4': expected.get('custom_int4', ''), 'custom_int5': expected.get('custom_int5', ''), # The sandbox seems to fix these names, rather than using the checkout submission data. 'name_first': 'Test', 'name_last': 'User 01', 'email_address': expected.get('email_address', 'sbtu01@payfast.co.za'), 'merchant_id': '10000100', 'signature': expected_signature, } == itn_data return parsed_payment
14,629
def nifti_to_numpy(input_folder: str, output_folder: str):
    """Converts all nifti files in a input folder to numpy and saves the data
    and affine matrix into the output folder

    Args:
        input_folder (str): Folder to read the nifti files from
        output_folder (str): Folder to write the numpy arrays to
    """
    for fname in tqdm(sorted(os.listdir(input_folder))):
        if not fname.endswith("nii.gz"):
            continue
        n_file = os.path.join(input_folder, fname)
        nifti = nib.load(n_file)
        np_data = nifti.get_fdata()
        np_affine = nifti.affine
        f_basename = fname.split(".")[0]
        np.save(os.path.join(output_folder, f_basename + "_data.npy"),
                np_data.astype(np.float16))
        np.save(os.path.join(output_folder, f_basename + "_aff.npy"), np_affine)
14,630
def test_gocc_other_relationship_for_expression_exists():
    """Test GOCC Other Relationship For Expression Exists"""
    query = """MATCH (n:ExpressionBioEntity)-[r:CELLULAR_COMPONENT_RIBBON_TERM]-(o:GOTerm:Ontology)
               WHERE o.primaryKey = 'GO:otherLocations'
               RETURN count(r) AS counter"""
    result = execute_transaction(query)
    for record in result:
        assert record["counter"] > 0
14,631
def ensure_sphinx_astropy_installed():
    """
    Make sure that sphinx-astropy is available.

    This returns the available version of sphinx-astropy as well as any
    paths that should be added to sys.path for sphinx-astropy to be available.
    """
    # We've split out the Sphinx part of astropy-helpers into sphinx-astropy
    # but we want it to be auto-installed seamlessly for anyone using
    # build_docs. We check if it's already installed, and if not, we install
    # it to a local .eggs directory and add the eggs to the path (these
    # have to each be added to the path, we can't add them by simply adding
    # .eggs to the path)
    sys_path_inserts = []
    sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:
        raise ImportError("sphinx-astropy needs to be installed to build "
                          "the documentation.")

    return sphinx_astropy_version, sys_path_inserts
14,632
def SPEED_OF_LIGHT():
    """
    The `SPEED_OF_LIGHT` function returns the speed of light in vacuum
    (unit is ms-1) according to the IERS numerical standards (2010).
    """
    return 299792458.0
14,633
def odd_factory(NATIVE_TYPE): # pylint: disable=invalid-name """ Produces a Factory for OddTensors with underlying tf.dtype NATIVE_TYPE. """ assert NATIVE_TYPE in (tf.int32, tf.int64) class Factory: """ Represents a native integer data type. It is currently not considered for general use, but only to support subprotocols of SecureNN. One value of the native dtype is removed in order to obtain an odd modulus. More concretely, this data type wraps either tf.int32 or tf.int64 but removes -1, which is instead mapped to 0. """ def tensor(self, value): """ Wrap `value` in this data type, performing type conversion as needed. Internal use should consider explicit construction as an optimization that avoids redundant correction. """ if isinstance(value, tf.Tensor): if value.dtype is not NATIVE_TYPE: value = tf.cast(value, dtype=NATIVE_TYPE) # no assumptions are made about the tensor here and hence we need to # apply our mapping for invalid values value = _map_minusone_to_zero(value, NATIVE_TYPE) return OddDenseTensor(value) raise TypeError("Don't know how to handle {}".format(type(value))) def constant(self, value): raise NotImplementedError() def variable(self, initial_value): raise NotImplementedError() def placeholder(self, shape): raise NotImplementedError() @property def modulus(self): if NATIVE_TYPE is tf.int32: return 2**32 - 1 if NATIVE_TYPE is tf.int64: return 2**64 - 1 raise NotImplementedError(("Incorrect native type ", "{}.".format(NATIVE_TYPE))) @property def native_type(self): return NATIVE_TYPE def sample_uniform(self, shape, minval: Optional[int] = None, maxval: Optional[int] = None): """Sample a tensor from a uniform distribution.""" assert minval is None assert maxval is None if secure_random.supports_seeded_randomness(): seed = secure_random.secure_seed() return OddUniformTensor(shape=shape, seed=seed) if secure_random.supports_secure_randomness(): sampler = secure_random.random_uniform else: sampler = tf.random_uniform value = _construct_value_from_sampler(sampler=sampler, shape=shape) return OddDenseTensor(value) def sample_bounded(self, shape, bitlength: int): raise NotImplementedError() def stack(self, xs: list, axis: int = 0): raise NotImplementedError() def concat(self, xs: list, axis: int): raise NotImplementedError() master_factory = Factory() class OddTensor(AbstractTensor): """ Base class for the concrete odd tensors types. Implements basic functionality needed by SecureNN subprotocols from a few abstract properties implemented by concrete types below. 
""" @property def factory(self): return master_factory @property @abc.abstractproperty def value(self) -> tf.Tensor: pass @property @abc.abstractproperty def shape(self): pass def __repr__(self) -> str: return '{}(shape={}, NATIVE_TYPE={})'.format( type(self), self.shape, NATIVE_TYPE, ) def __getitem__(self, slc): return OddDenseTensor(self.value[slc]) def __add__(self, other): return self.add(other) def __sub__(self, other): return self.sub(other) def add(self, other): """Add other to this tensor.""" x, y = _lift(self, other) bitlength = math.ceil(math.log2(master_factory.modulus)) with tf.name_scope('add'): # the below avoids redundant seed expansion; can be removed once # we have a (per-device) caching mechanism in place x_value = x.value y_value = y.value z = x_value + y_value with tf.name_scope('correct_wrap'): # we want to compute whether we wrapped around, ie `pos(x) + pos(y) >= m - 1`, # for correction purposes which, since `m - 1 == 1` for signed integers, can be # rewritten as: # -> `pos(x) >= m - 1 - pos(y)` # -> `m - 1 - pos(y) - 1 < pos(x)` # -> `-1 - pos(y) - 1 < pos(x)` # -> `-2 - pos(y) < pos(x)` wrapped_around = _lessthan_as_unsigned(-2 - y_value, x_value, bitlength) z += wrapped_around return OddDenseTensor(z) def sub(self, other): """Subtract other from this tensor.""" x, y = _lift(self, other) bitlength = math.ceil(math.log2(master_factory.modulus)) with tf.name_scope('sub'): # the below avoids redundant seed expansion; can be removed once # we have a (per-device) caching mechanism in place x_value = x.value y_value = y.value z = x_value - y_value with tf.name_scope('correct-wrap'): # we want to compute whether we wrapped around, ie `pos(x) - pos(y) < 0`, # for correction purposes which can be rewritten as # -> `pos(x) < pos(y)` wrapped_around = _lessthan_as_unsigned(x_value, y_value, bitlength) z -= wrapped_around return OddDenseTensor(z) def bits(self, factory=None): if factory is None: return OddDenseTensor(binarize(self.value)) return factory.tensor(binarize(self.value)) def cast(self, factory): if factory is master_factory: return self return factory.tensor(self.value) class OddDenseTensor(OddTensor): """ Represents a tensor with explicit values, as opposed to OddUniformTensor with implicit values. Internal use only and assume that invalid values have already been mapped. """ def __init__(self, value): assert isinstance(value, tf.Tensor) self._value = value @property def value(self) -> tf.Tensor: return self._value @property def shape(self): return self._value.shape class OddUniformTensor(OddTensor): """ Represents a tensor with uniform values defined implicitly through a seed. Internal use only. """ def __init__(self, shape, seed): self._seed = seed self._shape = shape @property def shape(self): return self._shape @property def value(self) -> tf.Tensor: # TODO(Morten) result should be stored in a (per-device) cache with tf.name_scope('expand-seed'): sampler = partial(secure_random.seeded_random_uniform, seed=self._seed) value = _construct_value_from_sampler(sampler=sampler, shape=self._shape) return value def _lift(x, y) -> Tuple[OddTensor, OddTensor]: """ Attempts to lift x and y to compatible OddTensors for further processing. 
""" if isinstance(x, OddTensor) and isinstance(y, OddTensor): assert x.factory == y.factory, "Incompatible types: {} and {}".format( x.factory, y.factory) return x, y if isinstance(x, OddTensor): if isinstance(y, int): return x, x.factory.tensor(np.array([y])) if isinstance(y, OddTensor): if isinstance(x, int): return y.factory.tensor(np.array([x])), y raise TypeError("Don't know how to lift {} {}".format(type(x), type(y))) def _construct_value_from_sampler(sampler, shape): """Sample from sampler and correct for the modified dtype.""" # to get uniform distribution over [min, max] without -1 we sample # [min+1, max] and shift negative values down by one unshifted_value = sampler(shape=shape, dtype=NATIVE_TYPE, minval=NATIVE_TYPE.min + 1, maxval=NATIVE_TYPE.max) value = tf.where(unshifted_value < 0, unshifted_value + tf.ones(shape=unshifted_value.shape, dtype=unshifted_value.dtype), unshifted_value) return value def _lessthan_as_unsigned(x, y, bitlength): """ Performs comparison `x < y` on signed integers *as if* they were unsigned, e.g. `1 < -1`. Taken from Section 2-12, page 23, of [Hacker's Delight](https://www.hackersdelight.org/). """ with tf.name_scope('unsigned-compare'): not_x = tf.bitwise.invert(x) lhs = tf.bitwise.bitwise_and(not_x, y) rhs = tf.bitwise.bitwise_and(tf.bitwise.bitwise_or(not_x, y), x - y) z = tf.bitwise.right_shift(tf.bitwise.bitwise_or(lhs, rhs), bitlength - 1) # turn 0/-1 into 0/1 before returning return tf.bitwise.bitwise_and(z, tf.ones(shape=z.shape, dtype=z.dtype)) def _map_minusone_to_zero(value, native_type): """ Maps all -1 values to zero. """ zeros = tf.zeros(shape=value.shape, dtype=native_type) return tf.where(value == -1, zeros, value) return master_factory
14,634
def filter_by_is_awesome(resources):
    """Return only the resources whose ``is_awesome`` attribute is true.

    Arguments:
        resources {[type]} -- A list of resources
    """
    return [resource for resource in resources if resource.is_awesome]
14,635
def topograph_image(image, step):
    """
    Takes in NxMxC numpy matrix and a step size and a delta
    returns NxMxC numpy matrix with contours in each C cell
    """
    step_gen = _step_range_gen(step)
    new_img = np.array(image, copy=True)
    """step_gen ~ (255, 245, 235, 225,...)"""

    def myfunc(color):
        for tops, bots in window(step_gen, 2):
            if (color <= tops) and (color > bots):
                return tops
            if color > tops:
                break
        return 0

    topograph = np.vectorize(myfunc)
    return new_img if step == 1 else topograph(new_img)
14,636
def _etag(cur):
    """Get current history ETag during request processing."""
    h_from, h_until = web.ctx.ermrest_history_snaprange
    cur.execute(("SELECT _ermrest.tstzencode( GREATEST( %(h_until)s::timestamptz, (" + _RANGE_AMENDVER_SQL + ")) );") % {
        'h_from': sql_literal(h_from),
        'h_until': sql_literal(h_until),
    })
    return cur.fetchone()[0]
14,637
def send_request(url, method='GET', headers=None, param_get=None, data=None):
    """Actually send the request to the target server; redirects are returned
    to the user as-is. Called by request_remote_site_and_parse()."""
    final_hostname = urlsplit(url).netloc
    dbgprint('FinalRequestUrl', url, 'FinalHostname', final_hostname)

    # Only external in-zone domains are allowed (SSRF check layer 2)
    if final_hostname not in allowed_domains_set and not developer_temporary_disable_ssrf_prevention:
        raise ConnectionAbortedError('Trying to access an OUT-OF-ZONE domain(SSRF Layer 2):', final_hostname)

    # set zero data to None instead of b''
    if not data:
        data = None

    prepped_req = requests.Request(
        method, url,
        headers=headers,
        params=param_get,
        data=data,
    ).prepare()

    # get session
    if enable_connection_keep_alive:
        _session = connection_pool.get_session(final_hostname)
    else:
        _session = requests.Session()

    # Send real requests
    parse.time["req_start_time"] = time()
    r = _session.send(
        prepped_req,
        proxies=requests_proxies,
        allow_redirects=False,
        stream=enable_stream_content_transfer,
        verify=not developer_do_not_verify_ssl,
    )
    # remote request time
    parse.time["req_time_header"] = time() - parse.time["req_start_time"]
    dbgprint('RequestTime:', parse.time["req_time_header"], v=4)

    # Some debug output
    # print(r.request.headers, r.headers)
    if verbose_level >= 3:
        dbgprint(r.request.method, "FinalSentToRemoteRequestUrl:", r.url, "\nRem Resp Stat: ", r.status_code)
        dbgprint("RemoteRequestHeaders: ", r.request.headers)
        if data:
            dbgprint('RemoteRequestRawData: ', r.request.body)
        dbgprint("RemoteResponseHeaders: ", r.headers)

    return r
14,638
def MakeListOfPoints(charts, bot, test_name, buildername, buildnumber, supplemental_columns): """Constructs a list of point dictionaries to send. The format output by this function is the original format for sending data to the perf dashboard. Args: charts: A dictionary of chart names to chart data, as generated by the log processor classes (see process_log_utils.GraphingLogProcessor). bot: A string which comes from perf_id, e.g. linux-release. test_name: A test suite name, e.g. sunspider. buildername: Builder name (for stdio links). buildnumber: Build number (for stdio links). supplemental_columns: A dictionary of extra data to send with a point. Returns: A list of dictionaries in the format accepted by the perf dashboard. Each dictionary has the keys "master", "bot", "test", "value", "revision". The full details of this format are described at http://goo.gl/TcJliv. """ results = [] # The master name used for the dashboard is the CamelCase name returned by # GetActiveMaster(), and not the canonical master name with dots. master = slave_utils.GetActiveMaster() for chart_name, chart_data in sorted(charts.items()): point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_') for trace_name, trace_values in sorted(chart_data['traces'].items()): is_important = trace_name in chart_data.get('important', []) test_path = _TestPath(test_name, chart_name, trace_name) result = { 'master': master, 'bot': bot, 'test': test_path, 'revision': point_id, 'supplemental_columns': {} } # Add the supplemental_columns values that were passed in after the # calculated revision column values so that these can be overwritten. result['supplemental_columns'].update(revision_columns) result['supplemental_columns'].update( _GetStdioUriColumn(test_name, buildername, buildnumber)) result['supplemental_columns'].update(supplemental_columns) result['value'] = trace_values[0] result['error'] = trace_values[1] # Add other properties to this result dictionary if available. if chart_data.get('units'): result['units'] = chart_data['units'] if is_important: result['important'] = True results.append(result) return results
14,639
def set_standard_part(elements: List[int]) -> None:
    """Sets covers (wall, opening or floor) to standard part

    Args:
        elements (List[int]): element IDs
    """
14,640
def parse_csv(string):
    """
    Rough port of wq/pandas.js to Python.  Useful for validating CSV output
    generated by Django REST Pandas.
    """
    if not string.startswith(','):
        data = []
        for row in csv.DictReader(StringIO(string)):
            for key, val in row.items():
                try:
                    row[key] = float(val)
                except ValueError:
                    pass
            data.append(row)
        return [{
            'data': data
        }]

    reader = csv.reader(StringIO(string))
    val_cols = None
    val_start = None
    id_cols = None
    for row in reader:
        if row[0] == '' and not val_cols:
            val_start = row.count('')
            val_cols = row[val_start:]
            col_meta = [{} for v in val_cols]
        elif row[-1] != '' and val_cols and not id_cols:
            key = row[0]
            for i, meta in enumerate(row[val_start:]):
                col_meta[i].update(**{key: meta})
        elif row[-1] == '' and not id_cols:
            id_cols = row[:row.index('')]
            meta_index = {}
            meta_i = 0
            datasets = []
            for i, ds1 in enumerate(col_meta):
                if i in meta_index:
                    continue
                meta_index[i] = meta_i
                meta_i += 1
                datasets.append(ds1)
                if i < len(col_meta):
                    for j, ds2 in enumerate(col_meta[i + 1:]):
                        if ds1 == ds2:
                            meta_index[i + j + 1] = i
            for d in datasets:
                d['data'] = []
        elif val_cols and id_cols:
            ids = {
                key: val
                for key, val in zip(id_cols, row[:len(id_cols)])
            }
            records = {}
            for i, val in enumerate(row[len(id_cols):]):
                mi = meta_index[i]
                if mi not in records:
                    data = ids.copy()
                else:
                    data = records[mi]
                try:
                    val = float(val)
                except ValueError:
                    pass
                if val != '':
                    data[val_cols[i]] = val
                records[mi] = data
            for mi, data in records.items():
                datasets[mi]['data'].append(data)
    return datasets
14,641
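A quick illustration of the simple (non-multi-index) branch of parse_csv above, using an in-memory CSV string; it assumes the csv module and io.StringIO are imported, as the function expects.

csv_text = (
    "date,value\n"
    "2020-01-01,1.5\n"
    "2020-01-02,2.0\n"
)

result = parse_csv(csv_text)
# numeric fields are converted to float; everything else stays a string
print(result)
# [{'data': [{'date': '2020-01-01', 'value': 1.5},
#            {'date': '2020-01-02', 'value': 2.0}]}]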
def list_system_configurations():
    """
    List all the system configuration parameters

    Returns:

        .. code-block:: python

            [
                {
                    "ParameterName": "ParameterValue"
                },
                ...
            ]

    Raises:
        500 - ChaliceViewError
    """
    try:
        print("Listing all the system configuration parameters")

        system_table = ddb_resource.Table(SYSTEM_TABLE_NAME)

        response = system_table.scan(
            ConsistentRead=True
        )

        configs = response["Items"]

        while "LastEvaluatedKey" in response:
            response = system_table.scan(
                ExclusiveStartKey=response["LastEvaluatedKey"],
                ConsistentRead=True
            )
            configs.extend(response["Items"])

    except Exception as e:
        print(f"Unable to list the system configuration parameters: {str(e)}")
        raise ChaliceViewError(f"Unable to list the system configuration parameters: {str(e)}")

    else:
        return replace_decimals(configs)
14,642
def main():
    """Run bot."""
    # Create the Updater and pass it your bot's token.
    # Make sure to set use_context=True to use the new context based callbacks
    # Post version 12 this will no longer be necessary
    defaults = Defaults(timeout=60)
    updater = Updater(os.environ['BOT_TOKEN'], use_context=True, defaults=defaults)

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", start))
    dp.add_handler(CommandHandler("status", status))
    dp.add_handler(CommandHandler("stop", stop))
    dp.add_handler(CommandHandler("set", set_timer,
                                  pass_args=True,
                                  pass_job_queue=True,
                                  pass_chat_data=True))
    dp.add_handler(CommandHandler("unset", unset, pass_chat_data=True))

    j = updater.job_queue
    job_minute = j.run_repeating(callback_minute, interval=60, first=0)
    # time is assumed in UTC here
    job_weekend = j.run_daily(callback_weekend, time(hour=17, minute=0), days=(5, 6))
    # job_once = j.run_once(callback_weekend, 10)

    # Start the Bot
    updater.start_polling()

    # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
    # SIGABRT. This should be used most of the time, since start_polling() is
    # non-blocking and will stop the bot gracefully.
    updater.idle()
14,643
def add_dbnsfp_to_vds(hail_context, vds, genome_version, root="va.dbnsfp", subset=None, verbose=True):
    """Add dbNSFP fields to the VDS"""
    if genome_version == "37":
        dbnsfp_schema = DBNSFP_SCHEMA_37
    elif genome_version == "38":
        dbnsfp_schema = DBNSFP_SCHEMA_38
    else:
        raise ValueError("Invalid genome_version: " + str(genome_version))

    expr = convert_vds_schema_string_to_annotate_variants_expr(
        root=root,
        other_source_fields=dbnsfp_schema,
        other_source_root="vds",
    )
    if verbose:
        print(expr)

    dbnsfp_vds = read_dbnsfp_vds(hail_context, genome_version, subset=subset)
    return vds.annotate_variants_vds(dbnsfp_vds, expr=expr)
14,644
def look_for_time_position(target, source, begin_pos=0):
    """
    Given a time stamp, find its position in time series.

    If target does NOT exist in source, then return the value of the smallest
    time point that is larger than the given target.

    Parameters
    -------------
    target : DateTime obj
        A datetime obj to look for
    source : list, type=DateTime
        A list of DateTime objects to search from
    begin_pos : int, default=0
        The start position to search. Default to search from the beginning.

    Returns
    ---------------
    position : int
        The location
    """
    # TODO: make use of the unused parameter - begin_pos
    for i, t in enumerate(source[begin_pos:]):
        if t >= target:
            ans = i + begin_pos
            # return ans

    insert_index = bisect.bisect(source, target, lo=begin_pos)
    if insert_index >= len(source):
        # print("Error (look_for_time_position): the time is out of scope.")
        return -1
    return insert_index

    """
    # TODO: use binary search to speed up
    for i, t in enumerate(source):
        if t >= target:
            return i
    print("Error (look_for_time_position): the time is out of scope.")
    return -1
    """
14,645
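A brief example of the intended behaviour of look_for_time_position above when the target is absent from the series: the position of the first later time point is returned, and a target past the end yields -1.

from datetime import datetime

series = [
    datetime(2021, 1, 1, 10, 0),
    datetime(2021, 1, 1, 10, 5),
    datetime(2021, 1, 1, 10, 10),
]

# 10:07 is not in the series, so the position of 10:10 is returned
print(look_for_time_position(datetime(2021, 1, 1, 10, 7), series))  # 2

# a target after the last point is out of scope
print(look_for_time_position(datetime(2021, 1, 1, 11, 0), series))  # -1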
def get_wav2vec_preds_for_wav( path_to_wav: str, model, processor, device: torch.device, bs: int = 8, loading_step: float = 10, extra_step: float = 1, ) -> str: """ Gets binary predictions for wav file with a wav2vec 2.0 model Args: path_to_wav (str): absolute path to wav file model: a wav2vec 2.0 model processor: a wav2vec 2.0 processor device: a torch.device object bs (int, optional): Batch size. Defaults to 8. loading_step (float, optional): length of fixed segments. Defaults to 10. extra_step (float, optional): size of extra step to load before and after. Defaults to 1. Returns: str: binary predictions """ def my_collate_fn(batch: list[np.array]) -> list[np.array]: return [example for example in batch] dataset = TokenPredDataset(path_to_wav, extra_step, loading_step) dataloader = DataLoader( dataset, batch_size=bs, shuffle=False, collate_fn=my_collate_fn, num_workers=min(cpu_count() // 2, 4), drop_last=False, ) # for the extra frames loaded before and after each segment correction = int(extra_step / dataset.wav2vec_frame_length) all_preds = [] i = 0 with torch.no_grad(): for wav_batch in iter(dataloader): tokenized_audio = processor( wav_batch, return_tensors="pt", padding="longest", sampling_rate=16000 ) input_values = tokenized_audio.input_values.to(device) attention_mask = tokenized_audio.attention_mask.to(device) logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) for j, preds in enumerate(predicted_ids.tolist()): true_length = ( attention_mask[j].cpu().numpy().sum() / dataset.sr / dataset.wav2vec_frame_length ) # apply corrections if i == 0: preds = preds[:-correction] true_length -= correction elif i == len(dataset) - 1: preds = preds[correction:] true_length -= correction else: preds = preds[correction:-correction] true_length -= 2 * correction # remove padding all_preds.extend(preds[: int(true_length)]) i += 1 tokens_preds = processor.tokenizer.convert_ids_to_tokens(all_preds) predictions = "".join(["0" if char == "<pad>" else "1" for char in tokens_preds]) return predictions
14,646
def get_template(parsed_args):
    """Initialize jinja2 and return the right template"""
    env = jinja2.Environment(
        loader=jinja2.PackageLoader(__name__, TEMPLATES_PATH),
        trim_blocks=True,
        lstrip_blocks=True,
    )

    # Make the missingvalue() function available in the template so that the
    # template fails to render if we don't provide the values it needs.
    env.globals["missingvalue"] = missingvalue
    env.filters["format_hex"] = format_hex

    if "arty" in parsed_args.board or "vc707" in parsed_args.board or "vcu118" in parsed_args.board:
        template = env.get_template("fpga.cfg")
    elif "hifive" in parsed_args.board:
        template = env.get_template("hifive.cfg")
    else:
        print("Board %s is not supported!" % parsed_args.board, file=sys.stderr)
        sys.exit(1)

    return template
14,647
def get_node_cmd(config):
    """
    Get the node CLI call for Least Cost Xmission

    Parameters
    ----------
    config : reVX.config.least_cost_xmission.LeastCostXmissionConfig
        Least Cost Xmission config object.

    Returns
    -------
    cmd : str
        CLI call to submit to SLURM execution.
    """
    args = ['-n {}'.format(SLURM.s(config.name)),
            'local',
            '-cost {}'.format(SLURM.s(config.cost_fpath)),
            '-feats {}'.format(SLURM.s(config.features_fpath)),
            '-cap {}'.format(SLURM.s(config.capacity_class)),
            '-res {}'.format(SLURM.s(config.resolution)),
            '-xcfg {}'.format(SLURM.s(config.xmission_config)),
            '-gids {}'.format(SLURM.s(config.sc_point_gids)),
            '-nn {}'.format(SLURM.s(config.nn_sinks)),
            '-buffer {}'.format(SLURM.s(config.clipping_buffer)),
            '-bmult {}'.format(SLURM.s(config.barrier_mult)),
            '-mw {}'.format(SLURM.s(config.execution_control.max_workers)),
            '-o {}'.format(SLURM.s(config.dirout)),
            '-log {}'.format(SLURM.s(config.logdir)),
            ]

    if config.log_level == logging.DEBUG:
        args.append('-v')

    cmd = ('python -m reVX.least_cost_xmission.least_cost_xmission_cli {}'
           .format(' '.join(args)))
    logger.debug('Submitting the following cli call:\n\t{}'.format(cmd))

    return cmd
14,648
def header_lines(filename):
    """Read the first five lines of a file and return them as a list of strings."""
    with open(filename, mode='rb') as f:
        return [f.readline().decode().rstrip() for _ in range(5)]
14,649
def deactivate_venv(
        venv: str = 'mrsm',
        color: bool = True,
        debug: bool = False
    ) -> bool:
    """
    Remove a virtual environment from sys.path (if it's been activated).
    """
    import sys
    global active_venvs

    if venv is None:
        return True

    if debug:
        from meerschaum.utils.debug import dprint
        dprint(f"Deactivating virtual environment '{venv}'...", color=color)

    if venv in active_venvs:
        _locks['active_venvs'].acquire()
        active_venvs.remove(venv)
        _locks['active_venvs'].release()

    if sys.path is None:
        return False

    target = venv_target_path(venv, debug=debug)
    _locks['sys.path'].acquire()
    if str(target) in sys.path:
        sys.path.remove(str(target))
    _locks['sys.path'].release()

    if debug:
        dprint(f'sys.path: {sys.path}', color=color)

    return True
14,650
async def load_last_cotd(chat_id: int):
    """Load the time when the user has last received his card of the day.

    Args:
        chat_id (int): user chat_id
    """
    QUERY = "SELECT last_cotd FROM users WHERE id = %(id)s"
    async with aconn.cursor() as cur:
        await cur.execute(QUERY, {"id": chat_id})
        record = await cur.fetchone()
        return record[0] if record else None
14,651
def load_model(targets, model_name='umxhq', device='cpu'):
    """
    target model path can be either <target>.pth, or <target>-sha256.pth
    (as used on torchub)
    """
    model_path = Path(model_name).expanduser()
    if not model_path.exists():
        raise NotImplementedError
    else:
        # load model from disk
        with open(Path(model_path, str(len(targets)) + '.json'), 'r') as stream:
            results = json.load(stream)

        target_model_path = Path(model_path) / "model.pth"
        state = torch.load(
            target_model_path,
            map_location=device
        )

        max_bin = utils.bandwidth_to_max_bin(
            44100,
            results['args']['nfft'],
            results['args']['bandwidth']
        )

        unmix = model.OpenUnmixSingle(
            n_fft=results['args']['nfft'],
            n_hop=results['args']['nhop'],
            nb_channels=results['args']['nb_channels'],
            hidden_size=results['args']['hidden_size'],
            max_bin=max_bin
        )

        unmix.load_state_dict(state)
        unmix.stft.center = True
        unmix.eval()
        unmix.to(device)
        print('loadmodel function done')
        return unmix
14,652
def model_gradient_descent( f: Callable[..., float], x0: np.ndarray, *, args=(), rate: float = 1e-1, sample_radius: float = 1e-1, n_sample_points: int = 100, n_sample_points_ratio: Optional[float] = None, rate_decay_exponent: float = 0.0, stability_constant: float = 0.0, sample_radius_decay_exponent: float = 0.0, tol: float = 1e-8, known_values: Optional[Tuple[List[np.ndarray], List[float]]] = None, max_iterations: Optional[int] = None, max_evaluations: Optional[int] = None) -> scipy.optimize.OptimizeResult: """Model gradient descent algorithm for black-box optimization. The idea of this algorithm is to perform gradient descent, but estimate the gradient using a surrogate model instead of, say, by finite-differencing. The surrogate model is a least-squared quadratic fit to points sampled from the vicinity of the current iterate. This algorithm works well when you have an initial guess which is in the convex neighborhood of a local optimum and you want to converge to that local optimum. It's meant to be used when the function is stochastic. Args: f: The function to minimize. x0: An initial guess. args: Additional arguments to pass to the function. rate: The learning rate for the gradient descent. sample_radius: The radius around the current iterate to sample points from to build the quadratic model. n_sample_points: The number of points to sample in each iteration. n_sample_points_ratio: This specifies the number of points to sample in each iteration as a coefficient of the number of points required to exactly determine a quadratic model. The number of sample points will be this coefficient times (n+1)(n+2)/2, rounded up, where n is the number of parameters. Setting this overrides n_sample_points. rate_decay_exponent: Controls decay of learning rate. In each iteration, the learning rate is changed to the base learning rate divided by (i + 1 + S)**a, where S is the stability constant and a is the rate decay exponent (this parameter). stability_constant: Affects decay of learning rate. In each iteration, the learning rate is changed to the base learning rate divided by (i + 1 + S)**a, where S is the stability constant (this parameter) and a is the rate decay exponent. sample_radius_decay_exponent: Controls decay of sample radius. tol: The algorithm terminates when the difference between the current iterate and the next suggested iterate is smaller than this value. known_values: Any prior known values of the objective function. This is given as a tuple where the first element is a list of points and the second element is a list of the function values at those points. max_iterations: The maximum number of iterations to allow before termination. max_evaluations: The maximum number of function evaluations to allow before termination. 
Returns: Scipy OptimizeResult """ if known_values is not None: known_xs, known_ys = known_values known_xs = [np.copy(x) for x in known_xs] known_ys = [np.copy(y) for y in known_ys] else: known_xs, known_ys = [], [] if max_iterations is None: max_iterations = np.inf if max_evaluations is None: max_evaluations = np.inf n = len(x0) if n_sample_points_ratio is not None: n_sample_points = int( np.ceil(n_sample_points_ratio * (n + 1) * (n + 2) / 2)) _, f = wrap_function(f, args) res = OptimizeResult() current_x = np.copy(x0) res.x_iters = [] # initializes as lists res.xs_iters = [] res.ys_iters = [] res.func_vals = [] res.model_vals = [None] res.fun = 0 total_evals = 0 num_iter = 0 converged = False message = None while num_iter < max_iterations: current_sample_radius = (sample_radius / (num_iter + 1)**sample_radius_decay_exponent) # Determine points to evaluate # in ball around current point new_xs = [np.copy(current_x)] + [ current_x + _random_point_in_ball(n, current_sample_radius) for _ in range(n_sample_points) ] if total_evals + len(new_xs) > max_evaluations: message = 'Reached maximum number of evaluations.' break # Evaluate points res.xs_iters.append(new_xs) new_ys = [f(x) for x in new_xs] res.ys_iters.append(new_ys) total_evals += len(new_ys) known_xs.extend(new_xs) known_ys.extend(new_ys) # Save function value res.func_vals.append(new_ys[0]) res.x_iters.append(np.copy(current_x)) res.fun = res.func_vals[-1] # Determine points to use to build model model_xs = [] model_ys = [] for x, y in zip(known_xs, known_ys): if np.linalg.norm(x - current_x) < current_sample_radius: model_xs.append(x) model_ys.append(y) # Build and solve model model_gradient, model = _get_least_squares_model_gradient( model_xs, model_ys, current_x) # calculate the gradient and update the current point gradient_norm = np.linalg.norm(model_gradient) decayed_rate = ( rate / (num_iter + 1 + stability_constant)**rate_decay_exponent) # Convergence criteria if decayed_rate * gradient_norm < tol: converged = True message = 'Optimization converged successfully.' break # Update current_x -= decayed_rate * model_gradient res.model_vals.append( model.predict([-decayed_rate * model_gradient])[0]) num_iter += 1 if converged: final_val = res.func_vals[-1] else: final_val = f(current_x) res.func_vals.append(final_val) if message is None: message = 'Reached maximum number of iterations.' res.x_iters.append(current_x) total_evals += 1 res.x = current_x res.fun = final_val res.nit = num_iter res.nfev = total_evals res.message = message return res
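A rough usage sketch, assuming the module's helpers (wrap_function, the random-ball sampler, and the least-squares model fit) are available; the objective and hyperparameters below are illustrative, not tuned:

# Minimize a noisy 2-D quadratic centered at (1, 1); values are illustrative only.
import numpy as np

def noisy_quadratic(x):
    return float(np.sum((x - 1.0) ** 2) + 1e-3 * np.random.randn())

result = model_gradient_descent(
    noisy_quadratic,
    np.zeros(2),
    rate=0.5,
    sample_radius=0.2,
    n_sample_points=30,
    max_iterations=50,
)
print(result.x, result.fun)  # the final iterate should end up close to [1, 1]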
14,653
def index(): """Show the index.""" return render_template( "invenio_archivematica/index.html", module_name=_('Invenio-Archivematica'))
14,654
def on_pod_event(event, body: Body, logger: Logger, **kwargs): """ Handle low-level MySQL server pod events. The events we're interested in are: - when a container restarts in a Pod (e.g. because of mysqld crash) """ # TODO ensure that the pod is owned by us while True: try: pod = MySQLPod.from_json(body) member_info = pod.get_membership_info() ready = pod.check_containers_ready() if pod.phase != "Running" or pod.deleting or not member_info: logger.debug( f"ignored pod event: pod={pod.name} containers_ready={ready} deleting={pod.deleting} phase={pod.phase} member_info={member_info}") return mysql_restarts = pod.get_container_restarts("mysql") event = "" if g_ephemeral_pod_state.get(pod, "mysql-restarts") != mysql_restarts: event = "mysql-restarted" containers = [ f"{c.name}={'ready' if c.ready else 'not-ready'}" for c in pod.status.container_statuses] conditions = [ f"{c.type}={c.status}" for c in pod.status.conditions] logger.info(f"POD EVENT {event}: pod={pod.name} containers_ready={ready} deleting={pod.deleting} phase={pod.phase} member_info={member_info} restarts={mysql_restarts} containers={containers} conditions={conditions}") cluster = pod.get_cluster() if not cluster: logger.info( f"Ignoring event for pod {pod.name} belonging to a deleted cluster") return with ClusterMutex(cluster, pod): cluster_ctl = ClusterController(cluster) # Check if a container in the pod restarted if ready and event == "mysql-restarted": cluster_ctl.on_pod_restarted(pod, logger) g_ephemeral_pod_state.set( pod, "mysql-restarts", mysql_restarts) # Check if we should refresh the cluster status status = cluster_ctl.probe_status_if_needed(pod, logger) if status == diagnose.ClusterDiagStatus.UNKNOWN: raise kopf.TemporaryError( f"Cluster has unreachable members. status={status}", delay=15) break except kopf.TemporaryError as e: # TODO review this # Manually handle retries, the event handler isn't getting called again # by kopf (maybe a bug or maybe we're using it wrong) logger.info(f"{e}: retrying after {e.delay} seconds") if e.delay: time.sleep(e.delay) continue
14,655
def get_functions_and_macros_from_doc(pmdk_path): """ Returns names of functions and macros in a list based on names of files from the doc directory. """ path_to_functions_and_macros = path.join(pmdk_path, 'doc') functions_and_macros_from_doc = [] for _, _, files in walk(path_to_functions_and_macros): for f in files: if not f.endswith('.3'): continue # Files with extension '.3' have the same name as functions and # macros of PMDK library. 'pmemobj_action' is excluded, because # it is not a name of the function. if f.startswith('pmemobj_action'): continue if not 'libpmem2' in PMDK_LIBRARIES and f.startswith('pmem2'): continue functions_and_macros_from_doc.append(f.split('.')[0]) return functions_and_macros_from_doc
14,656
def kmv_tet_polyset(m, mf, mi): """Create the polynomial set for a KMV space on a tetrahedron.""" poly = polynomial_set(3, 1, m) # TODO: check this for axes in [(x[0], x[1]), (x[0], x[2]), (x[1], x[2]), (x[1] - x[0], x[2] - x[0])]: b = axes[0] * axes[1] * (1 - axes[0] - axes[1]) for i in range(mf - 2): for j in range(mf - 2 - i): poly.append(x[0] ** i * x[1] ** j * x[2] ** (mf - 3 - i - j) * b) b = x[0] * x[1] * x[2] * (1 - x[0] - x[1] - x[2]) for i in range(mi - 3): for j in range(mi - 3 - i): poly.append(x[0] ** i * x[1] ** j * x[2] ** (mf - 4 - i - j) * b) return poly
14,657
def list_faqs(IndexId=None, NextToken=None, MaxResults=None): """ Gets a list of FAQ lists associated with an index. See also: AWS API Documentation Exceptions :example: response = client.list_faqs( IndexId='string', NextToken='string', MaxResults=123 ) :type IndexId: string :param IndexId: [REQUIRED]\nThe index that contains the FAQ lists.\n :type NextToken: string :param NextToken: If the result of the previous request to ListFaqs was truncated, include the NextToken to fetch the next set of FAQs. :type MaxResults: integer :param MaxResults: The maximum number of FAQs to return in the response. If there are fewer results in the list, this response contains only the actual results. :rtype: dict ReturnsResponse Syntax { 'NextToken': 'string', 'FaqSummaryItems': [ { 'Id': 'string', 'Name': 'string', 'Status': 'CREATING'|'UPDATING'|'ACTIVE'|'DELETING'|'FAILED', 'CreatedAt': datetime(2015, 1, 1), 'UpdatedAt': datetime(2015, 1, 1) }, ] } Response Structure (dict) -- NextToken (string) -- The ListFaqs operation returns a page of FAQs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextPage token. Include the token in the next request to the ListFaqs operation to return the next page of FAQs. FaqSummaryItems (list) -- information about the FAQs associated with the specified index. (dict) -- Provides information about a frequently asked questions and answer contained in an index. Id (string) -- The unique identifier of the FAQ. Name (string) -- The name that you assigned the FAQ when you created or updated the FAQ. Status (string) -- The current status of the FAQ. When the status is ACTIVE the FAQ is ready for use. CreatedAt (datetime) -- The UNIX datetime that the FAQ was added to the index. UpdatedAt (datetime) -- The UNIX datetime that the FAQ was last updated. Exceptions kendra.Client.exceptions.ValidationException kendra.Client.exceptions.ResourceNotFoundException kendra.Client.exceptions.ThrottlingException kendra.Client.exceptions.AccessDeniedException kendra.Client.exceptions.InternalServerException :return: { 'NextToken': 'string', 'FaqSummaryItems': [ { 'Id': 'string', 'Name': 'string', 'Status': 'CREATING'|'UPDATING'|'ACTIVE'|'DELETING'|'FAILED', 'CreatedAt': datetime(2015, 1, 1), 'UpdatedAt': datetime(2015, 1, 1) }, ] } :returns: kendra.Client.exceptions.ValidationException kendra.Client.exceptions.ResourceNotFoundException kendra.Client.exceptions.ThrottlingException kendra.Client.exceptions.AccessDeniedException kendra.Client.exceptions.InternalServerException """ pass
14,658
def word_detokenize(tokens): """ A heuristic attempt to undo the Penn Treebank tokenization above. Pass the --pristine-output flag if no attempt at detokenizing is desired. """ regexes = [ # Newlines (re.compile(r'[ ]?\\n[ ]?'), r'\n'), # Contractions (re.compile(r"\b(can)\s(not)\b"), r'\1\2'), (re.compile(r"\b(d)\s('ye)\b"), r'\1\2'), (re.compile(r"\b(gim)\s(me)\b"), r'\1\2'), (re.compile(r"\b(gon)\s(na)\b"), r'\1\2'), (re.compile(r"\b(got)\s(ta)\b"), r'\1\2'), (re.compile(r"\b(lem)\s(me)\b"), r'\1\2'), (re.compile(r"\b(mor)\s('n)\b"), r'\1\2'), (re.compile(r"\b(wan)\s(na)\b"), r'\1\2'), # Ending quotes (re.compile(r"([^' ]) ('ll|'re|'ve|n't)\b"), r"\1\2"), (re.compile(r"([^' ]) ('s|'m|'d)\b"), r"\1\2"), (re.compile(r'[ ]?”'), r'"'), # Double dashes (re.compile(r'[ ]?--[ ]?'), r'--'), # Parens and brackets (re.compile(r'([\[\(\{\<]) '), r'\1'), (re.compile(r' ([\]\)\}\>])'), r'\1'), (re.compile(r'([\]\)\}\>]) ([:;,.])'), r'\1\2'), # Punctuation (re.compile(r"([^']) ' "), r"\1' "), (re.compile(r' ([?!\.])'), r'\1'), (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r'\1\2\3'), (re.compile(r'([#$]) '), r'\1'), (re.compile(r' ([;%:,])'), r'\1'), # Starting quotes (re.compile(r'(“)[ ]?'), r'"') ] text = ' '.join(tokens) for regexp, substitution in regexes: text = regexp.sub(substitution, text) return text.strip()
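A quick illustration of how the regexes reassemble Penn Treebank-style tokens (the token list is made up):

# Contractions, curly quotes and punctuation are re-attached by the regex passes.
tokens = ['“', 'Do', "n't", 'panic', ',', '”', 'she', 'said', '.']
print(word_detokenize(tokens))  # -> "Don't panic," she said.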
14,659
def googlenet_paper(pretrained=False, **kwargs): """ GoogLeNet Model as given in the official Paper. """ kwargs['aux'] = True if 'aux' not in kwargs else kwargs['aux'] kwargs['replace5x5with3x3'] = False if 'replace5x5with3x3' not in kwargs \ else kwargs['replace5x5with3x3'] return get_net(GoogLeNet, pretrained=pretrained, pretrain_url=None, fname='googlenet', kwargs_net=kwargs, attr='classifier', inn=1024)
14,660
def get_parser(): """ Parses the command line arguments .. todo:: Adapter services related to alerts/messaging, and local device/Edge management :returns: An object with attributes based on the arguments :rtype: argparse.ArgumentParser """ parser = argparse.ArgumentParser(description="Inmarsat Modbus Proxy Adapter for ClearBlade") parser.add_argument('--url', default='https://platform.clearblade.com', help="The URL of the ClearBlade Platform the adapter will connect to.") parser.add_argument('--systemKey', required=True, help="The System Key of the ClearBlade platform System the adapter will connect to.") parser.add_argument('--systemSecret', required=True, help="The System Secret of the ClearBlade plaform System the adapter will connect to.") parser.add_argument('--deviceName', default=ADAPTER_DEVICE_ID, help="The id/name of the device that will be used for device \ authentication against the ClearBlade platform or Edge, defined \ within the devices table of the ClearBlade platform.") parser.add_argument('--deviceKey', required=True, help="The active key of the device that will be used for device \ authentication against the ClearBlade platform or Edge, defined within \ the Devices table of the ClearBlade platform.") parser.add_argument('--_slaves', dest='slaves_collection', default=DEVICE_PROXY_CONFIG_COLLECTION, help="The ClearBlade Collection name with RTU proxy definitions") parser.add_argument('--data', dest='data_collection', default=DATA_COLLECTION, help="The ClearBlade Collection name with proxy data") parser.add_argument('--net', dest='net_if', default='eth0', help="The physical port of the network listener") parser.add_argument('--ip', dest='ip_address', default='localhost', help="The local IP Address the PyModbus server will listen on") parser.add_argument('--tcp', dest='tcp_port', default=502, help="The local TCP Port the PyModbus server will listen on") parser.add_argument('--logLevel', dest='log_level', default='INFO', choices=['INFO', 'DEBUG'], help="The level of logging that will be utilized by the adapter.") parser.add_argument('--heartbeat', dest='heartbeat', default=30, help="The logging heartbeat interval in seconds.") # parser.add_argument('--messagingUrl', dest='messagingURL', default='localhost', # help="The MQTT URL of the ClearBlade Platform or Edge the adapter will connect to.") # # parser.add_argument('--messagingPort', dest='messagingPort', default=1883, # help="The MQTT Port of the ClearBlade Platform or Edge the adapter will connect to.") # # parser.add_argument('--topicRoot', dest='adapterTopicRoot', default='modbusProxy', # help="The root of MQTT topics this adapter will subscribe and publish to.") # # parser.add_argument('--deviceProvisionSvc', dest='deviceProvisionSvc', default='', # help="The name of a service that can be invoked to provision IoT devices \ # within the ClearBlade Platform or Edge.") # # parser.add_argument('--deviceHealthSvc', dest='deviceHealthSvc', default='', # help="The name of a service that can be invoked to provide the health of \ # an IoT device to the ClearBlade Platform or Edge.") # # parser.add_argument('--deviceLogsSvc', dest='deviceLogsSvc', default='', # help="The name of a service that can be invoked to provide IoT device \ # logging information to the ClearBlade Platform or Edge.") # # parser.add_argument('--deviceStatusSvc', dest='deviceStatusSvc', default='', # help="The name of a service that can be invoked to provide the status of \ # an IoT device to the ClearBlade Platform or Edge.") # # 
parser.add_argument('--deviceDecommissionSvc', dest='deviceDecommissionSvc', default='', # help="The name of a service that can be invoked to decommission IoT \ # devices within the ClearBlade Platform or Edge.") return parser
14,661
def get_prefix(path):
    """Generate a prefix for qresource based on the passed path.

    Args:
        path (str): Relative path of a folder of resources from project dir.

    Returns:
        str: Prefix corresponding to `path`
    """
    # Remove trailing separator from path if present
    if path[-1] == os.sep:
        path = path[:-1]

    # Return the prefix corresponding to the path
    return "/" + os.path.basename(path)
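For illustration, with a made-up relative path:

# 'assets/icons' is hypothetical; a trailing separator is stripped before taking the basename.
print(get_prefix('assets/icons' + os.sep))  # -> "/icons"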
14,662
def getInfoLabel(infotag): """ Returns an InfoLabel as a string. infotag : string - infoTag for value you want returned. List of InfoTags - http://xbmc.org/wiki/?title=InfoLabels example: - label = xbmc.getInfoLabel('Weather.Conditions') """ pass
14,663
def is_regex(obj): """Cannot do type check against SRE_Pattern, so we use duck typing.""" return hasattr(obj, 'match') and hasattr(obj, 'pattern')
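A quick demonstration of the duck-typing check:

import re
print(is_regex(re.compile(r'\d+')))  # True: compiled patterns have .match and .pattern
print(is_regex(r'\d+'))              # False: a plain string has neither attribute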
14,664
def GetDefaultScopeLister(compute_client, project=None): """Constructs default zone/region lister.""" scope_func = { compute_scope.ScopeEnum.ZONE: functools.partial(zones_service.List, compute_client), compute_scope.ScopeEnum.REGION: functools.partial(regions_service.List, compute_client), compute_scope.ScopeEnum.GLOBAL: lambda _: [ResourceStub(name='')] } def Lister(scopes, _): prj = project or properties.VALUES.core.project.Get(required=True) results = {} for scope in scopes: results[scope] = scope_func[scope](prj) return results return Lister
14,665
def find_user(username): """ Function that will find a user by their username and return the user """ return User.find_by_username(username)
14,666
def filter_by_author(resources: List[Resource], author: Author) -> List[Resource]:
    """Return the resources written by the specified author.

    Arguments:
        resources {List[Resource]} -- A list of resources
        author {Author} -- The author to filter the resources by
    """
    return [resource for resource in resources if resource.author == author]
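A hedged sketch; the Resource and Author constructors below are hypothetical (the real classes may differ), and only the .author comparison matters for this helper:

# Hypothetical construction of Author/Resource objects for illustration.
alice = Author(name="Alice")
resources = [
    Resource(title="Intro", author=alice),
    Resource(title="Guide", author=Author(name="Bob")),
]
print(filter_by_author(resources, alice))  # -> only the resource authored by Alice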
14,667
def cpu_bound_op(exec_time, *data): """ Simulation of a long-running CPU-bound operation :param exec_time: how long this operation will take :param data: data to "process" (sum it up) :return: the processed result """ logger.info("Running cpu-bound op on {} for {} seconds".format(data, exec_time)) time.sleep(exec_time) return sum(data)
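For example, the call below "processes" three numbers for half a second and returns their sum:

result = cpu_bound_op(0.5, 1, 2, 3)
print(result)  # 6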
14,668
def elastic_transform_approx( img, alpha, sigma, alpha_affine, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, value=None, random_state=None, ): """Elastic deformation of images as described in [Simard2003]_ (with modifications for speed). Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5 .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for Convolutional Neural Networks applied to Visual Document Analysis", in Proc. of the International Conference on Document Analysis and Recognition, 2003. """ if random_state is None: random_state = np.random.RandomState(1234) height, width = img.shape[:2] # Random affine center_square = np.float32((height, width)) // 2 square_size = min((height, width)) // 3 alpha = float(alpha) sigma = float(sigma) alpha_affine = float(alpha_affine) pts1 = np.float32( [ center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size], center_square - square_size, ] ) pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32) matrix = cv2.getAffineTransform(pts1, pts2) warp_fn = _maybe_process_in_chunks( cv2.warpAffine, M=matrix, dsize=(width, height), flags=interpolation, borderMode=border_mode, borderValue=value ) img = warp_fn(img) dx = random_state.rand(height, width).astype(np.float32) * 2 - 1 cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx) dx *= alpha dy = random_state.rand(height, width).astype(np.float32) * 2 - 1 cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy) dy *= alpha x, y = np.meshgrid(np.arange(width), np.arange(height)) map_x = np.float32(x + dx) map_y = np.float32(y + dy) remap_fn = _maybe_process_in_chunks( cv2.remap, map1=map_x, map2=map_y, interpolation=interpolation, borderMode=border_mode, borderValue=value ) return remap_fn(img)
14,669
def uncompleted_task(request):
    """ Mark a completed task as incomplete when the user unchecks it."""
    task_list = TaskList.objects.all()
    context = {'task_list': task_list}
    if request.POST:
        task_is_unchecked = request.POST['task_is_unchecked']
        task_unchecked_id = request.POST['task_unchecked_id']
        get_task = TaskList.objects.get(pk=task_unchecked_id)
        if task_is_unchecked:
            get_task.is_completed = False
            get_task.save()
        if request.is_ajax():
            return JsonResponse({'task_is_unchecked': task_is_unchecked,
                                 'task_unchecked_id': task_unchecked_id,
                                 'unchecked_view_text': "From View UnChecked:"}, status=200)
    return render(request, 'todos/home.html', context)
14,670
def load_settings_from_file(filepath="settings.ini", in_omelib=True): """Reload settings from a different settings file. Arguments --------- filepath: The path to the settings file to use. in_omelib: Whether or not the path given is a relative path from the omelib directory. """ if in_omelib: filepath = join(omelib_directory, filepath) config.read(filepath) # attempt to intellegently determine more difficult settings if not config.has_option("DATABASE", "user"): if "USERNAME" in os.environ: # windows user = os.environ["USERNAME"] elif "USER" in os.environ: # unix user = os.environ["USER"] config.set("DATABASE", "user", user) # executables if not config.has_option("EXECUTABLES", "psql"): psql = which("psql91") if psql is None: psql = which("psql") config.set("EXECUTABLES", "psql", psql) if not config.has_option("EXECUTABLES", "R"): R = which("R") config.set("EXECUTABLES", "R", R) if not config.has_option("EXECUTABLES", "Rscript"): Rscript = which("Rscript") config.set("EXECUTABLES", "Rscript", Rscript) if not config.has_option("EXECUTABLES", "primer3"): primer3 = which("primer3_core") config.set("EXECUTABLES", "primer3", primer3) if not config.has_option("EXECUTABLES", "cufflinks"): cufflinks = which("cufflinks") config.set("EXECUTABLES", "cufflinks", cufflinks) if not config.has_option("EXECUTABLES", "java"): java = which("java") config.set("EXECUTABLES", "java", java) # save options as variables self.postgres_user = config.get("DATABASE", "postgres_user") self.postgres_password = config.get("DATABASE", "postgres_password") if len(self.postgres_password) > 0: os.environ["PGPASSWORD"] = self.postgres_password self.postgres_database = config.get("DATABASE", "postgres_database") self.postgres_host = config.get("DATABASE", "postgres_host") self.postgres_port = config.get("DATABASE", "postgres_port") self.postgres_test_database = config.get("DATABASE", "postgres_test_database") self.psql = _escape_space(config.get("EXECUTABLES", "psql")) self.R = _escape_space(config.get("EXECUTABLES", "R")) self.Rscript = _escape_space(config.get("EXECUTABLES", "Rscript")) self.primer3 = _escape_space(config.get("EXECUTABLES", "primer3")) self.cufflinks = config.get("EXECUTABLES", "cufflinks") self.java = config.get("EXECUTABLES", "java") # make a psql string with the database options included self.psql_full = "%s --host=%s --username=%s --port=%s " % \ (self.psql, self.postgres_host, self.postgres_user, self.postgres_port) try: self.data_directory = expanduser(config.get('DATA', 'data_directory')) except NoOptionError: raise Exception('data_directory was not supplied in settings.ini') # set default here, after getting the data directory try: self.model_genome = expanduser(config.get('DATA', 'model_genome')) except NoOptionError: raise Exception('model_genome path was not supplied in settings.ini') # these are optional for data_pref in ['compartment_names', 'reaction_id_prefs', 'reaction_hash_prefs', 'gene_reaction_rule_prefs', 'data_source_preferences', 'model_dump_directory', 'model_published_directory', 'model_polished_directory']: try: setattr(self, data_pref, expanduser(config.get('DATA', data_pref))) except NoOptionError: setattr(self, data_pref, None)
14,671
def jitter(t, X, amountS): """Return a random number (intended as a time offset, i.e. jitter) within the range +/-amountS The jitter is different (but constant) for any given day in t (epoch secs) and for any value X (which might be e.g. deviceID)""" dt = ISO8601.epoch_seconds_to_datetime(t) dayOfYear = int(dt.strftime("%j")) year = int(dt.strftime("%Y")) uniqueValue = year*367+dayOfYear+abs(hash(X)) # Note that hash is implementation-dependent so may give different results on different platforms rand = utils.hashIt(uniqueValue,100) sign = int(str(uniqueValue)[0]) < 5 v = (rand / 100.0) * amountS if sign: v = -v return v
14,672
def check_DNA(DNA_sequence):
    """Check that we have a DNA sequence without junk"""
    # Remove all spaces and upper-case the sequence
    DNA_sequence = DNA_sequence.replace(' ', '').upper()
    # Check that we only have DNA bases in the seq
    ok = 1
    garbage = {}
    DNA_bases = ['A', 'G', 'C', 'T']
    for letter in DNA_sequence:
        if letter not in DNA_bases:
            ok = None
            garbage[letter] = 1
    if ok:
        return ok, DNA_sequence
    return ok, list(garbage.keys())
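Example calls, one clean sequence and one containing junk characters:

ok, seq = check_DNA('atg caa ttg')
print(ok, seq)    # 1 ATGCAATTG
ok, junk = check_DNA('ATGXZ')
print(ok, junk)   # None ['X', 'Z']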
14,673
def draw(): """Clears and draws objects to the screen""" screen.fill(WHITE) object_pos = current_object.get_pos() # Draw the Tetris object for i in range(len(object_pos)): pygame.draw.rect(screen, current_object.color, [object_pos[i][0] * BLOCK_SIZE, object_pos[i][1] * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE], 0) # Draw the blocks for y in range(len(blocks)): for x in range(len(blocks[0])): if blocks[y][x] == 1: pygame.draw.rect(screen, RED, [x * BLOCK_SIZE, y * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE], 0)
14,674
def read():
    """
        read() : Fetches documents from Firestore collection as JSON
        warehouse : Return document that matches query ID
        all_warehouses : Return all documents
    """
    try:
        warehouse_id = request.args.get('id')
        if warehouse_id:
            warehouse = warehouse_ref.document(warehouse_id).get()
            return jsonify(warehouse.to_dict()), 200
        else:
            all_warehouses = [doc.to_dict() for doc in warehouse_ref.stream()]
            return jsonify(all_warehouses), 200
    except Exception as e:
        return f"An Error Occurred: {e}"
14,675
def transposeC(array, axes=None): """ Returns the (conjugate) transpose of the input `array`. Parameters ---------- array : array_like Input array that needs to be transposed. Optional -------- axes : 1D array_like of int or None. Default: None If *None*, reverse the dimensions. Else, permute the axes according to the values given. Returns ------- array_t : :obj:`~numpy.ndarray` object Input `array` with its axes transposed. Examples -------- Using an array with only real values returns its transposed variant: >>> array = np.array([[1, 2.5], [3.5, 5]]) >>> array array([[ 1. , 2.5], [ 3.5, 5. ]]) >>> transposeC(array) array([[ 1. , 3.5], [ 2.5, 5. ]]) And using an array containing complex values returns its conjugate transposed: >>> array = np.array([[1, -2+4j], [7.5j, 0]]) >>> array array([[ 1.+0.j , -2.+4.j ], [ 0.+7.5j, 0.+0.j ]]) >>> transposeC(array) array([[ 1.-0.j , 0.-7.5j], [-2.-4.j , 0.-0.j ]]) """ # Take the transpose of the conjugate or the input array and return it return(np.transpose(np.conjugate(array), axes))
14,676
def _partition_files(files: List[str], num_partitions: int) -> List[List[str]]: """Split files into num_partitions partitions of close to equal size""" id_to_file = defaultdict(list) for f in files: id_to_file[_sample_id_from_path(f)[0]].append(f) sample_ids = np.array(list(id_to_file)) np.random.shuffle(sample_ids) split_ids = np.array_split(sample_ids, num_partitions) splits = [ sum((id_to_file[sample_id] for sample_id in split), []) for split in split_ids ] return [split for split in splits if split]
14,677
def test_tti(shape, space_order, kernel): """ This first test compare the solution of the acoustic wave-equation and the TTI wave-eqatuon with all anisotropy parametrs to 0. The two solutions should be the same. """ if kernel == 'shifted': space_order *= 2 to = 2 so = space_order // 2 if kernel == 'shifted' else space_order nbpml = 10 origin = [0. for _ in shape] spacing = [10. for _ in shape] vp = 1.5 * np.ones(shape) # Constant model for true velocity model = Model(origin=origin, shape=shape, vp=vp, spacing=spacing, nbpml=nbpml, space_order=space_order, epsilon=np.zeros(shape), delta=np.zeros(shape), theta=np.zeros(shape), phi=np.zeros(shape)) # Define seismic data and parameters f0 = .010 dt = model.critical_dt t0 = 0.0 tn = 350.0 time_range = TimeAxis(start=t0, stop=tn, step=dt) nt = time_range.num last = (nt - 2) % 3 indlast = [(last + 1) % 3, last % 3, (last-1) % 3] # Generate a wavefield as initial condition source = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range) source.coordinates.data[0, :] = np.array(model.domain_size) * .5 receiver = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=1) acoustic = AcousticWaveSolver(model, source=source, receiver=receiver, time_order=2, space_order=so) rec, u1, _ = acoustic.forward(save=False) source.data.fill(0.) # Solvers acoustic = AcousticWaveSolver(model, source=source, receiver=receiver, time_order=2, space_order=so) solver_tti = AnisotropicWaveSolver(model, source=source, receiver=receiver, time_order=2, space_order=space_order) # Create new wavefield object restart forward computation u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=so) u.data[0:3, :] = u1.data[indlast, :] acoustic.forward(save=False, u=u, time_M=10, src=source) utti = TimeFunction(name='u', grid=model.grid, time_order=to, space_order=so) vtti = TimeFunction(name='v', grid=model.grid, time_order=to, space_order=so) utti.data[0:to+1, :] = u1.data[indlast[:to+1], :] vtti.data[0:to+1, :] = u1.data[indlast[:to+1], :] solver_tti.forward(u=utti, v=vtti, kernel=kernel, time_M=10, src=source) normal_u = u.data[:] normal_utti = .5 * utti.data[:] normal_vtti = .5 * vtti.data[:] res = linalg.norm((normal_u - normal_utti - normal_vtti).reshape(-1))**2 res /= np.linalg.norm(normal_u.reshape(-1))**2 log("Difference between acoustic and TTI with all coefficients to 0 %2.4e" % res) assert np.isclose(res, 0.0, atol=1e-4)
14,678
def run_vscode_command( command: str, *args: str, wait_for_finish: bool = False, expect_response: bool = False, decode_json_arguments: bool = False, ): """Execute command via vscode command server.""" # NB: This is a hack to work around the fact that talon doesn't support # variable argument lists args = list( filter( lambda x: x is not NotSet, args, ) ) if decode_json_arguments: args = [json.loads(arg) for arg in args] port_file_path = Path(gettempdir()) / "vscode-port" original_contents = port_file_path.read_text() # Issue command to VSCode telling it to update the port file. Because only # the active VSCode instance will accept keypresses, we can be sure that # the active VSCode instance will be the one to write the port. if is_mac: actions.key("cmd-shift-alt-p") else: actions.key("ctrl-shift-alt-p") # Wait for the VSCode instance to update the port file. This generally # happens within the first millisecond, but we give it 3 seconds just in # case. start_time = time.monotonic() new_contents = port_file_path.read_text() sleep_time = 0.0005 while True: if new_contents != original_contents: try: decoded_contents = json.loads(new_contents) # If we're successful, we break out of the loop break except ValueError: # If we're not successful, we keep waiting; we assume it was a # partial write from VSCode pass time.sleep(sleep_time) sleep_time *= 2 if time.monotonic() - start_time > 3.0: raise Exception("Timed out waiting for VSCode to update port file") new_contents = port_file_path.read_text() port = decoded_contents["port"] response = requests.post( f"http://localhost:{port}/execute-command", json={ "commandId": command, "args": args, "waitForFinish": wait_for_finish, "expectResponse": expect_response, }, timeout=(0.05, 3.05), ) response.raise_for_status() actions.sleep("25ms") if expect_response: return response.json()
14,679
def flush_cache(roles=['webapp_servers', 'celery_servers']): """ Flushes the cache. """ if _current_host_has_role(roles): print("=== FLUSHING CACHE ===") with cd(env.REMOTE_CODEBASE_PATH): run("workon %s && ./manage.py ft_clear_cache" % env.REMOTE_VIRTUALENV_NAME)
14,680
def run_with_timeout(proc, timeout, input=None): """ Run Popen process with given timeout. Kills the process if it does not finish in time. You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise the output will be None. The returncode is 999 if the process was killed. :returns: (returncode, stdout string, stderr string) """ output = [] def target(): output.extend(proc.communicate(input)) thread = threading.Thread(target=target) thread.daemon = True thread.start() killed = False thread.join(timeout) if thread.is_alive(): proc.terminate() killed = True thread.join() returncode = proc.returncode if killed: returncode = 999 return returncode, output[0], output[1]
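A usage sketch with a short POSIX command; stdout/stderr must be set to subprocess.PIPE, as the docstring notes:

import subprocess
proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
code, out, err = run_with_timeout(proc, timeout=5)
print(code, out)  # 0 b'hello\n' (a return code of 999 would mean the process was killed)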
14,681
def set_is_significant_to_zero(rfam_acc, rfamseq_acc): """ Fetch the correct db entry from full_region table according to rfam_acc and rfamseq_acc and set is_significant field to zero (0) rfam_acc: RNA family accession rfamseq_acc: Family specific sequence accession """ # maybe have this working out of the list which will be returned from # connect to db cnx = RfamDB.connect() # get a new buffered cursor cursor = cnx.cursor(buffered=True) # update is_significant field to 0 query = ("UPDATE full_region SET is_significant=0 " "WHERE rfam_acc=\'%s\' AND rfamseq_acc=\'%s\'") % (rfam_acc, rfamseq_acc) cursor.execute(query) cnx.commit() cursor.close() RfamDB.disconnect(cnx)
14,682
def dot(x, y, alpha=0):
    """
    Compute alpha = xy, storing the result in alpha

    x and y can be row and/or column vectors.  If necessary, an
    implicit transposition happens.
    """
    assert type(x) is matrix and len(x.shape) == 2, \
        "laff.dot: vector x must be a 2D numpy.matrix"
    assert type(y) is matrix and len(y.shape) == 2, \
        "laff.dot: vector y must be a 2D numpy.matrix"

    if type(alpha) is matrix:
        m_alpha, n_alpha = alpha.shape
        assert m_alpha == 1 and n_alpha == 1, \
            "laff.dot: alpha must be a 1 x 1 matrix"
        alpha[0, 0] = 0
    else:
        alpha = 0

    m_x, n_x = x.shape
    m_y, n_y = y.shape

    assert m_x == 1 or n_x == 1, "laff.dot: x is not a vector"
    assert m_y == 1 or n_y == 1, "laff.dot: y is not a vector"

    if m_x == 1 and m_y == 1:  # x is a row, y is a row
        assert n_x == n_y, "laff.dot: size mismatch between x and y"

        if type(alpha) is matrix:
            for i in range(n_x):
                alpha[0, 0] += y[0, i] * x[0, i]
        else:
            for i in range(n_x):
                alpha += y[0, i] * x[0, i]

    elif n_x == 1 and n_y == 1:  # x is a column, y is a column
        assert m_x == m_y, "laff.dot: size mismatch between x and y"

        if type(alpha) is matrix:
            for i in range(m_x):
                alpha[0, 0] += y[i, 0] * x[i, 0]
        else:
            for i in range(m_x):
                alpha += y[i, 0] * x[i, 0]

    elif m_x == 1 and n_y == 1:  # x is a row, y is a column
        assert n_x == m_y, "laff.dot: size mismatch between x and y"

        if type(alpha) is matrix:
            for i in range(n_x):
                alpha[0, 0] += y[i, 0] * x[0, i]
        else:
            for i in range(n_x):
                alpha += y[i, 0] * x[0, i]

    elif n_x == 1 and m_y == 1:  # x is a column, y is a row
        assert m_x == n_y, "laff.dot: size mismatch between x and y"

        if type(alpha) is matrix:
            for i in range(m_x):
                alpha[0, 0] += y[0, i] * x[i, 0]
        else:
            for i in range(m_x):
                alpha += y[0, i] * x[i, 0]

    return alpha
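A small sanity check, assuming `matrix` is `numpy.matrix` as imported by the surrounding module:

from numpy import matrix
x = matrix([[1.0, 2.0, 3.0]])      # row vector
y = matrix([[4.0], [5.0], [6.0]])  # column vector
print(dot(x, y))                   # 32.0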
14,683
def main(argv=None): """Run ExperimentRunner locally on ray. To run this example on cloud (e.g. gce/ec2), use the setup scripts: 'softlearning launch_example_{gce,ec2} examples.development <options>'. Run 'softlearning launch_example_{gce,ec2} --help' for further instructions. """ run_example_local('examples.goal_conditioned_classifier_rl', argv)
14,684
def create_buildpack(buildpack_name, buildpack_path, position=1): """Creates a buildpack. Always enables it afterwards (--enable flag). Args: buildpack_name (str): Name for the buildpack. buildpack_path (str): Path to the buildpack's artifact. position (int): Priority of the new buildpack in the buildpack list. Raises: CommandFailedError: When the command fails (returns non-zero code). """ run_command([CF, 'create-buildpack', buildpack_name, buildpack_path, str(position), '--enable'])
14,685
def load_train_test_data( train_data_path, label_binarizer, test_data_path=None, test_size=None, data_format="list"): """ train_data_path: path. path to JSONL data that contains text and tags fields label_binarizer: MultiLabelBinarizer. multilabel binarizer instance used to transform tags test_data_path: path, default None. path to test JSONL data similar to train_data test_size: float, default None. if test_data_path not provided, dictates portion to be used as test data_format: str, default list. controls data are returned as lists or generators for memory efficiency """ if data_format == "list": if test_data_path: X_train, Y_train, _ = load_data(train_data_path, label_binarizer) X_test, Y_test, _ = load_data(test_data_path, label_binarizer) else: X, Y, _ = load_data(train_data_path, label_binarizer) X_train, X_test, Y_train, Y_test = train_test_split( X, Y, random_state=42, test_size=test_size ) else: if test_data_path: X_train = partial(yield_texts, train_data_path) Y_train = partial(yield_tags, train_data_path, label_binarizer) X_test = partial(yield_texts, test_data_path) Y_test = partial(yield_tags, test_data_path, label_binarizer) else: # need to split train / test and shuffle in memory efficient way raise NotImplementedError return X_train, X_test, Y_train, Y_test
14,686
def delete_courrier_affaire_view(request):
    """
    Delete the file once it has been downloaded
    """
    settings = request.registry.settings
    filename = request.params['filename']
    temporary_directory = settings["temporary_directory"]
    file_path = os.path.join(temporary_directory, filename)

    if os.path.exists(file_path):
        os.remove(file_path)
        return "ok"
    else:
        raise exc.HTTPNotFound("Le fichier est indisponible")
14,687
def get_token() -> str: """Obtains the Access Token from the Authorization Header""" # Get the authorization header authorization_header = request.headers.get("Authorization", None) # Raise an error if no Authorization error is found if not authorization_header: payload = { "code": "authorization_header_missing", "description": "Authorization header is expected", } raise AuthError(payload, 401) authorization_header_parts = authorization_header.split() # We are expecting the Authorization header to contain a Bearer token if authorization_header_parts[0].lower() != "bearer": payload = { "code": "invalid_header", "description": "Authorization header must be a Bearer token", } raise AuthError(payload, 401) # The Authorization header is prefixed with Bearer, but does not contain the actual token elif len(authorization_header_parts) == 1: payload = {"code": "invalid_header", "description": "Token not found"} raise AuthError(payload, 401) # We only expect 2 parts, "Bearer" and the access token elif len(authorization_header_parts) > 2: payload = { "code": "invalid_header", "description": "Authorization header must be a valid Bearer token", } raise AuthError(payload, 401) # If all checks out, we return the access token return authorization_header_parts[1]
14,688
def load_image(path, grayscale=False):
    """Load an image from disk.

    Args:
        path (str): Path to image
        grayscale (bool): True loads image as grayscale, False loads image as color

    Returns:
        numpy.ndarray: Image loaded from path
    """
    # Load the image as grayscale or color depending on the flag
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) if grayscale else cv2.imread(path)
    return img
14,689
def _get_matching_s3_keys(bucket, prefix='', suffix=''): """Generate all the matching keys in an S3 bucket. Parameters ---------- bucket : str Name of the S3 bucket prefix : str, optional Only fetch keys that start with this prefix suffix : str, optional Only fetch keys that end with this suffix Yields ------ key : str S3 keys that match the prefix and suffix """ s3 = get_s3_client() kwargs = {'Bucket': bucket, "MaxKeys": 1000} # If the prefix is a single string (not a tuple of strings), we can # do the filtering directly in the S3 API. if isinstance(prefix, str): kwargs['Prefix'] = prefix while True: # The S3 API response is a large blob of metadata. # 'Contents' contains information about the listed objects. resp = s3.list_objects_v2(**kwargs) try: contents = resp['Contents'] except KeyError: return for obj in contents: key = obj['Key'] if key.startswith(prefix) and key.endswith(suffix): yield key # The S3 API is paginated, returning up to 1000 keys at a time. # Pass the continuation token into the next response, until we # reach the final page (when this field is missing). try: kwargs['ContinuationToken'] = resp['NextContinuationToken'] except KeyError: break
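A hypothetical usage sketch; the bucket name is made up and configured AWS credentials are assumed:

# Iterate over .csv keys under a prefix (bucket and prefix are illustrative).
for key in _get_matching_s3_keys('my-example-bucket', prefix='exports/', suffix='.csv'):
    print(key)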
14,690
def frames_player(frames, n_snapshots=None):
    """
    Shows each frame from the data at roughly 15 fps.
    If n_snapshots is given, snapshots are taken and saved from the video
    with interval = len(frames) // n_snapshots.
    :param frames: sequence of frames (numpy arrays) to display
    :param n_snapshots: number of snapshots to save, or None to disable snapshots
    :return: None
    """
    # For taking snapshots at equal intervals (guard against None or zero)
    every_steps = len(frames) // n_snapshots if n_snapshots else 0
    step = 0
    # Display the video frame by frame
    for frame in frames:
        cv2.imshow('Frame', frame)
        cv2.waitKey(25)
        if every_steps and step % every_steps == 0:
            cv2.imwrite('snapshot_%d.png' % step, frame)
        step += 1
        # Loop at roughly 15 frames/sec
        time.sleep(1 / 15)
14,691
def test_days(): """ Ensure d suffix is converted to days """ assert_equal(datetime.timedelta(days=1), convert_delta("1d"))
14,692
def convert_op_str(qubit_op_str, op_coeff):
    """
    Convert a qubit operator string into OpenFermion format.
    """
    converted_op = [f'{qOp_str}{qNo_index}' for qNo_index, qOp_str in enumerate(qubit_op_str) if qOp_str != 'I']
    separator = ' '  # space
    openfermion_qubit_op = QubitOperator(separator.join(converted_op), op_coeff)
    return openfermion_qubit_op
14,693
def __state_resolving_additional_facts(conversation, message, just_acknowledged):
    """
    Bot is asking the user questions to resolve additional facts
    :param conversation: The current conversation
    :param message: The user's message
    :param just_acknowledged: Whether or not an acknowledgement just happened. Used to skip fact
                              resolution and instead ask a question immediately.
    :return: A question to ask the user
    """
    question = None
    # Retrieve current_fact from conversation
    current_fact = conversation.current_fact
    if just_acknowledged:
        question = Responses.fact_question(current_fact.name)
    else:
        # Extract entity from message based on current fact
        fact_entity_value = __extract_entity(current_fact.name, current_fact.type, message)
        if fact_entity_value is not None:
            next_fact = fact_service.submit_resolved_fact(conversation, current_fact, fact_entity_value)
            new_fact_id = next_fact['fact_id']
            new_fact = None
            if new_fact_id:
                new_fact = db.session.query(Fact).get(new_fact_id)
            conversation.current_fact = new_fact
            # Additional facts remain to be asked
            if fact_service.has_additional_facts(conversation):
                # Additional fact limit reached, time for a new prediction
                if fact_service.count_additional_facts_resolved(conversation) % MAX_ADDITIONAL_FACTS == 0:
                    conversation.bot_state = BotState.GIVING_PREDICTION
                else:
                    question = Responses.fact_question(new_fact.name)
            else:
                # There are no more additional facts! Give a prediction
                conversation.bot_state = BotState.GIVING_PREDICTION
    return question
14,694
def load_dataset(path): """Load json file and store fields separately.""" with open(path) as f: data = json.load(f)['data'] output = {'qids': [], 'questions': [], 'answers': [], 'contexts': [], 'qid2cid': []} for article in data: for paragraph in article['paragraphs']: output['contexts'].append(paragraph['context']) for qa in paragraph['qas']: output['qids'].append(qa['id']) output['questions'].append(qa['question']) output['qid2cid'].append(len(output['contexts']) - 1) if 'answers' in qa: output['answers'].append(qa['answers']) return output
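A usage sketch, assuming a SQuAD-style JSON file at an illustrative path:

dataset = load_dataset('data/SQuAD-v1.1-dev.json')  # path is made up for the example
print(len(dataset['questions']), 'questions over', len(dataset['contexts']), 'contexts')
print(dataset['questions'][0], '->', dataset['answers'][0])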
14,695
def generate_rand_enex_by_prob_nb(shape: tp.Shape, entry_prob: tp.MaybeArray[float], exit_prob: tp.MaybeArray[float], entry_wait: int, exit_wait: int, entry_pick_first: bool, exit_pick_first: bool, flex_2d: bool, seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]: """Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another. `entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`. Specify `seed` to make output deterministic.""" if seed is not None: np.random.seed(seed) temp_idx_arr = np.empty((shape[0],), dtype=np.int_) return generate_enex_nb( shape, entry_wait, exit_wait, entry_pick_first, exit_pick_first, rand_by_prob_choice_nb, (entry_prob, entry_pick_first, temp_idx_arr, flex_2d), rand_by_prob_choice_nb, (exit_prob, exit_pick_first, temp_idx_arr, flex_2d) )
14,696
def bresenham_3d_line_of_sight(observers, targets, raster, obs_height_field, tar_height_field, radius, raster_crs, fresnel=False): """Naive bresenham line of sight algorithm""" writePoints = [] lines_for_shp = [] start, end = 0, 0 raster_transform = raster.GetGeoTransform() pixelWidth = raster_transform[1] pix = pixelWidth pixelHeight = raster_transform[5] xOrigin = raster_transform[0] yOrigin = raster_transform[3] raster_band = raster.GetRasterBand(1) info = [] for obs in range(observers.GetFeatureCount()): observer = observers.GetFeature(obs) # get Observer point geometry obs_geom = observer.geometry() try: obs_x = obs_geom.GetPoints()[0][0] obs_y = obs_geom.GetPoints()[0][1] except ValueError: debugHere() # offset x,y values to equivalent raster index values obs_x_off = int((obs_x - xOrigin) / pixelWidth) obs_y_off = int((obs_y - yOrigin) / pixelHeight) mask_x = obs_x - radius mask_y = obs_y - radius mask_x_pix = int((mask_x - xOrigin) / pixelWidth) mask_y_pix = int((mask_y - yOrigin) / pixelHeight) radius_pix = int(radius / pixelWidth) mask_width = radius_pix * 2 mask_height = radius_pix * 2 if mask_x_pix < 0: # mask has overflow beyond raster edge mask_width += mask_x_pix # clip mask width by the overflow mask_x_pix = 0 # set mask origin x to edge of raster mask_x = xOrigin if mask_y_pix < 0: mask_height += mask_y_pix mask_y_pix = 0 mask_y = yOrigin # truncate positive overflow if mask_width + mask_x_pix > raster_band.XSize: overflow = raster_band.XSize - (mask_width + mask_x_pix) mask_width += overflow if mask_height + mask_y_pix > raster_band.YSize: overflow = raster_band.YSize - (mask_height + mask_y_pix) mask_height += overflow mask_x_pix = int(mask_x_pix) mask_y_pix = int(mask_y_pix) mask_width = int(mask_width) mask_height = int(mask_height) new_obs_x = obs_x_off - mask_x_pix new_obs_y = mask_y_pix - obs_y_off # x_geog, y_geog = raster_x_min + x * pix + pix / 2, raster_y_max - y * pix - pix / 2 # areaOfInterest = QgsRectangle(x_geog - radius, y_geog - radius, x_geog + radius, y_geog + radius) # set observer height # Raster used is smaller than radius, so no clipping nescesarry try: if raster_band.YSize < radius * 2 or raster_band.YSize < radius * 2: mask_x = xOrigin mask_y = yOrigin new_obs_x = obs_x_off new_obs_y = obs_y_off raster_array = raster_band.ReadAsArray().astype(np.float) else: raster_array = raster_band.ReadAsArray(mask_x_pix, mask_y_pix, mask_width, mask_height).astype(np.float) except: debugHere() try: obs_height = observer.items()[obs_height_field] if obs_height is None: obs_height = 1.6 # set observer height to person height z = obs_height + raster_array[new_obs_y, new_obs_x] except(IndexError, TypeError) as e: print e debugHere() start = (new_obs_y, new_obs_x, z) writePoints.append([(mask_x, mask_y), (mask_x, mask_y + (mask_height * pixelHeight)), (mask_x + (mask_width * pixelWidth) , mask_y), (mask_x + (mask_width * pixelWidth), mask_y + (mask_height * pixelHeight))]) # raster_crs for tar in range(targets.GetFeatureCount()): target_in_radius = True target = targets.GetFeature(tar) # get Target point geometry tar_geom = target.geometry() x, y = tar_geom.GetPoints()[0] target_outside_radius = euclidian_distance((obs_x, obs_y), (x, y)) > radius if target_outside_radius: continue # offset x,y values to equivalent raster index values x = int((x - mask_x) / pixelWidth) y = int((y - mask_y) / pixelHeight) # check if target point is out of search area # if target_outside_radius: # continue # get target height z = target.items()[tar_height_field] try: landscape_height = 
raster_array[y, x] except IndexError: target_in_radius = False continue # get target height z = target.items()[tar_height_field] + landscape_height end = (y, x, z) # Unpack start/end tuples x, y, z = start x2, y2, z2 = end z_value = z # Calculate differentials diff_x = x2 - x diff_y = y2 - y diff_z = z2 - z # Assign incremental slope values for x, y, z incr_x = -1 if (diff_x < 0) else 1 incr_y = -1 if (diff_y < 0) else 1 incr_z = -1 if (diff_z < 0) else 1 abs_diff_x = abs(diff_x) abs_diff_y = abs(diff_y) abs_diff_z = abs(diff_z) diff_x2 = abs_diff_x * 2 diff_y2 = abs_diff_y * 2 diff_z2 = abs_diff_z * 2 # Find the steepest axis and find line segments accordingly if (abs_diff_x >= abs_diff_y) and (abs_diff_x >= abs_diff_z): steepest = 'x' z_line_length = np.sqrt(pow(diff_x, 2) + pow(diff_z, 2)) z_segment_length = z_line_length / diff_x elif (abs_diff_y > abs_diff_x) and (abs_diff_y >= abs_diff_z): steepest = 'y' z_line_length = np.sqrt(pow(diff_y, 2) + pow(diff_z, 2)) z_segment_length = z_line_length / diff_y elif (abs_diff_z > abs_diff_x) and (abs_diff_z > abs_diff_y): steepest = 'z' z_line_length = np.sqrt(pow(diff_x, 2) + pow(diff_z, 2)) z_segment_length = z_line_length / diff_z else: return "Error when finding steepest line" incr_z_value = np.sqrt(abs(pow(z_segment_length, 2) - pow(1, 2))) incr_z_value = -incr_z_value if (diff_z < 0) else incr_z_value xm, ym, zm = (x2 + x) / 2, (y2 + y) / 2, (z2 + z) / 2 zm = z + xm * incr_z_value mid_fresnel = get_fresnel_radius(z_line_length / 2, z_line_length / 2) if fresnel: try: visibility = zm - mid_fresnel > raster_array[xm, ym] except: debugHere() if not visibility: lines_for_shp.append(build_return_package(observer, target, visibility)) continue if 'x' in steepest: err_1 = diff_y2 - abs_diff_x err_2 = diff_z2 - abs_diff_x for i in np.arange(abs_diff_x - 1): if (err_1 > 0): y += incr_y err_1 -= diff_x2 if (err_2 > 0): z += incr_z err_2 -= diff_x2 err_1 += diff_y2 err_2 += diff_z2 x += incr_x z_value += incr_z_value visibility = z_value > raster_array[x, y] if not visibility: break if 'y' in steepest: err_1 = diff_x2 - abs_diff_y err_2 = diff_z2 - abs_diff_y for i in np.arange(abs_diff_y - 1): if (err_1 > 0): x += incr_x err_1 -= diff_y2 if (err_2 > 0): z += incr_z err_2 -= diff_y2 err_1 += diff_x2 err_2 += diff_z2 y += incr_y z_value += incr_z_value visibility = z_value > raster_array[x, y] if not visibility: break if 'z' in steepest: err_1 = diff_y2 - abs_diff_z err_2 = diff_x2 - abs_diff_z for i in np.arange(abs_diff_z - 1): if (err_1 > 0): y += incr_y err_1 -= diff_z2 if (err_2 > 0): x += incr_x err_2 -= diff_z2 err_1 += diff_y2 err_2 += diff_x2 z += incr_z z_value += incr_z_value visibility = z_value > raster_array[x, y] if not visibility: break lines_for_shp.append(build_return_package(observer, target, visibility)) return lines_for_shp
14,697
def delete():
    """
        Receives requests for deleting certain files at the back-end
    """
    path = request.form['path']
    files = glob.glob(path)
    for f in files:
        os.remove(f)

    return 'Successful'
14,698
def retrieve(last_updated=datetime.now()): """ Crawls news and returns a list of tweets to publish. """ print('Retrieving {} alzheimer news since {}.'.format(SITE, last_updated)) to_ret = list() # Get all the content from the last page of the site's news tree = html.fromstring(requests.get(URL).content) # Get list of articles articles = CSSSelector('article')(tree) for article in articles: # For each article parse the date on the metadata and compare to the last update of the bot. # If the article is newer it should go on until it finds one that's not link = CSSSelector('article .ue-c-cover-content__link')(article)[0].get('href') if "promo" in link.lower() or "follow" in link.lower(): continue news_page = html.fromstring(requests.get(link).content) news_date = CSSSelector('time')(news_page)[0].get('datetime') news_datetime = datetime.strptime(news_date, '%Y-%m-%dT%H:%M:%SZ') if news_datetime < last_updated: break # Get the useful parts of each article to compose a tweet. title = CSSSelector('article .ue-c-cover-content__headline')(article)[0].text author = CSSSelector('.ue-c-article__byline-name a, .ue-c-article__byline-name')(news_page)[0].text article_body = str(etree.tostring(CSSSelector('.ue-l-article__body')(news_page)[0])) if "alzheimer" not in article_body.lower(): continue # Compose a tweet with the article's information tweet = """ {title} Autor/a: {author} Enlace: https:{link} ({site}) """.format(title=title, author=author, link=link, site=SITE) to_ret.append(dedent(tweet)) # Returns a list of tweets ready for the bot to tweet. return to_ret
14,699