content (string, lengths 22 – 815k)
id (int64, 0 – 4.91M)
def sgd(lr, tparams, grads, inp, cost, opt_ret=None): """ Stochastic gradient descent (SGD) optimizer :param lr: :param tparams: :param grads: :param inp: :param cost: :param opt_ret: :return f_grad_shared, f_update: """ gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k) for k, p in tparams.items()] gsup = [(gs, g) for gs, g in zip(gshared, grads)] outs = [cost] if opt_ret is not None: # opt_ret should be a dict outs += list(opt_ret.values()) f_grad_shared = theano.function(inp, outs, updates=gsup, profile=profile) pup = [(p, p - lr * g) for p, g in zip(itervalues(tparams), gshared)] f_update = theano.function([lr], [], updates=pup, profile=profile) return f_grad_shared, f_update
15,400
def build_empty_indexes(ngram_len): """ Build and return the nested indexes structure. The resulting index structure can be visualized this way:: 1. The unigrams index is in indexes[1] with this structure: {1: { u1: {index_docid1: [posting_list1], index_docid2: [posting_list2]}, u2: {index_docid1: [posting_list3], index_docid3: [posting_list4]} } } 2. The bigrams index is in indexes[2] with this structure: {2: { u3, u4: {index_docid1: [posting_list7], index_docid2: [posting_list6]}, u5, u6: {index_docid1: [posting_list5], index_docid3: [posting_list8]} } } and so on, until ngram_len """ indexes = {} for i in range(1, ngram_len + 1): indexes[i] = defaultdict(posting_list) return indexes
15,401
def test_if_in_for_tensor(): """ Feature: JIT Fallback Description: Test fallback with control flow. Expectation: No exception. """ @ms_function def control_flow_for(): x = Tensor(7) y = Tensor(0) for _ in range(3): if y < Tensor(10): y += x return y res = control_flow_for() assert res == 14
15,402
def file_export(args_opt): """Export to file""" if config.model == "ssd320": net = SSD320(ssd_mobilenet_v2(), config, is_training=False) else: net = ssd_mobilenet_v2(config=config) net = SsdInferWithDecoder(net, Tensor(default_boxes), config) save_ckpt_path = './ckpt_' + str(device_id) + '/' ckpt_file, _ = get_ckpt_epoch(save_ckpt_path) if ckpt_file is None: return param_dict = load_checkpoint(os.path.join(save_ckpt_path, ckpt_file)) net.init_parameters_data() load_param_into_net(net, param_dict) net.set_train(False) input_shp = [1, 3] + config.img_shape input_array = Tensor( np.random.uniform(-1.0, 1.0, size=input_shp), mindspore.float32) export(net, input_array, file_name="ssd", file_format=args_opt.file_format) name = 'ssd.' + args_opt.file_format.lower() mox.file.copy(name, os.path.join(args_opt.train_url, name)) print("Export finished.")
15,403
def as_dicts(results): """Convert execution results to a list of tuples of dicts for better comparison.""" return [result.to_dict(dict_class=dict) for result in results]
15,404
def merge_dicts(dict_to_merge, merged_dict): """Recursively merge the contents of dict_to_merge into merged_dict. Values that are already present in merged_dict will be overwritten if they are also present in dict_to_merge""" for key, value in iteritems(dict_to_merge): if isinstance(merged_dict.get(key), dict): merge_dicts(value, merged_dict[key]) else: merged_dict[key] = value return merged_dict
15,405
def _backend_name_to_class(backend_str: str): """ Convert a backend string to the test configuration class for the backend. """ known_backends = _get_all_backends() if backend_str not in known_backends: raise ValueError( f'Unknown backend {backend_str}. ' f'Known backends: {known_backends}' ) conftest = importlib.import_module( f'ibis.backends.{backend_str}.tests.conftest' ) return conftest.TestConf
15,406
def concat_allocator_cmd(allocator, cmd): """add env variable for different allocator modes.""" new_cmd = cmd if allocator == "direct": new_cmd = "DIRECT_BUFFER=1 " + cmd elif allocator == "unified": new_cmd = "UNIFIED_BUFFER=1 " + cmd elif allocator == "je_direct": new_cmd = "JEMALLOC=1 DIRECT_BUFFER=1 " + cmd elif allocator == "je_cycle": new_cmd = "JEMALLOC=1 " + cmd elif allocator == "je_unified": new_cmd = "JEMALLOC=1 UNIFIED_BUFFER=1 " + cmd return new_cmd
15,407
def generate_property_comment( description: intermediate.PropertyDescription, ) -> Tuple[Optional[Stripped], Optional[List[Error]]]: """Generate the documentation comment for the given property.""" return _generate_summary_remarks_constraints(description)
15,408
def task_eeg_get_flicker_frequencies() -> Dict: """ Get the flicker frequency of each trial. Save as .mat files. """ Path(fname.eeg_flicker_frequencies_dir).mkdir(exist_ok=True, parents=True) for subject in SUBJECTS: # Get sources. sources = dict( dat=fname.bids_dat(subject=subject) ) sources_list = list(sources.values()) # Get targets. targets = dict( frequencies=fname.eeg_flicker_frequencies(subject=subject, variable="frequencies"), durations=fname.eeg_flicker_frequencies(subject=subject, variable="durations"), write_script_to=fname.eeg_flicker_frequencies_script(subject=subject), ) targets_list = list(targets.values()) # Make the script to run. script = textwrap.dedent(f"""\ %% Get the flicker frequency of each trial. Save as a .mat file. do_one() function do_one() %% Read input variables. durations = get_durations('{sources["dat"]}') %% Run functions. frequencies = []; for i = 1:numel(durations) duration = durations(i) frequency = 1./(duration/50) frequencies = [frequencies, frequency] end %% Save output variables. save('{targets["frequencies"]}', 'frequencies'); save('{targets["durations"]}', 'durations'); end function [durations] = get_durations(dat_path) datmat = importdata(dat_path) durations = datmat(1:end, 5); end """) # Make action to run script. action = f"python3 matlab2.py".split() action += ["--script_contents", script] action += ["--write_script_to", targets["write_script_to"]] # Go! yield dict( name=f"subject--{subject}", actions=[action], file_dep=sources_list, targets=targets_list, )
15,409
def create_set(X, y, inds): """ X list and y nparray :return: """ new_X = [] for i in inds: new_X.append(X[i]) new_y = y[inds] return SignalAndTarget(new_X, new_y)
15,410
def evaluate_model(h5_file, pred_file): """ evaluate the trained model. Plot ROC curve and calculate AUC. inputs: model json file path, model weights file. outputs: filename of the plotting. """ try: batch_size = 32 model = load_model(h5_file) file_path = os.path.dirname(h5_file) filename_base = os.path.basename(h5_file).split('.')[0] hdf5_file = tables.open_file(pred_file, mode='r') m_pred = hdf5_file.root.test_img.shape[0] steps = int(ceil(m_pred / batch_size)) generator = read_hdf5(hdf5_file, dataset="test", batch_size=32) preds = model.predict_generator(generator, steps=steps, verbose=1) preds = np.array(preds)[:, 1] logging.debug(f'preds: {preds}') true_values = hdf5_file.root.test_labels fpr, tpr, _ = roc_curve(list(true_values), list(preds)) precision, recall, thresholds = prc(list(true_values), list(preds)) average_precision = average_precision_score( list(true_values), list(preds)) roc_auc = auc(fpr, tpr) roc_name = os.path.join(file_path, filename_base + "_roc.png") prc_name = os.path.join(file_path, filename_base + "_prc.png") clear_plot() plot_roc(fpr, tpr, roc_auc, roc_name) clear_plot() plot_prc(recall, precision, average_precision, prc_name) clear_plot() finally: hdf5_file.close()
15,411
def find_dates(): """ Find valid dates """ text = read_file() valid = [] for i, c in enumerate(text): # Find "-" which we use as an identifier for possible dates if c == "-": try: date = validate_date_string(i, text) if date: valid.append(date) except ValueError: continue print(", ".join(valid)) return True
15,412
def launch_app(): """ Activates the application if it is not running or is running in the background. """ session = get_mobile_driver_session().driver logger.info("Launch app from desired capabilities on device") if get_platform() == 'iOS': session.execute_script('mobile: launchApp', {'bundleId': APP_BUNDLE_ID}) else: app_package = session.desired_capabilities["appPackage"] awake_android() session.activate_app(app_id=app_package)
15,413
def handle_400_error(_error): """Return a http 400 error to client""" return make_response(jsonify({'error': 'Misunderstood'}), 400)
15,414
def control_norm_backward(grad_out, ustream, vstream, abkw, cache): """ Implements the backward pass of the control norm For each incoming sample it does: grad = grad_out - (1 - abkw) * vstream * out vstream = vstream + mu() y = (x - mstream) / sqrt(varstream) varstream = afwd * varstream + (1 - afwd) * var(x) + (afwd * (1 - afwd) * (mu(x) - mstream) ** 2 mstream = afwd * mstream + (1 - afwd) * mu(x) """ out, scale = cache grad_in = np.empty_like(grad_out) for idx in range(grad_out.shape[0]): grad = grad_out[idx] - (1 - abkw) * vstream * out[idx] vstream += grad * out[idx] grad = grad / scale[idx] grad_in[idx] = grad - (1 - abkw) * ustream ustream += grad_in[idx] return grad_in, ustream, vstream, (None, )
15,415
def get_random_asset_id_of_dataset( db: Session = Depends(deps.get_db), dataset_id: int = Path(..., example="12"), viz_client: VizClient = Depends(deps.get_viz_client), current_user: models.User = Depends(deps.get_current_active_user), current_workspace: models.Workspace = Depends(deps.get_current_workspace), ) -> Any: """ Get random asset from specific dataset """ dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id) if not dataset: raise DatasetNotFound() offset = get_random_asset_offset(dataset) assets = viz_client.get_assets( user_id=current_user.id, repo_id=current_workspace.hash, # type: ignore branch_id=dataset.task_hash, # type: ignore keyword=None, offset=offset, limit=1, ) if assets.total == 0: raise AssetNotFound() return {"result": assets.items[0]}
15,416
def seconds_to_hours(s): """Convert seconds to hours: :param s: Number of seconds :type s: Float :return: Number of hours :rtype: Float """ return float(s) / 3600
15,417
def assign_change_priority(zone: dict, change_operations: list) -> None: """ Given a list of change operations derived from the difference of two zones files, assign a priority integer to each change operation. The priority integer serves two purposes: 1. Identify the relative order the changes. The target of an alias record will have a higher priority, since it needs to be present when we commit our change transaction. 2. Group together all change operations that can be committed together in the same ResourceRecordSet change transaction. """ rr_prio = defaultdict(int) def is_same_zone(change: dict) -> bool: return change["zone"]["id"] == zone["id"] def is_alias(change: ComparableRecord) -> bool: record = change["record"] return record.alias_dns_name is not None and is_same_zone(change) def is_new_alias(change: ComparableRecord) -> bool: return is_alias(change) and change["operation"] in ("CREATE", "UPSERT") for change in change_operations: if is_new_alias(change): record = change["record"] rr_prio[record.alias_dns_name] += 1 for change in change_operations: if is_new_alias(change): record = change["record"] rr_prio[record.alias_dns_name] += rr_prio[record.name] for change in change_operations: record = change["record"] change["prio"] = rr_prio[record.name]
15,418
def contigs_n_bases(contigs): """Returns the sum of all n_bases of contigs.""" return sum(c.n_bases for c in contigs)
15,419
def parse_input_fn_result(result): """Gets features, labels, and hooks from the result of an Estimator input_fn. Args: result: output of an input_fn to an estimator, which should be one of: * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple (features, labels) with same constraints as below. * A tuple (features, labels): Where `features` is a `Tensor` or a dictionary of string feature name to `Tensor` and `labels` is a `Tensor` or a dictionary of string label name to `Tensor`. Both `features` and `labels` are consumed by `model_fn`. They should satisfy the expectation of `model_fn` from inputs. Returns: Tuple of features, labels, and input_hooks, where features are as described above, labels are as described above or None, and input_hooks are a list of SessionRunHooks to be included when running. Raises: ValueError: if the result is a list or tuple of length != 2. """ input_hooks = [] if isinstance(result, dataset_ops.DatasetV2): iterator = dataset_ops.make_initializable_iterator(result) input_hooks.append(_DatasetInitializerHook(iterator)) result = iterator.get_next() return parse_iterator_result(result) + (input_hooks,)
15,420
def remove(self): """Deprecated. Remove a node path from the scene graph""" print("Warning: NodePath.remove() is deprecated. Use remove_node() instead.") # Send message in case anyone needs to do something # before node is deleted messenger.send('preRemoveNodePath', [self]) # Remove nodePath self.removeNode()
15,421
def demosaic(cfa, pattern='RGGB'): """ Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using bilinear interpolation. Parameters ---------- CFA : array_like *Bayer* color filter array (CFA). pattern : unicode, optional **{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**, Arrangement of the colour filters on the pixel array. Returns ------- ndarray *RGB* colourspace array. Notes ----- - The definition output is not clipped in range [0, 1] : this allows for direct HDRI / radiance image generation on *Bayer* CFA data and post demosaicing of the high dynamic range data as showcased in this `Jupyter Notebook <https://github.com/colour-science/colour-hdri/\ blob/develop/colour_hdri/examples/\ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`_. References ---------- - :cite:`Losson2010c` Examples -------- >>> CFA = np.array( ... [[0.30980393, 0.36078432, 0.30588236, 0.3764706], ... [0.35686275, 0.39607844, 0.36078432, 0.40000001]]) >>> demosaic(CFA) array([[[ 0.69705884, 0.17941177, 0.09901961], [ 0.46176472, 0.4509804 , 0.19803922], [ 0.45882354, 0.27450981, 0.19901961], [ 0.22941177, 0.5647059 , 0.30000001]], <BLANKLINE> [[ 0.23235295, 0.53529412, 0.29705883], [ 0.15392157, 0.26960785, 0.59411766], [ 0.15294118, 0.4509804 , 0.59705884], [ 0.07647059, 0.18431373, 0.90000002]]]) >>> CFA = np.array( ... [[0.3764706, 0.360784320, 0.40784314, 0.3764706], ... [0.35686275, 0.30980393, 0.36078432, 0.29803923]]) >>> demosaic(CFA, 'BGGR') array([[[ 0.07745098, 0.17941177, 0.84705885], [ 0.15490197, 0.4509804 , 0.5882353 ], [ 0.15196079, 0.27450981, 0.61176471], [ 0.22352942, 0.5647059 , 0.30588235]], <BLANKLINE> [[ 0.23235295, 0.53529412, 0.28235295], [ 0.4647059 , 0.26960785, 0.19607843], [ 0.45588237, 0.4509804 , 0.20392157], [ 0.67058827, 0.18431373, 0.10196078]]]) """ cfa = np.asarray(cfa) R_m, G_m, B_m = masks(cfa.shape, pattern) H_G = np.asarray([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4 H_RB = np.asarray([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4 R = convolve(cfa * R_m, H_RB) G = convolve(cfa * G_m, H_G) B = convolve(cfa * B_m, H_RB) return np.concatenate( [R[..., np.newaxis], G[..., np.newaxis], B[..., np.newaxis]], axis=-1 )
15,422
def auditable_event(message, user_id, subject_id, context="other"): """Record auditable event message: The message to record, i.e. "log in via facebook" user_id: The authenticated user id performing the action subject_id: The user id upon which the action was performed """ text = "performed by {0} on {1}: {2}: {3}".format( user_id, subject_id, context, message) current_app.logger.log(AUDIT, text) with db.session.no_autoflush: db.session.add(Audit( user_id=user_id, subject_id=subject_id, comment=message, context=context)) db.session.commit()
15,423
def overwrite_core_fields(new_metadata, old_metadata): """For fields like dc and project_metadata, overwrite the items in old_metadata with the fields in new_metadata.""" old_metadata = copy.deepcopy(old_metadata) for cat in ['dc', 'project_metadata']: if cat not in new_metadata: continue for newk, newv in new_metadata[cat].items(): log.debug('Replacing old field [{}][{}] with {}'.format(cat, newk, newv)) old_metadata[cat][newk] = newv return old_metadata
15,424
def compare_skill(embedding, idx=None): """Display a skill its most similar skills in the embedding. Args: embedding (array): skills embedding idx (int): index to select skill, defaults to None (if None, a random index is chosen) Returns: df: dataframe of a skill and the skills it is closest to in the embedding by cosine similarity """ if idx is None: description = embedding[random.randint(0, len(embedding))] else: description = embedding[idx] return ( skills[["preferredLabel", "description"]] .assign(cosine_scores=util.pytorch_cos_sim(description, embedding)[0]) .sort_values(by=["cosine_scores"], ascending=False) .head(10) )
15,425
def translate_df(df: DataFrame) -> DataFrame: """ Function to translate a DataFrame directly :param df: DataFrame to translate :return: DataFrame """ regs = df.Country.count() # Count the number of records in the 'Country' column to serve as the loop bound # Use a for loop to translate the countries one by one; lang_tgt is the target language and lang_src the source language for i in range(0, regs): df.Country[i] = translator.translate(df.Country[i], lang_tgt='es', lang_src='en') return df
15,426
def recreate_city_table(log_file_path, city_table_path): """Create a table with city and its respected cases count.""" df = pd.read_csv(log_file_path) # Group the table by city and infections grouped_city = df.groupby("City") grouped_infection = df.groupby(["City", "Infection"]) # Initiate new log file. header = [ "City", "Mask Policy", "Maximum Capacity", "Infection Count", "Sample Total", ] with open(city_table_path, "a+") as f: input_writer = csv.writer(f, delimiter=",") input_writer.writerow(header) city_total = [] # Iterate through each table and append to log. for i in grouped_city: city_total.append(len(i[1].index)) id = 0 for j in grouped_infection: elements = [] if j[0][1] is True: elements.append(j[0][0]) elements.append(j[1].iloc[0]["Mask Policy"]) elements.append(j[1].iloc[0]["Maximum Capacity"]) elements.append(len(j[1].index)) elements.append(city_total[id]) id += 1 # Append city status to the log file. with open(city_table_path, "a") as f: input_writer = csv.writer(f, delimiter=",") input_writer.writerow(elements)
15,427
def compare_data(data1, data2, ignore=None, expected=True): """ todo: Update Documentation :param data1: :type data1: :param data2: :type data2: :param ignore: :type ignore: :param expected: :type expected: :return: :rtype: """ print(data1) print(data2) return expected
15,428
def test_irr3(): """ Test irr on unique distribution. """ d = bivariates['cat'] red = i_rr(d, ((0,), (1,)), (2,)) assert red == pytest.approx(0)
15,429
def _process_caption_jieba(caption): """Processes a Chinese caption string into a list of tokenized words. Args: caption: A string caption. Returns: A list of strings; the tokenized caption. """ tokenized_caption = [] tokenized_caption.extend(jieba.cut(caption, cut_all=False)) return tokenized_caption
15,430
def create_connection(graph, node1, node2, linktype, propertydict=None, allow_dup=False): """ :param graph: :param node1: :param node2: :param linktype: :param propertydict: :return: """ data = {} data["graph"] = graph data["node1"] = node1 data["node2"] = node2 data["linktype"] = linktype data["propertydict"] = propertydict data["allow_dup"] = allow_dup resp = requests.post(config.APIURL + config.APIBaseURL + "create_connection", json=data) if resp.status_code == 200: ret = resp.json() else: ret = None print("create connection") return ret
15,431
def get_nn(config): """ Args: config: Path to the config file generated during training Returns: Model instance """ # Loads the model configurations from config file # generated during training with open(config, 'r') as f: C = json.load(f) C = Struct(**C) # Load correct network if C.network == 'resnet50': from src.architectures import resnet50 as nn elif C.network == 'resnet152': from src.architectures import resnet152 as nn elif C.network == 'vgg16': from src.architectures import vgg16 as nn else: from src.architectures import vgg19 as nn # Create our model, load weights and then # compile it input_shape_img = (224, 224, 3) img_input = Input(shape=input_shape_img) base_layers = nn.nn_base(img_input) classifier = nn.classifier(base_layers, trainable=False) return Model(inputs=img_input, outputs=classifier)
15,432
def mandlebrot(ssize, screen, clock, Xs,Xe,Ys,Ye): """ would using numpy improve performance and/or precision ? """ screen_x, screen_y = ssize print("start mandlebrot") for Py in range(screen_y): for Px in range(screen_x): x0 = scaled_x(Px, screen_x, Xs, Xe) y0 = scaled_y(Py, screen_y, Ys, Ye) color = iterate_location(x0, y0) #color = warp_red(iteration, maxiter) #color = grayscale(iteration, maxiter) pygame.draw.line(screen, color, [Px,Py], [Px,Py], 1) #print(color, Px, Py) if Py % 25 == 0: pygame.display.update() #clock.tick() pygame.display.update() #clock.tick() print("finished")
15,433
def test_breadth_traversal_single(single_tree): """test breadth-first traversal""" order = [] single_tree.breadth_first_traversal_op(lambda n: order.append(n.val)) assert order == [1]
15,434
def get_functor(value: Any) -> Union[Functor, FunctorIter, FunctorDict]: """ Returns a base functor instance with a value property set to 'value' of the class for either dictionary, other iterable or uniterable type, and, where passed, a const property set to the constructor of 'value'. >>> f = get_functor([1, 2, 3]) >>> print(f.__class__.__name__, f.value, f.const == list) FunctorIter [1, 2, 3] True """ const = get_constructor(value) if const in iterables_passed: return FunctorIter(value, const) if const == dict: return FunctorDict(value) return Functor(value)
15,435
def save_hdf_dataset(ds, fname, verbose=True): """ Save VoigtFit.dataset to a HDF5 file. The function maps the internal data to a HDF5 data model. """ if splitext(fname)[1] == '.hdf5': pass else: fname += '.hdf5' with h5py.File(fname, 'w') as hdf: # set main attributes: hdf.attrs['redshift'] = ds.redshift if hasattr(ds.velspan, '__iter__'): vmin, vmax = ds.velspan hdf.attrs['vmin'] = vmin hdf.attrs['vmax'] = vmax else: hdf.attrs['vmin'] = -ds.velspan hdf.attrs['vmax'] = ds.velspan if hasattr(ds, 'name'): hdf.attrs['name'] = ds.name else: hdf.attrs['name'] = '' if hasattr(ds, 'verbose'): hdf.attrs['verbose'] = ds.verbose else: hdf.attrs['verbose'] = True # .data: data = hdf.create_group('data') for num, chunk in enumerate(ds.data): spec = data.create_group('spec%i' % (num+1)) spec.attrs['filename'] = ds.data_filenames[num] spec.attrs['res'] = chunk['res'] spec.attrs['norm'] = chunk['norm'] spec.attrs['nsub'] = chunk['nsub'] spec.attrs['specID'] = chunk['specID'] spec.create_dataset('wl', data=chunk['wl']) spec.create_dataset('flux', data=chunk['flux']) spec.create_dataset('mask', data=chunk['mask']) spec.create_dataset('error', data=chunk['error']) # .regions: hdf_regions = hdf.create_group('regions') for num, reg in enumerate(ds.regions): reg_group = hdf_regions.create_group('region%i' % (num+1)) if hasattr(reg.velspan, '__iter__'): vmin, vmax = reg.velspan reg_group.attrs['vmin'] = vmin reg_group.attrs['vmax'] = vmax else: reg_group.attrs['vmin'] = -reg.velspan reg_group.attrs['vmax'] = reg.velspan reg_group.attrs['res'] = reg.res reg_group.attrs['normalized'] = reg.normalized reg_group.attrs['cont_err'] = reg.cont_err reg_group.attrs['new_mask'] = reg.new_mask reg_group.attrs['specID'] = reg.specID reg_group.attrs['kernel_fwhm'] = reg.kernel_fwhm reg_group.attrs['kernel_nsub'] = reg.kernel_nsub reg_group.attrs['label'] = reg.label reg_group.create_dataset('kernel', data=reg.kernel) reg_group.create_dataset('wl', data=reg.wl) reg_group.create_dataset('flux', data=reg.flux) reg_group.create_dataset('mask', data=reg.mask) reg_group.create_dataset('error', data=reg.err) lines = reg_group.create_group('lines') for line in reg.lines: lines.create_group(line.tag) lines[line.tag].attrs['active'] = line.active # .molecules: molecules = hdf.create_group('molecules') if hasattr(ds, 'molecules'): for molecule, items in ds.molecules.items(): pre_array = [tuple(item) for item in items] band_data = np.array(pre_array, dtype=[('band', 'S8'), ('Jmax', 'i4')]) molecules.create_dataset(molecule, data=band_data) fine_lines = hdf.create_group('fine_lines') if hasattr(ds, 'fine_lines'): for ground_state, lines in ds.fine_lines.items(): # line_array = np.array(lines, dtype='str') line_array = [s.encode("ascii", "ignore") for s in lines] fine_lines.create_dataset(str(ground_state), data=line_array) # .components: components = hdf.create_group('components') for ion, ds_comps in ds.components.items(): ion_group = components.create_group(ion) for cnum, comp in enumerate(ds_comps): comp_group = ion_group.create_group("comp%i" % (cnum+1)) comp_group.attrs['z'] = comp.z comp_group.attrs['b'] = comp.b comp_group.attrs['logN'] = comp.logN for key, val in comp.options.items(): val = 'None' if val is None else val comp_group.attrs[key] = val # .best_fit: if ds.best_fit is not None: p_opt = ds.best_fit best_fit = hdf.create_group('best_fit') for ion, comps in ds.components.items(): params = best_fit.create_group(ion) for n in range(len(comps)): param_group = params.create_group("comp%i" % (n+1)) # Save best-fit values: 
param_group.attrs['z'] = p_opt['z%i_%s' % (n, ion)].value param_group.attrs['b'] = p_opt['b%i_%s' % (n, ion)].value param_group.attrs['logN'] = p_opt['logN%i_%s' % (n, ion)].value # and uncertainties: param_group.attrs['z_err'] = p_opt['z%i_%s' % (n, ion)].stderr param_group.attrs['b_err'] = p_opt['b%i_%s' % (n, ion)].stderr param_group.attrs['logN_err'] = p_opt['logN%i_%s' % (n, ion)].stderr # Save Chebyshev parameters: cheb_group = best_fit.create_group('cheb_params') for parname in list(ds.best_fit.keys()): if 'cheb_p' in parname: coeff = ds.best_fit[parname] cheb_par = cheb_group.create_group(parname) cheb_par.attrs['value'] = coeff.value cheb_par.attrs['error'] = coeff.stderr if verbose: print("Successfully saved the dataset to file: " + fname)
15,436
def main(): """ This code, which must run on a LAPTOP: 1. Constructs a GUI for my part of the Capstone Project. 2. Communicates via MQTT with the code that runs on the EV3 robot. """ # ------------------------------------------------------------------------- # Construct and connect the MQTT Client: # ------------------------------------------------------------------------- mqtt_sender = com.MqttClient() #If put things in () it will be a receiver. mqtt_sender.connect_to_ev3() # ------------------------------------------------------------------------- # The root TK object for the GUI: # ------------------------------------------------------------------------- root = tkinter.Tk() root.title('CSSE120 Capstone Project, Winter 2018-2019') # ------------------------------------------------------------------------- # The main frame, upon which the other frames are placed. # ------------------------------------------------------------------------- main_frame = ttk.Frame(root, padding = 10, borderwidth=5, relief='groove') main_frame.grid() # ------------------------------------------------------------------------- # Sub-frames for the shared GUI that the team developed: # ------------------------------------------------------------------------- teleop_frame, arm_frame, control_arm, drive_system_frame, sound_system_frame, m3_personal_frame, m3_camera_frame, m3_led_frame, m3_do_math_frame = get_shared_frames(main_frame, mqtt_sender) # exit button: # ------------------------------------------------------------------------- # Frames that are particular to my individual contributions to the project. # ------------------------------------------------------------------------- # done: Implement and call get_my_frames(...) # ------------------------------------------------------------------------- # Grid the frames. # ------------------------------------------------------------------------- grid_frames(teleop_frame, arm_frame, control_arm, drive_system_frame, sound_system_frame, m3_personal_frame, m3_camera_frame, m3_led_frame, m3_do_math_frame) # ------------------------------------------------------------------------- # The event loop: # ------------------------------------------------------------------------- root.mainloop()
15,437
def split_bibtexs_by_bib_style(bibtexs): """ Args: bibtexs (list of Queryset of Bibtex): Returns: list of tuple: (Style Key, Display Name, Bibtex List) """ # Get STYLE KYES bibtex_backet = dict() choices = expand_book_style_tuple(Book.STYLE_CHOICES) + list( Bibtex.BIBSTYLE_CHOICES ) for i, (key, _) in enumerate(choices): # if key == "SAME_AS_BOOK": # _idx_same_as_book = i if key != "SAME_AS_BOOK": bibtex_backet[key] = [] choices.pop(i) # Split by Style for bib in bibtexs: bibtex_backet[bib.bib_type_key].append(bib) # Make list of tuple ret = [] for key, display_name in choices: if len(bibtex_backet[key]) > 0: ret.append((key, display_name, bibtex_backet[key])) return ret
15,438
def apply_gates(date, plate, gates_df, subpopulations=False, correlation=None): """ Constructs dataframe with channels relevant to receptor quantification. """ if date == "5-16": receptors = ['CD127'] channels = ['BL1-H'] else: receptors = ['CD25', 'CD122', 'CD132'] channels = ["VL1-H", "BL5-H", "RL1-H"] for i, r in enumerate(receptors): cellTypes = ['T-helper', 'T-reg', 'NK', 'CD8+'] for j, cellType in enumerate(cellTypes): if i == 0 and j == 0: df, unstainedWell, isotypes = samp_Gate(date, plate, gates_df, cellType, r, correlation, subPop=subpopulations) df = subtract_unstained_signal(df, channels, receptors, unstainedWell, isotypes) else: df2, unstainedWell2, isotypes2 = samp_Gate(date, plate, gates_df, cellType, r, correlation, subPop=subpopulations) df2 = subtract_unstained_signal(df2, channels, receptors, unstainedWell2, isotypes2) df = df.append(df2) return df
15,439
def _check_config(config): """Check the configuration is robust. Args: config (dict): the configuration in dict. Raises: AssertionError: if the configuration violates some of rules. """ # Check Dataset definition if "pretrain data setting" in config: data_loader = config["pretrain data setting"] assert data_loader["batch size"] > 0 for dataset_info in data_loader["datasets"]: assert os.path.exists(dataset_info["path"]) assert dataset_info["signal length"] > 0 # Check Experiment setting if "experiment" in config: experiment = config["experiment"] assert experiment["learning rate"] > 0
15,440
def action_interaction_exponential_reward_function( context: np.ndarray, action_context: np.ndarray, action: np.ndarray, base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray], action_interaction_weight_matrix: np.ndarray, reward_type: str, random_state: Optional[int] = None, **kwargs, ) -> np.ndarray: """Reward function incorporating exponential interactions among combinatorial action Parameters ----------- context: array-like, shape (n_rounds, dim_context) Context vectors characterizing each round (such as user information). action_context: array-like, shape (n_unique_action, dim_action_context) Vector representation for each action. action: array-like, shape (n_unique_action * len_list) Sampled action. Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]. base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None Function generating expected reward for each given action-context pair, i.e., :math:`\\mu: \\mathcal{X} \\times \\mathcal{A} \\rightarrow \\mathbb{R}`. If None is set, context **independent** expected reward for each action will be sampled from the uniform distribution automatically. reward_type: str, default='binary' Type of reward variable, which must be either 'binary' or 'continuous'. When 'binary' is given, expected reward is transformed by logit function. action_interaction_weight_matrix (`W`): array-like, shape (len_list, len_list) `W(i, j)` is the weight of how the expected reward of slot `i` affects that of slot `j`. random_state: int, default=None Controls the random seed in sampling dataset. Returns --------- expected_reward_factual: array-like, shape (n_rounds, len_list) Expected rewards given factual actions (:math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\sum_{j \\neq k} g^{-1}(f(x, a(j))) * W(k, j)`). """ if not isinstance(context, np.ndarray) or context.ndim != 2: raise ValueError("context must be 2-dimensional ndarray") if not isinstance(action_context, np.ndarray) or action_context.ndim != 2: raise ValueError("action_context must be 2-dimensional ndarray") if not isinstance(action, np.ndarray) or action.ndim != 1: raise ValueError("action must be 1-dimensional ndarray") if reward_type not in [ "binary", "continuous", ]: raise ValueError( f"reward_type must be either 'binary' or 'continuous', but {reward_type} is given." 
) if action_interaction_weight_matrix.shape[0] * context.shape[0] != action.shape[0]: raise ValueError( "the size of axis 0 of action_interaction_weight_matrix multiplied by that of context must be the same as that of action" ) # action_2d: array-like, shape (n_rounds, len_list) action_2d = action.reshape( (context.shape[0], action_interaction_weight_matrix.shape[0]) ) # action_3d: array-like, shape (n_rounds, n_unique_action, len_list) action_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1) # expected_reward: array-like, shape (n_rounds, n_unique_action) expected_reward = base_reward_function( context=context, action_context=action_context, random_state=random_state ) if reward_type == "binary": expected_reward = np.log(expected_reward / (1 - expected_reward)) # expected_reward_3d: array-like, shape (n_rounds, n_unique_action, len_list) expected_reward_3d = np.tile( expected_reward, (action_interaction_weight_matrix.shape[0], 1, 1) ).transpose(1, 2, 0) # action_interaction_weight: array-like, shape (n_rounds, n_unique_action, len_list) action_interaction_weight = action_3d @ action_interaction_weight_matrix # weighted_expected_reward: array-like, shape (n_rounds, n_unique_action, len_list) weighted_expected_reward = action_interaction_weight * expected_reward_3d # expected_reward_factual: list, shape (n_rounds, len_list) expected_reward_factual = weighted_expected_reward.sum(axis=1) if reward_type == "binary": expected_reward_factual = sigmoid(expected_reward_factual) # q_l = \sum_{a} a3d[i, a, l] q_a + \sum_{a_1, a_2} delta(a_1, a_2) # return: array, shape (n_rounds, len_list) expected_reward_factual = np.array(expected_reward_factual) assert expected_reward_factual.shape == ( context.shape[0], action_interaction_weight_matrix.shape[0], ), f"response shape must be (n_rounds, len_list), but {expected_reward_factual.shape}" return expected_reward_factual
15,441
def deserialize_once_dates(dates): """ Deserializes the dates as expected within a once dates object. :param dates: The dates object :return: A 2-tuple containing all the deserialized date parameters """ return ( du_parser.parse(dates[RULE_ONCE_S_TIME]), du_parser.parse(dates[RULE_ONCE_E_TIME]) )
15,442
def gen_CDR_MitEx( device_backend: Backend, simulator_backend: Backend, n_non_cliffords: int, n_pairs: int, total_state_circuits: int, **kwargs ) -> MitEx: """ Produces a MitEx object for applying Clifford Circuit Learning & Clifford Data Regression mitigation methods when calculating expectation values of observables. Implementation as in arXiv:2005.10189. :param device_backend: Backend object device experiments are default run through. :type device_backend: Backend :param simulator_backend: Backend object simulated characterisation experiments are default run through. :type simulator_backend: Backend :param n_non_cliffords: Number of gates in Ansatz Circuit left as non-Clifford gates when producing characterisation circuits. :type n_non_cliffords: int :param n_pairs: Number of non-Clifford gates sampled to become Clifford and vice versa each time a new state circuit is generated. :type n_pairs: int :param total_state_circuits: Total number of state circuits produced for characterisation. :type total_state_circuits: int :key StatesSimulatorMitex: MitEx object noiseless characterisation simulations are executed on, default simulator_backend with basic compilation of circuit. :key StatesDeviceMitex: MitEx object noisy characterisation circuit are executed on, default device_backend with basic compilation of circuit. :key ExperimentMitex: MitEx object that actual experiment circuits are executed on, default backend with some compilation of circuit. :key model: Model characterised by state circuits, default _PolyCDRCorrect(1) (see cdr_post.py for other options). :key likelihood_function: LikelihoodFunction used to filter state circuit results, given by a LikelihoodFunction Enum, default set to none. :key tolerance: Model can be perturbed when calibration circuits have by exact expectation values too close to each other. This parameter sets a distance between exact expectation values which at least some calibration circuits should have. :key distance_tolerance: The absolute tolerance on the distance between expectation values of the calibration and original circuit. :key calibration_fraction: The upper bound on the fraction of calibration circuits which have noisy expectation values far from that of the original circuit. 
""" _states_sim_mitex = copy.copy( kwargs.get( "states_simluator_mitex", MitEx( simulator_backend, _label="StatesSimMitex", mitres=gen_compiled_MitRes(simulator_backend, 0), ), ) ) _states_device_mitex = copy.copy( kwargs.get( "states_device_mitex", MitEx( device_backend, _label="StatesDeviceMitex", mitres=gen_compiled_MitRes(device_backend, 0), ), ) ) _experiment_mitex = copy.copy( kwargs.get( "experiment_mitex", MitEx( device_backend, _label="ExperimentMitex", mitres=gen_compiled_MitRes(device_backend, 0), ), ) ) _states_sim_taskgraph = TaskGraph().from_TaskGraph(_states_sim_mitex) _states_sim_taskgraph.parallel(_states_device_mitex) _states_sim_taskgraph.append(ccl_result_batching_task_gen(total_state_circuits)) likelihood_function = kwargs.get("likelihood_function", LikelihoodFunction.none) _experiment_taskgraph = TaskGraph().from_TaskGraph(_experiment_mitex) _experiment_taskgraph.parallel(_states_sim_taskgraph) _post_calibrate_task_graph = TaskGraph(_label="FitCalibrate") _post_calibrate_task_graph.append( ccl_likelihood_filtering_task_gen(likelihood_function) ) _post_calibrate_task_graph.append( cdr_calibration_task_gen( device_backend, kwargs.get("model", _PolyCDRCorrect(1)), ) ) _post_task_graph = TaskGraph(_label="QualityCheckCorrect") _post_task_graph.parallel(_post_calibrate_task_graph) _post_task_graph.prepend( cdr_quality_check_task_gen( distance_tolerance=kwargs.get("distance_tolerance", 0.1), calibration_fraction=kwargs.get("calibration_fraction", 0.5), ) ) _experiment_taskgraph.prepend( ccl_state_task_gen( n_non_cliffords, n_pairs, total_state_circuits, simulator_backend=simulator_backend, tolerance=kwargs.get("tolerance", 0.01), max_state_circuits_attempts=kwargs.get("max_state_circuits_attempts", 10), ) ) _experiment_taskgraph.append(_post_task_graph) _experiment_taskgraph.append(cdr_correction_task_gen(device_backend)) return MitEx(device_backend).from_TaskGraph(_experiment_taskgraph)
15,443
def lambda_sum_largest_canon(expr, args): """ S_k(X) denotes lambda_sum_largest(X, k) t >= k S_k(X - Z) + trace(Z), Z is PSD implies t >= ks + trace(Z) Z is PSD sI >= X - Z (PSD sense) which implies t >= ks + trace(Z) >= S_k(sI + Z) >= S_k(X) We use the fact that S_k(X) = sup_{sets of k orthonormal vectors u_i}\sum_{i}u_i^T X u_i and if Z >= X in PSD sense then \sum_{i}u_i^T Z u_i >= \sum_{i}u_i^T X u_i We have equality when s = lambda_k and Z diagonal with Z_{ii} = (lambda_i - lambda_k)_+ """ X = expr.args[0] k = expr.k Z = Variable((X.shape[0], X.shape[0]), PSD=True) obj, constr = lambda_max_canon(expr, [X - Z]) obj = k*obj + trace(Z) return obj, constr
15,444
def test_create_deployment(name, simple_box): """ Test that we can create a deployment with the right name """ deployment = Deployment(name, simple_box) assert deployment.name == name
15,445
def print_g(msg, term=True, destination=default_log): """ Write msg to stdout and to file. Parameters ---------- msg: str The text. term: bool If true, write to stdout. destination: str Path of destination file. Returns ------- No return value. """ if term: print('%s' % msg) with open(destination, 'a') as f: f.write('%s\n' % msg) f.flush()
15,446
def compare_bib_dict(item1, item2): """ compare bibtex item1 and item 2 in dictionary form """ # unique id check col_list = ["doi", "pmid", "pmcid", "title", "local-url"] for c in col_list: if (item1.get(c, "1") != '') and (item1.get(c, "1") == item2.get(c, "2")): return 1.0 score = 0.0 def _get_score(item1, item2, colname, s): if item1.get(colname, "1") == '': return 0.0 if item1.get(colname, "2") == '': return 0.0 if item1.get(colname, "1") == item2.get(colname, "2"): return s return 0.0 score = score + _get_score(item1, item2, "year", 0.2) score = score + _get_score(item1, item2, "author", 0.2) score = score + _get_score(item1, item2, "author1", 0.1) score = score + _get_score(item1, item2, "journal", 0.2) score = score + _get_score(item1, item2, "volume", 0.1) return score
15,447
def find_residues_lsfd(poles, H, fs): """Find residues from poles and FRF estimates Estimate the (in band) residue matrices from poles and FRF's by the Least Squares Frequency Domain Algorithm (LSFD). A residue matrix is the outer product of the mode vector and the modal participation factor. The mode vector can therefore be recovered by SVD decomposition of the residue matrix. Arguments --------- poles : 1darray Continous time poles (eigenvalues). H : 3darray FRF matrix where the first and second axis refers to the outputs and inputs, respectively and the third axis refers to the frequency. fs : float Sampling rate Returns ------- 3darray Residue matrices where the first dimension refers to the poles, second dimension to outputs and third to inputs, i.e. if `R` is the returned matrix then `R[0]` is the residue matrix corresponding to pole `poles[0]`. """ l, m, nf = H.shape p = np.r_[poles, poles.conj()] n = p.size A = np.zeros((l*nf, (n+2)*l), dtype=complex) w = 2*np.pi*np.linspace(0., fs/2, num=nf) I = np.eye(l) B = np.zeros((nf*l, m), dtype=complex) for i, wi in enumerate(w): A[i*l:(i+1)*l, -2*l:-1*l] = I / (1j*wi+1e-3)**1 A[i*l:(i+1)*l, -l:] = I * (1j*wi) B[i*l:(i+1)*l, :] = H[:, :, i] for j, pj in enumerate(p): A[i*l:(i+1)*l, j*l:(j+1)*l] = I/(1j*wi-pj) X = np.linalg.lstsq(A, B, rcond=None)[0] return X[:l*n//2].reshape((n//2, l, m))
15,448
def create_regularly_sampled_time_points(interval: pendulum.Duration, start_time_point: pendulum.DateTime, count: int): """ Create a sequence of `count` time points starting at `start_time_point`, `interval` apart. Args: interval: The time interval between each point. start_time_point: The starting time point of the sequence. count: The number of time points in the sequence. Returns: The sequence of time points. """ # I must handle a count of 0 specially because `pendulum` **includes** the endpoint of the specified range. if count == 0: return [] # The `pendulum` package, by default, **includes** the endpoint of the specified range. I want to exclude it when # I create these series so my end point must be `count - 1`. end_time_point = start_time_point + interval * (count - 1) result = pendulum.period(start_time_point, end_time_point).range('seconds', interval.total_seconds()) return result
15,449
def create_model(bert_config, is_training, input_ids_list, input_mask_list, segment_ids_list, use_one_hot_embeddings): """Creates a classification model.""" all_logits = [] input_ids_shape = modeling.get_shape_list(input_ids_list, expected_rank=2) batch_size = input_ids_shape[0] seq_length = input_ids_shape[1] seq_length = seq_length // NUM_DOCS def reshape_and_unstack_inputs(inputs, batch_size): inputs = tf.reshape(inputs, [batch_size, NUM_DOCS, seq_length]) return tf.unstack(inputs, axis=1) input_ids_list = reshape_and_unstack_inputs(input_ids_list, batch_size) input_mask_list = reshape_and_unstack_inputs(input_mask_list, batch_size) segment_ids_list = reshape_and_unstack_inputs(segment_ids_list, batch_size) start_logits, end_logits = [], [] with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope: for i in range(len(input_ids_list)): model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids_list[i], input_mask=input_mask_list[i], token_type_ids=segment_ids_list[i], use_one_hot_embeddings=use_one_hot_embeddings, scope="bert") final_hidden = model.get_sequence_output() final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) hidden_size = final_hidden_shape[2] output_weights = tf.get_variable( "cls/open_qa/output_weights", [2, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "cls/open_qa/output_bias", [2], initializer=tf.zeros_initializer()) final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size]) logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [batch_size, seq_length, 2]) logits = tf.transpose(logits, [2, 0, 1]) unstacked_logits = tf.unstack(logits, axis=0) (s_logits, e_logits) = (unstacked_logits[0], unstacked_logits[1]) start_logits.append(s_logits) end_logits.append(e_logits) start_logits = tf.concat(start_logits, axis=-1) end_logits = tf.concat(end_logits, axis=-1) return (start_logits, end_logits)
15,450
def _remove_redundant_quantize_ops_per_subgraph(model, subgraph_index, signature_index): """Remove redundant quantize ops per subgraph.""" subgraph = model.subgraphs[subgraph_index] tensors = subgraph.tensors operators = subgraph.operators # Find all quantize operators. quant_opcode_idxs = get_quantize_opcode_idx(model) dequant_opcode_idxs = get_dequantize_opcode_idx(model) # Find all redundant quant tensors. all_quant_ops = [] redundant_quant_tensors = {} output_dequant_tensors = {} for op in operators: if op.opcodeIndex in quant_opcode_idxs: all_quant_ops.append(op) input_tensor = tensors[op.inputs[0]] output_tensor = tensors[op.outputs[0]] input_type = _convert_tflite_enum_type_to_tf_type(input_tensor.type) output_type = _convert_tflite_enum_type_to_tf_type(output_tensor.type) # This is a requantize op, so write down its input tensor index. if input_type != dtypes.float32 and output_type != dtypes.float32: redundant_quant_tensors[op.inputs[0]] = op if (op.opcodeIndex in dequant_opcode_idxs and op.outputs[0] in subgraph.outputs): output_dequant_tensors[op.inputs[0]] = op # Remove all the quant ops which produce the redundant quant tensors. for op in all_quant_ops: output_tensor_idx = op.outputs[0] if output_tensor_idx in redundant_quant_tensors: requantize_op = redundant_quant_tensors[output_tensor_idx] if model.signatureDefs: signature_def = model.signatureDefs[0] for output in signature_def.outputs: if output.tensorIndex == op.outputs[0]: output.tensorIndex = op.inputs[0] # Reset the input of the requantize op to the float input requantize_op.inputs[0] = op.inputs[0] operators.remove(op) # Remove all the quant ops which connect to the output dequant op. for op in all_quant_ops: output_tensor_idx = op.outputs[0] if output_tensor_idx in output_dequant_tensors: dequant_op = output_dequant_tensors[output_tensor_idx] subgraph.outputs[subgraph.outputs == dequant_op.outputs[0]] = op.inputs[0] if signature_index >= 0: signature_def = model.signatureDefs[signature_index] for output in signature_def.outputs: if output.tensorIndex == dequant_op.outputs[0]: output.tensorIndex = op.inputs[0] operators.remove(op) operators.remove(dequant_op)
15,451
def hmmsearch(genome_id, species_id, marker_genes_hmm, num_threads=1): """ Perform HMM search using Prokka-annotated protein sequences """ input_annotations = destpath(get_uhgg_layout(species_id, "faa", genome_id)["annotation_file"]) annotated_genes = download_reference(input_annotations) hmmsearch_file = f"{genome_id}.hmmsearch" # Command if find_files(hmmsearch_file): # This only happens in debug mode, where we can use pre-existing file. tsprint(f"Found hmmsearch results for genome {genome_id} from prior run.") else: try: command(f"hmmsearch --noali --cpu {num_threads} --domtblout {hmmsearch_file} {marker_genes_hmm} {annotated_genes}") except: # Do not keep bogus zero-length files; those are harmful if we rerun in place. command(f"mv {hmmsearch_file} {hmmsearch_file}.bogus", check=False) raise return hmmsearch_file
15,452
def where(condition: numpy.typing.ArrayLike, *args: PolyLike) -> ndpoly: """ Return elements chosen from `x` or `y` depending on `condition`. .. note:: When only `condition` is provided, this function is a shorthand for ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be preferred, as it behaves correctly for subclasses. The rest of this documentation covers only the case where all three arguments are provided. Args: condition: Where True, yield `x`, otherwise yield `y`. x: Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. Returns: An array with elements from `x` where `condition` is True, and elements from `y` elsewhere. Examples: >>> poly = numpoly.variable()*numpy.arange(4) >>> poly polynomial([0, q0, 2*q0, 3*q0]) >>> numpoly.where([1, 0, 1, 0], 7, 2*poly) polynomial([7, 2*q0, 7, 6*q0]) >>> numpoly.where(poly, 2*poly, 4) polynomial([4, 2*q0, 4*q0, 6*q0]) >>> numpoly.where(poly) (array([1, 2, 3]),) """ if isinstance(condition, numpoly.ndpoly): condition = numpy.any(numpy.asarray( condition.coefficients), 0).astype(bool) if not args: return numpy.where(condition) poly1, poly2 = numpoly.align_polynomials(*args) coefficients = [numpy.where(condition, x1, x2) for x1, x2 in zip(poly1.coefficients, poly2.coefficients)] dtype = numpy.result_type(poly1.dtype, poly2.dtype) return numpoly.polynomial_from_attributes( exponents=poly1.exponents, coefficients=coefficients, names=poly1.names, dtype=dtype, )
15,453
def download_google_file(google_file, folder = "./"): """ Do a Google image search limited to pixabay.com and get the download file using these instructions: https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson2-download.ipynb Then use this script to grab the higher res photos. """ f = open(google_file,'r') urls = f.read().split("\n") f.close() [download(cdn_to_larger(url), folder) for url in urls]
15,454
async def async_attach_trigger( hass, config, action, automation_info, *, platform_type="event" ): """Listen for events based on configuration.""" event_types = config.get(CONF_EVENT_TYPE) removes = [] event_data_schema = None if config.get(CONF_EVENT_DATA): event_data_schema = vol.Schema( { vol.Required(key): value for key, value in config.get(CONF_EVENT_DATA).items() }, extra=vol.ALLOW_EXTRA, ) event_context_schema = None if config.get(CONF_EVENT_CONTEXT): event_context_schema = vol.Schema( { vol.Required(key): _schema_value(value) for key, value in config.get(CONF_EVENT_CONTEXT).items() }, extra=vol.ALLOW_EXTRA, ) job = HassJob(action) @callback def handle_event(event): """Listen for events and calls the action when data matches.""" try: # Check that the event data and context match the configured # schema if one was provided if event_data_schema: event_data_schema(event.data) if event_context_schema: event_context_schema(event.context.as_dict()) except vol.Invalid: # If event doesn't match, skip event return hass.async_run_hass_job( job, { "trigger": { "platform": platform_type, "event": event, "description": f"event '{event.event_type}'", } }, event.context, ) removes = [ hass.bus.async_listen(event_type, handle_event) for event_type in event_types ] @callback def remove_listen_events(): """Remove event listeners.""" for remove in removes: remove() return remove_listen_events
15,455
def histogram_filter(x, lb=0, ub=1): """Truncates the tail of samples for better visualisation. Parameters ---------- x : array-like One-dimensional numeric arrays. lb : float in [0, 1], optional Defines the lower bound quantile ub : float in [0, 1], optional Defines the upper bound quantile """ return x[(np.quantile(x, q=lb) < x) & (x < np.quantile(x, q=ub))]
15,456
def create_tweet(food_name): """Create the text of the tweet you want to send.""" r = requests.get(food2fork_url, params={"q": food_name, "key": F2F_KEY}) try: r_json = r.json() except Exception as e: return "No recipe found. #sadpanda" # fetch top-ranked recipe recipe = r_json["recipes"][0] recipe_f2f_url = recipe["f2f_url"] recipe_name = recipe["title"] recipe_publisher = recipe["publisher"] recipe_img = recipe["image_url"] text = "\"%s\" by %s: %s" % (recipe_name, recipe_publisher, recipe_f2f_url) return text
15,457
def sublist(lst1: List[T1], lst2: List[T1]) -> bool: """ Check `lst1` is sublist of `lst2`. Parameters ---------- lst1 : List[T1] List 1. lst2 : List[T1] List 2. Returns ------- bool `True` if `lst1` is sublist of `lst2`. Examples -------- >>> sublist([1,2,3], [1,2,3]) True >>> sublist([1,2,3], [1,2,3,4]) True >>> sublist([1,2,3,5], [1,2,3,4]) False """ return set(lst1) <= set(lst2)
15,458
async def _sync_friends(): """Get the actual set of friends to match the expected set of friends""" global _SYNCED, _WATCHES expected = set([str(k, "utf8").lower() for k in _WATCHES.keys()]) while True: actual = await _friends() if actual is None: raise eqcmd.CommandError("Failed to retrieve friends") if actual == expected: _SYNCED = True return for person in expected.symmetric_difference(actual): if person in _ONLINE_STATES: del _ONLINE_STATES[person] await _toggle_friend(person)
15,459
def average_gradients(tower_grads): """Calculate the average gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers. """ average_grads = [] def func(grads,g): grads.append(tf.expand_dims(g,0)) return grads for grad_and_vars in zip(*tower_grads): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) grads = [] for g, _ in grad_and_vars: if g is None: continue # if tf.is_nan(g): # continue # Add 0 dimension to the gradients to represent the tower. # expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. # grads=tf.cond(tf.reduce_any(tf.is_nan(g)), lambda:grads, lambda:func(grads,g)) grads.append(tf.expand_dims(g, 0)) if len(grads)==0: continue # Average over the 'tower' dimension. grad = tf.concat(axis=0, values=grads) grad = tf.reduce_mean(grad, 0) # Keep in mind that the Variables are redundant because they are shared # across towers. So .. we will just return the first tower's pointer to # the Variable. v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads
15,460
def settings_check(settings: Settings): """Checks if all of the necessary keys are present in a settings dictionary. :param settings: :raise ValueError: if one of the settings is not present """ if settings['h'] is None: raise ValueError("The settings file doesn't specify the height of the container.") if settings['w'] is None: raise ValueError("The settings file doesn't specify the width of the container.") if settings['r'] is None: raise ValueError("The settings file doesn't specify the atom radius.") if settings['v'] is None: raise ValueError("The settings file doesn't specify the velocity limit.") if settings['c'] is None: raise ValueError("The settings file doesn't specify the collision tolerance.") if settings['M'] is None: raise ValueError("The settings file doesn't specify the M constant.") if settings['K'] is None: raise ValueError("The settings file doesn't specify the K constant.") if settings['N'] is None: raise ValueError("The settings file doesn't specify the number of atoms")
15,461
def save_masks(model, it, config): """ For self-training. Apply the trained model back to the training set to obtain a new round of training labels (masks) and save them. """ for subset in ['train', 'val']: if it == 0: dataset = VertebralDataset() dataset.load_vertebral(args.dataset[:-4], subset) dataset.prepare() else: dataset = VertebralDataset_self_training() dataset.load_vertebral(args.dataset, subset, it) dataset.prepare() FOLDER_PATH = f'/DATA5_DB8/data/sqpeng/data/vertebrae_masks/iter_{it}' if not os.path.exists(FOLDER_PATH): os.mkdir(FOLDER_PATH) for image_id in tqdm(dataset.image_ids): # image, image_meta, gt_class_id, gt_bbox, gt_mask = \ # modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False) image = dataset.load_image(image_id) image, window, scale, padding, crop = utils.resize_image( image, min_dim=config.IMAGE_MIN_DIM, min_scale=config.IMAGE_MIN_SCALE, max_dim=config.IMAGE_MAX_DIM, mode=config.IMAGE_RESIZE_MODE) info = dataset.image_info[image_id] print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id, dataset.image_reference(image_id))) # Run object detection results = model.detect([image], verbose=1) # print('rois: ', results[0]['rois']) filtered_roi = roi_filter(dataset, image_id, config, model) r = results[0] count_masks = 0 for i, roi in enumerate(filtered_roi): if np.any(roi): count_masks += 1 mask = np.zeros([image.shape[0], image.shape[1], count_masks], dtype=np.bool) index = 0 for i, roi in enumerate(filtered_roi): if np.any(roi): mask[:, :, index] = r['masks'][:, :, i] index += 1 file_name = os.path.join(FOLDER_PATH, f'{info["id"]}.npy') np.save(file_name, mask) print("Saved to ", file_name)
15,462
def combine_predictions(indices1: NpArray, confs1: NpArray, indices2: NpArray, confs2: NpArray) -> Tuple[NpArray, NpArray]: """ Joins two predictions, returns sorted top-3 results in every row """ dprint(indices1.shape) dprint(indices2.shape) assert indices1.shape == indices2.shape assert confs1.shape == confs2.shape merged_indices = [] merged_confs = [] for idx1, conf1, idx2, conf2 in tqdm(zip(indices1, confs1, indices2, confs2), total=indices1.shape[0]): items: DefaultDict[int, float] = defaultdict(float) for i, c in zip(idx1, conf1): items[i] += c for i, c in zip(idx2, conf2): items[i] += c indices = sorted(items.keys(), key=lambda i: -items[i]) confs = [items[i] for i in indices] merged_indices.append(indices[:TOP_K]) merged_confs.append(confs[:TOP_K]) return np.array(merged_indices), np.array(merged_confs)
15,463
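A toy call, assuming the module-level TOP_K equals 3 and the dprint/tqdm helpers used above are in scope; the class indices and confidences are made up.

import numpy as np

indices1 = np.array([[5, 2, 9]]); confs1 = np.array([[0.5, 0.3, 0.2]])
indices2 = np.array([[2, 7, 5]]); confs2 = np.array([[0.6, 0.3, 0.1]])
idx, conf = combine_predictions(indices1, confs1, indices2, confs2)
# class 2 accumulates 0.3 + 0.6 = 0.9 and ranks first, class 5 gets 0.5 + 0.1 = 0.6,
# so idx[0] == [2, 5, 7] with conf[0] == [0.9, 0.6, 0.3].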
def get_speed_limit(center, rad, speed_limit): """ Retrieves the speed limit of the intersection circle :param center: center coordinate point of the intersection circle :param rad: radius of the intersection circle :param speed_limit: speed limit of the intersection :type center: Coordinates :type rad: float :type speed_limit: int :return: speed limit of the intersection circle """ i = Intersection(center, rad, speed_limit) return i.get_speed_limit()
15,464
def process(utim, data):
    """ Sign the message body with the UTIM session key and build the response frame. """
    res = None
    try:
        crypto = CryptoLayer(utim.get_session_key())
        logging.debug('Signing message {0} with key {1}'
                      .format(data[SubprocessorIndex.body.value], utim.get_session_key()))
        res = crypto.sign(CryptoLayer.SIGN_MODE_SHA1, data[SubprocessorIndex.body.value])
        logging.debug('Signed package: {0}'.format(res))
    except TypeError:
        logging.error('Failed to sign the message')
    if res is None:
        return [Address.ADDRESS_UTIM, Address.ADDRESS_UHOST, Status.STATUS_FINALIZED, res]
    else:
        return [Address.ADDRESS_UTIM, Address.ADDRESS_UHOST, Status.STATUS_TO_SEND, res]
15,465
def tika_content(string, file_name, serverEndPoint=u'http://' + params.TIKA_HOST + ':9998'):
    """
    converts the binary file content to text via the Apache Tika interface
    and returns the extracted content
    """

    def convert_to_pdf(file_path):
        """
        convert the file_path file to pdf via libreoffice
        libreoffice --headless --convert-to pdf file_path --outdir /tmp
        """
        if not os.path.isfile(file_path):
            logging.error('%s file may be missing or not readable', file_path)
            return None
        try:
            libre_exec = subprocess.check_output(['which', 'libreoffice'])
            libre_exec = libre_exec.rstrip().decode()
            subprocess.call([libre_exec, '--headless', '--convert-to', 'pdf',
                             file_path, '--outdir', '/tmp'])
            pdf_file = os.path.splitext(file_path)[0] + '.pdf'
            logging.info('%s file converted as %s', file_path, pdf_file)
            if os.path.isfile(pdf_file):
                logging.info('%s file to pdf convert successful', file_path)
                return True
            else:
                logging.info('%s pdf file not found', pdf_file)
                return None
        except Exception as e:
            logging.error(e)
            logging.warning('LibreOffice package needed!')
            return None

    def is_office_file(file_path):
        """
        check whether the file has an office-type extension
        """
        ext = os.path.splitext(file_path)[1]
        return any(ext in _ext for _ext in ['.xlsx', '.docx', '.pptx', '.ppsx'])

    file_name = clean_str(file_name)
    try:
        parsed = parser.from_buffer(string, serverEndPoint)
    except Exception as e:
        logging.error(e)
        logging.error("tika parse problem")
        return None
    if type(parsed) is not dict:
        logging.error("tika could not parse the given string!")
        return None
    # for org.apache.tika.exception.TikaException: Unexpected RuntimeException
    # from org.apache.tika.parser.microsoft.OfficeParser convert doc to pdf
    # then tika content
    elif type(parsed) is dict and len(parsed) == 0 and \
            is_office_file(file_name):
        attachment = '/tmp/' + file_name
        open(attachment, 'wb').write(string)
        res = convert_to_pdf(attachment)
        if res is None:
            logging.warning('convert_to_pdf returned None!')
            return None
        attachment_pdf = os.path.splitext(attachment)[0] + '.pdf'
        # if res and os.path.isfile(attachment_pdf):
        if res:
            logging.info("%s file converted to pdf and parsed via tika", file_name)
            parsed = parser.from_file(attachment_pdf, serverEndPoint)
            return parsed['content']
        else:
            logging.error("%s pdf file open failure!", attachment_pdf)
            return None
    elif type(parsed) is dict and len(parsed) == 0:
        logging.error('Tika parse problem. %s file could not be parsed via Tika', file_name)
        return None
    else:
        return (parsed['content'], parsed['metadata']['Content-Type'])
15,466
def db_eval(techniques,sequences,inputdir=cfg.PATH.SEGMENTATION_DIR,metrics=None): """ Perform per-frame sequence evaluation. Arguments: techniques (string,list): name(s) of the method to be evaluated. sequences (string,list): name(s) of the sequence to be evaluated. inputdir (string): path to the technique(s) folder. Returns: db_eval_dict[method][measure][sequence] (dict): evaluation results. """ if isinstance(techniques,str): techniques = [techniques] if isinstance(sequences,str): sequences = [sequences] ndict = lambda: defaultdict(ndict) db_eval_dict = ndict() # RAW, per-frame evaluation timer = Timer() log.info("Number of cores allocated: %d"%cfg.N_JOBS) for technique in techniques: log.info('Evaluating technique: "%s"'%technique) timer.tic() J,j_M,j_O,j_D,F,f_M,f_O,f_D,T,t_M = \ zip(*Parallel(n_jobs=cfg.N_JOBS)(delayed(db_eval_sequence)( technique,sequence,inputdir,metrics) for sequence in sequences)) log.info('Processing time: "%.3f"'%timer.toc()) # STORE RAW EVALUATION for seq_id,sequence in enumerate(sequences): db_eval_dict[technique]['J'][sequence] = J[seq_id] db_eval_dict[technique]['F'][sequence] = F[seq_id] db_eval_dict[technique]['T'][sequence] = T[seq_id] return db_eval_dict
15,467
def scale_constraint(source_obj, target_obj, maintain_offset=True): """ create scale constraint. :param source_obj: :param target_obj: :param maintain_offset: :return: """ return cmds.scaleConstraint(source_obj, target_obj, mo=maintain_offset)[0]
15,468
def freq2note(freq): """Convert frequency in Hz to nearest note name. Parameters ---------- freq : float [0, 23000[ input frequency, in Hz Returns ------- str name of the nearest note Example ------- >>> aubio.freq2note(440) 'A4' >>> aubio.freq2note(220.1) 'A3' """ nearest_note = int(freqtomidi(freq) + .5) return midi2note(nearest_note)
15,469
def read_partpositions(filename, nspec, ctable=True, clevel=5, cname="lz4", quantize=None):
    """Read the particle positions in `filename`.

    This function strives to use as little memory as possible; for this, a
    bcolz ctable container is used for holding the data.  Besides being
    compressed in-memory, its chunked nature makes it a natural fit for data
    that needs to be appended because it does not need expensive memory
    resize operations.

    NOTE: This code reads directly from an UNFORMATTED SEQUENTIAL data
    Fortran file so care has been taken to skip the record length at the
    beginning and the end of every record.  See:
    http://stackoverflow.com/questions/8751185/fortran-unformatted-file-format

    Parameters
    ----------
    filename : string
        The file name of the particle raw data
    nspec : int
        number of species in particle raw data
    ctable : bool
        Return a bcolz ctable container.  If not, a numpy structured array is
        returned instead.
    clevel : int
        Compression level for the ctable container
    cname : string
        Codec name for the ctable container.  Can be 'blosclz', 'lz4', 'zlib'
        or 'zstd'.
    quantize : int
        Quantize data to improve (lossy) compression.  Data is quantized
        using np.around(scale*data)/scale, where scale is 2**bits, and bits
        is determined from the quantize value.  For example, if quantize=1,
        bits will be 4.  0 means that the quantization is disabled.

    Returns
    -------
    ctable object OR structured_numpy_array

    Returning a ctable is preferred because it is used internally so it does
    not need to be converted to other formats, so it is faster and uses less
    memory.

    Note: Passing a `quantize` param > 0 can increase the compression ratio
    of the ctable container, but it may also slow down the reading speed
    significantly.

    License
        This function is taken from the reflexible package
        (https://github.com/spectraphilic/reflexible/tree/master/reflexible).
        Authored by John F Burkhart <jfburkhart@gmail.com> with contributions
        from Francesc Alted <falted@gmail.com>.
        Licensed under: 'This script follows creative commons usage.'
""" CHUNKSIZE = 10 * 1000 xmass_dtype = [('xmass_%d' % (i + 1), 'f4') for i in range(nspec)] # note age is calculated from itramem by adding itimein out_fields = [ ('npoint', 'i4'), ('xtra1', 'f4'), ('ytra1', 'f4'), ('ztra1', 'f4'), ('itramem', 'i4'), ('topo', 'f4'), ('pvi', 'f4'), ('qvi', 'f4'), ('rhoi', 'f4'), ('hmixi', 'f4'), ('tri', 'f4'), ('tti', 'f4')] + xmass_dtype raw_fields = [('begin_recsize', 'i4')] + out_fields + [('end_recsize', 'i4')] raw_rectype = np.dtype(raw_fields) recsize = raw_rectype.itemsize cparams = bcolz.cparams(clevel=clevel, cname=cname) if quantize is not None and quantize > 0: out = get_quantized_ctable(raw_rectype, cparams=cparams, quantize=quantize, expectedlen=int(1e6)) else: out = bcolz.zeros(0, dtype=raw_rectype, cparams=cparams, expectedlen=int(1e6)) with open(filename, "rb", buffering=1) as f: # The timein value is at the beginning of the file reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4")[0] assert reclen == 4 itimein = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4") reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype="i4")[0] assert reclen == 4 nrec = 0 while True: # Try to read a complete chunk data = f.read(CHUNKSIZE * recsize) read_records = int(len(data) / recsize) # the actual number of records read chunk = np.ndarray(shape=(read_records,), buffer=data, dtype=raw_rectype) # Add the chunk to the out array out.append(chunk[:read_records]) nrec += read_records if read_records < CHUNKSIZE: # We reached the end of the file break # Truncate at the max length (last row is always a sentinel, so remove it) out.trim(1) # Remove the first and last columns out.delcol("begin_recsize") out.delcol("end_recsize") if ctable: return out else: return out[:]
15,470
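A self-contained sketch of the raw Fortran record layout the reader builds, assuming nspec=1; it only constructs the numpy dtype, so no FLEXPART file or bcolz container is needed.

import numpy as np

nspec = 1
xmass_dtype = [('xmass_%d' % (i + 1), 'f4') for i in range(nspec)]
out_fields = [('npoint', 'i4'), ('xtra1', 'f4'), ('ytra1', 'f4'), ('ztra1', 'f4'),
              ('itramem', 'i4'), ('topo', 'f4'), ('pvi', 'f4'), ('qvi', 'f4'),
              ('rhoi', 'f4'), ('hmixi', 'f4'), ('tri', 'f4'), ('tti', 'f4')] + xmass_dtype
raw_rectype = np.dtype([('begin_recsize', 'i4')] + out_fields + [('end_recsize', 'i4')])
print(raw_rectype.itemsize)  # 15 four-byte fields -> 60 bytes per particle record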
def _run_cli(*args):
    """Run the jinjafy cli command from the tests/ directory, passing in the
    provided arguments"""
    return subprocess.run(
        ['jinjafy', *args],
        # Execute in the same directory as this test file
        cwd=path.dirname(__file__),
        # Decode stdout/stderr as text
        encoding='utf-8',
        # Capture stderr and stdout in the completed process
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
15,471
def SQLHistPlotDivGrp(gGroups,cPlotVar,cPlotVarY,divElement='Canal_Number',pName='',pXlabel='',pYlabel='',bins=20,sqlAdd='',drawGraph=True,myalph=1,xnorm=0,ynorm=0,stdVal='',imgDPI=72,useEbar=False,plotEbar=False,showLegend=True,fixVals=None,logx=False,logy=False,isInt=False):
    """Runs and stacks SQLHistPlotDiv over a group of SQL WHERE-compatible statements stored in a dictionary"""
    if type(gGroups) is not type({}):
        groupBy=gGroups
        gGroups={}
        gList=cur.execute('SELECT '+groupBy+' from Lacuna where Project_Number = "'+str(projectTitle)+'" AND '+cPlotVar+' IS NOT NULL group by '+groupBy+' ORDER BY PROJECT,SAMPLE_AIM_NUMBER').fetchall()
        for cObj in gList:
            gGroups[cObj[0]]=groupBy+'="'+cObj[0]+'"'
    print(gGroups)
    keyList = sorted(gGroups.keys(), reverse=True)
    if sqlAdd!='':
        sqlAdd=' AND '+sqlAdd+' '
    for cGroup in keyList:
        # New Function
        SQLHistPlotDiv(cPlotVar,cPlotVarY,divElement=divElement,pName=pName,pXlabel=pXlabel,pYlabel=pYlabel,bins=bins,sqlAdd=gGroups[cGroup]+' '+sqlAdd,drawGraph=False,myalph=myalph,xnorm=xnorm,ynorm=ynorm,stdVal=stdVal,imgDPI=imgDPI,useEbar=useEbar,plotEbar=plotEbar,fixVals=fixVals,logx=logx,logy=logy,isInt=isInt)
    if pXlabel=='':
        pXlabel=ptName(cPlotVar)
    if pYlabel=='':
        pYlabel=ptName(cPlotVarY)
    if drawGraph:
        title(pName)
        xlabel(pXlabel)
        ylabel(pYlabel)
        if showLegend:
            legend(keyList)
        savefig('histogram-'+strcsv(pName)+'.pdf',dpi=imgDPI*2)
        savefig('histogram-'+strcsv(pName),dpi=imgDPI)
        close()
15,472
def test40_subdict_simplified_err(): """ Check the function parsing a PII_TASKS list of simplified tasks with errors """ # Not a tuple PII_TASKS = [r"\d16"] with pytest.raises(mod.InvPiiTask): mod.build_subdict(PII_TASKS, "fr") # A tuple plus not a tuple PII_TASKS = [(PiiEnum.CREDIT_CARD, r"\d{16}", "a toy Credit Card example"), r"\d16"] with pytest.raises(mod.InvPiiTask): mod.build_subdict(PII_TASKS, "zh") # A tuple without a valid PiiEnum PII_TASKS = [("not a PiiEnum", r"\d{16}", "a toy Credit Card example")] with pytest.raises(mod.InvPiiTask): mod.build_subdict(PII_TASKS, "es")
15,473
def license_list(ctx):
    """Show all licenses within the VSD"""
    from datetime import datetime
    result = ctx.obj['nc'].get("licenses")
    table = PrettyTable(["License id", "is Cluster", "Company",
                         "Max NICs", "Max VMs", "Version", "Expiration"])
    for line in result:
        table.add_row([line['ID'],
                       line['isClusterLicense'],
                       line['company'],
                       line['allowedNICsCount'],
                       line['allowedVMsCount'],
                       line['productVersion'] + 'R' + str(line['majorRelease']),
                       datetime.fromtimestamp(
                           line['expirationDate'] / 1000).strftime('%Y-%m-%d %H:%M:%S')])
    print(table)
15,474
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.  Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    weights_initializer_stddev = 0.01
    weights_regularized_l2 = 1e-3

    # 1x1 convolution to maintain spatial information.
    conv_1x1_of_7 = tf.layers.conv2d(vgg_layer7_out,
                                     num_classes,
                                     1,  # kernel_size
                                     padding='same',
                                     kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                     name='conv_1x1_of_7')
    # Upsample (deconvolution) x 2.
    first_upsamplex2 = tf.layers.conv2d_transpose(conv_1x1_of_7,
                                                  num_classes,
                                                  4,  # kernel_size
                                                  strides=(2, 2),
                                                  padding='same',
                                                  kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                                  kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                                  name='first_upsamplex2')
    conv_1x1_of_4 = tf.layers.conv2d(vgg_layer4_out,
                                     num_classes,
                                     1,  # kernel_size
                                     padding='same',
                                     kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                     name='conv_1x1_of_4')
    # Adding skip layer.
    first_skip = tf.add(first_upsamplex2, conv_1x1_of_4, name='first_skip')
    # Upsample (deconvolution) x 2.
    second_upsamplex2 = tf.layers.conv2d_transpose(first_skip,
                                                   num_classes,
                                                   4,  # kernel_size
                                                   strides=(2, 2),
                                                   padding='same',
                                                   kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                                   kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                                   name='second_upsamplex2')
    conv_1x1_of_3 = tf.layers.conv2d(vgg_layer3_out,
                                     num_classes,
                                     1,  # kernel_size
                                     padding='same',
                                     kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                     name='conv_1x1_of_3')
    # Adding skip layer.
    second_skip = tf.add(second_upsamplex2, conv_1x1_of_3, name='second_skip')
    # Upsample (deconvolution) x 8.
    third_upsamplex8 = tf.layers.conv2d_transpose(second_skip,
                                                  num_classes,
                                                  16,  # kernel_size
                                                  strides=(8, 8),
                                                  padding='same',
                                                  kernel_initializer=tf.random_normal_initializer(stddev=weights_initializer_stddev),
                                                  kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_regularized_l2),
                                                  name='third_upsamplex8')
    return third_upsamplex8
15,475
def configure_l3(conf, tunnel_mode): """ This function creates a temporary test bridge and adds an L3 tunnel. """ s = util.start_local_server(conf[1][1]) server = util.rpc_client("127.0.0.1", conf[1][1]) server.create_bridge(DEFAULT_TEST_BRIDGE) server.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_PORT) server.interface_up(DEFAULT_TEST_BRIDGE) server.interface_assign_ip(DEFAULT_TEST_BRIDGE, conf[1][0], None) server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type", None, tunnel_mode) server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "options", "remote_ip", conf[0]) return s
15,476
def gradient_descent(y, tx, initial_w, gamma, max_iters):
    """Gradient descent algorithm."""
    threshold = 1e-3  # determines convergence. To be tuned

    # Define parameters to store w and loss
    ws = [initial_w]
    losses = []
    w = initial_w
    method = 'mse'
    for n_iter in range(max_iters):
        current_grad = gradient_least_square(y, tx, w)
        current_loss = compute_loss(y, tx, w, method)
        # Moving in the direction of negative gradient
        w = w - gamma * current_grad
        # Store w and loss
        ws.append(w)
        losses.append(current_loss)
        # Convergence criterion: stop once the loss change between two
        # consecutive iterations drops below the threshold
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break
        print("Gradient Descent({bi}): loss={l}".format(
            bi=n_iter, l=current_loss))
    return losses, ws
15,477
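A self-contained sketch of the same loop on a toy least-squares problem; gradient_least_square and compute_loss are not shown above, so the helper below is a stand-in assuming the usual MSE gradient.

import numpy as np

def _mse_grad(y, tx, w):        # stand-in for gradient_least_square
    e = y - tx.dot(w)
    return -tx.T.dot(e) / len(y)

rng = np.random.default_rng(0)
tx = np.c_[np.ones(50), rng.normal(size=50)]
y = tx.dot(np.array([1.0, 2.0])) + 0.01 * rng.normal(size=50)

w = np.zeros(2)
for _ in range(200):
    w = w - 0.1 * _mse_grad(y, tx, w)   # same update rule as above
print(w)                                # close to [1.0, 2.0]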
def farthest_point_sample(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    # centroids (B x npoint) stores the indices of the npoint sampled points,
    # where B is the batch size
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # distance (B x N) records, for each point in a batch, its distance to the
    # nearest centroid chosen so far; initialized very large and updated each iteration
    distance = torch.ones(B, N).to(device) * 1e10
    # farthest holds the current farthest point, initialized randomly in [0, N)
    # for each batch, i.e. every batch starts from a random initial point
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    # batch_indices is simply the array 0 .. B-1
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    # Iterate until npoint samples have been drawn:
    for i in range(npoint):
        # take the current farthest point as the next centroid
        centroids[:, i] = farthest
        # fetch the coordinates of that centroid
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        # squared Euclidean distance from every point to this centroid
        dist = torch.sum((xyz - centroid) ** 2, -1)
        # build a mask: where dist is smaller than the stored distance, update it.
        # As iterations proceed, distance ends up holding, for each point, the
        # minimum distance to any of the centroids selected so far
        mask = dist < distance
        distance[mask] = dist[mask].float()
        # the point with the largest such distance becomes the next farthest point
        farthest = torch.max(distance, -1)[1]
    return centroids
15,478
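A usage sketch sampling 16 centroids from a random batch of point clouds; the shapes follow the docstring, the coordinates are random.

import torch

xyz = torch.rand(2, 1024, 3)                   # [B, N, 3]
idx = farthest_point_sample(xyz, 16)           # [B, 16] indices into the N axis
sampled = xyz[torch.arange(2)[:, None], idx]   # gathers the [B, 16, 3] sampled points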
def eval_lstm_crf(): """ eval lstm """ print('\neval.py config: \n', config) context.set_context( mode=context.GRAPH_MODE, save_graphs=False, device_id=config.device_id, device_target=config.device_target ) embeddings_size = config.embed_size parser = ImdbParser(config.data_CoNLL_path, config.glove_path, config.data_CoNLL_path, embed_size=config.embed_size ) embeddings, sequence_length, _, _, sequence_index, sequence_tag_index, tags_to_index_map \ = parser.get_datas_embeddings(seg=['test'], build_data=False) embeddings_table = embeddings.astype(np.float32) # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size # and hiddle_size is multiples of 16, this problem will be solved later. if config.device_target == 'Ascend': pad_num = int(np.ceil(config.embed_size / 16) * 16 - config.embed_size) if pad_num > 0: embeddings_table = np.pad(embeddings_table, [(0, 0), (0, pad_num)], 'constant') embeddings_size = int(np.ceil(config.embed_size / 16) * 16) ds_test = get_data_set(sequence_index, sequence_tag_index, config.batch_size) network = Lstm_CRF(vocab_size=embeddings.shape[0], tag_to_index=tags_to_index_map, embedding_size=embeddings_size, hidden_size=config.num_hiddens, num_layers=config.num_layers, weight=Tensor(embeddings_table), bidirectional=config.bidirectional, batch_size=config.batch_size, seq_length=sequence_length, is_training=False) callback = F1(len(tags_to_index_map)) model = Model(network) param_dict = load_checkpoint(os.path.join(config.ckpt_save_path, config.ckpt_path)) load_param_into_net(network, param_dict) print("============== Starting Testing ==============") rest_golds_list = list() rest_preds_list = list() columns_list = ["feature", "label"] for data in ds_test.create_dict_iterator(num_epochs=1): input_data = [] for i in columns_list: input_data.append(data[i]) feature, label = input_data logits = model.predict(feature, label) logit_ids, label_ids = callback.update(logits, label) rest_preds = np.array(logit_ids) rest_preds = np.expand_dims(rest_preds, 0) rest_labels = deepcopy(label_ids) label_ids = np.expand_dims(label_ids, 0) rest_labels = np.expand_dims(rest_labels, 0) rest_golds, rest_preds = get_label_lists(rest_labels, rest_preds, label_ids) rest_golds_list += rest_golds rest_preds_list += rest_preds accs = [] correct_preds, total_correct, total_preds = 0., 0., 0. for golds, preds in zip(rest_golds_list, rest_preds_list): accs += [a == b for (a, b) in zip(golds, preds)] golds_chunks = set(get_chunks(golds, tags_to_index_map)) preds_chunks = set(get_chunks(preds, tags_to_index_map)) correct_preds += len(golds_chunks & preds_chunks) total_preds += len(preds_chunks) total_correct += len(golds_chunks) p = correct_preds / total_preds if correct_preds > 0 else 0 r = correct_preds / total_correct if correct_preds > 0 else 0 f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0 acc = np.mean(accs) print("acc: {:.6f}%, F1: {:.6f}% ".format(acc*100, f1*100))
15,479
def loadData(fname='Unstra.out2.00008.athdf'): """load 3d bfield and calc the current density""" #data=ath.athdf(fname,quantities=['B1','B2','B3']) time,data=ath.athdf(fname,quantities=['Bcc1']) bx = data['Bcc1'] time,data=ath.athdf(fname,quantities=['Bcc2']) by = data['Bcc2'] time,data=ath.athdf(fname,quantities=['Bcc3']) bz = data['Bcc3'] x = data['x1f'] y = data['x2f'] z = data['x3f'] # --- def curl(vx,vy,vz,dx,dy,dz): [dzvx,dyvx,dxvx] = np.gradient(vx) [dzvy,dyvy,dxvy] = np.gradient(vy) [dzvz,dyvz,dxvz] = np.gradient(vz) cx = dyvz/dy-dzvy/dz cy = dzvx/dz-dxvz/dx cz = dxvy/dx-dyvx/dy # No need to del the reference by one manually # allow python to perform its own garbage collection # after the function return cxyz #del dzvx #del dzvy #del dzvz return cx,cy,cz # --- dx = dz = x[1]-x[0] dy = y[1]-y[0] jx,jy,jz = curl(bx,by,bz,dx,dy,dz) j2 = jx**2+jy**2+jz**2 return bx,by,bz,j2
15,480
def sparse_chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None): """Given the true sparsely encoded tag sequence y, input x (with mask), transition energies U, boundary energies b_start and b_end, it computes the loss function of a Linear Chain Conditional Random Field: loss(y, x) = NLL(P(y|x)), where P(y|x) = exp(E(y, x)) / Z. So, loss(y, x) = - E(y, x) + log(Z) Here, E(y, x) is the tag path energy, and Z is the normalization constant. The values log(Z) is also called free energy. """ x = add_boundary_energy(x, b_start, b_end, mask) energy = path_energy0(y, x, U, mask) energy -= free_energy0(x, U, mask) return K.expand_dims(-energy, -1)
15,481
def _get_de43_fields(de43_field): """ get pds 43 field breakdown :param de43_field: data of pds 43 :return: dictionary of pds 43 sub elements """ LOGGER.debug("de43_field=%s", de43_field) de43_regex = ( r"(?P<DE43_NAME>.+?) *\\(?P<DE43_ADDRESS>.+?) *\\(?P<DE43_SUBURB>.+?) *\\" r"(?P<DE43_POSTCODE>\S{4,10}) *(?P<DE43_STATE>.{3})(?P<DE43_COUNTRY>.{3})" ) field_match = re.match(de43_regex, de43_field) if not field_match: return dict() # get the dict field_dict = field_match.groupdict() return field_dict
15,482
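A toy DE43 value matching the backslash-delimited layout the regex above expects, assuming the module-level LOGGER and re imports used above are in scope; the merchant details are invented.

sample = r"CAFE EXAMPLE\123 MAIN ST\SYDNEY\2000 NSWAUS"
fields = _get_de43_fields(sample)
# {'DE43_NAME': 'CAFE EXAMPLE', 'DE43_ADDRESS': '123 MAIN ST', 'DE43_SUBURB': 'SYDNEY',
#  'DE43_POSTCODE': '2000', 'DE43_STATE': 'NSW', 'DE43_COUNTRY': 'AUS'}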
async def query(database: Database, payload: PostionQueryIn): """ Find whether a point is within a country """ query = select([countries.c.name, countries.c.iso2, countries.c.iso3]) # Convert a GeoPoint into a format that can be used in postgis queries point = f"POINT({payload.location.longitude} {payload.location.latitude})" query = query.where( ST_Covers(countries.c.geog, ST_GeographyFromText(f"SRID=4326;{point}")) ) results = await database.fetch_one(query=query) return results
15,483
def calc_header_zeropoint(im, ext=0): """ Determine AB zeropoint from image header Parameters ---------- im : `~astropy.io.fits.HDUList` or Image object or header. Returns ------- ZP : float AB zeropoint """ from . import model scale_exptime = 1. if isinstance(im, pyfits.Header): header = im else: if '_dr' in im.filename(): ext = 0 elif '_fl' in im.filename(): if 'DETECTOR' in im[0].header: if im[0].header['DETECTOR'] == 'IR': ext = 0 bunit = im[1].header['BUNIT'] else: # ACS / UVIS if ext == 0: ext = 1 bunit = im[1].header['BUNIT'] if bunit == 'ELECTRONS': scale_exptime = im[0].header['EXPTIME'] header = im[ext].header try: fi = get_hst_filter(im[0].header).upper() except: fi = None # Get AB zeropoint if 'PHOTFLAM' in header: ZP = (-2.5*np.log10(header['PHOTFLAM']) - 21.10 - 5*np.log10(header['PHOTPLAM']) + 18.6921) ZP += 2.5*np.log10(scale_exptime) elif 'PHOTFNU' in header: ZP = -2.5*np.log10(header['PHOTFNU'])+8.90 ZP += 2.5*np.log10(scale_exptime) elif (fi is not None): if fi in model.photflam_list: ZP = (-2.5*np.log10(model.photflam_list[fi]) - 21.10 - 5*np.log10(model.photplam_list[fi]) + 18.6921) else: print('Couldn\'t find PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25') ZP = 25 else: print('Couldn\'t find FILTER, PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25') ZP = 25 # If zeropoint infinite (e.g., PHOTFLAM = 0), then calculate from synphot if not np.isfinite(ZP): try: import pysynphot as S bp = S.ObsBandpass(im[0].header['PHOTMODE'].replace(' ', ',')) spec = S.FlatSpectrum(0, fluxunits='ABMag') obs = S.Observation(spec, bp) ZP = 2.5*np.log10(obs.countrate()) except: pass return ZP
15,484
def _convert_min_sec_to_sec(val):
    """
    :param val: val is a string in format 'XmYsZ' like '0m5s3' meaning second 5.3
    :return:

    >>> _convert_min_sec_to_sec('10m11s2')
    611.2
    """
    _min = val.split('m')[0]
    _sec = val.split('m')[1].split('s')[0]
    _dsec = val.split('s')[1]
    if len(_dsec) == 1:
        _dsec = _dsec + '0'
    res = int(_min) * 60 + int(_sec) + float(_dsec)/100.
    return res
15,485
def sitetester_home():
    """
    Home screen for Tester:
    A Tester can:
        a. Change their testing site
        b. View appointments for the site they work at
        c. Create an appointment for their testing site
        d. View aggregate test results
        e. View daily test results
    """
    error = None
    username = session['user_id']
    _is_tester, _ = is_tester(username)
    if not _is_tester:
        error = 'You do not have access to this page.'
        return render_template('login.html', error=error)

    if request.method == 'POST':
        _instr = request.form['submit_button']
        if _instr == 'Aggregate':
            return redirect(url_for("aggregrate_test_results"))
        elif _instr == 'Daily':
            return redirect(url_for("daily"))
        elif _instr == 'Change Sites':
            return redirect(url_for("tester_changesite", id=username))
        elif _instr == 'View Appointments':
            return redirect(url_for("view_appointments"))
        elif _instr == 'Create Appointment':
            return redirect(url_for('create_appointment'))
        else:
            error = "Invalid selection"
            return render_template("sitetester_home.html", error=error)
    else:
        return render_template("sitetester_home.html", error=error)
15,486
def test_sarcasm(): """Jokes should crash.<sarcasm/>""" dirty = u'Yeah right <sarcasm/>' clean = u'Yeah right &lt;sarcasm/&gt;' eq_cleaning_for_frag_and_doc(clean, dirty)
15,487
def datetime_to_fractional_year(input: datetime) -> float: """Converts a Python datetime object to a fractional year.""" start = date(input.year, 1, 1).toordinal() # type: ignore year_length = date(input.year + 1, 1, 1).toordinal() - start # type: ignore return input.year + (input.toordinal() - start) / year_length
15,488
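A quick check of the conversion, assuming the function above is in scope.

from datetime import datetime

print(datetime_to_fractional_year(datetime(2021, 1, 1)))   # 2021.0
print(datetime_to_fractional_year(datetime(2021, 7, 2)))   # roughly 2021.5 (182/365 into the year)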
def _optical_flow_to_rgb( flow: tf.Tensor, saturate_magnitude: float = -1.0, name: Optional[str] = None, ) -> tf.Tensor: """Visualize an optical flow field in RGB colorspace.""" name = name or 'OpticalFlowToRGB' hsv = _optical_flow_to_hsv(flow, saturate_magnitude, name) return tf.image.hsv_to_rgb(hsv)
15,489
def shuffle_dict(dict_1, dict_2, num_shuffles=10):
    """
    Shuffles the BedTools in the two dictionaries that you want to compare
    against each other, num_shuffles times each.

    Returns two dictionaries mapping each dataset name to its list of shuffled
    BedTools.
    """
    shuffled_dict_1 = {}
    shuffled_dict_2 = {}
    for x in range(num_shuffles):
        for dataset_name, dataset_element in dict_1.items():
            if dataset_name not in shuffled_dict_1:
                shuffled_dict_1[dataset_name] = []
            shuffled_dict_1[dataset_name].append(shuffleBedTool(dataset_name + str(x), dataset_element))
        for dataset_name, dataset_element in dict_2.items():
            if dataset_name not in shuffled_dict_2:
                shuffled_dict_2[dataset_name] = []
            shuffled_dict_2[dataset_name].append(shuffleBedTool(dataset_name + str(x), dataset_element))
    return shuffled_dict_1, shuffled_dict_2
15,490
def clear_dd2_selection(val, n_clicks): """Clear Dropdown selections for Dropdown #2 (dd2) ( Dropdown to clear #2 of 2 ) Args: val (str): cascading response via `clear_dd2_selection()` callback n_clicks: int Returns: str: Resets selections to default, blank states. """ if n_clicks > 0: app.logger.info( f"-:!:- FUNCTION('clear_dd2_selection') has been activated, and now has value 'n_clicks' = {n_clicks} & 'val' = {val}" ) if val == "None": return "None" else: return None
15,491
def set_lframe(pdict): """ Defines reference frame per residue using backbone atoms. z is normalized vector between Cb and Ca. x is perpendicular to that and Ca-N vector. y is perpendicular to the z-x plane. """ # local frame z = pdict['Cb'] - pdict['Ca'] z /= np.linalg.norm(z, axis=-1)[:,None] x = np.cross(pdict['Ca']-pdict['N'], z) x /= np.linalg.norm(x, axis=-1)[:,None] y = np.cross(z, x) y /= np.linalg.norm(y, axis=-1)[:,None] xyz = np.stack([x,y,z]) pdict['lfr'] = np.transpose(xyz, [1,0,2])
15,492
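A toy pdict with two residues; the coordinates are random, only the per-atom [n_res, 3] shapes matter for the sketch.

import numpy as np

rng = np.random.default_rng(0)
pdict = {name: rng.normal(size=(2, 3)) for name in ('N', 'Ca', 'Cb')}
set_lframe(pdict)
print(pdict['lfr'].shape)   # (2, 3, 3): one orthonormal x/y/z frame per residue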
def test_interpretation_02(reqid, expected_result): """ Action : Test mocking interpretation. Expected Results : No difference from normal application usage. Returns: N/A. """ json_parser = LoadAndParse() json_parser.data = {"services": [{"title": "ECU Reset", "id": "11"}, {"title": "Security Access", "id": "27"}]} assert json_parser.return_signal_by_title(reqid) == expected_result
15,493
def test_ggn_implementation(problem): """Compare diagonal of full GGN with diagonal of block GGN.""" problem.set_up() diag_ggn_from_full = AutogradExtensions(problem).diag_ggn_via_ggn() diag_ggn_from_block = AutogradExtensions(problem).diag_ggn() check_sizes_and_values(diag_ggn_from_full, diag_ggn_from_block) problem.tear_down()
15,494
def get_SHF_L_min_C():
    """:return: minimum sensible heat ratio of the cooling load (-)"""
    return 0.4
15,495
def make_workflow_from_user_options(): """Parser/validator for the cmd line args.""" parser = get_parser() if len(sys.argv) < 2: print('Too few arguments!') parser.print_help() parser.exit(1) # parsing try: user_args = parser.parse_args() except: parser.exit(1) vis_type = cfg.alignment_default_vis_type type_of_features = 'alignment' in_dir, in_dir_type = check_input_dir_alignment(user_args.in_dir) image1 = user_args.image1 image2 = user_args.image2 id_list, images_for_id = check_id_list(user_args.id_list, in_dir, vis_type, image1, image2, in_dir_type=in_dir_type) delay_in_animation = check_time(user_args.delay_in_animation, var_name='Delay') out_dir = check_out_dir(user_args.out_dir, in_dir) views = check_views(user_args.views) num_slices_per_view, num_rows_per_view = check_finite_int(user_args.num_slices, user_args.num_rows) outlier_method, outlier_fraction, \ outlier_feat_types, disable_outlier_detection = check_outlier_params( user_args.outlier_method, user_args.outlier_fraction, user_args.outlier_feat_types, user_args.disable_outlier_detection, id_list, vis_type, type_of_features) wf = AlignmentRatingWorkflow(id_list, in_dir, image1, image2, out_dir=out_dir, in_dir_type=in_dir_type, prepare_first=user_args.prepare_first, vis_type=vis_type, delay_in_animation=delay_in_animation, outlier_method=outlier_method, outlier_fraction=outlier_fraction, outlier_feat_types=outlier_feat_types, disable_outlier_detection=disable_outlier_detection, views=views, num_slices_per_view=num_slices_per_view, num_rows_per_view=num_rows_per_view) return wf
15,496
def get_path(obj: Union[str, pathlib.Path]) -> pathlib.Path: """Convert a str into a fully resolved & expanded Path object. Args: obj: obj to convert into expanded and resolved absolute Path obj """ return pathlib.Path(obj).expanduser().resolve()
15,497
def verify_policy_type_id(policy_type_id):
    """
    :type policy_type_id: str
    :param policy_type_id: policy type id - e.g. storage-policy-00000001

    :rtype: int
    :return: Fixed policy type ID

    :raises: ValueError: policy type id
    """
    if not re.match(r"storage-policy-\d+", policy_type_id):
        raise ValueError('{0} is not a valid policy type ID.'.format(policy_type_id))

    return int(policy_type_id.split("-")[2])
15,498
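A round-trip check, assuming IDs follow the storage-policy-XXXXXXXX pattern shown in the docstring and that re is imported at module level.

print(verify_policy_type_id("storage-policy-00000001"))   # 1
try:
    verify_policy_type_id("policy-1")
except ValueError as err:
    print(err)   # policy-1 is not a valid policy type ID.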
def f_fg_iou(results):
    """Calculates foreground IOU score.

    Args:
        results: dict with keys 'y_out' and 'y_gt', each a list of [T, H, W]
            or [H, W] binary masks.

    Returns:
        fg_iou: [B]
    """
    y_out = results['y_out']
    y_gt = results['y_gt']
    num_ex = len(y_gt)
    fg_iou = np.zeros([num_ex])
    if len(y_gt[0].shape) == 3:
        for ii in range(num_ex):
            fg_iou[ii] = f_iou(y_out[ii].max(axis=0), y_gt[ii].max(axis=0))
    else:
        for ii in range(num_ex):
            fg_iou[ii] = f_iou(y_out[ii], y_gt[ii])
    return fg_iou
15,499
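A toy call on single-frame [H, W] masks; f_iou is not shown above, so a stand-in assuming plain binary intersection-over-union is provided for the sketch.

import numpy as np

def f_iou(a, b):   # stand-in, assumed semantics
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / union if union > 0 else 0.0

y_out = [np.array([[1, 1], [0, 0]], dtype=bool)]
y_gt = [np.array([[1, 0], [0, 0]], dtype=bool)]
print(f_fg_iou({'y_out': y_out, 'y_gt': y_gt}))   # [0.5]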