def sackStringToSack(sackString):
    """
    C{sackString} is a C{str}. Returns a L{window.SACK}.
    """
    try:
        # If not enough args for split, Python raises ValueError
        joinedSackList, ackNumberStr = sackString.rsplit('|', 1)
        ackNumber = strToIntInRange(ackNumberStr, -1, 2**53)
        sackList = tuple(strToNonNegLimit(s, 2**53) for s in joinedSackList.split(',')) if joinedSackList else ()
    except ValueError:
        raise InvalidSackString("bad sack")
    return SACK(ackNumber, sackList)
9,300
def setup_graph(event, sta, chan, band, tm_shape, tm_type, wm_family, wm_type, phases, init_run_name, init_iteration, fit_hz=5, uatemplate_rate=1e-4, smoothing=0, dummy_fallback=False, raw_signals=False, init_templates=False, **kwargs): """ Set up the graph with the signal for a given training event. """ s = Sigvisa() cursor = s.dbconn.cursor() try: input_runid = get_fitting_runid(cursor, init_run_name, init_iteration, create_if_new = False) runids = (input_runid,) print "input_runid", input_runid except RunNotFoundException: runids = () sg = SigvisaGraph(template_model_type=tm_type, template_shape=tm_shape, wiggle_model_type=wm_type, wiggle_family=wm_family, phases=phases, runids = runids, uatemplate_rate=uatemplate_rate, min_mb=1.0, dummy_fallback=dummy_fallback, raw_signals=raw_signals, **kwargs) filter_str = band if not raw_signals: filter_str += ";env" wave = load_event_station_chan(event.evid, sta, chan, cursor=cursor, exclude_other_evs=True, phases=None if phases=="leb" else phases, pre_s=100.0).filter(filter_str) cursor.close() if smoothing > 0: wave = wave.filter('smooth_%d' % smoothing) if fit_hz != wave['srate']: wave = wave.filter('hz_%.2f' % fit_hz) if len(mask_blocks(wave.data.mask)) > 2: raise Exception("wave contains missing data") if (not raw_signals) and (np.sum(wave.data < 0.0001) > 10): raise Exception("wave contains regions of zeros") sg.add_wave(wave=wave, init_extra_noise=True) evnodes = sg.add_event(ev=event) eid = evnodes["lon"].eid stddevs = {"time": 2.0, "mb": 0.2} sg.observe_event(eid=eid, ev=event, stddevs=stddevs) if init_templates: fitid = get_previous_fitid(input_runid, event.evid, sta) set_templates_from_fitid(sg, 1, fitid, wave) #sg.fix_arrival_times() phases = sg.ev_arriving_phases(1, wave["sta"]) assert( "P" in phases or "Pg" in phases or "Pn" in phases or "pP" in phases) return sg
9,301
def _update_sidecar(sidecar_fname, key, val):
    """Update a sidecar JSON file with a given key/value pair.

    Parameters
    ----------
    sidecar_fname : str | os.PathLike
        Full name of the data file
    key : str
        The key in the sidecar JSON file. E.g. "PowerLineFrequency"
    val : str
        The corresponding value to change to in the sidecar JSON file.
    """
    with open(sidecar_fname, "r") as fin:
        sidecar_json = json.load(fin)
    sidecar_json[key] = val
    with open(sidecar_fname, "w") as fout:
        json.dump(sidecar_json, fout)
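A hedged usage sketch for _update_sidecar; the file name and key below follow BIDS conventions but are assumptions, not values taken from the original module.

# Hypothetical call: set the recorded power-line frequency in a sidecar file.
# "sub-01_task-rest_eeg.json" is a made-up path; json must already be imported.
_update_sidecar("sub-01_task-rest_eeg.json", "PowerLineFrequency", 60)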
9,302
def raw_dataset_fp(tmpdir: pathlib.Path) -> str:
    """Generates dataset to be used in this test.

    Returns (str): file path string for dataset to use in this tests
    """
    raw_fp = os.path.join(tmpdir, "raw_data.csv")
    random.seed(42)
    cli_synthesize_dataset(64, INPUT_FEATURES + OUTPUT_FEATURES, raw_fp)

    yield raw_fp
9,303
def insert_devices_hostname(db_connection, hostnames):
    """
    Function stores data into the database, particularly in the node_log table.

    Arguments:
        - db_connection: sqlite3 database connection
        - hostnames: Dictionary which contains MAC address as a key and hostname as a value.
    """
    for key, value in hostnames.items():
        if value:
            try:
                db_connection.execute("""
                    UPDATE agent_devices
                    SET hostname = ?
                    WHERE mac = ?
                    """, (value, key))
                db_connection.commit()
            except sqlite3.OperationalError:
                print(sqlite3.OperationalError, 'INSERT statement failed with Operational error')
9,304
def parse(owl_f, pred_list, m):
    """
    From an owl file parses subject and objects from each predicate to extract
    :param owl_f: owl file path
    :param pred_list: list of predicates to extract
    :param m: model class
    :return:
    """
    g = rdflib.Graph()
    g.load(owl_f)
    for subject, predicate, obj in g:
        for tag in pred_list:
            if tag in predicate:
                m.addinMemory(remove_string(str(subject)), remove_string(str(obj)), tag)
                print("added to memory")
9,305
def getLogisticModelNames(config):
    """
    Get the names of the models present in the configobj

    Args:
        config: configobj object defining the model and its inputs.

    Returns:
        list: list of model names.
    """
    names = []
    lmodel_space = config
    for key, value in lmodel_space.items():
        if isinstance(value, str):
            continue
        else:  # this is a model
            names.append(key)

    return names
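A minimal sketch of how the model-name extraction behaves; a plain dict stands in for the configobj object here (only .items() is used), so the keys are assumptions.

config = {
    "description": "demo run",           # plain string value -> skipped
    "nowicki_2012": {"layers": "..."},   # nested section -> treated as a model
}
print(getLogisticModelNames(config))      # ['nowicki_2012']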
9,306
def checkfileCopyright(filename):
    """ return true if file has already a Copyright in first X lines """
    infile = open(filename, 'r')
    for x in xrange(6):
        x = x
        line = infile.readline()
        if "Copyright" in line or "copyright" in line:
            return True
    return False
9,307
def TopicFormat(topic_name, topic_project=''):
    """Formats a topic name as a fully qualified topic path.

    Args:
      topic_name: (string) Name of the topic to convert.
      topic_project: (string) Name of the project the given topic belongs to.
        If not given, then the project defaults to the currently selected
        cloud project.

    Returns:
      Returns a fully qualified topic path of the form
      project/foo/topics/topic_name.
    """
    return TopicIdentifier(topic_name, topic_project).GetFullPath()
9,308
def mp0(g0):
    """Return 0th order free energy."""
    return g0.sum()
9,309
def create_diamond(color=None): """ Creates a diamond. :param color: Diamond color :type color: list :return: OpenGL list """ # noinspection PyArgumentEqualDefault a = Point3(-1.0, -1.0, 0.0) # noinspection PyArgumentEqualDefault b = Point3(1.0, -1.0, 0.0) # noinspection PyArgumentEqualDefault c = Point3(1.0, 1.0, 0.0) # noinspection PyArgumentEqualDefault d = Point3(-1.0, 1.0, 0.0) # noinspection PyArgumentEqualDefault e = Point3(0.0, 0.0, 1.0) # noinspection PyArgumentEqualDefault f = Point3(0.0, 0.0, -1.0) obj = _gl.glGenLists(1) _gl.glNewList(obj, _gl.GL_COMPILE) _gl.glPushMatrix() if color is not None: _gl.glColor4fv(color) _gl.glBegin(_gl.GL_TRIANGLES) draw_vertex_list_create_normal([a, b, e]) draw_vertex_list_create_normal([b, c, e]) draw_vertex_list_create_normal([c, d, e]) draw_vertex_list_create_normal([d, a, e]) draw_vertex_list_create_normal([b, a, f]) draw_vertex_list_create_normal([c, b, f]) draw_vertex_list_create_normal([d, c, f]) draw_vertex_list_create_normal([a, d, f]) _gl.glEnd() _gl.glPopMatrix() _gl.glEndList() return obj
9,310
def merge_dicts(*dicts):
    """
    Recursive dict merge. Instead of updating only top-level keys,
    dict_merge recurses down into dicts nested to an arbitrary depth,
    updating keys.
    """
    assert len(dicts) > 1

    dict_ = copy.deepcopy(dicts[0])

    for merge_dict in dicts[1:]:
        for k, v in merge_dict.items():
            if (k in dict_ and isinstance(dict_[k], dict)
                    and isinstance(merge_dict[k], collections.Mapping)):
                dict_[k] = merge_dicts(dict_[k], merge_dict[k])
            else:
                dict_[k] = merge_dict[k]

    return dict_
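A quick sketch of the deep-merge behaviour, assuming merge_dicts is importable from its module; the dictionaries below are made up.

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6432}, "debug": True}

merged = merge_dicts(defaults, overrides)
# Nested keys are merged rather than replaced wholesale:
# merged == {"db": {"host": "localhost", "port": 6432}, "debug": True}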
9,311
def getAllFWImageIDs(fwInvDict):
    """
    gets a list of all the firmware image IDs

    @param fwInvDict: the dictionary to search for FW image IDs
    @return: list containing string representation of the found image ids
    """
    idList = []
    for key in fwInvDict:
        if 'Version' in fwInvDict[key]:
            idList.append(key.split('/')[-1])
    return idList
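A usage sketch with a made-up firmware inventory; the object paths and fields are assumptions, not values from the original project.

fw_inv = {
    "/xyz/openbmc_project/software/abc123": {"Version": "2.8.0", "Purpose": "BMC"},
    "/xyz/openbmc_project/software/functional": {},  # no 'Version' key -> ignored
}
print(getAllFWImageIDs(fw_inv))  # ['abc123']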
9,312
def main():
    """Make a jazz noise here"""

    args = get_args()
    random.seed(args.seed)
    count_file, count_seq = 1, 0

    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)

    for fh in args.FILE:
        basename = os.path.basename(fh.name)
        out_file = os.path.join(args.outdir, basename)
        print(f' {count_file}: {basename}')
        count_file += 1

        out_fh = open(out_file, "wt")
        for rec in SeqIO.parse(fh, 'fasta'):
            if random.random() < args.pct:
                SeqIO.write(rec, out_fh, 'fasta')
                count_seq += 1
        out_fh.close()

    file_str = 'file' if len(args.FILE) <= 1 else 'files'
    print(f'Wrote {count_seq:,} sequences from {len(args.FILE)} {file_str} to directory "{args.outdir}"')
9,313
def create_dataset(dataset_path, batch_size=1, repeat_size=1, max_dataset_size=None, shuffle=True, num_parallel_workers=1, phase='train', data_dir='testA', use_S=False): """ create Mnist dataset for train or eval. dataset_path: Data path batch_size: The number of data records in each group repeat_size: The number of replicated data records num_parallel_workers: The number of parallel workers """ # define dataset and apply the transform func if phase == 'train': ds = UnalignedDataset(dataset_path, phase, max_dataset_size=max_dataset_size, shuffle=True, use_S=use_S) column_names = ["image_A", "image_B"] if use_S: column_names.append('image_S') device_num = 1 distributed_sampler = DistributedSampler(len(ds), num_replicas=device_num, rank=0, shuffle=shuffle) gan_generator_ds = GeneratorDataset(ds, column_names=column_names, sampler=distributed_sampler, num_parallel_workers=num_parallel_workers) else: data_dir = os.path.join(dataset_path, data_dir) ds = GanImageFolderDataset(data_dir, max_dataset_size=max_dataset_size) gan_generator_ds = GeneratorDataset(ds, column_names=["image", "image_name"], num_parallel_workers=num_parallel_workers) gan_generator_ds = cyclegan_transform.apply_ds(gan_generator_ds, repeat_size=repeat_size, batch_size=batch_size, num_parallel_workers=num_parallel_workers, shuffle=shuffle, phase=phase, use_S=use_S) dataset_size = len(ds) return gan_generator_ds, dataset_size
9,314
def setup_files():
    """ Clears the csv results files """
    with open("training_results.csv", 'wb') as f:
        wr = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        header = ["Subject Number", "Round Number", "Dominant Eye", "Pair Number", "Name 1", "Name 2"]
        wr.writerow(header)
    with open("memory_results_before.csv", 'wb') as f:
        wr = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        header = ["Subject Number", "Round Number", "Name 1", "Remembered Name 1",
                  "Name 2", "Remembered Name 2", "Foil Name 1", "Foil Name 2"]
        wr.writerow(header)
    with open("memory_results_after.csv", 'wb') as f:
        wr = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        header = ["Subject Number", "Round number", "Name 1", "Remembered Name 1",
                  "Name 2", "Remembered Name 2", "Foil Name 1", "Foil Name 2"]
        wr.writerow(header)
    if os.path.exists("subject logs"):
        shutil.rmtree("subject logs")
    os.makedirs("subject logs")
    with open('subject logs/catch trials.csv', 'wb') as f:
        wr = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        header = ["Subject Number", "Visible seen (should be > 14)",
                  "Invisible seen (should be < 2)", "Invalid Trials"]
        wr.writerow(header)
9,315
def save_model(model, save_path):
    """Saves model with pickle

    Parameters
    ----------
    model : object
        trained ML model
    save_path : str
        save directory
    """
    with open(save_path, 'wb') as outfile:
        dump(model, outfile)
    print('Model saved at:', save_path)
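A minimal usage sketch, assuming `dump` above refers to pickle.dump (or a compatible serializer); the stand-in model object and path are hypothetical.

model = {"weights": [0.1, 0.2, 0.7]}   # any picklable object stands in for a trained model
save_model(model, "model.pkl")         # prints: Model saved at: model.pkl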
9,316
def document_uris_from_data(document_data, claimant): """ Return one or more document URI dicts for the given document data. Returns one document uri dict for each document equivalence claim in document_data. Each dict can be used to init a DocumentURI object directly:: document_uri = DocumentURI(**document_uri_dict) Always returns at least one "self-claim" document URI whose URI is the claimant URI itself. :param document_data: the "document" sub-object that was POSTed to the API as part of a new or updated annotation :type document_data: dict :param claimant: the URI that the browser was at when this annotation was created (the top-level "uri" field of the annotation) :type claimant: unicode :returns: a list of one or more document URI dicts :rtype: list of dicts """ document_uris = document_uris_from_links(document_data.get("link", []), claimant) document_uris.extend( document_uris_from_highwire_pdf(document_data.get("highwire", {}), claimant) ) document_uris.extend( document_uris_from_highwire_doi(document_data.get("highwire", {}), claimant) ) document_uris.extend(document_uris_from_dc(document_data.get("dc", {}), claimant)) document_uris.append(document_uri_self_claim(claimant)) for document_uri in document_uris: uri = document_uri["uri"] if uri: document_uri["uri"] = uri.strip() document_uris = [d for d in document_uris if d["uri"]] return document_uris
9,317
def get_bond_angle_index(edge_index):
    """
    edge_index: (2, E)
    bond_angle_index: (3, *)
    """
    def _add_item(
            node_i_indices, node_j_indices, node_k_indices,
            node_i_index, node_j_index, node_k_index):
        node_i_indices += [node_i_index, node_k_index]
        node_j_indices += [node_j_index, node_j_index]
        node_k_indices += [node_k_index, node_i_index]

    E = edge_index.shape[1]
    node_i_indices = []
    node_j_indices = []
    node_k_indices = []
    for edge_i in range(E - 1):
        for edge_j in range(edge_i + 1, E):
            a0, a1 = edge_index[:, edge_i]
            b0, b1 = edge_index[:, edge_j]
            if a0 == b0 and a1 == b1:
                continue
            if a0 == b1 and a1 == b0:
                continue
            if a0 == b0:
                _add_item(node_i_indices, node_j_indices, node_k_indices, a1, a0, b1)
            if a0 == b1:
                _add_item(node_i_indices, node_j_indices, node_k_indices, a1, a0, b0)
            if a1 == b0:
                _add_item(node_i_indices, node_j_indices, node_k_indices, a0, a1, b1)
            if a1 == b1:
                _add_item(node_i_indices, node_j_indices, node_k_indices, a0, a1, b0)
    node_ijk = np.array([node_i_indices, node_j_indices, node_k_indices])
    uniq_node_ijk = np.unique(node_ijk, axis=1).astype('int64')   # (3, *)
    return uniq_node_ijk
9,318
def get_config(section=None, option=None):
    """Return dpm configuration objects.

    :param section: the name of the section in the ini file, e.g. "index:ckan".
        - May be omitted only when no other parameters are provided
        - Must be omitted elsewhere
    :type section: str
    :param option: the name of the option to be retrieved from the section of
        the ini file, e.g. 'ckan.api_key'
        - Can be omitted if a section is provided
        - Must be omitted if no section is provided
    :type option: str
    :return:
        [str, str, .., str] -- The section names of the ini file, when no section
        and no option are provided -- e.g. ['dpm', 'index:ckan', 'index:db', 'upload:ckan']
        [str, str, .., str] -- The option names of the ini file for a given section
        -- e.g. ['ckan.url', 'ckan.api_key']
        [str] -- The option value if a valid section and a valid option name are given.
        -- e.g. ['http://thedatahub.org/api/']
    """
    if not section and not option:
        return dpm.CONFIG.sections()
    elif section and not option:
        return dpm.CONFIG.options(section)
    elif section and option:
        return dpm.CONFIG.get(section, option)
    else:
        raise ValueError("Please provide no parameters OR just section OR both section and option")
9,319
def display_pixels(pixels_states, instance, fg=[255, 255, 255], bg=[0, 0, 0]):
    """Given the states of each pixels (1 = on, 0 = off) in a list,
    we display the list on the led matrix"""
    instance.set_pixels([fg if state else bg for state in pixels_states])
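A hypothetical Sense HAT usage; the SenseHat import and the 8x8 checkerboard pattern below are assumptions, not part of the original code.

from sense_hat import SenseHat

hat = SenseHat()
checkerboard = [(x + y) % 2 for y in range(8) for x in range(8)]  # 64 on/off states
display_pixels(checkerboard, hat, fg=[255, 0, 0], bg=[0, 0, 0])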
9,320
def update_repo(name, repo, variant, registry, prefix): """ 1. generate the kiwi file 2. pull down kiwi file from server 3. if same, skip 4. bco the project/package to a temp location 5. oosc ar 6. display diff 7. prompt for changelog entry (reuse result for other repos for same image) 8. oosc commit """ if repo.startswith("obs://"): osc = OOSC else: osc = IOSC repo = repo[6:] print(name, repo, variant) curr = os.getcwd() wip_dir = os.path.join(curr, "wip") tmpl_dir = os.path.join(curr, TEMPLATES_DIR) curr_kiwi = None new_kiwi = None try: rq = osc.api("-X", "GET", "/source/{0}/{1}/{1}.kiwi".format(repo, name)) curr_kiwi = rq.stdout.decode('utf-8') except sh.ErrorReturnCode as err: #print(err) #print("Skipping {}/{}...".format(repo, name)) #return pass try: rq = sh.xsltproc(os.path.join(tmpl_dir, name, "{}.xsl".format(name)), os.path.join(tmpl_dir, name, "{}.xml".format(variant))) new_kiwi = rq.stdout.decode('utf-8') new_kiwi = new_kiwi.replace("{}/".format(registry), "{}{}".format(registry, prefix)) new_kiwi = new_kiwi.replace("obsrepositories:/ceph/ceph", "obsrepositories:{}ceph/ceph".format(prefix)) if curr_kiwi == new_kiwi: print("Skipping {}/{}: no difference".format(repo, name)) return except sh.ErrorReturnCode as err: print(err) print("Skipping {}/{}...".format(repo, name)) return try: try: os.mkdir(wip_dir) except os.error: pass os.chdir(wip_dir) osc.bco(repo, name) os.chdir(os.path.join(wip_dir, BRANCHBASE.format(repo), name)) with open("{}.kiwi".format(name), "w") as f: f.write(new_kiwi) # copy updated template files as well for f in os.listdir(os.path.join(tmpl_dir, name)): shutil.copyfile(os.path.join(tmpl_dir, name, f), os.path.join(wip_dir, BRANCHBASE.format(repo), name, f)) osc.ar() try: msg = sys.argv[sys.argv.index("-m") + 1] osc.commit("-m", msg) osc.sr("-m", msg) except ValueError as err: pass finally: os.chdir(curr)
9,321
def match_intervals(intervals_from, intervals_to, strict=True): """Match one set of time intervals to another. This can be useful for tasks such as mapping beat timings to segments. Each element ``[a, b]`` of ``intervals_from`` is matched to the element ``[c, d]`` of ``intervals_to`` which maximizes the Jaccard similarity between the intervals:: max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)| In ``strict=True`` mode, if there is no interval with positive intersection with ``[a,b]``, an exception is thrown. In ``strict=False`` mode, any interval ``[a, b]`` that has no intersection with any element of ``intervals_to`` is instead matched to the interval ``[c, d]`` which minimizes:: min(|b - c|, |a - d|) that is, the disjoint interval [c, d] with a boundary closest to [a, b]. .. note:: An element of ``intervals_to`` may be matched to multiple entries of ``intervals_from``. Parameters ---------- intervals_from : np.ndarray [shape=(n, 2)] The time range for source intervals. The ``i`` th interval spans time ``intervals_from[i, 0]`` to ``intervals_from[i, 1]``. ``intervals_from[0, 0]`` should be 0, ``intervals_from[-1, 1]`` should be the track duration. intervals_to : np.ndarray [shape=(m, 2)] Analogous to ``intervals_from``. strict : bool If ``True``, intervals can only match if they intersect. If ``False``, disjoint intervals can match. Returns ------- interval_mapping : np.ndarray [shape=(n,)] For each interval in ``intervals_from``, the corresponding interval in ``intervals_to``. See Also -------- match_events Raises ------ ParameterError If either array of input intervals is not the correct shape If ``strict=True`` and some element of ``intervals_from`` is disjoint from every element of ``intervals_to``. Examples -------- >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]]) >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]]) >>> librosa.util.match_intervals(ints_from, ints_to) array([2, 1, 2], dtype=uint32) >>> # [3, 5] => [4, 5] (ints_to[2]) >>> # [1, 4] => [1, 3] (ints_to[1]) >>> # [4, 5] => [4, 5] (ints_to[2]) The reverse matching of the above is not possible in ``strict`` mode because ``[6, 7]`` is disjoint from all intervals in ``ints_from``. With ``strict=False``, we get the following: >>> librosa.util.match_intervals(ints_to, ints_from, strict=False) array([1, 1, 2, 2], dtype=uint32) >>> # [0, 2] => [1, 4] (ints_from[1]) >>> # [1, 3] => [1, 4] (ints_from[1]) >>> # [4, 5] => [4, 5] (ints_from[2]) >>> # [6, 7] => [4, 5] (ints_from[2]) """ if len(intervals_from) == 0 or len(intervals_to) == 0: raise ParameterError("Attempting to match empty interval list") # Verify that the input intervals has correct shape and size valid_intervals(intervals_from) valid_intervals(intervals_to) try: return __match_intervals(intervals_from, intervals_to, strict=strict) except ParameterError as exc: raise ParameterError( "Unable to match intervals with strict={}".format(strict) ) from exc
9,322
def correlate(x, y, margin, method='pearson'): """ Find delay and correlation between x and each column o y Parameters ---------- x : `pandas.Series` Main signal y : `pandas.DataFrame` Secondary signals method : `str`, optional Correlation method. Defaults to `pearson`. Options: `pearson`,`robust`,`kendall`,`spearman` Returns ------- `(List[float], List[int])` List of correlation coefficients and delays in samples in the same order as y's columns Notes ----- Uses the pandas method corrwith (which can return pearson, kendall or spearman coefficients) to correlate. If robust correlation is used, the mapping presented in [1]_ is used and then Pearson correlation is used. To speedup the lag finding, the delays are calculated in log intervals and then interpolated by splines, as shown in [2]_, and the lag with maximum correlation found in this interpolated function is then used as the delay. References ---------- .. [1] Raymaekers, J., Rousseeuw, P. "Fast Robust Correlation for High-Dimensional Data", Technometrics, vol. 63, Pages 184-198, 2021 .. [2] Sakurai, Yasushi & Papadimitriou, Spiros & Faloutsos, Christos. (2005). BRAID: Stream mining through group lag correlations. Proceedings of the ACM SIGMOD International Conference on Management of Data. 599-610. """ beg, end = (x.index.min(), x.index.max()) y = interpolate(y,x.index,margin) if(method == 'robust'): method='pearson' x = pd.Series(z(sig.detrend(x)), index=x.index, name=x.name) x = x.apply(g) y = y.apply(lambda s: z(sig.detrend(s))).applymap(g) N = int(x.size*margin) l = int(np.log2(N)) b = 4 log_lags = np.array([int(2**i+(j*2**i/b)) for i in range(2,l+1) for j in range(4) if 2**i+(j*2**i/b) < N]) log_lags = list(-1*log_lags)[::-1]+[-3,-2,-1,0,1,2,3]+list(log_lags) new_lags = list(range(-1*max(log_lags),max(log_lags)+1)) vals = pd.DataFrame([lagged_corr(x,y,lag,method) for lag in log_lags]) vals = vals.apply(lambda s: inter.make_interp_spline(log_lags, abs(s),k=3)(new_lags)) peaks = vals.apply(lambda s: pd.Series([new_lags[i] for i in sig.find_peaks(s)[0]]+[new_lags[max(range(len(s)), key=s.__getitem__)]]).drop_duplicates()) peak_corr = pd.DataFrame(np.array([[x.corr((y[col].shift(int(peak)))[beg:end], method=method) if not pd.isna(peak) else 0 for peak in peaks[col]] for col in peaks]).transpose(), columns=y.columns) dela = [peak_corr[col].abs().idxmax() for col in peak_corr] delays = [int(peaks[col].iloc[dela[pos]]) for pos, col in enumerate(peak_corr)] corrs = [round(peak_corr[col].iloc[dela[pos]],2) for pos, col in enumerate(peak_corr)] return corrs, delays
9,323
def write_question_settings(session, settings, question_class, json_path):
    # type: (Session, Dict[str, Any], str, Optional[List[str]]) -> None
    """Writes settings for a question class."""
    if not session.network:
        raise ValueError("Network must be set to write question class settings")
    json_path_tail = "/".join(json_path) if json_path else ""
    url_tail = "/{}/{}/{}/{}/{}/{}".format(
        CoordConstsV2.RSC_NETWORKS,
        session.network,
        CoordConstsV2.RSC_SETTINGS,
        CoordConstsV2.RSC_QUESTIONS,
        question_class,
        json_path_tail,
    )
    _put_json(session, url_tail, settings)
9,324
def sortList2(head: ListNode) -> ListNode:
    """down2up"""
    h, length, intv = head, 0, 1
    while h:
        h, length = h.next, length + 1
    res = ListNode(0)
    res.next = head
    # merge the list in different intv.
    while intv < length:
        pre, h = res, res.next
        while h:
            # get the two merge head `h1`, `h2`
            h1, i = h, intv
            while i and h:
                h, i = h.next, i - 1
            if i:
                break  # no need to merge because the `h2` is None.
            h2, i = h, intv
            while i and h:
                h, i = h.next, i - 1
            c1, c2 = intv, intv - i  # the `c2`: length of `h2` can be small than the `intv`.
            # merge the `h1` and `h2`.
            while c1 and c2:
                if h1.val < h2.val:
                    pre.next, h1, c1 = h1, h1.next, c1 - 1
                else:
                    pre.next, h2, c2 = h2, h2.next, c2 - 1
                pre = pre.next
            pre.next = h1 if c1 else h2
            while c1 > 0 or c2 > 0:
                pre, c1, c2 = pre.next, c1 - 1, c2 - 1
            pre.next = h
        intv *= 2
    return res.next
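A small sketch of driving the bottom-up merge sort above, assuming the usual singly linked ListNode with val and next attributes (the class below is a minimal stand-in).

class ListNode:  # minimal stand-in for the assumed ListNode type
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

head = ListNode(4, ListNode(2, ListNode(1, ListNode(3))))
node = sortList2(head)
while node:
    print(node.val, end=" ")  # 1 2 3 4
    node = node.next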
9,325
def _inverse_frequency_max(searcher, fieldname, term):
    """ Inverse frequency smooth idf schema """
    n = searcher.doc_frequency(fieldname, term)
    maxweight = searcher.term_info(fieldname, term).max_weight()
    return log(1 + (maxweight / n), 10) if n != 0.0 else 0.0
9,326
def main():
    """
    This function obtains hosts from core and starts a nessus scan on these hosts.
    The nessus tag is appended to the host tags.
    """
    config = Config()
    core = HostSearch()
    hosts = core.get_hosts(tags=['!nessus'], up=True)
    hosts = [host for host in hosts]
    host_ips = ",".join([str(host.address) for host in hosts])

    url = config.get('nessus', 'host')
    access = config.get('nessus', 'access_key')
    secret = config.get('nessus', 'secret_key')
    template_name = config.get('nessus', 'template_name')

    nessus = Nessus(access, secret, url, template_name)

    scan_id = nessus.create_scan(host_ips)
    nessus.start_scan(scan_id)

    for host in hosts:
        host.add_tag('nessus')
        host.save()

    Logger().log("nessus", "Nessus scan started on {} hosts".format(len(hosts)), {'scanned_hosts': len(hosts)})
9,327
def test_ap_config_set_errors(dev, apdev): """hostapd configuration parsing errors""" hapd = hostapd.add_ap(apdev[0], { "ssid": "foobar" }) hapd.set("wep_key0", '"hello"') hapd.set("wep_key1", '"hello"') hapd.set("wep_key0", '') hapd.set("wep_key0", '"hello"') if "FAIL" not in hapd.request("SET wep_key1 \"hello\""): raise Exception("SET wep_key1 allowed to override existing key") hapd.set("wep_key1", '') hapd.set("wep_key1", '"hello"') hapd.set("auth_server_addr", "127.0.0.1") hapd.set("acct_server_addr", "127.0.0.1") tests = [ "SET eap_reauth_period -1", "SET fst_llt ", "SET auth_server_addr_replace foo", "SET acct_server_addr_replace foo" ] for t in tests: if "FAIL" not in hapd.request(t): raise Exception("Invalid command accepted: " + t) # Deprecated entries hapd.set("tx_queue_after_beacon_aifs", '2') hapd.set("tx_queue_beacon_aifs", '2') hapd.set("tx_queue_data9_aifs", '2') hapd.set("debug", '1') hapd.set("dump_file", '/tmp/hostapd-test-dump') hapd.set("eap_authenticator", '0') hapd.set("radio_measurements", '0') hapd.set("radio_measurements", '1') # Various extra coverage (not really errors) hapd.set("logger_syslog_level", '1') hapd.set("logger_syslog", '0') for i in range(50000): if "OK" not in hapd.request("SET hs20_conn_capab 17:5060:0"): logger.info("hs20_conn_capab limit at %d" % i) break if i < 1000 or i >= 49999: raise Exception("hs20_conn_capab limit not seen")
9,328
def ha(data):
    """
    Hadamard Transform

    This function is very slow. Implement a Fast Walsh-Hadamard Transform
    with sequency/Walsh ordering (FWHT_w) for faster transforms.

    See:
    http://en.wikipedia.org/wiki/Walsh_matrix
    http://en.wikipedia.org/wiki/Fast_Hadamard_transform
    """
    # implementation is a proof of concept and EXTREMELY SLOW

    # determine the order and final size of input vectors
    ord = int(np.ceil(np.log2(data.shape[-1])))  # Walsh/Hadamard order
    max = 2**ord

    # zero fill to power of 2
    pad = max - data.shape[-1]
    zdata = zf(data, pad)

    # Multiply each vector by the hadamard matrix
    nat = np.zeros(zdata.shape, dtype=zdata.dtype)
    H = hadamard(max)
    nat = np.dot(zdata, H)
    nat = np.array(nat, dtype=data.dtype)

    # Bit-Reversal Permutation
    s = [int2bin(x, digits=ord)[::-1] for x in range(max)]
    brp = [bin2int(x) for x in s]
    brp_data = np.take(nat, brp, axis=-1)

    # Gray code permutation (bit-inverse)
    gp = gray(ord)
    gp_data = np.take(brp_data, gp, axis=-1)

    return gp_data
9,329
def most_repeated_character(string: str) -> str:
    """
    Find the most repeated character in a string.
    :param string:
    :return:
    """
    map: Dict[str, int] = {}
    for letter in string:
        if letter not in map:
            map[letter] = 1
        else:
            map[letter] += 1
    return sorted(map.items(), key=lambda item: item[1], reverse=True)[0][0]
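Expected behaviour, as a sketch; when counts tie, the winner depends on iteration order, so the second assertion is deliberately loose.

assert most_repeated_character("banana") == "a"              # 'a' occurs 3 times
assert most_repeated_character("mississippi") in ("i", "s")  # both occur 4 times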
9,330
def read_config(config_filename):
    """Read the expected system configuration from the config file."""
    config = None
    with open(config_filename, 'r') as config_file:
        config = json.loads(config_file.read())

    config_checks = []

    for config_check in config:
        if '_comment' in config_check:
            continue

        #Config MUST specify a description of the check
        description = config_check['description']
        write_str("Description: %s" % description, debug=True)

        #Config MUST indicate the confidence of the configuration check
        confidence = config_check['confidence']

        #Config MUST include at least one test obj
        tests = config_check['tests']

        #Config MUST specify a fix object
        assert 'fix' in config_check
        assert isinstance(config_check['fix'], dict)

        #Fix object must specify at least one of these:
        #command, sudo_command, manual
        assert ('command' in config_check['fix'] or
                'sudo_command' in config_check['fix'] or
                'manual' in config_check['fix'])
        fix = None
        sudo_fix = None
        manual_fix = None
        if 'command' in config_check['fix']:
            fix = config_check['fix']['command']
        if 'sudo_command' in config_check['fix']:
            sudo_fix = config_check['fix']['sudo_command']
        if 'manual' in config_check['fix']:
            manual_fix = config_check['fix']['manual']

        config_check_obj = ConfigCheck(
            tests=tests,
            description=description,
            confidence=confidence,
            fix=fix,
            sudo_fix=sudo_fix,
            manual_fix=manual_fix)
        config_checks.append(config_check_obj)

    return config_checks
9,331
def transitions(bits):
    """Count the number of transitions in a bit sequence.

    >>> assert transitions([0, 0]) == 0
    >>> assert transitions([0, 1]) == 1
    >>> assert transitions([1, 1]) == 0
    >>> assert transitions([1, 0]) == 1

    >>> assert transitions([0, 0, 0]) == 0
    >>> assert transitions([0, 1, 0]) == 2
    >>> assert transitions([1, 1, 0]) == 1
    >>> assert transitions([1, 0, 0]) == 1
    >>> assert transitions([0, 0, 1]) == 1
    >>> assert transitions([0, 1, 1]) == 1
    >>> assert transitions([1, 1, 1]) == 0
    >>> assert transitions([1, 0, 1]) == 2
    """
    transitions = 0
    for i in range(0, len(bits) - 1):
        if bits[i] != bits[i + 1]:
            transitions += 1
    return transitions
9,332
def _convert_code(code):
    """
    Convert a JoinQuant-style security code into the xalpha form.
    :param code:
    :return:
    """
    no, mk = code.split(".")
    if mk == "XSHG":
        return "SH" + no
    elif mk == "XSHE":
        return "SZ" + no
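Round-trip examples of the code conversion (JoinQuant-style exchange suffixes assumed):

assert _convert_code("600000.XSHG") == "SH600000"  # Shanghai exchange
assert _convert_code("000001.XSHE") == "SZ000001"  # Shenzhen exchange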
9,333
def _is_arg_name(s, index, node):
    """Search for the name of the argument. Right-to-left."""
    if not node.arg:
        return False
    return s[index : index + len(node.arg)] == node.arg
9,334
def fitNoise(Sm_bw, L, K, Gamma):
    """ Estimate noise parameters

    Parameters
    ----------
    Sm_bw : float array
        Sufficient statistics from measurements
    Sm_fw : float array
        Wavefield explained by modeled waves

    Returns
    -------
    BIC : float
        Value of the Bayesian information criterion
    sigma2_ML : float array
        Noise variance estimated from residual signal. One dimensional array of length L.
    """
    Sm_fw = zeros(shape(Sm_bw))
    (sigma2, SlnGamma_bw) = estimateNoise(Sm_bw, Sm_fw)
    LogLikelihood = sum(SlnGamma_bw)

    NumParameters = 3 * L
    NumPoints = K * L
    BIC = -2 * LogLikelihood + Gamma * NumParameters * log(NumPoints)
    sigma2_ML = sigma2

    logging.debug('Additive Gaussian noise fit')
    logging.debug('\tLL: {0:.3e} BIC: {1:.3e}'.format(LogLikelihood, BIC))
    logging.debug('\tsigma2: {0:.2e} ... {1:.2e} ... {2:.2e}'.format(min(sigma2), mean(sigma2), max(sigma2)))

    return (BIC, sigma2_ML)
9,335
def compress_dir(outpath, deleteold=True):
    """ Compress the directory of outpaths """
    outpath = str(outpath)
    file = outpath.split('/')[-1]
    # print(f'Compressing from these files: {os.listdir(outpath)}')
    # print('compressing directory: {}'.format(outpath))
    t0 = time.time()
    with tarfile.open('{}.tar.gz'.format(outpath), 'w:gz') as tar:
        tar.add(outpath, arcname=file)
    if deleteold:
        shutil.rmtree(outpath)
    print('compressed directory in {} seconds'.format(time.time() - t0))
    return
9,336
def run_command(cmd, debug=False):
    """
    Execute the given command and return None.

    :param cmd: A `sh.Command` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: ``sh`` object
    """
    if debug:
        print_debug('COMMAND', str(cmd))
    return cmd()
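A hypothetical call using the sh library named in the docstring; the baked ls command is only an example.

import sh

run_command(sh.ls.bake("-la"), debug=True)  # logs the command via print_debug, then runs it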
9,337
def goodness(signal, freq_range=None, D=None):
    """Compute the goodness of pitch of a signal."""
    if D is None:
        D = libtfr.dpss(len(signal), 1.5, 1)[0]
    signal = signal * D[0, :]
    if freq_range is None:
        freq_range = 256
    if np.all(signal == 0):
        return 0
    else:
        return np.max(cepstrum(signal)[25:freq_range])
9,338
def set_request_path(db_object_id=None, request_path=None, **kwargs): """Sets the request_path of the given db_object Args: db_object_id (int): The id of the schema to list the db_objects from request_path (str): The request_path that should be set **kwargs: Additional options Keyword Args: session (object): The database session to use interactive (bool): Indicates whether to execute in interactive mode Returns: None """ session = kwargs.get("session") interactive = kwargs.get("interactive", True) try: session = core.get_current_session(session) # Make sure the MRS metadata schema exists and has the right version core.ensure_rds_metadata_schema(session) # Get the object with the given id or let the user select it db_object = get_db_object(db_object_id=db_object_id, session=session, interactive=interactive) if not db_object: return if not request_path and interactive: request_path = core.prompt( "Please enter a new request_path " f"for the db_object {db_object.get('name')} " f"[{db_object.get('request_path')}]: ", {"defaultValue": db_object.get('request_path')}) if request_path == db_object.get('request_path'): if interactive: print("The request_path was left unchanged.") return # Ensure the new request_path is unique schema = mrs_schemas.get_schema( schema_id=db_object.get("db_schema_id"), session=session, interactive=False, return_formatted=False) core.check_request_path( schema.get("host_ctx") + schema.get("request_path") + request_path, session=session) res = session.run_sql(""" UPDATE mysql_rest_service_metadata.db_object SET request_path = ? WHERE id = ? """, [request_path, db_object.get("id")]) if res.get_affected_row_count() != 1: raise Exception("Could not update the db_object.") if interactive: print(f"The db_object {db_object.get('name')} was updated " "successfully.") except Exception as e: if interactive: print(f"Error: {str(e)}") else: raise
9,339
def checkForRawFile(op, graph, frm, to):
    """
    Confirm the source is a raw image.
    :param op:
    :param graph:
    :param frm:
    :param to:
    :return:
    @type op: Operation
    @type graph: ImageGraph
    @type frm: str
    @type to: str
    """
    snode = graph.get_node(frm)
    exifdata = exif.getexif(os.path.join(graph.dir, snode['file']))
    if 'File Type' in exifdata and exifdata['File Type'] in [
            'AA', 'AAX', 'ACR', 'AI', 'AIT', 'AFM', 'ACFM', 'AMFM', 'PDF', 'PS',
            'AVI', 'APE', 'ASF', 'BMP', 'DIB' 'BPG', 'PNG', 'JPEG', 'GIF', 'DIVX',
            'DOC', 'DOCX', 'DV', 'EXV', 'F4V', 'F4A', 'F4P', 'F4B', 'EXR', 'HDR',
            'FLV', 'FPF', 'FLAC', 'FLA', 'FFF', 'IDML', 'J2C', 'JPC', 'JP2', 'JPF',
            'J2K', 'JPX', 'JPM', 'JPE', 'JPG', 'LA', 'LFP', 'MP4', 'MP3', 'M2TS',
            'MTS', 'M2T', 'TS', 'M4A', 'M4B', 'M4P', 'M4V', 'MAX', 'MOV', 'QT',
            'O', 'PAC', 'MIFF', 'MIF', 'MIE', 'JNG', 'MNG', 'PPT', 'PPS', 'QIF',
            'QTI', 'QTIF', 'RIF', 'RIFF', 'SWF', 'VOB', 'TTF', 'TTC', 'SWF', 'SEQ',
            'WEBM', 'WEBP']:
        return (Severity.ERROR, 'Only raw images permitted for this operation')
    return None
9,340
def check_main_dependent_group(setup_contents: str) -> None:
    """
    Test for an order of dependencies groups between mark
    '# Start dependencies group' and '# End dependencies group' in setup.py
    """
    print("[blue]Checking main dependency group[/]")
    pattern_main_dependent_group = re.compile(
        '# Start dependencies group\n(.*)# End dependencies group', re.DOTALL
    )
    main_dependent_group = pattern_main_dependent_group.findall(setup_contents)[0]
    pattern_sub_dependent = re.compile(r' = \[.*?]\n', re.DOTALL)
    main_dependent = pattern_sub_dependent.sub(',', main_dependent_group)
    src = main_dependent.strip(',').split(',')
    _check_list_sorted(src, "Order of dependencies")
    for group in src:
        check_sub_dependent_group(group)
9,341
def createComparativeSurfPlot(dim, path2SignDir, subSignList, modFileNameList): """ @param[in] ### dim ### integer corresponding to the dimension of the approximation domain @param[in] ### path2SignDir ### string giving the path to the directory of the training session which should be investigated @param[in] ### subSignList ### list of strings giving the subsignatures of those directories which contain the data which should be depicted @param[in] ### modFileNameList ### a list containing strings corresponding to the name of the files containing the model data, i.e. parameters, ... create surface plots depicting the exact function and two approximations. NOTE: > several things have to be adjusted by 'hand', this is the approximation domain and the representation of the exact function > the elements in modFileNameList do not contain any extension! """ modFileDirName = 'model' setupFileName = 'setup.txt' N = len(subSignList) M = 150 if N <= 2: dim = 2 # default parameters rank = 1 degrs = [2, 2] boundingBox = [] boundingBox.append((-0.5, 0.5)) boundingBox.append((-0.5, 0.5)) # boundingBox.append((0.25, 0.75)) # boundingBox.append((0.25, 0.75)) # boundingBox.append((0, 1.0)) # boundingBox.append((0, 1.0)) x1 = np.linspace(boundingBox[0][0], boundingBox[0][1], M) x2 = np.linspace(boundingBox[1][0], boundingBox[1][1], M) [xx1, xx2] = np.meshgrid(x1, x2) zz = np.zeros((M, M)) # zz = xx1 ** 2 + xx2 ** 2 zz = np.sin(50 * xx1 * xx2) # list containing the surface data zList = [] # list containing parameter details used for the title rankList = [] degrList = [] numDataPtsList = [] for i in range(0, N): # ### read parameter settings from setup file # first read stuff from setup file ... path2SubDir = path2SignDir + '/' + subSignList[i] path2SetupFile = path2SubDir + '/' + setupFileName stpFile = open(path2SetupFile, 'r') for line in stpFile: if 'numDataPoints' in line: numDataPtsList.append(np.int(re.findall('\d+$', line)[0])) elif 'rank' in line: rankList.append(np.int(re.findall('\d+$', line)[0])) elif 'degrCrdnt1' in line: degrList.append(np.int(re.findall('\d+$', line)[0])) stpFile.close() # ### read model from file path2ModDir = path2SubDir + '/' + modFileDirName modelFunc = MyCPDRadialAnsatzFunc(dim, boundingBox, rank, degrs) modelFunc.readParamsFromFile(path2ModDir, modFileNameList[i]) xList = [] xList.append(xx1) xList.append(xx2) zList.append(modelFunc.evaluate2d(xList, M)) del modelFunc fig = plt.figure(figsize = (15, 4.8)) plt.rc('axes', titlesize = 8) plt.rc('axes', labelsize = 8) plt.rc('xtick', labelsize = 6) plt.rc('ytick', labelsize = 6) ax = fig.add_subplot(1, N + 1, 1, projection = '3d') # surf = ax.plot_surface(xx1, xx2, zz, antialiased = False, rstride = 1, cstride = 1, cmap = cm.coolwarm) surf = ax.plot_surface(xx1, xx2, zz, cmap = cm.coolwarm) ax.set_zlim(-1.2, 1.2) ax.view_init(elev = 30, azim = 255) ax.set_xlabel('x1') ax.set_ylabel('x2') ax.set_title('Graph of f') for i in range(0, N): ax = fig.add_subplot(1, N + 1, i + 2, projection = '3d') # surf = ax.plot_surface(xx1, xx2, zList[i], antialiased = False, rstride = 1, cstride = 1, cmap = cm.coolwarm) surf = ax.plot_surface(xx1, xx2, zList[i], cmap = cm.coolwarm) ax.set_zlim(-1.2, 1.2) ax.view_init(elev = 30, azim = 255) ax.set_xlabel('x1') ax.set_ylabel('x2') ax.set_title('Approximation to (r, N, m) = (' + str(rankList[i]) + ', ' + str(degrList[i]) + ', ' + str(np.int(numDataPtsList[i] / (2 * rankList[i] * degrList[i]))) + ')') # plt.subplots_adjust(wspace = 1.1) plt.show()
9,342
def register_relation_in_alias_manager(field: Type["ForeignKeyField"]) -> None:
    """
    Registers the relation (and reverse relation) in alias manager.
    The m2m relations require registration of through model between
    actual end models of the relation.

    Delegates the actual registration to:
    m2m - register_many_to_many_relation_on_build
    fk - register_relation_on_build

    :param field: relation field
    :type field: ForeignKey or ManyToManyField class
    """
    if field.is_multi:
        if field.has_unresolved_forward_refs():
            return
        field = cast(Type["ManyToManyField"], field)
        register_many_to_many_relation_on_build(field=field)
    elif field.is_relation and not field.is_through:
        if field.has_unresolved_forward_refs():
            return
        register_relation_on_build(field=field)
9,343
def test_modelize_summary(): """ Report document should be correctly modelized from given report """ reports = [ ( "/html/foo.html", { "messages": [ "ping", "pong", ], "statistics": {"foo": 1, "bar": 1}, "dummyvar": ["michou"], } ), ( "/html/bar.html", { "messages": [ "pang", "pung", ], "statistics": {"foo": 0, "bar": 1}, "dummyvar": ["dumdum"], } ), ] expected = { "document": "foo.html", "context": { "kind": "summary", "metas": { "pac": "man", }, "statistics": { "foo": 1, "bar": 2 }, "paths": [ { "name": "/html/foo.html", "path": "path-1.txt", "statistics": { "foo": 1, "bar": 1 }, }, { "name": "/html/bar.html", "path": "path-2.txt", "statistics": { "foo": 0, "bar": 1 }, } ] } } exporter = ExporterRenderer() doc = exporter.modelize_summary("foo.html", reports, {"pac": "man"}) #print() #print(json.dumps(doc, indent=4, default=str)) #print() assert doc == expected
9,344
def get_ip_address(dev="eth0"):
    """Retrieves the IP address via SIOCGIFADDR - only tested on Linux."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,
            struct.pack('256s', dev[:15]))[20:24])
    except:
        return None
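Usage sketch (Linux only, as the docstring notes); the interface name is an assumption.

ip = get_ip_address("eth0")
if ip is None:
    print("could not determine the address of eth0")
else:
    print("eth0 is bound to", ip)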
9,345
def connect(**kwargs):  # pylint: disable=unused-argument
    """ mock get-a-connection """
    return MockConn()
9,346
def new_organization(request):
    """Creates a new organization."""
    if request.method == 'POST':
        new_organization_form = OrganizationForm(request.POST)
        if new_organization_form.is_valid():
            new_organization = new_organization_form.save(commit=False)
            new_organization.owner = request.user
            new_organization.save()
            new_organization.editors.add(request.user)
            return redirect(reverse('competencies:organizations'))

    new_organization_form = OrganizationForm()
    return render_to_response('competencies/new_organization.html',
                              {'new_organization_form': new_organization_form,},
                              context_instance=RequestContext(request))
9,347
def _build_obs_freq_mat(acc_rep_mat):
    """
    build_obs_freq_mat(acc_rep_mat):

    Build the observed frequency matrix, from an accepted replacements matrix
    The acc_rep_mat matrix should be generated by the user.
    """
    # Note: acc_rep_mat should already be a half_matrix!!
    total = float(sum(acc_rep_mat.values()))
    obs_freq_mat = ObservedFrequencyMatrix(alphabet=acc_rep_mat.alphabet,
                                           build_later=1)
    for i in acc_rep_mat:
        obs_freq_mat[i] = acc_rep_mat[i] / total
    return obs_freq_mat
9,348
def strip(s):
    """strip(s) -> string

    Return a copy of the string s with leading and trailing
    whitespace removed.
    """
    i, j = 0, len(s)
    while i < j and s[i] in whitespace:
        i = i + 1
    while i < j and s[j - 1] in whitespace:
        j = j - 1
    return s[i:j]
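A quick check of the manual strip(), assuming `whitespace` is the usual whitespace character set (e.g. string.whitespace):

assert strip("  hello world \t\n") == "hello world"
assert strip("") == ""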
9,349
def set_news(title: str, text: str, db_user: User, lang: str, main_page: str) -> dict:
    """
    Sets a new news into the news table

    :param title: of the news
    :param text: of the news
    :param db_user: author of the news
    :param lang: ui_locales
    :param main_page: url
    :return:
    """
    LOG.debug("Entering set_news function")

    author = db_user.firstname
    if db_user.firstname != 'admin':
        author += ' {}'.format(db_user.surname)

    date = arrow.now()
    DBDiscussionSession.add(News(title=title, author=author, date=arrow.now(), news=text))
    DBDiscussionSession.flush()
    transaction.commit()

    return_dict = {
        'status': 'success',
        'title': title,
        'date': sql_timestamp_pretty_print(date, lang, False),
        'author': author,
        'news': text
    }
    return return_dict
9,350
def make_index_file(out_dir, files):
    """Write an index file."""
    files = [os.path.splitext(os.path.basename(x))[0] for x in files]
    with open(os.path.join(out_dir, 'idx'), 'wb') as f:
        pickle.dump(files, f, pickle.HIGHEST_PROTOCOL)
9,351
def _test_theano_compiled_dtw(input_size, hidden_size, ndim, distance_function, normalize, enable_grads, debug_level, eps): """ Performs a test of a Theano DTW implementation. :param input_size: The size of the inputs. :param hidden_size: The size of the hidden values (used only if enable_grads=True). :param ndim: The number of dimensions to use (2: non-batched, 3: batched). :param distance_function: The symbolic distance function to use (e.g. a reference to a function in distance). :param normalize: Whether the DTW distances should be sequence length normalized. :param enable_grads: Whether gradients should be computed of a min mean DTW cost function with respect to some synthetic parameters. :param debug_level: The debug level to use (see above for explanation). :param eps: The minimum value to use inside the distance function. Set to the machine epsilon if None. :return: A compiled Theano function that can be used to compute DTW distances between sequence pairs. """ assert 2 <= ndim <= 3 # Create the input variables test values and lengths suitable for testing the implementation. if ndim == 2: x1_in, x1 = _var('x1', (4, input_size), 'theano_compiled_dtw', debug_level) x2_in, x2 = _var('x2', (5, input_size), 'theano_compiled_dtw', debug_level) x1_lengths_in, x1_lengths = _var('x1_lengths', (), 'theano_compiled_dtw', debug_level, dtype='int32', test_value_getter=lambda shape: 4) x2_lengths_in, x2_lengths = _var('x2_lengths', (), 'theano_compiled_dtw', debug_level, dtype='int32', test_value_getter=lambda shape: 5) elif ndim == 3: x1_in, x1 = _var('x1', (5, 4, input_size), 'theano_compiled_dtw', debug_level) x2_in, x2 = _var('x2', (6, 4, input_size), 'theano_compiled_dtw', debug_level) if debug_level > 0: x1.tag.test_value[-1, 0] = 0 x2.tag.test_value[-1, 1] = 0 x1.tag.test_value[-1, 2] = 0 x2.tag.test_value[-1, 2] = 0 x1_lengths_in, x1_lengths = _var('x1_lengths', (2,), 'theano_compiled_dtw', debug_level, dtype='int32', test_value_getter=lambda shape: numpy.array([4, 5, 4, 5])) x2_lengths_in, x2_lengths = _var('x2_lengths', (2,), 'theano_compiled_dtw', debug_level, dtype='int32', test_value_getter=lambda shape: numpy.array([6, 5, 5, 6])) else: raise Exception('Unsupported number of dimensions: ' + str(ndim)) if enable_grads: # Create some synthetic parameters w = utility.shared_gaussian_random_matrix('w', input_size, hidden_size) # Transform the inputs using the synthetic parameters x1 = _debug(theano.dot(x1, w), 'theano_compiled_dtw.z1', debug_level) x2 = _debug(theano.dot(x2, w), 'theano_compiled_dtw.z2', debug_level) else: w = None # Construct the symbolic expression for DTW symbolic_dtw = theano_symbolic_dtw(x1, x2, x1_lengths, x2_lengths, distance_function=distance_function, normalize=normalize, debug_level=debug_level, eps=eps) outputs = [symbolic_dtw] if enable_grads: # Create a min mean DTW cost expression cost = _debug(tt.mean(symbolic_dtw) if ndim == 3 else symbolic_dtw, 'theano_compiled_dtw.cost', debug_level) outputs.append(cost) # Perform symbolic differentiation of the cost expression with respect to the synthetic parameters outputs.append(_debug(theano.grad(cost, w), 'theano_compiled_dtw.w_grad', debug_level)) return theano.function([x1_in, x2_in, x1_lengths_in, x2_lengths_in], outputs, name='compiled_dtw_' + str(ndim), on_unused_input='ignore')
9,352
def connections_definition():
    """Connection definition in connections.yml"""
    out = pygments.highlight(
        code=(Settings.typhoon_home / 'connections.yml').read_text(),
        lexer=YamlLexer(),
        formatter=Terminal256Formatter()
    )
    pydoc.pager(out)
9,353
def test(seriesList):
    """This is a test function"""
    return seriesList
9,354
def perform_timeseries_analysis_iterative(dataset_in, intermediate_product=None, no_data=-9999):
    """
    Description:
    -----
    Input:
      dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
    Output:
      dataset_out (xarray.DataSet) - dataset containing
        variables: normalized_data, total_data, total_clean
    """
    data_vars = list(dataset_in.data_vars)
    key = data_vars[0]

    data = dataset_in[key].astype('float')

    processed_data = data.copy(deep=True)
    processed_data.values[data.values == no_data] = 0
    processed_data_sum = processed_data.sum('time')

    clean_data = data.copy(deep=True)
    clean_data.values[data.values != no_data] = 1
    clean_data.values[data.values == no_data] = 0
    clean_data_sum = clean_data.sum('time')

    if intermediate_product is None:
        processed_data_normalized = processed_data_sum / clean_data_sum
        processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
        dataset_out = xr.Dataset({'normalized_data': processed_data_normalized,
                                  'total_data': processed_data_sum,
                                  'total_clean': clean_data_sum},
                                 coords={'latitude': dataset_in.latitude,
                                         'longitude': dataset_in.longitude})
    else:
        dataset_out = intermediate_product.copy(deep=True)
        dataset_out['total_data'] += processed_data_sum
        dataset_out['total_clean'] += clean_data_sum
        processed_data_normalized = dataset_out['total_data'] / dataset_out['total_clean']
        processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
        dataset_out['normalized_data'] = processed_data_normalized

    return dataset_out
9,355
def getDayOfYear(date):
    # type: (Date) -> int
    """Extracts the day of the year from a date. The first day of the
    year is day 1.

    Args:
        date: The date to use.

    Returns:
        An integer that is representative of the extracted value.
    """
    print(date)
    return _now().timetuple().tm_yday
9,356
def predict(network, X_test):
    """Takes the weight matrices used by the neural network and the test data,
    and returns the predictions (an array) for the test data.

    Parameter X_test: array holding the information of the 10,000 test images.
    """
    y_pred = []
    for sample in X_test:  # iterate over each image in the test set
        # Propagate the image through the network to compute the class probabilities.
        sample_hat = forward(network, sample)
        # The index of the largest probability is the predicted digit.
        sample_pred = np.argmax(sample_hat)
        y_pred.append(sample_pred)  # append the prediction to the result list
    return np.array(y_pred)
9,357
def test_bad_set_value():
    """ Test setting an impossible value """
    packet = Packet()
    with pytest.raises(TypeError):
        packet['uint8'] = 65535
    with pytest.raises(TypeError):
        packet['uint8'] = 'foo'
    with pytest.raises(TypeError):
        packet['uint8'] = b'bar'
    with pytest.raises(TypeError):
        packet['uint8'] = Packet()
    with pytest.raises(TypeError):
        packet['uint8'] = [42, 100]
9,358
def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram, vms_cpu, vms_ram): """The Best Fit Decreasing (BFD) heuristic for placing VMs on hosts. :param last_n_vm_cpu: The last n VM CPU usage values to average. :param hosts_cpu: A map of host names and their available CPU in MHz. :param hosts_ram: A map of host names and their available RAM in MB. :param inactive_hosts_cpu: A map of inactive hosts and available CPU MHz. :param inactive_hosts_ram: A map of inactive hosts and available RAM MB. :param vms_cpu: A map of VM UUID and their CPU utilization in MHz. :param vms_ram: A map of VM UUID and their RAM usage in MB. :return: A map of VM UUIDs to host names, or {} if cannot be solved. """ LOG.debug('last_n_vm_cpu: %s', str(last_n_vm_cpu)) LOG.debug('hosts_cpu: %s', str(hosts_cpu)) LOG.debug('hosts_ram: %s', str(hosts_ram)) LOG.debug('inactive_hosts_cpu: %s', str(inactive_hosts_cpu)) LOG.debug('inactive_hosts_ram: %s', str(inactive_hosts_ram)) LOG.debug('vms_cpu: %s', str(vms_cpu)) LOG.debug('vms_ram: %s', str(vms_ram)) vms_tmp = [] for vm, cpu in vms_cpu.items(): if cpu: last_n_cpu = cpu[-last_n_vm_cpu:] vms_tmp.append((sum(last_n_cpu) / len(last_n_cpu), vms_ram[vm], vm)) else: LOG.warning('No CPU data for VM: %s - skipping', vm) vms = sorted(vms_tmp, reverse=True) hosts = sorted(((v, hosts_ram[k], k) for k, v in hosts_cpu.items())) inactive_hosts = sorted(((v, inactive_hosts_ram[k], k) for k, v in inactive_hosts_cpu.items())) mapping = {} for vm_cpu, vm_ram, vm_uuid in vms: mapped = False while not mapped: for _, _, host in hosts: if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram: mapping[vm_uuid] = host hosts_cpu[host] -= vm_cpu hosts_ram[host] -= vm_ram mapped = True break else: if inactive_hosts: activated_host = inactive_hosts.pop(0) hosts.append(activated_host) hosts = sorted(hosts) hosts_cpu[activated_host[2]] = activated_host[0] hosts_ram[activated_host[2]] = activated_host[1] else: break if len(vms) == len(mapping): return mapping return {}
9,359
def __sanitize_close_input(x, y):
    """
    Makes sure that both x and y are ht.DNDarrays.
    Provides copies of x and y distributed along the same split axis
    (if original split axes do not match).
    """
    def sanitize_input_type(x, y):
        """
        Verifies that x is either a scalar, or a ht.DNDarray.
        If a scalar, x gets wrapped in a ht.DNDarray.
        Raises TypeError if x is neither.
        """
        if not isinstance(x, dndarray.DNDarray):
            if np.ndim(x) == 0:
                dtype = getattr(x, "dtype", float)
                device = getattr(y, "device", None)
                x = factories.array(x, dtype=dtype, device=device)
            else:
                raise TypeError("Expected DNDarray or numeric scalar, input was {}".format(type(x)))
        return x

    x = sanitize_input_type(x, y)
    y = sanitize_input_type(y, x)

    # Do redistribution out-of-place
    # If only one of the tensors is distributed, unsplit/gather it
    if x.split is not None and y.split is None:
        t1 = manipulations.resplit(x, axis=None)
        return t1, y
    elif x.split != y.split:
        t2 = manipulations.resplit(y, axis=x.split)
        return x, t2
    else:
        return x, y
9,360
def _is_buildbot_cmdline(cmdline):
    """Returns (bool): True if a process is a BuildBot process.

    We determine this by testing if it has the command pattern:
    [...] [.../]python [.../]twistd [...]

    Args:
      cmdline (list): The command line list.
    """
    return any((os.path.basename(cmdline[i]) == 'python' and
                os.path.basename(cmdline[i+1]) == 'twistd')
               for i in xrange(len(cmdline) - 1))
9,361
def _deps_to_compile_together(dependency_graph):
    """Yield a chunk of (outfile, depnode) pairs.

    The rule is that we yield all the chunks at level 1 before any
    chunks at level 2, etc. Each chunk holds only files with the same
    compile_instance. The caller is still responsible for divvying up
    chunks based on compile_rule.num_outputs().
    """
    flattened_graph = list(dependency_graph.items())
    keyfn = lambda kv: (kv[1].level, id(kv[1].compile_rule.compile_instance))
    flattened_graph.sort(key=keyfn)
    for (_, chunk) in itertools.groupby(flattened_graph, keyfn):
        yield list(chunk)
9,362
def analyze_J_full_movie(folder_name,include_eps=False): """Analyze the Jacobian -- report timeseries parmeters. Must first run compute_F_whole_movie().""" # set up folders external_folder_name = 'ALL_MOVIES_PROCESSED' if not os.path.exists(external_folder_name): os.makedirs(external_folder_name) out_analysis = external_folder_name + '/' + folder_name + '/analysis' if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name) if not os.path.exists(out_analysis): os.makedirs(out_analysis) # import the deformation gradient. F_list = np.loadtxt(out_analysis + '/recovered_F.txt') num_frames = F_list.shape[0]; x = [] J_list = [] for kk in range(0,num_frames): F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3] J_list.append(F00*F11 - F01*F10) x.append(kk) J_list = np.asarray(J_list) x = np.asarray(x) # compute the parameters of the timeseries plt.figure(figsize=(4,4)) plt.plot(J_list,'k-') data = J_list data_med = signal.medfilt(data,5) deriv = np.gradient(data,x) count_C = 0; count_R = 0; count_F = 0 thresh_flat = 0.01*(np.max(J_list) - np.min(J_list)) pix_leng_median = []; pix_leng_mean = []; pix_leng_min = []; pix_leng_max = []; perc_sarc_short = [] fra_mean_contract_time = []; fra_mean_relax_time = []; fra_mean_flat_time = []; fra_mean_period = []; fra_to_first = [] idx_sarc = []; num_peak_all = [] for kk in range(0,x.shape[0]): if deriv[kk] > thresh_flat: count_R += 1 plt.plot(x[kk],J_list[kk],'o',color=(.5,.5,.5)) elif deriv[kk] < -1.0*thresh_flat: count_C += 1 plt.plot(x[kk],J_list[kk],'o',color=(.5,0,0)) else: count_F += 1 plt.plot(x[kk],J_list[kk],'o',color=(0,0,.5)) # detect peaks and valleys input_distance = 10; input_width = 5 th = .00; di = input_distance; wi = input_width # parameters peaks_U, _ = find_peaks(data_med,threshold=th,distance=di,width=wi) peaks_L, _ = find_peaks(-1.0*data_med,threshold=th,distance=di,width=wi) #num_peaks = 0.5 * peaks_U.shape[0] + 0.5 * peaks_L.shape[0] num_peaks = peaks_L.shape[0] if num_peaks == 0: num_peaks = 999999 mean_C = count_C / num_peaks mean_R = count_R / num_peaks mean_F = count_F / num_peaks plt.grid() #plt.plot(x[peaks_U],data[peaks_U],'rx',markersize=10) plt.plot(x[peaks_L],data[peaks_L],'rx',markersize=13) plt.title('frames contract: %i, relax: %i, flat: %i'%(count_C,count_R,count_F)) plt.xlabel('frame number') plt.ylabel('determinate of average F') plt.tight_layout() plt.savefig(out_analysis + '/recovered_F_plot_timeseries') if include_eps: plt.savefig(out_analysis + '/recovered_F_plot_timeseries.eps') return
9,363
def seed_everything(seed: int = 42) -> None:
    """
    Utility function to seed everything.
    """
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
9,364
def table_information_one(soup, div_id_name: str = None) -> dict:
    """
    first method for bringing back table information as a dict.
    works on:
        parcelInfo
        SummaryPropertyValues
        SummarySubdivision
    """
    table = []
    for x in soup.find_all("div", {"id": div_id_name}):
        for div in x.find_all("div"):
            for row in x.find_all("tr"):
                cols = row.find_all("td")
                cols = [element.text.strip() for element in cols if element]
                table.extend(cols)
    it = iter(table)
    test_dict = dict(zip(it, it))
    if test_dict.get(""):
        del test_dict[""]
    return test_dict
9,365
def Eval_point_chan(state, chan, data):
    """External validity, along a channel, where point-data is a pulled back
    along the channel
    """
    # for each element, state.sp.get(*a), of the codomain
    vals = [(chan >> state)(*a) ** data(*a) for a in data.sp.iter_all()]
    val = functools.reduce(lambda p1, p2: p1 * p2, vals, 1)
    return val
9,366
def dfi2pyranges(dfi):
    """Convert dfi to pyranges

    Args:
      dfi: pd.DataFrame returned by `load_instances`
    """
    import pyranges as pr
    dfi = dfi.copy()
    dfi['Chromosome'] = dfi['example_chrom']
    dfi['Start'] = dfi['pattern_start_abs']
    dfi['End'] = dfi['pattern_end_abs']
    dfi['Name'] = dfi['pattern']
    dfi['Score'] = dfi['contrib_weighted_p']
    dfi['Strand'] = dfi['strand']
    return pr.PyRanges(dfi)
9,367
def cleared_nickname(nick: str) -> str: """Perform nickname clearing on given nickname""" if nick.startswith(('+', '!')): nick = nick[1:] if nick.endswith('#'): nick = nick[:-1] if all(nick.rpartition('(')): nick = nick.rpartition('(')[0] return nick
9,368
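A short usage sketch of the clearing rules above; the nicknames are illustrative inputs, not taken from the source.

assert cleared_nickname('+alice#') == 'alice'    # leading '+'/'!' and trailing '#' stripped
assert cleared_nickname('bob(away)') == 'bob'    # text from the last '(' onward dropped
assert cleared_nickname('carol') == 'carol'      # already-clean nicknames pass through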
def validate_model_on_lfw(
        strategy,
        model,
        left_pairs,
        right_pairs,
        is_same_list,
) -> tuple:
    """Validates the given model on the Labeled Faces in the Wild dataset.

    ### Parameters
        strategy: Distribution strategy used when computing the embeddings.
        model: The model to be tested.
        left_pairs: Left images of the LFW pairs.
        right_pairs: Right images of the LFW pairs.
        is_same_list: Ground-truth flags indicating whether each pair shows\
 the same person.

    ### Returns
        (accuracy_mean, accuracy_std, validation_rate, validation_std, far,\
 auc, eer) - Accuracy Mean, Accuracy Standard Deviation, Validation Rate,\
 Validation Standard Deviation, FAR, Area Under Curve (AUC) and Equal Error\
 Rate (EER).
    """
    embeddings, is_same_list = _get_embeddings(
        strategy,
        model,
        left_pairs,
        right_pairs,
        is_same_list,
    )
    tpr, fpr, accuracy, val, val_std, far = evaluate(embeddings, is_same_list)
    auc = metrics.auc(fpr, tpr)
    eer = brentq(lambda x: 1.0 - x - interpolate.interp1d(fpr, tpr)(x), 0.0, 1.0)
    return np.mean(accuracy), np.std(accuracy), val, val_std, far, auc, eer
9,369
def com_google_fonts_check_repo_upstream_yaml_has_required_fields(upstream_yaml): """Check upstream.yaml file contains all required fields""" required_fields = set(["branch", "files", "repository_url"]) upstream_fields = set(upstream_yaml.keys()) missing_fields = required_fields - upstream_fields if missing_fields: yield FAIL,\ Message('missing-fields', f"The upstream.yaml file is missing the following fields:" f" {list(missing_fields)}") else: yield PASS, "The upstream.yaml file contains all necessary fields"
9,370
def get_allsongs():
    """
    Get all the songs in your media server
    """
    conn = database_connect()
    if conn is None:
        return None
    cur = conn.cursor()
    try:
        # Try executing the SQL and get from the database
        sql = """select
            s.song_id, s.song_title, string_agg(saa.artist_name,',') as artists
        from
            mediaserver.song s left outer join
            (mediaserver.Song_Artists sa join mediaserver.Artist a
                on (sa.performing_artist_id=a.artist_id)
            ) as saa on (s.song_id=saa.song_id)
        group by s.song_id, s.song_title
        order by s.song_id;"""
        r = dictfetchall(cur, sql)
        print("return val is:")
        print(r)
        cur.close()     # Close the cursor
        conn.close()    # Close the connection to the db
        return r
    except:
        # If there were any errors, print an error to the debug and re-raise
        print("Unexpected error getting All Songs:", sys.exc_info()[0])
        raise
9,371
def nlu_audio(settings, logger): """Wrapper for NLU audio""" speech_args = settings['speech'] loop = asyncio.get_event_loop() interpretations = {} with Recorder(loop=loop) as recorder: interpretations = loop.run_until_complete(understand_audio( loop, speech_args['url'], speech_args['app_id'], unhexlify(speech_args['app_key']), # context_tag=credentials['context_tag'], "master", speech_args['language'], recorder=recorder, logger=logger)) # loop.close() if interpretations is False: # The user did not speak return {} else: return interpretations
9,372
def _gauss(sigma, n_sigma=3):
    """Discrete, normalized Gaussian centered on zero. Used for filtering data.

    Args:
        sigma (float): standard deviation of Gaussian
        n_sigma (float): extend x in each direction by n_sigma * sigma

    Returns:
        ndarray: discrete Gaussian curve
    """
    x_range = n_sigma * sigma
    x = np.arange(-x_range, x_range + 1e-5, 1, dtype=float)
    y = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x / sigma)**2)
    return y
9,373
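A quick check of the kernel produced by _gauss; sigma=2.0 is an illustrative choice and the printed sum is approximate.

kernel = _gauss(sigma=2.0)     # default n_sigma=3 gives 13 taps at x = -6 .. 6
print(kernel.shape[0])         # 13
print(kernel.sum())            # close to 1.0 (~0.999); most mass lies within +/- 3 sigma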
def write_json_file(path, json_data):
    """write content in json format

    Arguments:
        path {string} -- path to store json file
        json_data {dictionary} -- dictionary of data to write in json
    """
    # the context manager closes the file, so no explicit close() is needed
    with open(path + '.json', 'w+') as writer:
        json.dump(json_data, writer)
9,374
def create_tentacle_mask(points, height, width, buzzmobile_width, pixels_per_m): """Creates a mask of a tentacle by drawing the points as a line.""" tentacle_mask = np.zeros((height, width), np.uint8) for i in range(len(points) - 1): pt1 = points[i] pt2 = points[i+1] cv2.line(tentacle_mask, pt1, pt2, [255, 255, 255], int(buzzmobile_width * pixels_per_m)) return tentacle_mask
9,375
def fill_profile_list( profile_result: ProfileResult, optimizer_result: Dict[str, Any], profile_index: Iterable[int], profile_list: int, problem_dimension: int, global_opt: float ) -> None: """ This is a helper function for initialize_profile Parameters ---------- profile_result: A list of profiler result objects. optimizer_result: A local optimization result. profile_index: array with parameter indices, whether a profile should be computed (1) or not (0). Default is all profiles should be computed. profile_list: integer which specifies whether a call to the profiler should create a new list of profiles (default) or should be added to a specific profile list. problem_dimension: number of parameters in the unreduced problem. global_opt: log-posterior at global optimum. """ if optimizer_result[GRAD] is not None: gradnorm = np.linalg.norm(optimizer_result[GRAD]) else: gradnorm = None # create blank profile new_profile = ProfilerResult( x_path=optimizer_result["x"], fval_path=np.array([optimizer_result["fval"]]), ratio_path=np.array([np.exp(global_opt - optimizer_result["fval"])]), gradnorm_path=gradnorm, exitflag_path=optimizer_result["exitflag"], time_path=np.array([0.]), time_total=0., n_fval=0, n_grad=0, n_hess=0, message=None) if profile_list is None: # All profiles have to be created from scratch for i_parameter in range(0, problem_dimension): if i_parameter in profile_index: # Should we create a profile for this index? profile_result.append_profiler_result(new_profile) else: # if no profile should be computed for this parameter profile_result.append_profiler_result(None) else: for i_parameter in range(0, problem_dimension): # We append to an existing list if i_parameter in profile_index: # Do we have to create a new profile? create_new = (profile_result.list[profile_list][i_parameter] is None) if create_new: profile_result.set_profiler_result( new_profile, i_parameter)
9,376
def _matrix_to_real_tril_vec(matrix): """Parametrize a positive definite hermitian matrix using its Cholesky decomposition""" tril_matrix = la.cholesky(matrix, lower=True) diag_vector = tril_matrix[np.diag_indices(tril_matrix.shape[0])].astype(float) complex_tril_vector = tril_matrix[np.tril_indices(tril_matrix.shape[0], -1)] real_tril_vector = _complex_to_real(complex_tril_vector) return np.concatenate((diag_vector, real_tril_vector))
9,377
def is_subdir(path, directory):
    """Check if path is a sub-path of directory.

    Arguments:
        path (string): the path to check
        directory (string): the path to use as relative starting point.

    Returns:
        bool: True if path is a sub-path of directory or False otherwise.
    """
    try:
        relative = os.path.relpath(path, directory)
        return not relative.startswith(os.pardir)
    except ValueError:
        # filename and folder are on different mount points
        return False
9,378
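Two illustrative checks with POSIX-style paths (the paths are made up for the example):

assert is_subdir('/srv/data/run1/log.txt', '/srv/data')      # relpath is 'run1/log.txt'
assert not is_subdir('/srv/other/file.txt', '/srv/data')     # relpath starts with '..'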
def func6(): """ # Train on new dataset Here we will train on the Sheep dataset. First we will clone and split the dataset to 2 partitions - train and validation. After that we will clone the pretrained snapshot """
9,379
def main():
    """
    Instantiates a Main object and executes it.

    The 'standalone_mode' is so that click doesn't handle usage-exceptions
    itself, but bubbles them up. The command-line arguments are taken from
    sys.argv, plus any lines piped in on stdin.
    """
    args = sys.argv[1:]
    # If stdin is not empty (being piped to)
    if not sys.stdin.isatty():
        args += sys.stdin.readlines()
    command = Main()
    catch = lnk.errors.Catch(1)
    catch.catch(command.main, args, standalone_mode=False)
9,380
def max_pool(pool_size, strides, padding='SAME', name=None): """max pooling layer""" return tf.layers.MaxPooling2D(pool_size, strides, padding, name=name)
9,381
def size_as_minimum_int_or_none(size):
    """
    :return: int or None, the minimum size across the recurrence map.
    For example:
        - size = no value, will return: None
        - size = simple int value of 5, will return: 5
        - size = timed interval(s), like "2@0 22 * * *:24@0 10 * * *", will return: 2
    """
    return min(size_as_recurrence_map(size).values())
9,382
def args_to_numpy(args): """Converts all Torch tensors in a list to NumPy arrays Args: args (list): list containing QNode arguments, including Torch tensors Returns: list: returns the same list, with all Torch tensors converted to NumPy arrays """ res = [] for i in args: if isinstance(i, torch.Tensor): if i.is_cuda: # pragma: no cover res.append(i.cpu().detach().numpy()) else: res.append(i.detach().numpy()) else: res.append(i) # if NumPy array is scalar, convert to a Python float res = [i.tolist() if (isinstance(i, np.ndarray) and not i.shape) else i for i in res] return res
9,383
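A small usage sketch (assumes PyTorch and NumPy are available; the argument values are illustrative):

mixed_args = [torch.tensor([1.0, 2.0]), torch.tensor(3.0), 5]
converted = args_to_numpy(mixed_args)
# converted[0] is np.array([1., 2.]), the 0-d tensor becomes the Python float 3.0,
# and the plain int 5 passes through unchanged.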
def optimizer_setup(model, params): """ creates optimizer, can have layer specific options """ if params.optimizer == 'adam': if params.freeze_backbone: optimizer = optimizer_handler.layer_specific_adam(model, params) else: optimizer = optimizer_handler.plain_adam(model, params) elif params.optimizer == 'sgd': if params.freeze_backbone: optimizer = optimizer_handler.layer_specific_sgd(model, params) else: optimizer = optimizer_handler.plain_sgd(model, params) if params.zero_bn_bias_decay: optimizer = zero_wdcay_bn_bias(optimizer) return optimizer
9,384
def charToEmoji(char, spaceCounter=0):
    """
    If you insert a space, make sure you have your own space counter and
    increment it. Space counter goes from 0 to 3.
    """
    if char in emojitable.table:
        if char == ' ':
            emoji = emojitable.table[char][spaceCounter]
        else:
            emoji = emojitable.table[char]
        return emoji
    # characters without a mapping in the emoji table yield None
    return None
9,385
def averages_area(averages): """ Computes the area of the polygon formed by the hue bin averages. Parameters ---------- averages : array_like, (n, 2) Hue bin averages. Returns ------- float Area of the polygon. """ N = averages.shape[0] triangle_areas = np.empty(N) for i in range(N): u = averages[i, :] v = averages[(i + 1) % N, :] triangle_areas[i] = (u[0] * v[1] - u[1] * v[0]) / 2 return np.sum(triangle_areas)
9,386
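The function is the shoelace formula applied to the hue-bin averages; for a counter-clockwise unit square the signed triangle areas are 0, 0.5, 0.5 and 0, which sum to 1 (the square vertices below are an illustrative input):

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(averages_area(square))   # 1.0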
def _tree_cmp(fpath1: PathLike, fpath2: PathLike, tree_format: str = 'newick') -> bool: """Returns True if trees stored in `fpath1` and `fpath2` are equivalent, False otherwise. Args: fpath1: First tree file path. fpath2: Second tree file path. tree_format: Tree format, i.e. ``newick``, ``nexus``, ``phyloxml`` or ``nexml``. """ ref_tree = Phylo.read(fpath1, tree_format) target_tree = Phylo.read(fpath2, tree_format) # Both trees are considered equal if they have the same leaves and the same distance from each to the root ref_dists = {leaf.name: ref_tree.distance(leaf) for leaf in ref_tree.get_terminals()} target_dists = {leaf.name: target_tree.distance(leaf) for leaf in target_tree.get_terminals()} return ref_dists == target_dists
9,387
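A minimal sketch of the comparison using two equivalent Newick trees written to temporary files (leaf order differs, but leaf names and root-to-leaf distances match; Bio.Phylo is assumed to be importable as in the function above):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    fp1 = os.path.join(tmp, 'a.nwk')
    fp2 = os.path.join(tmp, 'b.nwk')
    with open(fp1, 'w') as fh:
        fh.write('(A:1.0,B:2.0);')
    with open(fp2, 'w') as fh:
        fh.write('(B:2.0,A:1.0);')
    print(_tree_cmp(fp1, fp2))   # True: same leaves, same distances to the root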
def parse_genemark(input_f, genbank_fp):
    """ Extract atypical genes identified by GeneMark

    Parameters
    ----------
    input_f: string
        file descriptor for GeneMark output gene list (*.lst)
    genbank_fp: string
        file path to genome in GenBank format

    Notes
    -----
    genbank_fp is the intermediate GenBank file generated by
    reformat_input.py, in which multiple sequences are concatenated,
    instead of the original GenBank file.

    Returns
    -------
    output: string
        gene names (protein_ids) separated by newline
    """
    genes = {}
    gb = Sequence.read(genbank_fp, format='genbank')
    for feature in gb.interval_metadata._intervals:
        m = feature.metadata
        if m['type'] == 'CDS' and 'protein_id' in m:
            protein_id = m['protein_id'].replace('\"', '')
            if protein_id not in genes:
                strand = m['strand']
                start = feature.bounds[0][0] + 1
                end = feature.bounds[0][1]
                genes[protein_id] = (start, end, strand)
    atypical_genes = []
    reading = False
    for line in input_f:
        x = line.strip().split()
        if len(x) == 2 and x == ['#', 'Length']:
            reading = True
        # atypical genes have class '2' in the 6th column
        elif reading and len(x) == 6 and x[5] == '2':
            (start, end, strand) = (int(x[2].lstrip('<>')),
                                    int(x[3].lstrip('<>')),
                                    x[1])
            for (gene, coords) in genes.items():
                if coords[0] == start and coords[1] == end and coords[2] == strand:
                    atypical_genes.append(gene)
    return '\n'.join(sorted(atypical_genes))
9,388
def get_hex(fh, nbytes=1):
    """ get nbytes bytes (1 by default) from file handle fh and display as hexadecimal """
    hstr = ""
    for i in range(nbytes):
        # read and format one byte at a time from the file handle
        b = "%02X " % ord(fh.read(1))
        hstr += b
    return hstr
9,389
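Usage sketch against an in-memory binary stream (the byte values are illustrative; assumes get_hex reads one byte per iteration from the handle, as above):

import io

stream = io.BytesIO(b'\xde\xad\xbe\xef')
print(get_hex(stream, nbytes=4))   # 'DE AD BE EF ' (note the trailing space)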
def fetch(pages, per_page, graph): """ Get a list of posts from facebook """ return [x.replace('\n', '') for name in pages for x in fetch_page(name, per_page, graph)]
9,390
def lifted_struct_loss(labels, embeddings, margin=1.0): """Computes the lifted structured loss. Args: labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multiclass integer labels. embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not be l2 normalized. margin: Float, margin term in the loss definition. Returns: lifted_loss: tf.float32 scalar. """ # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor. lshape = tf.shape(labels) labels = tf.reshape(labels, [lshape[0], 1]) # Build pairwise squared distance matrix. pairwise_distances = metric_learning.pairwise_distance(embeddings) # Build pairwise binary adjacency matrix. adjacency = tf.math.equal(labels, tf.transpose(labels)) # Invert so we can select negatives only. adjacency_not = tf.math.logical_not(adjacency) batch_size = tf.size(labels) diff = margin - pairwise_distances mask = tf.cast(adjacency_not, dtype=tf.dtypes.float32) # Safe maximum: Temporarily shift negative distances # above zero before taking max. # this is to take the max only among negatives. row_minimums = tf.math.reduce_min(diff, 1, keepdims=True) row_negative_maximums = tf.math.reduce_max( tf.math.multiply(diff - row_minimums, mask), 1, keepdims=True) + row_minimums # Compute the loss. # Keep track of matrix of maximums where M_ij = max(m_i, m_j) # where m_i is the max of alpha - negative D_i's. # This matches the Caffe loss layer implementation at: # https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long max_elements = tf.math.maximum(row_negative_maximums, tf.transpose(row_negative_maximums)) diff_tiled = tf.tile(diff, [batch_size, 1]) mask_tiled = tf.tile(mask, [batch_size, 1]) max_elements_vect = tf.reshape(tf.transpose(max_elements), [-1, 1]) loss_exp_left = tf.reshape( tf.math.reduce_sum( tf.math.multiply( tf.math.exp(diff_tiled - max_elements_vect), mask_tiled), 1, keepdims=True), [batch_size, batch_size]) loss_mat = max_elements + tf.math.log(loss_exp_left + tf.transpose(loss_exp_left)) # Add the positive distance. loss_mat += pairwise_distances mask_positives = tf.cast( adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag( tf.ones([batch_size])) # *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2. num_positives = tf.math.reduce_sum(mask_positives) / 2.0 lifted_loss = tf.math.truediv( 0.25 * tf.math.reduce_sum( tf.math.square( tf.math.maximum( tf.math.multiply(loss_mat, mask_positives), 0.0))), num_positives) return lifted_loss
9,391
def get_uint8_rgb(dicom_path): """ Reads dicom from path and returns rgb uint8 array where R: min-max normalized, G: CLAHE, B: histogram equalized. Image size remains original. """ dcm = _read_dicom_image(dicom_path) feats = _calc_image_features(dcm) return (feats*255).astype(np.uint8)
9,392
def start(host, port, options, timeout=10): """Start an instance of mitmproxy server in a subprocess. Args: host: The host running mitmproxy. port: The port mitmproxy will listen on. Pass 0 for automatic selection. options: The selenium wire options. timeout: The number of seconds to wait for the server to start. Default 10 seconds. Returns: A MitmProxy object representing the server. Raises: TimeoutException: if the mitmproxy server did not start in the timout period. RuntimeError: if there was some unknown error starting the mitmproxy server. """ for _ in range(RETRIES): port = port or random.randint(PORT_RANGE_START, PORT_RANGE_END) proxy = subprocess.Popen([ 'mitmdump', *_get_upstream_proxy_args(options), '--set', 'confdir={}'.format(options.get('mitmproxy_confdir', DEFAULT_CONFDIR)), '--set', 'listen_port={}'.format(port), '--set', 'ssl_insecure={}'.format(str(options.get('verify_ssl', 'true')).lower()), '--set', 'upstream_cert={}'.format(DEFAULT_UPSTREAM_CERT), '--set', 'stream_websockets={}'.format(DEFAULT_STREAM_WEBSOCKETS), '--set', 'termlog_verbosity={}'.format(DEFAULT_TERMLOG_VERBOSITY), '--set', 'flow_detail={}'.format(DEFAULT_FLOW_DETAIL), '-s', __file__ ]) try: proxy.wait(timeout=2) except subprocess.TimeoutExpired: # Subprocess has started break else: raise RuntimeError('Error starting mitmproxy - check console output') start_time = time.time() while time.time() - start_time < timeout: with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: # Try and connect to mitmproxy to determine whether it's started up if sock.connect_ex((host, port)) == 0: return MitmProxy(host, port, proxy) # Hasn't yet started so wait a bit and try again time.sleep(0.5) raise TimeoutError('mitmproxy did not start within {} seconds'.format(timeout))
9,393
def query_db_cluster(instanceid): """ Querying whether DB is Clustered or not """ try: db_instance = RDS.describe_db_instances( DBInstanceIdentifier=instanceid ) return db_instance['DBInstances'][0]['DBClusterIdentifier'] except KeyError: return False
9,394
def is_nsfw() -> Callable[[T], T]: """A :func:`.check` that checks if the channel is a NSFW channel. This check raises a special exception, :exc:`.ApplicationNSFWChannelRequired` that is derived from :exc:`.ApplicationCheckFailure`. .. versionchanged:: 2.0 Raise :exc:`.ApplicationNSFWChannelRequired` instead of generic :exc:`.ApplicationCheckFailure`. DM channels will also now pass this check. """ def pred(ctx: ApplicationContext) -> bool: ch = ctx.channel if ctx.guild is None or (isinstance(ch, (discord.TextChannel, discord.Thread)) and ch.is_nsfw()): return True raise ApplicationNSFWChannelRequired(ch) # type: ignore return check(pred)
9,395
def calc_MAR(residuals, scalefactor=1.482602218): """Return median absolute residual (MAR) of input array. By default, the result is scaled to the normal distribution.""" return scalefactor * np.median(np.abs(residuals))
9,396
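For zero-centred, normally distributed residuals the scaled MAR approximates the standard deviation, since median(|X|) is about 0.6745 * sigma and 1.4826 is roughly 1/0.6745 (the sample below is synthetic):

rng = np.random.default_rng(0)
residuals = rng.normal(0.0, 2.0, size=100_000)
print(calc_MAR(residuals))   # approximately 2.0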
def resourceimport_redirect(): """ Returns a redirection action to the main resource importing view, which is a list of files available for importing. Returns: The redirection action. """ return redirect(url_for('resourceimportfilesview.index'))
9,397
def valid_attribute(node, whitelist): """Check the attribute access validity. Returns True if the member access is valid, False otherwise.""" # TODO: Support more than gast.Name? if not isinstance(node.value, gast.Name): if isinstance(node.value, gast.Attribute): return valid_attribute(node.value, whitelist) return False is_valid = False for elt in whitelist: if isinstance(elt, str): continue if isinstance(elt, Variable): is_valid_impl = valid_attribute_impl(node, elt) is_valid = is_valid_impl or is_valid if is_valid: return is_valid return is_valid
9,398
def gen_fov_chan_names(num_fovs, num_chans, return_imgs=False, use_delimiter=False): """Generate fov and channel names Names have the format 'fov0', 'fov1', ..., 'fovN' for fovs and 'chan0', 'chan1', ..., 'chanM' for channels. Args: num_fovs (int): Number of fov names to create num_chans (int): Number of channel names to create return_imgs (bool): Return 'chanK.tiff' as well if True. Default is False use_delimiter (bool): Appends '_otherinfo' to the first fov. Useful for testing fov id extraction from filenames. Default is False Returns: tuple (list, list) or (list, list, list): If return_imgs is False, only fov and channel names are returned If return_imgs is True, image names will also be returned """ fovs = [f'fov{i}' for i in range(num_fovs)] if use_delimiter: fovs[0] = f'{fovs[0]}_otherinfo' chans = [f'chan{i}' for i in range(num_chans)] if return_imgs: imgs = [f'{chan}.tiff' for chan in chans] return fovs, chans, imgs else: return fovs, chans
9,399
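Example calls showing the generated naming scheme (argument values are illustrative):

fovs, chans = gen_fov_chan_names(num_fovs=2, num_chans=3)
# fovs  == ['fov0', 'fov1']
# chans == ['chan0', 'chan1', 'chan2']

fovs, chans, imgs = gen_fov_chan_names(1, 2, return_imgs=True)
# imgs == ['chan0.tiff', 'chan1.tiff']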