Dataset columns: content (string, lengths 22 to 815k characters) · id (int64, 0 to 4.91M)
def create_cell(cell_classname, cell_params):
    """ Creates RNN cell.

    Args:
        cell_classname: The name of the cell class, e.g. "LSTMCell", "GRUCell" and so on.
        cell_params: A dictionary of parameters to pass to the cell constructor.

    Returns:
        A `tf.contrib.rnn.RNNCell` object.
    """
    cell_params = cell_params.copy()

    # Find the cell class, use the in-house implemented LSTMCell & GRUCell
    cell_class = eval(cell_classname)  # find from all CELL NAMES imported from tf.contrib.rnn

    # Make sure additional arguments are valid
    cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
    new_cell_params = {}
    for key in cell_params.keys():
        if key not in cell_args:
            # raise ValueError(
            tf.logging.info(
                """{} is not a valid argument for {} class. Available arguments are: {}""".format(
                    key, cell_class.__name__, cell_args))
        else:
            new_cell_params[key] = cell_params[key]

    # Create cell
    return cell_class(**new_cell_params)
21,600
def get_myia_tag(rtag):
    """Return the myia tag for a constructor.

    This will fail if you haven't properly called fill_reverse_tag_map().
    """
    return rev_tag_map[rtag]
21,601
def get_priority(gene, phenotype):
    """
    Get matched priority from the phenotype table.

    Parameters
    ----------
    gene : str
        Gene name.
    phenotype : str
        Phenotype name.

    Returns
    -------
    str
        EHR priority.

    Examples
    --------
    >>> import pypgx
    >>> pypgx.get_priority('CYP2D6', 'Normal Metabolizer')
    'Normal/Routine/Low Risk'
    >>> pypgx.get_priority('CYP2D6', 'Ultrarapid Metabolizer')
    'Abnormal/Priority/High Risk'
    >>> pypgx.get_priority('CYP3A5', 'Normal Metabolizer')
    'Abnormal/Priority/High Risk'
    >>> pypgx.get_priority('CYP3A5', 'Poor Metabolizer')
    'Normal/Routine/Low Risk'
    """
    if not is_target_gene(gene):
        raise NotTargetGeneError(gene)

    if phenotype not in list_phenotypes():
        raise PhenotypeNotFoundError(phenotype)

    df = load_phenotype_table()
    i = (df.Gene == gene) & (df.Phenotype == phenotype)
    return df[i].Priority.values[0]
21,602
def _generator3(path): """ Args: path: path of the dataframe Returns: yield outputs of X and Y pairs """ args = init_args() catalog = load_catalog(path) def preprocess(x, y=None): zero = False if not np.any(x): zero = True img = (x - avg_x) / std_x return img, y, zero for index in tqdm(range(0, len(catalog), 200)): rows = catalog[index:index + 200] for idx, row in rows.iterrows(): # print(row) # pdb.set_trace() if row.ncdf_path == "nan": continue samples = load_numpy(row['hdf5_8bit_path']) offset_idx = row['hdf5_8bit_offset'] # continue timedelta_rows = [catalog[catalog.index == ( idx + datetime.timedelta(hours=i))] for i in [0, 1, 3, 6]] # CS_GHIs = [catalog[catalog.index==(idx+datetime.timedelta(hours=i))][station_i + "_CLEARSKY_GHI"].values[0] for i in [0,1,3,6]] for station_i in args.station_data.keys(): sample = samples[station_i] if row[[station_i + "_GHI"]].isnull()[0]: continue elif row[[station_i + "_DAYTIME"]][0] == 0: continue else: GHI_0 = row[station_i + "_GHI"] # train_df[train_df.index == train_df.index[0]+datetime.timedelta(hours=1)] # pdb.set_trace() GHIs = [i[station_i + "_GHI"].values[0] for i in timedelta_rows] CS_GHIs = [i[station_i + "_CLEARSKY_GHI"].values[0] for i in timedelta_rows] y = np.array(CS_GHIs) - np.array(GHIs) if np.isnan(np.sum(y)): continue # ini = time.time() # print(station_coords) imgs = [] x = sample[offset_idx].swapaxes(0, 1).swapaxes(1, 2) # print(y) x = preprocess(x)[0] continue yield x, y
21,603
def _extract_gsi(name):
    """
    Extract a normalised groundstation if available.

    :param name:
    :rtype: str

    >>> _extract_gsi('LANDSAT-7.76773.S3A1C2D2R2')
    >>> _extract_gsi('AQUA.60724.S1A1C2D2R2')
    >>> _extract_gsi('TERRA.73100.S1A2C2D4R4')
    >>> _extract_gsi('LANDSAT-8.3108')
    >>> _extract_gsi('NPP.VIIRS.10014.ALICE')
    'ASA'
    >>> _extract_gsi('NPP_VIRS_STD-HDF5_P00_18966.ASA_0_0_20150626T053709Z20150626T055046')
    'ASA'
    >>> _extract_gsi('not_an_ads_dir')
    >>> _extract_gsi('LANDSAT-8.FAKE')
    """
    last_component = name.split('.')[-1]
    if '_' in last_component:
        last_component = last_component.split('_')[0]

    if not metadata.is_groundstation_alias(last_component):
        return None

    return metadata.normalise_gsi(last_component)
21,604
def array_at_verts_basic2d(a):
    """
    Computes values at cell vertices on 2d array using neighbor averaging.

    Parameters
    ----------
    a : ndarray
        Array values at cell centers, could be a slice in any orientation.

    Returns
    -------
    averts : ndarray
        Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1).
    """
    assert a.ndim == 2
    shape_verts2d = (a.shape[0] + 1, a.shape[1] + 1)

    # create a 3D array of size (nrow+1, ncol+1, 4)
    averts3d = np.full(shape_verts2d + (4,), np.nan)
    averts3d[:-1, :-1, 0] = a
    averts3d[:-1, 1:, 1] = a
    averts3d[1:, :-1, 2] = a
    averts3d[1:, 1:, 3] = a

    # calculate the mean over the last axis, ignoring NaNs
    averts = np.nanmean(averts3d, axis=2)

    return averts
21,605
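As an illustrative sketch (not part of the dataset row above), the vertex-averaging helper can be exercised on a small array; the numpy import is assumed:

import numpy as np

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])
# corners keep the single adjacent value, edges average two cells, the centre averages all four
print(array_at_verts_basic2d(a))
# [[1.  1.5 2. ]
#  [2.  2.5 3. ]
#  [3.  3.5 4. ]]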
def get_subquestion_answer(response, questions, subquestion):
    """
    Return the answer to a subquestion from ``response``.
    """
    question_id = subquestion[0]
    answers = response[question_id]
    dim = len(subquestion) - 1
    for answer in answers:
        matched = True
        if subquestion[1] != answer[0]:
            matched = False
        if dim == 2 and subquestion[2] != answer[1]:
            matched = False
        if matched:
            if dim == 1:
                answer = answer[1]
            else:
                answer = answer[2]
            return map_answer_expr(questions, question_id, answer)
21,606
def convert_dir_save(inputdir, outputdir, templatedir, temporarydir, prefix):
    """
    Converts all .msg files in inputdir to uORB header files.
    Unchanged existing files are not overwritten.
    """
    # Create new headers in temporary output directory
    convert_dir(inputdir, temporarydir, templatedir)
    # Copy changed headers from temporary dir to output dir
    copy_changed(temporarydir, outputdir, prefix)
21,607
def uniform_centroids(dist_map, n_centroids):
    """
    Uniformly space `n_centroids` seeds in a naive way

    :param dist_map: sparse distance map
    :param n_centroids: number of seeds to place
    :return: (n_centroids, ) integer array with the indices of the seeds
    """
    def get_dist(idx_vertex):
        return csgraph.dijkstra(dist_map, indices=idx_vertex, directed=False)

    res = np.zeros(n_centroids, dtype='i4')
    res[0] = np.random.randint(0, dist_map.shape[0])
    dist = get_dist(res[0])
    for idx in range(1, n_centroids):
        res[idx] = np.argmax(dist)
        np.minimum(dist, get_dist(res[idx]), out=dist)
    return res
21,608
def main():
    """
    This program creates lines on an instance of the GWindow class.
    A circle marks the user's first click; when the user clicks on the
    canvas a second time, the circle disappears and a line is drawn.
    """
    onmouseclicked(draw_circle_or_draw_line)
21,609
def collate_custom(batch, key=None):
    """ Custom collate function for the Dataset class
     * It doesn't convert numpy arrays to stacked-tensors, but rather combines them in a list
     * This is useful for processing annotations of different sizes
    """
    # this case will occur in first pass, and will convert a
    # list of dictionaries (returned by the threads by sampling dataset[idx])
    # to a unified dictionary of collated values
    if isinstance(batch[0], collections.Mapping):
        return {key: collate_custom([d[key] for d in batch], key) for key in batch[0]}

    # these cases will occur in recursion
    # elif torch.is_tensor(batch[0]):
    #     # for tensors, use standard collating function
    #     return default_collate(batch)
    elif isinstance(batch, list) and isinstance(batch[0], list):
        # flatten lists of lists
        flattened_list = list(itertools.chain(*batch))
        return flattened_list
    elif isinstance(batch, list) and len(batch) == 1:
        # lists of length 1, remove list wrap
        return batch[0]
    else:
        # for other types (i.e. lists of len != 1), return as is
        return batch
21,610
def get_horizon(latitude, longitude, dem, ellipsoid=Ellipsoid("WGS84"), distance=0.5, precision=1): """ Compute local get_horizon obstruction from Digital Elevation Model This function is mainly based on a previous Matlab function (see https://fr.mathworks.com/matlabcentral/fileexchange/59421-dem-based-topography-get_horizon-model) :param latitude: :param longitude: :param dem: DigitalElevationModel instance :param ellipsoid: Ellipsoid instance :param distance: distance in degrees :param precision: azimuth precision of resulting horizon in degrees :return: """ # Prune DEM and fit to study area study_area = dem.get_raster_at(ll_point=(latitude - distance, longitude - distance), ur_point=(latitude + distance, longitude + distance)) y_obs, x_obs = study_area.geo_grid.latlon_to_2d_index(latitude, longitude) z_obs = study_area.get_value_at(latitude, longitude) # Azimuth and elevation azimuth = (180/np.pi) * get_azimuth(latitude * np.pi/180, longitude * np.pi/180, (study_area.geo_grid.latitude - dem.res/2) * np.pi/180, (study_area.geo_grid.longitude + dem.res/2) * np.pi/180, ellipsoid.e) elevation = np.zeros(azimuth.shape) elevation[study_area > z_obs] = \ get_elevation(z_obs, study_area[study_area > z_obs], latitude * np.pi/180, study_area.geo_grid.latitude[study_area > z_obs] * np.pi/180, longitude * np.pi/180, study_area.geo_grid.longitude[study_area > z_obs] * np.pi/180, ellipsoid.e, ellipsoid.a) # TODO: understand why "z_obs < study_area" return a numpy ValueError (ambiguous truth value) # Elevation vector length len_elevation = (90 + precision) // precision elevation_dic = dict(ne=np.zeros((y_obs, len_elevation)), e=np.zeros((study_area.x_size - x_obs, 2*len_elevation - 1)), s=np.zeros((study_area.y_size - y_obs, 2*len_elevation - 1)), w=np.zeros((x_obs, 2*len_elevation - 1)), nw=np.zeros((y_obs, len_elevation))) azimuth_dic = dict(ne=np.arange(-180, -90 + precision, precision), e=np.arange(-180, 0 + precision, precision), s=np.arange(-90, 90 + precision, precision), w=np.arange(0, 180 + precision, precision), nw=np.arange(90, 180 + precision, precision)) # Main computation # NE & NW for n, (az, el) in enumerate(zip(azimuth[:y_obs], elevation[:y_obs])): idx_ne = np.digitize(azimuth_dic["ne"], az[x_obs:]) idx_nw = np.digitize(azimuth_dic['nw'], az[:x_obs]) elevation_dic["ne"][n, idx_ne < len(az[x_obs:])] = el[x_obs:][idx_ne[idx_ne < len(az[x_obs:])]] elevation_dic["nw"][n, idx_nw < len(az[:x_obs])] = el[:x_obs][idx_nw[idx_nw < len(az[:x_obs])]] # South for n, (az, el) in enumerate(zip(azimuth[y_obs:, ::-1], elevation[y_obs:, ::-1])): idx_s = np.digitize(azimuth_dic["s"], az) elevation_dic["s"][n, idx_s < len(az)] = el[idx_s[idx_s < len(az)]] # East for n, (az, el) in enumerate(zip(azimuth[:, x_obs:].transpose(), elevation[:, x_obs:].transpose())): idx_e = np.digitize(azimuth_dic["e"], az) elevation_dic["e"][n, idx_e < len(az)] = el[idx_e[idx_e < len(az)]] # West for n, (az, el) in enumerate(zip(azimuth[::-1, :x_obs].transpose(), elevation[::-1, :x_obs].transpose())): idx_w = np.digitize(azimuth_dic["w"], az) elevation_dic["w"][n, idx_w < len(az)] = el[idx_w[idx_w < len(az)]] sun_mask = np.concatenate([elevation_dic[key].max(axis=0, initial=None) for key in elevation_dic.keys()]) az_mask = np.concatenate([azimuth_dic[key] for key in azimuth_dic.keys()]) + 180 horizon = dict(elevation=np.zeros((360 + precision)//precision), azimuth=np.arange(0, 360 + precision, precision)) for n, az in enumerate(horizon["azimuth"]): horizon["elevation"][n] = np.max(sun_mask[az_mask == az]) 
horizon["elevation"][-1] = horizon["elevation"][0] return horizon
21,611
def get_articles(language, no_words, max_no_articles, search, **kwargs):
    """ Retrieve articles from Wikipedia """
    wikipedia.set_rate_limiting(True)  # be polite
    wikipedia.set_lang(language)

    if search is not None:
        titles = wikipedia.search(search, results=max_no_articles)
    else:
        titles = wikipedia.random(pages=max_no_articles)

    articles = []
    current_no_words = 0
    for title in titles:
        print("INFO: loading {}".format(title))
        page = wikipedia.page(title=title)
        content = page.content
        article_no_words = len(content.split())
        current_no_words += article_no_words
        print("INFO: article contains {} words".format(article_no_words))
        articles.append((title, content))
        if current_no_words >= no_words:
            break

    return articles
21,612
def gen_urdf_material(color_rgba):
    """
    :param color_rgba: Four element sequence (0 to 1) encoding an rgba colour tuple, ``seq(float)``
    :returns: urdf element sequence for an anonymous material definition containing just a color element, ``str``
    """
    return '<material name=""><color rgba="{0} {1} {2} {3}"/></material>'.format(*color_rgba)
21,613
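A quick illustrative call of the URDF material helper above (expected output shown as a comment):

print(gen_urdf_material((1.0, 0.0, 0.0, 0.5)))
# <material name=""><color rgba="1.0 0.0 0.0 0.5"/></material>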
def _add_temp_index(db):
    """Add an index to the 'identifier' column to make queries faster."""
    add_identifier_index = """
        CREATE INDEX CONCURRENTLY {identifier_index_name} ON {case_index_table} (identifier)
    """.format(
        case_index_table=CommCareCaseIndexSQL._meta.db_table,
        identifier_index_name=IDENTIFIER_INDEX_NAME
    )
    with CommCareCaseIndexSQL.get_cursor_for_partition_db(db) as cursor:
        if not _index_exists(db, IDENTIFIER_INDEX_NAME):
            log_sql(add_identifier_index)
            cursor.execute(add_identifier_index)
21,614
def plot(self, class_=None, show_plot=True, plot_3D=True, plot_probs=True, plot_dominant_classes=True, plot_poly=False, plot_normals=False, plot_subclasses=False, plot_legend=True, fig=None, ax=None, title='Softmax Classification', **kwargs): """Display the class and/or PDF plots of the Softmax distribution. The class plot shows only the critical classes (those that have the greatest probability at any given state). Parameters ---------- plot_dominant_classes : bool, optional Plot the critical classes. Defaults to `True`. plot_probs : bool, optional Plot the probability densities. Defaults to `True`. plot_poly : bool, optional Plot the polygon from which the boundaries are formed. Defaults to `False`. **kwargs Keyword arguments for ``plot_dominant_classes``. """ # Define probabilities lazily if not hasattr(self, 'probs') and not plot_subclasses: self.probability() if not hasattr(self, 'subclass_probs') and plot_subclasses: self.probability(find_subclass_probs=True) # Plotting attributes self.plot_3D = plot_3D self.plot_subclasses = plot_subclasses if plot_dominant_classes and plot_probs and class_ is None: if fig is None: self.fig = plt.figure(figsize=(14, 8)) else: self.fig = fig bbox_size = (-1.3, -0.175, 2.2, -0.075) if ax is None: ax1 = self.fig.add_subplot(1, 2, 1) if plot_3D and self.state.shape[1] > 1: ax2 = self.fig.add_subplot(1, 2, 2, projection='3d') else: ax2 = self.fig.add_subplot(1, 2, 2) else: ax1 = ax[0] ax2 = ax[1] self._plot_dominant_classes(ax1) self._plot_probs(ax2) axes = [ax1, ax2] elif plot_dominant_classes and class_ is None: if fig is None: self.fig = plt.figure(figsize=(8, 8)) else: self.fig = fig if ax is None: ax1 = self.fig.add_subplot(111) else: ax1 = ax bbox_size = (0, -0.175, 1, -0.075) self._plot_dominant_classes(ax=ax1, **kwargs) axes = [ax1] elif plot_probs: if fig is None: self.fig = plt.figure(figsize=(8, 8)) else: self.fig = fig if class_ is not None: if ax is None: if plot_3D and self.state.shape[1] > 1: ax = self.fig.add_subplot(1, 1, 1, projection='3d') else: ax = self.fig.add_subplot(1, 1, 1) self.classes[class_].plot(ax=ax, **kwargs) axes = [self.fig.gca()] else: if plot_3D and self.state.shape[1] > 1 and ax is None: ax1 = self.fig.add_subplot(111, projection='3d') elif ax is None: ax1 = self.fig.add_subplot(111) else: ax1 = ax self._plot_probs(ax1, **kwargs) axes = [ax1] bbox_size = (0, -0.15, 1, -0.05) if plot_legend: # Create Proxy artists for legend labels proxy = [None] * self.num_classes for i in range(self.num_classes): if self.class_labels[i] not in self.class_labels[:i]: proxy_label = self.class_labels[i] else: proxy_label = "_nolegend_" proxy[i] = plt.Rectangle((0, 0), 1, 1, fc=self.class_colors[i], alpha=0.6, label=proxy_label,) plt.legend(handles=proxy, loc='lower center', mode='expand', ncol=5, bbox_to_anchor=(0, 1.0 ,1, 0), borderaxespad=0.) # plt.legend(handles=proxy, loc='lower center', mode='expand', ncol=4, # bbox_to_anchor=bbox_size, borderaxespad=0.) 
plt.suptitle(title, fontsize=16) # Plot polygon if self.poly is not None and plot_poly and plot_dominant_classes: try: for poly in self.polys: patch = PolygonPatch(poly, facecolor='none', zorder=2, linewidth=3, edgecolor='black',) ax1.add_patch(patch) except: patch = PolygonPatch(self.poly, facecolor='none', zorder=2, linewidth=3, edgecolor='black',) ax1.add_patch(patch) # Plot normals # <>TODO fix crashing issue with vertical normals if self.normals is not None and plot_normals and plot_dominant_classes: t = np.arange(self.bounds[0], self.bounds[2] + 1) for i, normal in enumerate(self.normals): if abs(normal[1]) < 0.0001: ax1.axvline(self.offsets[i], ls='--', lw=3, c='black') else: slope = normal[0] y = slope * t - self.offsets[i] ax1.plot(t, y, ls='--', lw=3, c='black') if show_plot: plt.show() try: return axes except UnboundLocalError: logging.warn('No axes to return.')
21,615
def roll_timeseries(arr, timezones):
    """
    Roll timeseries from UTC to local time. Automatically compute time-shift
    from UTC offset (timezone) and time-series length.

    Parameters
    ----------
    arr : ndarray
        Input timeseries array of form (time, sites)
    timezones : ndarray | list
        Vector of timezone shifts from UTC to local time

    Returns
    -------
    local_arr : ndarray
        Array shifted to local time
    """
    if arr.shape[1] != len(timezones):
        msg = ('Number of timezone shifts ({}) does not match number of '
               'sites ({})'.format(len(timezones), arr.shape[1]))
        raise ValueError(msg)

    time_step = arr.shape[0] // 8760
    local_arr = np.zeros(arr.shape, dtype=arr.dtype)
    for tz in set(timezones):
        mask = timezones == tz
        local_arr[:, mask] = np.roll(arr[:, mask], int(tz * time_step), axis=0)

    return local_arr
21,616
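A minimal sketch of how `roll_timeseries` above could be called, assuming hourly data (8760 steps) for two sites and that timezones are passed as a numpy array:

import numpy as np

arr = np.arange(8760 * 2).reshape(8760, 2)   # (time, sites), hourly for one year
timezones = np.array([-5, 0])                # UTC offsets per site
local = roll_timeseries(arr, timezones)
print(local.shape)                           # (8760, 2)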
def is_linear(a, eps=1e-3):
    """Check if array of numbers is approximately linear."""
    x = np.diff(a[1:-1]).std() / np.diff(a[1:-1]).mean()
    return x < eps
21,617
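For illustration, `is_linear` above flags evenly spaced values (numpy import assumed):

import numpy as np

print(is_linear(np.linspace(0.0, 1.0, 50)))   # True  - constant spacing
print(is_linear(np.logspace(0.0, 1.0, 50)))   # False - spacing grows geometrically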
def faces_sphere(src, show_path): """ Compute vertices and faces of Sphere input for plotting. Parameters ---------- - src (source object) - show_path (bool or int) Returns ------- vert, faces (returns all faces when show_path=int) """ # pylint: disable=protected-access res = 15 # surface discretization # generate sphere faces r = src.diameter / 2 phis = np.linspace(0, 2 * np.pi, res) phis2 = np.roll(np.linspace(0, 2 * np.pi, res), 1) ths = np.linspace(0, np.pi, res) faces = [ r * np.array( [ (np.cos(p) * np.sin(t1), np.sin(p) * np.sin(t1), np.cos(t1)), (np.cos(p) * np.sin(t2), np.sin(p) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t2), np.sin(p2) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t1), np.sin(p2) * np.sin(t1), np.cos(t1)), ] ) for p, p2 in zip(phis, phis2) for t1, t2 in zip(ths[1:-2], ths[2:-1]) ] faces += [ r * np.array( [(np.cos(p) * np.sin(th), np.sin(p) * np.sin(th), np.cos(th)) for p in phis] ) for th in [ths[1], ths[-2]] ] # add src attributes position and orientation depending on show_path rots, poss, _ = get_rot_pos_from_path(src, show_path) # all faces (incl. along path) adding pos and rot all_faces = [] for rot, pos in zip(rots, poss): for face in faces: all_faces += [[rot.apply(f) + pos for f in face]] return all_faces
21,618
def pronto_signals_to_iguana_signals(carrier_frequency, signals):
    """Convert the pronto format into iguana format, where the pulses and
    spaces are represented in number of microseconds.
    """
    return [carrier_cycles_to_microseconds(carrier_frequency, signal) | command
            for signal, command in zip(signals, itertools.cycle((iguanaIR.IG_PULSE_BIT, 0)))]
21,619
def test_helper_functions():
    """
    Test for get_atomic_num_one_hot(), get_atom_chiral_tag_one_hot()
    and get_atom_mass() helper functions.
    """
    smiles = 'C'
    m = Chem.MolFromSmiles(smiles)
    atom = m.GetAtoms()[0]

    f_atomic = get_atomic_num_one_hot(
        atom, GraphConvConstants.ATOM_FEATURES['atomic_num'])
    req_f = list(np.zeros((101,), dtype=float))
    req_f[5] = 1.0
    assert len(f_atomic) == len(req_f)
    assert f_atomic == req_f

    f_chiral_tag = get_atom_chiral_tag_one_hot(
        atom, GraphConvConstants.ATOM_FEATURES['chiral_tag'])
    ref_f = [1.0, 0.0, 0.0, 0.0, 0.0]
    assert len(f_chiral_tag) == len(ref_f)
    assert f_chiral_tag == ref_f

    f_mass = get_atom_mass(atom)
    ref_f = [0.12011]
    assert len(f_mass) == len(ref_f)
    assert f_mass == ref_f
21,620
def stack_xarray_repdim(da, **dims):
    """Like xarrays stack, but with partial support for repeated dimensions

    The xarray.DataArray.stack method fails when any dimension occurs multiple
    times, as repeated dimensions are not currently very well supported in
    xarray (2018-03-26). This method provides a workaround so that stack can
    be used for an array where some dimensions are repeated, as long as the
    repeated dimensions are themselves not stacked.

    Parameters:
        da (DataArray): DataArray to operate on.
        **dims: Dimensions to stack. As for xarray.DataArray.stack.
    """
    # make view of da without repeated dimensions
    cnt = collections.Counter(da.dims)
    D = {k: itertools.count() for k in cnt.keys()}
    tmpdims = []
    dimmap = {}
    for dim in da.dims:
        if cnt[dim] == 1:
            tmpdims.append(dim)
        else:
            newdim = "{:s}{:d}".format(dim, next(D[dim]))
            tmpdims.append(newdim)
            dimmap[newdim] = dim
    da2 = xarray.DataArray(da.values, dims=tmpdims)
    da2_stacked = da2.stack(**dims)
    # put back repeated dimensions with new coordinates
    da3 = xarray.DataArray(da2_stacked.values,
                           dims=[dimmap.get(d, d) for d in da2_stacked.dims])
    da3 = da3.assign_coords(
        **{k: pandas.MultiIndex.from_product(
            [da.coords[kk] for kk in dims[k]], names=dims[k])
           if k in dims else da.coords[k]
           for k in np.unique(da3.dims)})
    return da3
21,621
def compute_pnorm(model: nn.Module) -> float:
    """
    Computes the norm of the parameters of a model.

    :param model: A PyTorch model.
    :return: The norm of the parameters of the model.
    """
    return math.sqrt(sum([p.norm().item() ** 2 for p in model.parameters()]))
21,622
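An illustrative check of `compute_pnorm` above against a hand-computed value, assuming PyTorch is available:

import math
import torch
import torch.nn as nn

layer = nn.Linear(2, 1, bias=False)
with torch.no_grad():
    layer.weight.fill_(3.0)
# single weight tensor [[3., 3.]] -> sqrt(3^2 + 3^2)
print(compute_pnorm(layer))   # ~4.2426
print(math.sqrt(18.0))        # 4.2426...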
def atomic_brute_cast(tree: Element) -> Element:
    """
    Cast every node's text into an atomic string to prevent further processing on it.

    Since we generate the final HTML with Jinja templates, we do not want other inline or tree processors
    to keep modifying the data, so this function is used to mark the complete tree as "do not touch".

    Reference: issue [Python-Markdown/markdown#920](https://github.com/Python-Markdown/markdown/issues/920).

    On a side note: isn't `atomic_brute_cast` such a beautiful function name?

    Arguments:
        tree: An XML node, used like the root of an XML tree.

    Returns:
        The same node, recursively modified by side-effect. You can skip re-assigning the return value.
    """
    if tree.text:
        tree.text = AtomicString(tree.text)
    for child in tree:
        atomic_brute_cast(child)
    return tree
21,623
def _sync_batch_changes(tasks_list: DbTasksChange):
    """Dummy function to perform the write in db"""
    with TASK_TABLE.batch_writer() as batch:
        for task in tasks_list.tasks_to_update:
            batch.put_item(Item=_serialize_downward_task(task))
        for task_id in tasks_list.ids_to_remove:
            batch.delete_item(Key={"id": task_id})
21,624
def rsa_keys(p: int = None, q: int = None, e: int = 3) -> RSA_Keys:
    """
    Generate a new set of RSA keys.
    If p and q are not provided (<= 1), then they will be generated.

    :param p: A big prime.
    :param q: A big prime.
    :param e: The default public key.
    :return: The RSA private and public keys.
    :raise Exception: If provided p and q are invalid.
    """
    if not p or p <= 1:
        p = matasano.math.random_big_prime(e=e)
    if not q or q <= 1:
        q = matasano.math.random_big_prime(e=e)

    n = p * q
    phi_n = (p - 1) * (q - 1)
    d = matasano.math.modinv(e, phi_n)

    return RSA_Keys(RSA_Priv(d, n), RSA_Pub(e, n))
21,625
def chunk_file(file_path, chunks, work_dir):
    """Splits a large file by line into number of chunks and writes them into work_dir"""
    with open(file_path) as fin:
        num_lines = sum(1 for line in fin)
    chunk_size = math.ceil(num_lines / chunks)

    output_file_paths = []
    with contextlib.ExitStack() as stack:
        fin = stack.enter_context(open(file_path))
        for i, line in enumerate(fin):
            if not i % chunk_size:
                file_split = "{}.chunk_{}".format(
                    os.path.join(work_dir, os.path.basename(file_path)),
                    i // chunk_size
                )
                output_file_paths.append(file_split)
                fout = stack.enter_context(open(file_split, "w"))
            fout.write("{}\n".format(line.strip()))
    return output_file_paths
21,626
def _get_closest_station_by_zcta_ranked(zcta):
    """
    Selects the nth ranked station from a list of ranked stations

    Parameters
    ----------
    zcta : string
        ZIP Code Tabulation Area (ZCTA)

    Returns
    -------
    station : string
        Station that was found
    warnings : list
        List of warnings for the returned station (includes distance warnings)
    lat : float
        latitude for the search
    lon : float
        longitude for the search
    """
    zcta = zcta.zfill(5)  # Ensure that we have 5 characters, and if not left-pad it with zeroes.
    lat, lon = zcta_to_lat_long(zcta)

    finding_station = True
    rank = 0
    while finding_station:
        rank = rank + 1
        station_ranking = _rank_stations_by_distance_and_quality(lat, lon)
        station, warnings = select_station(station_ranking, rank=rank)

        # Ignore stations that begin with A
        if str(station)[0] != 'A':
            finding_station = False

    return station, warnings, lat, lon
21,627
def display_percentage(context: ExecutionContext, event: events.AfterExecution) -> None:
    """Add the current progress in % to the right side of the current line."""
    operations_count = cast(int, context.operations_count)  # is already initialized via `Initialized` event
    current_percentage = get_percentage(context.operations_processed, operations_count)
    styled = click.style(current_percentage, fg="cyan")
    # Total length of the message, so it will fill to the right border of the terminal.
    # Padding is already taken into account in `context.current_line_length`
    length = max(get_terminal_width() - context.current_line_length + len(styled) - len(current_percentage), 1)
    template = f"{{:>{length}}}"
    click.echo(template.format(styled))
21,628
def label_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized Confusion Matrix' else: title = 'Confusion Matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data classes = classes[unique_labels(y_true, y_pred)] if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized Confusion Matrix") else: print('Confusion Matrix, without normalization') print(cm) fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return ax
21,629
def run_sample():
    """Runs the sample."""
    # !!! ATTENTION !!!
    #  Running this sample may change/delete your Google Analytics account
    #  configuration. Make sure to not use the Google Analytics property ID from
    #  your production environment below.

    # TODO(developer): Replace this variable with your Google Analytics 4
    #  property ID (e.g. "123456") before running the sample.
    property_id = "YOUR-GA4-PROPERTY-ID"

    # TODO(developer): Replace this variable with your Android app data stream ID
    #  (e.g. "123456") before running the sample.
    stream_id = "YOUR-ANDROID-APP-DATA-STREAM-ID"

    update_android_app_data_stream(property_id, stream_id)
21,630
def lowpass(data, in_t=None, cutoff=None, order=4, dt=None, axis=-1, causal=False):
    """
    data: vector of data
    in_t: sample times
    cutoff: cutoff period in the same units as in_t

    returns vector same as data, but with high frequencies removed
    """
    # Step 1: Determine dt from data or from user if specified
    if dt is None:
        dt = np.median(np.diff(in_t))
    dt = float(dt)  # make sure it's not an int
    cutoff = float(cutoff)

    Wn = dt / cutoff

    B, A = butter(order, Wn)

    if not causal:
        # scipy filtfilt triggers some warning message about tuple indices.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            data_filtered = filtfilt(B, A, data, axis=axis)
    else:
        data_filtered = lfilter(B, A, data, axis=axis)

    return data_filtered
21,631
def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): """ This function indexes out a target dimension of a tensor in a structured way, by allowing a different value to be selected for each member of a flat index tensor (@indices) corresponding to a source dimension. This can be interpreted as moving along the source dimension, using the corresponding index value in @indices to select values for all other dimensions outside of the source and target dimensions. A common use case is to gather values in target dimension 1 for each batch member (target dimension 0). Args: x (torch.Tensor): tensor to gather values for target_dim (int): dimension to gather values along source_dim (int): dimension to hold constant and use for gathering values from the other dimensions indices (torch.Tensor): flat index tensor with same shape as tensor @x along @source_dim Returns: y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out """ assert len(indices.shape) == 1 assert x.shape[source_dim] == indices.shape[0] # unsqueeze in all dimensions except the source dimension new_shape = [1] * x.ndimension() new_shape[source_dim] = -1 indices = indices.reshape(*new_shape) # repeat in all dimensions - but preserve shape of source dimension, # and make sure target_dimension has singleton dimension expand_shape = list(x.shape) expand_shape[source_dim] = -1 expand_shape[target_dim] = 1 indices = indices.expand(*expand_shape) out = x.gather(dim=target_dim, index=indices) return out.squeeze(target_dim)
21,632
def resolve_parallelism(parallel):
    """Decide what level of parallelism to use.

    Parameters
    ----------
    parallel : integer or None
        The user's specification

    Returns
    -------
    A positive integer giving the parallelization level.
    """
    if parallel is None:
        if mp.get_start_method() == 'fork':
            parallel = os.cpu_count()
            if SHOW_INFORMATIONAL_MESSAGES and parallel > 1:
                print(f'info: parallelizing processing over {parallel} CPUs')
        else:
            parallel = 1

    if parallel > 1 and mp.get_start_method() != 'fork':
        print('''warning: parallel processing was requested but is not possible
    because this operating system is not using `fork`-based multiprocessing

    On macOS a bug prevents forking: https://bugs.python.org/issue33725''', file=sys.stderr)
        parallel = 1

    if parallel > 1:
        return parallel

    return 1
21,633
def get_queue(launcher=None):
    """Get the name of the queue used in an allocation.

    :param launcher: Name of the WLM to use to collect allocation info. If no launcher
                     is provided ``detect_launcher`` is used to select a launcher.
    :type launcher: str | None
    :returns: Name of the queue
    :rtype: str
    :raises SSUnsupportedError: User attempted to use an unsupported WLM
    """
    if launcher is None:
        launcher = detect_launcher()

    if launcher == "pbs":
        return _pbs.get_queue()
    if launcher == "slurm":
        return _slurm.get_queue()

    raise SSUnsupportedError(f"SmartSim cannot get queue for launcher `{launcher}`")
21,634
def _initial_logs():
    """write some initial logs"""
    logger.info(f"Username: {getpass.getuser()}")
    logger.info(
        f"Most recent git commit hash: {subprocess.getoutput('git rev-parse HEAD')} "
    )
    logger.info(
        f"Git remote and branch info: {subprocess.getoutput('git remote show origin')}"
    )
21,635
def get_openmc_geometry(openmoc_geometry):
    """Return an OpenMC geometry corresponding to an OpenMOC geometry.

    Parameters
    ----------
    openmoc_geometry : openmoc.Geometry
        OpenMOC geometry

    Returns
    -------
    openmc_geometry : openmc.Geometry
        Equivalent OpenMC geometry
    """
    cv.check_type('openmoc_geometry', openmoc_geometry, openmoc.Geometry)

    # Clear dictionaries and auto-generated ID
    OPENMC_SURFACES.clear()
    OPENMOC_SURFACES.clear()
    OPENMC_CELLS.clear()
    OPENMOC_CELLS.clear()
    OPENMC_UNIVERSES.clear()
    OPENMOC_UNIVERSES.clear()
    OPENMC_LATTICES.clear()
    OPENMOC_LATTICES.clear()

    openmoc_root_universe = openmoc_geometry.getRootUniverse()
    openmc_root_universe = get_openmc_universe(openmoc_root_universe)

    openmc_geometry = openmc.Geometry()
    openmc_geometry.root_universe = openmc_root_universe

    return openmc_geometry
21,636
def distinct(xs):
    """Get the list of distinct values with preserving order."""
    # don't use collections.OrderedDict because we do support Python 2.6
    seen = set()
    return [x for x in xs if x not in seen and not seen.add(x)]
21,637
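A quick illustration of the order-preserving `distinct` helper above:

print(distinct([3, 1, 3, 2, 1, 2]))   # [3, 1, 2]
print(distinct("abracadabra"))        # ['a', 'b', 'r', 'c', 'd']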
def get_price_to_free_cash_flow_ratio(equity, year=None, market_cap=None):
    """
    This ratio can be found by dividing the current price of the stock by its
    free cash flow per share. The easy way is to get it from the ratios object
    extracted from investing.
    """
    try:
        price_to_free_cash_flow = None
        if year is None:
            # get it from the ratios
            ratios = equity.fundamentals.ratios
            sorted_ratios = sorted(ratios, key=lambda x: x.current_period, reverse=True)  # the newest in front
            # Starting from the first going down the list.
            for ratio in sorted_ratios:
                if ratio.benchmark == Benchmark.company:
                    price_to_free_cash_flow = ratio.price_to_free_cash_flow_ttm
                    break
        if price_to_free_cash_flow is None:
            price_to_free_cash_flow = 1000
        return price_to_free_cash_flow
    except Exception as e:
        log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
21,638
def _parse_train_configs(train_config):
    """
    check if user's train configs are valid.
    Args:
        train_config(dict): user's train config.
    Return:
        configs(dict): final configs will be used.
    """
    configs = copy.deepcopy(_train_config_default)
    configs.update(train_config)
    assert isinstance(configs['num_epoch'], int), \
        "'num_epoch' must be int value"
    assert isinstance(configs['max_iter'], int), \
        "'max_iter' must be int value"
    assert isinstance(configs['save_iter_step'], int), \
        "'save_iter_step' must be int value"
    assert isinstance(configs['learning_rate'], float), \
        "'learning_rate' must be float"
    assert isinstance(configs['weight_decay'], float), \
        "'weight_decay' must be float"
    assert isinstance(configs['use_pact'], bool), \
        "'use_pact' must be bool"
    assert isinstance(configs['quant_model_ckpt_path'], str), \
        "'quant_model_ckpt_path' must be str"
    assert isinstance(configs['teacher_model_path_prefix'], str), \
        "'teacher_model_path_prefix' must both be string"
    assert isinstance(configs['model_path_prefix'], str), \
        "'model_path_prefix' must both be str"
    assert isinstance(configs['distill_node_pair'], list), \
        "'distill_node_pair' must both be list"
    assert len(configs['distill_node_pair']) > 0, \
        "'distill_node_pair' not configured with distillation nodes"
    assert len(configs['distill_node_pair']) % 2 == 0, \
        "'distill_node_pair' distillation nodes need to be configured in pairs"
    # return the merged configs, as documented above
    return configs
21,639
def as_region(region):
    """
    Convert string to :class:`~GenomicRegion`.

    This function attempts to convert any string passed to it to a :class:`~GenomicRegion`.
    Strings are expected to be of the form <chromosome>[:<start>-<end>[:[strand]], e.g.
    chr1:1-1000, 2:2mb-5mb:-, chrX:1.5kb-3mb, ...

    Numbers can be abbreviated as '12k', '1.5Mb', etc.

    When fed a :class:`~GenomicRegion`, it will simply be returned, making the use of this
    function as an "if-necessary" converter possible.

    :param region: str or :class:`~GenomicRegion`
    :return: :class:`~GenomicRegion`
    """
    if isinstance(region, string_types):
        return GenomicRegion.from_string(region)
    elif isinstance(region, GenomicRegion):
        return region
    raise ValueError("region parameter cannot be converted to GenomicRegion!")
21,640
def android_patch() -> None: """Run necessary patches on an android archive before building.""" fname = 'src/cpython/Modules/Setup.dist' txt = efrotools.readfile(fname) # Need to switch some flags on this one. txt = efrotools.replace_one(txt, '#zlib zlibmodule.c', 'zlib zlibmodule.c -lz\n#zlib zlibmodule.c') # Just turn all these on. for enable in [ '#array arraymodule.c', '#cmath cmathmodule.c _math.c', '#math mathmodule.c', '#_contextvars _contextvarsmodule.c', '#_struct _struct.c', '#_weakref _weakref.c', '#_testcapi _testcapimodule.c', '#_random _randommodule.c', '#_elementtree -I', '#_pickle _pickle.c', '#_datetime _datetimemodule.c', '#_bisect _bisectmodule.c', '#_heapq _heapqmodule.c', '#_asyncio _asynciomodule.c', '#unicodedata unicodedata.c', '#fcntl fcntlmodule.c', '#select selectmodule.c', '#_csv _csv.c', '#_socket socketmodule.c', '#_blake2 _blake2/blake2module.c', '#binascii binascii.c', '#_posixsubprocess _posixsubprocess.c', '#_sha3 _sha3/sha3module.c' ]: txt = efrotools.replace_one(txt, enable, enable[1:]) if ENABLE_OPENSSL: txt = efrotools.replace_one(txt, '#_ssl _ssl.c \\', '_ssl _ssl.c -DUSE_SSL -lssl -lcrypto') else: # Note that the _md5 and _sha modules are normally only built if the # system does not have the OpenSSL libs containing an optimized # version. for enable in [ '#_md5 md5module.c', '#_sha1 sha1module.c', '#_sha256 sha256module.c', '#_sha512 sha512module.c' ]: txt = efrotools.replace_one(txt, enable, enable[1:]) # Turn this off (its just an example module). txt = efrotools.replace_one(txt, 'xxsubtype xxsubtype.c', '#xxsubtype xxsubtype.c') # For whatever reason this stuff isn't in there at all; add it. txt += '\n_json _json.c\n' txt += '\n_lzma _lzmamodule.c -llzma\n' txt += ('\n_sqlite3 -I$(srcdir)/Modules/_sqlite' ' -DMODULE_NAME=\'\\"sqlite3\\"\' -DSQLITE_OMIT_LOAD_EXTENSION' ' -lsqlite3 \\\n' ' _sqlite/cache.c \\\n' ' _sqlite/connection.c \\\n' ' _sqlite/cursor.c \\\n' ' _sqlite/microprotocols.c \\\n' ' _sqlite/module.c \\\n' ' _sqlite/prepare_protocol.c \\\n' ' _sqlite/row.c \\\n' ' _sqlite/statement.c \\\n' ' _sqlite/util.c\n') if ENABLE_OPENSSL: txt += '\n\n_hashlib _hashopenssl.c -DUSE_SSL -lssl -lcrypto\n' txt += '\n\n*disabled*\n_ctypes _crypt grp' efrotools.writefile(fname, txt) # Ok, this is weird. # When applying the module Setup, python looks for any line containing *=* # and interprets the whole thing a a global define?... # This breaks things for our static sqlite compile above. # The check used to look for [A-Z]*=* which didn't break, so let' just # change it back to that for now. fname = 'src/cpython/Modules/makesetup' txt = efrotools.readfile(fname) txt = efrotools.replace_one( txt, ' *=*) DEFS="$line$NL$DEFS"; continue;;', ' [A-Z]*=*) DEFS="$line$NL$DEFS"; continue;;') efrotools.writefile(fname, txt) print('APPLIED EFROTOOLS ANDROID BUILD PATCHES.')
21,641
def premetadata_create_account_stat_table(self, conn, put_timestamp):
    """
    Copied from AccountBroker before the metadata column was
    added; used for testing with TestAccountBrokerBeforeMetadata.

    Create account_stat table which is specific to the account DB.

    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    conn.executescript('''
        CREATE TABLE account_stat (
            account TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            container_count INTEGER,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );

        INSERT INTO account_stat (container_count) VALUES (0);
    ''')

    conn.execute('''
        UPDATE account_stat SET account = ?, created_at = ?, id = ?,
        put_timestamp = ?
    ''', (self.account, Timestamp(time()).internal, str(uuid4()), put_timestamp))
21,642
def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable version numbers
    and returns the discretized matches in a dictionary.
    """
    try:
        data = re.match(user_version_matcher, user).groupdict()
        if len(data) < 3:
            raise AttributeError
    except AttributeError:
        return False
    return data
21,643
def interp_ADCP_2D( sadcp, mask, depth, lon, lat, time, time_win=360.0, rmax=15.0, vmax=2.0, range_min=4.0, ): """ This is essentially a loop over the interp_ADCP function with some additional NaN handling. Assume data is of the form D[i, j] where each j represents a profile and i a depth in that profile. Parameters ---------- sadcp : Munch Munch structure of sadcp data mask : 2D array Mask of boolean values specifying valid depths to interpolate to. depth : array Depths (m) at which to interpolate ADCP data. lon : array Longitude of CTD/VMP profile. lat : array Latitude of CTD/VMP profile. time : array Time of CTD/VMP profile as matlab datenum. time_win : float, optional Time window for search (s) centered on time of profile. Data outside the time range is excluded. rmax : float, optional Distance threshold (m) defines a circle around the location of the profile. Data outside the circle is excluded. vmax : float, optional Velocity threshold (m/s) above which we remove velocity data range_min : float, optional ADCP minimum range threshold (m) below which we remove data Return ------ u : 2D array Zonal velocity (m/s) interpolated to given depths. v : 2D array Meridional velocity (m/s) interpolated to given depths. w : 2D array Vertical velocity (m/s) interpolated to given depths. lonm : array Mean longitude of ADCP data. latm : array Mean latitude of ADCP data. range_bottom : array Minimum beam range to bottom (m). n : array Number of ADCP profiles in average. """ u = np.full_like(mask, np.nan, dtype=float) v = np.full_like(mask, np.nan, dtype=float) w = np.full_like(mask, np.nan, dtype=float) lonm = np.full_like(time, np.nan) latm = np.full_like(time, np.nan) range_bottom = np.full_like(time, np.nan) n = np.full_like(time, np.nan) for i in tqdm(range(time.size)): valid = mask[:, i] try: u_, v_, w_, lon_, lat_, range_bottom_, n_ = interp_ADCP( sadcp, depth[valid], lon[i], lat[i], time[i], time_win=time_win, rmax=rmax, vmax=vmax, range_min=range_min, ) except RuntimeError as err: continue # Fill data u[valid, i] = u_ v[valid, i] = v_ w[valid, i] = w_ lonm[i] = lon_ latm[i] = lat_ range_bottom[i] = range_bottom_ n[i] = n_ return u, v, w, lonm, latm, range_bottom, n
21,644
def extract_p(path, dict_obj, default):
    """
    try to extract dict value in key path, if key error provide default

    :param path: the nested dict key path, separated by '.'
                 (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: a default return value if key error
    :return: extracted value
    """
    if dict_obj is None:
        return default
    keys = path.split('.')
    tmp_iter = dict_obj
    for key in keys:
        try:
            # dict.get() might make KeyError exception unnecessary
            tmp_iter = tmp_iter.get(key, default)
        except KeyError:
            return default
    return tmp_iter
21,645
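For illustration, `extract_p` above walks a nested dict by dotted path and falls back to the default:

config = {"db": {"host": "localhost", "port": 5432}}
print(extract_p("db.port", config, None))      # 5432
print(extract_p("db.user", config, "admin"))   # admin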
def default_lscolors(env):
    """Gets a default instance of LsColors"""
    inherited_lscolors = os_environ.get("LS_COLORS", None)
    if inherited_lscolors is None:
        lsc = LsColors.fromdircolors()
    else:
        lsc = LsColors.fromstring(inherited_lscolors)
    # have to place this in the env, so it is applied
    env["LS_COLORS"] = lsc
    return lsc
21,646
def get_indices_by_sent(start, end, offsets, tokens):
    """
    Get sentence index for textbounds
    """
    # iterate over sentences
    sent_start = None
    sent_end = None
    token_start = None
    token_end = None
    for i, sent in enumerate(offsets):
        for j, (char_start, char_end) in enumerate(sent):
            if (start >= char_start) and (start < char_end):
                sent_start = i
                token_start = j
            if (end > char_start) and (end <= char_end):
                sent_end = i
                token_end = j + 1

    assert sent_start is not None
    assert sent_end is not None
    assert token_start is not None
    assert token_end is not None

    if sent_start != sent_end:
        logging.warn(f"Entity spans multiple sentences, truncating")
        token_end = len(offsets[sent_start])

    toks = tokens[sent_start][token_start:token_end]

    return (sent_start, token_start, token_end, toks)
21,647
def get_server_object_by_id(nova, server_id):
    """
    Returns a server with a given id
    :param nova: the Nova client
    :param server_id: the server's id
    :return: an SNAPS-OO VmInst object or None if not found
    """
    server = __get_latest_server_os_object_by_id(nova, server_id)
    return __map_os_server_obj_to_vm_inst(server)
21,648
def _test_resource_get(req=None, res=None):
    """Handle Test Resource GET requests."""
    res.status = falcon.HTTP_200
    res.body = ('This is me, Falcon, serving a resource!')
21,649
def download_file( token, file_id, file_name, file_size, check_sum, num_connections, key, output_file=None ): """Download an individual file""" if key is not None: raise ValueError('key parameter: encrypted downloads are not supported yet') if file_name.endswith(".gpg"): logging.info("GPG files are not supported") return if file_name.endswith(".cip"): file_name = file_name[:-len(".cip")] if output_file is None: output_file = os.path.join( os.getcwd(), file_id, os.path.basename(file_name) ) logging.debug("Output file:'{}'".format(output_file)) url = "https://ega.ebi.ac.uk:8051/elixir/data/files/{}".format(file_id) if( key is None ): url += "?destinationFormat=plain"; file_size -= 16 #16 bytes IV not necesary in plain mode logging.info("File Id: '{}'({} bytes).".format(file_id, file_size)) if( os.path.exists(output_file) and md5(output_file) == check_sum ): print_local_file_info('Local file exists:', output_file, check_sum ) return num_connections = max( num_connections, 1 ) num_connections = min( num_connections, 128 ) if( file_size < 100*1024*1024 ): num_connections = 1 logging.info("Download starting [using {} connection(s)]...".format(num_connections)) dir = os.path.dirname(output_file) if not os.path.exists(dir) and len(dir)>0 : os.makedirs(dir) chunk_len = math.ceil(file_size/num_connections) with tqdm(total=int(file_size), unit='B', unit_scale=True, unit_divisor=1024) as pbar: params = [(url, token, output_file, chunk_start_pos, min(chunk_len,file_size-chunk_start_pos), pbar) for chunk_start_pos in range(0,file_size, chunk_len)] results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(download_file_slice_ ,params): results.append(part_file_name) pbar.close() if( sum(os.path.getsize(f) for f in results) == file_size ): merge_bin_files_on_disk(output_file, results) not_valid_server_md5 = len(str(check_sum or ''))!=32 if( md5(output_file) == check_sum or not_valid_server_md5 ): print_local_file_info('Saved to : ', output_file, check_sum ) if not_valid_server_md5: logging.info("WARNING: Unable to obtain valid MD5 from the server(recived:{}). Can't validate download. Contact EGA helpdesk".format(check_sum)) else: os.remove(output_file) raise Exception("MD5 does NOT match - corrupted download")
21,650
def delete_report(report_uuid: str, database):
    """Delete a report."""
    report = latest_report(report_uuid, database)
    report["deleted"] = "true"
    insert_new_report(report, database)
21,651
def get_tasks(container_name):
    """Get the list of tasks in a container."""
    file_name = tasks_path(container_name)
    try:
        tasks = [x.rstrip() for x in open(file_name).readlines()]
    except IOError:
        if os.path.exists(file_name):
            raise
        tasks = []  # container doesn't exist anymore
    return tasks
21,652
def get_args() -> argparse.Namespace: """Get script command line arguments.""" parser = argparse.ArgumentParser(description=__doc__.split("\n")[0]) parser.add_argument( "-i", "--input-files", required=True, nargs="+", type=helpers.check_file_arg, help="Path to coverage files", ) parser.add_argument( "-o", "--output-file", help="File where to save coverage results", ) parser.add_argument( "-u", "--uncovered-only", action="store_true", help="Report only uncovered arguments", ) parser.add_argument( "-p", "--print-coverage", action="store_true", help="Print coverage percentage", ) parser.add_argument( "-b", "--badge-icon-url", action="store_true", help="Print badge icon URL", ) parser.add_argument( "--ignore-skips", action="store_true", help="Include all commands and arguments, ignore list of items to skip", ) return parser.parse_args()
21,653
def _get_archive(software, version):
    """
    Gets the downloaded source archive for a software version.

    :param software: software to get the downloaded source archive for
    :type software: str
    :param version: software release
    :type version: str
    """
    download_dir = get_download_location()
    archives = os.listdir(download_dir)
    prefix = "{}-{}.".format(software, version)
    for archive in archives:
        if archive.startswith(prefix):
            return os.path.join(download_dir, archive)
    return None
21,654
def make_odm(study_oid, environment, site_oid, subject_oid, mapping, retrieved_datetime, transfer_user, transfer_identifier, freeze=True): """Receives a mapping like: [ dict(folder_oid="SCRN", form_oid="DM", field_oid="SEX", value="M", cdash_domain="DM", cdash_element="SEX"), dict(folder_oid="SCRN", form_oid="DM", field_oid="DOB", value="1965-02-09", cdash_domain="DM", cdash_element="DOB"), ... ] Unpacks this into a ODM Message broken up by [folder][form][record][field] """ # Sort unstructured dicts into hierarchy of objects to send folders = {} # Map of folders to forms to records to fields for row in mapping: folder_oid = row.get('folder_oid', 'SUBJECT') folder = folders.get(folder_oid, False) if not folder: folder = Folder(folder_oid) folders[folder_oid] = folder form_oid = row.get('form_oid') form = folder.forms.get(form_oid, False) if not form: form = Form(form_oid) folder.forms[form_oid] = form # add_field sorts into appropriate records form.add_field(row) # Now loop through our structure and build ODM study_events = [] for folder_oid in folders: folder = folders[folder_oid] study_event = StudyEventData(folder.oid, study_event_repeat_key=None) # TODO: Folder repeat key? study_events.append(study_event) # Loop through forms in folder for form_oid in folder.forms: form = folder.forms[form_oid] # Add formdata to study event formdata = FormData(form.oid, transaction_type="Update") study_event << formdata # Loop through records we gathered for record_context in form.records: record = form.records[record_context] params = {} if record_context is not None: # Log line? params['oid'] = "{0}_LOG_LINE".format(form_oid) ig = ItemGroupData() # Add itemgroupdata to formdata formdata << ig # Add all items to itemgroupdata along with external audits to show where they came from for field in record.fields: transaction_type = None if field.context_item: if field.is_new: ig.transaction_type = 'Upsert' else: # We want to do a seek an update transaction_type = "Context" ig.transaction_type = 'Update' ig.item_group_repeat_key = '@CONTEXT' ehr_message = "Import from EHR: EHR Source Value %s -> Submitted value: %s" % (field.raw, field.value) item_data = ItemData(field.oid, field.value, specify_value=field.specify_value, transaction_type=transaction_type, freeze=freeze)( AuditRecord(used_imputation_method=False, identifier=transfer_identifier, include_file_oid=False)( UserRef(transfer_user), LocationRef(site_oid), ReasonForChange(ehr_message), # Any string, just becomes part of documentation in Audit trail DateTimeStamp(retrieved_datetime) ) ) # Measurement unit related to this value? if field.measurement_unit is not None: item_data << MeasurementUnitRef(field.measurement_unit) # Add to itemgroup ig << item_data # In context update situation we need to pass the value of the conext field with transaction type # of context. So if that is not one of the fields passed in we need to include it for this record if not record.has_context_field and record_context is not None: # create the itemdata element, add the mdsol:Freeze attribute ig << ItemData(record.context_field_oid, record.context_field_value, transaction_type="Context", freeze=freeze) ig.item_group_repeat_key = '@CONTEXT' odm = ODM("EHRImport")( ClinicalData(study_oid, environment)( SubjectData(site_oid, subject_oid, transaction_type="Update", subject_key_type='SubjectUUID')(*study_events) ) ) return odm
21,655
def parse_string(xml): """ Returns a slash-formatted string from the given XML representation. The return value is a TokenString (see mbsp.py). """ string = "" from xml.dom.minidom import parseString dom = parseString(xml) # Traverse all the <sentence> elements in the XML. for sentence in dom.getElementsByTagName(XML_SENTENCE): _anchors.clear() # Populated by calling _parse_tokens(). _attachments.clear() # Populated by calling _parse_tokens(). # Parse the language from <sentence language="">. language = attr(sentence, XML_LANGUAGE, "en") # Parse the token tag format from <sentence token="">. # This information is returned in TokenString.tags, # so the format and order of the token tags is retained when exporting/importing as XML. format = attr(sentence, XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]) format = not isinstance(format, basestring) and format or format.replace(" ","").split(",") # Traverse all <chunk> and <chink> elements in the sentence. # Find the <word> elements inside and create tokens. tokens = [] for chunk in children(sentence): tokens.extend(_parse_tokens(chunk, format)) # Attach PNP's to their anchors. # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens). # The keys correspond to the keys in _attachments, which have linked PNP chunks. if ANCHOR in format: A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR) for id in sorted(A.keys()): for token in A[id]: token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))]) token[i] = token[i].strip("O-") for p, pnp in enumerate(P[id]): for token in pnp: token[i] += "-"+"P"+str(a+p) token[i] = token[i].strip("O-") a += len(P[id]) # Collapse the tokens to string. # Separate multiple sentences with a new line. tokens = ["/".join([tag for tag in token]) for token in tokens] tokens = " ".join(tokens) string += tokens + "\n" # Return a TokenString, which is a unicode string that transforms easily # into a plain str, a list of tokens, or a Sentence. try: if MBSP: from mbsp import TokenString return TokenString(string, tags=format, language=language) except: return TaggedString(string, tags=format, language=language)
21,656
def join_ad_domain_by_taking_over_existing_computer_using_session( ad_session: ADSession, computer_name=None, computer_password=None, old_computer_password=None, computer_key_file_path=DEFAULT_KRB5_KEYTAB_FILE_LOCATION) -> ManagedADComputer: """ A fairly simple 'join a domain' function using pre-created accounts, which requires minimal input - an AD session. Specifying the name of the computer to takeover explicitly is also encouraged. Given those basic inputs, the domain's nearest controllers are automatically discovered and an account is found with the computer name specified. That account is then taken over so that it can be controlled by the local system, and kerberos keys and such are generated for it. By providing an AD session, one can build a connection to the domain however they so choose and then use it to join this computer, so you don't even need to necessarily use user credentials. :param ad_session: The ADSession object representing a connection with the domain to be joined. :param computer_name: The name of the computer to take over in the domain. This should be the sAMAccountName of the computer, though if computer has a trailing $ in its sAMAccountName and that is omitted, that's ok. If not specified, we will attempt to find a computer with a name matching the local system's hostname. :param computer_password: The password to set for the computer when taking it over. If not specified, a random 120 character password will be generated and set. :param old_computer_password: The current password of the computer being taken over. If specified, the action of taking over the computer will use a "change password" operation, which is less privileged than a "reset password" operation. So specifying this reduces the permissions needed by the user specified. :param computer_key_file_path: The path of where to write the keytab file for the computer after taking it over. This will include keys for both user and server keys for the computer. If not specified, defaults to /etc/krb5.keytab :returns: A ManagedADComputer object representing the computer taken over. """ # for joining a domain, default to using the local machine's hostname as a computer name if computer_name is None: computer_name = get_system_default_computer_name() logger.warning('No computer name was specified for joining via computer takeover. This is unusual and relies ' 'implicitly on the computers in the domain matching this library in terms of how they decide ' 'on the computer name, and may cause errors. The name being used is %s', computer_name) logger.info('Attempting to join computer to domain %s by taking over account with name %s', ad_session.get_domain_dns_name(), computer_name) computer = ad_session.take_over_existing_computer(computer_name, computer_password=computer_password, old_computer_password=old_computer_password) if computer_key_file_path is not None: computer.write_full_keytab_file_for_computer(computer_key_file_path) logger.info('Successfully joined computer to domain %s by taking over computer with name %s', ad_session.get_domain_dns_name(), computer_name) return computer
21,657
def test_ValidateAccessToken_empty_env(): """Test nothing is found in an empty environment. """ environ = {} start_response = lambda x: x fsr = FakeSecretRecover("the wrong secret") assert fsr.access_secret == "the wrong secret" assert fsr.access_token_given is None app = MockApp() vat = ValidateAccessToken(app, recover_secret=fsr.recover_secret) # make a wsgi call which will result in no action: vat(environ, start_response) # check nothing has changed: assert fsr.access_secret == "the wrong secret" assert fsr.access_token_given is None assert ValidateAccessToken.ENV_KEY not in environ
21,658
def clone_repositories(githuburl, githubuser, githubtoken):
    """
    Delete existing local checkouts and clone all repositories of the given
    GitHub user. Authentication uses the personal access token; the
    `githuburl` argument is currently unused.
    """
    github_client = Github(githubtoken)
    repos = github_client.get_user(githubuser).get_repos()
    # Remove any stale local copies first.
    for repo in repos:
        repo_path = Path(repo.name)
        if repo_path.exists() and repo_path.is_dir():
            shutil.rmtree(str(repo_path))
    # Clone every repository over SSH.
    for repo in repos:
        print("Cloning {}".format(repo.ssh_url))
        git.Repo.clone_from(repo.ssh_url, repo.name)
21,659
def peak_sound_pressure(pressure, axis=-1): """ Peak sound pressure :math:`p_{peak}` is the greatest absolute sound pressure during a certain time interval. :param pressure: Instantaneous sound pressure :math:`p`. :param axis: Axis. .. math:: p_{peak} = \\mathrm{max}(|p|) """ return np.abs(pressure).max(axis=axis)
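# Quick check of peak_sound_pressure on a made-up decaying 1 kHz tone.
# Assumes numpy is imported as np, as the function above already requires.
def _example_peak_sound_pressure():
    t = np.linspace(0.0, 0.1, 800, endpoint=False)
    p = 2.0 * np.exp(-20.0 * t) * np.sin(2.0 * np.pi * 1000.0 * t)
    print(peak_sound_pressure(p))                                 # close to 2.0
    print(peak_sound_pressure(np.stack([p, -3.0 * p]), axis=-1))  # one peak per row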
21,660
def _download_and_prepare( args: argparse.Namespace, builder: tfds.core.DatasetBuilder, ) -> None: """Generate a single builder.""" logging.info(f'download_and_prepare for dataset {builder.info.full_name}...') dl_config = _make_download_config(args) if args.add_name_to_manual_dir: dl_config.manual_dir = os.path.join(dl_config.manual_dir, builder.name) builder.download_and_prepare( download_dir=args.download_dir, download_config=dl_config, ) # Dataset generated successfully logging.info('Dataset generation complete...') termcolor.cprint(str(builder.info.as_proto), attrs=['bold'])
21,661
def test2_RPY_to_RotZYX(): """ Test2 the method RPY_to_RotZYX """ roll: float = 1.8 pitch: float = 1.8 yaw: float = 1.8 rot_obtained = RPY_to_RotZYX(roll, pitch, yaw) rot_expected = array([[0.0516208, 0.0057865, 0.9986500], [-0.2212602, 0.9751976, 0.0057865], [-0.9738476, -0.2212602, 0.0516208]]) assert_allclose(rot_obtained, rot_expected, rtol=4e-06, verbose=True)
21,662
def field_filter_query(field, values):
    """Build an Elasticsearch filter clause for the given field and values.

    Full-text fields ('names', 'addresses') need a work-around: a single
    value is matched as a phrase against the field's '.text' sub-field.
    """
    values = ensure_list(values)
    if not len(values):
        return {'match_all': {}}
    if field in ['_id', 'id']:
        return {'ids': {'values': values}}
    if len(values) == 1:
        if field in ['names', 'addresses']:
            field = '%s.text' % field
            return {'match_phrase': {field: values[0]}}
        return {'term': {field: values[0]}}
    return {'terms': {field: values}}
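# Hedged usage sketch: the field names and values below are made up, and
# ensure_list is assumed to pass lists through unchanged; the resulting query
# shapes follow directly from the function above.
def _example_field_filter_query():
    print(field_filter_query('id', ['a1b2']))              # {'ids': {'values': ['a1b2']}}
    print(field_filter_query('names', ['John Doe']))       # match_phrase on 'names.text'
    print(field_filter_query('countries', ['de', 'fr']))   # {'terms': {'countries': ['de', 'fr']}}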
21,663
def export_categories(filename, memmaps, regionlist):
    """ Make a CSV with one line per mem_map, region sizes in regionlist order """
    print(memmaps)
    outf = open(filename, 'w')

    # title line
    outf.write("%s, " % "system")
    for name in regionlist[:-1]:
        outf.write("%s, " % name)
    outf.write("%s\n" % regionlist[-1])

    for m in memmaps:
        outf.write("%s" % m.system)
        # per mem_map regions
        regions = []
        for b in m.blocks:
            regions.extend(b.regions)
        data = [[r.fullname(), r.size] for r in regions]
        for name in regionlist:
            size = 0
            for f, s in data:
                if f == name:
                    size = int(s, 16) if s else 0
                    break
            outf.write(",%s" % (size))
        outf.write('\n')
    outf.close()
21,664
def compute_investigation_stats(inv, exact=True, conf=0.95, correct=True): """ Compute all statistics for all protected features of an investigation Parameters ---------- inv : the investigation exact : whether exact tests should be used conf : overall confidence level (1- familywise error rate) Returns ------- all_stats: list of all statistics for the investigation """ # count the number of hypotheses to test total_hypotheses = num_hypotheses(inv) logging.info('Testing %d hypotheses', total_hypotheses) # # Adjusted Confidence Level (Bonferroni) # adj_conf = 1-(1-conf)/total_hypotheses if correct else conf # statistics for all investigations all_stats = {sens: compute_stats(ctxts, exact, adj_conf, inv.random_state) for (sens, ctxts) in sorted(inv.contexts.iteritems())} # flattened array of all p-values all_pvals = [max(stat[-1], 1e-180) for sens_stats in all_stats.values() for stat in sens_stats['stats']] # correct p-values if correct: pvals_corr = multipletests(all_pvals, alpha=1-conf, method='holm')[1] else: pvals_corr = all_pvals # replace p-values by their corrected value idx = 0 # iterate over all protected features for the investigation for (sens, sens_contexts) in inv.contexts.iteritems(): sens_stats = all_stats[sens]['stats'] # iterate over all contexts for a protected feature for i in range(len(sens_stats)): old_stats = sens_stats[i] all_stats[sens]['stats'][i] = \ np.append(old_stats[0:-1], pvals_corr[idx]) idx += 1 for (sens, sens_contexts) in inv.contexts.iteritems(): metric = sens_contexts[0].metric # For regression, re-form the dataframes for each context if isinstance(metric.stats, pd.DataFrame): res = all_stats[sens] res = pd.DataFrame(res['stats'], index=res['index'], columns=res['cols']) all_stats[sens] = \ {'stats': np.array_split(res, len(res)/len(metric.stats))} all_stats = {sens: sens_stats['stats'] for (sens, sens_stats) in all_stats.iteritems()} return all_stats
21,665
def UpdatePackageContents(change_report, package_cp, portage_root=None): """Add newly created files/directors to package contents. Given an ItemizedChangeReport, add the newly created files and directories to the CONTENTS of an installed portage package, such that these files are considered owned by that package. Args: change_report: ItemizedChangeReport object for the changes to be made to the package. package_cp: A string similar to 'chromeos-base/autotest-tests' giving the package category and name of the package to be altered. portage_root: Portage root path, corresponding to the board that we are working on. Defaults to '/' """ package, vartree = GetPackageAPI(portage_root, package_cp) # Append new contents to package contents dictionary. contents = package.getcontents().copy() for _, filename in change_report.new_files: contents.setdefault(filename, (u'obj', '0', '0')) for _, dirname in change_report.new_directories: # Strip trailing slashes if present. contents.setdefault(dirname.rstrip('/'), (u'dir',)) # Write new contents dictionary to file. vartree.dbapi.writeContentsToContentsFile(package, contents)
21,666
def group_bars(note_list): """ Returns a list of bars, where each bar is a list of notes. The start and end times of each note are rescaled to units of bars, and expressed relative to the beginning of the current bar. Parameters ---------- note_list : list of tuples List of notes to group into bars. """ bar_list = [] current_bar = [] current_bar_start_time = 0 for raw_note in note_list: if raw_note[0] != -1: current_bar.append(raw_note) elif raw_note[0] == -1: quarter_notes_per_bar = raw_note[2] - current_bar_start_time current_bar_scaled = [] for note in current_bar: current_bar_scaled.append((note[0], note[1], min([(note[2] - current_bar_start_time) / quarter_notes_per_bar, 1]), min([(note[3] - current_bar_start_time) / quarter_notes_per_bar, 1]))) bar_list.append(current_bar_scaled) current_bar = [] current_bar_start_time = raw_note[2] return bar_list
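# Worked example, hand-checked against the code above. Note tuples are assumed
# to be (pitch, velocity, start, end) in quarter notes, with a
# (-1, _, bar_end_time, _) sentinel closing each bar.
def _example_group_bars():
    notes = [
        (60, 80, 0.0, 1.0),
        (64, 80, 1.0, 2.0),
        (-1, 0, 4.0, 4.0),   # end of a 4-quarter-note bar
        (67, 80, 4.0, 6.0),
        (-1, 0, 8.0, 8.0),   # end of the next bar
    ]
    print(group_bars(notes))
    # [[(60, 80, 0.0, 0.25), (64, 80, 0.25, 0.5)], [(67, 80, 0.0, 0.5)]]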
21,667
def get_vmexpire_id_from_ref(vmexpire_ref):
    """Parse a vmexpire reference and return the vmexpire ID

    The vmexpire ID is the right-most element of the URL

    :param vmexpire_ref: HTTP reference of the vmexpire
    :return: a string containing the ID of the vmexpire
    """
    vmexpire_id = vmexpire_ref.rsplit('/', 1)[1]
    return vmexpire_id
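# Tiny sanity check with a made-up reference URL.
def _example_get_vmexpire_id_from_ref():
    ref = 'http://controller:9311/v1/vmexpires/3f2a9b1c-0000-1111-2222-333344445555'
    assert get_vmexpire_id_from_ref(ref) == '3f2a9b1c-0000-1111-2222-333344445555'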
21,668
def create_A_and_B_state_ligand(line, A_B_state='vdwq_q'): """Create A and B state topology for a ligand. Parameters ---------- line : str 'Atom line': with atomtype, mass, charge,... A_B_state : str Interactions in the A state and in the B state. vdwq_vdwq: ligand fully interacting in A and B state vdwq_vdw: vdw interactions and electrostatics in the A_state, only vdw in the B_state vdw_vdwq: charge vdw_dummy dummy_vdw vdwq_dummy Returns ------- text : str Atoms line for topology file with A and B state parameters """ atom_number = line.split()[0] atom_type = line.split()[1] residue_nr = line.split()[2] residue_name = line.split()[3] atom_name = line.split()[4] cgnr = line.split()[5] charge = line.split()[6] mass = line.split()[7] # A and B state are the same if A_B_state == 'vdwq_vdwq': text = line.split(';')[0] + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n' # Turn on vdw elif A_B_state == 'dummy_vdw': charge = str(0.0) text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \ residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + \ atom_type + ' ' + charge + ' ' + mass + '\n' # Turn vdw off elif A_B_state == 'vdw_dummy': charge = str(0.0) text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \ residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + \ ' d%s ' % atom_type + ' ' + charge + ' ' + mass + '\n' # Turn vdw and electrostatics off elif A_B_state == 'vdwq_dummy': text = line.split(';')[0] + ' ' + ' d%s ' % atom_type + ' 0.0 ' + mass + '\n' # uncharge elif A_B_state == 'vdwq_vdw': text = line.split(';')[0] + ' ' + ' ' + atom_type + ' 0.0 ' + mass + '\n' # charge elif A_B_state == 'vdw_vdwq': text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \ residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + str(0.0) + ' ' + \ mass + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n' # Posre off elif A_B_state == 'dummy': charge = str(0.0) text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \ residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + '\n' # Turn vdw and electrostatics off elif A_B_state == 'vdwq': text = line.split(';')[0] + '\n' else: print('Transformation not implemented yet') return text
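# Illustrative call on a made-up GROMACS [atoms] line (fields: nr, type,
# resnr, resname, atom, cgnr, charge, mass); only the first eight
# whitespace-separated fields are read by the function above. Note that
# A_B_state must be one of the options handled there; the default 'vdwq_q'
# in the signature is not among them.
def _example_create_A_and_B_state_ligand():
    line = "     1   c3      1   LIG    C1      1   -0.1000   12.0100 ; qtot -0.1"
    # 'vdwq_vdw' keeps the A-state columns and appends an uncharged B state
    # (same atom type, charge 0.0, same mass).
    print(create_A_and_B_state_ligand(line, A_B_state='vdwq_vdw'))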
21,669
def pytask_resolve_dependencies_select_execution_dag(dag): """Select the tasks which need to be executed.""" scheduler = TopologicalSorter.from_dag(dag) visited_nodes = [] for task_name in scheduler.static_order(): if task_name not in visited_nodes: have_changed = _have_task_or_neighbors_changed(task_name, dag) if have_changed: visited_nodes += list(task_and_descending_tasks(task_name, dag)) else: dag.nodes[task_name]["task"].markers.append( Mark("skip_unchanged", (), {}) )
21,670
def test(model, model_path, device, data_path, data_transform, batch_size, classes):
    """
    Test the given model with the given weights and dataset

    Parameters
    ----------
    `model`: network model to be evaluated
    `model_path`: path to load the model weights from
    `device`: device to be used for testing
    `data_path`: path to the dataset location
    `data_transform`: defined transforms to be conducted on the input images
    `batch_size`: testing batch size
    `classes`: list of class label names

    Returns
    -------
    `None`:
    """
    testset = datasets.ImageFolder(root=os.path.join(data_path, 'test'),
                                   transform=data_transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=True, num_workers=4)

    # Load the trained model
    model.to(device)
    model.load_state_dict(torch.load(model_path))

    # Testing on some random testset images
    dataiter = iter(testloader)
    data = next(dataiter)
    images, labels = data[0].to(device), data[1].to(device)

    # Load this batch into the model and predict
    outputs = model(images)
    _, predicted = torch.max(outputs, 1)

    # Visualize the images and their labels
    print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(batch_size)))
    print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(batch_size)))
    imshow(torchvision.utils.make_grid(images))

    # Compute the overall and class-wise error rates on the testset
    correct = 0
    total = 0
    correct_pred = {classname: 0 for classname in classes}
    total_pred = {classname: 0 for classname in classes}
    with torch.no_grad():
        # Load each batch into the model and predict
        for data in testloader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            # Collect overall results
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            # Collect the correct predictions for each class
            for label, prediction in zip(labels, predicted):
                if label == prediction:
                    correct_pred[classes[label]] += 1
                total_pred[classes[label]] += 1

    # print accuracy for each class
    for classname, correct_count in correct_pred.items():
        accuracy = 100 * float(correct_count) / total_pred[classname]
        print("Accuracy for class {:5s} is: {:.1f} %".format(classname, accuracy))

    print('Error rate of the network on the %d test images: %.4f %%' % (total, 100*(total - correct)/total))
    return
21,671
def clean_params(estimator, n_jobs=None): """clean unwanted hyperparameter settings If n_jobs is not None, set it into the estimator, if applicable Return ------ Cleaned estimator object """ ALLOWED_CALLBACKS = ( "EarlyStopping", "TerminateOnNaN", "ReduceLROnPlateau", "CSVLogger", "None", ) estimator_params = estimator.get_params() for name, p in estimator_params.items(): # all potential unauthorized file write if name == "memory" or name.endswith("__memory") or name.endswith("_path"): new_p = {name: None} estimator.set_params(**new_p) elif n_jobs is not None and (name == "n_jobs" or name.endswith("__n_jobs")): new_p = {name: n_jobs} estimator.set_params(**new_p) elif name.endswith("callbacks"): for cb in p: cb_type = cb["callback_selection"]["callback_type"] if cb_type not in ALLOWED_CALLBACKS: raise ValueError("Prohibited callback type: %s!" % cb_type) return estimator
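# Hedged sketch: pin n_jobs on a scikit-learn estimator. Assumes scikit-learn
# is installed; any estimator exposing get_params/set_params behaves the same.
def _example_clean_params():
    from sklearn.ensemble import RandomForestClassifier
    est = clean_params(RandomForestClassifier(n_jobs=8), n_jobs=1)
    print(est.get_params()['n_jobs'])  # 1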
21,672
def getValidOauth2TxtCredentials(force_refresh=False, api=None): """Gets OAuth2 credentials which are guaranteed to be fresh and valid.""" try: credentials = auth.get_admin_credentials(api) except gam.auth.oauth.InvalidCredentialsFileError: doRequestOAuth() # Make a new request which should store new creds. return getValidOauth2TxtCredentials(force_refresh=force_refresh, api=api) if credentials.expired or force_refresh: request = transport.create_request() credentials.refresh(request) return credentials
21,673
def get_combinations_sar(products, aoi): """Get a dataframe with all possible combinations of products and calculate their coverage of the AOI and the temporal distance between the products. Parameters ---------- products : dataframe Search results with product identifiers as index. aoi : shapely geometry Area of interest (lat/lon). Returns ------- combinations : dataframe Double-indexed output dataframe. Only combinations that contain the AOI are returned (with a 1% margin). """ couples = list(itertools.combinations(products.index, 2)) combinations = pd.DataFrame(index=pd.MultiIndex.from_tuples(couples)) for id_a, id_b in couples: footprint_a = wkt.loads(products.loc[id_a].footprint) footprint_b = wkt.loads(products.loc[id_b].footprint) footprint = footprint_a.union(footprint_b) combinations.at[(id_a, id_b), 'date_a'] = products.loc[id_a].date combinations.at[(id_a, id_b), 'date_b'] = products.loc[id_b].date combinations.at[(id_a, id_b), 'cover'] = coverage(aoi, footprint) combinations = combinations[combinations.cover >= 99.] combinations['dist'] = combinations.date_b - combinations.date_a combinations.dist = combinations.dist.apply(lambda x: abs(x.days)) combinations = combinations.sort_values(by='dist', ascending=True) return combinations
21,674
def sample(s, n): """Show a sample of string s centered at position n""" start = max(n - 8, 0) finish = min(n + 24, len(s)) return re.escape(s[start:finish])
21,675
def monkey_patch():
    """Patches decorators for all functions in a specified module.

    If CONF.monkey_patch is set to True, this function patches a decorator
    onto all functions in the specified modules.

    You can set decorators for each module using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'guts.api.ec2.cloud:guts.openstack.common.notifier.api.notify_decorator'

    Parameters of the decorator are as follows.
    (See guts.openstack.common.notifier.api.notify_decorator)

    :param name: name of the function
    :param function: object of the function
    """
    # If CONF.monkey_patch is not True, this function does nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                # On Python 3, unbound methods are regular functions
                predicate = inspect.isfunction if six.PY3 else inspect.ismethod
                for method, func in inspect.getmembers(clz, predicate):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            elif isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
21,676
def getNeededLibraries(binary_filepath):
    """
    Get all libraries the given binary depends on.
    """
    # Hard-coded switch: flip to True to use the ldd-based implementation
    # instead of the objdump-based one.
    if False:
        return getNeededLibrariesLDD(binary_filepath)
    else:
        return getNeededLibrariesOBJDUMP(binary_filepath)
21,677
def load_transformer(input_paths:List[str], input_type:str=None) -> Transformer: """ Creates a transformer for the appropriate file type and loads the data into it from file. """ if input_type is None: input_types = [get_type(i) for i in input_paths] for t in input_types: if input_types[0] != t: error( """ Each input file must have the same file type. Try setting the --input-type parameter to enforce a single type. """ ) input_type = input_types[0] transformer_constructor = get_transformer(input_type) if transformer_constructor is None: error('Inputs do not have a recognized type: ' + str(get_file_types())) t = transformer_constructor() for i in input_paths: t.parse(i, input_type) t.report() return t
21,678
def consult_filme():
    """
    Query a movie by ID from the API and print the result.
    """
    print('==>Consulta filme:')
    id_movie = input("Entre com o ID do filme: ")
    url = f'{url_base}{id_movie}'
    print(f'Requisição get: {url}')
    r = requests.get(url)
    if 400 <= r.status_code <= 499:
        print("Filme não localizado!")
    else:
        print("Filme consultado:")
        response = r.text
        filme = literal_eval(response)
        print(filme)
21,679
def delete_and_create_osd_node_aws_upi(osd_node_name): """ Unschedule, drain and delete osd node, and creating a new osd node. At the end of the function there should be the same number of osd nodes as it was in the beginning, and also ceph health should be OK. This function is for AWS UPI. Args: osd_node_name (str): the name of the osd node Returns: str: The new node name """ osd_node = get_node_objs(node_names=[osd_node_name])[0] az = get_node_az(osd_node) from ocs_ci.ocs.platform_nodes import AWSNodes aws_nodes = AWSNodes() stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name) remove_nodes([osd_node]) log.info(f"name of deleted node = {osd_node_name}") log.info(f"availability zone of deleted node = {az}") log.info(f"stack name of deleted node = {stack_name_of_deleted_node}") if config.ENV_DATA.get("rhel_workers"): node_type = constants.RHEL_OS else: node_type = constants.RHCOS log.info("Preparing to create a new node...") node_conf = {"stack_name": stack_name_of_deleted_node} new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf) return new_node_names[0]
21,680
def encode_integer_compact(value: int) -> bytes: """Encode an integer with signed VLQ encoding. :param int value: The value to encode. :return: The encoded integer. :rtype: bytes """ if value == 0: return b"\0" if value < 0: sign_bit = 0x40 value = -value else: sign_bit = 0 n_bits = value.bit_length() n_bytes = 1 + int(math.ceil((n_bits - 6) / 7)) buf = bytearray(n_bytes) for i in range(n_bytes - 1, 0, -1): buf[i] = 0x80 | (value & 0x7F) value >>= 7 buf[0] = 0x80 | sign_bit | (value & 0x3F) buf[-1] &= 0x7F return bytes(buf)
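# Hand-checked spot checks of the encoding above: the final byte has its high
# bit clear, earlier bytes carry a continuation bit, and bit 0x40 of the first
# byte encodes the sign.
def _example_encode_integer_compact():
    assert encode_integer_compact(0) == b"\x00"
    assert encode_integer_compact(1) == b"\x01"
    assert encode_integer_compact(-1) == b"\x41"       # sign bit set
    assert encode_integer_compact(64) == b"\x80\x40"   # spills into a second byte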
21,681
def kin_phos_query(kin_accession):
    """ Query to pull related phosphosites using a kinase accession.

    :param kin_accession: string kinase accession
    :return: Flask_Table Phosphosite_results object
    """
    session = create_sqlsession()
    q = session.query(Kinase).filter_by(kin_accession=kin_accession)
    kin = q.first()
    # subset of information about the substrate phosphosites.
    subsets = kin.kin_phosphorylates
    table = Phosphosite_results(subsets)
    session.close()
    return table
21,682
def max_surplus(redemptions, costs, traders): """ Calculates the maximum possible surplus """ surplus = 0 transactions = 0.5 * traders for redemption, cost in zip(redemptions, costs): if redemption >= cost: surplus += ((redemption - cost) * transactions) return surplus
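# Worked example: redemptions [10, 8], costs [4, 9] and 4 traders give
# transactions = 0.5 * 4 = 2.0; only the first pair trades profitably,
# so the surplus is (10 - 4) * 2.0 = 12.0.
def _example_max_surplus():
    print(max_surplus([10, 8], [4, 9], 4))  # 12.0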
21,683
def generate_create_account_key(): """ Generates a random account creation key. Implementation is very similar to generate_reset_key(). """ chars = string.ascii_lowercase + string.digits return misc_utils.generate_random_string(constants.CREATE_ACCOUNT_KEY_LENGTH, chars=chars)
21,684
def nyu_single_set_multiple_tokens(slug_set, slug_sets, rtypes): """ Check for multiple token relations on a single hit/slug_set pair. If the two baskets share a common main entry, the cuttoff for multiple relations is actually: number-of-slugs-in-main-entry + 1 """ intersections = [s for s in slug_sets if len(s[0].intersection(slug_set[0])) >= otcore_settings.MULTIPLE_RELATIONS_COUNT] for hit_set in intersections: hit1 = slug_set[1] hit2 = hit_set[1] if hit1.basket != hit2.basket and \ not RelatedBasket.objects.filter(source=hit1.basket, destination=hit2.basket).exists() and \ not RelatedBasket.objects.filter(source=hit2.basket, destination=hit1.basket).exists(): shared_main = get_shared_main_entry(hit1.basket, hit2.basket, rtypes=rtypes) # print("{} | {} | {}".format(shared_main, hit1, hit2)) # Skip creating relation if the number of shared slugs is less than main_slugs + 1 if shared_main is not None: main_tokens = [set(hit.slug.split('-')) for hit in shared_main.topic_hits.all()] # get the slug of the name in common with the subentries try: shared_tokens = sorted([tokens for tokens in main_tokens if tokens < slug_set[0] and tokens < hit_set[0] ], key=lambda x: len(x), reverse=True)[0] except IndexError: # means that this combination of hits aren't those shared by this topic. # Move on to the next set print("SKIPPING: {} | {} | {}".format(shared_main, hit1, hit2)) continue main_token_count = len(shared_tokens) if len(slug_set[0].intersection(hit_set[0])) <= main_token_count: continue RelatedBasket.objects.create( relationtype=rtypes['multipletokens'], source=hit1.basket, destination=hit2.basket )
21,685
def parse_dblife(file): """Parse an DBLife file, returning a tuple: positions: list of (x,y) co-ordinates comments: all comments in file, as a list of strings, one per line. """ lines = file.split("\n") comments = [] positions = [] x = 0 y = 0 dblife_pattern = r"((\d*)(\.|O|o|\*))*" for line in lines: line = line.strip().rstrip() if line.startswith("!"): comments.append(line[2:]) # check if this is part of the pattern if re.match(dblife_pattern, line): count = 0 for char in line: # repeat counts if char.isdigit(): count *= 10 count += int(char) # blanks if char in ".": if count != 0: x += int(count) else: x += 1 count = 0 # ons if char in "oO*": if count != 0: for i in range(count): positions.append((x, y)) x += 1 else: positions.append((x, y)) x += 1 count = 0 count = 0 # newlines y += 1 x = 0 count = 0 return positions, comments
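# Minimal hand-made example: a glider preceded by a comment line. The expected
# output was traced by hand through the parser above (the comment line also
# advances y by one row).
def _example_parse_dblife():
    text = "! a glider\n.O.\n..O\nOOO\n"
    cells, comments = parse_dblife(text)
    print(comments)  # ['a glider']
    print(cells)     # [(1, 1), (2, 2), (0, 3), (1, 3), (2, 3)]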
21,686
def db_handle_error(logger: Logger, default_return_val: Any) \ -> Any: """Handle operational database errors via decorator.""" def decorator(func: Callable) -> Any: def wrapper(*args, **kwargs): # type: ignore # Bypass attempt to perform query and just return default value is_db_disabled: bool = app_config.get( 'BROWSE_DISABLE_DATABASE') or False if is_db_disabled: if logger: logger.info( 'Database is disabled per BROWSE_DISABLE_DATABASE') return default_return_val try: return func(*args, **kwargs) except NoResultFound: return default_return_val except (OperationalError, DBAPIError) as ex: if logger: logger.warning( f'Error executing query in {func.__name__}: {ex}') return default_return_val except Exception as ex: if logger: logger.warning( f'Unknown exception in {func.__name__}: {ex}') raise return wrapper return decorator
21,687
def climb_directory_tree(starting_path: PathOrStr, file_patterns: Iterable[str]) -> Optional[List[Path]]: """Climb the directory tree looking for file patterns.""" current_dir: Path = Path(starting_path).absolute() if current_dir.is_file(): current_dir = current_dir.parent while current_dir.root != str(current_dir): for root_file in file_patterns: found_files = list(current_dir.glob(root_file)) if found_files: return found_files current_dir = current_dir.parent return None
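# Hypothetical usage: look for project markers above a source file. The path
# is made up, so the real result depends on the filesystem; None is returned
# when nothing matches all the way up to the root.
def _example_climb_directory_tree():
    found = climb_directory_tree("/tmp/some/project/src/module.py",
                                 ["pyproject.toml", "setup.py"])
    print(found)  # e.g. [PosixPath('/tmp/some/project/pyproject.toml')] or None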
21,688
def serve_file(request, token, require_requester=True, verify_requester=True, signer=None): """Basic view to serve a file. Uses ``evaluate_request`` under the hood. Please refer to that function to view information about exceptions. :param request: the file request :type request: bgfiles.models.FileRequest :param token: the token :type token: str :param require_requester: whether we expect the token to contain the request :type require_requester: bool :param verify_requester: whether we need to verify the current user is the requester :type verify_requester: bool :param signer: signer to use :return: django.http.HTTPResponse """ file_request, data = evaluate_request(request, token, require_requester=require_requester, verify_requester=verify_requester, signer=signer) return toolbox.serve(file_request)
21,689
def test_quadbin_toparent_neg_res(): """Throws error for negative resolution""" with pytest.raises(Exception): run_query('SELECT QUADBIN_TOPARENT(5209574053332910079, -1)')
21,690
def d2_rho_heterodyne(t, rho_vec, A, args):
    """
    d2 terms for the heterodyne detection scheme: one term per quadrature,
    built from A[0] + A[3] and A[0] - A[3].
    (Still needs to be cythonized and given a full docstring.)
    """
    M = A[0] + A[3]
    e1 = cy_expect_rho_vec(M, rho_vec, 0)
    d1 = spmv(M, rho_vec) - e1 * rho_vec
    M = A[0] - A[3]
    e1 = cy_expect_rho_vec(M, rho_vec, 0)
    d2 = spmv(M, rho_vec) - e1 * rho_vec
    return [1.0 / np.sqrt(2) * d1, -1.0j / np.sqrt(2) * d2]
21,691
def plot_driver(est_dict, fname, plot_type): """ Drive usage of Plotter given a dictionary of estimates """ plotter = plot_type(fname) for key, est in est_dict.iteritems(): plotter.estimator_plot(est, key) plotter.out()
21,692
def get_venv(): """Return virtual environment path or throw an error if not found""" env = environ.get("VIRTUAL_ENV", None) if env: return Path(env) else: raise EnvironmentError("No virtual environment found.")
21,693
def test_output_format_raw(tmpdir): """Verify raw output format.""" # Attachment attachment_path = Path(tmpdir/"attachment.txt") attachment_path.write_text("Hello world\n") # Simple template template_path = Path(tmpdir/"mailmerge_template.txt") template_path.write_text(textwrap.dedent("""\ TO: {{email}} FROM: from@test.com Laȝamon 😀 klâwen """)) # Simple database database_path = Path(tmpdir/"mailmerge_database.csv") database_path.write_text(textwrap.dedent("""\ email to@test.com """)) # Simple unsecure server config config_path = Path(tmpdir/"mailmerge_server.conf") config_path.write_text(textwrap.dedent("""\ [smtp_server] host = open-smtp.example.com port = 25 """)) # Run mailmerge runner = click.testing.CliRunner(mix_stderr=False) with tmpdir.as_cwd(): result = runner.invoke(main, ["--output-format", "raw"]) assert not result.exception assert result.exit_code == 0 # Remove the Date string, which will be different each time stdout = copy.deepcopy(result.stdout) stdout = re.sub(r"Date:.+", "Date: REDACTED", stdout, re.MULTILINE) # Verify output assert result.stderr == "" assert stdout == textwrap.dedent("""\ >>> message 1 TO: to@test.com FROM: from@test.com MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: base64 Date: REDACTED TGHInWFtb24g8J+YgCBrbMOid2Vu >>> message 1 sent >>> Limit was 1 message. To remove the limit, use the --no-limit option. >>> This was a dry run. To send messages, use the --no-dry-run option. """)
21,694
def parse_pascal_voc_anno( anno_path: str, labels: List[str] = None, keypoint_meta: Dict = None ) -> Tuple[List[AnnotationBbox], Union[str, Path], np.ndarray]: """ Extract the annotations and image path from labelling in Pascal VOC format. Args: anno_path: the path to the annotation xml file labels: list of all possible labels, used to compute label index for each label name keypoint_meta: meta data of keypoints which should include at least "labels". Return A tuple of annotations, the image path and keypoints. Keypoints is a numpy array of shape (N, K, 3), where N is the number of objects of the category that defined the keypoints, and K is the number of keypoints defined in the category. `len(keypoints)` would be 0 if no keypoints found. """ anno_bboxes = [] keypoints = [] tree = ET.parse(anno_path) root = tree.getroot() # get image path from annotation. Note that the path field might not be set. anno_dir = os.path.dirname(anno_path) if root.find("path") is not None: im_path = os.path.realpath( os.path.join(anno_dir, root.find("path").text) ) else: im_path = os.path.realpath( os.path.join(anno_dir, root.find("filename").text) ) # extract bounding boxes, classification and keypoints objs = root.findall("object") for obj in objs: label = obj.find("name").text # Get keypoints if any. # For keypoint detection, currently only one category (except # background) is allowed. We assume all annotated objects are of that # category. if keypoint_meta is not None: kps = [] kps_labels = keypoint_meta["labels"] # Assume keypoints are available kps_annos = obj.find("keypoints") if kps_annos is None: raise Exception(f"No keypoints found in {anno_path}") assert set([kp.tag for kp in kps_annos]).issubset( kps_labels ), "Incompatible keypoint labels" # Read keypoint coordinates: [x, y, visibility] # Visibility 0 means invisible, non-zero means visible for name in kps_labels: kp_anno = kps_annos.find(name) if kp_anno is None: # return 0 for invisible keypoints kps.append([0, 0, 0]) else: kps.append( [ int(float(kp_anno.find("x").text)), int(float(kp_anno.find("y").text)), 1, ] ) keypoints.append(kps) # get bounding box bnd_box = obj.find("bndbox") left = int(bnd_box.find("xmin").text) top = int(bnd_box.find("ymin").text) right = int(bnd_box.find("xmax").text) bottom = int(bnd_box.find("ymax").text) # Set mapping of label name to label index if labels is None: label_idx = None else: label_idx = labels.index(label) anno_bbox = AnnotationBbox.from_array( [left, top, right, bottom], label_name=label, label_idx=label_idx, im_path=im_path, ) assert anno_bbox.is_valid() anno_bboxes.append(anno_bbox) return anno_bboxes, im_path, np.array(keypoints)
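# Hedged sketch of the minimal Pascal VOC layout the parser above expects
# (keypoints omitted). The XML content and label list are made up, and the
# call still relies on the surrounding module's AnnotationBbox class.
def _example_parse_pascal_voc_anno():
    import os
    import tempfile
    xml = (
        "<annotation>"
        "  <filename>img_001.jpg</filename>"
        "  <object>"
        "    <name>carton</name>"
        "    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>"
        "  </object>"
        "</annotation>"
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        anno_path = os.path.join(tmp_dir, "img_001.xml")
        with open(anno_path, "w") as f:
            f.write(xml)
        boxes, im_path, keypoints = parse_pascal_voc_anno(anno_path, labels=["carton"])
        print(len(boxes), im_path, len(keypoints))  # 1 .../img_001.jpg 0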
21,695
def load_user(user_id): """Login manager load user method.""" return User.query.get(int(user_id))
21,696
def write_dataframe_to_files( df, filename_prefix='dpdd_object', output_dir='./', hdf_key_prefix='object', parquet_scheme='simple', parquet_engine='fastparquet', parquet_compression='gzip', append=True, verbose=True, write=('parquet',), **kwargs): """Write out dataframe to HDF, FITS, and Parquet files. Choose file names based on tract (HDF) or tract + patch (FITS, Parquet). Parameters ---------- df : Pandas DataFrame Pandas DataFrame with the input catalog data to write out. filename_prefix : str, optional Prefix to be added to the output filename. Default is 'dpdd_object'. hdf_key_prefix : str, optional Group name within the output HDF5 file. Default is 'object'. parquet_scheme : str, optional ['simple' or 'hive'] 'simple' stores everything in one file per tract 'hive' stores one directory with a _metadata file and then the columns partitioned into row groups. Default is simple parquet_engine : str, optional Engine to write parquet on disk. Available: fastparquet, pyarrow. Default is fastparquet. parquet_compression : str, optional Compression algorithm to use when writing Parquet files. Potential: gzip, snappy, lzo, uncompressed. Default is gzip. Availability depends on the engine used. append : bool, optional If True, append to exsiting parquet files. Default is True. verbose : boolean, optional If True, print out debug messages. Default is True. write : list or tuple, optional Format(s) to write out. Default is ('parquet',), """ # We know that our GCR reader will chunk by tract+patch # So we take the tract and patch in the first entry # as the identifying tract, patch for all. tract, patch = df['tract'][0], df['patch'][0] patch = patch.replace(',', '') # Convert '0,1'->'01' # Normalize output filename outfile_base_tract_format = os.path.join(output_dir, '{base}_tract_{tract:04d}') outfile_base_tract_patch_format = \ os.path.join(output_dir, '{base}_tract_{tract:04d}_patch_{patch:s}') # tract is an int # but patch is a string (e.g., '01' for '0,1') key_format = '{key_prefix:s}_{tract:04d}_{patch:s}' info = { 'base': filename_prefix, 'tract': tract, 'patch': patch, 'key_prefix': hdf_key_prefix, } outfile_base_tract = outfile_base_tract_format.format(**info) outfile_base_tract_patch = outfile_base_tract_patch_format.format(**info) if 'hdf' in write or 'all' in write: if verbose: print("Writing {} {} to HDF5 DPDD file.".format(tract, patch)) key = key_format.format(**info) hdf_file = outfile_base_tract+'.hdf5' # Append iff the file already exists hdf_append = append and os.path.exists(hdf_file) df.to_hdf(hdf_file, key=key, append=hdf_append, format='table') if 'fits' in write or 'all' in write: if verbose: print("Writing {} {} to FITS DPDD file.".format(tract, patch)) Table.from_pandas(df).write(outfile_base_tract_patch + '.fits') if 'parquet' in write or 'all' in write: if verbose: print("Writing {} {} to Parquet DPDD file.".format(tract, patch)) parquet_file = outfile_base_tract+'.parquet' # Append iff the file already exists parquet_append = append and os.path.exists(parquet_file) df.to_parquet( parquet_file, append=parquet_append, file_scheme=parquet_scheme, engine=parquet_engine, compression=parquet_compression, )
21,697
def test_depolarizing_channel(): """ Description: Test depolarizing channel Expectation: success. """ sim2 = Simulator('projectq', 1) sim2.apply_gate(C.DepolarizingChannel(0).on(0)) assert np.allclose(sim2.get_qs(), np.array([1.0 + 0.0j, 0.0 + 0.0j]))
21,698
def test_interpolate_energy_dispersion(): """Test of interpolation of energy dispersion matrix using a simple dummy model.""" x = [0.9, 1.1] y = [8., 11.5] n_grid = len(x) * len(y) n_offset = 1 n_en = 30 n_mig = 20 clip_level = 1.e-3 # define simple dummy bias and resolution model using two parameters x and y def get_bias_std(i_en, x, y): i_en = i_en + 3 * ((x - 1) + (y - 10.)) de = n_en - i_en de[de < 0] = 0. bias = de**0.5 + n_mig / 2 rms = 5 - 2 * (i_en / n_en) bias[i_en < 3] = 2 * n_mig # return high values to zero out part of the table rms[i_en < 3] = 0 return bias, rms en = np.arange(n_en)[:, np.newaxis] mig = np.arange(n_mig)[np.newaxis, :] # auxiliary function to compute profile of the 2D distribution # used to check if the expected and interpolated matrixes are similar def calc_mean_std(matrix): n_en = matrix.shape[0] means = np.empty(n_en) stds = np.empty(n_en) for i_en in np.arange(n_en): w = matrix[i_en, :] if np.sum(w) > 0: means[i_en] = np.average(mig[0, :], weights=w) stds[i_en] = np.sqrt(np.cov(mig[0, :], aweights=w)) else: # we need to skip the empty columns means[i_en] = -1 stds[i_en] = -1 return means, stds # generate true values interp_pars = (1, 10) bias, sigma = get_bias_std(en, *interp_pars) mig_true = np.exp(-(mig - bias)**2 / (2 * sigma**2)) mig_true[mig_true < clip_level] = 0 # generate a grid of migration matrixes i_grid = 0 pars_all = np.empty((n_grid, 2)) mig_all = np.empty((n_grid, n_en, n_mig, n_offset)) for xx in x: for yy in y: bias, sigma = get_bias_std(en, xx, yy) mig_all[i_grid, :, :, 0] = (np.exp(-(mig - bias)**2 / (2 * sigma**2))) pars_all[i_grid, :] = (xx, yy) i_grid += 1 # do the interpolation and compare the results with expected ones mig_interp = interp.interpolate_energy_dispersion(mig_all, pars_all, interp_pars, method='linear') # check if all the energy bins have normalization 1 or 0 (can happen because of empty bins) sums = np.sum(mig_interp[:, :, 0], axis=1) assert np.logical_or(np.isclose(sums, 0., atol=1.e-5), np.isclose(sums, 1., atol=1.e-5)).min() # now check if we reconstruct the mean and sigma roughly fine after interpolation bias0, stds0 = calc_mean_std(mig_true) # true bias, stds = calc_mean_std(mig_interp[:, :, 0]) # interpolated # first remove the bins that are empty in true value idxs = bias0 > 0 bias0 = bias0[idxs] bias = bias[idxs] stds0 = stds0[idxs] stds = stds[idxs] # allowing for a 0.6 bin size error on the interpolated values assert np.allclose(bias, bias0, atol=0.6, rtol=0.) assert np.allclose(stds, stds0, atol=0.6, rtol=0.)
21,699