def read_ult_meta(filebase):
    """Convenience fcn for output of targeted metadata."""
    meta = _parse_ult_meta(filebase)
    return (meta["NumVectors"],
            meta["PixPerVector"],
            meta["ZeroOffset"],
            meta["Angle"],
            meta["PixelsPerMm"],
            meta["FramesPerSec"],
            meta["TimeInSecsOfFirstFrame"])
18,400
def safe_identifiers_iterable(val_list: Iterable[str]) -> List[str]:
    """ Returns new list, all with safe identifiers. """
    return [safe_identifier(val) for val in val_list]
18,401
def encode_varint(value, write):
    """
    Encode an integer to a varint representation.

    See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        value (int): Value to encode
        write (function): Called per byte that needs to be written

    Returns:
        int: Number of bytes written
    """
    value = (value << 1) ^ (value >> 63)
    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # Fall back to the general algorithm
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        return i + 1  # the final write() adds one byte beyond the loop count
18,402
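# encode_varint above first applies a 64-bit ZigZag mapping
# ((value << 1) ^ (value >> 63)) and then emits 7-bit groups, least
# significant first, with the high bit set on all bytes but the last.
# A decoder has to undo both steps. A minimal sketch assuming the same
# convention; the `read` callable is hypothetical and returns one byte
# (as an int) per call:
def decode_varint(read):
    """Inverse of encode_varint; `read` returns one byte (int) per call."""
    result = 0
    shift = 0
    while True:
        byte = read()
        result |= (byte & 0x7f) << shift
        if not byte & 0x80:  # high bit clear marks the final byte
            break
        shift += 7
    # Undo the ZigZag mapping
    return (result >> 1) ^ -(result & 1)

# Round-trip check against encode_varint:
# buf = bytearray(); encode_varint(-300, buf.append)
# it = iter(buf); assert decode_varint(lambda: next(it)) == -300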
def heap_sort(li):
    """
    [list of int] => [list of int]

    Heap sort: divides its input into a sorted and an unsorted region, and
    it iteratively shrinks the unsorted region by extracting the largest
    element from it and inserting it into the sorted region. It does not
    waste time with a linear-time scan of the unsorted region; rather, heap
    sort maintains the unsorted region in a heap data structure to more
    quickly find the largest element in each step.

    To implement a heap using arrays, we will use the rule
    li[k] >= li[2*k+1] and li[k] >= li[2*k+2] (left child and right child
    respectively). More generally, the array must satisfy the heap quality:
    For any given node C, if P is a parent node of C, then the value of P
    is greater than or equal to the key of C (for max heaps).

    Laid out level by level, indices 0, 1-2, 3-6, 7-14, 15-30, ... form the
    successive levels of that complete binary tree.
    """
    def heapify(lst, heap_size, root):
        """
        ([list of int], int, int) => [list of int]

        Rearranges the list to satisfy the heap quality. Root is the index
        of the largest element in the lst.
        """
        # the largest node
        largest = root
        left_child = 2 * largest + 1
        right_child = 2 * largest + 2

        # check if left_child and root need to be swapped
        if left_child < heap_size and lst[largest] < lst[left_child]:
            largest = left_child

        # check if right_child and root need to be swapped
        if right_child < heap_size and lst[largest] < lst[right_child]:
            largest = right_child

        # change root, if needed
        if largest != root:
            lst[root], lst[largest] = lst[largest], lst[root]
            # continue to heapify the root
            heapify(lst, heap_size, largest)

    # Build a max heap by iterating through the list backwards
    for i in range(len(li), -1, -1):
        heapify(li, len(li), i)

    # extract elements one by one
    for i in range(len(li) - 1, 0, -1):
        # Remember, heap sort differs from insertion sort in that it
        # searches for the maximum, rather than minimum, element.
        # li[0:end] is a heap (like a tree, but elements are not guaranteed
        # to be sorted) and li[end:len(li)] is in sorted order.
        li[i], li[0] = li[0], li[i]
        # restore the heap, since it was disturbed by the swap
        heapify(li, i, 0)
    return li
18,403
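# A quick usage sketch for heap_sort above: it sorts in place in
# O(n log n) and also returns the list, so both names below refer to the
# same object.
data = [9, 4, 7, 1, -2, 4]
result = heap_sort(data)
assert result == [-2, 1, 4, 4, 7, 9]
assert result is data  # sorted in place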
def vector_field(v, t, inf_mat, state_meta):
    """vector_field returns the temporal derivative of a flat state vector

    :param v: array of shape (1, mmax+1+(nmax+1)**2) for the flat state vector
    :param t: float for time (unused)
    :param inf_mat: array of shape (nmax+1, nmax+1) representing the infection rate
    :param state_meta: tuple of arrays encoding information of the structure.

    :returns vec_field: array of shape (1, (nmax+1)**2) for the flat vector field.
    """
    mmax = state_meta[0]
    nmax = state_meta[1]
    m = state_meta[2]
    gm = state_meta[3]
    pn = state_meta[4]
    imat = state_meta[5]
    nmat = state_meta[6]
    pnmat = state_meta[7]

    sm = v[:mmax + 1]
    fni = v[mmax + 1:].reshape(nmax + 1, nmax + 1)
    fni_field = np.zeros(fni.shape)  # matrix field
    sm_field = np.zeros(sm.shape)

    # calculate mean-field quantities
    r = np.sum(inf_mat[2:, :] * (nmat[2:, :] - imat[2:, :]) * fni[2:, :] * pnmat[2:, :])
    r /= np.sum((nmat[2:, :] - imat[2:, :]) * fni[2:, :] * pnmat[2:, :])
    rho = r * excess_susceptible_membership(m, gm, sm)

    # contribution for nodes
    # ----------------------
    sm_field = 1 - sm - sm * m * r

    # contribution for groups
    # -----------------------
    # contribution from above
    fni_field[2:, :nmax] += imat[2:, 1:] * fni[2:, 1:]
    # contribution from equal
    fni_field[2:, :] += (-imat[2:, :]
                         - (nmat[2:, :] - imat[2:, :])
                         * (inf_mat[2:, :] + rho)) * fni[2:, :]
    # contribution from below
    fni_field[2:, 1:nmax + 1] += ((nmat[2:, :nmax] - imat[2:, :nmax])
                                  * (inf_mat[2:, :nmax] + rho)) * fni[2:, :nmax]

    return np.concatenate((sm_field, fni_field.reshape((nmax + 1) ** 2)))
18,404
def newton(start, loss_fn, *args, lower=0, upper=None, epsilon=1e-9):
    """ Newton's Method! """
    theta, origin, destination = args[0], args[1], args[2]
    if upper is None:
        upper = 1
    start = lower
    while True:
        # bisect the starting point toward the sign change of the loss
        if loss_fn(start, theta, origin, destination) > 0:
            start = (upper + start) / 2
        else:
            start = (lower + start) / 2
        x_cur = start
        x_prev = -1
        try:
            while np.abs(x_cur - x_prev) >= epsilon:
                x_prev = x_cur
                x_cur = newton_single(x_cur, loss_fn, theta, origin, destination)
            if np.isnan(x_cur):
                # diverged; retry from a new bisected starting point
                continue
            return x_cur
        except ZeroDivisionError:
            print(start, x_cur)
18,405
def eval_det_cls(pred, gt, iou_thr=None):
    """Generic function to compute precision/recall for object detection
    for a single class.

    Args:
        pred (dict): Predictions mapping from image id to bounding boxes
            and scores.
        gt (dict): Ground truths mapping from image id to bounding boxes.
        iou_thr (list[float]): A list of iou thresholds.

    Return:
        tuple (np.ndarray, np.ndarray, float): Recalls, precisions and
            average precision.
    """
    # {img_id: {'bbox': box structure, 'det': matched list}}
    class_recs = {}
    npos = 0
    img_id_npos = {}
    for img_id in gt.keys():
        cur_gt_num = len(gt[img_id])
        if cur_gt_num != 0:
            gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32)
            for i in range(cur_gt_num):
                gt_cur[i] = gt[img_id][i].tensor
            bbox = gt[img_id][0].new_box(gt_cur)
        else:
            bbox = gt[img_id]
        det = [[False] * len(bbox) for i in iou_thr]
        npos += len(bbox)
        img_id_npos[img_id] = img_id_npos.get(img_id, 0) + len(bbox)
        class_recs[img_id] = {'bbox': bbox, 'det': det}

    # construct dets
    image_ids = []
    confidence = []
    ious = []
    for img_id in pred.keys():
        cur_num = len(pred[img_id])
        if cur_num == 0:
            continue
        pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32)
        box_idx = 0
        for box, score in pred[img_id]:
            image_ids.append(img_id)
            confidence.append(score)
            pred_cur[box_idx] = box.tensor
            box_idx += 1
        # `box` here is the last box from the loop above; any box works
        pred_cur = box.new_box(pred_cur)
        gt_cur = class_recs[img_id]['bbox']
        if len(gt_cur) > 0:
            # calculate iou in each image
            iou_cur = pred_cur.overlaps(pred_cur, gt_cur)
            for i in range(cur_num):
                ious.append(iou_cur[i])
        else:
            for i in range(cur_num):
                ious.append(np.zeros(1))

    confidence = np.array(confidence)

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    image_ids = [image_ids[x] for x in sorted_ind]
    ious = [ious[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp_thr = [np.zeros(nd) for i in iou_thr]
    fp_thr = [np.zeros(nd) for i in iou_thr]
    for d in range(nd):
        R = class_recs[image_ids[d]]
        iou_max = -np.inf
        BBGT = R['bbox']
        cur_iou = ious[d]

        if len(BBGT) > 0:
            # compute overlaps
            for j in range(len(BBGT)):
                # iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...]))
                iou = cur_iou[j]
                if iou > iou_max:
                    iou_max = iou
                    jmax = j

        for iou_idx, thresh in enumerate(iou_thr):
            if iou_max > thresh:
                if not R['det'][iou_idx][jmax]:
                    tp_thr[iou_idx][d] = 1.
                    R['det'][iou_idx][jmax] = 1
                else:
                    fp_thr[iou_idx][d] = 1.
            else:
                fp_thr[iou_idx][d] = 1.

    ret = []
    # Return additional information for custom metrics.
    new_ret = {}
    new_ret["image_ids"] = image_ids
    new_ret["iou_thr"] = iou_thr
    new_ret["ious"] = [max(x.tolist()) for x in ious]
    new_ret["fp_thr"] = [x.tolist() for x in fp_thr]
    new_ret["tp_thr"] = [x.tolist() for x in tp_thr]
    new_ret["img_id_npos"] = img_id_npos
    for iou_idx, thresh in enumerate(iou_thr):
        # compute precision recall
        fp = np.cumsum(fp_thr[iou_idx])
        tp = np.cumsum(tp_thr[iou_idx])
        recall = tp / float(npos)
        # avoid divide by zero in case the first detection matches a
        # difficult ground truth
        precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = average_precision(recall, precision)
        ret.append((recall, precision, ap))

    return ret, new_ret
18,406
def replace_images(fpath):
    """ Takes a path to a note and edits the note to insert image data.

    params:
        fpath: path to note
    Output:
        None, writes new note with image data
    """
    with open(fpath, 'r') as f:
        line1 = f.readline()  # get rid of the first xml tag line
        note = f.read()

    note_name = fpath.split('/')[-1].split('.html')[0]
    resource_path = fpath + '.resources'

    # Find each image tag and replace with corresponding uri data
    soup = bs(note, features="html.parser")
    images = soup.findAll('img', src=True)
    links = soup.findAll('a', href=True)
    all_media = [l for l in links if ('www' not in l['href'])]

    for image in images:
        img_data = img_to_data(
            resource_path + '/' + image['src'].split('/')[-1].replace('%20', ' '))
        image['src'] = img_data
        image['width'] = "500"
        image['height'] = "500"

    # Go through linked media files and insert uri data if local pdf
    for media in all_media:
        mpath = resource_path + '/' + media['href'].split('/')[-1].replace('%20', ' ')
        if (media['href'].split('.')[-1] == 'pdf') and ('resources' in media['href']):
            try:
                images = convert_from_path(mpath, dpi=300)
                datas = []
                for im in images:  # in case of multipage pdf
                    buffered = BytesIO()  # fresh buffer per page
                    im.save(buffered, format="png")
                    data = base64.encodebytes(buffered.getvalue()).decode("utf-8").replace('\n', '')
                    datas.append(u'data:%s;base64,%s' % ("image/png", data))
                for i, d in enumerate(datas):
                    if d != '':
                        tag = soup.new_tag('img')
                        tag['src'] = d
                        tag['width'] = "500"
                        tag['height'] = "500"
                        if i == 1:
                            media.replace_with(tag)
                        else:
                            media.insert_before(tag)
            except Exception:
                print('Failed to convert pdf media in: %s' % fpath)

    with open(fpath, 'w') as f:
        f.write(str(soup))
18,407
def parse_g2o(path: pathlib.Path, pose_count_limit: int = 100000) -> G2OData:
    """Parse a G2O file. Creates a list of factors and dictionary of initial poses."""

    with open(path) as file:
        lines = [line.strip() for line in file.readlines()]

    pose_variables: List[jaxfg.geometry.LieVariableBase] = []
    initial_poses: Dict[jaxfg.geometry.LieVariableBase, jaxlie.MatrixLieGroup] = {}
    factors: List[jaxfg.core.FactorBase] = []

    for line in tqdm(lines):
        parts = [part for part in line.split(" ") if part != ""]

        variable: jaxfg.geometry.LieVariableBase
        between: jaxlie.MatrixLieGroup

        if parts[0] == "VERTEX_SE2":
            if len(pose_variables) > pose_count_limit:
                continue

            # Create SE(2) variable
            _, index, x, y, theta = parts
            index = int(index)
            x, y, theta = map(float, [x, y, theta])
            assert len(initial_poses) == index
            variable = jaxfg.geometry.SE2Variable()
            initial_poses[variable] = jaxlie.SE2.from_xy_theta(x, y, theta)
            pose_variables.append(variable)

        elif parts[0] == "EDGE_SE2":
            # Create relative offset between pair of SE(2) variables
            before_index = int(parts[1])
            after_index = int(parts[2])
            if before_index > pose_count_limit or after_index > pose_count_limit:
                continue

            between = jaxlie.SE2.from_xy_theta(*(float(p) for p in parts[3:6]))

            # reconstruct the symmetric precision matrix from its upper triangle
            precision_matrix_components = onp.array(list(map(float, parts[6:])))
            precision_matrix = onp.zeros((3, 3))
            precision_matrix[onp.triu_indices(3)] = precision_matrix_components
            precision_matrix = precision_matrix.T
            precision_matrix[onp.triu_indices(3)] = precision_matrix_components
            sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T

            factors.append(
                jaxfg.geometry.BetweenFactor.make(
                    variable_T_world_a=pose_variables[before_index],
                    variable_T_world_b=pose_variables[after_index],
                    T_a_b=between,
                    noise_model=jaxfg.noises.Gaussian(
                        sqrt_precision_matrix=sqrt_precision_matrix
                    ),
                )
            )

        elif parts[0] == "VERTEX_SE3:QUAT":
            # Create SE(3) variable
            _, index, x, y, z, qx, qy, qz, qw = parts
            index = int(index)
            assert len(initial_poses) == index
            variable = jaxfg.geometry.SE3Variable()
            initial_poses[variable] = jaxlie.SE3(
                wxyz_xyz=onp.array(list(map(float, [qw, qx, qy, qz, x, y, z])))
            )
            pose_variables.append(variable)

        elif parts[0] == "EDGE_SE3:QUAT":
            # Create relative offset between pair of SE(3) variables
            before_index = int(parts[1])
            after_index = int(parts[2])

            numerical_parts = list(map(float, parts[3:]))
            assert len(numerical_parts) == 7 + 21

            xyz = numerical_parts[0:3]
            quaternion = numerical_parts[3:7]
            between = jaxlie.SE3.from_rotation_and_translation(
                rotation=jaxlie.SO3.from_quaternion_xyzw(onp.array(quaternion)),
                translation=onp.array(xyz),
            )

            precision_matrix = onp.zeros((6, 6))
            precision_matrix[onp.triu_indices(6)] = numerical_parts[7:]
            precision_matrix = precision_matrix.T
            precision_matrix[onp.triu_indices(6)] = numerical_parts[7:]
            sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T

            factors.append(
                jaxfg.geometry.BetweenFactor.make(
                    variable_T_world_a=pose_variables[before_index],
                    variable_T_world_b=pose_variables[after_index],
                    T_a_b=between,
                    noise_model=jaxfg.noises.Gaussian(
                        sqrt_precision_matrix=sqrt_precision_matrix
                    ),
                )
            )

        else:
            assert False, f"Unexpected line type: {parts[0]}"

    # Anchor start pose
    factors.append(
        jaxfg.geometry.PriorFactor.make(
            variable=pose_variables[0],
            mu=initial_poses[pose_variables[0]],
            noise_model=jaxfg.noises.DiagonalGaussian(
                jnp.ones(pose_variables[0].get_local_parameter_dim()) * 100.0
            ),
        )
    )

    return G2OData(factors=factors, initial_poses=initial_poses)
18,408
def register_content_widgets(content_widgets):
    """ Run custom add-on package installation code to add custom site
    specific content widgets

    @param content_widgets: Dictionary of custom content widgets
    """
    widget_settings = api.portal.get_registry_record(
        name='ade25.widgets.widget_settings'
    )
    stored_widgets = json.loads(widget_settings)
    records = stored_widgets['items']
    for content_widget, widget_data in content_widgets.items():
        if content_widget not in records.keys():
            records[content_widget] = widget_data
    stored_widgets["items"] = records
    # no trailing commas here: they would silently turn the values into tuples
    stored_widgets["timestamp"] = six.text_type(int(time.time()))
    stored_widgets["updated"] = datetime.datetime.now().isoformat()
    api.portal.set_registry_record(
        name='ade25.widgets.widget_settings',
        value=json.dumps(stored_widgets)
    )
18,409
def find_stops(input_handle, output_handle, offset, compact):
    """Almost stop codon finder.

    :arg stream input_handle: Open readable handle to a FASTA file.
    :arg stream output_handle: Open writable handle to a file.
    :arg int offset: Position of the CDS start in the reference sequence.
    :arg bool compact: Output one line per position.
    """
    bt = BackTranslate()
    sequence = str(next(SeqIO.parse(input_handle, 'fasta')).seq)

    # '...' matches any three characters, i.e. splits the sequence into codons
    for index, codon in enumerate(findall('...', sequence[offset - 1:])):
        stop_positions = bt.with_dna(codon, '*')
        for position in sorted(stop_positions):
            if not compact:
                for subst in sorted(stop_positions[position]):
                    output_handle.write('{}\t{}\t{}\n'.format(
                        offset + (index * 3) + position, *subst))
            else:
                output_handle.write('{}\t{}\t{}\n'.format(
                    offset + (index * 3) + position,
                    list(stop_positions[position])[0][0],
                    ','.join(map(
                        lambda x: x[1], sorted(stop_positions[position])))))
18,410
def _som_actor(env):
    """ Construct the actor part of the model and return it. """
    nactions = np.prod(env.action_shape)

    model = keras.models.Sequential()
    model.add(keras.layers.Flatten(input_shape=(1,) + env.observation_space.shape))
    model.add(keras.layers.Dense(400))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(200))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(nactions))
    model.add(keras.layers.Activation('sigmoid'))
    return model
18,411
def fido(ctx):
    """
    Manage FIDO applications.
    """
    try:
        ctx.obj['controller'] = Fido2Controller(ctx.obj['dev'].driver)
    except Exception as e:
        logger.debug('Failed to load Fido2Controller', exc_info=e)
        ctx.fail('Failed to load FIDO 2 Application.')
18,412
def main():
    """
    This is a simple wrapper for fugashi so you can test it from the command
    line. Like the mecab binary, it treats each line of stdin as one sentence.

    You can pass tagger arguments here too.
    """
    args = ' '.join(sys.argv[1:])

    # This should work if you specify a different dictionary,
    # but it should also work with the pip unidic.
    # Try the GenericTagger and then try the Unidic tagger.
    try:
        tagger = GenericTagger(args, quiet=True)
    except RuntimeError:
        tagger = Tagger(args)

    for line in fileinput.input([]):
        print(tagger.parse(line.strip()))
18,413
def sendEmail():
    """email sender"""
    send_email('Registration ATS',
               ['manavshrivastava@hotmail.com'],
               'Thanks for registering ATS!',
               '<h3>Thanks for registering with ATS!</h3>')
    return "email sent to manavshrivastava@hotmail.com"
18,414
def Pvalue(chi2, df):
    """Returns the p-value of getting chi2 from a chi-squared distribution.

    chi2: observed chi-squared statistic
    df: degrees of freedom
    """
    return 1 - scipy.stats.chi2.cdf(chi2, df)
18,415
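# A worked check for Pvalue against a standard critical value: a
# chi-squared statistic of 3.841 with one degree of freedom sits at the
# 5% tail, so the returned probability should come out near 0.05
# (assumes scipy is importable, as in Pvalue above).
print(Pvalue(3.841, 1))  # ~0.050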
def _ya_testmatch_():
    """
    test function for the forest matching algorithm

    basically this creates two graphs and associated vertex features,
    then runs the different matchers on them
    """
    c1 = np.array([[0], [-1], [1], [0.5], [1.5]])
    parents = np.array([0, 0, 0, 2, 2])
    g1 = fo.Forest(5, parents)

    c2 = np.array([[-1], [1], [0.5], [1.5]])  # c1 + 0.0
    parents = np.array([0, 1, 1, 1])
    g2 = fo.Forest(4, parents)

    dmax = 1
    i, j, k = BPmatch_slow_asym_dev(c1, c2, g1, g2, dmax)
    GM = np.zeros((5, 5))
    GM[i, j] = k
    print(GM * (GM > 0))

    i, j, k = BPmatch_slow_asym_dev(c2, c1, g2, g1, dmax)
    GM = np.zeros((5, 5))
    GM[i, j] = k
    print(GM * (GM > 0.001))

    i, j, k = BPmatch_slow_asym(c1, c2, g1, g2, dmax)
    GM = np.zeros((5, 5))
    GM[i, j] = k
    print(GM)

    i, j, k = match_trivial(c1, c2, dmax, eps=1.e-12)
    GM = np.zeros((5, 5))
    GM[i, j] = k
    print(GM)
18,416
def template(template_lookup_key: str) -> str:
    """Return template as string."""
    with open(template_path(template_lookup_key), "r") as filepath:
        return filepath.read()
18,417
def filter_rows(df, condition, reason):
    """Filter the country_city_codes df, reporting how many rows are dropped.

    :param df: dataframe to filter
    :param condition: boolean Series, True for rows to keep
    :param reason: short description used in the log message
    :return: filtered country_city_codes df
    """
    n_dropped = (~condition).sum()
    print(
        f"\nexcluding {n_dropped} locations ({n_dropped / df.shape[0]:.1%}) due to {reason}"
    )
    return df[condition]
18,418
def parse_projected_dos(f):
    """Parse `projected_dos.dat` output file."""
    data = np.loadtxt(f)
    projected_dos = {"frequency_points": data[:, 0], "projected_dos": data[:, 1:].T}

    pdos = orm.XyData()
    pdos_list = [pd for pd in projected_dos["projected_dos"]]
    pdos.set_x(projected_dos["frequency_points"], "Frequency", "THz")
    pdos.set_y(
        pdos_list,
        ["Projected DOS"] * len(pdos_list),
        ["1/THz"] * len(pdos_list),
    )
    pdos.label = "Projected DOS"

    return pdos
18,419
def get_from_parameters(a, b, c, alpha, beta, gamma):
    """
    Create a Lattice using unit cell lengths and angles (in degrees).

    This code is modified from the pymatgen source code [1]_.

    Parameters
    ----------
    a : :class:`float`
        *a* lattice parameter.
    b : :class:`float`
        *b* lattice parameter.
    c : :class:`float`
        *c* lattice parameter.
    alpha : :class:`float`
        *alpha* angle in degrees.
    beta : :class:`float`
        *beta* angle in degrees.
    gamma : :class:`float`
        *gamma* angle in degrees.

    Returns
    -------
    :class:`tuple` of three :class:`numpy.ndarray`
        Tuple of cell lattice vectors of shape (3, ) in Angstrom.
    """
    angles_r = np.radians([alpha, beta, gamma])
    cos_alpha, cos_beta, cos_gamma = np.cos(angles_r)
    sin_alpha, sin_beta, sin_gamma = np.sin(angles_r)

    val = (cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta)
    # Sometimes rounding errors result in values slightly > 1.
    val = cap_absolute_value(val)
    gamma_star = np.arccos(val)

    vector_a = np.array([a * sin_beta, 0.0, a * cos_beta])
    vector_b = np.array([
        -b * sin_alpha * np.cos(gamma_star),
        b * sin_alpha * np.sin(gamma_star),
        b * cos_alpha,
    ])
    vector_c = np.array([0.0, 0.0, float(c)])

    return tuple([vector_a, vector_b, vector_c])
18,420
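# A sanity check for get_from_parameters with an orthorhombic cell: all
# angles at 90 degrees should give three mutually perpendicular,
# axis-aligned vectors (assumes the cap_absolute_value helper used above
# is available).
import numpy as np

a_vec, b_vec, c_vec = get_from_parameters(3.0, 4.0, 5.0, 90.0, 90.0, 90.0)
assert np.allclose(a_vec, [3.0, 0.0, 0.0])
assert np.allclose(b_vec, [0.0, 4.0, 0.0])
assert np.allclose(c_vec, [0.0, 0.0, 5.0])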
def random_chinese_name():
    """Generate a random Chinese name.

    Covered formats: two-character, three-character and four-character names.

    :return:
    """
    name_len = random.choice(range(4))
    if name_len == 0:
        name = random_two_name()
    elif name_len == 1:
        name = random_three_name()
    elif name_len == 2:
        name = random_three_names()
    else:
        name = random_four_name()
    return name
18,421
def copy_dir_with_s3(s3_old_path, s3_new_path, raise_when_no_exist=True):
    """Copies files from one S3 Path to another

    Args:
        s3_old_path(S3Path): Output path of the file to be uploaded
        s3_new_path(S3Path): Output path of the file to be uploaded
        raise_when_no_exist(bool, optional): Raise error if file not found

    Raises:
        ETLInputError: If s3_old_path does not exist
    """
    if not isinstance(s3_old_path, S3Path):
        raise ETLInputError('S3 old path should be of type S3Path')
    if not s3_old_path.is_directory:
        raise ETLInputError('S3 old path must be directory')
    if not isinstance(s3_new_path, S3Path):
        raise ETLInputError('S3 new path should be of type S3Path')
    if not s3_new_path.is_directory:
        raise ETLInputError('S3 new path must be directory')

    bucket = get_s3_bucket(s3_old_path.bucket)
    prefix = s3_old_path.key
    # Enforce this to be a folder's prefix
    prefix += '/' if not prefix.endswith('/') else ''
    keys = bucket.get_all_keys(prefix=prefix)
    for key in keys:
        if key:
            key.copy(s3_new_path.bucket,
                     os.path.join(s3_new_path.key, os.path.basename(key.key)))
        elif raise_when_no_exist:
            raise ETLInputError('The key does not exist: %s' % s3_old_path.uri)
18,422
def power_list(lists: List[list]) -> list:
    """ Cartesian product across the options of all lists """
    if len(lists) == 1:
        return [[v] for v in lists[0]]
    grids = power_list(lists[:-1])
    new_grids = []
    for v in lists[-1]:
        for g in grids:
            new_grids.append(g + [v])
    return new_grids
18,423
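# power_list builds the Cartesian product recursively, with the last list
# varying slowest. A quick sketch of its output order (itertools.product
# yields the same combinations, but with the last list varying fastest):
grid = power_list([[1, 2], ['a', 'b']])
assert grid == [[1, 'a'], [2, 'a'], [1, 'b'], [2, 'b']]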
def send_email(from_email, to, subject, message, html=True):
    """ Send emails to the given recipients

    :param from_email:
    :param to:
    :param subject:
    :param message:
    :param html:
    :return: Boolean value
    """
    try:
        email = EmailMessage(subject, message, from_email, to)
        print("Sending email..")
        if html:
            email.content_subtype = 'html'
        email.send()
        return True
    except Exception as e:
        print("Error in sending email: {0}".format(str(e)))
        if 'rate exceeded' in str(e):
            time.sleep(2)
            # retry once after backing off, and propagate the result
            return send_email(from_email, to, subject, message, html)
        return False
18,424
def set_default_locale(code):
    """Sets the default locale, used in get_closest_locale().

    The default locale is assumed to be the language used for all strings in
    the system. The translations loaded from disk are mappings from the
    default locale to the destination locale. Consequently, you don't need
    to create a translation file for the default locale.
    """
    global _default_locale
    global _supported_locales
    _default_locale = code
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
18,425
def attack(health, power, percent_to_hit):
    """Calculates health from percent to hit and power of hit

    Parameters:
        health - integer defining health of attackee
        power - integer defining damage of attacker
        percent_to_hit - float defining percent chance to hit of attacker

    Returns:
        new health
    """
    random_number = random.random()  # number between 0.0 and 1.0
    # if our random number falls between 0 and percent to hit
    if random_number <= percent_to_hit:
        # then a hit occurred so we reduce health by power
        health = health - power
    # return the new health value
    return health
18,426
def signal_requests_mock_factory(requests_mock: Mocker) -> Mocker:
    """Create signal service mock from factory."""

    def _signal_requests_mock_factory(
        success_send_result: bool = True, content_length_header: Optional[str] = None
    ) -> Mocker:
        requests_mock.register_uri(
            "GET",
            "http://127.0.0.1:8080/v1/about",
            status_code=HTTPStatus.OK,
            json={"versions": ["v1", "v2"]},
        )
        if success_send_result:
            requests_mock.register_uri(
                "POST",
                "http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX,
                status_code=HTTPStatus.CREATED,
            )
        else:
            requests_mock.register_uri(
                "POST",
                "http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX,
                status_code=HTTPStatus.BAD_REQUEST,
            )
        if content_length_header is not None:
            requests_mock.register_uri(
                "GET",
                URL_ATTACHMENT,
                status_code=HTTPStatus.OK,
                content=CONTENT,
                headers={"Content-Length": content_length_header},
            )
        else:
            requests_mock.register_uri(
                "GET",
                URL_ATTACHMENT,
                status_code=HTTPStatus.OK,
                content=CONTENT,
            )
        return requests_mock

    return _signal_requests_mock_factory
18,427
def test_dwt_denoise_trace():
    """ Check that sample data fed into dwt_denoise_trace() can be processed
    and that the returned signal is reasonable (for just one trace)"""
    # Loma Prieta test station (nc216859)
    data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A')
    trace = read_data(data_files[0])

    dataOut = dwt.denoise_trace(tr=trace)

    # Look at frequency content? Samples?
    return dataOut
18,428
def get_networks(project_id=None, auth_token=None):
    """
    Get a list of all routed networks
    """
    url = CATALOG_HOST + "/routednetwork"
    try:
        response_body = _api_request(url=url, http_method="GET",
                                     project_id=project_id, auth_token=auth_token)
    except CommandExecutionError as e:
        log.exception(e)
        return None

    networks = [
        network for network in response_body
        if network['internalDeploymentStatus']['phase'] in list(map(str, POSITIVE_PHASES))
    ]
    return networks
18,429
def main(url, out_path):
    """Download a zip file and extract its contents.

    Parameters
    ----------
    url : string
        URL to download zip file from (must be a zip file with no password)
    out_path : string
        Path to extract the zip file contents to

    Example
    ----------
    main("https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip",
         "../data/raw/")
    """
    try:
        request = requests.get(url)
        zipdoc = zipfile.ZipFile(BytesIO(request.content))
        for name in zipdoc.namelist():
            print("Extracting... {0}{1}".format(out_path, name))
            zipdoc.extract(name, out_path)
        zipdoc.close()
        print("Done extracting files from the ZipFile")
    except BadZipFile as b:
        print("Error: ", b)
    except Exception as e:
        print("Error: ", e)
18,430
def setup_module(mod):
    """Sets up the pytest environment."""
    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)

    logger.info("Running setup_module to create topology")

    # This function initiates the topology build with Topogen...
    json_file = "{}/test_ospf_dual_stack.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    global topo
    topo = tgen.json_topo

    # ... and here it calls Mininet initialization functions.
    # get the list of daemons that need to be started for this suite.
    daemons = topo_daemons(tgen, topo)

    # Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
    start_topology(tgen, daemons)

    # Creating configuration from JSON
    build_config_from_json(tgen, topo)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # API call to verify whether OSPF converged
    ospf_covergence_ipv4 = verify_ospf_neighbor(tgen, topo)
    assert ospf_covergence_ipv4 is True, "setup_module :Failed \n Error: {}".format(
        ospf_covergence_ipv4
    )

    # API call to verify whether OSPF6 converged
    ospf_covergence_ipv6 = verify_ospf6_neighbor(tgen, topo)
    assert ospf_covergence_ipv6 is True, "setup_module :Failed \n Error: {}".format(
        ospf_covergence_ipv6
    )

    logger.info("Running setup_module() done")
18,431
def click_event(event, x, y, flags, params):
    """ Crop an image based on the clicked detected face """
    # event is triggered with a mouse click
    if event == cv2.EVENT_LBUTTONUP:
        for location in face_locations:
            # unpack the coordinates from the location tuple
            top, right, bottom, left = location

            if (top < y < bottom) and (left < x < right):
                frame_copy = np.copy(frame)
                roi = frame_copy[top:bottom, left:right]

                # give a unique name for the cropped image
                currentDT = datetime.datetime.now()
                cropped_name = os.path.join(
                    CROPPED_IMAGES_PATH,
                    loc_name_dict[location] + '_' + str(currentDT) + '.png')

                # save the cropped image
                cv2.imwrite(cropped_name, roi)

                # show the cropped image
                crop = cv2.imread(cropped_name)
                cv2.imshow('cropped image', crop)

                # re-run the run_face_recognition function
                run_face_recognition(database)
18,432
def _download_(args):
    """ To be used within _ZTFDownloader_.download_data()

    url, fileout, overwrite, verbose, wait = args
    """
    url, fileout, overwrite, verbose, wait = args
    download_single_url(url, fileout=fileout, overwrite=overwrite,
                        verbose=verbose, wait=wait)
18,433
def feature_registration(source, target, MIN_MATCH_COUNT=12):
    """
    Obtain the rigid transformation from source to target:
    first find correspondences between the color images by performing fast
    registration using SIFT features on the color images. The corresponding
    depth values of the matching keypoints are then used to obtain a rigid
    transformation through a ransac process.

    Parameters
    ----------
    source : ((n,m) uint8, (n,m) float)
        The source color image and the corresponding 3d pointcloud combined in a list
    target : ((n,m) uint8, (n,m) float)
        The target color image and the corresponding 3d pointcloud combined in a list
    MIN_MATCH_COUNT : int
        The minimum number of good corresponding feature points for the
        algorithm to trust the pairwise registration result with feature
        matching only

    Returns
    ----------
    transform : (4,4) float or None
        The homogeneous rigid transformation that transforms source to the
        target's frame. If None, the registration result using feature
        matching only cannot be trusted, either because not enough good
        matching feature points are found, or because the ransac process
        does not return a solution
    """
    cad_src, depth_src = source
    cad_des, depth_des = target

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(cad_src, None)
    kp2, des2 = sift.detectAndCompute(cad_des, None)

    # find good matches
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    # if the number of good matching feature points exceeds MIN_MATCH_COUNT
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        bad_match_index = np.where(np.array(matchesMask) == 0)

        src_index = np.vstack(src_pts).squeeze()
        src_index = np.delete(src_index, tuple(bad_match_index[0]), axis=0)
        src_index[:, [0, 1]] = src_index[:, [1, 0]]
        src_index = tuple(src_index.T.astype(np.int32))
        src_depths = depth_src[src_index]

        dst_index = np.vstack(dst_pts).squeeze()
        dst_index = np.delete(dst_index, tuple(bad_match_index[0]), axis=0)
        dst_index[:, [0, 1]] = dst_index[:, [1, 0]]
        dst_index = tuple(dst_index.T.astype(np.int32))
        dst_depths = depth_des[dst_index]

        # bad matches were already removed via np.delete above; keep only
        # keypoint pairs with valid (non-zero) depth on both sides
        dst_good = []
        src_good = []
        for i in range(len(dst_depths)):
            if np.sum(dst_depths[i]) != 0 and np.sum(src_depths[i]) != 0:
                dst_good.append(dst_depths[i].tolist())
                src_good.append(src_depths[i].tolist())

        # get the rigid transform between the 2 sets of feature points through ransac
        transform = match_ransac(np.asarray(src_good), np.asarray(dst_good))
        return transform
    else:
        return None
18,434
def set_metadata(testbench_config, testbench):
    """
    Perform the direct substitutions from the sonar testbench metadata into
    the testbench

    Args:
        testbench_config (Testbench): Sonar testbench description
        testbench (str): The testbench template
    """
    for key, value in testbench_config.metadata.items():
        if value is None:
            replace_str = ""
        else:
            replace_str = str(value)
        search_str = "SONAR_" + key.upper()
        testbench = replace_in_testbenches(testbench, search_str, replace_str)

    return testbench
18,435
def index(a, x):
    """Locate the leftmost value exactly equal to x"""
    i = bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    raise ValueError
18,436
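# Usage sketch for index above: bisect_left finds the leftmost insertion
# point, so duplicates resolve to the first occurrence, and a missing
# value raises.
from bisect import bisect_left

a = [1, 2, 4, 4, 8]
assert index(a, 4) == 2  # leftmost of the two 4s
# index(a, 3) would raise ValueError, since 3 is absent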
def convert_secondary_type_list(obj):
    """
    :type obj: :class:`[mbdata.models.ReleaseGroupSecondaryType]`
    """
    type_list = models.secondary_type_list()
    for t in obj:
        type_list.add_secondary_type(convert_secondary_type(t))
    return type_list
18,437
def get_fdr_output(D, foutname):
    """Runs fdr and writes all relevant output to a pickle."""
    slm = SLM(FixedEffect(1), FixedEffect(1))
    for key in D.keys():
        setattr(slm, key, D[key])

    # run fdr
    Q = fdr(slm)

    Q_out = {}
    Q_out["Q"] = Q

    with open(foutname, "wb") as handle:
        pickle.dump(Q_out, handle, protocol=4)
18,438
def run(inputs, parameters=None):
    """Function to be called by DOE and optimization. Design variables are
    the only inputs.

    :param inputs: {'sma', 'linear', 'R'}"""
    def thickness(x, t, chord):
        y = af.Naca00XX(chord, t, [x], return_dict='y')
        thickness_at_x = y['u'] - y['l']
        return thickness_at_x

    if parameters is not None:
        eng = parameters[0]
        import_matlab = False
    else:
        eng = None
        import_matlab = True

    sma = inputs['sma']
    linear = inputs['linear']
    R = inputs['R']
    sigma_o = 100e6

    airfoil = "naca0012"
    chord = 1.  # 0.6175
    J = {'x': 0.75, 'y': 0.}

    # Adding the area key to the dictionaries
    sma['area'] = math.pi * (0.000381 / 2.)**2
    linear['area'] = 0.001

    # Design constants
    # arm length to center of gravity
    r_w = 0.10
    # Aircraft weight (mass times gravity)
    W = 0.0523 * 9.8  # 0.06*9.8
    alpha = 0.
    V = 10  # m/s
    altitude = 10000.  # feet

    # Temperature
    T_0 = 273.15 + 30.
    T_final = 273.15 + 140.

    # Initial martensitic volume fraction
    MVF_init = 1.

    # Number of steps and cycles
    n = 200
    n_cycles = 0

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Parameters to select how to output stuff
    all_outputs = True
    save_data = True
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if all_outputs:
        eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap(
            airfoil, chord, J, sma, linear, sigma_o, W, r_w, V, altitude,
            alpha, T_0, T_final, MVF_init, n, R, all_outputs=True,
            import_matlab=import_matlab, eng=eng, n_cycles=n_cycles)

        import matplotlib.pyplot as plt

        plt.figure()
        plt.plot(np.rad2deg(theta), eps_s, lw=2., label="$\epsilon_s$")
        plt.plot(np.rad2deg(theta), eps_l, 'b--', lw=2, label="$\epsilon_l$")
        plt.ylabel('$\epsilon$', fontsize=24)
        plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        print(len(T), len(eps_s), len(eps_l), len(theta), len(eps_t))

        plt.figure()
        plt.plot(np.rad2deg(theta), eps_t, lw=2.)
        plt.ylabel('$\epsilon_t$', fontsize=24)
        plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        plt.figure()
        plt.plot(np.rad2deg(theta), MVF, lw=2.)
        plt.ylabel('$MVF$', fontsize=24)
        plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        plt.figure()
        plt.plot(T, MVF, lw=2.)
        plt.ylabel('$MVF$', fontsize=24)
        plt.xlabel('$T (K)$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        plt.figure()
        plt.plot(T, sigma, lw=2.)
        plt.ylabel('$\sigma$', fontsize=24)
        plt.xlabel('$T (K)$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        plt.figure()
        plt.plot(T, eps_s, 'b', lw=2., label="$\epsilon_s$")
        plt.plot(T, eps_l, 'b--', lw=2, label="$\epsilon_l$")
        plt.xlabel('$T (K)$', fontsize=20)
        plt.ylabel('$\epsilon$', fontsize=24)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()

        plt.figure()
        plt.plot(T, np.rad2deg(theta), lw=2.)
        plt.xlabel('$T (K)$', fontsize=20)
        plt.ylabel(r'$\theta ({}^{\circ})$', fontsize=20)
        plt.grid()

        F_s = []
        for i in range(len(sigma)):
            F_s.append(sigma[i] * sma['area'])

        plt.figure()
        plt.plot(theta, F_s, 'b', lw=2., label="$F_s$")
        plt.plot(theta, F_l, 'b--', lw=2., label="$F_l$")
        plt.ylabel('$F (N)$', fontsize=20)
        plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
        plt.legend(loc='best', fontsize='x-large')
        plt.grid()
    else:
        theta, k = flap(airfoil, chord, J, sma, linear, sigma_o, W, r_w, V,
                        altitude, alpha, T_0, T_final, MVF_init, n, R,
                        all_outputs=False, import_matlab=import_matlab,
                        eng=eng, n_cycles=n_cycles)

    # the detailed outputs only exist when all_outputs is True
    if save_data and all_outputs:
        Data = {'theta': theta, 'eps_s': eps_s, 'eps_l': eps_l,
                'sigma': sigma, 'xi': MVF, 'T': T, 'eps_t': eps_t,
                'F_l': F_l, 'k': k, 'L_s': L_s}
        pickle.dump(Data, open("data.p", "wb"))

    return {'theta': theta, 'k': k}
18,439
def test_sa_question_creation() -> None:
    """Assert the creation and return values of a Question."""
    for test_case in cases:
        question = parser.Question(str(test_case["filename"]))
        assert test_case["filename"] == question.filename
        assert test_case["question_type"] == question.type
        # Add case for checking number of feedback == number of answer
        sections = test_case["sections"]
        for key, value in sections.items():  # type: ignore
            assert question.sections[key] == value
18,440
def load_data(path):
    """Load JSON data."""
    with open(path) as inf:
        return json.load(inf)
18,441
def test_new_dividends(mocker, capsys):
    """Various cases of tickers being included or excluded from the status."""
    mocker.patch.object(div_status, "_new_div_all", return_value=SMART_LAB_DF)
    mocker.patch.object(div, "dividends", side_effect=[PLZL_DF, T_DF, KZOS_DF, TTLK_DF])

    assert div_status.new_dividends(("TTLK", "T-RM", "KZOS", "PLZL")) == {"PLZL", "KZOS"}

    captured = capsys.readouterr()
    assert "ДАННЫЕ ПО ДИВИДЕНДАМ ТРЕБУЮТ ОБНОВЛЕНИЯ" in captured.out
18,442
def unwatch(message):
    """ Unsubscribe from real-time updates for the specified ticker """
    log.info(f"Cancelling subscription to {message.ticker} data")
    req_id = ib.next_request_id()
    subId = subscriptions[message.ticker]
    if subId:
        # ib.cancelTickByTickData(subId)
        ib.cancelMktData(subId)
        del subscriptions[message.ticker]
        del requests[subId]
18,443
def get_extension(file_path):
    """ get_extension(file)

    Gets the extension of the given file.

    Parameters
    ----------
    file_path
        A path to a file

    Returns
    -------
    str
        Returns the extension of the file if it exists or None otherwise.
        The returned extension contains a dot. Ex: .csv
    """
    if exists(file_path):
        return Path(file_path).suffix
    return None
18,444
def game_state_post_save(sender, instance, created, **kwargs):
    """
    When a new Game State is created, initialize the initial values
    """
    if created:
        # starting resources
        for resource, quantity in STARTING['Resources'].items():
            resource = Resource.objects.get(name=resource)
            ResourceState.objects.create(
                resource=resource,
                quantity=quantity,
                state=instance
            )

        # starting positions
        arena = instance.match.arena
        for player, positions in STARTING["Positions"].items():
            for position in positions:
                x, y = arena.normalize(position)
                territory = Territory.objects.get(arena=arena, position_x=x, position_y=y)
                TerritoryState.objects.create(
                    state=instance,
                    territory=territory,
                    player=player,
                    status="owned",
                    is_base=True
                )
18,445
def rank_in_group(df, group_col, rank_col, rank_method="first"):
    """Ranks a column in each group which is grouped by another column

    Args:
        df (pandas.DataFrame): dataframe to rank-in-group its column
        group_col (str): column to be grouped by
        rank_col (str): column to be ranked for
        rank_method (str): rank method to be the "method" argument of
            pandas.rank() function

    Returns:
        pandas.DataFrame: dataframe after the rank-in-group operation
    """
    df = df.copy()
    df_slice = df[[group_col, rank_col]].drop_duplicates()
    df_slice["ranked_{}".format(rank_col)] = df_slice[rank_col].rank(
        method=rank_method
    )
    # merge on both columns so each (group, value) pair maps to a unique rank
    df = pd.merge(
        df,
        df_slice,
        how="left",
        on=[group_col, rank_col],
    )
    return df
18,446
def get_layer_options(layer_options, local_options):
    """
    Get parameters belonging to a certain type of layer.

    Parameters
    ----------
    layer_options : list of String
        Specifies parameters of the layer.
    local_options : list of dictionary
        Specifies local parameters in a model function.
    """
    layer_options_dict = {}
    for key, value in six.iteritems(local_options):
        if key in layer_options:
            layer_options_dict[key] = value
    return layer_options_dict
18,447
def expr(term: Vn, add: Vt, expr: Vn) -> Vn:
    """ expr -> term + expr """
    return {"add": [term, expr]}
18,448
def gene_box(cohort, order='median', percentage=False):
    """Box plot with counts of filtered mutations by gene.

    percentage computes fitness as the increase with respect to the
    self-renewing replication rate lambda=1.3.
    Returns a figure."""

    # Load gene color dictionary
    with open('../Resources/gene_color_dict.json') as json_file:
        color_dict = json.load(json_file)

    # Create a dictionary with all filtered genes
    gene_list = []
    for traj in cohort:
        gene_list.append(traj.gene)
    gene_dict = {element: [] for element in set(gene_list)}

    # update the counts for each gene
    if percentage is False:
        y_label = 'Fitness'
        for traj in cohort:
            gene_dict[traj.gene].append(traj.fitness)
    else:
        y_label = 'fitness_percentage'
        for traj in cohort:
            gene_dict[traj.gene].append(traj.fitness_percentage)

    # sort dictionary in descending order
    if order == 'mean':
        gene_dict = dict(sorted(gene_dict.items(),
                                key=lambda item: np.mean(item[1]),
                                reverse=True))
    if order == 'median':
        gene_dict = dict(sorted(gene_dict.items(),
                                key=lambda item: np.median(item[1]),
                                reverse=True))
    if order == 'max':
        gene_dict = dict(sorted(gene_dict.items(),
                                key=lambda item: np.max(item[1]),
                                reverse=True))

    # Box plot
    fig = go.Figure()
    for i, key in enumerate(gene_dict):
        fig.add_trace(
            go.Box(y=gene_dict[key],
                   marker_color=color_dict[key],
                   name=key,
                   boxpoints='all',
                   showlegend=False))
    fig.update_layout(title='Gene distribution of filtered mutations',
                      yaxis_title=y_label,
                      template="simple_white")
    fig.update_xaxes(linewidth=2)
    fig.update_yaxes(linewidth=2)
    if percentage is False:
        fig.update_yaxes(type='log', tickvals=[0.05, 0.1, 0.2, 0.4])
    fig.update_layout(xaxis_tickangle=-45)

    return fig, gene_dict
18,449
def _clip_and_count(
    adata: AnnData,
    target_col: str,
    *,
    groupby: Union[str, None, List[str]] = None,
    clip_at: int = 3,
    inplace: bool = True,
    key_added: Union[str, None] = None,
    fraction: bool = True,
) -> Union[None, np.ndarray]:
    """Counts the number of identical entries in `target_col`
    for each group in `group_by`.
    """
    if target_col not in adata.obs.columns:
        raise ValueError("`target_col` not found in obs.")

    groupby = [groupby] if isinstance(groupby, str) else groupby
    groupby_cols = [target_col] if groupby is None else groupby + [target_col]

    clonotype_counts = (
        adata.obs.groupby(groupby_cols, observed=True)
        .size()
        .reset_index(name="tmp_count")
        .assign(
            tmp_count=lambda X: [
                ">= {}".format(min(n, clip_at)) if n >= clip_at else str(n)
                for n in X["tmp_count"].values
            ]
        )
    )
    clipped_count = adata.obs.merge(clonotype_counts, how="left", on=groupby_cols)[
        "tmp_count"
    ].values

    if inplace:
        key_added = (
            "{}_clipped_count".format(target_col) if key_added is None else key_added
        )
        adata.obs[key_added] = clipped_count
    else:
        return clipped_count
18,450
def create_training_patches(images, patch_size, patches_per_image=1, patch_stride=None):
    """ Returns a batch of image patches, given a batch of images.

    Args:
        images (list, numpy.array): Batch of images.
        patch_size (tuple, list): The (width, height) of the patch to return.
        patches_per_image (int): Number of random patches to generate from
            each image in the input batch. Default is 1.
        patch_stride (int): Stride to use in strided patching. Default is
            None, which does not use strided patching. If an integer is
            passed then strided patching will be used regardless of what is
            passed to 'patches_per_image'.

    Returns:
        (numpy.array): Batch of image patches.
    """
    image_patches = []
    for im in images:
        if patch_stride is None:
            for i in range(patches_per_image):
                image_patches.append(get_random_patch(im, patch_size))
        else:
            image_patches += list(get_stride_patches(im, patch_size, patch_stride, 2))
    return np.array(image_patches)
18,451
def get_prover_options(prover_round_tag='manual',
                       prover_round=-1) -> deephol_pb2.ProverOptions:
    """Returns a ProverOptions proto based on FLAGS."""
    if not FLAGS.prover_options:
        tf.logging.fatal('Mandatory flag --prover_options is not specified.')
    if not tf.gfile.Exists(FLAGS.prover_options):
        tf.logging.fatal('Required prover options file "%s" does not exist.',
                         FLAGS.prover_options)
    prover_options = deephol_pb2.ProverOptions()
    if FLAGS.max_theorem_parameters is not None:
        tf.logging.warning(
            'Overriding max_theorem_parameters in prover options to %d.',
            FLAGS.max_theorem_parameters)
        prover_options.action_generator_options.max_theorem_parameters = (
            FLAGS.max_theorem_parameters)
    with tf.gfile.Open(FLAGS.prover_options) as f:
        text_format.MergeLines(f, prover_options)
    if prover_options.builtin_library:
        tf.logging.warning('builtin_library is deprecated. Do not provide.')
        if str(prover_options.builtin_library) not in ['core']:
            tf.logging.fatal('Unsupported built in library: %s',
                             prover_options.builtin_library)
    if FLAGS.timeout_seconds is not None:
        prover_options.timeout_seconds = FLAGS.timeout_seconds
    if not FLAGS.output:
        tf.logging.fatal('Missing flag --output [recordio_pattern]')
    prover_options.prover_round = deephol_pb2.ProverRound(
        start_seconds=int(round(time.time())),
        tag=prover_round_tag,
        round=prover_round)
    _verify_prover_options(prover_options)

    # Log prover options.
    tf.logging.info('Using prover_options:\n %s', str(prover_options))
    return prover_options
18,452
def bracketpy(pystring):
    """Find CEDICT-style pinyin in square brackets and correct pinyin.

    Looks for square brackets in the string and tries to convert its
    contents to correct pinyin. It is assumed anything in square brackets
    is CC-CEDICT-format pinyin.

    e.g.: "拼音[pin1 yin1]" will be converted into "拼音 pīnyīn".
    """
    cedpylist = findall(r"(\[.+?\])", pystring)
    for item in cedpylist:
        pystring = pystring.replace(item, " " + pyjoin(item[1:-1]))
    return pystring
18,453
def run_validate_dictionary(args: Namespace, unknown: Optional[List[str]] = None) -> None:
    """
    Wrapper function for running dictionary validation

    Parameters
    ----------
    args: :class:`~argparse.Namespace`
        Parsed command line arguments
    unknown: list[str]
        Parsed command line arguments to be passed to the configuration objects
    """
    validate_dictionary_args(args)
    validate_dictionary(args, unknown)
18,454
def get_ram_usage_bytes(size_format: str = 'M'):
    """ Size formats include K = Kilobyte, M = Megabyte, G = Gigabyte """
    total = psutil.virtual_memory().total
    available = psutil.virtual_memory().available
    used = total - available

    # Apply size format
    if size_format == 'K':
        used = used / 1024
    if size_format == 'M':
        used = used / 1024 / 1024
    if size_format == 'G':
        used = used / 1024 / 1024 / 1024

    return int(used)
18,455
def test_rebase_and_update_remote(mock_repo, monkeypatch):
    """
    GIVEN Rebaser initialized correctly
    WHEN rebase_and_update_remote is called
    THEN a tag is created
    AND remote.fetch is called twice to catch remote updates during the rebase
    AND git.push is called
    """
    monkeypatch.setattr(time, 'sleep', lambda s: None)
    rebaser = Rebaser(mock_repo, "my_branch", "my_commit", "my_remote", "000",
                      dev_mode=True, release='15.0')

    mock_repo.commit.same.return_value = True
    rebaser.rebase_and_update_remote()

    assert mock_repo.tag.create.call_count == 1
    assert mock_repo.remote.fetch.call_count == 2

    expected = [(("-n", "my_remote", "private-rebaser-15.0-000-previous"),),
                (("-nf", "--follow-tags", "my_remote", "my_branch"),)]
    assert mock_repo.git.push.call_args_list == expected
18,456
def weighted_crossentropy(weights, name='anonymous'):
    """A weighted version of tensorflow.keras.objectives.categorical_crossentropy

    Arguments:
        weights: np.array([0.5, 2, 10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        name: string identifying the loss to differentiate when models have multiple losses

    Returns:
        keras loss function named name+'_weighted_loss'
    """
    string_globe = 'global ' + name + '_weights\n'
    string_globe += 'global ' + name + '_kweights\n'
    string_globe += name + '_weights = np.array(weights)\n'
    string_globe += name + '_kweights = K.variable(' + name + '_weights)\n'
    exec(string_globe, globals(), locals())

    fxn_postfix = '_weighted_loss'
    string_fxn = 'def ' + name + fxn_postfix + '(y_true, y_pred):\n'
    string_fxn += '\ty_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n'
    string_fxn += '\ty_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n'
    string_fxn += '\tloss = y_true * K.log(y_pred) * ' + name + '_kweights\n'
    string_fxn += '\tloss = -K.sum(loss, -1)\n'
    string_fxn += '\treturn loss\n'
    exec(string_fxn, globals(), locals())

    loss_fxn = eval(name + fxn_postfix, globals(), locals())
    return loss_fxn
18,457
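# The exec/eval indirection in weighted_crossentropy exists only so each
# generated loss gets a distinct global name. A plain closure computes the
# same weighted loss, with the name attached via __name__ instead; a
# sketch assuming tensorflow.keras is available:
import numpy as np
from tensorflow.keras import backend as K

def weighted_crossentropy_closure(weights, name='anonymous'):
    """Closure-based equivalent of weighted_crossentropy above."""
    kweights = K.constant(np.array(weights))

    def loss_fxn(y_true, y_pred):
        y_pred = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * kweights, -1)

    loss_fxn.__name__ = name + '_weighted_loss'
    return loss_fxn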
def FullBackTraceAll(cmd_args=[]):
    """ Show full backtrace across the interrupt boundary for threads running on all processors.
        Syntax: fullbtall
        Example: fullbtall
    """
    for processor in IterateLinkedList(kern.globals.processor_list, 'processor_list'):
        print("\n" + GetProcessorSummary(processor))
        active_thread = processor.active_thread
        if unsigned(active_thread) != 0:
            task_val = active_thread.task
            proc_val = Cast(task_val.bsd_info, 'proc *')
            print(GetTaskSummary.header + " " + GetProcSummary.header)
            print(GetTaskSummary(task_val) + " " + GetProcSummary(proc_val))
            print("\t" + GetThreadSummary.header)
            print("\t" + GetThreadSummary(active_thread))
            print("\tBacktrace:")
            ThreadVal = GetLLDBThreadForKernelThread(active_thread)
            FramePtr = ThreadVal.frames[0].GetFP()
            print(GetFullBackTrace(unsigned(FramePtr), prefix="\t"))
18,458
def _get_package_type(id):
    """
    Given the id of a package this method will return the type of the
    package, or 'dataset' if no type is currently set
    """
    pkg = model.Package.get(id)
    if pkg:
        return pkg.type or u'dataset'
    return None
18,459
def _find_protruding_dimensions(f, care, fol):
    """Return variables along which `f` violates `care`."""
    vrs = joint_support([f, care], fol)
    dims = set()
    for var in vrs:
        other_vars = vrs - {var}
        f_proj = fol.exist(other_vars, f)
        care_proj = fol.exist(other_vars, care)
        if (care_proj | ~f_proj) != fol.true:
            dims.add(var)
    return dims
18,460
def prepare_data(train_mode, dataset="Train"):
    """
    Args:
        dataset: choose train dataset or test dataset
            For train dataset, output data would be
            ['.../t1.bmp', '.../t2.bmp', ..., 't99.bmp']
    """
    def sorted_pngs(directory):
        # make set of all dataset file paths, sorted by the number in the file name
        data = glob.glob(os.path.join(directory, "*.png"))
        data.sort(key=lambda f: int(''.join(filter(str.isdigit, os.path.basename(f)))))
        return data

    # List of data-path lists for different folders of training data
    dataPaths = []

    # If mode is train, dataPaths from each folder in the Train directory are
    # stored into a list which is then appended to dataPaths
    if dataset == "Train":
        # Join the Train dir to the current directory
        data_dir = os.path.join(os.getcwd(), dataset)
        for root, dirs, files in os.walk(data_dir):
            for folder in dirs:
                dataPaths.append(sorted_pngs(os.path.join(data_dir, folder)))
    elif train_mode in (0, 1, 2, 3, 4):
        # Single-folder test modes: <dataset>/Mode<N>
        data_dir = os.path.join(os.path.join(os.getcwd(), dataset),
                                "Mode{}".format(train_mode))
        dataPaths.append(sorted_pngs(data_dir))
    elif train_mode in (5, 6):
        # Multi-folder test modes: one path list per sub-folder
        data_dir = os.path.join(os.path.join(os.getcwd(), dataset),
                                "Mode{}".format(train_mode))
        for root, dirs, files in os.walk(data_dir):
            for folder in dirs:
                dataPaths.append(sorted_pngs(os.path.join(data_dir, folder)))

    print(dataPaths)
    return dataPaths
18,461
def simulate_relatedness(genotypes, relatedness=.5, n_iter=1000, copy=True):
    """
    Simulate relatedness by randomly copying genotypes between individuals.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) where each
        element of the array is an integer corresponding to an allele index
        (-1 = missing, 0 = reference allele, 1 = first alternate allele,
        2 = second alternate allele, etc.).
    relatedness : float, optional
        Fraction of variants to copy genotypes for.
    n_iter : int, optional
        Number of times to randomly copy genotypes between individuals.
    copy : bool, optional
        If False, modify `genotypes` in place.

    Returns
    -------
    genotypes : ndarray, shape (n_variants, n_samples, ploidy)
        The input genotype array but with relatedness simulated.
    """
    # check genotypes array
    genotypes = np.asarray(genotypes)
    assert genotypes.ndim >= 2
    n_variants = genotypes.shape[0]
    n_samples = genotypes.shape[1]

    # copy input array, otherwise modify in place
    if copy:
        genotypes = genotypes.copy()

    # determine the number of variants to copy genotypes for
    n_copy = int(relatedness * n_variants)

    # iteratively introduce relatedness
    for i in range(n_iter):
        # randomly choose donor and recipient
        donor_index = random.randint(0, n_samples - 1)
        donor = genotypes[:, donor_index]
        recip_index = random.randint(0, n_samples - 1)
        recip = genotypes[:, recip_index]

        # randomly pick a set of variants to copy
        variant_indices = random.sample(range(n_variants), n_copy)

        # copy across genotypes
        recip[variant_indices] = donor[variant_indices]

    return genotypes
18,462
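# Usage sketch for simulate_relatedness with a small synthetic genotype
# array (diploid, biallelic); with copy=True the input array is left
# untouched.
import numpy as np

genotypes = np.random.randint(0, 2, size=(1000, 20, 2))
related = simulate_relatedness(genotypes, relatedness=0.5, n_iter=100)
assert related.shape == genotypes.shape
assert related is not genotypes  # copy=True by default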
def pages_substitute(content):
    """
    Substitute tags in pages source.
    """
    if TAG_USERGROUPS in content:
        usergroups = UserGroup.objects.filter(is_active=True).order_by('name')
        replacement = ", ".join(f"[{u.name}]({u.webpage_url})" for u in usergroups)
        content = content.replace(TAG_USERGROUPS, replacement)
    return content
18,463
def vcfanno(vcf, out_file, conf_fns, data, basepath=None, lua_fns=None):
    """
    annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
    """
    if utils.file_exists(out_file):
        return out_file
    if lua_fns is None:
        lua_fns = []
    vcfanno = config_utils.get_program("vcfanno", data)
    with file_transaction(out_file) as tx_out_file:
        conffn = _combine_files(conf_fns, tx_out_file)
        luafn = _combine_files(lua_fns, tx_out_file)
        luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
        basepathflag = "-base-path {0}".format(basepath) if basepath else ""
        cores = dd.get_num_cores(data)
        cmd = ("{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} "
               "| sed -e 's/Number=A/Number=1/g' | bgzip -c > {tx_out_file}")
        message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
        do.run(cmd.format(**locals()), message)
    return out_file
18,464
def clean(params: dict) -> str: """ Build clean rules for Makefile """ clean = "\t@$(RM) -rf $(BUILDDIR)\n" if params["library_libft"]: clean += "\t@make $@ -C " + params["folder_libft"] + "\n" if params["library_mlx"] and params["compile_mlx"]: clean += "\t@make $@ -C " + params["folder_mlx"] + "\n" return clean
18,465
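A quick look at the rules the function above emits, assuming a minimal parameter dict:

params = {"library_libft": True, "folder_libft": "libft",
          "library_mlx": False, "compile_mlx": False}
print(clean(params))
# 	@$(RM) -rf $(BUILDDIR)
# 	@make $@ -C libft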
def mathematica(quero: str, meta: str = '') -> bool:
    """mathematica

    Rudimentary mathematical operations (boolean result).

    Args:
        quero (str): expression to evaluate, e.g. '2<=3' or 'True&&False'
        meta (str): optional context, used only in error messages

    Returns:
        bool: True if the expression evaluates to True.
    """
    neo_quero = quero.replace(' ', '')
    if neo_quero == 'True':
        return True
    if neo_quero == 'False':
        return False
    if neo_quero.find('&&') > -1:
        parts = neo_quero.split('&&')
        return logicum(parts[0]) and logicum(parts[1])
    if neo_quero.find('||') > -1:
        parts = neo_quero.split('||')
        return logicum(parts[0]) or logicum(parts[1])

    regula = r"(?P<n1>(\d*))(?P<op>(\D{1,2}))(?P<n2>(\d*))"
    r1 = re.match(regula, neo_quero)
    if r1.group('op') == '==':
        return int(r1.group('n1')) == int(r1.group('n2'))
    if r1.group('op') == '!=':
        return int(r1.group('n1')) != int(r1.group('n2'))
    if r1.group('op') == '<=':
        return int(r1.group('n1')) <= int(r1.group('n2'))
    if r1.group('op') == '>=':
        return int(r1.group('n1')) >= int(r1.group('n2'))
    if r1.group('op') == '<':
        return int(r1.group('n1')) < int(r1.group('n2'))
    if r1.group('op') == '>':
        return int(r1.group('n1')) > int(r1.group('n2'))

    raise ValueError(
        'mathematica: <quaero> [{0}] <op>? [{1}]'.format(str(quero), meta))
18,466
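A few illustrative calls of the function above; these exercise only the numeric-comparison branch, so `re` is the sole import needed (the `&&`/`||` branches additionally require the `logicum` helper from the same module, not shown here):

import re

print(mathematica('2 <= 3'))    # True
print(mathematica('10 != 10'))  # False
print(mathematica('7>5'))       # True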
def fasta2vcf(f):
    """convert fasta to vcf dataframe

    Input
    -----

    Fasta file; the suffix _ref marks the reference sequence and _alt the
    alternate sequence -- these two keywords are required.

    Output
    ------

    vcf dataframe: chr, pos, name, ref, alt, reference sequence

    """
    my_dict = {}
    for r in SeqIO.parse(f, "fasta"):
        my_dict[r.id] = str(r.seq).upper()

    vcf = pd.DataFrame()
    index_list = []
    chr_list = []
    pos_list = []
    ref_list = []
    alt_list = []
    seq_list = []
    for k in my_dict:
        if not "_ref" in k:
            continue
        name = k.replace("_ref", "")
        if not name + "_alt" in my_dict:
            print(k, "alt sequence not found. Please use _ref and _alt keywords. Skip...")
            continue
        ref_seq, alt_seq = my_dict[k], my_dict[name + "_alt"]
        if len(ref_seq) < 30:
            print(k, "Please input a sequence of at least 30 bp. Skip...")
            continue
        if ref_seq == alt_seq:
            print(k, "Ref and Alt sequences are identical. Please check. Skip...")
            continue
        pos, ref, alt = find_pos_ref_alt(ref_seq, alt_seq)
        index_list.append(name)
        chr_list.append(k)
        seq_list.append(ref_seq)
        pos_list.append(pos)
        ref_list.append(ref)
        alt_list.append(alt)

    vcf[0] = chr_list
    vcf[1] = pos_list
    vcf[2] = index_list
    vcf[3] = ref_list
    vcf[4] = alt_list
    vcf[5] = seq_list
    vcf = vcf[vcf[1] != -1]
    if vcf.shape[0] == 0:
        print("no valid sequences in:", f)
        print("Exit...")
        sys.exit(1)
    return vcf
18,467
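The expected input format, sketched with hypothetical sequences; note the paired `_ref`/`_alt` names and the 30 bp minimum length. The call itself is commented out because `find_pos_ref_alt`, `SeqIO`, and `pd` come from the surrounding module and Biopython/pandas:

example = """>snp1_ref
ACGTACGTACGTACGTACGTACGTACGTACGT
>snp1_alt
ACGTACGTACGTACGTGCGTACGTACGTACGT
"""
with open("example.fa", "w") as fh:
    fh.write(example)

# df = fasta2vcf("example.fa")
# -> one row: snp1, the mismatching position, and the ref/alt bases A/G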
def test_calc_job_node_get_builder_restart(aiida_localhost): """Test the `CalcJobNode.get_builder_restart` method.""" original = orm.CalcJobNode( computer=aiida_localhost, process_type='aiida.calculations:core.arithmetic.add', label='original' ) original.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) original.set_option('max_wallclock_seconds', 1800) original.base.links.add_incoming(orm.Int(1).store(), link_type=LinkType.INPUT_CALC, link_label='x') original.base.links.add_incoming(orm.Int(2).store(), link_type=LinkType.INPUT_CALC, link_label='y') original.store() builder = original.get_builder_restart() assert 'x' in builder assert 'y' in builder assert 'metadata' in builder assert 'options' in builder.metadata assert builder.x == orm.Int(1) assert builder.y == orm.Int(2) assert builder._inputs(prune=True)['metadata']['options'] == original.get_options()
18,468
def checkout(path, commit, create = False):
    """
    Checks out a branch or commit in git

    :path: path to the git repo
    :commit: String, name of the branch or commit to check out
    :create: create a new branch instead of expecting it to exist
    """
    command = ["git", "checkout"]
    if create:
        command.append("-b")
    command.append(commit)
    if call(command, cwd=path) != 0:
        fail("Could not checkout " + commit + " from " + path)
18,469
def time_pet(power, energy):
    """Return the time needed to spend `energy` at a constant `power` (t = E / P).

    Usage: time_pet(power, energy)"""
    return energy/power
18,470
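A one-line worked example: drawing 60 W from a 120 Wh budget lasts two hours (any consistent power/energy unit pair works):

print(time_pet(60, 120))  # 2.0 hours, if power is in W and energy in Wh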
def compute_sigma0(
    T,
    S,
    **kwargs,
):
    """compute the potential density anomaly referenced to the surface,
    i.e. the in-situ density evaluated at zero pressure minus 1000 kg/m^3
    """
    return compute_rho(T, S, 0, **kwargs) - 1000
18,471
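In the usual oceanographic notation, the helper above computes

\sigma_0 = \rho(T, S, p = 0) - 1000\ \mathrm{kg\,m^{-3}},

which shifts typical seawater densities into the familiar sigma range of roughly 20-29.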
def find_rise_offsets(
        connection, reference_zeta_mm=None):
    """Compute and store the rise offsets for the rising curves,
    committing the result on the given database connection."""
    cursor = connection.cursor()
    compute_rise_offsets(cursor, reference_zeta_mm)
    cursor.close()
    connection.commit()
18,472
def get_neighbors_radius(nelx, nely, coord, connect, radius):
    """ Check neighboring elements that have the centroid within the predetermined radius.

    Args:
        nelx (:obj:`int`): Number of elements on the x axis.
        nely (:obj:`int`): Number of elements on the y axis.
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        radius (:obj:`float`): Radius to get elements in the vicinity of each element.

    Returns:
        neighbors, H, centroids
    """
    el_number = nelx * nely

    centroids = np.empty((el_number, 2))
    idx = connect[:, 1:] - 1
    centroids[:, 0] = np.sum(coord[idx, 1], axis = 1)/4
    centroids[:, 1] = np.sum(coord[idx, 2], axis = 1)/4

    ind_rows = []
    ind_cols = []
    data = []
    cols = 0
    neighbors = []
    for el in range(el_number):
        distance = np.sqrt(np.sum((centroids[el] - centroids)**2, axis=1))
        mask = distance <= radius
        neighbor = mask.nonzero()[0] + 1
        neighbors.extend(neighbor - 1)

        hi = radius - distance
        hi_max = np.maximum(0, hi)
        data.extend(hi_max[mask])
        aux = len(hi_max[mask])
        rows = np.repeat(el, aux)
        columns = np.arange(0, aux)
        ind_rows.extend(rows)
        ind_cols.extend(columns)

        if aux > cols:
            cols = aux
    H = csc_matrix((data, (ind_rows, ind_cols)), shape=(nelx*nely, cols)).toarray()
    neighbors = csc_matrix((neighbors, (ind_rows, ind_cols)), shape=(nelx*nely, cols), dtype='int').toarray()
    return neighbors, H, centroids
18,473
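A tiny smoke test for the function above on a hypothetical 2x1 quad mesh; the `coord` layout ([node id, x, y]) and the 1-based `connect` layout ([element id, n1..n4]) are inferred from how the function indexes them:

import numpy as np
from scipy.sparse import csc_matrix  # used inside get_neighbors_radius

# Six nodes of a 2x1 mesh: [node id, x, y]
coord = np.array([[1, 0., 0.], [2, 1., 0.], [3, 2., 0.],
                  [4, 0., 1.], [5, 1., 1.], [6, 2., 1.]])
# Two quad elements: [element id, n1, n2, n3, n4] (1-based node ids)
connect = np.array([[1, 1, 2, 5, 4],
                    [2, 2, 3, 6, 5]])

neighbors, H, centroids = get_neighbors_radius(2, 1, coord, connect, radius=1.1)
print(centroids)  # [[0.5, 0.5], [1.5, 0.5]] -> each element sees the other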
def merge_with(obj, *sources, **kwargs):
    """
    This method is like :func:`merge` except that it accepts customizer which is invoked to
    produce the merged values of the destination and source properties. If customizer returns
    ``None``, merging is handled by this method instead. The customizer is invoked with five
    arguments: ``(obj_value, src_value, key, obj, source)``.

    Args:
        obj (dict): Destination object to merge source(s) into.
        sources (dict): Source objects to merge from. Subsequent sources overwrite previous ones.

    Keyword Args:
        iteratee (callable, optional): Iteratee function to handle merging (must be passed in as
            keyword argument).

    Returns:
        dict: Merged object.

    Warning:
        `obj` is modified in place.

    Example:

        >>> cbk = lambda obj_val, src_val: obj_val + src_val
        >>> obj1 = {'a': [1], 'b': [2]}
        >>> obj2 = {'a': [3], 'b': [4]}
        >>> res = merge_with(obj1, obj2, cbk)
        >>> obj1 == {'a': [1, 3], 'b': [2, 4]}
        True

    .. versionadded:: 4.0.0

    .. versionchanged:: 4.9.3
        Fixed regression in v4.8.0 that caused exception when `obj` was ``None``.
    """
    if obj is None:
        return None

    sources = list(sources)
    iteratee = kwargs.pop("iteratee", None)

    if iteratee is None and sources and callable(sources[-1]):
        iteratee = sources.pop()

    sources = [copy.deepcopy(source) for source in sources]

    if callable(iteratee):
        iteratee = partial(callit, iteratee, argcount=getargcount(iteratee, maxargs=5))
    else:
        iteratee = None

    return _merge_with(obj, *sources, iteratee=iteratee, **kwargs)
18,474
def count_class_nbr_patent_cnt(base_data_list, calculate_type):
    """
    Count, across all records, the number of patents per classification number.

    :param base_data_list: list of patent records
    :param calculate_type: counting strategy, passed through to the per-record helper
    :return: dict mapping classification number to patent count
    """
    class_number_patent_cnt_dict = dict()
    for base_data in base_data_list:
        class_number_value = base_data[const.CLASS_NBR]
        calculate_class_number_patent_count_dict(class_number_value, class_number_patent_cnt_dict,
                                                 calculate_type)
    return class_number_patent_cnt_dict
18,475
def SetVariable(output, variable_name, value): """Sets a CMake variable.""" output.write('set(') output.write(variable_name) output.write(' "') output.write(CMakeStringEscape(value)) output.write('")\n')
18,476
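A quick demonstration of the writer above; `CMakeStringEscape` is an external helper from the same module, stubbed here as the identity purely for illustration:

import io

CMakeStringEscape = lambda s: s  # stand-in for the real escaping helper

out = io.StringIO()
SetVariable(out, "CMAKE_CXX_STANDARD", "17")
print(out.getvalue())  # set(CMAKE_CXX_STANDARD "17")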
def plot_pta_L(df):
    """
    INPUTS
    -df: pandas dataframe containing the data to plot

    OUTPUTS
    -saves pta graphs in .html
    """
    title = generate_title_run_PTA(df, "Left Ear", df.index[0])
    labels = {"title": title,
              "x": "Frequency (Hz)",
              "y": "Hearing Threshold (dB HL)"}
    fig = go.Figure()
    fig.update_layout(title=labels["title"],
                      xaxis_title=labels["x"],
                      yaxis_title=labels["y"],
                      xaxis_type="log",
                      xaxis_range=[np.log10(100), np.log10(20000)],
                      yaxis_range=[80, -20],
                      yaxis_dtick=10,
                      xaxis_showline=True,
                      xaxis_linecolor="black",
                      yaxis_showline=True,
                      yaxis_linecolor="black",
                      yaxis_zeroline=True,
                      yaxis_zerolinewidth=1,
                      yaxis_zerolinecolor="black")
    x, y = data_to_plot_PTA(df, "LE_")
    fig.add_trace(go.Scatter(x=x,
                             y=y,
                             line_color="blue",
                             mode='lines+markers',
                             name=labels["title"],
                             hovertemplate="%{x:1.0f} Hz<br>" +
                                           "%{y:1.0f} dB HL"))
    return save_graph_PTA(fig, df, "Left Ear") is True
18,477
def error(text): """Safely echo an error to STDERR.""" output = text if sys.stderr.isatty(): output = format_for_tty(text, [TEXT_ERROR, TEXT_BOLD]) stderr_log.error(output)
18,478
def create_graph(vertex_num: int, edge_list: list) -> dict:
    """
    Create a graph expressed with adjacency list

    :param vertex_num: number of vertices; input vertices are numbered 1..vertex_num
    :param edge_list: list of (a, b) pairs of 1-based vertex numbers

    :dict_key   : int (a vertex, 0-based)
    :dict_value : set (consisted of vertices adjacent to key vertex)
    """
    a_graph = {i: set() for i in range(vertex_num)}

    for a, b in edge_list:
        a_graph[a - 1].add(b - 1)  # All graphs always need this line
        a_graph[b - 1].add(a - 1)  # Only undirected graph needs this line

    return a_graph
18,479
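For example, a triangle plus an isolated vertex (1-based input, 0-based keys in the result):

g = create_graph(4, [(1, 2), (2, 3), (1, 3)])
print(g)  # {0: {1, 2}, 1: {0, 2}, 2: {0, 1}, 3: set()}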
def msg_warn(message): """ Log a warning message :param message: the message to be logged """ to_stdout(" (!) {message}".format(message=message), colorf=yellow, bold=True) if _logger: _logger.warn(message)
18,480
def multikey_fkg_allowing_type_hints( namespace: Optional[str], fn: Callable, to_str: Callable[[Any], str] = repr) -> Callable[[Any], List[str]]: """ Equivalent of :func:`dogpile.cache.util.function_multi_key_generator`, but using :func:`inspect.signature` instead. Also modified to make the cached function unique per INSTANCE for normal methods of a class. """ namespace = get_namespace(fn, namespace) sig = inspect.signature(fn) argnames = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD] has_self = bool(argnames and argnames[0] in ('self', 'cls')) def generate_keys(*args: Any, **kw: Any) -> List[str]: if kw: raise ValueError("This dogpile.cache key function generator, " "multikey_fkg_allowing_type_hints, " "does not accept keyword arguments.") if has_self: # Unlike dogpile's default, make it instance- (or class-) specific # by including a representation of the "self" or "cls" argument: args = [hex(id(args[0]))] + list(args[1:]) keys = [namespace + "|" + key for key in map(to_str, args)] if DEBUG_INTERNALS: log.debug( "multikey_fkg_allowing_type_hints.generate_keys() -> {!r}", keys) return keys return generate_keys
18,481
def get_current_version() -> str: """Read the version of the package. See https://packaging.python.org/guides/single-sourcing-package-version """ version_exports = {} with open(VERSION_FILE) as file: exec(file.read(), version_exports) # pylint: disable=exec-used return version_exports["VERSION"]
18,482
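The pattern assumes `VERSION_FILE` points at a tiny Python file whose only job is to assign `VERSION`; a hypothetical example:

# contents of the file VERSION_FILE points to, e.g. _version.py
VERSION = "1.2.3"

# get_current_version() exec()s that file into a scratch dict and returns "1.2.3"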
def seed_story(text_dict): """Generate random seed for story.""" story_seed = random.choice(list(text_dict.keys())) return story_seed
18,483
def __default_proto_version_inject():
    """
    Patch Connection.connect so that the protocol version falls back to
    default_proto_version whenever the server allows more than one version.
    """
    import minecraft.networking.connection as connection
    from minecraft.networking.connection import Connection

    red, connection_class = redbaron_util.read_class(Connection)
    connect_method = redbaron_util.get_def(connection_class, 'connect')

    main_with = redbaron_util.get_node(connect_method, node_type=WithNode)
    idx = redbaron_util.get_node_index(main_with, node_type=AssignmentNode,
                                       predicate=lambda n: str(n.target) == 'self.spawned')

    redbaron_util.insert_nodes(main_with, idx, [
        RedBaron('''self.recorder.logger.info('Allowed versions of the server: {}'.format(self.allowed_proto_versions))'''),
        RedBaron('''if len(self.allowed_proto_versions) > 1:
    self.context.protocol_version = self.default_proto_version''')
    ])

    patched_class_source = connect_method.dumps()

    globals_ = dict(connection.__dict__)
    exec(patched_class_source, globals_)
    PatchedConnection = globals_['Connection']

    Connection.connect = PatchedConnection.connect
18,484
def rename_file(directory, oldfilename, newfilename):
    """renames a file in a directory

    :param directory: the name of the directory containing the file to be renamed
    :type directory: path
    :param oldfilename: original name of the file
    :type oldfilename: string
    :param newfilename: new name of the file
    :type newfilename: string
    """
    if os.path.exists(directory):
        oldfile = directory / oldfilename
        newfile = directory / newfilename
        os.rename(oldfile, newfile)
        print('renamed: ', oldfile, ' with: ', newfile)
    else:
        logging.error("rename-error: directory %s not found", directory)
18,485
def detect_conda_env(): """Inspect whether `sys.executable` is within a conda environment and if it is, return the environment name and Path of its prefix. Otherwise return None, None""" prefix = Path(sys.prefix) if not (prefix / 'conda-meta').is_dir(): # Not a conda env return None, None if (prefix / 'envs').is_dir(): # It's the base conda env: return 'base', prefix # Not the base env: its name is the directory basename: return prefix.name, prefix
18,486
def _get_rel_att_inputs(d_model, n_heads): # pylint: disable=invalid-name """Global relative attentions bias initialization shared across the layers.""" assert d_model % n_heads == 0 and d_model % 2 == 0 d_head = d_model // n_heads bias_initializer = init.RandomNormalInitializer(1e-6) context_bias_layer = core.Weights(bias_initializer, shape=(1, n_heads, 1, d_head)) location_bias_layer = core.Weights(bias_initializer, shape=(1, n_heads, 1, d_head)) return context_bias_layer, location_bias_layer
18,487
def two_time_pad():
    """A one-time pad simply involves the xor of a message with a key to produce
    a ciphertext: c = m ^ k. It is essential that the key be as long as the message,
    or in other words that the key not be repeated for two distinct message blocks.

    Your task: In this problem you will break a cipher when the one-time pad is re-used.

    c_1 = 3801025f45561a49131a1e180702
    c_2 = 07010051455001060e551c571106

    These are two hex-encoded ciphertexts that were formed by applying a
    “one-time pad” to two different messages with the same key.

    Find the two corresponding messages m_1 and m_2.

    Okay, to make your search simpler, let me lay out a few ground rules. First,
    every character in the text is either a lowercase letter or a space, aside
    from perhaps the first character in the first message which might be
    capitalized. As a consequence, no punctuation appears in the messages.
    Second, the messages consist of English words in ASCII. Finally, all of the
    words within each message are guaranteed to come from the set of the 100
    most common English words:
    https://en.wikipedia.org/wiki/Most_common_words_in_English.

    Returns:
        Output the concatenation of strings m_1 and m_2. (Don't worry if words
        get smashed together as a result.)
    """
    c_1 = '3801025f45561a49131a1e180702'
    c_2 = '07010051455001060e551c571106'

    # Convert the hexadecimal representation to integers, two hex digits at a
    # time, since the xor operations are done on integers.
    c_1_int = [int(c_1[i] + c_1[i+1], 16) for i in range(0, len(c_1), 2)]
    c_2_int = [int(c_2[i] + c_2[i+1], 16) for i in range(0, len(c_2), 2)]

    # xor of the two integer lists; the shared key cancels out
    xord = [c_1_int[i] ^ c_2_int[i] for i in range(len(c_1_int))]
    result = construct('', xord)
    if result is None:
        return None
    print(result)
    new_string = ''.join([chr(ord(result[i]) ^ xord[i]) for i in range(len(result))])
    return new_string + result
18,488
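The property the attack exploits, in a standalone sketch: because both ciphertexts use the same key k, the key cancels and c1 xor c2 equals m1 xor m2; and since a space (0x20) xored with a letter only flips its case, spaces in one message leak letters of the other. The messages here are hypothetical, chosen just for illustration:

m1, m2 = b"the cat sat", b"a dog stood"
k = bytes(range(11))  # any key of the same length

c1 = bytes(a ^ b for a, b in zip(m1, k))
c2 = bytes(a ^ b for a, b in zip(m2, k))

x = bytes(a ^ b for a, b in zip(c1, c2))
assert x == bytes(a ^ b for a, b in zip(m1, m2))  # key cancelled

# Where m2 has a space, xoring with 0x20 again recovers m1's letter:
print(chr(x[1] ^ ord(' ')))  # m2[1] == ' ', so this prints m1[1] == 'h'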
def same_datatypes(lst):
    """
    Checks whether a list contains only data of the same type.

    For complex types, keys, object lengths etc. are taken into account.

    :param lst: list to check
    :type lst: list
    :return: boolean result of the check
    """
    datatype = type(lst[0]).__name__
    for item in lst:
        if type(item).__name__ != datatype:  # return False if the list mixes data types
            return False

    # The data types match, but do their structures match too? (for complex data types)
    if datatype == "dict":
        keys = lst[0].keys()
        for item in lst:
            if item.keys() != keys:  # return False if the dictionaries have different keys
                return False
    elif datatype == "list":
        if sum([len(x) for x in lst]) / len(lst) != len(lst[0]):
            # return False if the inner lists differ in length
            return False
        datatypes = list(map(lambda x: type(x).__name__, lst[0]))
        for item in lst:
            if list(map(lambda x: type(x).__name__, item)) != datatypes:
                # return False if the elements of the inner lists differ in type
                return False

    return True
18,489
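Two quick checks of the helper above:

print(same_datatypes([{'a': 1}, {'a': 2}]))  # True  - same keys
print(same_datatypes([[1, 'x'], [2, 3]]))    # False - inner element types differ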
def _show_stat_wrapper_Progress(count, last_count, start_time, max_count, speed_calc_cycles,
                                width, q, last_speed, prepend, show_stat_function, add_args, i, lock):
    """Calculate the current progress statistics and forward them to `show_stat_function`."""
    count_value, max_count_value, speed, tet, ttg = Progress._calc(count,
                                                                   last_count,
                                                                   start_time,
                                                                   max_count,
                                                                   speed_calc_cycles,
                                                                   q,
                                                                   last_speed,
                                                                   lock)
    return show_stat_function(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **add_args)
18,490
def _to_base58_string(prefixed_key: bytes): """ Convert prefixed_key bytes into Es/EC strings with a checksum :param prefixed_key: the EC private key or EC address prefixed with the appropriate bytes :return: a EC private key string or EC address """ prefix = prefixed_key[:PREFIX_LENGTH] assert prefix == ECAddress.PREFIX or prefix == ECPrivateKey.PREFIX, 'Invalid key prefix.' temp_hash = sha256(prefixed_key[:BODY_LENGTH]).digest() checksum = sha256(temp_hash).digest()[:CHECKSUM_LENGTH] return base58.encode(prefixed_key + checksum)
18,491
def import_report_from_stdin():
    """Parse a report from stdin."""
    content = six.StringIO()
    for line in fileinput.input([]):
        content.write(line)
    content.seek(0)
    # A StringIO object is always truthy, so check the buffered text itself
    if not content.getvalue():
        return
    import_report_from_email(content)
18,492
def round_int(n, d): """Round a number (float/int) to the closest multiple of a divisor (int).""" return round(n / float(d)) * d
18,493
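For instance, snapping to the nearest multiple of 5:

print(round_int(47, 5))  # 45
print(round_int(48, 5))  # 50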
def process(sample_nxs_list, mt_nxs_list, parameter_yaml):
    """process a series of files using a fixed set of parameters

    This implementation just shows one way of processing a batch job,
    where the processing parameters stay fixed except for the sample
    and empty can nexus files.

    For more complex batch processing, a user could follow this example
    and implement their own methods.
    """
    import os
    from . import Context, context2kargs
    from ..getdos import getDOS
    assert len(sample_nxs_list) == len(mt_nxs_list)
    # load parameters
    params = Context()
    params.from_yaml(parameter_yaml)
    if hasattr(params, 'iqe_nxs'): del params.iqe_nxs
    if hasattr(params, 'iqe_h5'): del params.iqe_h5
    # process
    for sample_nxs, mt_nxs in zip(sample_nxs_list, mt_nxs_list):
        params.sample_nxs = sample_nxs
        params.mt_nxs = mt_nxs
        kargs = context2kargs(params)
        workdir = 'work-%s,%s' % (
            os.path.basename(sample_nxs),
            os.path.basename(mt_nxs) if mt_nxs else mt_nxs)
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        with open(os.path.join(workdir, 'log.getdos'), 'wt') as log:
            kargs['workdir'] = workdir
            print("* Processing %s, %s" % (sample_nxs, mt_nxs))
            for msg in getDOS(**kargs):
                log.write('%s\n' % (msg,))
        continue
18,494
def test_default_parameter_in_args(): """ >>> from allure_commons.utils import represent >>> allure_report = getfixture('allure_report') >>> assert_that(allure_report, ... has_test_case('test_default_parameter_in_args', ... has_step('First step', ... has_parameter('arg_param', represent(1)), ... has_parameter('kwarg_param', represent(2)), ... ) ... ) ... ) """ step_with_parameters(1, 2)
18,495
def zipdir(dir, zip_path):
    """Create a zip file from a directory.

    The zip file contains the contents of dir, but not dir itself.

    Args:
      dir: (str) the directory with the content to place in zip file
      zip_path: (str) path to the zip file
    """
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as ziph:
        for root, dirs, files in os.walk(dir):
            for file in files:
                filepath = join(root, file)
                # Store each file under its path relative to `dir`, so the
                # archive reproduces the directory's internal layout.
                ziph.write(filepath, os.path.relpath(filepath, dir))
18,496
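A throwaway usage sketch using only the standard library (paths shown are POSIX-style):

import os, tempfile, zipfile
from os.path import join

src = tempfile.mkdtemp()
os.makedirs(join(src, "sub"))
with open(join(src, "sub", "a.txt"), "w") as fh:
    fh.write("hello")

out = join(src, "..", "out.zip")   # placed outside src so it isn't zipped too
zipdir(src, out)
print(zipfile.ZipFile(out).namelist())  # ['sub/a.txt']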
def merge_array_list(arg):
    """
    Merge multiple arrays into a single array

    :param arg: lists
    :type arg: list
    :return: The final array
    :rtype: list
    """
    # Check if arg is a list
    if not isinstance(arg, list):
        raise errors.AnsibleFilterError('Invalid value type, should be array')

    final_list = []
    for cur_list in arg:
        final_list += cur_list

    return final_list
18,497
def load_callbacks(boot, bootstrap, jacknife, out, keras_verbose, patience):
    """
    Specifies Keras callbacks, including checkpoints, early stopping,
    and reducing learning rate.

    Parameters
    ----------
    boot : index of the current bootstrap/jacknife replicate, used in the
        checkpoint file name
    bootstrap : whether bootstrap replicates are being run
    jacknife : whether jacknife replicates are being run
    out : output file prefix
    keras_verbose : verbosity level passed to the Keras callbacks
    patience : epochs without improvement before training stops early

    Returns
    -------
    checkpointer
    earlystop
    reducelr
    """
    if bootstrap or jacknife:
        checkpointer = tf.keras.callbacks.ModelCheckpoint(
            filepath=out + "_boot" + str(boot) + "_weights.hdf5",
            verbose=keras_verbose,
            save_best_only=True,
            save_weights_only=True,
            monitor="val_loss",
            save_freq="epoch",
        )
    else:
        checkpointer = tf.keras.callbacks.ModelCheckpoint(
            filepath=out + "_weights.hdf5",
            verbose=keras_verbose,
            save_best_only=True,
            save_weights_only=True,
            monitor="val_loss",
            save_freq="epoch",
        )
    earlystop = tf.keras.callbacks.EarlyStopping(
        monitor="val_loss", min_delta=0, patience=patience
    )
    reducelr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss",
        factor=0.5,
        patience=int(patience / 6),
        verbose=keras_verbose,
        mode="auto",
        min_delta=0,
        cooldown=0,
        min_lr=0,
    )
    return checkpointer, earlystop, reducelr
18,498
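Typical wiring into a training loop, with made-up argument values; `model` stands for any compiled `tf.keras` model:

checkpointer, earlystop, reducelr = load_callbacks(
    boot=0, bootstrap=False, jacknife=False,
    out="run1", keras_verbose=1, patience=60,
)

# model.fit(x, y, validation_split=0.2,
#           callbacks=[checkpointer, earlystop, reducelr])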
def build_sentence_model(cls, vocab_size, seq_length, tokens, transitions,
                         num_classes, training_mode, ground_truth_transitions_visible, vs,
                         initial_embeddings=None, project_embeddings=False, ss_mask_gen=None, ss_prob=0.0):
    """
    Construct a classifier which makes use of some hard-stack model.

    Args:
      cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
      vocab_size: Size of the vocabulary
      seq_length: Length of each sequence provided to the stack model
      tokens: Theano batch (integer matrix), `batch_size * seq_length`
      transitions: Theano batch (integer matrix), `batch_size * seq_length`
      num_classes: Number of output classes
      training_mode: A Theano scalar indicating whether to act as a training model
        with dropout (1.0) or to act as an eval model with rescaling (0.0).
      ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model
        access to ground truth transitions. This can be disabled at evaluation time to
        force Model 1 (or 2S) to evaluate in the Model 2 style with predicted
        transitions. Has no effect on Model 0.
      vs: Variable store.
    """
    # Prepare layer which performs stack element composition.
    if cls is spinn.plain_rnn.RNN:
        if FLAGS.use_gru:
            compose_network = partial(util.GRULayer,
                                      initializer=util.HeKaimingInitializer())
        else:
            compose_network = partial(util.LSTMLayer,
                                      initializer=util.HeKaimingInitializer())
        embedding_projection_network = None
    elif cls is spinn.cbow.CBOW:
        compose_network = None
        embedding_projection_network = None
    else:
        if FLAGS.lstm_composition:
            if FLAGS.use_gru:
                compose_network = partial(util.TreeGRULayer,
                                          initializer=util.HeKaimingInitializer())
            else:
                compose_network = partial(util.TreeLSTMLayer,
                                          initializer=util.HeKaimingInitializer())
        else:
            assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
            compose_network = partial(util.ReLULayer,
                                      initializer=util.HeKaimingInitializer())

        if project_embeddings:
            embedding_projection_network = util.Linear
        else:
            assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
                "word_embedding_dim must equal model_dim unless a projection layer is used."
            embedding_projection_network = util.IdentityLayer

    # Build hard stack which scans over input sequence.
    sentence_model = cls(
        FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
        compose_network, embedding_projection_network, training_mode,
        ground_truth_transitions_visible, vs,
        predict_use_cell=FLAGS.predict_use_cell,
        use_tracking_lstm=FLAGS.use_tracking_lstm,
        tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
        X=tokens,
        transitions=transitions,
        initial_embeddings=initial_embeddings,
        embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
        ss_mask_gen=ss_mask_gen,
        ss_prob=ss_prob,
        connect_tracking_comp=FLAGS.connect_tracking_comp,
        context_sensitive_shift=FLAGS.context_sensitive_shift,
        context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
        use_input_batch_norm=False)

    # Extract top element of final stack timestep.
    if FLAGS.lstm_composition or cls is spinn.plain_rnn.RNN:
        # Integer division keeps the slice index an int under both Python 2 and 3.
        sentence_vector = sentence_model.final_representations[:, :FLAGS.model_dim // 2].reshape(
            (-1, FLAGS.model_dim // 2))
        sentence_vector_dim = FLAGS.model_dim // 2
    else:
        sentence_vector = sentence_model.final_representations.reshape(
            (-1, FLAGS.model_dim))
        sentence_vector_dim = FLAGS.model_dim

    sentence_vector = util.BatchNorm(sentence_vector, sentence_vector_dim, vs,
                                     "sentence_vector", training_mode)
    sentence_vector = util.Dropout(sentence_vector, FLAGS.semantic_classifier_keep_rate,
                                   training_mode)

    # Feed forward through a single output layer
    logits = util.Linear(
        sentence_vector, sentence_vector_dim, num_classes, vs,
        name="semantic_classifier", use_bias=True)

    return sentence_model.transitions_pred, logits
18,499