content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import numpy as np


def collide(ri, vi, rj, vj, box):
    """Implements collision dynamics, updating the velocities.

    The colliding pair (i, j) is assumed to be in contact already.
    Returns the updated velocities and the collisional virial.
    """
    sep = ri - rj
    sep = sep - np.rint(sep)   # minimum-image separation (box units)
    sep = sep * box            # now in sigma=1 units
    rel_vel = vi - vj          # relative velocity
    projection = np.dot(sep, rel_vel)
    delta_v = -projection * sep
    new_vi = vi + delta_v
    new_vj = vj - delta_v
    virial = np.dot(delta_v, sep) / 3.0
    return new_vi, new_vj, virial
1247411c60593b083caa2a308b53d6d6cc3911b9
48,204
def hex_dist(x1, y1, z1, x2, y2, z2):
    """Returns how many steps one hex is from another"""
    # In cube coordinates the hex distance is half the L1 distance.
    deltas = (abs(x1 - x2), abs(y1 - y2), abs(z1 - z2))
    return sum(deltas) // 2
a9a49fb956292ff91cfb582be0c0a43b227d6285
48,205
def _get_plane_coeff(x, y, z): """private method: compute plane coefficients in 3D given three points""" a = ((y[1] - x[1]) * (z[2] - x[2]) - (z[1] - x[1]) * (y[2] - x[2])) b = ((y[2] - x[2]) * (z[0] - x[0]) - (z[2] - x[2]) * (y[0] - x[0])) c = ((y[0] - x[0]) * (z[1] - x[1]) - (z[0] - x[0]) * (y[1] - x[1])) d = -(a * x[0] + b * x[1] + c * x[2]) return a, b, c, d
5bde9cda216366f2f447c44c8d90b663f5bf2d5b
48,206
def update_naive_algorithm(producer):
    """Wrap update of naive algorithm outside of `Producer.update`"""
    pending_trials = producer.experiment.fetch_noncompleted_trials()
    return producer._update_naive_algorithm(pending_trials)
8c152173d00a29b30e30208a4b9c8a2d90ef5d5e
48,209
def isAnagram(string1, string2):
    """assumes string1 and string2 are strings of alphabetical chars of any case
    returns a boolean, True if string1 and string2 are case-insensitive
    anagrams, else False
    """
    def signature(text):
        # Case-folded, sorted characters uniquely identify an anagram class.
        return sorted(text.lower())

    return signature(string1) == signature(string2)
096507f7a4378127df0bc8a6de9d539cc88cd9d7
48,210
def _get_repo_path(repo_url): """ Extracts the username/reponame from the given github URL :param repo_url: (str) Full https path to the github repository :return: (str) <username>/<reponame> """ position = repo_url.find("github.com") name = "" if position >= 0: name = repo_url[position + 11:] if name.endswith("/"): # Strip trailing slash name = name[:-1] else: if repo_url.endswith("/"): name = repo_url[:-1] return name.split("/")
108b395c596364a66675c217965bebe57df506e9
48,211
def get_drive_identification(servo, subnode=None):
    """Gets the identification information of a given subnode.

    Args:
        servo: Instance of the servo Class.
        subnode: subnode to be targeted.

    Returns:
        int, int: Product code and revision number of the targeted subnode,
        or ``(None, None)`` when the registers could not be read.
    """
    prod_code = None
    re_number = None
    try:
        if subnode is None or subnode == 0:
            # Subnode 0 holds the communication (COCO) registers.
            prod_code = servo.read('DRV_ID_PRODUCT_CODE_COCO', 0)
            re_number = servo.read('DRV_ID_REVISION_NUMBER_COCO', 0)
        else:
            prod_code = servo.read('DRV_ID_PRODUCT_CODE', subnode=subnode)
            re_number = servo.read('DRV_ID_REVISION_NUMBER', subnode)
    except Exception:
        # Best-effort read: identification registers may be absent on some
        # drives.  (The original bound the exception to an unused name.)
        pass
    return prod_code, re_number
269a6566c0086e7dc6b3fca297d2d2c163534b41
48,212
def convertBinaryStringToBytes(input):
    """Convert a binary string (e.g. "01000001") into its bytes equivalent.

    The string is consumed 8 bits at a time, most-significant bit first.
    A trailing chunk shorter than 8 bits is parsed as-is, matching the
    original behaviour.
    """
    # Build the bytes directly; the original round-tripped every byte
    # through a hex string and bytes.fromhex for no benefit.
    return bytes(int(input[i:i + 8], base=2) for i in range(0, len(input), 8))
e3c40860c5fd9926400d479fa4ccc242b25f58be
48,213
import argparse


def ArgumentParser(*args, **kwargs):
    """Creates an argument parser and adds the merge API arguments to it.

    See collect_task.collect_task for more on the merge script API.
    """
    parser = argparse.ArgumentParser(*args, **kwargs)
    # All merge-API arguments are hidden from --help output.
    hidden = {'help': argparse.SUPPRESS}
    parser.add_argument('--build-properties', **hidden)
    parser.add_argument('--summary-json', **hidden)
    parser.add_argument('--task-output-dir', **hidden)
    parser.add_argument('-o', '--output-json', required=True, **hidden)
    parser.add_argument('jsons_to_merge', nargs='*', **hidden)
    return parser
7ef601786c37e0a937f769a69b731c4e7d3c365f
48,217
def sort_values(array_in):
    """Sort the rows of an (N, 3) array ascending by |third column|.

    Args:
        array_in (np.ndarray): Array containing the values of a
            correlation matrix; column 2 holds the sort key.

    Returns:
        np.ndarray: The same array object, sorted in place.
    """
    # Insertion sort, modified to move whole rows by their |column 2| key.
    for i in range(1, len(array_in)):
        # Copy the current row out before shifting so the shifts below
        # cannot clobber it.
        key_item = array_in[i, 2]
        temp_0 = array_in[i, 0]
        temp_1 = array_in[i, 1]
        j = i - 1
        # Shift every row with a larger |key| one slot to the right.
        while j >= 0 and abs(float(array_in[j, 2])) > abs(float(key_item)):
            array_in[j + 1] = array_in[j]
            j = j - 1
        # Place the saved row into the gap exactly once, after shifting.
        array_in[j + 1, 0] = temp_0
        array_in[j + 1, 1] = temp_1
        array_in[j + 1, 2] = key_item
    return array_in
4e8d99b83e5f6a53472cc157eea6b466e5b6eeac
48,218
def model_persistence(analyse_value):
    """Persistence-model forecast: predict the value just observed.

    :param analyse_value: the value being analysed
    :return: the same value, unchanged
    """
    prediction = analyse_value
    return prediction
db342629a99a586c512b13b623dc97ceb63e4389
48,219
def nombre_joueurs_annee(annee,liste):
    """Return how many players in ``liste`` (list of dicts) played during
    year ``annee`` (int), based on each record's 'Année' field."""
    return sum(1 for joueur in liste if int(joueur['Année']) == annee)
75ae36a2cac32bd6b02a4a3ade1cfab82af8f575
48,220
import argparse


def get_args():
    """
    Get arguments from command line with argparse.
    """
    parser = argparse.ArgumentParser(
        prog='Convert_RMA_NCBI_c2c.py',
        description="""Convert a NCBI c2c file obtained from a read-count MEGAN6 RMA file into an mpa (metaphlan) and kreport (kraken) output format.""")
    # (short flag, long flag, help text) for every required option.
    required_options = [
        ("-i", "--input",
         "An NCBI 'c2c' text file obtained from a read-count MEGAN6 "
         "RMA file using the rma2info program "
         "(rma2info -i input.RMA -o NCBI.c2c.txt -c2c Taxonomy -n -r)"),
        ("-o1", "--outname1",
         "The name of intermediate output file 1, which contains taxon names "
         "(e.g., SAMPLE.names.txt)."),
        ("-o2", "--outname2",
         "The name of intermediate output file 2, which contains taxon id codes "
         "(e.g., SAMPLE.codes.txt)."),
        ("-m", "--mpa",
         "The name of the mpa formatted output file (e.g., SAMPLE.mpa.txt)."),
        ("-k", "--kreport",
         "The name of the kreport formatted output file (e.g., SAMPLE.kreport.txt)."),
        ("-r", "--readsfile", "The sample:read count txt file."),
    ]
    for short_flag, long_flag, help_text in required_options:
        parser.add_argument(short_flag, long_flag, required=True, help=help_text)
    parser.add_argument("--update", required=False, action='store_true',
                        help="Including this flag will cause NCBITaxa to update the taxonomy database.")
    return parser.parse_args()
59c1aee221d82b808f332a98c15fc5b6927b5c96
48,222
def call_rposix(func, path, *args):
    """Call a function that takes a filesystem path as its first argument"""
    unicode_path = path.as_unicode
    if unicode_path is None:
        # Fall back to the bytes form; at least one must be present.
        bytes_path = path.as_bytes
        assert bytes_path is not None
        return func(bytes_path, *args)
    return func(unicode_path, *args)
ad3f186110f751e2fef9668210abd4bf6e560d13
48,223
def off_diagonals(matrix):
    """
    Return indices for all off-diagonal elements.

    >>> numpy.mat[ numpy.offdiag( mat ) ] *= -1
    """
    rows, cols = matrix.shape
    # Enumerate every (i, j) position once, then split into the two lists.
    pairs = [(i, j) for i in range(rows) for j in range(cols) if i != j]
    idx = [pair[0] for pair in pairs]
    jdx = [pair[1] for pair in pairs]
    return idx, jdx
0cd234048f28781922a887ffbcf699b3d8bfaef2
48,224
def build_tree(depth, sen, gap=0):
    """build constituency tree from syntactic distance.

    ``depth`` gives one syntactic-distance score per token of ``sen``;
    the sentence is recursively split at tokens whose score comes within
    ``gap`` of the running maximum, yielding a nested-list parse tree.
    """
    assert len(depth) >= 0
    assert len(depth) == len(sen)
    if len(depth) == 1:
        # A single token is a leaf of the tree.
        parse_tree = sen[0]
    else:
        # The last token's score is assumed to dominate all others; the
        # maximum over the rest defines where constituents close.
        max_depth = max(depth[:-1])
        assert depth[-1] > max_depth
        parse_tree = []
        sub_sen = []
        sub_depth = []
        for d, w in zip(depth, sen):
            sub_sen.append(w)
            sub_depth.append(d)
            if d >= max_depth - gap:
                # Score within `gap` of the max closes the current span:
                # recurse on it and start accumulating a fresh span.
                parse_tree.append(build_tree(sub_depth, sub_sen, gap))
                sub_sen = []
                sub_depth = []
    return parse_tree
38b1669af6aeda2722b04d605d25a45e07a14e61
48,225
def longest_line_length(code):
    """Return length of longest line."""
    if not code:
        # Empty input has no lines at all.
        return 0
    return max(map(len, code.splitlines()))
cdf83015969293e5b6a0645bc9a99878e230696e
48,227
def count_valid(passports : list[dict[str, str]], fields : list[str]) -> int:
    """Counts the passports that have the right fields"""
    required = set(fields)
    count = 0
    for passport in passports:
        # A passport is valid when every required field is a key of it.
        if required.issubset(passport):
            count += 1
    return count
aa17d08fb77bc0d1b5346db45e8f72c8e4ad9094
48,228
def r2c(arr):
    """Convert a real array to a complex array"""
    # First half of the array holds real parts, second half imaginary parts.
    half = arr.shape[0] // 2
    real_part = arr[:half]
    imag_part = arr[half:]
    return real_part + 1j * imag_part
bab679987b37df490eb396559270bec4d9b2733e
48,231
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the `n` top sentences that match
    the query, ranked according to idf. If there are ties, preference should
    be given to sentences that have a higher query term density.
    """
    def qtd(sentence, query):
        """Query term density: proportion of terms in the sentence that
        are also in the query."""
        query = set(query)
        words = sentence.split()
        count = sum(1 for word in words if word in query)
        return count / len(words)

    # Score each sentence by the summed idf of the query words it contains.
    ranked = {}
    for sentence, wordlist in sentences.items():
        ranked[sentence] = 0
        for word in query:
            if word in wordlist:
                ranked[sentence] += idfs.get(word, 0)
    ranked = dict(sorted(ranked.items(), key=lambda x: x[1], reverse=True))
    ranked = list(ranked.items())[:n]
    # Tie breaker using query term density.
    # Bug fixes vs. the original: `right` compared the SAME sentence as
    # `left` (both used `index`), so ties were never broken; and the loop
    # bound is clamped so fewer than `n` sentences no longer raises.
    for index in range(min(n, len(ranked)) - 1):
        if ranked[index][1] == ranked[index + 1][1]:
            left = qtd(ranked[index][0], query)
            right = qtd(ranked[index + 1][0], query)
            if right > left:
                ranked[index], ranked[index + 1] = ranked[index + 1], ranked[index]
    return [item[0] for item in ranked]
0440d2bec742f88728c294e9e359e18d38a106a7
48,232
def clean_up_feature_sets(*feature_sets, earliest_date: dict, last_date: dict) -> list:
    """Leave only features from inside the observation window."""
    trimmed = []
    for frame in feature_sets:
        # Per-row window bounds, looked up by subject.
        window_start = frame.SUBJECT_ID.map(earliest_date)
        window_end = frame.SUBJECT_ID.map(last_date)
        in_window = (frame.DATE >= window_start) & (frame.DATE < window_end)
        trimmed.append(frame[in_window])
    return trimmed
06e4ac3713dce63a4237694fa6b6b0ed850216f6
48,233
def flex_select_nb(i, col, a, def_i=-1, def_col=-1, is_2d=False):
    """Select element of `a` as if it has been broadcasted."""
    # -1 means "no override": fall back to the requested position.
    row = i if def_i == -1 else def_i
    column = col if def_col == -1 else def_col
    if a.ndim == 0:
        return a.item()
    if a.ndim == 1:
        # A 1-d array stands for a single row when is_2d, else a column.
        return a[column] if is_2d else a[row]
    return a[row, column]
e8a51bc7268361c6204a5b1a8cfaca99f6dde1c0
48,234
from typing import Tuple def _make_tuple(line: str, filename: str) -> Tuple[str, str]: """Helper method for making a tuple from two args.""" return (filename, line)
cca2478caafbf4b3e1665356b8ac098f468031f0
48,235
from datetime import datetime


def str2timestamp(str_timestamp: str, date_format: str = '%d.%m.%Y %H:%M:%S') -> datetime:
    """
    Parse a string into a datetime object.

    :param str_timestamp: timestamp text to parse
    :param date_format: strptime-style format describing the text
    :return: the parsed datetime
    """
    parsed = datetime.strptime(str_timestamp, date_format)
    return parsed
fc66d55fdd7004d8ff1299cdc6adf39412701bd7
48,236
def str2bool(stuff):
    """Converts a string to a Boolean as a human would expect."""
    # Anything not recognised as an affirmative spelling counts as False.
    truthy = {"yes", "true", "y", "1"}
    return stuff.lower() in truthy
f48b8c5062ecf072c06903bfd1f9638926606811
48,237
from io import StringIO


def format_read_data(read_data, read_header):
    """Format a dictionary representation of an SFF read data as text.

    The read data is expected to be in native flowgram format.

    :param read_data: dict with keys 'flowgram_values',
        'flow_index_per_base', 'Bases' and 'quality_scores'.
    :param read_header: dict providing 'clip_qual_left' and
        'clip_qual_right' (1-based clip positions).
    :return: the formatted text block as a string.
    """
    out = StringIO()
    out.write('\n')
    out.write('Flowgram:')
    # Flowgram values are stored as ints scaled by 100.
    for x in read_data['flowgram_values']:
        out.write('\t%01.2f' % (x * 0.01))
    out.write('\n')
    out.write('Flow Indexes:')
    # Per-base flow indexes are stored as deltas; emit the running total.
    current_index = 0
    for i in read_data['flow_index_per_base']:
        current_index = current_index + i
        out.write('\t%d' % current_index)
    out.write('\n')
    out.write('Bases:\t')
    # Roche uses 1-based indexing
    left_idx = read_header['clip_qual_left'] - 1
    right_idx = read_header['clip_qual_right'] - 1
    # Bases outside the quality-clip window are written in lower case.
    for i, base in enumerate(read_data['Bases']):
        if (i < left_idx) or (i > right_idx):
            out.write(base.lower())
        else:
            out.write(base.upper())
    out.write('\n')
    out.write('Quality Scores:')
    for score in read_data['quality_scores']:
        out.write('\t%d' % score)
    out.write('\n')
    return out.getvalue()
2ddeb67a772ba594c848d2ed11f61f636e65e7c0
48,239
import os


def set_weights(ctx, parameter, val):
    """
    Input: Filename of weight file with one key-value pair per line.
    Output: Dictionary of feature : weight pairs.

    ``ctx`` and ``parameter`` are unused; the signature matches a click
    option-callback.  Malformed lines are reported and skipped.
    """
    weights = {}
    if not os.path.isfile(val):
        print("WARNING:", val, "is not a weight file. Using default weights instead.")
    else:
        # Use a context manager so the file is closed even if parsing
        # fails (the original left it open on an unexpected error).
        with open(val, mode="r", encoding="utf-8") as weight_file:
            for line in weight_file:
                line = line.strip()
                # Skip empty lines and comments.
                if not line or line.startswith("#"):
                    continue
                parts = line.split(":")
                # Not well-formed: no "feature : weight" pair at all.
                if len(parts) < 2:
                    print("WARNING: Cannot interpret line: {0}. Weight is skipped.".format(":".join(parts)))
                    continue
                # The feature name itself may contain colons; the weight
                # is always the last field.
                feat = ":".join(parts[:-1]).strip()
                weight = parts[-1].strip()
                try:
                    weight = float(weight)
                except ValueError:
                    # Weight is not a float value.
                    print("WARNING: Cannot interpret weight {0}. Feature {1} is skipped.".format(weight, feat))
                    continue
                # Overwrite existing weights.
                if feat in weights:
                    print("WARNING: Feature {0} already exists. New weight is {1}.".format(feat, weight))
                weights[feat] = weight
    return weights
08c44d09adad2270bdb9ceb5f180fc00f140e087
48,240
import os def _native_symlinks(symlinks, raise_on_error): """Create multiple symlinks using the native implementation.""" result = [] for source, link_name in symlinks: try: os.symlink(source, link_name) result.append(True) except OSError: if raise_on_error: raise result.append(False) return result
49563d85e89cd993b7f07624d573865618945203
48,243
import time


def profiling(func):
    """
    function with this decorator will print out the running time
    """
    def profiling_wrapper(*args, **kwargs):
        started = time.time()
        outcome = func(*args, **kwargs)
        elapsed = time.time() - started
        print("Function", str(func), " took", elapsed, "seconds")
        return outcome
    return profiling_wrapper
67f9a18d650c8f583d22807a387c59311efbf2d8
48,246
import socket


def fetch_url(host, url):
    """Fetch the headers and body of a URL from a given host. Useful when a
    host recognizes virtual hosts that don't actually point to it in DNS."""
    # NOTE(review): this looks like Python-2-era code — on Python 3,
    # socket.makefile() defaults to a read-only text mode, so s.write()
    # below would fail; confirm the target interpreter before reuse.
    s = socket.create_connection((host, 80)).makefile()
    # NOTE(review): HTTP specifies CRLF ("\r\n\r\n") request terminators;
    # bare "\n\n" relies on lenient servers — verify against the target.
    s.write('GET %s HTTP/1.0\n\n' % url)
    s.flush()
    # Read the entire response (headers + body) until the server closes.
    response = s.read()
    s.close()
    return response
109f879ff447248bab6a57df277c738499c9c106
48,247
def _check_if_any_list_value_in_string(listx, line): """ Internal method to test if any of the list value present in a line. """ flag = False for value in listx: if value in line: flag = True break return flag
c5461fa063c0148e4017a94c2f3540dbc3bd2b7e
48,248
def flatten_lists_to_csv(data):
    """Converts the passed in data to csv.

    Assuming:

        x = [
            ["v1", 98, 23],
            ["v2", 0.25, 0.56],
        ]

    then flatten_lists_to_csv(x) will return the following string:

        v1,v2
        98,0.25
        23,0.56

    Rows are truncated at the length of the shortest inner list.

    :param list data: A list of lists holding a flat view of the data to
        convert to csv.
    :return: A string representing the csv view of the passed in data.
    """
    # zip(*data) transposes columns to rows and stops at the shortest
    # inner list — the same result as the original IndexError-driven
    # loop, but without hanging forever when ``data`` itself is empty
    # (the original appended "" and incremented i indefinitely).
    return "\n".join(",".join(str(cell) for cell in row) for row in zip(*data))
d02297c9829dac0fda4629e69db09cceae402a7a
48,249
def get_time(key):
    """Get timestamp for orders."""
    def get_key_time(order):
        """Open time for opened orders and closed time for others."""
        # dict.get yields None when the order lacks the timestamp key.
        value = order.get(key)
        return value
    return get_key_time
232f9099a7874a5cfa9bf0bb7cfbb13b7eb4f9db
48,250
def rgb16_to_rgb24(color):
    """
    Convert 16-bit RGB color to a 24-bit RGB color components.

    :param color: An RGB 16-bit color.
    :type color: int

    :return: A tuple of the RGB 8-bit components, (red, grn, blu).
    :rtype: tuple
    """
    # Mask out the 5-6-5 bit fields first.
    red_bits = color & 0b1111100000000000
    grn_bits = color & 0b0000011111100000
    blu_bits = color & 0b0000000000011111
    # Scale each field to 0..255 (with a rounding bias), then shift the
    # result down into the low byte.
    red = round((0xFF * (red_bits + 4)) / 0x1F) >> 11
    grn = round((0xFF * (grn_bits + 2)) / 0x3F) >> 5
    blu = round((0xFF * blu_bits) / 0x1F)
    return red, grn, blu
3c914603381ca30e73369a5e36095e4a341e2efc
48,251
import re


def re_search(pattern, text, plural=False):
    """Regex helper to find strings in a body of text"""
    captures = [match.group(1) for match in re.finditer(pattern, text)]
    if plural:
        return captures
    # Singular mode: first capture, or None when nothing matched.
    return captures[0] if captures else None
66998eb3b29978260eb60603cf440f95a16eb532
48,252
import re


def merge(df, replacement_dict, columns, level='token'):
    """
    Merge values based on a dictionary of replacement in level token / word

    :param df: DataFrame whose values are rewritten.
    :param replacement_dict: mapping of {old: new} values.
    :param columns: column name or list of column names to process
        (only consulted when ``level == 'word'``).
    :param level: 'token' replaces whole cell values across the frame;
        'word' replaces underscore-delimited words inside string cells.
    :return: the modified DataFrame.
    """
    if isinstance(columns, str):
        columns = [columns]
    if level == 'token':
        # Whole-cell replacement across the entire frame.
        df = df.replace(replacement_dict)
    elif level == 'word':
        for f, r in replacement_dict.items():
            # Match f only as a whole word bounded by string start/end or '_'.
            f_pattern = re.compile('(^|_)' + f + '($|_)', re.IGNORECASE)
            # Keep the captured delimiters around the replacement.
            r_repl = lambda m: m.group(1) + r + m.group(2)
            # NOTE(review): Series.str.replace with a compiled pattern
            # requires regex=True on pandas >= 1.2 — confirm which pandas
            # version this code targets.
            for c in columns:
                df[c] = df[c].str.replace(f_pattern, r_repl)
    return df
f37e062e9817626c1fa30dc1cb64d9948a9fada8
48,253
import copy


def merge_rows(row1, row2):
    """Merge two rows of the table of CVE data"""
    # Deep-copy so the caller's row1 is never mutated.
    merged = copy.deepcopy(row1)
    for key, new_value in row2.items():
        if key not in merged:
            merged[key] = new_value
        elif merged[key] == new_value:
            continue
        elif key == 'References':
            # References are a set-like collection: union them.
            merged['References'].update(new_value)
        elif key == 'Severity':
            # 'Critical' always wins; otherwise concatenate the values.
            if merged['Severity'] == 'Critical' or new_value == 'Critical':
                merged['Severity'] = 'Critical'
            else:
                merged[key] = '{old}, {new}'.format(old=merged[key], new=new_value)
        else:
            merged[key] = '{old}, {new}'.format(old=merged[key], new=new_value)
    return merged
62e206636f4775efc2173ea6a835ec7a7c1c3d1f
48,254
def _year_to_decade(yr): """ A simple function so I don't mess this up later, this constructs the *redistricting* decade of a district. This is offset from the regular decade a year is in by two. """ return (yr - 2) - (yr - 2) % 10
69c971f422801d260bdd5151ce47165785f9a46b
48,255
def positive(number: int) -> int:
    """
    :return: Number, or 1 if number is negative or 0
    """
    if number < 1:
        return 1
    return number
82380388554c5f42096e33509424c5f67167c463
48,256
import time
import socket


def send_icmp_packet(host, packet):
    """Send a raw ICMP packet to ``host`` (requires raw-socket privileges).

    Returns the send start time, the raw socket, and the host.
    """
    send_begin_time = time.time()
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    # The port number is ignored for raw sockets; the tuple merely needs one.
    raw_sock.sendto(packet, (host, 80))
    return send_begin_time, raw_sock, host
8dde4263c98915f3e53d70edc3b0e5c2b0344715
48,259
import click


def _cb_key_val(ctx, param, value):
    """
    from: https://github.com/mapbox/rasterio/blob/69305c72b58b15a96330d371ad90ef31c209e981/rasterio/rio/options.py
    click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect
    in a dictionary like the one below, which is what the CLI function
    receives. If no value or `None` is received then an empty dictionary
    is returned.

        {
            'KEY1': 'VAL1',
            'KEY2': 'VAL2'
        }

    Note: `==VAL` breaks this as `str.split('=', 1)` is used.
    """
    if not value:
        return {}
    parsed = {}
    for pair in value:
        if "=" not in pair:
            raise click.BadParameter(
                "Invalid syntax for KEY=VAL arg: {}".format(pair)
            )
        key, raw = pair.split("=", 1)
        # Numeric coercion: note int then float, so plain integers end
        # up as floats (int("2") -> 2, then float(2) -> 2.0).
        for caster in (int, float):
            try:
                raw = caster(raw)
            except Exception:
                pass
        # Recognise boolean / null spellings for values still textual.
        if isinstance(raw, str):
            lowered = raw.lower()
            if lowered in ["true", "yes"]:
                raw = True
            elif lowered in ["false", "no"]:
                raw = False
            elif lowered in ["none", "null", "nil", "nada"]:
                raw = None
        parsed[key.lower()] = raw
    return parsed
ed06b49232921fb63f2e99269ea1d32fb1bdaa84
48,260
import torch


def mean_squared_error(dist, y, squared=True):
    """ Mean Squared Error

    Computes the MSE between targets ``y`` and the distribution's mean;
    returns the root (RMSE) when ``squared`` is False.
    """
    residual = y.ravel() - dist.mean
    mse = torch.square(residual).mean().item()
    return mse if squared else mse ** 0.5
bfc81d57076cac2090f2887833272faecf688969
48,261
import sys


def handle_file(filename):
    """handle_file takes a given filename and parses it
    it returns formatted the contents and the number of bytes
    that were in the file"""
    formatted = " "
    bytes_on_line = 0
    byte_count = 0
    with open(filename, 'rb') as handle:
        data = handle.read()
    for item in data:
        # Wrap to a fresh line every 12 bytes.
        if bytes_on_line == 12:
            bytes_on_line = 0
            formatted += "\n "
        bytes_on_line += 1
        if (sys.version_info > (3, 0)):
            formatted += "0x%02X," % item
        else:
            # Python 2: iterating bytes yields single-char strings.
            formatted += "0x%02X," % ord(item)
        byte_count += 1
    # Drop the trailing comma.
    return (byte_count, formatted[:-1])
aeb486a38525bebc4ee4f0f4b008554184c61d09
48,262
def normalize(vector):
    """
    calculates the unit-sum (L1-normalized) vector of a given vector

    :param vector: an iterable of numbers with a non-zero sum
    :return: a list of floats whose elements sum to 1
    """
    # Compute the total once: the original re-summed the vector for every
    # element (O(n^2)) and shadowed the builtin name ``int`` as its loop
    # variable.
    total = sum(vector)
    return [component / total for component in vector]
aeee06bb3132901e44732b64c4a84ae6eb6ec8da
48,263
def goodDLK_2(d,l,k) :
    """
    check for parity (= orientability), stability, and sign of
    conjugation. See more about this in documentation.
    """
    # The degenerate d == 0 case is only admissible for (l, k) == (0, 3).
    if d == 0 and (l != 0 or k != 3):
        return False
    residue = (2 * l + k - (3 * d - 1)) % 4
    return residue in (0, 3)
22d138323bb99bd7ec154136c3cfc92e42fb2d36
48,264
import argparse


def create_argparser():
    """
    setup all required arguments as in:
    parser.add_argument('-s', '--side', required=False, default=0,
                        help='some help text')

    :return: argparse dictionary
    """
    parser = argparse.ArgumentParser(description='template opendlv for python')
    # (short flag, long flag, default, help text) per option.
    options = [
        ('-f', '--frequency', 2, 'updating frequency'),
        ('-c', '--cid', 111, 'conference ID'),
        ('-n', '--name', "execution", 'logging output name'),
    ]
    for short_flag, long_flag, default, help_text in options:
        parser.add_argument(short_flag, long_flag, required=False,
                            default=default, help=help_text)
    return vars(parser.parse_args())
18845a0c00f463f227054073a23d34badc4eecea
48,265
def get_path(obj, path):
    """Fetch data from an object with nested, or return None. Avoids raising."""
    current = obj
    for step in path:
        try:
            current = current[step]
        except Exception:
            # Any lookup failure (missing key, bad index, wrong type)
            # yields None instead of propagating.
            return None
    return current
799c3a68ecdc6662ad5d2747501ff05ae93fe39b
48,266
def convert_collection_to_path_param(collection):
    """
    Convert a list of elements to a valid path parameter by concatenating
    with ",". This is used when calling some endpoints that require a
    list of instance ids.

    If the collection parameter is not of type ``list``, ``tuple``,
    ``str`` or ``int``, ``None`` is returned.

    :param collection: Some object that may be a collection
    :return: Either a comma-separated list of the elements or None if
        invalid type
    """
    if isinstance(collection, (list, tuple)):
        # Stringify each element: instance ids are commonly ints, and
        # str.join() raises TypeError on non-string items (the original
        # crashed on e.g. [1, 2, 3]).
        return ','.join(str(element) for element in collection)
    if isinstance(collection, (str, int)):
        return str(collection)
    return None
3fa52bbc39c32c5e088338b6e8f1f84ec39a3630
48,269
def cut_half(toas):
    """Selection function to split by data segment"""
    # The midpoint of the observation span separates the two halves.
    midpoint = 0.5 * (toas.max() + toas.min())
    return {"t1": toas <= midpoint, "t2": toas > midpoint}
c4c8150eee1536d6ed8f3d7a9488d04a0e36ca24
48,270
def stringifyRequestArgs(args):
    """Turn the given HTTP request arguments from bytes to str.

    :param dict args: A dictionary of request arguments.
    :rtype: dict
    :returns: A dictionary of request arguments.
    """
    def key_text(key):
        # Keys are decoded unless they are already str.
        return key if isinstance(key, str) else key.decode("utf-8")

    def value_text(value):
        # Values are decoded only when they are bytes.
        return value.decode("utf-8") if isinstance(value, bytes) else value

    return {key_text(arg): [value_text(v) for v in values]
            for arg, values in args.items()}
72bbcd3189cff036f31125a0c28b8ab497a54280
48,271
def get_digit(x):
    """
    x must be sorted.

    Returns the number of decimal places in the smallest gap between
    consecutive elements (or in the single element, if there is one).
    """
    assert len(x) > 0
    if len(x) == 1:
        smallest = str(x[0])
    else:
        gaps = [later - earlier for earlier, later in zip(x[:-1], x[1:])]
        smallest = str(min(gaps))
    if "." in smallest:
        # Count characters after the decimal point.
        return len(smallest) - smallest.index(".") - 1
    return 0
0daa601b85027f29193187c2211d845e8393b05c
48,272
import time
import logging
import os
import subprocess
import asyncio


def refine_2d_subjob(process_number, round=0, input_star_filename="class_0.star",
                     input_stack="combined_stack.mrcs", particles_per_process=100,
                     mask_radius=150, low_res_limit=300, high_res_limit=40,
                     class_fraction=1.0, particle_count=20000, pixel_size=1,
                     angular_search_step=15.0, max_search_range=49.5,
                     smoothing_factor=1.0, process_count=32, working_directory="~",
                     automask=False, autocenter=True):
    """
    Call out to cisTEM2 ``refine2d`` using :py:func:`subprocess.Popen` to
    generate a new partial set of *Refined* classes for a slice of a particle
    stack (used in parallel with other slices).

    Args:
        start_cycle_number (int): Iteration number of the classification
            (indexes from 0 in :py:func:`execute_job_loop`)
        input_stack (str): Filename of combined monolithic particle stack
        particles_per_process (int): Number of particles to classify in this job.
        process_count (int): How many processes have run before this one
            (assumes other processes have classified the same number of particles).
        mask_radius (int): Radius in Å to use for mask (default 150).
        low_res (float): Low resolution cutoff for classification, in Å.
        high_res (float): High resolution cutoff for classification, in Å.
        class_fraction (float): Fraction of particles [0-1] in a section to
            classify. Values below 1 improve speed but have lower SNR in
            final classes.
        particle_count (int): Total number of particles in dataset.
        pixel_size (int): Pixel size of image files, in Å.
        angular_search_step (float): Angular step in degrees for the classification.
        max_search_range (float): XY search range in Å for the classification.
        working_directory (str): Directory where data will output.
        automask (bool): Automatically mask class averages
        autocenter (bool): Automatically center class averages to center of mass.

    Returns:
        str: STDOUT of :py:func:`subprocess.Popen` call to ``refine2d``.
    """
    start_time = time.time()
    live2dlog = logging.getLogger("live_2d")
    # This process handles the 1-based particle range [start, stop].
    start = process_number*particles_per_process+1
    stop = (process_number+1)*particles_per_process
    if stop > particle_count:
        stop = particle_count
    # refine2d takes literal "Yes"/"No" answers on its stdin script.
    automask_text = "No"
    if automask is True:
        automask_text = "Yes"
    autocenter_text = "No"
    if autocenter is True:
        autocenter_text = "Yes"
    # The interactive answers refine2d expects, one per line, in order.
    input = "\n".join([
        os.path.join(working_directory, input_stack),  # Input MRCS stack
        os.path.join(working_directory, input_star_filename),  # Input Star file
        os.path.join(working_directory, "cycle_{0}.mrc".format(round)),  # Input MRC classes
        os.path.join(working_directory, "partial_classes_{0}_{1}.star".format(round+1, process_number)),  # Output Star file
        os.path.join(working_directory, "cycle_{0}.mrc".format(round+1)),  # Output MRC classes
        "0",  # number of classes to generate for the first time - only use when starting a NEW classification
        str(start),  # First particle in stack to use
        str(stop),  # Last particle in stack to use - 0 is the final.
        "{0:.2}".format(class_fraction),  # Fraction of particles to classify
        str(pixel_size),  # Pixel Size
        # "300", # keV
        # "2.7", # Cs
        # "0.07", # Amplitude Contrast
        str(mask_radius),  # Mask Radius in Angstroms
        str(low_res_limit),  # Low Resolution Limit
        str(high_res_limit),  # High Resolution Limit
        "{0}".format(angular_search_step),  # Angular Search
        "{0}".format(max_search_range),  # XY Search
        "{:.2f}".format(smoothing_factor),  # Tuning
        "2",  # Tuning
        "Yes",  # Normalize
        "Yes",  # Invert
        "No",  # Exclude blank edges
        automask_text,  # Automask
        autocenter_text,  # Autocenter
        "Yes",  # Dump Dat
        os.path.join(working_directory, "dump_file_{0}.dat".format(process_number+1)),
        "1",  # Max threads
    ])
    # if process_number=0:
    #     live2dlog.info(input)
    # NOTE(review): asyncio.subprocess.PIPE with the blocking
    # subprocess.Popen works (the constants are equal) but is unusual —
    # confirm subprocess.PIPE wasn't intended.
    p = subprocess.Popen("refine2d", shell=True, stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE)
    out, _ = p.communicate(input=input.encode('utf-8'))
    end_time = time.time()
    total_time = end_time - start_time
    live2dlog.info("Successful return of process number {0} out of {1} in time {2:0.1f} seconds".format(process_number+1, process_count, total_time))
    return(out)
7bc23fd6373d3bd76248418ff8ba6961a466a0c2
48,273
def find_letter(letters, l, index):
    """
    Find the first occurrence of a letter in a word after a given index.

    Searches forward in the word after the index-th letter.  If no
    matching letter is found, searches backwards for the latest
    occurrence before the index-th letter.

    :return: index of the found occurrence, otherwise -1
    """
    try:
        return letters.index(l, index + 1)
    except ValueError:
        # Backward search: operate on a reversed COPY.  The original
        # called letters.reverse() here and never restored it, leaving
        # the caller's list permanently reversed.
        rev = letters[::-1]
        try:
            # Position p in the reversed list maps to len - 1 - p in the
            # original, so this finds the latest occurrence before index.
            return len(rev) - rev.index(l, len(rev) - index) - 1
        except ValueError:
            return -1
5e681a2734996055eb6d117af14ff8b1f11e9ab3
48,275
def expected_value(win_loss_ratio, win_probability):
    """
    Calculates expected value of a bet.

    :return: Returns expected value.
    :rtype: Float
    """
    expected_win = win_loss_ratio * win_probability
    expected_loss = 1 - win_probability
    return expected_win - expected_loss
5c2d1492b20d81edd05f1f9f0981523367154f81
48,277
def seat (bpass):
    """Returns the seat ID for the given boarding pass (`bpass`)."""
    # The first 7 characters are a binary row number (B = 1), the last
    # 3 a binary column number (R = 1).
    row = 0
    for ch in bpass[0:7]:
        row = row * 2 + (1 if ch == 'B' else 0)
    col = 0
    for ch in bpass[7:]:
        col = col * 2 + (1 if ch == 'R' else 0)
    return (row * 8) + col
29b6e929798ccbd6a2c3aaf916940df75844d176
48,279
from typing import List def _break_line_in_two(line: str, max_chars_per_line: int) -> List[str]: """Breaks a line into the first line lower than max_char_per_line and the remaining string""" if len(line) <= max_chars_per_line: return [line] position = 0 while position < max_chars_per_line: new_position = line.find(" ", position) if new_position == -1 or new_position >= max_chars_per_line-1: return [line[:position-1], line[position:]] position = new_position + 1 return [line]
d7997ee8e5033ebf5b109c126181b9978a622a0e
48,281
def lockerNumDigits (lockerNum, theDictionary):
    """Adds leading zeroes as needed to lockerNum.

    The final digit count of lockerNum (leading zeroes plus the number)
    matches the digit count of len(theDictionary).

    :param str lockerNum: The locker number to ensure has sufficient digits
    :param dict theDictionary: The length of theDictionary determines the
        total number of digits.
    :except ValueError: lockerNum must be able to be represented as an int.
        Returns None otherwise.
    :return: The locker number, zero-padded when its length is within the
        target digit count; otherwise returned unchanged.
    :rtype: str or None
    """
    try:
        int(lockerNum)
    except ValueError:
        # Not representable as an int at all.
        return None
    width = len(str(len(theDictionary)))
    if 1 <= len(lockerNum) <= width:
        # rjust pads on the left with zeroes, exactly like prepending
        # "0" * (width - len).
        lockerNum = lockerNum.rjust(width, "0")
    return lockerNum
d1c887728de439d98bf1cd057a9c4f1b10666fde
48,283
def getInstanceState(inst):
    """Utility function to default to 'normal' state rules in serialization.

    Uses the object's __getstate__ hook when present, otherwise falls
    back to its instance __dict__.
    """
    if not hasattr(inst, "__getstate__"):
        return inst.__dict__
    return inst.__getstate__()
ad6f976e0d2c35c968e505f04c2dc78e72c8d812
48,284
def PM_ds_initialized_1d(PM_ds3v_initialized_1d):
    """MPI Perfect-model-framework initialized timeseries xr.Dataset."""
    # Reduce the 3-variable fixture to a single variable by dropping the rest.
    reduced = PM_ds3v_initialized_1d.drop_vars(["sos", "AMO"])
    return reduced
14cfb9eed12632763b18d931a92a3a7830b319a2
48,285
import os
import base64


def create_client_uuid():
    """Creates a valid client UUID.

    The UUID is not intended to be cryptographically random."""
    random_bytes = os.urandom(16)
    # Base32 of 16 bytes yields 26 significant chars plus "=" padding.
    encoded = base64.b32encode(random_bytes).decode("utf-8")
    return encoded.lower().rstrip("=")
e7923ccef7f05906427c4a3b58d6d9955dc11130
48,286
def get_common_label_from_labels(labels):
    """Extract the common label from a label list.

    All the labels in the label list will have the common part removed
    (``labels`` is mutated in place).

    :param labels: the list of labels to extract the common part from.
    :return: the common part of the labels (words joined by spaces, with
        a trailing space).
    """
    split_labels = []
    ret = ""
    for label in labels:
        split_labels.append(label.split(" "))
    # Walk the words of the FIRST label, keeping those present in every label.
    for word in split_labels[0]:
        in_all = True
        for split_label in split_labels:
            if not word in split_label:
                in_all = False
        if in_all:
            # NOTE(review): str.replace removes *substring* matches, not
            # whole words — a common word like "a" is also stripped out
            # of the middle of other words; confirm the label vocabulary
            # avoids this before relying on the mutated list.
            for i, label in enumerate(labels):
                labels[i] = label.replace(word, "")
            ret += word + " "
    return ret
21c6b172c4f98b7af780caa9c6fe0bd7b9e5fc0b
48,287
def has_access(user, workflow):
    """Calculate if user has access to workflow.

    :param user: User object
    :param workflow: Workflow object
    :return: True if it is owner or in the shared list
    """
    is_owner = workflow.user == user
    is_shared_with = user in workflow.shared.all()
    return is_owner or is_shared_with
d42ec8b9f7ca8dc7842b8a9950c1374ae7afcfbd
48,290
def abs_value_equal(x, y):
    """Return whether both numbers have the same absolute value.

    Implemented without ``abs``: two numbers share a magnitude exactly
    when they are equal or are negatives of one another.

    >>> abs_value_equal(-3, 3)
    True
    >>> abs_value_equal(1, 2)
    False
    >>> abs_value_equal(-6, -6)
    True
    """
    return x == y or x == -y
5047caa23e0e1fd78e8696f706c5aa8ad43b0e22
48,291
def ListVersions(client, messages, pkg, version_view):
    """List all versions under a package.

    :param client: generated API client.
    :param messages: generated proto messages module.
    :param pkg: resource name of the parent package.
    :param version_view: view enum controlling returned detail.
    :return: the list of version resources.
    """
    request = (
        messages
        .ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest(
            parent=pkg, view=version_view))
    service = client.projects_locations_repositories_packages_versions
    return service.List(request).versions
ea7b3c320817094df3bc23b05cf9cb9f18c2a7d1
48,292
def _get(d, path):
    """Get a value (or None) from a nested structure by a path list.

    Integer path steps index into lists; string steps look up dict keys.

    :param d: nested dict/list structure to walk.
    :param path: list of keys/indexes, applied in order.
    :raises LookupError: when *path* is not a list.
    :return: the value at *path*, or None when any step is missing.
    """
    if not isinstance(path, list):
        raise LookupError("The path needs to be a list")
    for step in path:
        try:
            d = d[step]
        # Bug fix: the docstring promises None for missing values and
        # supports list indexes, but only KeyError was caught before —
        # an out-of-range index (IndexError) or descending past a leaf
        # (TypeError) escaped instead of returning None.
        except (KeyError, IndexError, TypeError):
            return None
    return d
01982fbf2e0e1d16076007e1cc635ae22d29eadf
48,293
def qcVector(vectint, records):
    """Flag vector contamination to trim near contig ends.

    For every record with vector intervals, mark a 5' trim when the
    first interval starts within 100 nt of the contig start; otherwise
    mark a 3' trim when the last interval ends within 100 nt of the
    contig end.

    :param vectint: dict mapping record id -> list of (start, end)
        vector intervals, assumed already fused/sorted.
    :param records: dict mapping record id -> sequence record exposing
        a ``seq`` attribute.
    :return: dict mapping record id -> {'trim5': pos} or {'trim3': pos}
        for records needing correction.
    """
    tomodify = {}
    for name in records:
        if name not in vectint:
            continue
        intervals = vectint[name]
        seq_len = len(records[name].seq)
        fixes = {}
        if intervals[0][0] < 100:
            # Vector overlaps the 5' end: trim up to the interval end.
            fixes["trim5"] = intervals[0][1]
        elif intervals[-1][1] > seq_len - 100:
            # Vector overlaps the 3' end: trim from the interval start.
            fixes["trim3"] = intervals[-1][0]
        if fixes:
            tomodify[name] = fixes
    return tomodify
b2176213c25b71b23ae5c0e2e10c55d3fbca6bc0
48,294
def get_distances_for_elems(elems, center, condensed_distance_matrix):
    """Collect distances from *center* to each element in *elems*.

    @param elems: iterable of element indexes to measure.
    @param center: the element distances are measured from.
    @param condensed_distance_matrix: matrix supporting (i, j) lookup.
    @return: list of distances, in the order of *elems*.
    """
    return [condensed_distance_matrix[center, elem] for elem in elems]
26fe1211550ec2b057d4f810700f8fd1665f9086
48,296
import typing


def decode_byte_array(as_bytes: typing.List[int]) -> bytes:
    """Decode a list of integer byte values into a ``bytes`` object."""
    return bytes(bytearray(as_bytes))
a029272fc8ab76e169d1cc15ae7b248fac9ea719
48,297
import struct


def hex_to_32bit_IEEE754_float(hex_str):
    """Interpret a hex string as the bit pattern of a 32-bit IEEE754 float.

    :param hex_str: hexadecimal representation of the 32 raw bits,
        e.g. ``"3f800000"`` -> 1.0.
    :return: the decoded float value.
    """
    # Bug fix: pack as an *unsigned* 32-bit int. With the previous
    # signed "i" format, any bit pattern with the sign bit set
    # (>= 0x80000000, i.e. every negative float) overflowed the signed
    # range and raised struct.error.
    return struct.unpack("f", struct.pack("I", int(hex_str, 16)))[0]
961b87e7774268bcb3e55459097397d7cfd22b4c
48,299
def sort_sname(contact):
    """Sort key: contact name (case sensitive), falling back to bare JID."""
    if contact.name:
        return contact.name
    return contact.bare_jid
d8b8bef46e5fa48121885f8e99d37a4a1eab6ca5
48,300
def subsample(y, limit=256, factor=2):
    """Subsample *y* by *factor* when it is longer than *limit*.

    :param y: pandas Series to (possibly) thin out.
    :param limit: length above which subsampling kicks in.
    :param factor: integer step used for the subsampling slice.
    :return: the subsampled Series with a fresh index, or *y* unchanged.
    """
    if len(y) <= limit:
        return y
    return y[::factor].reset_index(drop=True)
dda6aa9ec00276e94be1db7354cbe3ad830f728b
48,301
def topolsplit(LFtop, name, lineindex):
    """Split a ligand topology into atomtype and moleculetype itp files.

    :param LFtop: path to the full ligand topology file.
    :param name: basename used for the generated itp files.
    :param lineindex: dict with the line numbers of the 'atomtypes',
        'moleculetype' and 'system' sections.
    :return: tuple (atomtypes itp path, moleculetype itp path).
    """
    LIGS_at = name + "_at.itp"
    LIGS_mt = name + "_mt.itp"
    # Context managers guarantee the handles are closed even on error
    # (the originals were only closed on the happy path).
    with open(LFtop, "r") as src:
        lines = src.readlines()
    # Slicing never raises IndexError, so the per-line try/except that
    # papered over out-of-range indexes is no longer needed.  Append
    # mode ("a") preserves the original accumulate-on-repeat behavior.
    with open(LIGS_at, "a") as at_file:
        at_file.writelines(lines[lineindex['atomtypes']:lineindex['moleculetype']])
    with open(LIGS_mt, "a") as mt_file:
        mt_file.writelines(lines[lineindex['moleculetype']:lineindex['system']])
    return LIGS_at, LIGS_mt
b78bec5e68e4b7f29fab2e943a8b89cc67816554
48,302
def applyFtoEachElemList2(L, f):
    """Replace each element of *L* with f(element), in place.

    :param L: list to mutate.
    :param f: one-argument function applied to every element.
    :return: the same (mutated) list.
    """
    for idx, value in enumerate(L):
        L[idx] = f(value)
    return L
1c775b591ca327c44fe46cb2538c48b2f8296325
48,303
def tag_extract(str):
    """Extract the tag (the text before the first '|') from a sequence id."""
    return str.partition('|')[0]
15502eddfa4fd5d138065649fbd9224dde3a0f55
48,304
def multiplicado_por_fator(numeros, fator):
    """Return a copy of the numbers in the list multiplied by a factor.

    For example, for numeros [8, 12, 3] and fator 2 the algorithm
    returns a new list [16, 24, 6] WITHOUT altering the *numeros* list.
    (Bug fix: this was an unimplemented stub that always returned [].)
    """
    return [numero * fator for numero in numeros]
eacd5713e151735f34c2c69b2b3a7840da97d4ba
48,307
def validate_and_format_annotator_metadata(annotator_metadata_str: str):
    """Parse 'key=K,value=V,...' pairs into a {K: V} dict.

    :param annotator_metadata_str: comma-separated alternating
        'key=...' / 'value=...' tokens to be shown to the annotator.
    :raises ValueError: when the string is not well-formed.
    :return: dict mapping each key to its value.
    """
    parts = annotator_metadata_str.split(',')
    if len(parts) < 2:
        # Bug fix: this error was previously *returned* instead of
        # raised, so malformed input went completely undetected.
        raise ValueError('Incorrect annotator metadata argument')
    annotator_metadata = {}
    for key_str, val_str in (parts[i:i + 2] for i in range(0, len(parts), 2)):
        if not (key_str.lower().startswith('key=')
                and val_str.lower().startswith('value=')):
            raise ValueError('Incorrect annotator metadata argument')
        # split('=', 1) keeps any '=' inside the key/value text intact.
        annotator_metadata[key_str.split('=', 1)[1]] = val_str.split('=', 1)[1]
    return annotator_metadata
e28b70650ecf0d106b8073158e7e31237f8e66ea
48,308
from typing import Union

import torch


def AvgPoolNd(
    dim: int,
    kernel: int,
    stride: Union[None, int] = None,
    dtype=None,
):
    """Build an N-dimensional average pooling layer.

    Args:
        dim (int): Spatial dimensionality (1, 2 or 3).
        kernel (int): Kernel size.
        stride (int, optional): Stride; None lets torch default it.
        dtype (dtype): Unused; kept for interface compatibility.

    Returns:
        object: the ``torch.nn.AvgPool{dim}d`` layer.
    """
    layer_cls = getattr(torch.nn, "AvgPool{}d".format(dim))
    return layer_cls(kernel_size=kernel, stride=stride, padding=0)
5488f87b9f371bba9bd5de4f29a2e04e799825db
48,309
def logout_user(_):
    """Log the user out; returns an empty JSON body and HTTP 200."""
    body = {}
    return body, 200
edb75ddc32f905c62600ef7706ccd060d02f466c
48,310
import os


def ftp_path_join(*paths):
    """Join path segments with forward slashes for FTP use.

    Uses ``os.path.join`` then normalizes any Windows backslash
    separators to '/'.
    """
    return os.path.join(*paths).replace("\\", "/")
32a221be68d63985c1fa66b1c0e461728e26d6ce
48,313
import os


def __get_tests__():
    """Return the contents of every './tests/test*' file as a list of strings."""
    filenames = [file for file in os.listdir('./tests') if file.startswith('test')]
    tests = []
    for fn in filenames:
        # The context manager already closes the handle; the previous
        # explicit f.close() inside the with-block was redundant.
        with open('./tests/{}'.format(fn), 'r') as f:
            tests.append(f.read())
    return tests
79ecbbf0701398954db69bee5966fa38961bdb9f
48,314
from typing import Any


def default_serialize(value: Any) -> str:
    """Serialize *value* to a string with sensible defaults.

    ``str`` passes through unchanged; ``bool``/``int``/``float`` use
    ``str()``; ``None`` becomes ``""``; anything else uses ``repr()``.
    """
    if value is None:
        return ""
    if isinstance(value, str):
        return value
    if isinstance(value, (bool, int, float)):
        return str(value)
    return repr(value)
fdb96224b765cd2eef8bb33bd04b8bc6f9a6510e
48,315
def tuple_(*args):
    """:yaql:tuple Returns tuple of args.

    :signature: tuple([args])
    :arg [args]: chain of values for tuple
    :argType [args]: chain of any types
    :returnType: tuple

    .. code:: yaql

        yaql> tuple(0, [], "a")
        [0, [], "a"]
    """
    result = tuple(args)
    return result
54474e0230e3821903d60f441c698cb73beec596
48,316
def conv_biases_converter(conv_biases):
    """Convert torch convolutional biases for the numpy model.

    Each bias tensor is converted to a (1, 1)-shaped numpy array; the
    converted arrays are returned in input order.
    """
    return [bias.numpy().reshape(1, 1) for bias in conv_biases]
3c3fcc5dfdf8509e85cdf3f95f0cbae4918f711f
48,319
import re


def get_web_element_attribute_names(web_element):
    """Return the attribute names found in a web element's outer HTML."""
    outer_html = web_element.get_attribute("outerHTML")
    # Attribute names look like name= / name-part= / name_= (lowercase).
    attr_pattern = """([a-z]+-?[a-z]+_?)='?"?"""
    return re.findall(attr_pattern, outer_html)
33aa9d0ab94525e6c68581aa4ce45c457e269bc4
48,320
import torch


def sort_rows(m, n_rows):
    """Sort an N*M matrix column-wise via top-k.

    Args:
        m (torch.Tensor): N*M matrix.
        n_rows (int): number of rows to keep per column, ordered
            largest-first.

    Returns:
        torch.Tensor: n_rows*M matrix whose columns hold their largest
        values in descending order.
    """
    transposed = m.transpose(1, 0)
    top_vals, _ = torch.topk(transposed, k=n_rows)
    return top_vals.transpose(1, 0)
c8c27cc05302d55750e54600bbebe1726939dd83
48,321
from typing import Any


def _isstr(value: Any) -> bool:
    """Check whether *value* is stringlike or a (nested) iterable of
    stringlikes."""
    if isinstance(value, (str, bytes)):
        return True
    if not hasattr(value, "__iter__"):
        return False
    return all(_isstr(item) for item in value)
a1232c628f3a174297e4db03f239673c8b11bd16
48,322
def c_to_f(temperature):
    """Convert a temperature from Celsius to Fahrenheit.

    Args:
        temperature: temperature in Celsius, or None.

    Returns:
        Temperature in Fahrenheit, or None when the input is None.
    """
    # Keep the original `* 9 / 5` evaluation order for bit-identical
    # float results.
    return None if temperature is None else (temperature * 9 / 5) + 32
1606e0f64fb5ebe8146c63a3a78d3b47fbaf9871
48,323
def geo_features(geo):
    """Extract a list of features from several geo input shapes.

    Accepts an object exposing ``__geo_interface__``, a mapping with a
    'features' key, or an already-extracted feature list.
    """
    try:
        features = geo.__geo_interface__
    except AttributeError:
        features = geo
    try:
        # Lists raise TypeError on a string index and pass through
        # unchanged; dicts missing 'features' still raise KeyError,
        # matching the original behavior.
        return features['features']
    except TypeError:
        return features
f9a251eeb4be25423768f9ba9fd25deee7af8f6a
48,326
import torch


def pixel_wise_label(x):
    """Re-scale an image in [0, 1] to int64 labels in [0, 255].

    :param x: input image tensor with values in [0, 1].
    :returns: label tensor.
    :rtype: torch.Tensor
    """
    assert x.max() <= 1 and x.min() >= 0, \
        "pixel-wise label generation required x in [0, 1], is [{}, {}]".format(x.min(), x.max())
    scaled = x * 255.0
    return scaled.type(torch.int64)
0996d8cd90f45b43ab75e4b5b49b6cf13d78803d
48,327
import torch


def adversarial_loss(prob, label):
    """Compute adversarial losses in GAN networks.

    Note:
        As a reminder, prob is either D(x) or D(G(z)), and label is
        either 0 (fake) or 1 (real). With BCELoss(), this means that

        - l(D(x), 1) = -log(D(x)): "Real" discriminator loss
        - l(D(x), 0) = -log(1 - D(x)): "Fake" discriminator loss
        - l(D(G(z)), 1) = -log(D(G(z))): Non-saturating generator loss

    Args:
        prob: Discriminator output, in interval [0, 1]
        label: Data label, with fake = 0 and real = 1
    """
    # NOTE(review): BCEWithLogitsLoss applies a sigmoid internally, so it
    # expects raw *logits*, while the docstring above describes
    # probabilities in [0, 1] (BCELoss semantics).  Confirm whether
    # callers pass logits or probabilities — one of the two is wrong.
    return torch.nn.BCEWithLogitsLoss()(prob, label)
1eb7eb8e5d82a354e4272fa5de57d67f30ceca92
48,328
def count_containers(log_file):
    """Parse agent logs for warm-container counts over time.

    Args:
        log_file (str): path to the agent log file.

    Returns:
        tuple: (list of warm-container counts, list of total-container
        counts), one entry per matching log line.
    """
    warm_counts = []
    total_counts = []
    with open(log_file, "r") as handle:
        tokenized = [line.strip().split() for line in handle]
    for tokens in tokenized:
        # The message text starts after timestamp/level/module fields.
        message = ' '.join(tokens[3:])
        if "num warm containers" in message.lower():
            warm_counts.append(int(tokens[-2]))
            total_counts.append(int(tokens[-1]))
    return warm_counts, total_counts
5194b79ac0773fb5b93de041e8565a87c4942c2e
48,330
import os
import subprocess


def cli_call(arg_list, expect_success=True, env=None):
    """Execute a CLI command in a subprocess and return the results.

    Args:
        arg_list: list of command arguments.
        expect_success: use False to return even if the command failed.
        env: environment mapping for the subprocess; defaults to a
            fresh copy of the current environment.

    Returns:
        (bytes, bytes, int): stdout, stderr, return code.

    Raises:
        subprocess.CalledProcessError: when the command fails and
            expect_success is True.
    """
    # Bug fix: the default was `env=os.environ.copy()`, evaluated once
    # at import time — later changes to os.environ were never seen.
    # Copy lazily, per call, instead.
    if env is None:
        env = os.environ.copy()
    p = subprocess.Popen(arg_list, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    output, error = p.communicate()
    if p.returncode != 0:
        if output:
            print("Output:\n" + str(output))
        if error:
            print("Error Message:\n" + str(error))
        if expect_success:
            raise subprocess.CalledProcessError(p.returncode, arg_list, output)
    return output, error, p.returncode
87eb25b78579cab847cd367b572d06c7e2821eb9
48,332
def execute_coroutine(coroutine):
    """Execute a coroutine without an event loop.

    The coroutine is a generator that yields asyncio.Future-like
    objects.  Since there is no loop instance to enter, each yielded
    value may need to be mocked before executing the coroutine, to
    avoid blocking situations, e.g.::

        with mock.patch('coro.yielded.metod.Function'):
            result = execute_coroutine(coro)

    Although this helper is thought to work with coroutined generators,
    it should work with any iterator.

    @param coroutine: an iterable, e.g. an asyncio.coroutine decorated
        method yielding something.
    @returns the return value of the coroutine, or None if nothing is
        returned
    """
    try:
        # Advance the generator to its first yielded future.
        future = next(coroutine)
        # NOTE(review): this busy-waits on future.done() and never
        # resumes the generator past the *first* yield, so it only
        # produces a return value when the generator completes on the
        # initial next() (raising StopIteration immediately) — confirm
        # this matches the intended single-mocked-future usage.
        while not future.done():
            pass
    except StopIteration as exc:
        # PEP 380: a generator's return value travels on
        # StopIteration.value.
        if hasattr(exc, 'value'):
            return exc.value
acc734bfe1a940cfd9d4b664bc19909b37622040
48,334
def create_vocab(docs):
    """Create a word -> index vocabulary over a set of documents.

    :param docs: iterable of documents, each an iterable of words.
    :return: dict mapping each distinct word to an integer index.
    """
    words = set()
    for doc in docs:
        words.update(doc)
    return {word: index for index, word in enumerate(list(words))}
e02a5b6d1c8eb18ef6fbd279b3ecd7f0b62d1107
48,335
def get_P(tasks):
    """Compute the hyperbolic-bound (HB) product for a periodic task set.

    Parameters
    ----------
    tasks : list of pSyCH.task.Periodic
        Periodic tasks; each must expose computation time ``c`` and
        period ``t``.

    Returns
    -------
    float
        The HB product, rounded to 3 decimal places.
    """
    product = 1
    for task in tasks:
        product = product * (1 + task.c / task.t)
    return round(product, 3)
e2583bcd0abf65f3948c45c14603d5b8942743f4
48,336
def get_outdir_min(outdir_min: str) -> int:
    """Return the disk requirement converted from GiB to MiB.

    Handles an int-like string, optionally wrapped in double quotes,
    with no unit suffix.
    """
    if '"' in outdir_min:
        # Strip the quoted wrapper: keep the text between the quotes.
        start = outdir_min.find('"') + 1
        outdir_min = outdir_min[start:-1]
    return int(float(outdir_min.strip()) * 1024)
08043de2675eb7c285943c27ff09df37e4007910
48,337
def partition(length, parts):
    """Split *length* into *parts* near-equal integer pieces.

    The first ``length % parts`` pieces receive one extra unit.
    """
    base, remainder = divmod(length, parts)
    return [base + 1 if i < remainder else base for i in range(parts)]
d398b104ed434d076244b00ad4dc8876821e3e75
48,339
import string


def make_words_dict(fname):
    """Build a word-frequency dict from a file.

    Each line is split into words (hyphens treated as spaces); words
    are stripped of surrounding whitespace/punctuation and lower-cased.

    :param fname: path of the text file to read.
    :return: dict mapping each word to its occurrence count.
    """
    counts = {}
    strip_chars = string.whitespace + string.punctuation
    # Bug fix: use a context manager — the original opened the file and
    # never closed it (resource leak).
    with open(fname) as fin:
        for line in fin:
            for raw in line.replace('-', ' ').split():
                word = raw.strip(strip_chars).lower()
                counts[word] = counts.get(word, 0) + 1
    return counts
4bafc7ebeec6e16b40fd2c70ec41ab663d9ab07e
48,340
def add_scenario(df, ax, label='', addline=True, fillcolor='0.6',
                 linecolor='k', linestyle='-'):
    """Plot a mean +/- std scenario band, optionally with the mean line.

    :param df: frame exposing ``index``, ``lower``, ``upper`` and a
        ``'mean'`` column.
    :param ax: matplotlib axes to draw on.
    :param label: legend label for the mean line.
    :param addline: when True, draw the mean line on top of the band.
    :return: the same axes, for chaining.
    """
    ax.fill_between(df.index, df.lower, df.upper, color=fillcolor)
    if not addline:
        return ax
    ax.plot(df.index, df['mean'], lw=3, linestyle=linestyle,
            color=linecolor, label=label)
    return ax
6b7d4e6fc620a346bd5643b6fe41f06d656490ad
48,341
import re


def parse_checksum_row(row):
    """Parse one line of pt-table-checksum output.

    Args:
        row: a line of text from pt-table-checksum.

    Returns:
        Tuple of (ts, errors, diffs, rows, chunks, chunks_skipped,
        elapsed_time, db, tbl) when the line matches, otherwise None.

        Ex: ('08-30T06:25:33', '0', '0', '28598', '60', '0', '0.547',
             'pbdata04159', 'userstats')
    """
    pattern = re.compile(
        r"^(\d+-\d+T\d+:\d+:\d+)\s+(\d+)\s+(\d+)\s+"
        r"(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+)\s+"
        r"(.+?)\.(.+)$")
    match = pattern.match(row)
    return match.groups() if match else None
4f239ba582c07a7135d00e7078ec578dcd13de83
48,342