content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def operations_get_supported_notification_types_post(): # noqa: E501 """operations_get_supported_notification_types_post # noqa: E501 :rtype: TapiNotificationGetSupportedNotificationTypes """ return 'do some magic!'
964787f6c28e8e7ae5ad83db0601b119902061b7
36,207
import torch def check_cuda(): """ Checks if cuda device is available returns device """ device_type = "cpu" if torch.cuda.is_available(): print("NOTICE: You have a CUDA device. Running with CUDA") device_type = "cuda" device = torch.device(device_type) return device, device_type
aa6d2b45fb1ce8a57deede9aa8a6b4fc2e0368b5
36,208
def to_degrees(dir, value): """ convert the GPS coordinates stored in the EXIF to degress in float format :param value: tuples of DMS :param dir: direction E/N/W/S """ d = float(value[0][0]) / float(value[0][1]) m = float(value[1][0]) / float(value[1][1]) s = float(value[2][0]) / float(value[2][1]) w = 1 if dir in ('E', 'N') else -1 return w * (d + (m / 60.0) + (s / 3600.0))
3cb60e15049bf3c538d8dc46fae92b123e4cafd5
36,210
def instance_module_default_style(module_name, inst_name, ports): """ Connect in the same order as declared in the module :param module_name: :param inst_name: :param ports: :return: """ # Instance the module disp_str = "\n{} {} (\n".format(module_name, inst_name) for port in ports: disp_str = disp_str + " .{0:42}({0}),\n".format(port.name) disp_str = disp_str[:-2] + "\n);\n" return disp_str
bef2ed91863d6c5628e4378f407e9f39ec08d964
36,211
import math def distance(x1, y1, x2, y2): """Get euclidean distance between two points""" return math.sqrt(math.pow(abs(x1-x2), 2) + math.pow(abs(y1-y2), 2))
722600df2cba61443a0663e4076c58f5522ea7b1
36,212
def getLinks(info='site.info'): """look in the info, check for files, and make a list of links""" infoFile = open(info) links = infoFile.readlines()[1:] links = [link.strip() for link in links] linkInfo = [[item.strip() for item in name.split(',')] for name in links] infoFile.close() return(linkInfo)
9ad91a596a09fca866bb6b7f8a9047eb8680619a
36,214
def unknown_pairs_present(model): """ Test if PDB file contains unknown type pairs Parameters: model (obj): model object Returns: (bool): True if PDB file contains unknown type pairs """ grm = model.get_restraints_manager() sites_cart = model.get_sites_cart() site_labels = model.get_xray_structure.scatterers().extract_labels() pp= grm.pair_proxies(sites_cart=sites_cart,site_labels=site_labels) return (pp.nonbonded_proxies.n_unknown_nonbonded_type_pairs != 0)
f826a41f2e62436b68a3ed9ab15d15b5a8335252
36,215
import re def hashtag_token(doc): """Create a hashtag token by merging the # token and relevant text""" indexes = [m.span() for m in re.finditer(r'#\w+', doc.text, flags=re.IGNORECASE)] for start, end in indexes: doc.merge(start_idx=start, end_idx=end) pattern = re.compile(r'#\w+') for token in doc: if bool(re.match(pattern, token.text)): token._.is_hashtag = True return doc
dfd1d9bb7d25178b618a6fc024d30cee0bb58a64
36,216
import hashlib def gen_hash_filename(filename): """Generates an hashed filename as an unique picture ID param filename: Filename that schould be hashed """ return hashlib.sha224(str(filename).encode("utf-8")).hexdigest() + ".jpg"
a46d2828fd6eaa980e0562319be6f1eb75bc9a0d
36,217
def get_list_query_cond(type: str, val: list, query_params: dict): """ Returns the entered list of strings as part of an SQL condition on the AHJ table of the form: (AHJ.`type` = 'val1' OR AHJ.`type` = 'val2' OR ... ) AND """ if val is not None and len(val) != 0: or_list = [] for i in range(len(val)): param_name = f'{type}{i}' query_params[param_name] = val[i] or_list.append('AHJ.' + type + '=%(' + param_name + ')s') ret_str = '(' + ' OR '.join(or_list) + ') AND ' return ret_str return ''
89a0cc8e639a28929c19df1cda5b483b526f72d4
36,218
def _quote_tag_if_needed(tag: str) -> str: """Quotes tags just like timewarrior would quote them. Args: tag: The tag that should be quoted. Returns: The quoted tag. """ if tag[0] == '"' or tag[0] == "'": return tag special_chars = [" ", '"', "+", "-", "/", "(", ")", "<", "^", "!", "=", "~", "_", "%"] if any(char in tag for char in special_chars): return f'"{tag}"' return tag
bfcb2064dae08c758d9c75eac3abe371854ea62b
36,219
def largest_prime_factor(input_num): """ Function returns the largest prime factor of an input. REQ: input_num >= 0 and whole :param input_num: {int} is original input number :return: {int} the largest prime factor of the input or {NoneType} if input_num < 2 """ # if input is less than 2, there are no prime factors less than 2, return None if input_num < 2: return None # set current lowest prime to lowest prime factor (2) curr_lpf = 2 # loop while our current input is greater than our current lpf while input_num > curr_lpf: # if division results in whole number, divide input by curr_lpf and reset to 2 if input_num % curr_lpf == 0: input_num = input_num // curr_lpf curr_lpf = 2 # else move onto next largest factor else: curr_lpf += 1 return curr_lpf
507c36186f29ba96ec842dce0b3c7c76cca89d9e
36,222
def get_volumes(self): """ Documentation: --- Description: Associated each EBS volume object with its Name tag. --- Returns: volumes : dictionary Dictionary where the keys are the Name tags and the values are the EBS volume objects. """ # return dictionary containing EBS volume ID / EBS volume pairs raw_volumes = self.get_raw_volumes() # return dictionary containing EBS volume ID / Name tag pairs volume_name_tags = self.get_volume_name_tags() # gather EBS volume object for each Name tag volumes = {} for volume_id, volume in raw_volumes.items(): try: volumes[volume_name_tags[volume_id]] = volume except KeyError: continue return volumes
0756ee63412982d94cfe26b3b5f4e39176220b84
36,223
def chose_examples(labels, label_set=None, number=1): """Choses n example of each label. """ if label_set is None: label_set = set(labels) out = [] for l in label_set: start = -1 for _ in range(number): start = labels.index(l, start + 1) out.append(start) return out
5c7adef3a9a93e37f7ff2849955696b082f4545f
36,225
def get_fastqs_for_read_index(lane_to_fastqs, read_index): """Get a list of fastq urls for the given read index Args: lane_to_fastqs (Dict[Dict[str, str]]): dict of dict mapping each lane to a dict of read index to fastq url read_index (str): the read_index to filter by, e.g. "read1", "read2", or "index1" Returns: fastq_urls (list): list of fastq urls """ lanes = sorted(lane_to_fastqs.keys()) fastq_urls = [] for lane in lanes: if read_index in lane_to_fastqs[lane]: manifest_entry = lane_to_fastqs[lane][read_index] fastq_urls.append(manifest_entry.url) return fastq_urls
2f81d5f4457bb635b47ddcf85c8d8acca0eccbb2
36,227
from pathlib import Path import torch def load_pretrained(vae, path, load_predictor): """Load a previously trained model, and optionally ignore weights/bias for predictor""" load_path = Path(path) state = torch.load(load_path) if "epoch" in state.keys(): print(f"Loading model from epoch {state['epoch']}") state_dict = state["state_dict"] else: state_dict = state if not load_predictor: state_dict = {k: v for k, v in state_dict.items() if "predictor" not in k} mismatch = vae.load_state_dict(state_dict, strict=False) print("Missing keys:", mismatch) return vae
20ea5b631b56078732da4c3631e614e53d37ac19
36,228
def determine_best_contact(contacts): """ gibt Kontakt mit höchster übertragener Datenmenge aus übergebener Menge zurück :param contacts: list / set / ... :return: Contact """ current_best_data = 0 best_contact = None for contact in contacts: current_data = contact.get_data() if current_data > current_best_data: current_best_data = current_data best_contact = contact return best_contact
5ce0ccb4c6489ca545983adf78cead03311aae03
36,229
import inspect def distprops(dist): """return a list of a distribution's properties""" return [ p for p in (set(inspect.getfullargspec(dist.__init__).args) - {"self"}) if hasattr(dist, p) ]
b0904462faddcae1b2d03cf7c6cf967bb2215364
36,230
def find_key(d: dict, key: str, default: None): """ Search for the first occurence of the given key deeply in the dict. When not found is returned the default value """ if key in d: return d[key] for k, v in d.items(): if isinstance(v, dict): item = find_key(v, key, default) if item is not None: return item return default
8f3741a99da6eb3d7989089e460cb7a5eaa4554a
36,231
def polygon_trans(p): """ :param p: polygon list with dict("lat": v1, "lng": v2) as elements :return: polygon list with (v_lat, v_lng) as elements """ new_p = [] for point in p: new_p.append((point["lat"], point["lng"])) return new_p
86000bc8f94a79060ad0321808d13fbc7296be6b
36,232
import os def ListFile(FilePath, extensions): """ 给定文件夹的路径和要提取的文件扩展名,返回一个文件列表 """ Files = [] filenames = os.listdir(FilePath) for file in filenames: AbsolutePath = os.path.abspath(os.path.join(FilePath, file)) # 文件的绝对路径 if os.path.splitext(file)[1] == extensions: # os.path.splitext分离文件名和扩展名 Files.append(AbsolutePath) return Files
592f44c1fa48eac32a2564e11de775f24b74c252
36,234
def _buffer_if_necessary(shape): """Fix the basins shapes which are invalid. Following the advice given here: https://github.com/Toblerity/Shapely/issues/344 """ if not shape.is_valid: shape = shape.buffer(0.0) assert shape.is_valid return shape
f7646b8a909f9790f16e672e8de5b479402a5b5d
36,235
def get_stats(data): """ Returns some statistics about the given data, i.e. the number of unique entities, relations and their sum. Args: data (list): List of relation triples as tuples. Returns: tuple: #entities, #relations, #entities + #relations. """ entities = set() relations = set() for triple in data: entities.add(triple[0]) entities.add(triple[2]) relations.add(triple[1]) return len(entities), len(relations), len(entities) + len(relations)
6496db5be15b330de84345d862a66a16c0ebc969
36,236
def format_pylint_disables(error_names, tag=True): """ Format a list of error_names into a 'pylint: disable=' line. """ tag_str = "lint-amnesty, " if tag else "" if error_names: return u" # {tag}pylint: disable={disabled}".format( disabled=", ".join(sorted(error_names)), tag=tag_str, ) else: return ""
de7355f51fc20ba174f5f8db852c9254c016fa75
36,237
def full_bed_location(): """returns path to example full spec bed file""" return "tests/test_data/full_bed.bed"
f144f7b08a33eb6d37a61f164098f5450ced3fbc
36,238
def default_permission(permissions): """ This permission should be used as the default (e.g. all guest, all media)""" for p in permissions: if p.name == 'Default': return p
40ba54a080c5d44ba6e8f9c662f0b93eb1a7d4e5
36,241
async def u2_get_device_display_h_and_w(d) -> tuple: """ u2获取设备的高跟宽 :param d: eg: u2 d :return: """ device_height = d.device_info.get('display', {}).get('height') device_width = d.device_info.get('display', {}).get('width') return device_height, device_width
0682600c93eebf2bfafd3d1915ef831c42fd3d7d
36,242
def sanitize_dict(value): """Recursively remove special characters from dictionary keys. The special characters are . and $, eg the special characters for MongoDB. Note that, in case of key collision after the removal of special characters, the common key will be updated with the value of the renamed key. """ if isinstance(value, dict): for key in value.keys(): try: cleaned = key.translate({ord(c): None for c in '$.'}) except TypeError: cleaned = key.translate(None, '$.') # Note that translate() signature changes with key # type but one # # can't use isinstance since, in Python3, # key may be of type str or bytes; In Python2 it # will # be unicode or str. value[cleaned] = sanitize_dict(value.pop(key)) elif isinstance(value, list): for item in value: sanitize_dict(item) return value
1322f413e2d078db84448c3ab02c640a535164ca
36,244
def is_jar_library(target): """Returns True if the target is an external jar library.""" return target.has_label('jars')
020d389e3bc71723a21d948f43debaa274b0de39
36,245
def myfuncMean( TheListOfvalues ): """ This function computes and returns: 1.- The mean of a list holding any set of values. 2.- A message regarding whether the mean is or not a whole number. To call this function do: thevalues = [1,2,3,4,5] meanval, message = myfunc_mean( thevalues ) """ NumberOfValues = 0 TheAdding = 0 for newValue in TheListOfvalues: NumberOfValues = NumberOfValues + 1 TheAdding = TheAdding + newValue if (TheAdding % NumberOfValues) == 0: TheMean = TheAdding//NumberOfValues mesg = 'The mean of the values is the whole number = {0}'.format(TheMean) else: TheMean = TheAdding/NumberOfValues mesg = 'The mean of the values IS NOT a whole number = {0}'.format(TheMean) return TheMean, mesg
1276f97d42161b5a4b8ddb882660ce97fdaae030
36,247
def is_zipfile(filename): """Quickly see if file is a ZIP file by checking the magic number. Will not accept a ZIP archive with an ending comment. """ try: fpin = open(filename, "rb") fpin.seek(-22, 2) # Seek to end-of-file record endrec = fpin.read() fpin.close() if endrec[0:4] == "PK\005\006" and endrec[-2:] == "\000\000": return 1 # file has correct magic number except IOError: pass
3d5e36ca26cddc1142063fb914453c59053e67c5
36,249
from pathlib import Path def get_expect_file(sql_file: Path) -> Path: """ Returns the csv file with the expected results for a sql file. """ if ( str(sql_file) == "" or sql_file.stem == "" or sql_file.suffix == "" or sql_file.suffix.lower() not in (".sql") ): return Path() return sql_file.parent / (sql_file.stem + ".csv")
a110787038fe149a7282beb8830f620fe41ac23a
36,250
import itertools def union(first, *args, **kwargs): """Takes a list of dictionaries and performs union of each. Can take additional key=values as parameters to overwrite or add key/value-pairs. No side-effects,""" new = first.__class__() for item in itertools.chain([first], args, [kwargs]): new.update(item) return new
10e8dee9ec8c0345f2a3e3c50d6fd0221d23106d
36,251
def binaryFilesEqual(fn1, fn2): """True if two files are bytewise identical.""" with open(fn1, "rb") as f1, open(fn2, "rb") as f2: for byte1, byte2 in zip(f1, f2): if byte1 != byte2: return False return True
e1d71d0a8ed51eb85d4e3deae11f46730bd51d51
36,252
def score_char_overlap(term1: str, term2: str) -> int: """Count the number of overlapping character tokens in two strings. :param term1: a term string :type term1: str :param term2: a term string :type term2: str :return: the number of overlapping ngrams :rtype: int """ num_char_matches = 0 for char in term2: if char in term1: term1 = term1.replace(char, "", 1) num_char_matches += 1 return num_char_matches
8bda41b2babffcc55b27831bced476b6d9a77eb5
36,253
from typing import Iterable from typing import Callable def set_validator(valid_set: Iterable[str]) -> Callable: """Return set validator for given iterable of strings. Parameters ----------------- valid_set: Iterable[str], Iterable of lists that compose the valid set. Returns ----------------- Callable for validating the set. """ def wrapper(value: str) -> bool: return value in valid_set return wrapper
1e825f8bd4c26f8eb231a3de23dbc5cd0a207e34
36,254
def cleanup_value(value): """Try and convert the given value to a float.""" value = value.strip() try: return float(value) except Exception: return value
364bfd0eef63be1220c5f73c42d0c3176f18e851
36,255
def clean_command_type(text: str) -> str: """Remove parents from the command type""" text = text.replace("Command.", "") text = text.replace("CommandType.", "") return text
ca6b1a8ee0a3ee87487c5901413ad141c6a97ff2
36,257
import tempfile import getpass def get_default_session_filename(username: str) -> str: """Returns default session filename for given username.""" dirname = tempfile.gettempdir() + "/" + ".instaloader-" + getpass.getuser() filename = dirname + "/" + "session-" + username return filename.lower()
8f8e9415f5151088a55144e9c3e6f0a9608e1dba
36,258
import re def remove_html_tags(s, pad=' '): """ From http://stackoverflow.com/a/12982689/610569 """ return re.sub('<.*?>', pad, s)
4022e3742e9abb7f209fe72b398ad9e2ba190f08
36,259
import re def parse_points(points_str): """Parses the points specification for polyline and polygon elements (SVG 1.1, 9.7.1). """ # Treat #-# as # -# points_str = points_str.replace('-', ' -') return [float(s) for s in re.split("[\x20\x09\x0D\x0A]+|[,]", points_str) if s != ""]
b2b29ffcf9e240ea06ca75d55f2595416b75963d
36,260
def ccw(A, B, C, dim): """ Check if the points are listed in counter-clockwise order. """ if dim == 2: # only 2 dimensions return((C[:, 1] - A[:, 1]) * (B[:, 0] - A[:, 0]) > (B[:, 1] - A[:, 1]) * (C[:, 0] - A[:, 0])) else: # dim should be equal to 3 return((C[:, 1, :] - A[:, 1, :]) * (B[:, 0, :] - A[:, 0, :]) > (B[:, 1, :] - A[:, 1, :]) * (C[:, 0, :] - A[:, 0, :]))
ee23a25016ebaa8badcef10b62e6543d5b2f8bba
36,262
def group_type_object_factory(group_type_name): """Cook up a fake group type """ group_type = { 'name': group_type_name } return group_type
559cd9723b3091454b8655d0631b354d8d8c6755
36,264
import sqlite3 import os def open_sqlite3_db(dir): """Open the sqlite3 database contained in dir. We use "data.sqlite3".""" return sqlite3.connect(os.path.join(dir, 'data.sqlite3'))
c19c12fb52751036abbc0fee014a6102798e0e01
36,265
import os import json def get_sent_list(sent_file): """Return the list of (name, email) tuples that we've already sent to.""" results = [] if os.path.exists(sent_file): for line in open(sent_file): info = json.loads(line) results.append(info) return results
e2bd1e461fc673ff1c8933b1e14c3af32cef6cbd
36,267
import os def CheckHomeForFile(filename): """Checks the users home dir for the existence of the given file. Returns the path to the file if it's there, or None if it is not. """ full_path = os.path.expanduser(os.path.join('~', filename)) if os.path.exists(full_path): return full_path return None
aa3199a241a29a22408da358f5a693967f66f298
36,268
def find_text(node, path): """Find a node's text or None """ return getattr(node.find(path), 'text', None)
ddc951dcec720ab2ed73c4277d582e8ffa25ae2b
36,270
def format_time(time): """ Format time based on strftime. Args: time: Expected time as datetime.datetime class Returns: Formatted time. """ return time.replace(second=0).strftime('%Y-%m-%d %H:%M:%S')
fe6abda2c787c5504ea8518c25fc2ddf0054b8f2
36,271
import string import random def get_random_inum(): """[Generates a fake random inum, aplhanumeric 30 digits] Returns: [str]: [30 digit alphanumeric string to be used as inum] """ letters_and_digits = string.ascii_letters + string.digits fake_inum = ''.join((random.choice(letters_and_digits) for i in range(30))) return fake_inum
8e728a30653f9fb7e5b954bb52e343cc9cd0d564
36,273
def lr_decay(optimizer, hp): """Decay learning rate by a factor of lr_decay""" for param_group in optimizer.param_groups: if param_group['lr'] > hp.min_lr: param_group['lr'] *= hp.lr_decay return optimizer
33bbe6f50cf7367fdc49bf2fee0c1305da12266e
36,277
def get_accuracy_string(full_rep_accs): """format string with accuracies""" return '%05.2f/%05.2f/%05.2f'%(full_rep_accs[0], full_rep_accs[1], full_rep_accs[2])
dbd83fc93e0d511dbbc246c858a41d419ec264bc
36,279
def foobar(x): """ foobar docstring :param x: :return: """ return x**2
c1ae22fe0726305dbea7386dbb98bc3408d9e1ee
36,281
from typing import Iterable from pathlib import Path def list_images(img_dir) -> Iterable[str]: """List all image files in img_dir. Returns an iterator that lists the files to process. Subclasses may want to override this to return specific image types or filter the results. By default, will list all images in self.img_dir if the file extension is in the extensions list. Returns ------- Iterable[str] The list of files to be used for generating the masks. """ extensions = (".png", ".jpg", ".jpeg", ".tif", ".tiff") paths = Path(img_dir).glob("**/*") paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths) return (str(p) for p in paths)
c7585c4fe737fb95af27a3fad578ebf3347e4f9c
36,282
def map_label(label, alias): """ Update label from its alias value in config.yaml. ``` alias.get(label, label) ``` If the `label` has an alias, use it else use the label as-is. This requires labels to be created this way: ```yaml alias: aliased_label_1: real_label aliased_label_2: real_label ``` as opposed to ```yaml alias: real_label: - aliased_label_1 - aliased_label_2 ``` The above snippet requires an algorithm of O^3 and more lines of code than present here. """ if not alias: return label return alias.get(label, label)
235b9bb0ef808cba106acb1e612837677bfdeddc
36,284
from pathlib import Path def assemble_path(*args): """Join together all specified inputs into a directory path.""" if not args: raise ValueError("You must specify at each one path parameter to assemble") assembled_path = Path(args[0]) for index, path in enumerate(args[1:]): assembled_path /= path return assembled_path
cfe2102683046fa655535bbf1ae6323960046780
36,286
from typing import OrderedDict def list_drop_duplicates(li: list, keep: str = 'first') -> list: """ Drop duplicates from a (ordered) list :param li: List to drop duplicates from :param keep: Keep first or last occurrence of the unique items """ if keep == 'first': return list(OrderedDict((x, True) for x in li).keys()) elif keep == 'last': li.reverse() li = list(OrderedDict((x, True) for x in li).keys()) li.reverse() return li else: raise ValueError(f'Cannot parse {keep} as argument for keep. This should be either "first" or "last"')
b43b59a7d6ea266843266ee3eb8d5af5eaf7bb33
36,287
def get_all_locales(cache): """Gets a list of all locales and prints them out to the terminal""" locales = sorted(list(set([x['locale'] for x in cache.itervalues()]))) return locales
8b1447772d73a5d35a94c16df1f464b65208fd7f
36,288
def is_api_disabled(config, api_name): """Check if api_name is disabled in the config. Args: config (dict): GCP API client configuration. api_name (str): The name of the GCP api to check. Returns: bool: True if the API is disabled in the configuration, else False. """ return config.get(api_name, {}).get('disable_polling', False)
cdce24c07cdf1190c1ea46613cbdccfaec649404
36,289
import glob def get_file_paths(path_to_data: str = 'drive/MyDrive/Belgorodskaya/*.tif', feature_names: list = ['tmax', 'tmin', 'pr']): """ Filters out required features amongs terraclim dataset Arguments: path_to_data (str): path to directory that containts terraclim dataset feature_names (list): list of required features Returns: dict: key -- feature name; value -- list of related tif files """ files_to_mosaic = glob.glob(path_to_data) files_to_mosaic = list(filter(lambda x: sum(fn in x for fn in feature_names) > 0, files_to_mosaic)) file_paths = {fn: list(filter(lambda x: fn in x, files_to_mosaic)) for fn in feature_names} return file_paths
3e9e5d3a527cb7c7dafa800736cc274ea23e34e0
36,290
def normalize(v): """ Normalizes v Args: v -- Vector Returns: Vector """ l = sum([t ** 2 for t in v]) ** -0.5 return [l * t for t in v]
699dd7a3b19ecf4f4b371c1700c80089cab41239
36,291
def scfenergy(outputfile): """ Temp fix for Q-Chem outputs (need to fix cclib upstream)""" scfenergies = [] with open(outputfile, 'r') as handle: lines = handle.readlines() for line in lines: if 'Total energy in the final basis set' in line: scfenergies.append(float(line.split()[-1])) if not scfenergies: print(outputfile, "incomplete") scfenergies.append(0) return scfenergies[-1]
b6fbd4812f2385033ac6ed3c6758e649de7a5f08
36,292
def deserialize_measurement(serialized): """Deserialize a `openff.evaluator.unit.Measurement` from a dictionary of the form `{'value', 'error'}`. Parameters ---------- serialized : dict of str and str A dictionary representation of a `openff.evaluator.unit.Measurement` which must have keys {"value", "error"} Returns ------- openff.evaluator.unit.Measurement The deserialized measurement. """ if "@type" in serialized: serialized.pop("@type") return serialized["value"].plus_minus(serialized["error"])
80b28ac9c641fb0399efc5d24eddfc381df68f27
36,293
def score(letter): """ Returns index of letter in alphabet e.g. A -> 1, B -> 2, ... """ string = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' return(string.index(letter) + 1)
becba57c57c36204d2b106836233a91b90960712
36,294
def aggregate_to_ts_and_endpoint(data, verbosity=False): """Aggregate/sum the returned data by timestamp and endpoint alias.""" aggs = {} # Aggregate/sum the returned data by timestamp and endpoint alias. for row in data.data: if verbosity: print(' *', row) for data in row.data: if verbosity > 1: print(' *', data) # if not aggs.has_key(data.ts_epoch): if data.ts_epoch not in aggs: aggs[data.ts_epoch] = {} # if not aggs[data.ts_epoch].has_key(row.endpoint): if row.endpoint not in aggs[data.ts_epoch]: aggs[data.ts_epoch][row.endpoint] = 0 if data.val is not None: aggs[data.ts_epoch][row.endpoint] += data.val return aggs
9c215fb2b2b6112bd28fcd16010e75d5e8175aa2
36,296
def exhausted_search(nums, k): """Linear search the top k smallest elements. Arguments: nums {list} -- 1d list with int or float. k {int} Returns: list -- Top k smallest elements. """ rets = [] idxs = [] key = None for _ in range(k): val = float("inf") for i, num in enumerate(nums): if num < val and i not in idxs: key = i val = num idxs.append(key) rets.append(val) return rets
103284cada5a80a2799eacec92ce9cca961ac3d8
36,297
def getConnectionParameters(): """ :returns: URL string respresenting broker connection parameters """ return "amqp://guest:guest@localhost:5672/%2F"
3969b081b15ae740e5ad01e650580fa45a7cebed
36,298
import re import unicodedata def slugify(string): """ Slugify unicode string. Example: >>> slugify("Hélló Wörld") "hello-world" """ if not string: return '' return re.sub(r'[-\s]+', '-', re.sub(r'[^\w\s-]', '', unicodedata.normalize('NFKD', string)).strip().lower())
8924c712d7b9527f3c0df2d2b3522a411a3dfd4a
36,299
import argparse def argument_parser(): """This function captures the comandline arguments for clean_dataset module""" # passing arguments to average dataset parse = argparse.ArgumentParser(description="summarize the dataset by average") # summary option parse.add_argument( "-a", "--average_by", type=str, required=False, default="hour", dest="average_by", help="Summarized the data by average", choices=["month", "week", "day", "hour", "minute"], ) # parse.add_argument( "-f", "--from_date", type=str, required=False, default="2021-01-01", dest="pull_data_from_date", help="To pull historical data from a specific date e.g 2021-12-31", ) return parse.parse_args()
edf95d83d2944dee1a7f5e60bf0115205156b287
36,300
import tempfile import gzip def process_reads_crick(args): """process crick trimmed reads and make them ready for mapping with STAR""" crick_r1 = tempfile.NamedTemporaryFile(suffix=".fastq", prefix='crick_r1', dir=args.tmpdir, delete=False) crick_r2 = tempfile.NamedTemporaryFile(suffix=".fastq", prefix='crick_r2', dir=args.tmpdir, delete=False) args.crick_r1 = crick_r1.name args.crick_r2 = crick_r2.name print('Started processing crick reads') if args.crick_val_r1.endswith('.gz'): c_r1_handle = gzip.open(args.crick_val_r1, 'rt') c_r2_handle = gzip.open(args.crick_val_r2, 'rt') else: c_r1_handle = open(args.crick_val_r1, 'rt') c_r2_handle = open(args.crick_val_r2, 'rt') #make 4 file handles for forward and reverse watson and crick crick_r1_handle = open(args.crick_r1, 'w') crick_r2_handle = open(args.crick_r2, 'w') j = 0 while True: c_r1 = [] c_r2 = [] for i in range(4): try: c_r1.append(next(c_r1_handle)) c_r2.append(next(c_r2_handle)) except StopIteration: break j += 1 try: if int(args.sequences) == j: break except TypeError: pass if not j % 1000000: print('Processed %s reads' % (j)) if not c_r1: break convert_c_r1 = c_r1[1].upper().replace('G', 'A') convert_c_r2 = c_r2[1].upper().replace('C', 'T') g_pos_c = [str(n) for n, i in enumerate(c_r1[1]) if i.upper() == 'G'] c_pos_c = [str(n) for n, i in enumerate(c_r2[1].rstrip('\n')[::-1]) if i.upper() == 'C'] header_c = '@%s' % (c_r1[0][1:-1].replace(' ', '|').replace('\t', '|')) header_c += '|%s\n' % (','.join(c_pos_c) + '|' + ','.join(g_pos_c)) crick_r1_handle.write(header_c + convert_c_r1 + '+\n' + c_r1[3]) #print(read_r1[3]) crick_r2_handle.write(header_c + convert_c_r2 + '+\n' + c_r2[3]) crick_r1_handle.close() crick_r2_handle.close() return args
78a7256fd9349f49071ae13fa6b3c7e5a8686472
36,301
def computeSimilarityScores(inputs, fvecfunc, combfunc, clsfunc): """Computes similarity scores for all pairs in the given set of inputs. Exhaustively goes through all pairs of inputs, and looks up feature vectors using the given fvecfunc. Pairs of feature vectors are combined using the combfunc. These are fed into clsfunc (as a list) to get a score. Returns a dict mapping pairs (input1, input2) -> similarity score. """ ret = [] for i, i1 in enumerate(inputs): f1 = fvecfunc(i1) for i2 in inputs[i+1:]: f2 = fvecfunc(i2) fvec = combfunc(f1, f2) ret.append((i1,i2,fvec)) allin1, allin2, allfvecs = zip(*ret) scores = clsfunc(allfvecs) ret = dict(((i1,i2), s) for (i1,i2,fvec), s in zip(ret, scores)) return ret
383be99d20e2bea50b3b551f009daa0e5f54cb0a
36,302
import os def create_directory(directory): """Create parent directories as necessary. :param directory: (~str) Path of directory to be made. :return: True - if directory is created, and False - if not. """ try: os.makedirs(directory) return True except OSError: # Checks if the folder is empty if not os.listdir(directory): return True return False
46303095cacd93c1124191ad004bab23bc05274b
36,303
def get_device_id(device): """ Get the device_id of a device """ return device.type[len('_arsdk-'):-len('._udp.local.')]
f53f38a06089d584382d5ab02d3e3dd3b250bb41
36,304
def _rename_chord(chord): """Rename chord. Default: 26 chord classes, 12 maj + 12 min + other + non-chord.""" root, attribute = chord['root'], chord['attribute'] attribute = attribute.split('/')[0] # remove inversion if root == 'N': # non-chord return root elif any(s in attribute for s in [':min', ':minmaj']): # minor return root + ':min' elif attribute == '' or any(s in attribute for s in [':maj', ':7', ':9']): # major return root + ':maj' elif any(s in attribute for s in [':(', ':aug', ':dim', ':hdim7', ':sus2', ':sus4']): # others return 'others' else: print('invalid syntax:', chord, root, attribute) exit(1)
8e2ce7163536b3e2a1cfd8ed94060f32ed0e5154
36,306
def _valueList(self, mat): """Return the given MMatrix as a value list. :param matrix: The MMatrix to generate the list from. :type matrix: om2.MMatrix :return: The list of matrix values. :rtype: list(float) """ values = [] for i in range(4): for j in range(4): values.append(mat.getElement(i, j)) return values
dc3578b58938a3329cbfb6e59d30bc71dcc095fb
36,309
import re def remove_default_namespace(src): """ Remove default xmlns from the given string. :param str src: Source string :returns: String with xmlns definitions removed. """ return re.sub(r' xmlns="[^"]+"', '', src)
600a0d2331b32010d29c4ba9cb004cc3bdf80d05
36,310
import click import shutil import os import subprocess import sys def create_env(dest, env_name='env'): """ Create a virtual environment. :param dest: The full path to the project root. """ click.echo('Creating a virtual environment...') virtualenv = shutil.which('virtualenv') if virtualenv is None: click.echo('Failed to find virtualenv executable...Skipping!') return False env_path = os.path.join(dest, env_name) try: subprocess.run([virtualenv, '--python=%s' % sys.executable, env_path], check=True) except subprocess.SubprocessError: shutil.rmtree(env_path) click.echo('A problem occured whith virtualenv...Skipping!') return False with open(os.path.join(dest, '.gitignore'), 'a') as f: f.writelines(['\n', '%s/' % os.path.basename(env_path)]) click.echo('Installing packages...') pip = os.path.join(env_path, 'bin/pip') requirements = os.path.join(dest, 'requirements.txt') try: subprocess.run([pip, 'install', '-r', requirements], check=True) subprocess.run([pip, 'freeze', '>', requirements], check=True) except subprocess.SubprocessError: click.echo('A problem occurred with pip...Skipping!') return False else: return True
8b1f26db8b21a41183df03e826eb9605d48ed4a9
36,311
def get_display_range(worksheet):
    """Return the display range configured on a worksheet.

    args:
        worksheet (seeq.spy.workbooks._worksheet.AnalysisWorksheet): Worksheet

    returns:
        conditions (dict): Display range. {'Start':Timestamp, 'End':Timestamp}
    """
    display_range = worksheet.display_range
    return display_range
160447f5adf495748042f1b2010921eb4da3c573
36,312
def _is_prefix(lhs, rhs): """ return True if the first list is a prefix of the second """ rhs = list(rhs) while rhs: if lhs == rhs: return True rhs.pop() return False
6b80093479f4f8f989386f51af98862593fc6867
36,315
def get_pipeline(pipeline):
    """Normalize the input pipeline into ``{"pipeline": [...]}`` form.

    A bare list is wrapped as the pipeline; a dict without a "pipeline" key
    is treated as a single stage.  Any other type is rejected.
    """
    if isinstance(pipeline, list):
        return {"pipeline": pipeline}
    if isinstance(pipeline, dict):
        if "pipeline" in pipeline:
            return pipeline
        return {"pipeline": [pipeline]}
    raise TypeError("Pipeline must be a dict or a list")
30e615adec130dca3c2119ceab3f3cc98e5d7688
36,316
def count_number_of_entries(row, feature, ref_counts):
    """Look up how many entries exist for the building described by *row*.

    row : pandas.Series
        EPC dataset row.
    feature : str
        Feature by which to count building entries,
        e.g. "BUILDING_REFERNCE_NUMBER" or "BUILDING_ID".
    ref_counts : pandas.Series
        Value counts for building reference number.

    Return
    ---------
    counts : int
        Number of entries for the given building; if the reference is not
        present in *ref_counts*, the raw reference is returned instead.
    """
    building_ref = row[feature]
    try:
        return ref_counts[building_ref]
    except KeyError:
        # Unknown reference: fall back to handing back the reference itself.
        return building_ref
2658281beeb51cea8ca1bd3484a8ecd089763d55
36,319
import functools


def compose(*args):
    """Compose the given functions right-to-left.

    ``compose(f, g, h)(x)`` evaluates ``f(g(h(x)))``.

    :param args: a list of functions
    :return: composition of the functions
    """
    def _chain(outer, inner):
        return lambda *call_args, **call_kwargs: outer(inner(*call_args, **call_kwargs))

    return functools.reduce(_chain, args)
7afd956b88ecee8d942c3d1143d83c7a41511ff3
36,321
def comment_parser(reddit_comment_object):
    """Extract selected fields from a reddit comment object.

    Returns a tuple of (post timestamp, post id, score, ups, downs,
    post body, thread title, thread url, subreddit display name).
    """
    comment = reddit_comment_object
    return (
        comment.created_utc,
        comment.id,
        comment.score,
        comment.ups,
        comment.downs,
        comment.body,
        comment.link_title,
        comment.link_url,
        comment.subreddit.display_name,
    )
2331c0b52201272a39d0b3befeb8a962f59c05a6
36,322
def ids2tokens(vocab, tokids):
    """
    Convert list of numeric token ID arrays `tokids` to a character token array with the help of the
    vocabulary array `vocab`.

    Returns result as list of string token arrays.

    .. seealso:: :func:`~tmtoolkit.preprocess.tokens2ids` which reverses this operation.

    :param vocab: vocabulary array as from :func:`~tmtoolkit.preprocess.tokens2ids`
    :param tokids: list of numeric token ID arrays as from :func:`~tmtoolkit.preprocess.tokens2ids`
    :return: list of string token arrays
    """
    # Fancy-index the vocabulary with each ID array in turn.
    return list(map(vocab.__getitem__, tokids))
70149c881d362bbe32fa39de6941891e0f8915db
36,323
def pad_trunc(data, maxlen):
    """Pad each sample with zero vectors, or truncate it, to length *maxlen*.

    :param data: list of samples; each sample is a list of equal-length
        word vectors (lists of floats).
    :param maxlen: target number of vectors per sample.
    :return: new list of samples, each exactly *maxlen* vectors long.
    """
    new_data = []
    # Zero vector matching the dimensionality of the word vectors.
    zero_vector = [0.0] * len(data[0][0])
    for sample in data:
        if len(sample) > maxlen:
            temp = sample[:maxlen]
        elif len(sample) < maxlen:
            # BUG FIX: copy before padding -- the original did
            # `temp = sample` and then appended zero vectors, mutating the
            # caller's input data in place.
            temp = list(sample)
            temp.extend([zero_vector] * (maxlen - len(sample)))
        else:
            temp = sample
        new_data.append(temp)
    return new_data
875cceddf2be0b5b7e3a177e6cb799d434805e42
36,325
def get_block_size(num_antennas=1, tchans_per_block=128, num_bits=8, num_pols=2,
                   num_branches=1024, num_chans=64, fftlength=1024, int_factor=4):
    """Compute a RAW data block size, in bytes, for voltage recording.

    The size is chosen so that one block yields `tchans_per_block` time bins
    after fine channelization with the given FFT length and integration
    factor.

    Parameters
    ----------
    num_antennas : int
        Number of antennas
    tchans_per_block : int
        Final number of time bins in fine resolution product, per data block
    num_bits : int
        Number of bits in requantized data (for saving into file). Can be 8 or 4.
    num_pols : int
        Number of polarizations recorded
    num_branches : int
        Number of branches in polyphase filterbank
    num_chans : int
        Number of coarse channels written to file
    fftlength : int
        FFT length to be used in fine channelization
    int_factor : int, optional
        Integration factor to be used in fine channelization

    Returns
    -------
    block_size : int
        Block size, in bytes
    """
    # Total channels across all antennas (OBSNCHAN in the RAW header).
    total_chans = num_antennas * num_chans
    # Complex samples: 2 components per polarization, each num_bits wide.
    sample_bytes = 2 * num_pols * num_bits // 8
    # Coarse-channel time samples required per block of fine spectra.
    samples_per_block = fftlength * int_factor * tchans_per_block
    return samples_per_block * total_chans * sample_bytes
25d667f84ddaaf25b0cb4bae48a0ab1f363bc63a
36,329
def dimensionsKeepAspect(targetWidth, targetHeight, oldWidth, oldHeight):
    """
    Compute resize dimensions that fit within (targetWidth, targetHeight)
    while preserving the original aspect ratio.  Images already smaller
    than the target box are returned at their original size (no upscaling).
    """
    # Smaller than the box in both directions: keep as-is.
    if oldWidth < targetWidth and oldHeight < targetHeight:
        return (int(oldWidth), int(oldHeight))
    sourceAspect = oldWidth / float(oldHeight)
    boxAspect = targetWidth / float(targetHeight)
    if sourceAspect > boxAspect:
        # Width is the limiting dimension.
        return (int(targetWidth), int(targetWidth / sourceAspect))
    if sourceAspect < boxAspect:
        # Height is the limiting dimension.
        return (int(targetHeight * sourceAspect), int(targetHeight))
    # Aspect ratios match exactly.
    return (int(targetWidth), int(targetHeight))
9763638a9a4334dcc22f2d38f1e6b4a8fda1d1b6
36,330
from datetime import datetime


def datetime_to_number(dt: datetime):
    """
    Pack a datetime into a single integer.

    The encoding is a mixed-radix number anchored at year 2000:
    ``((((year-2000)*12 + month-1)*31 + day-1)*24 + hour)*3600
    + minute*60 + second`` -- each field gets just enough range
    (months 0-11, days 0-30, hours 0-23, minutes/seconds 0-59).

    :param dt: datetime
    :return: encoded integer
    """
    total_months = (dt.year - 2000) * 12 + (dt.month - 1)
    total_days = total_months * 31 + (dt.day - 1)
    total_minutes = (total_days * 24 + dt.hour) * 60 + dt.minute
    return total_minutes * 60 + dt.second
2abdef39a58eb069a5cedcc0d1ae94836a5f38bb
36,332
import json import os def _workflow_upload_file_load(workflow_file: str) -> tuple: """Attempts to load the workflow from a file Arguments: workflow_file - the path to the workflow file Returns: A tuple containing the loaded workflow and a message, in that order Notes: If the workflow could not be loaded, None is returned for the loaded workflow. If there were no issues the message will be None in the return tuple. """ loaded_workflow, msg = None, None try: with open(workflow_file, 'r', encoding='utf8') as in_file: loaded_workflow = json.load(in_file) except json.JSONDecodeError as ex: # pylint: disable=consider-using-f-string msg = 'ERROR: A JSON decode error was caught processing file "%s"' % os.path.basename(workflow_file) print(msg, ex) except Exception as ex: # pylint: disable=consider-using-f-string msg = 'ERROR: An unknown exception was caught processing file "%s"' % os.path.basename(workflow_file) print(msg, ex) if loaded_workflow and not 'version' in loaded_workflow: # pylint: disable=consider-using-f-string msg = 'ERROR: Version not found in workflow file "%s"' % os.path.basename(workflow_file) return loaded_workflow, msg
316986c444efaf921a9807f6c437a0dfd5250c4e
36,333
import numpy
import re


def fread_vector(stream):
    """The fread_vector() function reads a vector from the stream pointed
    to by @p stream and returns it as a numpy array.

    @param stream Stream object positioned at the start of the vector line
    @return Vector as a 1-D numpy array of floats
    """
    # BUG FIX: use a raw string for the regex -- '\s' in a plain literal is
    # an invalid escape sequence (SyntaxWarning on Python 3.12+, an error in
    # future versions).
    fields = re.split(r"\s+", stream.readline())
    return numpy.array([float(d) for d in fields if len(d) > 0])
2020e7014a3450a3d913ed9576c128414e3b1c43
36,334
def calcIPNotation(value):
    """
    Convert a 32-bit integer IP address value to dotted-quad notation.

    :param value: integer value of the address
    :return: string such as '192.168.1.1'
    """
    # Extract each octet, most significant first.
    octets = [str((value >> shift) & 255) for shift in (24, 16, 8, 0)]
    return '.'.join(octets)
f11d288a0abe292dfd5c73b1fcab9c8cb41fcdab
36,335
def load_best_blast_hit( blast_result_file ):
    """! @brief load best blast hit per query

    @param blast_result_file tab-separated BLAST output; column 0 is the
           query ID, column 3 the alignment length, last column the score
    @return dict mapping query ID to { 'score': float, 'len': int }
    """
    best_hits = {}
    with open( blast_result_file, "r" ) as f:
        for line in f:
            parts = line.strip().split('\t')
            # Skip blank lines (the original crashed on them).
            if not parts or not parts[0]:
                continue
            score = float( parts[-1] )
            # Keep only the highest score per query.  An explicit lookup
            # replaces the original bare 'except:', which also hid
            # malformed-line errors.
            current = best_hits.get( parts[0] )
            if current is None or score > current['score']:
                best_hits[ parts[0] ] = { 'score': score, 'len': int( parts[3] ) }
    return best_hits
072c02527cc130ed65d9dbaa742314a03e95f491
36,336
def get_sites():
    """Return the list of jahia sites for which we have data."""
    sites = ['atelierweb2']
    return sites
0b2a39c7d4f66627ff5d2bc4517fe49b059366f8
36,337
def parse_slack_message_object(message_obj):
    """Extract user/channel/team names from a slack message object.

    Notes:
        `slackbot.message`._body keys include:
        [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: the message body with 'channel_name', 'user_name' and
            'team_name' added
    """
    metadata = dict(message_obj._body)
    client = message_obj._client
    user_name = client.users[metadata['user']]['name']
    try:
        metadata['channel_name'] = client.channels[metadata['channel']]['name']
    except KeyError:
        # Direct messages have no channel entry; tag them with the sender.
        metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(user_name)
    metadata['user_name'] = user_name
    metadata['team_name'] = client.login_data['team']['name']
    return metadata
6e3334ccb2eda7a07892d50db0e9df11e177a044
36,338
def generate_builder_inputs(fixture_code, generate_structure):
    """Generate a set of default inputs for the ``Wannier90BandsWorkChain.get_builder_from_protocol()`` method."""

    def _generate_builder_inputs(structure_id="Si"):
        # Entry point string for every code the workchain needs.
        code_entry_points = {
            "pw": "quantumespresso.pw",
            "pw2wannier90": "quantumespresso.pw2wannier90",
            "wannier90": "wannier90.wannier90",
            "projwfc": "quantumespresso.projwfc",
            "opengrid": "quantumespresso.opengrid",
            "yambo": "yambo.yambo",
            "p2y": "yambo.p2y",
            "ypp": "yambo.ypp",
            "gw2wannier90": "yambo_wannier90.gw2wannier90",
        }
        return {
            "codes": {name: fixture_code(entry_point)
                      for name, entry_point in code_entry_points.items()},
            "structure": generate_structure(structure_id=structure_id),
        }

    return _generate_builder_inputs
5f0fb66a816fd0496084e29a68e39b6022d0fd90
36,341
def calculate_IonS(salt):
    """Ionic strength as a function of salinity.

    DOE handbook, Chapter 5, p. 13/22, eq. 7.2.4
    """
    numerator = 19.924 * salt
    denominator = 1000 - 1.005 * salt
    return numerator / denominator
7bb10b47580831b49c7f8f2b9af3fce31272eb43
36,342
def filter_atlas_and_dataset(scores_df, atlas_df, metatlas_dataset, column='passing'):
    """
    Splits atlas and metatlas_dataset by compound according to if it passes/fails minimum requirements
    set by:
    'min_intensity', 'rt_tolerance','mz_tolerance', 'min_msms_score', 'min_num_frag_matches', and 'min_relative_frag_intensity'.

    'min_intensity' <= highest intensity across all files for given compound
    'rt_tolerance' >= shift of median RT across all files for given compound to reference
    'mz_tolerance' >= ppm of median mz across all files for given compound relative to reference
    'min_msms_score' <= highest compound dot-product score across all files for given compound relative to reference
    'min_num_frag_matches' <= number of matching mzs when calculating max_msms_score
    'min_relative_frag_intensity' <= ratio of second highest to first highest intensity of matching sample mzs

    :param scores_df: dataframe with one row per compound; must contain `column` (bool) and 'inchi_key'
    :param atlas_df: atlas dataframe, same compounds in the same order as scores_df
    :param metatlas_dataset: nested list [file][compound] of measurement dicts
    :param column: name of the boolean pass/fail column in scores_df
    :return pass_atlas_df, fail_atlas_df, pass_dataset, fail_dataset:
    """
    try:
        assert column in scores_df
    except AssertionError:
        # BUG FIX: the message was broken by a literal newline inside the
        # string (a syntax error); restored as one message.
        print('Error: ' + column + ' not in scores_df. '
              'Either set column where pass/fail boolean values are or run test_scores_df().')
        raise
    try:
        # All three structures must list the same compounds in the same order.
        assert (scores_df.inchi_key.tolist() == atlas_df.inchi_key.tolist() ==
                [metatlas_dataset[0][i]['identification'].compound[0].inchi_key
                 if len(metatlas_dataset[0][i]['identification'].compound) > 0
                 else 'None'
                 for i in range(len(metatlas_dataset[0]))])
    except AssertionError:
        print('Error: scores_df, atlas_df, and metatlas_dataset must have the same compounds in the same order.')
        raise

    # Split the atlas rows with the boolean pass/fail mask.
    pass_atlas_df = atlas_df[scores_df[column]]
    fail_atlas_df = atlas_df[~scores_df[column]]

    # Apply the same per-compound mask to every file's compound list.
    pass_dataset = []
    fail_dataset = []
    for file_idx in range(len(metatlas_dataset)):
        pass_temp = []
        fail_temp = []
        for compound_idx in range(len(metatlas_dataset[0])):
            if scores_df.iloc[compound_idx][column]:
                pass_temp.append(metatlas_dataset[file_idx][compound_idx])
            else:
                fail_temp.append(metatlas_dataset[file_idx][compound_idx])
        pass_dataset.append(pass_temp)
        fail_dataset.append(fail_temp)

    return pass_atlas_df, fail_atlas_df, pass_dataset, fail_dataset
959939a22d98e3fca288b96a09cbb3d2dca147f6
36,343
def parse_namespace(publicDeclarations):
    """Map each namespace to the list of names declared in it.

    Example::

        from . import util
        import json
        namespace_json = util.parse_namespace(publicDeclarations["publicDeclarations"])
        json.dump(namespace_json, open("c:/namespace.json",'w'))
    """
    return {namespace: list(declarations.keys())
            for namespace, declarations in publicDeclarations.items()}
6d376de4a6adf261d078f73aee358ae09ec01ad3
36,344
def str_dict(input_dict):
    """
    Convert all the values in a dictionary to str

    :param input_dict:
    :type input_dict: dict{any: any]
    :return: dictionary including str values
    :rtype dict[any: str]
    """
    converted = {}
    for key, value in input_dict.items():
        converted[key] = "{}".format(value)
    return converted
cc893ede066a50426990e4e578151d8ef97bfb55
36,346
from typing import Union
from pathlib import Path
from typing import Any
import pickle


def read_pkl(pkl: Union[str, Path]) -> Any:
    """
    Deserialize and return the object stored in a pickle file.

    Parameters
    ----------
    pkl : str or pathlib.Path
        The path of the pickle file.

    Returns
    -------
    obj : Any
        Restored object.
    """
    with open(pkl, mode="rb") as handle:
        return pickle.load(handle)
e98815a571e4812b654cf264f53e044f7b31ae8a
36,347
def straightforward_search_1d(a):
    """
    @fn straightforward_search_1d
    @brief Return the index of a peak element (one that is >= both of its
           neighbours) in the non-empty sequence *a*.

    A single-element array is its own peak.  Returns -1 if no peak is found
    (kept as a safety net; a non-empty array always has a peak).
    """
    n = len(a)  # BUG FIX: Python lists have no .length() method
    # Guard n == 1 before touching a[1] (the original raised IndexError).
    if n == 1 or a[0] >= a[1]:
        return 0
    if a[n - 1] >= a[n - 2]:
        return n - 1
    for i in range(1, n - 1):
        if a[i] >= a[i - 1] and a[i] >= a[i + 1]:
            return i
    return -1
f64ffe6609a8ec7b8d6eb9ee83afee48f15924f1
36,348