content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def cov_parse_rms_unit_of_weight(line: str) -> float:
    """Read the 'RMS unit of weight' from a COV file.

    The value is found on line 7 of the file, which looks like:
    'RMS OF UNIT WEIGHT:     0.0010   # OBS:  328817 # UNKNOWNS:     5424'

    :param line: the raw text line containing the RMS value
    :return: the RMS unit of weight as a float
    """
    # Parse by label rather than fixed column offsets so small alignment
    # differences in the file do not silently truncate the number.
    _, _, rest = line.partition(':')
    value = rest.split('#', 1)[0]
    return float(value)
ff339cba1ac2ea01802300ba2699ce9d994c92be
125,315
def query_definitions(database, dict_name, term):
    """Search the database for a dictionary definition."""
    sql = '''
    SELECT DF_ALT, DF_DEFN FROM DEFINITIONS
    WHERE DF_DICT=? AND DF_FORM1=?
    OR DF_DICT=? AND DF_FORM2=?
    '''
    # The term may match either the primary or the alternate form.
    cursor = database.cursor()
    cursor.execute(sql, (dict_name, term, dict_name, term,))
    results = [[term, alt_form, definition] for alt_form, definition in cursor]
    cursor.close()
    return results
2f48468ae631eedc44ddee5abf50e4d27242fc40
125,316
def detect_bedcov_columns(text):
    """Determine which 'bedcov' output columns to keep.

    The output format is the input BED plus one appended column holding
    the count of basepairs mapped within each row's region. The input
    BED may have 3 columns (regions without names), 4 (named regions),
    or more (arbitrary columns after 'gene').
    """
    first = text[:text.index('\n')]
    ntabs = first.count('\t')
    if ntabs < 3:
        raise RuntimeError("Bad line from bedcov:\n%r" % first)
    base = ['chromosome', 'start', 'end']
    if ntabs == 3:
        return base + ['basecount']
    if ntabs == 4:
        return base + ['gene', 'basecount']
    # Arbitrary columns after 'gene' get placeholder names and are ignored.
    extra = ['_%d' % i for i in range(1, ntabs - 3)]
    return base + ['gene'] + extra + ['basecount']
a5e59ae5903bb83151c8a81a0376711191c0f591
125,319
def pad_to_5_str(num):
    """Convert an int to a string zero-padded to 5 chars (1 -> '00001').

    :param num: int to be padded
    :return: padded string
    """
    return f'{num:=05d}'
39fb0cf8e6362db4103e9c8ea7ba40055406abea
125,322
def get_dotted_flag_value(config, flag):
    """Return the value reached by following a dotted flag name through nested dicts.

    Returns None as soon as any component of the dotted path is missing.
    """
    node = config
    for part in flag.split('.'):
        if part not in node:
            return None
        node = node[part]
    return node
6fda86456a8efa16ea94dd3468871e8bd97f3bbf
125,323
def readlistfile(filename):
    """Read a file as a list, one item per line.

    Lines commented out with '#' and empty lines are ignored.

    The original iterated over a bare ``open(...)`` and never closed the
    handle; a ``with`` block guarantees the file is released.
    """
    with open(filename, mode='r', encoding='utf-8', newline='') as handle:
        # Strip each line of surrounding whitespace (incl. the trailing
        # '\n') and keep only non-empty, non-comment entries.
        return [item for item in (line.strip() for line in handle)
                if item and not item.startswith('#')]
1cb9a9a248e1b089ae6e5fa8212b9a0eeef5181e
125,331
def get_os_tag(base):
    """Construct an OS tag from a 'name:version' base string.

    The tag is the OS name; when the version is numeric it is appended
    to the name.
    """
    parts = base.split(':')
    tag = parts[0]
    if parts[1].isnumeric():
        tag += parts[1]
    return tag
bed1c15ab6bea913e1a0afaee0a9c42ddb3bba7a
125,332
import json


def read_file(file_path, is_json=False):
    """Read the content of a file.

    :param file_path: path of the file
    :param is_json: True if it's a json file
    :return: the file's content (parsed object when is_json, else str)
    """
    with open(file_path, 'r') as infile:
        return json.load(infile) if is_json else infile.read()
c01d56b0a63b6f616824ae653182566b0b22eda9
125,339
def split_hoststring(hoststring):
    """Split a host string into its user, hostname, and port components.

    e.g. 'vagrant@localhost:22' -> ('vagrant', 'localhost', '22')

    Uses str.partition so a missing '@' or ':' yields an empty user or
    port; the original's raw find() offsets returned -1 for absent
    separators and silently produced garbled slices.
    """
    user, sep, hostport = hoststring.partition('@')
    if not sep:
        # No '@' present: the whole string is host[:port], user is empty.
        user, hostport = '', hoststring
    ip, _, port = hostport.partition(':')
    return (user, ip, port)
13ed98e5d09686053af3636d644ade49fded0445
125,346
def get_end_activities_threshold(end_activities, ealist, decreasingFactor):
    """Get the end-attributes cutting threshold.

    Parameters
    ----------
    end_activities
        Dictionary of end attributes associated with their count
    ealist
        Sorted end attributes list
    decreasingFactor
        Factor a count must exceed (relative to the current threshold)
        to become the new threshold

    Returns
    ---------
    threshold
        End attributes cutting threshold
    """
    threshold = ealist[0][1]
    for entry in ealist[1:]:
        count = entry[1]
        if count > threshold * decreasingFactor:
            threshold = count
    return threshold
63258c4cbe8d244d0e751ff3a1c33e7781be4087
125,350
def phoneword(phonenumber):
    """Return all possible phone words for a phone number.

    :param phonenumber: str of keypad digits ('2'-'9')
    :return: list of str with all phone words
    """
    digit_to_chars = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
    }
    words = ['']
    # Iteratively extend every partial word with each letter of the next
    # digit; the letter loop is outermost to keep the original ordering.
    for digit in phonenumber:
        words = [prefix + char
                 for char in digit_to_chars[digit]
                 for prefix in words]
    return words
39afb1c21c6c3d53268d9fff99cc880d08e889a1
125,355
def normalize_newlines(s):
    """Normalize line breaks in a string to '\\n' (falsy values pass through)."""
    if not s:
        return s
    return s.replace('\r\n', '\n').replace('\r', '\n')
5aba7b6d95f659735d3b8be27814dfcbc3cedc2b
125,356
def getWorst(population, n):
    """Return the n individuals with the largest fitness values.

    :param population: list of individuals
    :param n: number of individuals to take
    :return: the nth worst individuals
    """
    ranked = sorted(population, key=lambda ind: ind.fitness.values, reverse=True)
    return ranked[:n]
b2f50790f1365469d17d2106c4fd591121c1f3ce
125,359
def items_in_this_page(ls_resp, mode=None):
    """Return data records and/or collections from a DataFed listing reply.

    Parameters
    ----------
    ls_resp : protobuf message
        Message containing the listing reply
    mode : str, optional
        Set to "d" to only get datasets from the listing.
        Set to "c" to only get collections from the listing.
        Leave unset for all items in the listing.

    Returns
    -------
    dict
        Keys are item titles and values are the item IDs
    """
    entries = ls_resp[0].item
    if mode:
        # Keep only items whose ID prefix matches the requested mode.
        return {entry.title: entry.id
                for entry in entries if entry.id.startswith(mode)}
    return {entry.title: entry.id for entry in entries}
24241b2acf1bf20a47c0c69263209df6424dcfa9
125,363
def batchify(instances, batch_size=32):
    """Split instances into batches, each containing at most batch_size items.

    Slicing already clamps at the end of the sequence, so the original's
    conditional special-casing of the final (possibly shorter) batch was
    redundant and has been removed; behavior is identical.
    """
    return [instances[i:i + batch_size]
            for i in range(0, len(instances), batch_size)]
d5676fe90da26ff2c98fffe48b9afd4f3b02a06e
125,371
def list_equal(*elements):
    """Check if all given elements are equal."""
    first = elements[0]
    return all(element == first for element in elements[1:])
bfca85ebfb07d3bbdb5038fbe1a27165b684cfd7
125,375
def atmospheric_pressure(z, temp = 293.15, lb = 6.5e-3):
    """
    Calculates atmospheric pressure at a given height.

    Parameters
    ----------
    z : float
        Altitude above sea level [m].
    temp : float, optional
        Mean atmospheric temperature [K]. The default is 293.15.
    lb : float, optional
        Temperature lapse rate [K / m] (i.e. how many Kelvin the
        temperature of air decreases with a 1 m increase in altitude).
        The default is 6.5e-3 K / m.

    Returns
    -------
    float
        Atmospheric pressure [Pa] at altitude z.
    """
    # Sea-level standard pressure [Pa], gravitational acceleration
    # [m/s^2], and specific gas constant of dry air [J/(kg K)].
    p0, g, rd = 101325, 9.80665, 287.058
    power = -g / (rd * lb)
    return p0 * ((temp + lb * z) / temp) ** power
4ffc602e4e43e21ec183c17b1770129070c66417
125,381
def strip_query_string(url):
    # type: (str) -> str
    """
    Strip the query string from a URL (for use as a tag in spans),
    preserving any fragment.

    :param url: The URL to be stripped
    :return: The given URL without query strings
    """
    before_fragment, hash_sep, fragment = url.partition("#")
    stripped = before_fragment.partition("?")[0]
    if fragment:
        return stripped + hash_sep + fragment
    return stripped
ca24f21fc46fe08f63968cb5a999c9ac0ae79a01
125,382
def nSquaredSum(X, Y):
    """Return the negated sum of the squares of X and Y."""
    squared_sum = X ** 2 + Y ** 2
    return -squared_sum
aea09d9f4ff10b1effaecfbb1f46e19c6d07089d
125,383
import re


def format_id(s):
    """Format the given string so it can be used as an ID.

    Any character outside [-a-zA-Z0-9_] is replaced by '-'; empty or
    falsy input yields '_'.

    Example:
        >>> format_id('name.surname')
        'name-surname'
    """
    return re.sub(r'[^-a-zA-Z0-9_]', '-', s) if s else '_'
22e2a3353453b9e1c38402fec850fdc666ac7815
125,384
def fibonacci(num: int) -> int:
    """Return the num-th Fibonacci number (fibonacci(0) == 0)."""
    assert num >= 0
    a, b = 0, 1
    for _ in range(num):
        a, b = b, a + b
    return a
3849b4f753989e0b399cd4a98a5b7e1df5e060b4
125,387
import json


def df_json(df):
    """Convert the rows of a pandas DataFrame to a list of dicts.

    The frame is serialized record-wise to JSON and parsed back so the
    result consists of plain Python objects palatable to the rules
    engine.
    """
    records_json = df.to_json(orient='records')
    return json.loads(records_json)
52fa45f4a9e44bf4303c9704314d9e0ddba0aa61
125,388
def filter_gdf(
        gdf, rel_orbit_numbers=None, footprint_overlaps=None,
        start_date=None, end_date=None):
    """Filter S1 metadata GeoDataFrame

    Parameters
    ----------
    gdf : GeoDataFrame
        S1 metadata
    rel_orbit_numbers : list of int
        relative orbit numbers
    footprint_overlaps : shapely.geometry.Polygon
        AOI polygon
    start_date, end_date : datetime.datetime or datestr
        date interval
    """
    # Each criterion is applied only when provided and the frame is
    # still non-empty (masking an empty frame is pointless).
    if start_date is not None and len(gdf):
        gdf = gdf[gdf['sensing_end'] >= start_date]
    if end_date is not None and len(gdf):
        gdf = gdf[gdf['sensing_start'] <= end_date]
    if rel_orbit_numbers is not None and len(gdf):
        orbit_mask = gdf.apply(
            (lambda row: row['relative_orbit_number'] in rel_orbit_numbers),
            axis=1)
        gdf = gdf[orbit_mask]
    if footprint_overlaps is not None and len(gdf):
        gdf = gdf[gdf.intersects(footprint_overlaps)]
    return gdf
7add653956e6f9d8fa66a6d14520724d25291d1c
125,389
import gzip


def gzip_open(filename):
    """Open a possibly gzipped text file.

    Sniffs the two-byte gzip magic number and picks the right opener.
    """
    with open(filename, "rb") as handle:
        is_gzipped = handle.read(2) == b"\x1f\x8b"
    return gzip.open(filename, "rt") if is_gzipped else open(filename)
d405c4c1a4b1f91b0e9f932b02b86245f7c9e767
125,396
def normalizeCellsByRows(cells, mode="max", valueExtractor=None):
    """All values in the table's rows are scaled so that:

    a) if mode='max', the maximum value in the row is 1 and all others
       are value/maxValue
    b) if mode='min', the minimum value in the row is 1 and all others
       are minValue/value

    None values are rendered as "-" and excluded when finding the row
    extreme. Fixes the original, which took max(row)/min(row) over the
    *raw* row — wrong when a valueExtractor is supplied and a TypeError
    when the row contains None — and recomputed the extreme per cell.
    """
    assert mode in {"max", "min"}
    if valueExtractor is None:
        valueExtractor = lambda x: x
    newCells = []
    for row in cells:
        # Extract once so the extreme is computed over the same values
        # that get formatted, and only once per row.
        values = [valueExtractor(value) for value in row]
        present = [value for value in values if value is not None]
        extreme = None
        if present:
            extreme = max(present) if mode == "max" else min(present)
        newRow = []
        for value in values:
            if value is None:
                newRow.append("-")
            elif mode == "max":
                newRow.append("{:.2f}".format(value / extreme))
            else:
                newRow.append("{:.2f}".format(extreme / value))
        newCells.append(newRow)
    return newCells
221743680c4b5e3b26551d65c20d9a9d421ace4a
125,398
def get_inch_multiplier() -> float:
    """
    Get the multiplier needed for converting millimeters to inches.

    One inch is exactly 25.4 mm, so the mm -> inch factor is 1 / 25.4
    (~0.03937). The previous constant 0.254 was not the mm -> inch
    factor and overstated converted lengths by an order of magnitude.

    :return: a floating point value representing the multiplier.
    """
    return 1 / 25.4
b9de93a9034b9fcf5071d46b379e707b84d89bf2
125,403
def day_of_month(date_time_col):
    """Return the day of month for each entry of a datetime column."""
    days = date_time_col.dt.day
    return days
c69e12d5a7bbababd9816df0ca13e09fd12eca48
125,405
def validate_value(val):
    """
    Return a boolean indicating that a value stored in the DHT is valid.

    Currently *all* values are valid, although in the future size may be
    limited.
    """
    is_valid = True
    return is_valid
bdf52d211642b3a4f237aca40444a693edbe5448
125,409
def board_columns(board, rows, cols):
    """Return a list of the column strings of the given flat board."""
    columns = []
    for col in range(cols):
        # Collect one cell from each row at this column offset.
        columns.append(''.join(board[row * cols + col] for row in range(rows)))
    return columns
ddb74e479e8c442e9905281a3626489bda2864d8
125,410
from typing import List


def reverse_steps(steps: List[str]) -> List[str]:
    """
    >>> reverse_steps([])
    []
    >>> reverse_steps(["U"])
    ["U'"]
    >>> reverse_steps(["U", "R'", "D2"])
    ['D2', 'R', "U'"]
    """
    def invert(step: str) -> str:
        # A double turn is its own inverse; otherwise toggle the prime.
        if step.endswith("2"):
            return step
        if step.endswith("'"):
            return step[:-1]
        return step + "'"

    return [invert(step) for step in reversed(steps)]
0b194e2fb1685baead42d3f12cf8e8b204994c39
125,411
def count_pct(df, column="ClassId"):
    """Return a `pandas.DataFrame` with count ('num') and frequency ('freq') stats for `column`.

    Uses ``Series.to_frame(name="num")`` instead of renaming after the
    fact: on pandas >= 2 ``value_counts()`` names the resulting series
    'count', so the original ``rename(columns={column: "num"})`` was a
    silent no-op and the later ``df["num"]`` lookup raised KeyError.
    """
    class_count = df[column].value_counts().sort_index()
    class_count.index.set_names(column, inplace=True)
    counts = class_count.to_frame(name="num")
    return counts.assign(freq=lambda frame: frame["num"] / frame["num"].sum())
9d3c4185ce1a4096d75e0cc562ec955abd912e6f
125,415
def dunder_to_chained_attrs(value, key):
    """
    If key is of the form "foo__bar__baz", return value.foo.bar.baz.
    """
    node = value
    for attr in key.split('__'):
        node = getattr(node, attr)
    return node
89fd1e3b3252c9ddb367529f634d8155ae6ebb97
125,417
def atr(balance_df, financials_df):
    """Check whether the ATR (Asset Turnover Ratio) grew since the previous year.

    Explanation of ATR:
    https://www.investopedia.com/terms/a/assetturnover.asp

    balance_df = Balance Sheet of the specified company
    financials_df = Financial Statement of the specified company
    """
    def _cell(frame, label, col):
        # Row lookup by index label; columns are positional, 0 = latest.
        return frame.iloc[frame.index.get_loc(label), col]

    def _period_atr(period):
        # ATR = net sales / average total assets over the period.
        net_sales = _cell(financials_df, "Total Revenue", period)
        ending_assets = _cell(balance_df, "Total Assets", period)
        beginning_assets = _cell(balance_df, "Total Assets", period + 1)
        return net_sales / ((beginning_assets + ending_assets) / 2)

    return bool(_period_atr(0) > _period_atr(1))
c537f91812938fb1d389b9b4996faacdcb7fe5f2
125,419
def _get_title(video): """Get the title of a video (either from direct key or nested within summary)""" return video.get('title', video.get('summary', {}).get('title'))
77be9027e8620b85e1bf9538a639deb55308030d
125,424
def subtract_weight(distance, const=1.0):
    """
    Weight function: linear decay truncated by distance.

    :param distance: distance
    :type distance: float
    :param const: constant (truncation threshold)
    :type const: float
    :return: weight value
    """
    # Weight decays linearly with distance and is zero beyond the threshold.
    return const - distance if distance <= const else 0
671d1bc1aa8fef0d200b775739bdf22a46a70180
125,427
def to_int(value) -> int:
    """Convert a base-10 or base-16 string to int.

    Accepts an optional sign and either a '0x' or '0X' prefix for hex.
    The original only recognized a bare lowercase '0x' prefix, so
    '-0x10' and '0X10' raised ValueError; decimal strings behave exactly
    as before.
    """
    stripped = value.strip()
    # Look past any sign to find the hex prefix, case-insensitively.
    if stripped.lstrip('+-').lower().startswith('0x'):
        return int(stripped, base=16)
    return int(value)
7a7c3c0094aa7422f130acc8e1b8467add3a4833
125,430
def mergeCoords(A, B):
    """
    Takes two tuples and outputs two tuples, which will be identical if
    the originals overlap, otherwise will be the originals.

    A = (a1, a2), B = (b1, b2) and a1<=b1, a1<=a2, b1<=b2
    case 1: a2<=b1 ---> output originals (code 0)
    case 2: b1<a2 && b2>a2 ---> output (a1, b2) (code 1)
    case 3: b2<=a2 ---> output A (code 2)
    """
    assert (min(A) <= min(B)), (
        'tuples given to mergeCoords in wrong order: A={0}, B={1}').format(A,B)
    a_hi, b_lo, b_hi = max(A), min(B), max(B)
    if b_lo >= a_hi:
        # Disjoint (or merely touching) ranges: keep both as-is.
        return ((A, B), 0)
    if b_hi > a_hi:
        # Partial overlap: both become the merged span.
        merged = (min(A), b_hi)
        return ((merged, merged), 1)
    if b_hi <= a_hi:
        # B nested inside A: A covers everything.
        return ((A, A), 2)
    raise Exception(
        'Unexpected result from mergeCoords(A,B) using A={0}, B={1}'.format(
            A, B))
011234eb844a8def2a1c2a0e5b415e8c5bbd7741
125,439
from typing import Union
from typing import Callable
import torch


def get_activation(fn: Union[Callable, str]):
    """Resolve a PyTorch activation function specified directly or by name.

    This simplifies allowing users to specify activation functions by
    name: a string is looked up in torch.nn.functional, while a callable
    is returned unchanged.
    """
    if not isinstance(fn, str):
        return fn
    return getattr(torch.nn.functional, fn)
af66502925599653e7f7c577c7754e0323ee93d7
125,441
def normalize(_input):
    """Return string input converted to its corresponding type.

    Strings containing '-' are passed through unchanged; strings with a
    '.' become floats; everything else becomes an int.
    """
    if '-' in _input:
        return _input
    if '.' in _input:
        return float(_input)
    return int(_input)
5ba03bb04eb01609ccf0ce57843f47919dd23216
125,445
def get_page_id_from_url(path: str):
    """
    Extract the page id (the last path segment) from a URL path.

    Parameters
    ----------
    path: URL path

    Return value
    ------------
    page id
    """
    return path.rsplit('/', 1)[-1]
fb6510b123dab5e26c940583b558d3b6fee70b4d
125,450
import torch


def get_linear_scheduler(optimizer, num_epochs, last_epoch=-1):
    """Return a LambdaLR scheduler that follows a linear decay schedule.

    The multiplicative LR factor decreases from 1 at epoch 0 down to 0
    at epoch num_epochs.
    """
    return torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda epoch: 1 - epoch / num_epochs,
        last_epoch=last_epoch)
581f28c51e92883d07f259d673f951b26a4d42a1
125,451
def convert_schema_to_json(lists, labels):
    """
    Input:
        lists: non-empty list of n lists each of length x
        labels: list of strings of length n
    Output:
        list of x dictionaries with n entries, each row corresponding to
        a labelled row (merged from lists)
    """
    n_rows = len(lists[0])
    return [
        {label: lists[col][row] for col, label in enumerate(labels)}
        for row in range(n_rows)
    ]
d9fa4df851b77e98b8de653733d93698896e6b3c
125,453
def is_list_diff(list1, list2):
    """
    Check if 2 lists have some differences (symmetric membership test).

    Parameters
    ----------
    list1 : list
        First list to compare.
    list2 : list
        Second list to compare.

    Returns
    -------
    boolean
        True if lists have diffs. False if not.

    Notes
    -----
    Short-circuits on the first difference found; the original always
    scanned both lists to the end. Result is identical.
    """
    return (any(entry not in list2 for entry in list1)
            or any(entry not in list1 for entry in list2))
eec4e4469ab8f580cadca5a79c050efebc3b346b
125,455
def load_utt_list(utt_list):
    """Load a list of utterances.

    Args:
        utt_list (str): path to a file containing a list of utterances

    Returns:
        List[str]: list of utterances (blank lines are skipped)
    """
    with open(utt_list) as f:
        return [line.strip() for line in f if line.strip()]
ec825236acd3e510826ca8a70ac4afe03144560d
125,457
def get_resolution_order(json_data, target_name, order, level=0):
    """
    Return the order in which target descriptions are searched for
    attributes.

    This mimics the Python 2.2 method resolution order, which is what
    the old targets.py module used. For more details, check
    http://makina-corpus.com/blog/metier/2014/python-tutorial-understanding-python-mro-class-search-path

    The resolution order contains (name, level) tuples, where "name" is
    the name of the class and "level" is the level in the inheritance
    hierarchy (the target itself is at level 0, its first parent at
    level 1, its parent's parent at level 2 and so on).
    """
    already_seen = {name for name, _ in order}
    # The resolution order can't contain duplicate target names.
    if target_name not in already_seen:
        order.append((target_name, level))
    for parent in json_data[target_name].get("inherits", []):
        order = get_resolution_order(json_data, parent, order, level + 1)
    return order
996627634e1248184fa0ee931963f588d12dbb50
125,460
def code_gen(blocks):
    """
    From a list of L{CodeBlock} instances, return a string that executes
    them all in sequence. E.g. for C{(decl1, task1, cleanup1)} and
    C{(decl2, task2, cleanup2)} the returned string is of the form:

        decl1
        decl2
        {
        task1
        {
        task2
        cleanup2
        }
        cleanup1
        }

    Parameters:
    ----------
    blocks
        List of CodeBlock instances such that
        * declarations, behavior and cleanup are in the run() method of
          the struct
    """
    declarations = ''.join(block.declare for block in blocks)
    # Each behavior opens a nested brace scope...
    opening = ''.join("\n{\n%s" % block.behavior for block in blocks)
    # ...which is closed by the cleanups in reverse (innermost-first) order.
    closing = ''.join("%s\n}\n" % block.cleanup for block in reversed(blocks))
    return declarations + opening + closing
1246301297eb5c6681a60fd5db7800bb1535703c
125,461
def collect_all_methods(cls, method_name):
    """Return a list of all `method_name` methods for cls and its superclass chain.

    The list is in MRO order, with no duplicates; methods are unbound.
    (This is used to simplify mixins and subclasses that contribute to a
    method set, without requiring superclass chaining, and without
    requiring cooperating superclasses.)
    """
    _missing = object()
    collected = []
    for ancestor in cls.__mro__:
        # Sentinel default so an attribute that is literally None is
        # still collected, matching the original try/except semantics.
        method = getattr(ancestor, method_name, _missing)
        if method is not _missing and method not in collected:
            collected.append(method)
    return collected
2de52b6926404160adeee5bfcd56d2c65779a6a8
125,464
def cli(ctx, dataset_id):
    """Get the permissions for a dataset.

    Output:

        dictionary with all applicable permissions' values
    """
    # Thin CLI wrapper: delegates directly to the Galaxy client held on
    # the context. NOTE(review): `ctx.gi` is presumably a BioBlend-style
    # GalaxyInstance supplied by the CLI framework -- confirm against
    # the surrounding module.
    return ctx.gi.libraries.get_dataset_permissions(dataset_id)
664bbb2d5cbd4ba9f341c2b14e45fb106b8d7d18
125,472
import sqlite3
from contextlib import closing


def get_win_rate(num_moves=None, num_trials=None):
    """Get the win rate for a specified configuration of num_moves and num_trials.

    Args:
        num_moves (int, optional): The value for num_moves which counted
            trials must match. Defaults to None (no filter).
        num_trials (int, optional): The value for num_trials which
            counted trials must match. Defaults to None (no filter).

    Returns:
        float: The win rate (percent, 2 decimals) of all matching trials.
    """
    query = "SELECT AVG(did_win) FROM results"
    clauses, params = [], []
    # Filter on each argument independently, testing `is not None`
    # rather than truthiness: the original required BOTH filters and
    # silently ignored a legitimate value of 0.
    if num_moves is not None:
        clauses.append("num_moves = ?")
        params.append(num_moves)
    if num_trials is not None:
        clauses.append("num_trials = ?")
        params.append(num_trials)
    if clauses:
        query += " WHERE " + " AND ".join(clauses)
    # closing() releases the connection even when the query fails.
    with closing(sqlite3.connect("2048_AI_results.db")) as conn:
        win_rate = round(conn.execute(query, params).fetchone()[0] * 100, 2)
    print(f"WIN RATE: {win_rate}%")
    return win_rate
466d2bf1a6c3dd1d737564b0731d3067acdab417
125,474
def spans(pattern, string):
    """Return a list of span-tuples for matches of pattern in string."""
    result = []
    for match in pattern.finditer(string):
        result.append(match.span())
    return result
de8d14346a0f5241f108316cf2f998b31d2b2056
125,475
def console_sep(char='- ', length=40):
    """Return a separator line for console output.

    The original wrapped the repetition in ``''.join([char * length])``,
    a no-op join over a single element; plain string repetition is
    equivalent and clearer.
    """
    return char * length
50f129487af76e2e1c230aa9af8b716fc16d5a88
125,478
def pretty_version(release_str: str):
    """Return the major.minor prefix of a release string.

    >>> pretty_version('0.2.0-rc1')
    '0.2'
    """
    major_minor = release_str.split('.')[:2]
    return '.'.join(major_minor)
5f7548112c4e7546cf78ae66a0183a45d8210395
125,483
def clean_text(document_string):
    """
    Function that takes in a document in the form of a string, and
    pre-processes it, returning a clean string ready to be used to fit a
    CountVectorizer.

    Pre-processing includes:
        - lower-casing text
        - eliminating punctuation
        - dealing with edge case punctuation and formatting
        - replacing contractions with the proper full words

    :param: document_string: str
    :returns: cleaned_text: str
    :returns: words: list
    """
    # Make text lowercase
    raw_text = document_string.lower()
    # Replace encoding error (non-breaking space) with a space
    raw_text = raw_text.replace('\xa0', ' ')
    # Normalize period formatting
    raw_text = raw_text.replace('. ', '.')
    raw_text = raw_text.replace('.', '. ')
    # Replace exclamation point with a space
    raw_text = raw_text.replace('!', ' ')
    # Replace slashes with empty
    raw_text = raw_text.replace('/', '')
    # Replace question marks with empty
    raw_text = raw_text.replace('??', ' ')
    raw_text = raw_text.replace('?', ' ')
    # Replace dashes with space
    raw_text = raw_text.replace('-', ' ')
    raw_text = raw_text.replace('—', ' ')
    # Replace ellipses with empty
    raw_text = raw_text.replace('…', '')
    raw_text = raw_text.replace('...', '')
    # Replace = with 'equals'
    raw_text = raw_text.replace('=', 'equals')
    # Replace commas with empty
    raw_text = raw_text.replace(',', '')
    # Replace ampersand with and
    raw_text = raw_text.replace('&', 'and')
    # Replace semi-colon with empty
    raw_text = raw_text.replace(';', '')
    # Replace colon with empty
    raw_text = raw_text.replace(':', '')
    # Get rid of brackets
    raw_text = raw_text.replace('[', '')
    raw_text = raw_text.replace(']', '')
    # Replace parentheses with empty
    raw_text = raw_text.replace('(', '')
    raw_text = raw_text.replace(')', '')
    # Replace symbols with letters
    raw_text = raw_text.replace('$', 's')
    raw_text = raw_text.replace('¢', 'c')
    # Replace quotes with nothing
    raw_text = raw_text.replace('“', '')
    raw_text = raw_text.replace('”', '')
    raw_text = raw_text.replace('"', '')
    raw_text = raw_text.replace("‘", "")
    # Get rid of backslashes indicating contractions
    raw_text = raw_text.replace(r'\\', '')
    # Replace extra spaces with single space
    # NOTE(review): these two calls appear garbled in the source
    # (replace(' ', ' ') twice, a no-op); presumably they collapse
    # double spaces -- confirm against the original repository.
    raw_text = raw_text.replace('  ', ' ')
    raw_text = raw_text.replace('  ', ' ')
    # Some apostrophes are of a different type --> ’ instead of '
    raw_text = raw_text.replace("’", "'")
    # Replace contractions with full words, organized alphabetically
    raw_text = raw_text.replace("can't", 'cannot')
    raw_text = raw_text.replace("didn't", 'did not')
    raw_text = raw_text.replace("doesn't", 'does not')
    raw_text = raw_text.replace("don't", 'do not')
    raw_text = raw_text.replace("hasn't", 'has not')
    raw_text = raw_text.replace("he's", 'he is')
    raw_text = raw_text.replace("i'd", 'i would')
    raw_text = raw_text.replace("i'll", 'i will')
    raw_text = raw_text.replace("i'm", 'i am')
    raw_text = raw_text.replace("isn't", 'is not')
    raw_text = raw_text.replace("it's", 'it is')
    raw_text = raw_text.replace("nobody's", 'nobody is')
    raw_text = raw_text.replace("she's", 'she is')
    raw_text = raw_text.replace("shouldn't", 'should not')
    raw_text = raw_text.replace("that'll", 'that will')
    raw_text = raw_text.replace("that's", 'that is')
    raw_text = raw_text.replace("there'd", 'there would')
    raw_text = raw_text.replace("they're", 'they are')
    raw_text = raw_text.replace("there's", 'there are')
    raw_text = raw_text.replace("we'd", 'we would')
    raw_text = raw_text.replace("we'll", 'we will')
    raw_text = raw_text.replace("we're", 'we are')
    raw_text = raw_text.replace("we've", 'we have')
    # NOTE(review): "wouldn't" -> 'would have' looks like a latent bug
    # (expected 'would not'); preserved as-is, flag for the author.
    raw_text = raw_text.replace("wouldn't", 'would have')
    raw_text = raw_text.replace("you'd", 'you would')
    raw_text = raw_text.replace("you'll", 'you will')
    raw_text = raw_text.replace("you're", 'you are')
    raw_text = raw_text.replace("you've", 'you have')
    # Fix other contractions (possessives become "<word> is")
    raw_text = raw_text.replace("'s", ' is')
    cleaned_text = raw_text
    # Extract tokens: drop periods, then split on whitespace
    text_for_tokens = cleaned_text
    text_for_tokens = text_for_tokens.replace('.', '')
    words = text_for_tokens.split()
    return (cleaned_text, words)
4965047d64e805db265a14152d39473e74e06e29
125,489
def url_from_input(station_name, days, base_url):
    """
    Return a nicely formatted url.

    Arguments
    ---------
    station_name: str
        The name of the station. All lowercase.
    days: str
        The number of days.
    base_url: str
        The base of the url.

    Returns
    -------
    nice_url: str
        Nicely formatted url.
    """
    return '/'.join([base_url, station_name, days])
ce613141d47184f0d7b4d356feecc8d60c6f1fbe
125,490
import string
import random


def generate_random_password(length=64):
    """Generate a random password of upper-case characters and digits."""
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
f93b13d010bd1dbb7015c48f06ad880cd5f08d33
125,493
def hex2rgb(hex, max_val=1):
    """Convert a color code from ``hex`` to an ``rgb`` tuple.

    Only the last six hex digits are used (so a leading '#' is ignored);
    each channel is scaled into [0, max_val].
    """
    digits = hex[-6:]
    channels = []
    for i in range(3):
        byte = int(digits[i * 2:(i + 1) * 2], 16)
        channels.append(byte / 255 * max_val)
    return tuple(channels)
4c55f058da0a3e877896386982506e95a9827a65
125,495
def weight_param(name, learn_all=True):
    """
    Create a named param dict for the weights of a conv/fc layer.

    Example of a named param for weights:

        param { name: "conv1_w" lr_mult: 1 decay_mult: 1 }

    :param name: str
    :param learn_all: bool. If True, the weights of that layer are
        modified during the training process; otherwise they are frozen.
    :returns: dict with params
    """
    mult = 1 if learn_all else 0
    return {'name': name, 'lr_mult': mult, 'decay_mult': mult}
b770ccb9c50b763df711835d82b0483e10c094b7
125,501
import re


def xml_id_display_align_before(text):
    r"""
    displayAlign="before" means the current sub will be displayed on
    top, not at the bottom. Return the xml:id associated with such a
    region so an {\an8} position tag can be emitted in the output file;
    return an empty string when no such region exists.
    """
    pattern = re.compile(
        u'<region.*tts:displayAlign=\"before\".*xml:id=\"(.*)\"/>')
    match = pattern.search(text)
    return match.group(1) if match else u""
a4ce9c302dff14d8869c58ee939bb687eac7c351
125,515
import torch


def create_optimizer(model, name, **kwargs):
    """
    Create an optimizer for the given model.

    Args:
        model: nn.Module whose parameters will be optimized
        name: Name of the optimizer to be used ('adagrad', 'adam', 'sgd')

    Returns:
        torch.optim.Optimizer instance for the given model

    Raises:
        ValueError: if `name` does not identify a known optimizer.
    """
    factories = {
        "adagrad": torch.optim.Adagrad,
        "adam": torch.optim.Adam,
        "sgd": torch.optim.SGD,
    }
    if name not in factories:
        raise ValueError("Optimizer \"{}\" undefined".format(name))
    return factories[name](model.parameters(), **kwargs)
dbdef388c4ca8c21f09c2596cd72bad8a54ced4b
125,521
import random


def generate_random_key(length):
    """Generate a random URL-safe key with the given length."""
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '1234567890'
                '-_')
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
5f01bbbe7df7953edbd567573c347918c1ecef0c
125,523
def create_dupe_index(df, ind_name):
    """
    Create a duplicate column (named '<ind_name>_back') in the input
    DataFrame from the input column name.

    Parameters:
        df (pandas DataFrame): DataFrame to create the duplicate column in
        ind_name (str): Name of the column to duplicate

    Returns:
        df (pandas DataFrame): DataFrame with the new duplicate column
    """
    backup_name = '{}_back'.format(ind_name)
    df[backup_name] = df[ind_name]
    return df
3e5feb072047714c2c35782b2555fa515c18cd6e
125,525
def add_common_parser_args(parser):
    """Parse CLI common login and repo arguments."""
    parser.add_argument(
        'repository',
        help="Repository name to generate the Changelog for, in the form "
             "user/repo or org/repo (e.g. spyder-ide/spyder)")
    # (short flag, long flag, dest, help) for the common string options;
    # all default to the empty string.
    option_specs = [
        ('-u', '--username', 'username', "Github user name"),
        ('-p', '--password', 'password', "Github user password"),
        ('-t', '--token', 'token', "Github access token"),
        ('-zt', '--zenhub-token', 'zenhub_token', "Zenhub access token"),
    ]
    for short_flag, long_flag, dest, help_text in option_specs:
        parser.add_argument(
            short_flag, long_flag, action="store", dest=dest,
            default='', help=help_text)
    return parser
c7986a2d08b62cd3c146d713ffbe47fb1180a205
125,526
def calc_color_percent(min, max, temp):
    """
    Calculate the fraction of the color range that the temperature is
    in, clamped to [0, 1].

    Arguments:
        min - minimum temperature
        max - maximum temperature
        temp - temperature
    """
    # NOTE: the parameter names shadow the builtins min/max, so the
    # builtin clamping helpers are unavailable here; names are kept for
    # interface compatibility with existing keyword-argument callers.
    fraction = (temp - min) / (max - min)
    if fraction > 1:
        return 1
    if fraction < 0:
        return 0
    return fraction
754bb9def42d17c870acd86d81506e97e2953917
125,527
import torch


def compute_score(logits, labels):
    """Compute the VQA soft score earned by the argmax prediction.

    Ground truth soft scores for labels are calculated as per:
    https://github.com/hengyuan-hu/bottom-up-attention-vqa/blob/master/tools/compute_softscore.py#L80
    and http://visualqa.org/evaluation.html

    Args:
        logits: Predicted (pre-sigmoid) score for each answer candidate,
            shape (batch size, answer vocabulary size).
        labels: Ground-truth soft scores in [0, 1] for each candidate,
            shape (batch size, answer vocabulary size).

    Returns:
        Tensor of shape (batch size, answer vocabulary size); each row is
        all zeros except at the predicted answer's index, which holds that
        answer's soft score.

    Fix: the one-hot buffer is now allocated with ``torch.zeros_like(labels)``
    instead of ``torch.zeros(...).cuda()``, so the function works on CPU as
    well as GPU and always matches the device/dtype of ``labels``.
    """
    # Index of the highest-scoring answer for each sample: shape (batch,).
    predicted = torch.max(logits, 1)[1].data
    # One-hot encode the predicted answer on the same device as labels.
    one_hots = torch.zeros_like(labels)
    one_hots.scatter_(1, predicted.view(-1, 1), 1)
    # Keep only the soft score at the predicted index.
    scores = one_hots * labels
    return scores
37dc135e998bbb4116ae93c322ff6ea857411c31
125,528
def lim_growth(x, s, b0, k):
    """
    Limited-growth curve ``s - (s - b0) * k**x``.

    Implemented as a raw function so it can be reused in closures,
    curve fits, inlining, etc.

    Parameters
    ----------
    x : float, int, np.ndarray
        Point(s) at which to evaluate the curve.
    s : float
        Limit (asymptote) of the growth function.
    b0 : float
        Starting value; values of 0 are **NOT RECOMMENDED**.
    k : float
        Curvature parameter.

    Returns
    -------
    float, np.ndarray
        Value at point `x`.
    """
    remaining_gap = (s - b0) * k ** x
    return s - remaining_gap
0193829f19ee077274e5e60b9fafccb6d8998a84
125,531
def group(items, n):
    """Group a sequence into n-tuples; trailing leftovers are dropped."""
    # Take every n-th element starting at each offset, then zip the
    # n stride-slices back together column-wise.
    stride_slices = [items[offset::n] for offset in range(n)]
    return list(zip(*stride_slices))
f14762a410b498365d26eef499f16dcd7b31e8ad
125,532
def flatten(seq):
    """
    Returns a list of the contents of seq with sublists and tuples
    "exploded".  The resulting list contains no nested sequences; all
    inner lists/tuples are expanded, at any depth.  For example:

    >>> flatten([7,(6,[5,4],3),2,1])
    [7, 6, 5, 4, 3, 2, 1]
    """
    out = []
    # Iterative depth-first walk with an explicit stack (popped from the
    # end, so children are pushed reversed to preserve left-to-right order).
    pending = list(seq)
    pending.reverse()
    while pending:
        item = pending.pop()
        if isinstance(item, (list, tuple)):
            pending.extend(reversed(item))
        else:
            out.append(item)
    return out
dddf5d176a083a62566b61abf8587abd75a83af2
125,533
def _match_single_glob_tokens(path_tokens, prefix_tokens): """If the prefix matches the path (anchored at the start), returns the segment of the path tokens that matched -- or None if no match. The arguments are lists of strings, with an implied "/" between elements. The token "*" must match exactly one path token. The token "**" is not allowed. """ if len(prefix_tokens) > len(path_tokens): # The prefix is too long ==> no match. return None # Check the tokens pairwise (stopping at the shorter of the two lists). for prefix, path in zip(prefix_tokens, path_tokens): if prefix == "*": # The "*" matches anything. continue if prefix != path: # Mismatch. return None # Successful match. return path_tokens[:len(prefix_tokens)]
f01c9c2bfbe03f64f4a48322af8b2d71be0fdcb4
125,535
def parse_type(_type):
    """Parse a type name string into the corresponding builtin type.

    A value that is already a ``type`` is returned unchanged; otherwise
    "integer", "real" and "string" map to int, float and str.

    Raises:
        NotImplementedError: for any unrecognized type name.
    """
    if isinstance(_type, type):
        return _type
    name_to_type = {"integer": int, "real": float, "string": str}
    if _type in name_to_type:
        return name_to_type[_type]
    raise NotImplementedError()
4e4773cf9c7255f9f054da4b5243772e84321197
125,537
import torch


def change(gt, priors):
    """
    Compute the d_change metric proposed in Box2Pix:
    https://lmb.informatik.uni-freiburg.de/Publications/2018/UB18/paper-box2pix.pdf

    Input should be in point form (xmin, ymin, xmax, ymax).
    Output is of shape [num_gt, num_priors].

    Note this returns -change so it can be a drop-in replacement for
    an IoU-style "larger is better" similarity.
    """
    num_gt, num_priors = gt.size(0), priors.size(0)

    # Per-gt box width/height, broadcast across all priors.
    widths = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)
    heights = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)

    expanded_gt = gt[:, None, :].expand(num_gt, num_priors, 4)
    expanded_pr = priors[None, :, :].expand(num_gt, num_priors, 4)
    diff = expanded_gt - expanded_pr

    # Normalize x-coordinates by width and y-coordinates by height
    # (columns are xmin, ymin, xmax, ymax).
    scale = torch.stack((widths, heights, widths, heights), dim=2)
    normalized = diff / scale

    return -torch.sqrt((normalized ** 2).sum(dim=2))
7ebbb3fcb5b8e8cff909482901746945ed903242
125,540
def nD0_thermal(N, kT, gamma, L):
    """
    Product of particle density n = N/L^2 and diffusion constant
    D_0 = 2*kT/gamma, i.e. n D_0 = 2*kT*N / (gamma*L^2).

    Parameters
    ----------
    N : int or float
        Number of particles.
    kT : float
        Dimensionless temperature.
    gamma : float
        Brownian dumping coefficient.
    L : float
        Characteristic system length.

    Returns
    -------
    product : float
        n D_0
    """
    numerator = 2 * kT * N
    denominator = gamma * L ** 2
    return numerator / denominator
b6560fb5682a8f4f85212f6cfe665595a0876766
125,542
import inspect


def origin_fn_get(depth=1, sep=":"):
    """Return "<function name><sep><line number>" for a frame on the call
    stack.

    ``depth=1`` (the default) describes the caller of this function;
    ``depth=0`` describes this function itself.
    """
    caller_frame = inspect.stack()[depth][0]
    fn_name = caller_frame.f_code.co_name
    line_no = caller_frame.f_lineno
    return "{}{}{}".format(fn_name, sep, line_no)
290100d7c10efe703a115ec8065fe36ce79874e8
125,545
def _escapefun(matchobj): """Callback to interpret an escape sequence""" s = matchobj.group() c = s[1] i = "bfnrt".find(c) if i >= 0: c = "\b\f\n\r\t"[i] elif c == 'x': c = chr(int(s[2:], 16)) elif c in '01234567': c = chr(int(s[1:], 8)) return c
b2a6760913d0479dec7422267d77f14c143b02e0
125,549
def addresses_for_key(gpg, key):
    """
    Takes a key and extracts the email addresses for it.

    Looks the key up by fingerprint in ``gpg.list_keys().key_map`` and
    pulls the address out of each non-empty uid string of the form
    "Name <address>".
    """
    uid_strings = gpg.list_keys().key_map[key['fingerprint']]["uids"]
    emails = []
    for uid in uid_strings:
        if not uid:
            continue
        emails.append(uid.split("<")[-1].strip(">"))
    return emails
5b916f8bb2a66fb8b480d158ea4667bc1b4a68df
125,553
def this(self, x):
    """Identity helper: return the input unchanged."""
    result = x
    return result
f548d23956841bf35c485d0fe44b58430bedf2fa
125,560
def constant_output(return_val=None, *args, **kwargs):
    """Return a constant value regardless of the remaining inputs.

    Meant to be specialized with functools.partial:

    >>> from functools import partial
    >>> always_true = partial(constant_output, True)
    >>> always_true('regardless', 'of', the='input', will='return True')
    True
    """
    # All positional/keyword arguments are deliberately ignored.
    del args, kwargs
    return return_val
77dcd9a5af52a9412149fe00967e4489791bb124
125,561
import builtins


def is_text(value):
    """Indicate if object is text-like.

    future.utils.istext is deprecated, so re-implementing, as per their
    recommendation.
    """
    text_type = builtins.str
    return isinstance(value, text_type)
184ed7a8290d7389721e4fa4d422b7ba87f563be
125,564
import math


def latlng2tile_google(lat_deg, lng_deg, z):
    """
    Convert latitude, longitude and zoom into (x, y) tile coordinates.

    Referencing
    http://www.cnblogs.com/Tangf/archive/2012/04/07/2435545.html and
    https://blog.csdn.net/mygisforum/article/details/7582449

    :param lat_deg: latitude in degree
    :param lng_deg: longitude in degree
    :param z: map scale (0-18)
    :return: tile numbers in the x axis and y axis (floats)
    :raises ValueError: if latitude is outside the Web-Mercator range.
    """
    # Web-Mercator is undefined at/beyond +-85.05112877980659 degrees.
    if lat_deg >= 85.05112877980659 or lat_deg <= -85.05112877980659:
        raise ValueError('wmts latitude error lat')
    tiles = 2.0 ** z
    lat_rad = math.radians(lat_deg)
    x = (lng_deg + 180.0) / 360.0 * tiles
    y = (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * tiles
    return x, y
01cef0038203da98728814009fd48f8209d97459
125,565
def has_navigation(node, key=None):
    """Return ``True`` if the node has a non-empty :class:`.Navigation`
    with the given key and ``False`` otherwise.  If ``key`` is ``None``,
    returns whether the node has any :class:`.Navigation`\\ s at all.

    Fix: the bare ``except:`` swallowed every exception, including
    KeyboardInterrupt and SystemExit; it is narrowed to the exceptions a
    failed attribute/subscript lookup can actually raise.
    """
    try:
        return bool(node.navigation[key])
    except (AttributeError, KeyError, IndexError, TypeError):
        # No navigation attribute, no entry for key, or not subscriptable.
        return False
2ad53aa299c3a335aa99304b984827e868858930
125,569
def _compute_scale(dimension, spread, special_scale): """See BFaS; p. 83. Parameters ---------- dimension: int Spatial dimensionality of state space model spread: float Spread of sigma points around mean (1; alpha) special_scale: float Spread of sigma points around mean (2; kappa) Returns ------- float Scaling parameter for unscented transform """ return spread ** 2 * (dimension + special_scale) - dimension
7c7f4226bb1cb6ad0e8c00d610c7284976576d48
125,573
def generate_conanfile_txt(requires, build_requires, generators) -> str:
    """Generate contents of a ``conanfile.txt``.

    Each non-empty argument produces one section (``[requires]``,
    ``[build_requires]``, ``[generators]``) with one entry per line.

    Fixes two bugs in the original:
    * the ``[generators]`` header was emitted without a trailing newline,
      gluing it to the first generator entry;
    * the generators section was skipped whenever both requirement lists
      were empty (``if text and generators``).
    """
    text = ''
    if requires:
        text += '[requires]\n'
        text += ''.join(f'{line}\n' for line in requires)
    if build_requires:
        text += '[build_requires]\n'
        text += ''.join(f'{line}\n' for line in build_requires)
    if generators:
        text += '[generators]\n'
        text += ''.join(f'{line}\n' for line in generators)
    return text
61140b38c595bcdf63a5e4ceba3672535419c221
125,575
def hours_time_string(hours):
    """Convert a number of hours (float) into a string with format '%H:%M'."""
    total_minutes = int(round(hours * 60))
    whole_hours, minutes = divmod(total_minutes, 60)
    return "%02d:%02d" % (whole_hours, minutes)
a88662a019748081c2c3c991f11a82dc327bff77
125,577
def fib_recursive(position):
    """Return the Fibonacci number at *position* (F(0)=0, F(1)=1).

    Returns -1 for negative positions, matching the original contract.

    Fix: the naive double recursion ran in exponential time (and hit the
    recursion limit for large positions); this iterative version is O(n)
    while keeping the name and interface unchanged.
    """
    if position < 0:
        return -1
    previous, current = 0, 1
    for _ in range(position):
        previous, current = current, previous + current
    return previous
c5a548b37e70859b9725c873a8fd55875ccf5e0e
125,578
from typing import Dict def _dict2h5attr(data: Dict[str, str]) -> str: """Convert a dictionary to a string that can be stored as an HDF5 attribute""" return f"dict {str(data)}"
abdbfc3eea71acbf9f6989f21d6b069004ace5ee
125,583
def get_angle_unit_data(sum_df, **kwargs):
    """
    Get angle unit information from measured target positions.

    Input:
    - sum_df: DataFrame. processed DataFrame that contains both bundle
      heel and target info and growth cone length and angle info.
    - kwargs:
        - 'criteria': DataFrame/Series of booleans selecting which
          bundles to include in the calculation.

    Output:
    - phi_unit: radian value of "1" in the standardized coordinate,
      i.e. half the mean of the 'aT3cT7' column.
    """
    if 'criteria' in kwargs:
        sum_df = sum_df.loc[kwargs['criteria'], :]
    return sum_df['aT3cT7'].mean() / 2
5ec2d1c3d0ed819d3322258f1eae347424fbb7d2
125,589
import mimetypes


def is_video(file_path):
    """Check whether the file path has a video MIME type.

    Returns a falsy value (None) when the type cannot be guessed.
    """
    # Renamed the local that previously shadowed the builtin ``type``.
    mime_type, _encoding = mimetypes.guess_type(file_path)
    return mime_type and mime_type.startswith('video')
93c44492816f3156d1b899750ec1551379ae4bef
125,591
def _find_key_cols(df): """Identify columns in a DataFrame that could be a unique key""" keys = [] for col in df: if len(df[col].unique()) == len(df[col]): keys.append(col) return keys
08b04517b2c5c5eded24bb7e096416e4bac67bc7
125,596
def _parse_svg_color(color): """Return RGB integers from Inkscape color. @type color: `str` @rtype: `tuple` (triplet) """ if color[0] != '#': raise Exception('only hash-code colors are supported!') red = int(color[1:3], 16) green = int(color[3:5], 16) blue = int(color[5:7], 16) return (red, green, blue)
050a71a51995061e9fa624ab801af97e466eb68a
125,597
def check_annot(df, img_name):
    """Return True if *df* contains at least one annotation row whose
    FRAME column equals *img_name*, False otherwise."""
    matches = df.loc[df['FRAME'].isin([img_name])]
    return not matches.empty
df518ca9d8970a4eebbbe75bdde15ee84167c70a
125,598
def string_to_bool(_value):
    """Converts "True" to True, None (or anything else) to False.
    Case-insensitive."""
    return _value is not None and _value.lower() == "true"
07a6639d4fb2a90fdb2040a0e6d22438d4532167
125,599
def GetAtxCertificateSubject(cert):
    """Parses and returns the subject field from the given
  AvbAtxCertificate struct."""
    # The 4-byte format version and 1032-byte public key precede the
    # 32-byte subject field.
    subject_start = 4 + 1032
    subject_end = subject_start + 32
    return cert[subject_start:subject_end]
ef4b21b39a8101e26fd72007f857b98ef59dbb65
125,600
import math


def exp_ce(x: float, a: float = 1) -> float:
    """Certainty equivalent: inverse of exp_utility with risk aversion *a*."""
    log_term = math.log(1 - x)
    return -log_term / a
224164fae4d21c4dcba9d19238ab8bba74efd470
125,602
def parse(puzzle_input):
    """Parse puzzle input (one integer per line) into a list of ints."""
    return list(map(int, puzzle_input.splitlines()))
e972366448d47959e5f5ed81d651d0bdcba2c217
125,603
def filter_out(orig_list, *unwanted, INV=False):
    """
    Returns a new list from orig_list with unwanted removed.
    Also supports re.search() by inputting a re.compile('patrn')
    among unwanted.

    With INV falsy (default), a word is kept when NO unwanted entry
    matches it; with INV truthy, a word is kept only when ALL unwanted
    entries match it.
    """
    def matches(word, pattern):
        # Regex compare when pattern exposes .search, else string compare.
        try:
            return bool(pattern.search(word))
        except AttributeError:
            return pattern == word

    kept = []
    for word in orig_list:
        if INV:
            keep = all(matches(word, p) for p in unwanted)
        else:
            keep = not any(matches(word, p) for p in unwanted)
        if keep:
            kept.append(word)
    return kept
fbf0be7ef8848ea0368f134748c966a420b80575
125,606
def wilight_to_hass_saturation(value):
    """Convert wilight saturation 1..255 to hass 0..100 scale."""
    scaled = round((value * 100) / 255, 3)
    # Cap at 100 in case of out-of-range input.
    return min(100, scaled)
048f4b5a210835591a70c5c1c2c844be7f7e962f
125,610
from typing import List


def get_columns(rows: List[List[str]]) -> List[List[str]]:
    """Transpose data rows into columns.

    Assumes every row has (at least) as many elements as the first row.
    """
    columns = []
    for j in range(len(rows[0])):
        columns.append([row[j] for row in rows])
    return columns
d7fece89fcf5b1a72f2cece8a3500c991d45ecf8
125,611
def growth_calculation(val1, val2, t1, t2):
    """Calculate compound annual growth rate.

    Args:
        val1: float. Current value.
        val2: float. Value from base year.
        t1: float. Current year.
        t2: float. Base year.

    Returns:
        float: Growth value, ``(val1/val2) ** (1/(t1-t2)) - 1``.
    """
    ratio = val1 / val2
    years = t1 - t2
    return ratio ** (1 / years) - 1
3ec45f2d3abd0a1c55fdeacbff3aeccdfab6562a
125,619
def getCC(codelChooser: int) -> str:
    """
    Map a codel chooser value to its direction string.

    :param codelChooser: input codel chooser
    :return: 'l' when the chooser is 0, otherwise 'r'
    """
    return 'l' if codelChooser == 0 else 'r'
273133fd32fd5d802311e7d027790b2ce331c9e3
125,625
# Galactic extinction coefficients per band; see
# https://www.legacysurvey.org/dr9/catalogs/#galactic-extinction-coefficients
_EXT_COEFFS = {"G": 3.214, "R": 2.165, "Z": 1.211, "W1": 0.184, "W2": 0.113}


def get_ext_coeffs(band):
    """
    Returns the extinction coefficient for a given band.

    Args:
        band: band name: "G", "R", "Z", "W1", or "W2" (string)

    Returns:
        ext: extinction coefficient (float)

    Raises:
        KeyError: if the band name is unknown.
    """
    return _EXT_COEFFS[band]
a8bf8f8a340de6ce04dc6a5d8d51a1f0937c6610
125,627
def parse_job_line(line):
    """Parse one cron-style job line into a 4-tuple, defaulting the
    script to 'collect.py' when none is given.

    >>> parse_job_line("* * * *,myquery,mycredentials\\n")
    ('* * * *', 'myquery', 'mycredentials', 'collect.py')
    >>> parse_job_line("* * * *,myquery,mycredentials,scripts/foo.py\\n")
    ('* * * *', 'myquery', 'mycredentials', 'scripts/foo.py')
    """
    fields = line.strip().split(',')
    if len(fields) == 3:
        fields = fields + ['collect.py']
    return tuple(fields)
8a83a3a5721e9e9b15cae7cd438bf505e776b38f
125,629