def _get_default_annual_spacing(nyears):
    """
    Returns a default spacing between consecutive ticks for annual data.
    """
    if nyears < 11:
        (min_spacing, maj_spacing) = (1, 1)
    elif nyears < 20:
        (min_spacing, maj_spacing) = (1, 2)
    elif nyears < 50:
        (min_spacing, maj_spacing) = (1, 5)
    elif nyears < 100:
        (min_spacing, maj_spacing) = (5, 10)
    elif nyears < 200:
        (min_spacing, maj_spacing) = (5, 25)
    elif nyears < 600:
        (min_spacing, maj_spacing) = (10, 50)
    else:
        factor = nyears // 1000 + 1
        (min_spacing, maj_spacing) = (factor * 20, factor * 100)
    return (min_spacing, maj_spacing)
ddc98001e9324f2486f141b86f285ac48b726dd2
48,558
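A quick usage sketch for _get_default_annual_spacing above; the year counts are hypothetical inputs chosen for illustration.

# (minor, major) tick spacing for time spans of various lengths
print(_get_default_annual_spacing(15))    # -> (1, 2)
print(_get_default_annual_spacing(250))   # -> (10, 50)
print(_get_default_annual_spacing(2500))  # -> (60, 300), since factor = 2500 // 1000 + 1 = 3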
import torch


def get_attention(model, tokenizer, text, include_queries_and_keys=False):
    """Compute a representation of the attention to pass to the d3 visualization.

    Args:
        model: pytorch_transformers model
        tokenizer: pytorch_transformers tokenizer
        text: Input text
        include_queries_and_keys: Whether to include queries/keys in the results

    Returns:
        Dictionary with a single key 'all' whose value has the structure:
        {
            'left_text': list of source tokens, displayed on the left of the vis
            'right_text': list of target tokens, displayed on the right of the vis
            'attn': list of attention matrices, one per layer. Each has shape
                (num_heads, source_seq_len, target_seq_len)
            'queries' (optional): list of query vector arrays, one per layer.
                Each has shape (num_heads, source_seq_len, vector_size)
            'keys' (optional): list of key vector arrays, one per layer.
                Each has shape (num_heads, target_seq_len, vector_size)
        }
    """
    # Prepare inputs to the model
    token_ids = tokenizer.encode(text)
    tokens = [tokenizer.decode([t]).strip() for t in token_ids]
    tokens_tensor = torch.tensor(token_ids).unsqueeze(0)

    # Call the model to get the attention data
    model.eval()
    _, _, attn_data_list = model(tokens_tensor)

    # Format the attention data for the visualization
    all_attns = []
    all_queries = []
    all_keys = []
    for attn_data in attn_data_list:
        attn = attn_data['attn'][0]  # assume batch_size=1; shape = (num_heads, seq_len, seq_len)
        all_attns.append(attn.tolist())
        if include_queries_and_keys:
            queries = attn_data['queries'][0]  # assume batch_size=1; shape = (num_heads, seq_len, vector_size)
            all_queries.append(queries.tolist())
            keys = attn_data['keys'][0]  # assume batch_size=1; shape = (num_heads, seq_len, vector_size)
            all_keys.append(keys.tolist())
    results = {
        'attn': all_attns,
        'left_text': tokens,
        'right_text': tokens,
    }
    if include_queries_and_keys:
        results.update({
            'queries': all_queries,
            'keys': all_keys,
        })
    return {'all': results}
082d7f7b6f7036d1691dba120c07cf7d97443c1b
242,404
def macd(df, short_long_signals=[(12, 26, 9)]):
    """Moving Average Convergence Divergence.

    Parameters
    ----------
    df : pandas.DataFrame, must include column 'Close'
        Dataframe the MACD is extracted from.
    short_long_signals : list, default [(12, 26, 9)]
        List of (short_ema, long_ema, signal) period tuples for which the
        Moving Average Convergence Divergence is extracted.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with Moving Average Convergence Divergence columns added.
    """
    df = df.copy()
    for (short, long, signal) in short_long_signals:
        df[f"EMA{short}"] = df["Close"].ewm(span=short, adjust=False).mean()
        df[f"EMA{long}"] = df["Close"].ewm(span=long, adjust=False).mean()
        df[f"MACD{long}"] = df[f"EMA{short}"] - df[f"EMA{long}"]
        df[f"MACD{long}Signal"] = df[f"MACD{long}"].ewm(span=signal, adjust=False).mean()
        df = df.drop(columns=[f"EMA{short}", f"EMA{long}"])
    return df
890e6014c7603d3ae274411928500fd48f78085b
498,863
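A minimal usage sketch for macd above, assuming pandas is installed; the price series is made up for illustration.

import pandas as pd

prices = pd.DataFrame({"Close": [10.0, 10.5, 10.2, 10.8, 11.1, 10.9]})
out = macd(prices)  # default periods (12, 26, 9)
print(out.columns.tolist())  # -> ['Close', 'MACD26', 'MACD26Signal']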
def replication(samples):
    """
    Deterministically replicate samples.

    :param samples: A list in which each element again is a list containing
        the sample and the number of times the sample needs to be replicated,
        e.g.: samples = [(x1, 2), (x2, 0), ..., (xM, 1)]
    :return: list of replicated samples: [x1, x1, ..., xM]
    """
    # A perhaps more understandable way to solve this could be:
    #   replicated_samples = []
    #   for m in range(1, len(samples)+1):
    #       xk, Nk = samples[m-1]
    #       for unused in range(1, Nk+1):
    #           replicated_samples.append(xk)
    #   return replicated_samples
    # Same result: repeat each state (denoted by s[0]) Nk times (denoted by s[1])
    return [l for s in samples for l in s[1] * [s[0]]]
38f4bcddcfaa1e5bdba2112a9481ceaa9da51d48
410,161
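A short demonstration of replication above with hypothetical samples:

samples = [("a", 2), ("b", 0), ("c", 1)]
print(replication(samples))  # -> ['a', 'a', 'c']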
from datetime import datetime def from_timestamp(timestamp: str) -> datetime: """Parses the raw timestamp given by the API into a :class:`datetime.datetime` object.""" return datetime.strptime(timestamp, "%Y%m%dT%H%M%S.000Z")
81d3c0a5297fa0053ae9a951147fac6f46907956
693,067
def read_text_from_file(path_to_text_file):
    r"""Reads the text file and returns the whole content as a single string.

    Args:
        path_to_text_file (str)

    Returns:
        str: content of the text file as a single string, with '\n' used as
        the line separator. Any '\n' in the text is therefore part of the
        returned string and will be Huffman-coded along with it.
    """
    with open(path_to_text_file, "r") as file:
        array_of_lines = [line.strip('\n') for line in file.readlines()]
    line_separator = "\n"
    text = line_separator.join(array_of_lines)  # Join all read lines into one string
    return text
923befb88b04eb7f36ba52bca1d010fb39d094f4
208,862
from calendar import timegm


def toMillis(date):
    """Converts a Date object to its millisecond value elapsed since
    January 1, 1970, 00:00:00 UTC (GMT).

    Args:
        date (Date): The date object to convert.

    Returns:
        int: 8-byte integer representing the number of milliseconds elapsed
        since January 1, 1970, 00:00:00 UTC (GMT).
    """
    # timegm interprets the time tuple as UTC, matching the docstring;
    # time.mktime would interpret it as local time.
    millis = timegm(date.timetuple()) * 1000 + date.microsecond // 1000
    return int(millis)
12d61ccc39ca38bf1155ef79185e5ba46a0e3698
469,858
def find_dict(l, key, value):
    """Find the dict d in list l where d[key] matches value.

    Return its index, or None if no dict matches.
    """
    for i, d in enumerate(l):
        if d[key] == value:
            return i
    return None
1f7b379c01f544cd72781959cda671ff72af252a
79,546
from datetime import datetime


def date_from_timestamp(ts):
    """
    Returns a tuple containing (date, time) strings for a given Unix timestamp.
    """
    dt = datetime.utcfromtimestamp(ts)
    day = dt.strftime('%Y-%m-%d')
    tod = dt.strftime('%H:%M:%S')
    return (day, tod)
ceddfe2e1ff5d8efbf559a270f7a6aa4fb963f17
312,677
def snake_to_camel(snake_str: str, delimiter: str = '_') -> str:
    """
    Converts snake_case to camelCase.

    Useful for projects where different languages like NodeJS are used and you
    have to transform your interchange format from snake_case into camelCase.

    Args:
        snake_str (str): The snake_case string to convert to camelCase.
        delimiter (str): The delimiter to use for the snake_case. Defaults to '_'.

    Returns:
        str: The camelCase representation of the passed snake_case string.

    Example:
        >>> snake_to_camel('the_id')
        'theId'
        >>> snake_to_camel('The_id')
        'theId'
        >>> snake_to_camel('Theid')  # Not snake_case -> as is
        'Theid'
        >>> snake_to_camel('snake_CasE')
        'snakeCase'
        >>> snake_to_camel('camelCase')  # Already camelCase -> as is
        'camelCase'
        >>> snake_to_camel('')
        ''
        >>> snake_to_camel(None)  # Not snake_case -> as is
        'None'
        >>> snake_to_camel('snake.CasE', '.')
        'snakeCase'
    """
    init, *rest = str(snake_str).split(str(delimiter))
    if not rest:
        return init  # Leave it as it is. Probably not snake_case.
    return ''.join([init.lower(), *map(str.title, rest)])
ff144a99486246cbad7e358fd715a1e743dfdaa1
401,809
import random def shuffle(coll): """ Return a random permutation of ``coll``. Not lazy. """ coll = coll[:] random.shuffle(coll) return coll
ca714cca1b45a2e71501bf796b21ad5d19a848aa
313,713
def test_dir_type(arg): """Parse a directory argument for the test command.""" if ',' in arg: arg = arg.split(',') return (arg[0], [int(i) for i in arg[1:]]) else: return (arg, [None])
e30b18800f655c2c73b0642a5e62e5a52b5125c5
224,430
def moveZeroes(nums):
    """
    Move all zeros to the end of the given array (nums) while keeping the
    order of the other elements. Modifies nums in place; the list is also
    returned for convenience.
    """
    # An alternative is to pop each zero and append it at the end, but that
    # is O(n^2). O(n) solution: swap every non-zero element forward to the
    # slot right after the last placed non-zero element.
    last_zero = 0
    for i in range(len(nums)):
        if nums[i] != 0:
            nums[i], nums[last_zero] = nums[last_zero], nums[i]
            last_zero += 1
    return nums
a0830af05d8d51ba4289279c038d7f3e09ffcda8
272,377
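A quick in-place usage sketch for moveZeroes above:

nums = [0, 1, 0, 3, 12]
moveZeroes(nums)
print(nums)  # -> [1, 3, 12, 0, 0]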
import re def satisfy_label_requirements(k, v): """Takes in a key and value and returns (String, String) that satisfies the label text requirements. Label text requirements include max length of 63 chars, only allowing (a-z, 0-9, -, _), key must start with a letter, and value must be a string. https://cloud.google.com/deployment-manager/docs/creating-managing-labels#requirements Arguments: k: stringify-able input v: stringify-able input Returns: (String, String) that satisfies the label text requirements for key and value """ LABEL_MAX_LENGTH = 63 ALLOWED_CHARS_COMPLEMENT = r'[^a-z0-9-_]+' KEY_ALLOWED_STARTING_CHARS_COMPLEMENT = r'^[^a-z]*' new_k = str(k).lower() new_k = re.sub(KEY_ALLOWED_STARTING_CHARS_COMPLEMENT, '', new_k) # make sure first char of key is a lowercase letter new_k = re.sub(ALLOWED_CHARS_COMPLEMENT, '--', new_k) # remove each group of illegal characters and replace with '--' new_k = new_k[0:LABEL_MAX_LENGTH] new_v = str(v).lower() new_v = re.sub(ALLOWED_CHARS_COMPLEMENT, '--', new_v) # remove each group of illegal characters and replace with '--' new_v = new_v[0:LABEL_MAX_LENGTH] return new_k, new_v
be033ae63942508c07dcfc9045f26e3bbbb4024e
445,575
def JaccardIndex(axial_auto, axial_validation):
    """
    Calculates the Jaccard Index / Tanimoto Index between 2 numpy ndarrays.

    J = |A and B| / |A or B|

    Input ndarrays must be boolean (they are cast with astype('bool')).

    From Jaccard's paper:
    Jaccard, Paul (February 1912). "The Distribution of the Flora in the
    Alpine Zone". New Phytologist. 11 (2): 37-50.
    doi:10.1111/j.1469-8137.1912.tb05611.x
    """
    denom = (axial_auto.astype('bool') | axial_validation.astype('bool')).sum()
    num = (axial_auto.astype('bool') & axial_validation.astype('bool')).sum()
    return num / denom
1037bbb09f3ae1103bc54017cf407fb2477a7d52
166,439
def _acct(v): """Convert to accounting (float) for excel, but default to original value.""" try: if v is None or v == '': return '' return float(v) except ValueError: return v
5d7985a3b5392a30a496a0a480d1109681024197
521,296
def StringFill(_string, _len, fill_front=False, fill_spaces=False):
    """Fill the string _string up to length _len with dots (or spaces).

    If len(_string) > _len, the string is cropped.

    Keyword arguments:
        fill_front: True to fill in front of the input string (default False).
        fill_spaces: True to pad with spaces instead of dots (default False).

    Examples:
        StringFill('test 123', 12)                   = 'test 123 ...'
        StringFill('test 123', 12, fill_front=True)  = '... test 123'
    """
    tmp = len(_string)
    if tmp < _len:
        if not fill_spaces:
            if fill_front:
                return (_len - tmp - 1) * '.' + ' ' + _string
            return _string + ' ' + (_len - tmp - 1) * '.'
        if fill_front:
            return (_len - tmp) * ' ' + _string
        return _string + (_len - tmp) * ' '
    return _string[:_len]
e09549e7f91b406151de813764e30cc3bb5301e6
284,538
from collections import OrderedDict


def readUniqueHashMap(fileName, keyIndex=1):
    """Read a file list file and return a key/value hash map.

    Can be used for sample name/sample file definitions, or parameter
    options definitions.

    Arguments:
        fileName {str} -- file containing at least two columns, one the key
            and the other the value, no header. One key corresponds to one value.
        keyIndex {int} -- the column index (0-based) of the key.

    Returns:
        OrderedDict mapping key to value.
    """
    valueIndex = 1 if keyIndex == 0 else 0
    maxIndex = max(valueIndex, keyIndex)
    result = OrderedDict()
    with open(fileName, "r") as f:
        for line in f:
            parts = line.rstrip().split('\t')
            if len(parts) > maxIndex:
                result[parts[keyIndex]] = parts[valueIndex]
    return result
de88e252525da7d04249c1eace0d759e67d20d13
319,802
import math


def relatively_prime(a: int, b: int) -> bool:
    """
    Two numbers are relatively prime if their greatest common divisor (GCD) is 1.

    Parameters:
        a (int): first number
        b (int): second number

    Returns:
        (bool): True if the two numbers' GCD is 1, otherwise False.
    """
    return math.gcd(a, b) == 1
ec53289fc0299e7f543a896288985a5c4031b3fa
381,672
def get_string_from_bytes(byte_data, encoding="ascii"):
    """Decodes a string from DAT file byte data.

    Note that in byte form these strings are 0-terminated, and this
    terminating 0 is removed.

    Args:
        byte_data (bytes): the binary data to convert to a string
        encoding (string): optional, the encoding type to use when converting
    """
    string_bytes = byte_data[:-1]  # strip off the 0 at the end of the string
    return string_bytes.decode(encoding)
c07523139e2509fcc19b2ce1d9a933fcb648abfd
2,931
def get_index_after_weight_limit(items_by_weight, weight_limit):
    """Given a list of (item_index, [any other fields,] item) tuples sorted by
    item weight, find the positional index of the first item whose weight meets
    or exceeds the passed weight limit."""
    # Binary search; based on bisect.bisect_right from the standard Python
    # library, but adapted to check the item weight.
    lowest = 0
    highest = len(items_by_weight)
    while lowest < highest:
        middle = (lowest + highest) // 2
        if items_by_weight[middle][-1].weight >= weight_limit:
            highest = middle
        else:
            lowest = middle + 1
    return lowest
1efda1a7bd63d2af1ca4415a13becbc9b8189320
442,135
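A small usage sketch for get_index_after_weight_limit above; Item is a hypothetical stand-in for any object with a weight attribute.

from collections import namedtuple

Item = namedtuple("Item", "weight")
items = [(0, Item(1.0)), (1, Item(2.5)), (2, Item(4.0))]  # sorted by weight
print(get_index_after_weight_limit(items, 2.5))  # -> 1, first index with weight >= 2.5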
def on_raised(callback, *args):
    """Invoke callback if an exception listed in args occurs. Reraise the exception."""
    def decorator(main_function):
        def wrapper(*wargs, **kwargs):
            try:
                return main_function(*wargs, **kwargs)
            except Exception as e:
                if len(args) == 0 or isinstance(e, args):
                    callback(main_function, e)
                raise  # bare raise preserves the original traceback
        return wrapper
    return decorator
c58e08be9e2da68e1332e9c20c5323fc6753ebf7
444,080
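A usage sketch for the on_raised decorator above; log_failure is a hypothetical callback.

def log_failure(fn, exc):
    print(f"{fn.__name__} raised {exc!r}")

@on_raised(log_failure, ValueError)
def parse(s):
    return int(s)

parse("42")    # -> 42
# parse("oops") would print the callback message and then re-raise ValueError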
def _test_success(block_id, block_dict, extra_args): """ Automatically returns success """ return True
6866b2645debbf2c5200cf85f341975143f01e08
346,089
def gate_error_to_irb_decay(irb_error: float, rb_decay: float, dim: int): """ For convenience, inversion of Eq. 4 of [IRB]_. See :func:`irb_decay_to_error`. :param irb_error: error of the interleaved gate. :param rb_decay: Observed decay parameter in standard rb experiment. :param dim: Dimension of the Hilbert space, 2**num_qubits :return: Decay parameter in irb experiment with relevant gate interleaved between Cliffords """ return (1 - irb_error * (dim / (dim - 1))) * rb_decay
4298d9d132d12929711aa2388118179649ad2fe4
324,462
def get_user(txn, email):
    """Gets user node."""
    # Use a query parameter instead of string formatting to avoid
    # Cypher injection through the email value.
    query = """
    MATCH (a:User)
    WHERE a.email = $email
    RETURN a
    """
    result = txn.run(query, email=email)
    return result.value()
e623c22d7ddc5fddf29031e906a10e341d0e41a2
411,818
import collections def _group_by_id(objs): """Return a dictionary whose keys are MBIDs and whose values are lists of objects (Albums or Items) with that ID. """ counts = collections.defaultdict(list) for obj in objs: mbid = getattr(obj, 'mb_trackid', obj.mb_albumid) counts[mbid].append(obj) return counts
714ae5ac1cbac1b7d60fddea70f43b93d32f4700
138,233
def alert2subject(alert_obj): """ Transforms an alert into a subject for mails :param alert_obj: :return: """ return '[WEBMONITOR] WEBSITE : ' + alert_obj['website'] + ' IS ' + alert_obj['status']
d582eb5c0c02ea643ba7e5ee7d323f997bff8a30
579,866
def assert_raises(c, exc): """Test whether callable c raises an exception of type exc.""" try: c() except exc: return True else: raise AssertionError("%r failed to raise exception %r" % (c, exc))
e3ed1a7734a223801e520b863e6006d6a1bdff29
266,794
import warnings


def lmc(wave, tau_v=1, **kwargs):
    r"""Pei 1992 LMC extinction curve.

    :param wave:
        The wavelengths at which optical depth estimates are desired.
    :param tau_v: (default: 1)
        The optical depth at 5500\AA, used to normalize the attenuation curve.
    :returns tau:
        The optical depth at each wavelength.
    """
    if (wave < 1e3).any():
        warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
    mic = wave * 1e-4
    aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
    ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
    bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
    nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]

    abs_ab = mic * 0.
    norm_v = 0  # hack to go from tau_b to tau_v
    mic_5500 = 5500 * 1e-4

    for i, a in enumerate(aa):
        norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] + (ll[i] / mic_5500)**nn[i] + bb[i])
        abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])

    return tau_v * (abs_ab / norm_v)
04c89605e8ad4188c62b631e173a9c8fe714958a
707,441
def get_available_group(car): """ Detect and, if is possible, assign a group to a car :param car: Available car that wants a group :type car: journey.Car :returns: Group assigned if is possible, None if isn't :type returns: journey.Group """ group = car.get_available_group() if group: group.assign_car(car) return group
8a5661db72d6c9cd505fd0962aedb632fe210a9d
360,277
from pathlib import Path


def is_scene(directory: Path) -> bool:
    """Checks if a directory is a scene that contains instructions.

    To be a scene, a directory must:
        * Contain files.
        * Have a name that starts with `scene`.

    Args:
        directory (Path): The path towards the directory to check.

    Returns:
        bool: Whether the directory is a scene that contains elements or not.
    """
    if not directory.is_dir():
        return False
    contains_something = any(directory.iterdir())
    return directory.name[0:5] == "scene" and contains_something
32cd107183aa02cc43c506ee1e25af656ebf7628
502,501
def normalize_date(date):
    """Bring the date argument to a uniform format, which is YYYY-MM-DD."""
    # Remove the time portion.
    time_pos = date.find('T')
    if time_pos >= 0:
        date = date[:time_pos]
    # Insert dashes.
    if len(date) == 8:
        date = date[:4] + '-' + date[4:6] + '-' + date[6:]
    return date
68589207d4c2c24d8f3af4afcfd6bf2c4c01fe67
361,885
def get_dbtype_from_file_exten(infp): """Determine data type from fasta filename extension. """ dbtype = None exten = infp.rsplit('.', 1)[1] if exten == 'faa': dbtype = 'prot' elif exten == 'fna': dbtype = 'nucl' assert dbtype is not None, """Could not determine data type for file:\n\t %s""" % infp return dbtype
c2a9a15f7fb531bc83ac61641a7097f4a6878167
417,294
def sortedDictValues(inputDict, reverse=False): """ Returns the given dictionary as a list of keys sorted deterministically by value then key:: {"spam": 0, "eggs": 1, "ham": 1} => ["spam", "eggs", "ham"] :param dict inputDict: :param bool reverse: Reversed sorting :rtype: list """ sortedDict = sorted(inputDict.items(), key=lambda item: (item[1], item[0]), reverse=reverse) values = [k for k, v in sortedDict] return values
50581e6e66094795ed1a8842e2492fe8c06f19a2
86,432
from typing import List


def span_labels_to_tag_labels(
    labels: List[str], label_encoding: str = "BIO"
) -> List[str]:
    """Converts a list of span labels to tag labels following
    `spacy.gold.biluo_tags_from_offsets`.

    Parameters
    ----------
    labels
        Span labels to convert
    label_encoding
        The label format used for the tag labels

    Returns
    -------
    tag_labels
    """
    if label_encoding == "BIOUL":
        converted_labels = [
            f"{char}-{label}" for char in ["B", "I", "U", "L"] for label in labels
        ] + ["O"]
    elif label_encoding == "BIO":
        converted_labels = [
            f"{char}-{label}" for char in ["B", "I"] for label in labels
        ] + ["O"]
    else:
        raise ValueError(
            f"'{label_encoding}' is not a supported label encoding scheme."
        )
    return converted_labels
3e706d10f7c3667c527150c1178b63938362e47f
343,320
def get_clip_index_by_media_id(clip_info, clip_list): """ For a given clip info dict, will return the index at which it is found in the list by comparing media_id If nothing is found, return None """ media_id_list = [item["media_id"] for item in clip_list] try: output = media_id_list.index(clip_info["media_id"]) except ValueError: return None else: return output
7ca2466a65fcb54c7414d64726430ab0fe86789d
271,849
def issubstring(s1, s2, *args, **kwargs):
    """Is s1 a substring of s2?"""
    return s1 in s2
9a3aad1e3f46677457d78e9169c241577102200e
651,451
def getvaluelist(doclist, fieldname):
    """
    Returns a list of values of a particular fieldname from all Document
    objects in a doclist.
    """
    return [d.fields[fieldname] for d in doclist]
b85d171b537636477b00021ce717788b5e4735da
7,780
def find_neighbor(used_letters):
    """
    Finds the current position's neighbors that have not been used previously.

    :param used_letters: list, indexes of used letters
    :return: list, indexes that are the current position's neighbors and have
        not been used
    """
    neighbor = []
    center = used_letters[-1]  # Current position
    # x-index and y-index of the current position
    x = int(center[1])
    y = int(center[0])
    # Find neighbors within the 4x4 grid
    for i in range(x - 1, x + 2):
        if 0 <= i <= 3:
            for j in range(y - 1, y + 2):
                if 0 <= j <= 3:
                    index = str(j) + str(i)
                    if index not in used_letters:
                        neighbor.append(index)
    return neighbor
5058665c1e1c659192587c26e4348f7bfe2f2326
603,576
def tolist(value=None, sep=',', empty_strings=False):
    """Convert a string to a list.

    The input string is split on the separator character. The default
    separator is ','. An alternative separator may be passed as the 'sep'
    keyword. If no string value is provided, a function is returned that
    splits a string on the provided separator, or the default if none was
    provided. Any empty strings are removed from the resulting list. This
    behaviour can be changed by passing True as the 'empty_strings' keyword
    argument.
    """
    def _tolist(input_value):
        result = input_value.split(sep)
        if not empty_strings:
            result = [r for r in result if len(r) > 0]
        return result
    if value is None:
        return _tolist
    return _tolist(value)
93a1d3e45736fc57a4a85f9e481bfd32c32843f7
607,859
def chunk(seq, count):
    """Splits the given sequence into `count` chunks as evenly as possible.

    Args:
        seq: Sequence to split
        count: Number of chunks

    Returns:
        List of chunks
    """
    avg = len(seq) / float(count)
    res = []
    i = 0.0
    while i < len(seq):
        res.append(seq[int(i):int(i + avg)])
        i += avg
    return res
35bcd80917a3a82db40d116cc3846c1ab7493194
102,035
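A quick example of chunk above; note that the float stepping makes later chunks absorb the remainder.

print(chunk([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4, 5]]
print(chunk(list(range(7)), 3))   # -> [[0, 1], [2, 3], [4, 5, 6]]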
import torch def numpy_img_to_torch(batch): """Preprocess images Process numpy images: 1. Normalize by dividing by 255.0 2. Set datatype to be float32 3. Make sure the input dimensions are (batch size, channels, height, width) 4. Convert to torch.Tensor type Returns ------- torch.Tensor Processed images ready to be fed as input to Conv2D """ return torch.from_numpy((batch/255.0).astype('float32'))\ .permute(0, 3, 1, 2)
f68d0d84d12f92a717f3f192724257399acd4a8b
231,158
def const(result): """Return a function that ignores any arguments and just returns the specified result""" def constResult(*args, **kwargs): return result return constResult
ea6f0cec9f7a5d86a4d2a22b1cb02cb2176b9335
483,793
def int2hex(number): """Returns a string representation of the number as hex. """ return "%04X" % number
fe43dfa07cf955a671e0be6ca82abee42e236482
617,700
def find_shared_neurons(listA, listB):
    """
    :param listA: list of unique neurons in A (dtype: list of int)
    :param listB: list of unique neurons in B (dtype: list of int)
    :return: shared neurons between lists A and B (dtype: list of int)
    """
    shared_neurons = set.intersection(set(listA), set(listB))
    return list(shared_neurons)
25bea1aa3d51286b2b4fbaf10070cb8ee8fe7220
338,609
def flatten(tensor): """ Flatten input tensor as the shape of (nb, nf) :param tensor: input Tensor :type tensor: torch.Tensor :return: flattened tensor :rtype: torch.Tensor """ assert len(tensor.shape) >= 2 if len(tensor.shape) > 2: flattened = tensor.view(tensor.shape[0], -1) else: flattened = tensor return flattened
d9103fd3554b8186a0c0a9840b914315d5646334
94,137
def request_value(request, key, default=None):
    """
    Returns the first found value, looked up in this order: POST, GET, default.

    :param request:
    :param key:
    :param default:
    :return:
    """
    value = request.POST.get(key)
    if value is None:
        value = request.GET.get(key)
    return default if value is None else value
28cc13be18632bee1c1ad43558470bdbaa5100ad
75,787
def makeFullName(parScope, parName):
    """Create the fully-qualified name (includes scope if used)."""
    # Skip the scope (and leading dot) if no scope is given, even in cases
    # where scope IS used for other pars in the same task.
    if parScope:
        return parScope + '.' + parName
    return parName
0dda72bb1ed8a6f82eaa34362834d2329bae9630
629,471
import math def progressbar(cur, tot, length): """Generate a progress bar""" per = math.floor(cur * float(length) / tot) return "[{}{}]".format("#" * per, " " * (length - per))
0b8c7ebe27640d1b5f75c32756e31e6f3ba2b178
254,417
def TestToExtract(data, missing, overwrite):
    """
    Test whether the data needs to be extracted.

    :param float/int data: data to test
    :param float/int missing: missing value
    :param boolean overwrite: whether to overwrite
    :returns: True if the data equals the missing value or overwrite is set
    """
    return data == missing or overwrite
96366843466c1b285faa64bccc7a45e6bce6175c
219,073
def unique_prefixes(context):
    """Return a dictionary with unique prefixes for modules in `context`.

    Keys are 'module' statements and values are prefixes,
    disambiguated where necessary.
    """
    res = {}
    for m in context.modules.values():
        if m.keyword == "submodule":
            continue
        prf = new = m.i_prefix
        suff = 0
        while new in res.values():
            suff += 1
            new = "%s%x" % (prf, suff)
        res[m] = new
    return res
b16d472726f61d7ff3dace6761b0bf8b356356bc
317,331
def link(method): """ decorator to flag an admin method as rendering a link on client list view. :param object method: method to be flagged. :returns: method. :rtype: type """ setattr(method, 'is_link', True) return method
78da478aaa7cade53d6f7a70c214eabd00715e2f
601,297
def strplural(n, name, nonumber=False, s=''):
    """
    Returns the plural or singular of a string.

    Parameters
    ----------
    n : integer
        The plural or singular is based on this number.
    name : string
        String for which a plural is requested.
    nonumber : boolean
        If true, don't prepend the number.
    s : string
        String to be appended if n > 0.

    Examples
    --------
    >>> strplural(0, 'cat')
    'no cat'
    >>> strplural(1, 'cat')
    '1 cat'
    >>> strplural(2, 'cat')
    '2 cats'
    >>> strplural(2, 'cat', nonumber=True)
    'cats'
    >>> animals = ['cat', 'dog']
    >>> strplural(len(animals), 'animal', s=': ') + ', '.join(animals)
    '2 animals: cat, dog'
    >>> strplural(0, 'animal', s=':')
    'no animal'
    """
    if n == 0:
        return ('' if nonumber else 'no ') + name
    elif n == 1:
        return ('' if nonumber else '1 ') + name + s
    else:
        return ('' if nonumber else str(n) + ' ') + name + 's' + s
9fb50adf94cf2ca951c4704acd18c6084bb8107f
437,125
def is_file_in_manifest(file_name, width, manifest): """Whether the given file, in its given width, is in manifest.""" return (manifest.get(file_name, '-1') == width)
11ef361d61ca1e5a6b0ac5a6b569f55751a12186
517,340
import cProfile def mpi_profile_decorator(comm, filename=None): """ Profiles execution of MPI processes and writes stats to separate files """ def prof_decorator(f): def wrap_f(*args, **kwargs): pr = cProfile.Profile() pr.enable() result = f(*args, **kwargs) pr.disable() if filename is None: pr.print_stats() else: filename_r = filename + ".{}".format(comm.rank) pr.dump_stats(filename_r) return result return wrap_f return prof_decorator
c6f9627aa3d8f19bb19f6de34de6c00670c347f2
551,246
def get_local_hostname() -> str:
    """
    Get the hostname from /etc/hostname.

    :return: string. Hostname
    """
    hostname_path = "/etc/hostname"
    try:
        # open() must be inside the try block for the IOError to be caught
        with open(hostname_path) as f:
            return f.readline().strip()
    except IOError:
        raise IOError("Error opening {}".format(hostname_path))
205c748679f47e1dc331d1e1200ea7ecdc55cbc6
206,298
def average_barcodes(df, sequence_label="label", out_prefix=None): """Average RNA/DNA barcode counts for each sequence within replicates. Parameters ---------- df : pd.DataFrame Index is the barcode, one column must have the key sequence_label, the rest are assumed to be RNA/DNA counts for each replicate. sequence_label : str Name of the column in df containing the sequence IDs out_prefix : str If specified, save the df to file with this prefix. Returns ------- expression_df : pd.DataFrame Average RNA/DNA counts for each sequence in each replicate. Index is now the sequence label, column is the replicate. """ expression_df = df.groupby(sequence_label).mean() if out_prefix: expression_df.to_csv(f"{out_prefix}AverageExpressionPerReplicate.txt", sep="\t", na_rep="NaN") return expression_df
965ce9344aa749f1829534855396814e90316823
243,675
import json def _clean_output_json(output_json: str) -> str: """Make JSON output deterministic and nicer to read.""" try: output = json.loads(output_json) except json.JSONDecodeError: raise ValueError( f"Instead of JSON, output was:\n--- output start ---\n{output_json}\n--- output end ---" ) return json.dumps(output, indent=2, sort_keys=True)
0952ed8f8cc34ca2c18aa3d09ca0c81607066332
702,040
from typing import Sequence


def order_inversions(ttset: Sequence) -> int:
    """
    Given a transformed version of a series with order numbers, returns the
    number of order inversions that occurred because of the transformation.
    """
    orders = [x[0] for x in ttset]
    inv_count = 0
    for i in range(len(ttset)):
        for j in range(i + 1, len(ttset)):
            if orders[i] > orders[j]:
                inv_count += 1
    return inv_count
6d061adb0035594e0076b0d370407f8e6f6ef4e3
410,382
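A tiny check of order_inversions above with a hypothetical transformed set; each element's first field is its original order number.

ttset = [(1, 'a'), (3, 'b'), (2, 'c')]
print(order_inversions(ttset))  # -> 1, for the out-of-order pair (3, 2)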
def print_palindromes_from_list(palindrome_list): """ Given a list with palindrome positions, lengths and sequences, print the positions and lengths separated by a whitespace, one pair per line. """ for palindrome in palindrome_list: print("%s %s" % ( palindrome[0], palindrome[1] )) return None
475629e6ec967b2011a760993bb20b2d9b7c6967
356,634
def lr_decay(N, step, learning_rate): """ learning rate decay Args: learning_rate: base learning rate step: current iteration number N: total number of iterations over which learning rate is decayed """ min_lr = 0.00001 res = learning_rate * ((N - step) / N) ** 2 return max(res, min_lr)
307ac7a886a463320504a6983d1941e59d76b41a
597,901
def format_title(host: str) -> str: """Format the title for config entries.""" return "Controller ({})".format(host)
99ca5c97007a8f0373184d8d9c5c55bb5d7d64b9
25,916
def add_date_end(record: dict):
    """
    Make ``date_end`` equal to ``date_start`` if ``measure_stage`` is "Lift".

    Parameters
    ----------
    record : dict
        Input record.

    Returns
    -------
    dict
        Record with date_end changed conditionally, or the original record.
    """
    if record['measure_stage'] == 'Lift':
        record['date_end'] = record['date_start']
    return record
67f6ae063aabdf7a7d456ed0ce9660a75b37b6c2
11,385
def predict_model(model, X_score): """Uses a trained scikit-learn model to predict over the scoring data""" return model.predict(X_score)
9083ae727e7b1572c4f19c2adee3a80437d0b406
179,775
def post_process_sentencepiece_tokenizer_output(s): """ replaces whitespace encoded as '_' with ' ' for sentencepiece tokenizers. """ s = s.replace('▁', ' ') return s
fd14a796bdf45102584e7911e625e78ba4e5e05c
529,776
def br_to_us_number_format(numb_str: str) -> str: """ Removes dot as thousand separator and replaces comma decimal separator with dot >>> br_to_us_number_format('10.000,00') '10000.00' """ return numb_str.replace(".", "").replace(",", ".")
0c7e44918e865093f9c8d86e309a846cb5640116
197,947
def filter_year(vv, year): """Filters by year""" vvy = vv[vv['year'] >= year] return vvy
48ebca317ae29c3b3b1299794f23ea9c3ec7b914
188,402
from typing import List


def get_prompt(letter_set: List[str], correct_words: List[str],
               incorrect_words: List[str], score: int, current_round: int,
               total_rounds: int) -> str:
    """Generate the prompt text for a single game round."""
    prompt = (
        f'Instructions: Please give as many examples as possible of words with '
        f'four or more letters, using only the letters: '
        f'{" ".join(letter_set).upper()}. Letters may be repeated. You will '
        f'have {total_rounds} rounds to enter words. Please separate words with '
        f'a comma. i.e. Guess: word1, word2, ... More points will be given to '
        f'longer words.\n\n'
    )
    prompt += (
        f'Correct words: {", ".join(correct_words)}.\n'
        f'Incorrect words: {", ".join(incorrect_words)}.\n'
        f'Score: {score}.\n'
        f'Round: {current_round+1} of {total_rounds}.\n\n'
    )
    prompt += 'Guess: '
    return prompt
916b23e87eef8d16f89b0d46b20da604c568029a
425,442
def get_ver_component(ver_list, idx): """Get version component from components list. Return 0 for components out of range as default. """ if idx < len(ver_list): return ver_list[idx] return 0
4425440a82b67e95553617a830f1ec7aa3bbc04e
438,910
def version_check(version): """Checks if OpenFlow version is compatible and returns the version if it is Parameters ---------- version: hex The OpenFlow version taken from the SDN switch 'hello' message Returns ------- The OpenFlow version if match, else 0 for unknown version """ return { 1: 'OF10', # 0x01 -> OF1.0 3: 'OF12', # 0x03 -> OF1.2 4: 'OF13', # 0x04 -> OF1.3 5: 'OF14', # 0x05 -> OF1.4 6: 'OF15', # 0x06 -> OF1.5 }.get(version, 0)
a0edb7f70dca6f9be64b020c13a9a02f1d65572c
640,875
def torch2numpy(img):
    """Converts a torch image to numpy format."""
    if img.dim() == 4:
        img = img.permute(0, 2, 3, 1).contiguous()
    elif img.dim() == 3:
        img = img.permute(1, 2, 0).contiguous()
    # detach() so tensors that require grad can be converted as well
    return img.detach().cpu().numpy()
25fee887319d6014bc480498550c6b95fb0f6ea8
684,281
def does_algorithm_implementation_have_capabilities_to_execute_parameter(parameter_kisao_id, algorithm_specs):
    """Determine whether an implementation of an algorithm has the capabilities
    to execute an algorithm parameter.

    Args:
        parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
        algorithm_specs (:obj:`dict` with schema ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``):
            specifications of the implementation of an algorithm

    Returns:
        :obj:`bool`: whether the implementation of the algorithm has the
        capabilities to execute the SED parameter
    """
    for parameter_specs in algorithm_specs['parameters']:
        if parameter_specs['kisaoId']['id'] == parameter_kisao_id:
            return True
    return False
653712ae621bd014547e04009243cefe4c9eb8e1
706,441
def format_range(low, high, width):
    """Format a range from low to high inclusively, with a certain width."""
    if low == high:
        return "%0*d" % (width, low)
    return "%0*d-%0*d" % (width, low, width, high)
59fc9f38967541924e1d74799d655b07884d22fe
642,926
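Examples of format_range above:

print(format_range(7, 7, 3))   # -> '007'
print(format_range(7, 12, 3))  # -> '007-012'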
import base64
import pickle


def ObjectToBase64EncodedString(Object):
    """Encode a Python object into a base64 encoded string.

    The object is pickled before encoding.

    Arguments:
        Object: Python object.

    Returns:
        str: Base64 encoded object string, or None.
    """
    return None if Object is None else base64.b64encode(pickle.dumps(Object)).decode()
08936ec2ea3d4bcf324a81623b4a0082f9f8fa0a
162,238
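A round-trip sketch for ObjectToBase64EncodedString above; the decoding side is an assumption, since no inverse helper is shown here.

encoded = ObjectToBase64EncodedString({"a": 1})
decoded = pickle.loads(base64.b64decode(encoded))  # only unpickle trusted data
print(decoded)  # -> {'a': 1}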
def splitTag(chunkTag, oTag="O", raw=False):
    """
    Split a chunk tag into an IOB tag and chunk type;
    return (iob_tag, chunk_type).
    """
    if chunkTag == "O" or chunkTag == oTag:
        tag, type_ = "O", None
    elif raw:
        tag, type_ = "B", chunkTag
    else:
        try:
            # split on first hyphen, allowing hyphen in type
            tag, type_ = chunkTag.split('-', 1)
        except ValueError:
            tag, type_ = chunkTag, None
    return tag, type_
c44d923c1799cd9af921abb2207956630c25e8c0
340,441
def _get_dimension(region):
    """Return dimensionality of `region` (0, 1, 2, or 3)."""
    if len(region) == 7:
        zone_name, imin, imax, jmin, jmax, kmin, kmax = region
    elif len(region) == 5:
        zone_name, imin, imax, jmin, jmax = region
        kmin, kmax = 0, 0
    else:
        zone_name, imin, imax = region
        jmin, jmax, kmin, kmax = 0, 0, 0, 0
    dim = 0
    if imin != imax:
        dim += 1
    if jmin != jmax:
        dim += 1
    if kmin != kmax:
        dim += 1
    return dim
1ee0853d131262d9c08d76205b853c7e10704231
474,442
import collections def group_fields_by_root(field_locations): """Given a list of field_locations group together those that hang from the same parent. Parameters ---------- field_locations: List[FieldLocation] Returns ------- dict Dictionary mapping PageTree node numbers to a list of FieldLocation """ groups = collections.defaultdict(list) for field_location in field_locations: groups[field_location.item[field_location.root]].append(field_location) return groups
345b4c5c5f30c9806a26c3812e758341ac74fc42
106,564
def get_version(raw): """ This will parse out the version string from the given list of lines. If no version string can be found a ValueError will be raised. """ for line in raw: if line.startswith("# MODULE:"): _, version = line.split(":", 1) return version.strip() raise ValueError("Could not determine the version")
cddccd1905f7a75eb1122b300cccebb71782304f
81,476
def strip_vmdk_extension(filename):
    """Remove the .vmdk file extension from a string."""
    # Only strip a trailing extension; a plain replace would also drop
    # '.vmdk' occurring in the middle of the name.
    if filename.endswith(".vmdk"):
        return filename[:-len(".vmdk")]
    return filename
c4968c0e92e3948d23226224064abf78368acfbb
152,381
from typing import Any def _yamlize(value: Any) -> Any: """Converts a python value to a valid YAML representation. Args: value: the python value to convert Returns: Either a string containing ``"null"``, ``"true"`` or ``"false"`` for the special cases ``None``, ``True`` and ``False``, resp., or ``value.name`` if present (for ``Enum``\\ s), or the same value received as input for other cases.""" if value is None: return "null" if value is True: return "true" if value is False: return "false" if hasattr(value, "name"): # For Enums return value.name # pylint:disable=no-member return value
b7b202fcd0b0e91c46a8d8c9dbd27dfa35ec348b
178,047
def get_z_sample(xbar, mu, SE): """ Return the z-score of a sample, from a sampling distribution. Parameters ---------- * xbar: mean of the current sample. * mu: mean of the population from where the sample is drawn. * SE: standard error of the sampling distribution (population SD / root(population size)) Returns ------- The z-score of the sample with mean less than or equal to xbar. Given by (xbar - mu) / SE. """ return (xbar-mu) / SE
be1b19d6c0ff6a9f47425e3b12813b6da68938f1
668,085
def apple_url_fix(url): """ Fix Apple URL. :param url: URL to fix :return: fixed URL """ if url.startswith("webcal://"): url = url.replace('webcal://', 'http://', 1) return url
e3576984708089e5932a45675ed5af7bc622b3fa
613,295
def load_text(file_name): """ Load lines from a plain-text file and return these as a list, with trailing newlines stripped. Arguments: file_name (str or unicode): file name. Returns: list of str or unicode: lines. """ with open(file_name) as text_file: lines = text_file.read().splitlines() return lines
11c4b0d57be2488800b11857b138b98fad689564
120,653
def in_order_traversal(tree, path=None):
    """Visit left -> curr -> right."""
    if path is None:  # avoid a shared mutable default argument
        path = []
    if tree:
        in_order_traversal(tree.left, path)
        path.append(tree.value)
        in_order_traversal(tree.right, path)
    return path
7436f339f5eb9dfabf04b02eab5c1eab2f6023b8
210,039
def should_log_line(line): """ Filters out unhelpful lines in the stacktrace (such as type decorators) that can be left out without missing out. """ blacklist = ['google_appengine', 'typechecked_', 'add_1_monkey_patches', 'db_hooks', 'threading.py'] return all(word not in line for word in blacklist)
210c13e7e2eb51b88c122bd2d1a5710cd3611a52
394,183
def print_deps_stdout_arg(request): """Argument for printing deps to stdout""" return request.param
63d735cd08b71bf12bef624c187ca9e128621ae6
665,243
def all_different(x): """ Return True if all non-None values in x are different""" seen = set() return not any([i is not None and (i in seen or seen.add(i)) for i in x])
47296e27a261a1e2dfeaa1b67d4bc9ca142a5c99
194,191
def is_zip_archive(file_name):
    """Test for the zip file archive extension."""
    return file_name.lower().endswith('.zip')
7e8f6b3304e05bb771d0fcce0677a4d89700829d
603,564
def get_sheet(book, sheetName=None, sheetIndex=0): """Get xlrd sheet object""" if sheetName is not None: sheet = book.sheet_by_name(sheetName) return sheet else: sheet = book.sheet_by_index(sheetIndex) return sheet
1db384417769299540e892321c029f71f9f4f10b
61,887
from typing import List


def tcp_decode(message: bytes) -> List[str]:
    """Decode a TCP message into a list of strings, one per CRLF-terminated line."""
    return message.decode().split('\r\n')[:-1]
a7445816445009a0550006ebb974fec6287ab469
265,670
def is_reference_html_file(filename):
    """Return true if the filename points to a reference HTML file."""
    return (filename.endswith('-expected.html')
            or filename.endswith('-expected-mismatch.html'))
72c635528fb96d6a5c475c348df03c3b44147fbf
566,682
from functools import reduce


def get_nested(dictionary: dict, keys: str, default=None):
    """
    Get a value inside a nested dict, or return the default value otherwise.

    source: https://stackoverflow.com/a/46890853

    :param dictionary: the dict with the value inside
    :param keys: a string path to the value; nested keys are concatenated with a '.'
    :param default: the default value to return if the keys path is invalid
    :return: the value found when the dictionary is reduced with the keys,
        or the default value otherwise.
    """
    return reduce(
        lambda d, key: d.get(key, default) if isinstance(d, dict) else default,
        keys.split("."),
        dictionary,
    )
3130209ede766a2f59e34c414964f4d908ba745b
454,766
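A usage sketch for get_nested above with a hypothetical config dict:

cfg = {"db": {"conn": {"host": "localhost"}}}
print(get_nested(cfg, "db.conn.host"))        # -> 'localhost'
print(get_nested(cfg, "db.conn.port", 5432))  # -> 5432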
import torch def accuracy(y_true, y_pred): """Compute classification accuracy (pytorch tensors) """ return torch.mean(y_pred.eq(y_true).float())
976c02adbf0627aa8e6a9df495c4799750fdb584
562,043
def get_num_increases(data: list[int]) -> int: """ Get the total number of times the value of the next element in the list is greater than the value of the previous element >>> get_num_increases([1, 3, 4, 2, 5]) 3 """ return sum(b > a for a, b in zip(data[:-1], data[1:]))
d57422eba4cdcee2cc16877af8aa82590a5716cc
94,571
from typing import List
import ast


def create_request_data(data_fields: List, args: dict) -> dict:
    """
    Converts the input given by the user when creating a new record into the
    data dict that should be passed in the http request.

    Args:
        data_fields: A list with the fields that should be added to the data.
        args: The arguments that were filled by the user.

    Returns:
        A dictionary representing the data parameter that should be sent in
        the http request.
    """
    data = {}
    for field in data_fields:
        if field == 'source':
            data[field] = args.get(field)
        elif field == 'attributes':
            # 'attributes' input should be of the form key1=value1,key2=value2...
            val = args.get(field)
            if val:
                try:
                    attributes_dict = {}
                    for attribute in val.split(','):
                        # split on the first '=' only, so values may contain '='
                        key, value = attribute.split('=', 1)
                        attributes_dict[key] = value
                    data[field] = attributes_dict
                except Exception:
                    raise Exception('Illegal input. Input format should be "key=value". '
                                    'Multiple values can be filled, separated by a comma.')
        else:
            # other fields should be converted to dict/list
            val = args.get(field)
            if val:
                try:
                    data[field] = ast.literal_eval(val)
                except Exception:
                    raise Exception('Illegal input. Please see the argument description '
                                    'for the correct input format.')
    return data
18237aa4be661a2bc70160d6923788a70f39a94f
356,471
def get_first_list_prop(lst): """ Returns the first element in the list that starts with list_, -1 if not found. Arguments: lst {list} """ for i, e in enumerate(lst): if e.startswith("list_"): return i return -1
faa73a11e835b4ee7133b504d06cb715df00331f
95,455
def _arguments_str_from_dictionary(options):
    """
    Convert method options passed as a dictionary to a str object
    having the form of the method arguments.
    """
    parts = []
    for key, value in options.items():
        if isinstance(value, str):
            parts.append("{}='{}'".format(key, value))
        else:
            parts.append("{}={}".format(key, value))
    return ','.join(parts)
6f0cf1176f0bcada81dc8dbe17cb57e760fd4d8c
686,295
import warnings


def prettywarn(msg, warntype):
    """Prints a suppressable warning without stack or line info."""
    original = warnings.formatwarning

    def _pretty_fmt(message, category, filename, lineno, line=None):
        return "{0}: {1}\n".format(category.__name__, message)

    warnings.formatwarning = _pretty_fmt
    try:
        warnings.warn(msg, warntype)
    finally:
        # restore the original formatter even if the warning escalates to an error
        warnings.formatwarning = original
c3d1ba02346a23057880c0a323cf969cc0bc00fa
189,242
import json


def load_json(json_path):
    """
    Load a JSON file.

    :param json_path: path to the JSON file
    :return: the JSON file's data
    """
    with open(json_path, 'r') as f:
        data = json.load(f)
    return data
cd533be2d00c6c10075147fbab81cc3b3b8469e9
607,046
import torch


def quat2mat(quat):
    # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L112
    """Convert quaternion coefficients to a rotation matrix.

    Args:
        quat: first three coefficients of the quaternion of rotation. The
            fourth is then computed to have a norm of 1 -- size = [B, 3]
    Returns:
        Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
    """
    norm_quat = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)
    norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                          2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                          2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2],
                         dim=1).reshape(B, 3, 3)
    return rotMat
c1badeea24e4829ac67dd8e7657960146be4cbdb
281,473
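A minimal sanity check for quat2mat above, assuming torch is available: a zero imaginary part normalizes to the identity quaternion.

q = torch.zeros(1, 3)  # (x, y, z) = 0 -> w normalizes to 1
R = quat2mat(q)
print(R)               # -> a 1x3x3 identity rotation matrix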