content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def predictor(X, y, model):
    """Evaluate a trained model on held-out data.

    Args:
        X: testing data
        y: testing labels
        model: trained learning model exposing ``evaluate``

    Returns:
        tuple: (loss, accuracy) as reported by ``model.evaluate``.
    """
    test_loss, test_accuracy = model.evaluate(X, y)
    return test_loss, test_accuracy
972ca852211893e348528d7ea0b6ed81ca3d28e6
76,833
import struct


def unpack_frames(b):
    """Split *b* into the frames described by its length header.

    The buffer starts with a u64 frame count followed by one u64 length per
    frame; the frame payloads follow back to back, as produced by
    pack_frames.

    See Also
    --------
    pack_frames
    """
    (count,) = struct.unpack("Q", b[:8])
    # Payload area begins right after the count and the per-frame lengths.
    offset = 8 * (count + 1)
    out = []
    for idx in range(1, count + 1):
        (size,) = struct.unpack("Q", b[idx * 8 : (idx + 1) * 8])
        out.append(b[offset : offset + size])
        offset += size
    return out
e0c91a017ac16e4a17e3b63d023f7136ac928459
76,835
import math


def circumference(radius):
    """Return the circumference of a circle with the given radius."""
    return 2.0 * math.pi * radius
edad2f50e8991eb3ce248f1e0d787330a14d059a
76,837
import uuid


def generate_topics_override(message_type, current_topics):
    """Build the topic list for the given message type.

    Keyword arguments:
    message_type -- the message type in use (kafka_equalities, kafka_audit,
        first_topic); matched case-insensitively
    current_topics -- fallback returned when the type is not recognised
    """
    kind = message_type.lower()
    if kind == "first_topic":
        return [current_topics[0]]
    topic_names = {
        "kafka_equalities": "data.equality",
        "kafka_audit": "data.businessAudit",
    }
    if kind in topic_names:
        return [{"topic": topic_names[kind], "key": str(uuid.uuid4())}]
    return current_topics
0eecf925f35e66229b40ec1373c769c5db3a98cf
76,838
def float_array_to_str(array_of_floats):
    """Render a sequence of floats as a bracketed, space-separated string
    with three decimals, for printing purposes."""
    body = ' '.join('%.3f' % value for value in array_of_floats)
    return '[' + body + ']'
d0165bb20f1a8c6ca173db7d2ef2c1faa4d232d3
76,844
import re


def is_valid_region(region):
    """True if string looks like a GCE region name.

    A valid name is one or more lowercase letters, digits or hyphens.
    Returns a real bool (the previous version returned the raw ``re.match``
    result — a Match object or None — despite the documented True).
    """
    return bool(re.match(r'^[a-z0-9\-]+$', region))
54e88015e9bbaaa7f52b82f7cf80566b5951853d
76,845
import requests
from bs4 import BeautifulSoup


def soupify(url):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree.

    Used by the scrapers.

    An explicit parser is passed because omitting it makes bs4 pick
    whichever parser happens to be installed, which varies across machines
    and emits a warning on recent versions.
    """
    response = requests.get(url)
    return BeautifulSoup(response.text, "html.parser")
39985dc857fc344497589649a6f7aec3443bc7de
76,846
import random


def position_mod_normal(in_str):
    """Pick a character position in *in_str* with normal likelihood.

    The distribution is centred one character behind the middle of the
    string with a standard deviation of a quarter of its length. This
    mimics studies showing typographical errors cluster towards the middle
    and end of words rather than the beginning.

    Returns 0 if the string is empty.
    """
    if in_str == '':
        return 0

    length = len(in_str)
    mean = length / 2.0 + 1
    sigma = length / 4.0
    last = length - 1

    # Re-draw until the sample lands inside the string.
    pos = int(round(random.gauss(mean, sigma)))
    while pos < 0 or pos > last:
        pos = int(round(random.gauss(mean, sigma)))
    return pos
6ed75d80ccb4c4328639549133748126c7e3eec4
76,853
import glob


def get_files_in_dir(dir, *exts):
    """List files in a directory that have the provided extensions.

    :param dir: String path of directory containing files to be listed
        (must end with the path separator, e.g. ``"data/"``, since the
        glob pattern is built by plain concatenation).
    :param exts: Variable amount of string arguments specifying the
        extensions to be used. If none are provided, every file in the
        directory is returned.
    :return: A list of string paths to each desired file in the directory.
    """
    # ``exts`` is a *args tuple and can never be None; the original tested
    # ``exts is None``, so the documented "no extensions -> all files"
    # default never fired and an empty list came back instead.
    if not exts:
        exts = ('',)
    file_paths = []
    for ext in exts:
        file_paths.extend(glob.glob(dir + '*' + ext))
    return file_paths
bae0c7bcc37b69aa5bac8db0486c46d7fc6d5788
76,859
def clean_str(str1: str) -> str:
    """Strip unwanted characters from *str1*.

    Removes every ``)``, ``;`` and tab, then trims surrounding single
    quotes.

    :param str1: the string to clean
    :return: the new string with wanted data only
    """
    cleaned = str1
    for unwanted in (")", ";", "\t"):
        cleaned = cleaned.replace(unwanted, "")
    return cleaned.strip("'")
0bbcfdf5b061d6df178cc6deed6c48a9266df4ca
76,861
import zipfile import io def _unzip_as_string(data): """Unzip stream of data in bytes as string. Arguments: data {bytes} -- Zipped data as bytes Returns: str -- Unzipped data as string """ z = zipfile.ZipFile(io.BytesIO(data)) unzipped_data = z.read(z.infolist()[0]) return unzipped_data.decode()
4e2dbeb98fe769682032d5e63d01c6b57a1f7ea3
76,871
def fib_dict(num):
    """Return the *num*-th Fibonacci number via memoised recursion.

    :param int num: The N-th fibonacci number (index).
    :return: Computed number.
    """
    memo = {0: 0, 1: 1}

    def compute(k):
        # Dict doubles as base cases and memoisation cache.
        if k not in memo:
            memo[k] = compute(k - 1) + compute(k - 2)
        return memo[k]

    return compute(num)
be3785825c59662945690714bcc80d76ea56052c
76,875
def cmdline(pid):
    """cmdline(pid) -> str list

    Args:
        pid (int): PID of the process.

    Returns:
        A list of the fields in ``/proc/<pid>/cmdline``.

    Note: Linux-only — reads procfs.
    """
    # /proc/<pid>/cmdline stores argv NUL-separated with a trailing NUL;
    # strip the trailer first so no empty last field appears.
    with open('/proc/%d/cmdline' % pid, 'r') as fd:
        return fd.read().rstrip('\x00').split('\x00')
9fddff806dbdafaacc0a3789377b1a3053cb9c38
76,876
from typing import OrderedDict
def get_chunks_stats(chunks):
    """Iterates over chunks and collects info about main features, such as:
    - labels
    - classes
    - tracks

    Args:
        chunks (list): dicts of chunks with video data; each chunk is
            expected to carry 'track', 'label' and 'class' keys

    Returns:
        OrderedDict: Statistics of chunks with keys
            'tracks_in_script_total' (count of distinct tracks; absent when
            *chunks* is empty), 'labels', 'classes' and 'tracks_used'
    """
    stats = OrderedDict()
    classes = OrderedDict()
    counted_tracks = []
    labels = []
    chunk_classes = []
    for chunk in chunks:
        # Count each distinct track once; setdefault seeds the counter on
        # the first unseen track.
        if chunk['track'] not in counted_tracks:
            stats['tracks_in_script_total'] = stats.setdefault('tracks_in_script_total', 0) + 1
            counted_tracks.append(chunk['track'])
        # Collect distinct labels/classes in first-seen order.
        if chunk['label'] not in labels:
            labels.append(chunk['label'])
        if chunk['class'] not in chunk_classes:
            chunk_classes.append(chunk['class'])
        # Per-class occurrence count over ALL chunks (not only unique ones).
        classes[chunk['class']] = classes.setdefault(chunk['class'], 0) + 1
    stats['labels'] = labels
    stats['classes'] = classes
    stats['tracks_used'] = counted_tracks
    return stats
1e6b69e22316108b0d1e33c84804598e4d5dca62
76,879
def rgb2bgr(tpl):
    """Convert an RGB colour tuple to BGR order."""
    return tuple(tpl[i] for i in (2, 1, 0))
c6d82e9e03c8be76fe98ada07594c7425c45fc41
76,892
def mapping_constructor(mapping_type, loader, mapping):
    """Build a *mapping_type* instance from a YAML mapping node.

    :param mapping_type: the class of the mapping to construct; must accept
        a dictionary as its sole constructor argument to be compatible
    :param loader: the yaml loader
    :type loader: :class:`yaml.loader.Loader`
    :param mapping: the attribute dict representation
    :return: the constructed mapping
    """
    as_dict = loader.construct_mapping(mapping)
    return mapping_type(as_dict)
0772a137a8ef71792c856567d788a7d1b6008eaf
76,895
import itertools
def strip_qualifiers(typename):
    """Remove const/volatile qualifiers, references, and pointers of a type.

    Returns a tuple ``(base_typename, qualifiers)`` where *qualifiers* lists
    the stripped tokens in left-to-right order of the original spelling.
    """
    qps = []
    # Peel trailing qualifiers ('&', '*', 'const', 'volatile') one at a time.
    while True:
        typename = typename.rstrip()
        # First candidate that *typename* ends with; the trailing '' sentinel
        # guarantees dropwhile always yields something.
        qual = next(itertools.dropwhile(lambda q: not typename.endswith(q), ('&', '*', 'const', 'volatile', '')))
        if not qual:
            break
        typename = typename[:-len(qual)]
        qps.append(qual)
    # Then peel leading 'const'/'volatile' the same way.
    while True:
        typename = typename.lstrip()
        qual = next(itertools.dropwhile(lambda q: not typename.startswith(q), ('const', 'volatile', '')))
        if not qual:
            break
        typename = typename[len(qual):]
        qps.append(qual)
    # NOTE(review): endswith/startswith have no word-boundary check, so a
    # name like "myconst" would lose its suffix — confirm inputs are
    # conventional C++ type spellings with separators.
    return typename, qps[::-1]
1e6d2eec2b8b192f779efc236b54fd250ecca574
76,898
def find_next_multi_line_comment_end(lines, line_index):
    """We are inside a comment; scan forward for the closing marker.

    Returns the index of the first line ending with ``*/`` at or after
    *line_index*, or ``len(lines)`` when no terminator exists.
    """
    total = len(lines)
    for idx in range(line_index, total):
        if lines[idx].strip().endswith('*/'):
            return idx
    return total
1b85dfe70ba54c573858116fecf2cf5d6a6a0bfa
76,900
def fmt_enum_repr(fmt: str, enum_type, enum_val):
    """Format the repr of ``enum_type(enum_val)`` into *fmt*.

    Arguments:
        fmt       - format string with one positional placeholder
        enum_type - Enum type to construct
        enum_val  - raw value passed to the enum constructor

    Returns:
        formatted string
    """
    rendered = repr(enum_type(enum_val))
    return fmt.format(rendered)
eba614c155e7609d41ac706a27594c70fb840115
76,910
import itertools


def dict_product(dicts):
    """Yield every dict from the cartesian product of a dict of lists.

    from https://stackoverflow.com/questions/5228158/cartesian-product-of-a-dictionary-of-lists
    """
    for combo in itertools.product(*dicts.values()):
        yield dict(zip(dicts, combo))
0511139ebf3beea6205ef7569ecc7743345eb4b8
76,911
def str_to_bool(string):
    """Converts a case-insensitive string 'true' or 'false' into a bool.

    Args:
        string: String to convert. Either 'true' or 'false', case-insensitive.

    Raises:
        ValueError if string is not 'true' or 'false'.
    """
    mapping = {'true': True, 'false': False}
    normalized = string.lower()
    if normalized in mapping:
        return mapping[normalized]
    raise ValueError('Expected "true" or "false"; got "{}".'.format(string))
7cb46b888aacfc6c09a2601143b945281abe4952
76,919
def synonyms(word2nums, num2words, word):
    """Look up synonyms of *word* via the word2nums and num2words dicts.

    Returns a sorted list of synonym strings, excluding *word* itself.
    """
    pool = set()
    for num in word2nums[word]:
        pool.update(num2words[num])
    pool.discard(word)
    return sorted(pool)
6d209c43d194b7e5b65aeb2a4b5a427c40a63040
76,922
def logistic4(x, A, B, C, D):
    """4-parameter logistic model.

    y = D + (A - D) / (1 + (x / C)**B)

    A is the minimum asymptote, B the Hill coefficient, C the inflection
    point and D the maximum asymptote.
    """
    scaled = (x / C) ** B
    return D + (A - D) / (1.0 + scaled)
28b2c8d9afd86a6dbbc394f7697a40937425a42e
76,923
def flatten_object(obj, path, ret):
    """Takes an object obj and flattens it into a dictionary ret.

    For instance { "x": { "y": 123 } } is turned into { "x__y": 123 }.
    Lists are deliberately not descended into.

    Invariant: *path* is either "" or already ends with "__". The original
    appended an extra "__" when recursing with a non-empty path, so depth-2
    keys came out as "a____b__c" instead of "a__b__c".
    """
    for k, v in obj.items():
        if isinstance(v, dict):
            flatten_object(v, path + k + "__", ret)
        elif isinstance(v, list):
            # don't peek inside lists
            pass
        else:
            ret[path + k] = v
    return ret
8891854c68c4aadd69569fce3cae70c3615b7188
76,927
from typing import List


def read_file(path: str) -> List[str]:
    """Read a text file and return its lines with surrounding whitespace
    stripped.

    Args:
        path (str): absolute or relative path of the file to read

    Returns:
        List[str]: one stripped string per line
    """
    with open(path) as handle:
        return [line.strip() for line in handle]
2432737e3d2b6db3dc7cbc945803a62dd8ad30d0
76,931
import math


def round_to_decimals(num, decs):
    """Round a floating point number to *decs* decimal places.

    The previous implementation used ``math.trunc``, which discards the
    fractional part instead of rounding it (e.g. 2.678 -> 2.67 at two
    decimals); ``round`` honours the documented contract.
    """
    factor = math.pow(10.0, decs)
    return round(num * factor) / factor
e3bffd3ae120322fe49cefeb3e25ab87457a8894
76,932
def _tuplize(arg): """turns any sequence or iterator into a flat tuple""" return tuple(arg)
8f992fee0befae9e564f882c229b5661869c81de
76,933
import base64


def get_http_auth_header(username, password):
    """Build a Basic Auth HTTP header.

    :param username: Basic Auth Username
    :param password: Basic Auth Password
    :returns: HTTP Header as dict containing basic auth information
    """
    credentials = "{}:{}".format(username, password).encode("utf-8")
    token = base64.b64encode(credentials).decode()
    return {"Authorization": "Basic {}".format(token)}
53efeffc28bab0fe387c8b03e36d278fcb6c0251
76,934
from typing import List


def parse_netstat(netstat: bytes) -> List[bytes]:
    """Parse the output of grab_netstat, returning a list of parsed values
    that we would like to search.

    :param netstat: Output of grab_netstat
    :type netstat: bytes
    :return: A list of parsed values (third/foreign-address column entries)
    :rtype: List[bytes]
    """
    netstatrecords = [x.lstrip() for x in netstat.splitlines()]
    temp_list = []
    for record in netstatrecords:
        # Only records with an address column, and not the "Proto ..."
        # header row. The original used ``or`` here, which let the header
        # row through (yielding its column title) and could crash with
        # IndexError on short header lines.
        if len(record.split()) > 2 and not record.startswith(b"Proto"):
            ip = record.split()[2]
            if ip not in [b"*:*", b"obtain", b"[::]:0"]:
                temp_list.append(ip)
    return temp_list
803617066e0d936b1eeb96da25903fc36aaa0d78
76,936
def listify(x):
    """Wrap a scalar in a list; pass lists through unchanged and convert
    tuples. Allows single items to be treated the same as lists,
    simplifying loops and comprehensions."""
    if isinstance(x, list):
        return x
    if isinstance(x, tuple):
        return list(x)
    return [x]
3e627fd0daebf63eeb54f87bffdba2d1a006fd7c
76,937
from typing import Dict


def clean_package_list(package_details_dict: Dict):
    """Clean package list before sending response."""
    return list(package_details_dict.values())
34ee7aff71e0d8099a481d5cf4aff034a6c85173
76,948
from typing import List
from typing import Tuple
from typing import Counter


def compute_frequency(
    token_lst: List[str],
    amount=50
) -> List[Tuple[str, int]]:  # noqa: E501
    """Compute word frequency from a list of tokens.

    Returns the *amount* most common tokens as (token, count) pairs.
    """
    return Counter(token_lst).most_common(amount)
ca9819cbf9088baf8ec1b58460befcc0a1efc4d7
76,950
def get_value(self):
    """get value as numpy array

    :return: numpy array, or None if value is empty"""
    # Force any pending lazy initialisation before reading the backing store.
    self.__apply_lazy_initializer()
    return self._get_npyarr()
69a3d6c04870695da2e3948cac338833d8209c51
76,954
def change_type(x, func):
    """change_type(x, func)

    Change object type.

    Parameters:
    -----------
    x: object
        Object to be converted.
    func: function
        Function for conversion.

    Return:
    -------
    func(x), or None if there was a value or type error.
    """
    try:
        return func(x)
    except (ValueError, TypeError):
        return None
f3dbbd7a10294d5a3d9a6bcc6a9c0dcb18c17214
76,956
def get_ml_dag_id(parent_dag_id: str, **kwargs) -> int:
    """Extract ml_dag_id either from kwargs or from XCom.

    Args:
        parent_dag_id: dag id used for the XCom lookup
        **kwargs: may carry ``ml_dag_id`` directly; otherwise must carry a
            ``task_instance`` to pull from

    Returns:
        ml_dag_id
    """
    try:
        return kwargs['ml_dag_id']
    except KeyError:
        return kwargs['task_instance'].xcom_pull(
            task_ids='initializer', key='ml_dag_id', dag_id=parent_dag_id
        )
8025531bdba16d86c328c802d84c6f8b0145355f
76,958
import math


def calc_delta_angle(px, py):
    """Calculate the angle between the vertical and the force line of action.

    Args:
        px (float): Applied horizontal force component
        py (float): Applied vertical force component

    Returns:
        delta_angle (float): angle between vertical and the line of action
        of the force; pi/2 when the force is purely horizontal (py == 0).
    """
    try:
        return -1 * math.atan(px / py)
    except ZeroDivisionError:
        # Bare ``except`` previously masked every failure; only a zero
        # vertical component legitimately means a purely horizontal force.
        return math.pi / 2
de8f9746c6074c13d1c8fd867041ba038d932e29
76,960
def forward_pass(output_node, sorted_nodes):
    """Run ``forward`` on every node in topological order.

    Args:
        output_node: node whose ``value`` is returned after the pass
        sorted_nodes: nodes sorted so dependencies come first

    Returns:
        value of *output_node* after the pass
    """
    for node in sorted_nodes:
        node.forward()
    return output_node.value
c9595c51d8e5e555824255269d40f5cf5676383e
76,964
def decode_mirror(cipher: str):
    """Flip the ciphertext, e.g. ``"tac"`` -> ``"cat"``."""
    return "".join(reversed(cipher))
de564d69c0155d68998f15c0646e37d4c7d2323d
76,967
def GetNumSuffixes(start_state):
    """Compute number of minimal suffixes automaton accepts from each state.

    For each state reachable from given state, compute number of paths in the
    automaton that start from that state, end in the accepting state and do
    not pass through accepting states in between.

    It is assumed that there are no cyclic paths going entirely through
    non-accepting states.

    Args:
        start_state: start state (as defined in dfa_parser.py).
    Returns:
        Dictionary from reachable states to numbers of suffixes.
    """
    num_suffixes = {}

    def ComputeNumSuffixes(state):
        # Memoised depth-first walk: each state is processed exactly once.
        if state in num_suffixes:
            return
        if state.is_accepting:
            # An accepting state terminates a minimal suffix immediately.
            num_suffixes[state] = 1
            # Even though the state itself is accepting, there may be more
            # reachable states behind it.
            for t in state.forward_transitions.values():
                ComputeNumSuffixes(t.to_state)
            return
        if state.any_byte:
            # NOTE(review): an any_byte state appears to carry a single
            # transition keyed by 0 — confirm against dfa_parser.py.
            next_state = state.forward_transitions[0].to_state
            ComputeNumSuffixes(next_state)
            num_suffixes[state] = num_suffixes[next_state]
            return
        # Ordinary state: suffix count is the sum over all outgoing edges.
        count = 0
        for t in state.forward_transitions.values():
            ComputeNumSuffixes(t.to_state)
            count += num_suffixes[t.to_state]
        num_suffixes[state] = count

    ComputeNumSuffixes(start_state)
    return num_suffixes
9d59f4e083ad5210d650b0bed836ee940bf62536
76,968
from typing import Optional
from typing import Tuple
import re


def init_renumbering(renumber_group: Optional[str] = None) -> Tuple[Optional[int], int]:
    """Return renumber match group and renumber offset to use.

    Parse a renumbering format string of the form N(+|-)M and return the
    match group N and positive or negative offset M. For example, 2-10
    would return match group 2 with offset -10.

    Raises:
        ValueError: if the string is not of the form N(+|-)M.
    """
    if not renumber_group:
        return None, 0
    # fullmatch so trailing garbage (e.g. "2x3") is rejected instead of
    # silently parsing as "2", as re.match previously allowed.
    m = re.fullmatch(r"([0-9]+)(?:(\+|-)([0-9]+))?", renumber_group)
    if not m:
        raise ValueError(f"Invalid renumbering match group format: {renumber_group}")
    digits, sign, magnitude = m.groups()
    renumber_match_group = int(digits)
    renumber_offset = 0
    if sign == "+":
        renumber_offset = int(magnitude)
    elif sign == "-":
        renumber_offset = -int(magnitude)
    return (renumber_match_group, renumber_offset)
ede56e65148705be965d38efa223003f343efd2a
76,972
def get_name(in_file):
    """
    :param in_file: Path to file to convert
    :return: The inferred sample name, defined by the file name shorn of
        any file extensions
    """
    basename = in_file.rsplit('/', 1)[-1]
    return basename.split('.', 1)[0]
30cea5b772a230b9c635f6dfbb32362480be2422
76,975
def get_donations(values):
    """
    Accumulate the best possible donation sum at any point in the array.

    This is simply max(best(i-3), best(i-2)) + donation(i)
    with best(0) = values[0]
         best(1) = values[1]
         best(2) = values[0] + values[2]
    """
    # NOTE(review): requires len(values) >= 3 (the seed dict indexes
    # values[2]) and the returned optimum always includes the final
    # element — confirm both are the intended contract.
    best_values = {0: values[0], 1: values[1], 2: values[0] + values[2]}
    for idx in range(3, len(values)):
        # Adjacent elements cannot both be taken, so the previous pick is
        # either two or three positions back.
        current = values[idx] + max(best_values[idx - 3], best_values[idx - 2])
        best_values[idx] = current
    return best_values[len(values) - 1]
6c6d16ff8be5369eb71cfe9dba2d4c7fbf167f03
76,980
def _next_k_combination(x): """ Find the next k-combination, as described by an integer in binary representation with the k set bits, by "Gosper's hack". Copy-paste from en.wikipedia.org/wiki/Combinatorial_number_system Parameters ---------- x : int Integer with k set bits. Returns ------- int Smallest integer > x with k set bits. """ u = x & -x v = u + x return v + (((v ^ x) // u) >> 2)
dd558ee7a824352e85384b6be4455e059ed882bf
76,981
def round_down_half_hour(time):
    """
    Rounds a datetime object DOWN to the nearest half hour.
    So if it's 12:15 pm, return the datetime at 12:00 pm.

    The original used exclusive bounds (``30 < minute < 59``), so a time
    at :59 was returned unrounded; any minute >= 30 now floors to 30.

    :param time: A datetime object
    :return: the datetime with minute floored to 0 or 30
    """
    if time.minute < 30:
        return time.replace(minute=0)
    return time.replace(minute=30)
67bc8475cedd560a60f7cc1459c73cf0bcc31675
76,993
def chain_frames(dist_matrix, first_frame=0, valid_idxs=None):
    """Chain together frames that are similar

    Args:
        dist_matrix (numpy.ndarray[N, N]): frame codistance matrix
        first_frame (int, optional): index of the first frame
        valid_idxs (list<int>, optional): list of valid frame indices

    Returns:
        list<int>: ordered frame indices
    """
    current_frame = first_frame
    used_frames = [current_frame]
    # For each frame, find the best match that has not been used yet
    for _ in range(dist_matrix.shape[0] - 1):
        # Candidate frames ordered nearest-to-farthest from the current one.
        sorted_candidates = dist_matrix[current_frame].argsort()
        for frame in sorted_candidates:
            # Skip invalid frames
            if isinstance(valid_idxs, list) and frame not in valid_idxs:
                continue
            if frame not in used_frames:
                used_frames.append(frame)
                current_frame = frame
                break
    # NOTE(review): if every candidate is filtered out in one iteration the
    # same frame is re-processed and the output is shorter than N — confirm
    # callers accept that.
    return used_frames
e5db0fd08697daef321fb34fa136c3ceed4efae6
76,996
def _MakeEntry(name, command): """Makes an entry with the given name to run the given command.""" return {"name": name, "command": command}
ccce1b69a4d5b62666607e1d4d8894ff1359c0c9
76,998
def bad_xml(tmpdir_factory):
    """JUnitXML sample representing invalid XML.

    Writes deliberately non-XML text to a temp file and returns its path.
    ``tmpdir_factory`` is pytest's session-scoped temp-dir fixture.
    """
    filename = tmpdir_factory.mktemp('data').join('bad.xml').strpath
    junit_xml = "Totally Bogus Content"
    with open(filename, 'w') as f:
        f.write(junit_xml)
    return filename
d04d93ca519b75489d40fe84873e09a1f3915893
76,999
def end_time(data):
    """Return the final timestamp of the trial, or 0 when none exist."""
    try:
        return data['trial_data']['timestamp'][-1]
    except IndexError:
        # Dump the offending record for debugging, then fall back to 0.
        print(data)
        return 0
c115a7ee6f5ff891712872db45fe84a2e860213e
77,016
def quanta_to_string(lx, ly, lz):
    """Pretty print monomials with quanta lx, ly, lz."""
    return "X" * lx + "Y" * ly + "Z" * lz
85522d2148debf20d857be3f46310d0a02759c4f
77,017
def url_composer(url, port=None):
    """
    URL composer

    :param url: URL to compose
    :param port: optional port to compose
    :return: composed URL with an http scheme and optional :port suffix
    """
    # The original only checked ``url[0] != 'h'``, so any scheme-less host
    # starting with 'h' (e.g. "hostname.com") was left without a scheme and
    # an empty string raised IndexError. Check for a real scheme prefix.
    if not url.startswith(('http://', 'https://')):
        url = 'http://{}'.format(url)
    if port:
        url = url + ':' + str(port)
    return url
ba3c11bdefff19ef1e4b35291837c33744c6c041
77,018
def handle(event, context):
    """handle a request to the function

    Args:
        event (dict): request params
        context (dict): function call metadata
    """
    greeting = (
        "Hello From Python3 runtime on Serverless Framework"
        " and Scaleway Functions"
    )
    return {"message": greeting}
f9194f20f675d1d111a7080246fa5616392ee370
77,023
import math


def LR_SE_calc(item1, item2, item3, item4):
    """
    Calculate likelihood ratio +/- standard error.

    :param item1: true positive or false negative (TP or FN)
    :type item1: int
    :param item2: number of actual positives (P)
    :type item2: int
    :param item3: false positive or true negative (FP or TN)
    :type item3: int
    :param item4: number of actual negatives (N)
    :type item4: int
    :return: standard error as float, or the string "None" on any
        arithmetic failure (e.g. zero counts or a negative variance)
    """
    try:
        variance = 1 / item1 - 1 / item2 + 1 / item3 - 1 / item4
        return math.sqrt(variance)
    except Exception:
        return "None"
f263b71c5837d55aa55794e9fe75e3e11ea53969
77,026
from typing import List
import random


def delete_token(seq: List, p: float):
    """
    Randomly drop tokens.

    Args:
        seq: original sequence
        p: drop rate

    Returns:
        - sequence where each token is independently kept with
          probability 1 - p
    """
    kept = []
    for token in seq:
        if random.random() > p:
            kept.append(token)
    return kept
886f26d3dda1621ecda12599e6c585c2cdad6522
77,029
def get_style_layers(similarity="balanced"):
    """
    Assigns weights to style layer outputs to define whether the generated
    image is similar to "content", "style", or "balanced".

    The selected layers are the first convolutional layer of each of the
    five VGG blocks; their activations along with the content layer (last
    layer) form the outputs of the neural style transfer network.

    Parameters
    ----------
    similarity: str, optional
        a string identifying the similarity to either content, style or both

    Returns
    -------
    style_layers
        a list of tuples identifying the name of style layer along with
        their weights
    """
    weight_profiles = {
        "balanced": (0.2, 0.2, 0.2, 0.2, 0.2),
        "content": (0.02, 0.08, 0.2, 0.3, 0.4),
        "style": (0.4, 0.3, 0.2, 0.08, 0.02),
    }
    if similarity not in weight_profiles:
        raise Exception(
            "Please provide either of 'content', 'style' or 'balanced' for --similarity"
        )
    names = ["block{}_conv1".format(i) for i in range(1, 6)]
    return list(zip(names, weight_profiles[similarity]))
eb18b183089066f3eeeaa0152e96a54447245ed1
77,040
def params_to_weights_and_biases(params, embed_dim):
    """Take a vector of parameters and return model weights and biases.

    Args:
        params: Vector of length `(embed_dim + 1) * num_classes`.
        embed_dim: Number of features in embedding.

    Returns:
        weights: `embed_dim x num_classes` matrix of weights.
        biases: Vector of length `num_classes` of biases.
    """
    # Row 0 holds the biases; the remaining rows hold the weights.
    table = params.reshape(embed_dim + 1, -1)
    biases = table[0, :]
    weights = table[1:, :]
    return weights, biases
274f476bae6b77b57b1103f175cec6787c43c58f
77,054
def is_ip_address(host):
    """
    >>> is_ip_address('')
    False
    >>> is_ip_address('192.168.2.1')
    True
    >>> is_ip_address('192.168')
    False
    >>> is_ip_address('1..2.3.4')
    False
    >>> is_ip_address('asdf.asdf.asdf.asdf')
    False
    >>> is_ip_address('999.0.0.1')
    False
    """
    # Do NOT drop empty parts: filtering them let malformed hosts such as
    # '1..2.3.4' or '192.168.2.1.' pass as four-part addresses.
    parts = host.strip().split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        if not (part.isdigit() and 0 <= int(part) <= 255):
            return False
    return True
ad3a03165b562c7c485163e5800bd9cc99b0888a
77,055
def create_gitlab_header_anchor(header_title):
    """Return a GitLab Markdown link anchored to the given header."""
    slug = header_title.lower().strip().replace(' ', '-')
    return '[{}](#{})'.format(header_title, slug)
fc03693ff4656f6f7cae9e7e531ac12c1b1cb5d7
77,056
def most_prolific(dict):
    """
    Takes a dict formatted like "book -> published year" and returns the
    year in which the most albums were released.

    If several years tie for the maximum number of releases, a list of
    those years is returned instead of a single year.
    """
    # year -> number of releases
    releases_per_year = {}
    for year in dict.values():
        releases_per_year[year] = releases_per_year.get(year, 0) + 1

    # releases count -> years having that count
    years_by_count = {}
    for year, count in releases_per_year.items():
        years_by_count.setdefault(count, []).append(year)

    busiest = years_by_count[max(years_by_count)]
    return busiest if len(busiest) > 1 else busiest[0]
3cad35f0b06dad0f68a674857be80db30e142e83
77,063
def parse_encoding_header(header):
    """
    Parse the HTTP Accept-Encoding header into a dict of the form,
    {encoding: qvalue}.

    >>> parse_encoding_header('') == {'': 1, 'identity': 1.0}
    True

    >>> parse_encoding_header('*') == {'*': 1, 'identity': 1.0}
    True

    >>> expected = {'identity': 1.0, 'gzip': 1.0, 'compress': 0.5}
    >>> parse_encoding_header('compress;q=0.5, gzip;q=1.0') == expected
    True

    >>> expected = {'*': 0.0, 'gzip': 1.0, 'identity': 0.5}
    >>> parse_encoding_header('gzip;q=1.0, identity; q=0.5, *;q=0') == expected
    True
    """
    # 'identity' is always acceptable unless explicitly overridden below.
    encodings = {'identity': 1.0}
    for chunk in header.split(","):
        name, _, params = chunk.partition(';')
        _, _, qvalue = params.partition('=')
        # A missing qvalue defaults to 1.
        encodings[name.strip()] = float(qvalue or 1)
    return encodings
3d2d2e4018ac06b0fedc57cbbddb8d9e1bf95d49
77,064
def get_ep_lemma(ep): """ Get lemma from a pyDelphin elementary predicate """ # if ep.pred == 'named': if ep.carg: return ep.carg elif ep.pred.pos == 'u' and ep.pred.sense == 'unknown' and "/" in ep.pred.lemma: cutpoint = ep.pred.lemma.rfind('/') return ep.pred.lemma[:cutpoint] else: return ep.pred.lemma
09d9b17409a951a373ef4d806da59d8a7e3c0cc3
77,065
import json


def dump(config, handle):
    """Serialise *config* as JSON through the open file *handle*."""
    result = json.dump(config, handle)
    return result
7aca015ffd32d726c9f54a8029fcdaca27c91f84
77,067
import base64


def base64_decode(value):
    """Decode a base64 payload into a UTF-8 string."""
    raw = base64.b64decode(value)
    return raw.decode("utf-8")
1631d1a0b12d7c3bbb19a0374a70a4d2da319fa7
77,069
def standardize_and_clip(tensor, min_value=0.0, max_value=1.0,
                         saturation=0.1, brightness=0.5):
    """Standardizes and clips input tensor.

    The tensor is standardised (mean = 0.0, std = 1.0), scaled by
    *saturation*, shifted by *brightness*, then clamped to
    [min_value, max_value].

    Args:
        tensor (torch.Tensor):
        min_value (float, optional, default=0.0)
        max_value (float, optional, default=1.0)
        saturation (float, optional, default=0.1)
        brightness (float, optional, default=0.5)

    Shape:
        Input: :math:`(C, H, W)`
        Output: Same as the input

    Return:
        torch.Tensor (torch.float32): Normalised tensor with values between
        [min_value, max_value]
    """
    detached = tensor.detach().cpu()
    std = detached.std()
    if std == 0:
        # Avoid division by zero for constant tensors.
        std += 1e-7
    normalised = (detached - detached.mean()) / std
    return (normalised * saturation + brightness).clamp(min_value, max_value)
9ecbdddc75289cc2c9927171c882938222a28469
77,073
def ts_truncate_time(timestamp):
    """
    Set time to zero in a timestamp.

    :param timestamp: Timestamp in seconds.
    :type timestamp: int
    :return: Timestamp in seconds at midnight of the same day
        (ie: DD-MM-YYYY 00:00:00)
    :rtype: int
    """
    seconds_per_day = 86400
    return (timestamp // seconds_per_day) * seconds_per_day
d5568c07cb13d991b5210e1652195512d9718ef1
77,074
def keywords_mapper(keyword, package):
    """
    Store `section` info as a single-element keywords list on a
    DebianPackage and return the package.
    """
    package.keywords = [keyword]
    return package
d3a3543519c264b393796e016bf0543a26fc1b56
77,082
def partition(word: str, partitions: int) -> int:
    """
    Find a bucket for a given word.

    Buckets 1..partitions-1 are spread over initial letters a-z; bucket 0
    is the catch-all for numbers, symbols and diacritics (and the only
    bucket when partitions <= 1).

    :param word:
    :param partitions:
    :return:
    """
    first = ord(word[0].lower())
    lo, hi = ord('a'), ord('z')
    if partitions > 1 and lo <= first <= hi:
        offset = first - lo
        return int(offset * (partitions - 1) / (hi - lo + 1)) + 1
    # Catch-all for numbers, symbols and diacritics.
    return 0
ddf981ca7cdd5ad3bd45f524c706c5901bfeb892
77,084
def remaining_balance(loan: float, interest_rate: float, years: int, payments: int) -> float:
    """
    Calculate the remaining loan balance.

    :param loan: initial loan amount
    :param interest_rate: yearly interest rate in percent
    :param years: loan term in years
    :param payments: total number of payments made
    :return: remaining balance
    """
    monthly_rate = interest_rate / 1200
    growth = monthly_rate + 1
    term_months = years * 12
    numerator = growth ** term_months - growth ** payments
    denominator = growth ** term_months - 1
    return loan * (numerator / denominator)
a02501e5b859cbd7c5efe6393dca5003197c30b0
77,088
def manual_input_code(self, username: str, choice=None):
    """
    Manual security code helper

    Parameters
    ----------
    username: str
        User name of a Instagram account
    choice: optional
        Whether sms or email

    Returns
    -------
    str
        Code
    """
    code = None
    # Re-prompt until the user supplies a non-empty, purely numeric code.
    # NOTE(review): the 6-digit length in the prompt is not enforced.
    while True:
        code = input(f"Enter code (6 digits) for {username} ({choice}): ").strip()
        if code and code.isdigit():
            break
    return code
db2df1c4be5a7d3d528064d51afd3a72f2c56b78
77,091
def get_data_pm_1sigma(x, e=()):
    """
    Compute the 68.27% confidence interval given the 1-sigma measurement
    uncertainties `e`, else return a 2-tuple with the data duplicated.

    Parameters
    ----------
    x: array-like
    e: optional, array-like or 2-tuple of array-like
        If array like, assume this is the 1-sigma measurement uncertainties
        If 2-tuple, assume these are the upper and lower confidence distance
        from the mean

    Returns
    -------
    2-tuple of lower/upper bounds, or ``(x, x)`` when no uncertainties given
    """
    if e is None:
        return x, x
    size = len(e)
    if size == 0:
        return x, x
    if size == 2:
        lower, upper = e
        return x - lower, x + upper
    return x - e, x + e
b078dd7faaf03d771516e5a51f927c2f6211d62b
77,096
def get_part(partDict, node):
    """Return the subgraph name whose node list contains *node*.

    Implicitly returns None when the node is in no partition.
    """
    for name, members in partDict.items():
        if node in members:
            return name
1109bf78eeec73309c09e777af762a5dccf538e4
77,097
def gpx4_patients(json_patients):
    """Return all patients with variants in GPX4 gene"""
    matches = []
    for patient in json_patients:
        features = patient.get("genomicFeatures")
        if features is None:
            continue
        if any(feature["gene"]["id"] == "GPX4" for feature in features):
            matches.append(patient)
    return matches
420750e9c8a64d7b4d64e5ed3c73260806b015f2
77,101
def get_base_docker_image(docker_file):
    """
    Returns the name of the base image given a path to a docker file.

    Raises StopIteration when the file has no FROM instruction (the
    original behaviour of the bare ``next`` call, kept for compatibility).
    """
    with open(docker_file) as f:
        from_line = next(
            line for line in f.read().split("\n") if line.startswith("FROM")
        )
    # Take only the image token so multi-stage lines like
    # "FROM python:3.9 AS builder" no longer raise ValueError on unpack.
    return from_line.split()[1]
d3ccbcdc267c253b8906851a9c91a02fb5d93714
77,103
def get_full_class_name(obj, limit=2):
    """Gets full class name of any python object. Used for error names.

    At most *limit* trailing dotted components are kept; builtins are
    reported without a module prefix.
    """
    cls = obj.__class__
    module = cls.__module__
    if module is None or module == str.__class__.__module__:
        qualified = cls.__name__
    else:
        qualified = module + "." + cls.__name__
    return ".".join(qualified.split(".")[-limit:])
591c4fa6a1814252d72565f8ee6fec678cca6a16
77,104
def verbose_name_plural(obj):
    """
    Returns the plural verbose name for a Django Model instance.
    """
    # _meta is Django's model options object; verbose_name_plural is the
    # human-facing plural label.
    return obj._meta.verbose_name_plural
bdaa38bfb308419d244c6e552475778810dc9088
77,105
def swip_swap(source, c1, c2):
    """
    Return the string *source* with all occurrences of characters c1 and
    c2 swapped.

    The original compared characters with ``is``, which tests object
    identity and only worked because CPython happens to intern some
    one-character strings; ``==`` compares by value and is correct for
    every character.
    """
    swapped = []
    for ch in source:
        if ch == c1:
            swapped.append(c2)
        elif ch == c2:
            swapped.append(c1)
        else:
            swapped.append(ch)
    return "".join(swapped)
ab4fd5ca4787ccfea03ee66b44701b8ea8306f40
77,106
import json
import re


def uri_reddit(bot, response, matches):
    """ Extract Reddit thread information.

    Returns a (header, title, meta) tuple built from the JSON payload of a
    Reddit thread URL; *matches* group 2 is the subreddit name and group 4
    selects a specific reply instead of the OP.
    """
    post, comments = json.loads(response.text)
    subreddit = matches.group(2)

    # Group 4 is set when the URI targets a specific comment; otherwise
    # describe the original post.
    wanted = comments if matches.group(4) else post
    data = wanted['data']['children'][0]['data']
    title = data['body'] if matches.group(4) else data['title']

    # Un-markdownify a bit.
    title = re.sub(r'\!?\[(.*)\]\((.+)\)', r'\1: \2', title)

    # Collapse whitespace and cap the length at 300 characters.
    title = re.sub(r'\s+', ' ', title.replace('\n', ' ')).strip()
    if len(title) > 300:
        title = title[:300] + '...'

    # Hide the score when reddit hides it.
    meta = None if data.get('score_hidden') else '↑↓{}'.format(data['score'])

    return 'Reddit: /r/{}'.format(subreddit), title, meta
bcc54d241bbf381ed39e6fea5728518739b60cd3
77,107
def build_geometry(self, sym=1, alpha=0, delta=0, is_simplified=False):
    """Build geometry of the LamSquirrelCage

    Delegates surface construction to the parent class, then rewrites any
    surface label containing "Wind" to use "Bare" instead.

    Parameters
    ----------
    self : LamSquirrelCage
        A LamSquirrelCage object
    sym : int
        Symmetry factor (1= full machine, 2= half of the machine...)
    alpha : float
        Angle for rotation [rad]
    delta : complex
        Complex value for translation
    is_simplified: bool
        True to avoid line superposition

    Returns
    -------
    list
        surf_list: list of surfaces
    """
    # NOTE(review): super(type(self), self) resolves against the *runtime*
    # type, not this class — if a subclass inherits this method it recurses
    # infinitely.  Prefer zero-argument super() / super(ThisClass, self);
    # left unchanged here because the enclosing class is not visible.
    surf_list = super(type(self), self).build_geometry(
        sym=sym, is_simplified=is_simplified, alpha=alpha, delta=delta
    )
    # Adapt the label: "Wind" surfaces from the parent become "Bare"
    # surfaces for the squirrel cage.
    for surf in surf_list:
        if "Wind" in surf.label:
            surf.label = surf.label.replace("Wind", "Bare")
    return surf_list
780f4f5ebc77ab54d5055b4abd851d63261847dd
77,108
def _detect_nonce_too_low_geth(message): """source: https://github.com/ethereum/go-ethereum/blob/60516c83b011998e20d52e2ff23b5c89527faf83/core/tx_pool.go#L51 """ return message.startswith("nonce too low")
268301a254c2452141c72040c608bb6ef9cef224
77,109
def update_quotas(neutron, project_id, network_quotas):
    """
    Updates the networking quotas for a given project.

    :param neutron: the Neutron client
    :param project_id: the project's ID that requires quota updates
    :param network_quotas: an object of type NetworkQuotas containing the
                           values to update
    :return: the result of the Neutron quota update call
    """
    # Copy each quota field from the NetworkQuotas object into the request
    # body; field names match the Neutron quota API keys.
    quota_fields = (
        'security_group',
        'security_group_rule',
        'floatingip',
        'network',
        'port',
        'router',
        'subnet',
    )
    update_body = {field: getattr(network_quotas, field)
                   for field in quota_fields}
    return neutron.update_quota(project_id, {'quota': update_body})
d3026a28e00658b063fbb98a35fb132ee44be960
77,110
def tick(text: str):
    """Return `text` with a check mark emoji prepended"""
    return f"\N{WHITE HEAVY CHECK MARK} {text}"
2309a4f3467a5556d5385a2d34e7e76b94ec2e60
77,114
import re


def get_float_from_string(s):
    """
    Clean up function for dirty data.
    Used for transceiver stats like current, voltage, etc.

    Extracts the numeric characters from *s* and returns them as a float,
    or 0 when no digits are present.
    """
    # NOTE(review): multiple separate numbers in *s* are concatenated
    # before conversion (e.g. "1.2 V 3.4" -> "1.23.4"), which raises
    # ValueError; kept as-is to preserve existing behavior.
    pieces = re.findall(r"[-+]?\d*\.\d+|\d+", s)
    joined = ''.join(pieces)
    if not joined:
        return 0
    return float(joined)
771ae4ca0f865629706620c34ac4208e5b2ca6bf
77,116
def read_node_to_hostname_mapping(config_file):
    """ Returns two maps: <ns_id, hostname> and <lns_id, hostname> by parsing
    a node config file.

    Each line is whitespace-separated: ``<node_id> <is_ns> <hostname>``;
    the node goes into the name-server map when the second token is
    'yes'/'true' (case-insensitive), otherwise into the local-name-server
    map.

    Fix: the file is now opened with a context manager so the handle is
    closed deterministically (the original leaked it).
    """
    ns_ids = {}
    lns_ids = {}
    with open(config_file) as cfg:
        for line in cfg:
            tokens = line.split()
            if tokens[1].lower() in ('yes', 'true'):
                ns_ids[tokens[0]] = tokens[2]
            else:
                lns_ids[tokens[0]] = tokens[2]
    return ns_ids, lns_ids
e720df98af462667ddb2eecd53bd18e9bcfde4eb
77,124
def get_tweet_word_frequencies(word_vocab_dict, tweet_text):
    """
    Sum the per-word sentiment frequencies for every word of a tweet.

    Parameters:
        word_vocab_dict (dict): maps (word, sentiment) keys — sentiment 0
            for negative, 1 for positive — to corpus counts
        tweet_text (str): preprocessed tweet (iterable of words)

    Returns:
        list(int): [negative total, positive total] word frequencies
    """
    negative_total = sum(word_vocab_dict.get((word, 0), 0)
                         for word in tweet_text)
    positive_total = sum(word_vocab_dict.get((word, 1), 0)
                         for word in tweet_text)
    return [negative_total, positive_total]
41dfc0ef2e1de750fb6090b05d322833d7320ca8
77,125
def comm(lhs, rhs):
    """Returns (left-only, common, right-only)
    """
    both = lhs & rhs
    left_only = lhs - both
    right_only = rhs - both
    return left_only, both, right_only
9fcf18e1c4b891406e2a095c1a2a4ec8f946dde1
77,129
def find_individual_risks(risks):
    """Sum the attribute-wise risks per row to get individual risks.

    `risks` is a list of lists of floats.  The result is one list of
    floats, one total per row.
    """
    return [sum(row) for row in risks]
1bd0e1df12e8513fec3444f6f0727fc593c8452f
77,130
def predictions_for_df(df, inferrer):
    """Returns a copy of df with a column of activations per sequence.

    Args:
      df: DataFrame with columns 'sequence' and 'sequence_name'.
      inferrer: inferrer.

    Returns:
      pd.DataFrame with the original columns plus 'predictions'
      (list-valued; shape depends on inferrer.activation_type).
      The input df is not modified.
    """
    out_df = df.copy()
    activations = inferrer.get_activations(out_df.sequence.values)
    out_df['predictions'] = activations.tolist()
    return out_df
7a5979162836719ed875ec147808a2c31c254f33
77,134
def requirements(filename='requirements.txt'):
    """Return a list of requirements."""
    with open(filename) as req_file:
        contents = req_file.read()
    return contents.splitlines()
b404263b34c6830b862fcc1d5caeaad05ab9ff36
77,135
import re def _tsreplaced(content): """Replace timestamps consisting of 14 digits in the given content.""" return re.sub(r"\d{14}", 14 * "X", content)
7b0fabdf291a513ef13c479d8579eaa7364f9e6a
77,141
def isnum(a):
    """True if a is an integer or floating-point number.

    Fix: return the boolean from isinstance directly instead of 1/0,
    matching the docstring; bool is a subclass of int, so callers relying
    on truthiness or comparison with 1/0 are unaffected.  Note that bool
    values themselves count as ints here (isinstance(True, int) is True),
    as in the original.
    """
    return isinstance(a, (int, float))
7f730d94faa5c497f7d7539c51cad185cc5d09cb
77,142
from typing import Dict
import yaml
import click
def _load_yaml(filepath: str) -> Dict:
    """
    Read and parse a YAML file.

    :param filepath: str path to yaml file.
    :return: dict YAML content
    :raises click.ClickException: when the file cannot be parsed as YAML.
    """
    with open(filepath, "r") as config_file:
        try:
            return yaml.safe_load(config_file)
        except yaml.YAMLError as exc:
            message = "Loading yaml config from {} failed: {}".format(filepath, exc)
            raise click.ClickException(message)
69e0f84d9fddf0a2bdc9d151be9baebe4d658b9f
77,143
def s3_path_to_name(path: str, joiner: str = ":") -> str:
    """
    Remove the bucket name (first path component) and join the remaining
    components with *joiner*.
    """
    components = path.rstrip("/").split("/")
    return joiner.join(components[1:])
d74331a5fa090c3778d914c689ee5c7ec7b06021
77,144
def name_to_number(name):
    """
    Helper function that converts the string "name" into a "number"
    between "0" and "4":

    0 - rock
    1 - Spock
    2 - paper
    3 - lizard
    4 - scissors

    Any other input yields -1.
    """
    # Table lookup replaces the if/elif ladder; .get supplies the
    # invalid-input sentinel.
    choices = {
        'rock': 0,
        'Spock': 1,
        'paper': 2,
        'lizard': 3,
        'scissors': 4,
    }
    return choices.get(name, -1)
8caaa73a40dea14b07d3657453dd76bbc15e3f75
77,147
import torch


def _get_strided(waveform, window_size, window_shift, snip_edges):
    """ Given a waveform (1D tensor of size num_samples), it returns a 2D
    tensor (m, window_size) representing how the window is shifted along the
    waveform. Each row is a frame.

    Inputs:
        sig (Tensor): Tensor of size num_samples
        window_size (int): Frame length
        window_shift (int): Frame shift
        snip_edges (bool): If True, end effects will be handled by outputting
            only frames that completely fit in the file, and the number of
            frames depends on the frame_length.  If False, the number of
            frames depends only on the frame_shift, and we reflect the data
            at the ends.

    Output:
        Tensor: 2D tensor of size (m, window_size) where each row is a frame
    """
    assert waveform.dim() == 1
    num_samples = waveform.size(0)
    # Row stride advances one frame shift; column stride advances one sample.
    strides = (window_shift * waveform.stride(0), waveform.stride(0))

    if snip_edges:
        if num_samples < window_size:
            # Not even one full frame fits.
            # NOTE(review): this empty tensor uses the default dtype, which
            # may differ from waveform.dtype — confirm callers don't care.
            return torch.empty((0, 0))
        else:
            m = 1 + (num_samples - window_size) // window_shift
    else:
        # Reflect-pad both ends so that frame centers stay aligned with the
        # original signal; the reversed waveform supplies the reflections.
        reversed_waveform = torch.flip(waveform, [0])
        m = (num_samples + (window_shift // 2)) // window_shift
        pad = window_size // 2 - window_shift // 2
        pad_right = reversed_waveform
        if pad > 0:
            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
            # but we want [2, 1, 0, 0, 1, 2]
            pad_left = reversed_waveform[-pad:]
            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
        else:
            # pad is negative so we want to trim the waveform at the front
            # (when pad == 0, waveform[-0:] is the whole waveform, i.e. no
            # trim, which is the intended no-op).
            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)

    sizes = (m, window_size)
    # as_strided builds the frame matrix as a view — rows may overlap.
    return waveform.as_strided(sizes, strides)
f5ed429866926afc7a3915439fd84dbdd6352357
77,149
import re


def contains_leading_zeroes(string):
    """
    Check if a string representing a digit contains leading zeroes.

    Returns a bool.  (The original returned a mix of False / None /
    re.Match depending on the input; wrapping in bool() normalizes the
    result while preserving truthiness for all callers.)
    """
    return string.isdigit() and bool(re.match(r'^0[0-9]', string))
d4b5ded69fa5c4e351fa51f70cb08479bd2e9571
77,151
import re


def get_jobid(s, queuing_system):
    """Process textual output from qsub to get the job id.

    GE:
       ..... Your job 2844562 ("ENVtest") has been submitted
                      ^^^^^^^
    PBS:
        332161.gordon-fe2.local
        ^^^^^^^^^^^^^^^^^^^^^^^
    SLURM:
       ..... Submitted batch job 835291
                                 ^^^^^^

    Returns the job id as a string, or None when the GE/SLURM output does
    not match.  Raises ValueError for an unknown queuing system.

    Fix: the regex patterns are now raw strings; the originals contained
    invalid escape sequences (``\\(``, ``\\)``) that raise
    DeprecationWarning and will become a SyntaxError in future Python.
    """
    if queuing_system == "PBS":
        return s.strip()
    elif queuing_system == "GE":
        m = re.search(r'Your job (?P<jobid>\d+) \("(?P<jobname>[^ "]+)"\)', s)
        if m:
            return m.group('jobid').strip()
    elif queuing_system == "SLURM":
        m = re.search(r'Submitted batch job (?P<jobid>\d+)', s)
        if m:
            return m.group('jobid').strip()
    else:
        raise ValueError("Unknown queuing system %r" % queuing_system)
    return None
dc5cd18ea9424a914acddf7e343278574e7ac35e
77,160
import requests


def lcls_archiver_history(pvname,
                          start='2018-08-11T10:40:00.000-07:00',
                          end='2018-08-11T11:40:00.000-07:00',
                          verbose=True):
    """
    Get time series data from a PV name pvname, with start and end times in
    ISO 8601 format, using the EPICS Archiver Appliance:

    https://slacmshankar.github.io/epicsarchiver_docs/userguide.html

    Returns tuple:
        secs, vals
    where secs is the UNIX timestamp, seconds since January 1, 1970, and
    vals are the values at those times.

    Seconds can be converted to a datetime object using:
        import datetime
        datetime.datetime.utcfromtimestamp(secs[0])

    Fix: the request URL is only printed when verbose is True (the
    parameter was previously ignored).  Default behavior is unchanged.
    """
    url = "http://lcls-archapp.slac.stanford.edu/retrieval/data/getData.json?"
    url += "pv=" + pvname
    url += "&from=" + start
    url += "&to=" + end
    if verbose:
        print(url)
    r = requests.get(url)
    data = r.json()
    # The appliance wraps the series in a one-element list.
    points = data[0]['data']
    secs = [p['secs'] for p in points]
    vals = [p['val'] for p in points]
    return secs, vals
f2c3dd62f083ec0d4abb87286e90460fdcce0bb1
77,164
def get_remote_ip(request):
    """Get the IP address of the host that connected to Heroku

    (This may be a proxy, so don't assume it's the client's actual IP
    address.)  X-Forwarded-For may hold a comma-separated list; the last
    entry was appended by Heroku and is the trustworthy one.
    """
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR")
    raw = forwarded or request.META.get("REMOTE_ADDR")
    return raw.split(",")[-1].strip()
7bdc5763844c4b12d8b54b52fe34beaf56484670
77,170
def get_post_data(widget_list, widget_class, configuration):
    """
    Helper function to construct generic widget data for an arbitrary
    class and configuration.
    """
    return dict(
        widget_class=widget_class,
        position=0,
        title="example",
        configuration=configuration,
        widget_list=widget_list.id,
        react_renderer=None,
    )
789ce45aa0b9cb07572aadec7bb6c76feaa4383b
77,172
def _reverse_normalize_function_name(function): """ Given a Python funciton name, convert it into a C++ function name. For example "set_positions" becomes "SetPositions". """ name = function[0].upper() next_upper = False for l in function[1:]: if l == '_': next_upper = True continue name += l.upper() if next_upper else l next_upper = False return name
84ceca92924e50c3df71ecd8d9130c5878ba483e
77,176
def clamp(n, lower, upper):
    """
    Restricts the given number to a lower and upper bound (inclusive).

    The bounds may be given in either order; they are normalized first.

    :param n: input number
    :param lower: lower bound (inclusive)
    :param upper: upper bound (inclusive)
    :return: clamped number
    """
    lo, hi = (lower, upper) if lower <= upper else (upper, lower)
    if n < lo:
        return lo
    if n > hi:
        return hi
    return n
9e73b0662ba0f29f0d23c8621e4e8da274b6569c
77,178