content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import uuid


def generate_uuid():
    """Generate a version-1 UUID.

    Version-1 UUIDs are derived from the current timestamp and the
    host's MAC address.

    :return: UUID (https://docs.python.org/2/library/uuid.html)
    """
    return uuid.uuid1()
dc0d1832bd4e916adb57d1f59e8bac3a7b363f68
42,069
def complete(repository):
    """Fill in missing paths of URL.

    A value containing ':' is assumed to be a full URL and returned
    unchanged; otherwise it must be an 'owner/name' pair, which is
    prefixed with the GitHub base URL.
    """
    if ':' in repository:
        return repository
    assert '/' in repository
    return 'https://github.com/' + repository.strip()
8ce9378b3e75ebe92ef94c29854acd0e2eebb677
42,070
import argparse def parse_args(): """ Parse the arguments for this script and return the namespace from argparse. """ parser = argparse.ArgumentParser(description="Initialize student repos with instructor-provided code.") parser.add_argument("server", type=str, help="git server, e.g., 'https://submitty.myuniversity.edu/git/'") parser.add_argument("semester", type=str, help="semester, e.g., 's19'") parser.add_argument("course", type=str, help="course, e.g., 'csci1200'") parser.add_argument("repo", type=str, help="gradeable/repo name, e.g., 'hw1'") parser.add_argument("src_dir", type=str, help="provided source code directory/filename, e.g., '~/teaching/provided_code/hw1'") parser.add_argument("message", type=str, help="git commit message, e.g., 'initial code'") parser.add_argument("students", type=str, help="file containing student ids, e.g., '~/teaching/students.txt'") return parser.parse_args()
4ef3d1774be3d8fd0d85a2a7cc453deefa6564f7
42,071
def py6S_sensor(mission):
    """Return the Py6S satellite_sensor name for a satellite mission name.

    Raises KeyError for an unknown mission.
    """
    sensors = {
        'Sentinel2': 'S2A_MSI',
        'Landsat8': 'LANDSAT_OLI',
        'Landsat7': 'LANDSAT_ETM',
        'Landsat5': 'LANDSAT_TM',
        'Landsat4': 'LANDSAT_TM',
    }
    return sensors[mission]
87e943ea55be17b9a19a3908191a798520180c04
42,072
def linky_to_json(linky_dict, occurence):
    """Build an InfluxDB-style JSON body from a Linky teleinfo reading.

    :param linky_dict: dict with keys 'HC', 'HP', 'IINST', 'PAPP', 'IMAX'
    :param occurence: occurrence tag stored with each measurement
    :return: list of measurement dicts (json array)
    """
    # (measurement name, source key in linky_dict) pairs, in output order.
    measurements = [
        ("Index_HC", "HC"),
        ("Index_HP", "HP"),
        ("Current_A", "IINST"),
        ("Power_VA", "PAPP"),
        ("Imax_A", "IMAX"),
    ]
    return [
        {
            "measurement": name,
            "tags": {"Location": "Linky", "Occurence": occurence},
            "fields": {"value": linky_dict[key]},
        }
        for name, key in measurements
    ]
655cbce01491b9a93addd4997e1b23878b180ca8
42,075
def make_clustering_matrix(matrix):
    """Build a lower-triangular similarity matrix between reads.

    For each pair (i, j) with j < i, similarity is the fraction of
    positions where the two reads agree, ignoring positions where both
    reads carry a gap ('-'). Diagonal entries are 0.

    :param matrix: sequence of equal-length reads
    :return: dict-of-dicts mapping str(i) -> str(j) -> similarity
    """
    clustering_matrix = {}
    for i, read_i in enumerate(matrix):
        row = {}
        clustering_matrix[str(i)] = row
        for j in range(i + 1):
            if j == i:
                row[str(j)] = 0
                continue
            read_j = matrix[j]
            comparable = len(read_i)
            in_common = 0
            for pos in range(len(read_i)):
                if read_i[pos] == '-' and read_j[pos] == '-':
                    # Both gapped: the position carries no information.
                    comparable -= 1
                elif read_i[pos] == read_j[pos]:
                    in_common += 1
            row[str(j)] = in_common / comparable if comparable else 0
    return clustering_matrix
19de2c5f05d827deba19e507df424be121353c4f
42,077
def _get_timezone_name(timezone): """Return the name of ``timezone``.""" return timezone.tzname(None)
e33ea45ac5cee435b528e3e8a794e646bb569791
42,078
def returnLineEquCoef(p1, p2):
    """Return [slope m, y-intercept n] of the line through p1 and p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    if x2 != x1:
        m = (y2 - y1) / (x2 - x1)  # slope (a)
    else:
        # Vertical line: approximated by a very steep slope of 100.
        m = 100
    n = y1 - (m * x1)  # y-intercept (b)
    return [m, n]
c9c4c61133e9c42d5ffc0586cef26fbe54008c5b
42,079
def object_name_to_label(object_class):
    """Convert an S3DIS object name to its integer label.

    Unknown names (and 'stairs') map to 0.
    """
    labels = {
        'ceiling': 1,
        'floor': 2,
        'wall': 3,
        'column': 4,
        'beam': 5,
        'window': 6,
        'door': 7,
        'table': 8,
        'chair': 9,
        'bookcase': 10,
        'sofa': 11,
        'board': 12,
        'clutter': 13,
        'stairs': 0,
    }
    return labels.get(object_class, 0)
46417ff4033b50079d9a323bb790362368e2abd2
42,080
from typing import List


def verify_stock_before_checkout(products: List[dict]) -> dict:
    """Returns a dictionary of products whose quantities are higher than the inventory allows.

    Maps product id -> available stock for every product where the
    requested quantity exceeds the stock on hand.
    """
    return {
        item["id"]: item["stock"]
        for item in products
        if item["stock"] < item["quantity"]
    }
0dc46eb6c1cf50c13d2a06568051ff748d5ee501
42,082
def expand_case_matching(s):
    """Expand a string into a case-insensitive globbable string.

    Each alphabetic character outside bracket/brace groups becomes a
    character class matching both cases, e.g. 'a' -> '[Aa]'. Characters
    inside existing '[...]' or '{...}' groups are left untouched.
    """
    t = []
    openers = {'[', '{'}
    closers = {']', '}'}
    nesting = 0  # depth inside [...] / {...} groups; note ] and } are interchangeable here
    for c in s:
        if c in openers:
            nesting += 1
        elif c in closers:
            nesting -= 1
        elif nesting > 0:
            # Inside an existing group: copy through unchanged.
            pass
        elif c.isalpha():
            folded = c.casefold()
            if len(folded) == 1:
                c = '[{0}{1}]'.format(c.upper(), c.lower())
            else:
                # casefold can expand to several chars (e.g. 'ß' -> 'ss'):
                # make all but the last expanded char optional, and let the
                # last class also match the original character itself.
                newc = ['[{0}{1}]?'.format(f.upper(), f.lower())
                        for f in folded[:-1]]
                newc = ''.join(newc)
                newc += '[{0}{1}{2}]'.format(folded[-1].upper(),
                                             folded[-1].lower(),
                                             c)
                c = newc
        t.append(c)
    t = ''.join(t)
    return t
5dd8d40f93fd115b48eecbc8e85fe096653d0663
42,083
def power_of_two(n):
    """Check if value is a power of two.

    Fixes over the previous version: 1 (== 2**0) is now correctly True,
    and non-positive inputs return False instead of recursing forever.
    Float inputs that are exact powers of two (e.g. 8.0) are accepted.

    :param n: number to test
    :return: True if n is a positive integral power of two
    """
    if n <= 0:
        return False
    if n != int(n):
        # Non-integral values can never be (integer) powers of two.
        return False
    n = int(n)
    # A power of two has exactly one bit set.
    return n & (n - 1) == 0
8a435ac95f6d8b2b8006788400b7bd04615f8b5e
42,085
def distance_flown(reindeer, seconds):
    """Distance a reindeer covers in ``seconds``, given its description line.

    >>> distance_flown("Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.", 1000)
    1120
    >>> distance_flown("Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds.", 1000)
    1056
    """
    words = reindeer.split(" ")
    speed = int(words[3])
    fly_time = int(words[6])
    rest_time = int(words[13])
    # Full fly/rest cycles, then a possibly-truncated final flight.
    cycles, remainder = divmod(seconds, fly_time + rest_time)
    return speed * (cycles * fly_time + min(remainder, fly_time))
323889665a5f57d25546c1e124eeac3a4498b6de
42,086
def ofs_nbits(start, end):
    """Pack a start/end bit range into the OXM/NXM ``ofs_nbits`` encoding.

    Converts the ``field[start..end]`` notation of ovs-ofctl into a single
    integer::

        ofs_nbits = (start << 6) + (end - start)

    :param start: start bit of the OXM/NXM field
    :param end: end bit of the OXM/NXM field
    :return: the encoded ofs_nbits value
    """
    width = end - start
    return (start << 6) + width
50282803afade8508905caf16987ccc8647b4ae3
42,087
def to_bytes(seq):
    """Convert a sequence of byte values (ints 0-255) to a bytes object."""
    buf = bytearray()
    buf.extend(seq)
    return bytes(buf)
433a9af70ee429ec6b978d8a3e642654ca224631
42,088
def hsl_to_rgb(hsl_array):
    """! @brief Convert hsl array [h, s, l] to rgb array [r, g, b].
    @details HSL where h is in the set [0, 359] and s, l are in the set
    [0.0, 100.0]. RGB where r, g, b are in the set [0, 255].
    Formula adapted from https://www.rapidtables.com/convert/color/hsl-to-rgb.html
    @param hsl_array HSL array [h, s, l].
    @return RGB array [r, g, b].
    """
    hue, sat, lum = hsl_array
    # Normalize saturation and lightness to [0, 1].
    sat, lum = sat / 100, lum / 100
    red, green, blue = None, None, None
    if sat == 0:
        # Achromatic (grayscale): every channel equals the lightness.
        red = green = blue = lum
    else:
        chroma = sat * (1 - abs(2 * lum - 1))
        x = chroma * (1 - abs(((hue / 60) % 2) - 1))
        m = lum - (chroma / 2)
        # Pick the channel ordering for the 60-degree hue sector.
        if 0 <= hue < 60:
            red, green, blue = chroma, x, 0
        elif 60 <= hue < 120:
            red, green, blue = x, chroma, 0
        elif 120 <= hue < 180:
            red, green, blue = 0, chroma, x
        elif 180 <= hue < 240:
            red, green, blue = 0, x, chroma
        elif 240 <= hue < 300:
            red, green, blue = x, 0, chroma
        elif 300 <= hue < 360:
            red, green, blue = chroma, 0, x
        red, green, blue = (red + m), (green + m), (blue + m)
    return [round(red * 255), round(green * 255), round(blue * 255)]
0984c4036d09f8874f9a228ca7494096ddd6ef8f
42,090
def get_lambda_timeout(lambda_context) -> int:
    """Return the remaining Lambda execution time in whole seconds."""
    remaining_ms = lambda_context.get_remaining_time_in_millis()
    return int(remaining_ms / 1000)
b66f485bc5394151757f662e4d9b2be7c7e379f3
42,091
def retrieve_nuts3(url: str) -> str:
    """Extract the nuts3 code from a URL (the second-to-last path segment)."""
    segments = url.split('/')
    return segments[-2]
aaf1047a477ff59498509495749efc1c0ff25b0a
42,092
def scale_features(sample, header, scale_parameters, scale_method):
    """Scale a feature vector.

    Parameters
    ----------
    sample : list
        Feature values to scale, aligned with ``header``.
    header : list
        Feature names.
    scale_parameters : dict
        Per-feature dict with keys "mean", "stdev", "min", "max".
    scale_method : str
        "Standard" (z-score) or "MinMax"; any other value returns the
        sample unscaled.

    Returns
    -------
    list
        The scaled sample (or the original sample for unknown methods).
    """
    if scale_method == "Standard":
        scaled = []
        for value, name in zip(sample, header):
            params = scale_parameters[name]
            # Guard against zero variance with a small epsilon.
            stdev = params["stdev"] or 0.001
            scaled.append((value - params["mean"]) / stdev)
        return scaled
    if scale_method == "MinMax":
        scaled = []
        for value, name in zip(sample, header):
            params = scale_parameters[name]
            span = params["max"] - params["min"]
            # Guard against a degenerate range with a small epsilon.
            if span == 0:
                span = 0.001
            scaled.append((value - params["min"]) / span)
        return scaled
    return sample
f0a54417c9f17b3962279c4622db4d0f68058c12
42,094
import os


def default_storage_dir():
    """Return the default directory for storing notebook history data
    (~/.jupyter/janus)."""
    home = os.path.expanduser('~')
    return os.path.join(home, '.jupyter', 'janus')
6149c982f5f7b37c40f29807ef6bbb232b8fe013
42,095
def is_tiff(data):
    """True if data is the first 4 bytes of a TIFF file.

    Checks both the big-endian ('MM', 42) and little-endian ('II', 42)
    TIFF magic numbers. Accepts ``bytes`` (the correct type for data read
    from a binary file — the previous version compared bytes against str
    literals and always returned False on Python 3) and, for backward
    compatibility, ``str``.
    """
    magics = (b'MM\x00\x2a', b'II\x2a\x00', 'MM\x00\x2a', 'II\x2a\x00')
    return data[:4] in magics
7136d494f42169345ee09d58c1468330e22b6590
42,096
def unescape(string):
    """Unescape a string: drop each backslash, keeping the character after it.

    A trailing unpaired backslash is silently dropped.
    """
    chars = []
    escaped = False
    for ch in string:
        if not escaped and ch == "\\":
            # Swallow the backslash; the next char is emitted verbatim.
            escaped = True
            continue
        chars.append(ch)
        escaped = False
    return "".join(chars)
4cccf10401c83569ac5bb048a92a2dc704144f94
42,097
def d_price_d_rate(crv):
    """First derivative of each node in a discount curve w.r.t. its discount rate.

    The curve holds zero-coupon bond prices of the form
    z_i = exp(-r_i * ttm_i), so dz/dr = -ttm * z.
    """
    maturities = crv.sched_maturity
    return -1 * maturities * crv.discount_factor(maturities)
1c77105fb0f2e3c08584196eddabcc1355c9ab85
42,098
def is_valid_nickname(nick):
    """Return True if the given name is a valid nickname.

    Valid nicknames are 1-16 characters drawn from ASCII letters, digits,
    and `^-_[]{}|\\ .
    """
    if not 0 < len(nick) <= 16:
        return False
    allowed = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`^-_[]{}|\\")  # noqa
    return set(nick) <= allowed
d0756fd31ef62349e184ca52a798ec3585cb6040
42,100
def hostvars(duthosts): """Return host variables dicts for DUTs defined in testbed.""" if not duthosts: return {} var_manager = duthosts[-1].host.options["variable_manager"] hostvars_all = var_manager.get_vars()["hostvars"] return {duthost.hostname: hostvars_all[duthost.hostname] for duthost in duthosts}
14d7ae39275b462397e619708e4cdfcfec35feb9
42,101
def read_string(fd, loc=None):
    """Read a null-terminated UTF-8 string.

    The file position is left one byte past the NUL terminator.

    :param fd: file handle opened in binary mode
    :param loc: optional offset to seek to before reading
    :return: the decoded string
    """
    if loc is not None:
        fd.seek(loc)
    chunks = []
    # Read one byte at a time until the NUL sentinel is hit.
    for byte in iter(lambda: fd.read(1), b"\x00"):
        chunks.append(byte)
    return b"".join(chunks).decode("utf-8")
6eaed519162874c85f41d4525e450b4fff35ab71
42,104
import torch


def to_tensor(dataset):
    """Convert each element of ``dataset`` to a torch.IntTensor.

    :param dataset: iterable of tensor-convertible sequences
    :return: list of torch.IntTensor
    """
    return list(map(torch.IntTensor, dataset))
eb9ec23e274a7cf797c74a92524ccfbc9b8eb0e3
42,105
def encode1( strs):
    """Encodes a list of strings to a single string.

    Each string is prefixed with its length as an 8-digit zero-padded
    hex number.

    :type strs: List[str]
    :rtype: str
    """
    return "".join("%08x%s" % (len(s), s) for s in strs)
6982e8100a6fa985626f7c795c714b0fcec5ac1d
42,106
def list2str(lst, short=True):
    """Return a string representing a list.

    With ``short=True`` the elements are joined with ', ' (requires
    string elements); otherwise the list's ``str()`` form is returned.
    If conversion fails (e.g. non-string elements or a non-iterable),
    falls back to '' (short) or '[]' (long).

    The previous bare ``except`` was narrowed to TypeError, which is
    what ``str.join`` raises for non-string/non-iterable input.
    """
    try:
        return ', '.join(lst) if short else str(lst)
    except TypeError:
        return "" if short else "[]"
cdd8ccd778c9037832c96f5a399beab490ff6d6b
42,108
import re
import os


def get_control_lib(file_name):
    """Return the expected path for a controls file, rooted at this
    module's directory.

    :param str file_name: basename of the controls file
    """
    parts = re.split(r'/|\\', os.path.dirname(__file__))
    # Re-add a separator to bare Windows drive letters ("c:" -> "c:\"),
    # which os.path.join would otherwise ignore.
    if parts[0].endswith(':'):
        parts[0] += os.sep
    parts.append(file_name)
    return os.path.join(*parts)
e0009ecde049cc4fdacaf7297f6ecc720df009c3
42,109
import random


def get_network_latency(latency, bandwidth, queue):
    """Simulate a real world link latency.

    NOTE(review): the ``return 0`` below short-circuits the function, so
    the jittered-latency formula after it is dead code and ``bandwidth``,
    ``queue_count`` and ``max_buffer_size`` are effectively unused. It
    looks like the simulation was deliberately disabled (debugging?) —
    confirm intent before removing either the early return or the
    unreachable code.
    """
    queue_count = len(queue)  # unused while the early return is in place
    buffered_size = sum([p['size'] for p in queue])
    max_buffer_size = 9 * 1024 * 1024  # 9 MiB buffer capacity
    return 0
    # Unreachable: latency jittered by +/-50% and scaled by buffer occupancy.
    return (0.5 + random.random()) * latency * (1 + buffered_size / max_buffer_size)
15d20e90e70ae8162054f9bc2c93596fe2f21e7e
42,110
def script(content="", src="", scripttype="text/javascript"):
    """ returns a unicode text html script element.

    >>> script('#this is a comment', scripttype="text/applescript")
    '<script type="text/applescript">#this is a comment</script>'
    """
    attrs = [u'<script']
    if scripttype:
        attrs.append(u'type="%s"' % scripttype)
    if src:
        attrs.append(u'src="%s"' % src)
    return u' '.join(attrs) + u'>%s</script>' % content
5d7bdd84fff1047677ea3f648c786d5fac3c833f
42,111
def nu2wav(nus, outputunit="AA"):
    """wavenumber to wavelength (AA)

    Args:
        nus: wavenumber array (cm-1), ascending
        outputunit: unit of wavelength ("AA" or "nm")

    Returns:
        wavelength array in the requested unit, descending-to-ascending
        reversed so it stays sorted
    """
    reversed_nus = nus[::-1]
    if outputunit == "nm":
        return 1.e7 / reversed_nus
    if outputunit != "AA":
        print("Warning: assumes AA as the unit of output.")
    return 1.e8 / reversed_nus
fc1c1b1bbb284ed57dc85be76c257c8ce4245f99
42,114
def identity(x):
    """Identity map: return the argument unchanged."""
    return x
175def70d5694d5b9c0ca513ceea6c31860a4f8f
42,115
import torch


def get_devices():
    """Return the currently available devices.

    ('cuda', 'cpu') when CUDA is usable, otherwise ('cpu',).
    """
    if not torch.cuda.is_available():
        return ('cpu',)
    return 'cuda', 'cpu'
318ad722db1ec68de4f6ccde0c2fa52dce2de599
42,116
from typing import Optional from typing import Union import json def _jsify_dict(d: dict, stringify: Optional[bool] = False) -> Union[dict, str]: """ JavaScript-ify Python dicts: use camelCase string keys. """ def _snake2camel(s: str) -> str: s_split = s.split("_") return "".join([s_split[0]] + [e.capitalize() for e in s_split[1:]]) d2 = dict( (_snake2camel(str(k)), _jsify_dict(v) if isinstance(v, dict) else v) for k, v in d.items()) return d2 if not stringify else json.dumps( d2, separators=(',', ':')).replace("\"", "'")
48f9694c4a99b46dc240e3d3458849eaededd9ab
42,117
from pathlib import Path


def find_path_from_parent(session, parent, path):
    """Walk down from ``parent``, matching each component of ``path``.

    ``path`` must start with ``parent.basename``; each subsequent '/'
    component is looked up as a child of the previous match.

    :param session: database session used for the child lookups
    :param parent: row whose subtree is searched
    :param path: full path string rooted at ``parent``
    :return: the matching child row, or None if any component is missing

    (A leftover debug ``print`` of each component was removed.)
    """
    assert path.startswith(parent.basename)
    remaining = path[len(parent.basename):]
    components = remaining.split('/')[1:]  # exclude the leading ''
    match = parent
    for component in components:
        match = (session
                 .query(Path)
                 .filter_by(parent=match, basename=component)
                 .one_or_none())
        if match is None:
            # No child matched this component; give up early.
            break
    return match
13c745b41e1df50cb79f4af92747166cd99ac926
42,118
def hamming(s1, s2):
    """Return the minimum Hamming distance between s1 and s2.

    If the lengths differ, the shorter string is slid along the longer
    one and the minimum mismatch count over all alignments is returned.
    """
    if len(s1) == len(s2):
        return sum(a != b for a, b in zip(s1, s2))
    shorter, longer = (s1, s2) if len(s1) < len(s2) else (s2, s1)
    slack = len(longer) - len(shorter)
    return min(
        sum(a != b for a, b in zip(shorter, longer[off:off + len(shorter)]))
        for off in range(slack + 1)
    )
ab47e4f872e5984410dc03d79b57dd24ecd8836f
42,119
def add_format_field(record, fieldname):
    """Return record with fieldname appended to its FORMAT field if it
    doesn't already exist.

    :param record: object with a colon-separated ``FORMAT`` attribute
    :param fieldname: field to append
    :return: the (possibly mutated) record

    (Renamed the local that previously shadowed the builtin ``format``.)
    """
    fields = record.FORMAT.split(':')
    if fieldname not in fields:
        record.FORMAT = record.FORMAT + ":" + fieldname
    return record
bcc3ff87a80bb561462b11428eeba8561bcc100f
42,120
def replace_dict_value(d, bad_val, good_val):
    """Out-of-place dictionary value replacement.

    :param d: source dictionary (not modified)
    :param bad_val: value to replace
    :param good_val: replacement value
    :return: a new dictionary with every ``bad_val`` replaced

    (A leftover debug ``print`` of the result was removed.)
    """
    return {
        key: good_val if value == bad_val else value
        for key, value in d.items()
    }
99fea666bfe3fcf99ae8f63d41e1467db7e5a0b8
42,121
from typing import Dict


def create_simple_sample(
    num_nodes: int = 1,
    kedro_version: str = "0.17.2",
    tagged: bool = True,
    name_prefix: str = "node",
) -> Dict:
    """Create sample kedro pipeline.json data for examples and tests.

    Parameters
    --------
    num_nodes : int
        number of nodes to generate in the pipeline
    kedro_version : str
        kedro version to use in the pipeline.json format
    tagged : bool
        to tag the datasets or not
    name_prefix : str
        prefix to add to the name of each node

    Returns
    --------
    kedro pipeline.json sample data as a dictionary

    Examples
    --------
    >>> create_simple_sample(1)
    {'kedro_version': '0.17.2', 'pipeline': [{'name': 'node1', 'inputs': ['output0'], 'outputs': ['output1'], 'tags': ['tag1']}]}
    >>> create_simple_sample(1, tagged=False)
    {'kedro_version': '0.17.2', 'pipeline': [{'name': 'node1', 'inputs': ['output0'], 'outputs': ['output1'], 'tags': ['']}]}
    """
    pipeline = []
    for idx in range(1, num_nodes + 1):
        # Node idx consumes the previous node's output and emits its own.
        pipeline.append({
            "name": f"{name_prefix}{idx}",
            "inputs": [f"output{idx - 1}"],
            "outputs": [f"output{idx}"],
            "tags": [f"tag{idx}" if tagged else ""],
        })
    return {"kedro_version": kedro_version, "pipeline": pipeline}
de183b2a6bb0e1d66460b9836f32edc6a07e81d7
42,122
def optional_character_converter(value: str) -> str | None: """ Zamienia string-a "Nazwa postaci Quatromondis" na None. (ktoś stwierdził, że taki będzie placeholder dla imion magicznych psorów, którzy ich nie ustawili). :param value: string do ewentualnej zamiany. :returns: ``None`` jeśli string brzmiał "Nazwa postaci Quatromondis" lub podany string. """ return None if value == "Nazwa postaci Quatromondis" else value
ff2ec8b039f6d8ec24e3ed71da49f1bb3496673d
42,123
def get_frequency(text):
    """Parse a frequency string into Hz.

    A plain number is taken as Hz; an 'M' suffix means MHz.

    :param text: e.g. "1000" or "2.5M"
    :return: frequency in Hz as a float
    :raises RuntimeError: for an unrecognised suffix

    Fixes: the plain-number branch previously used float(text[:-1]),
    silently dropping the last digit; the error message used a
    logging-style "%s" that was never interpolated.
    """
    if text[-1].isdigit():
        freq = float(text)
    elif text[-1] == "M":
        freq = float(text[:-1]) * 1e6
    else:
        raise RuntimeError("Unknown FROV suffix %s" % text[-1])
    return freq
7dcbfff624d71aa19be6113ccce3e62c15c87b67
42,125
from typing import Mapping
from typing import List


def is_kept(feature_proportions: Mapping[int, float],
            thresholds: Mapping[int, float]) -> List[bool]:
    """Determine whether each variable should be kept after selection.

    Parameters
    ----------
    feature_proportions: Mapping[int, float]
        Lookup from variable to % of splits in the model that use that variable
    thresholds: Mapping[int, float]
        Lookup from variable to required % of splits in the model to be kept

    Returns
    -------
    List[bool]
        One entry per variable (in sorted key order): True if the
        variable's proportion strictly exceeds its threshold.

    (A leftover debug ``print`` of the keys was removed, and the key
    list is now sorted once instead of twice.)
    """
    features = sorted(feature_proportions)
    return [feature_proportions[f] > thresholds[f] for f in features]
4d3d0b5d4bf3e555fb37bafcc1ee58c71b337d3a
42,126
def load_hyperedges(pc, table_name):
    """Return the hyperedge list from the Postgres table.

    :param pc: (PostgresConnector) Object for communication.
    :param table_name: (string) Where to retrieve the values from.
    :return: (list) Tuple values for (edge_id, term_id).

    NOTE(review): ``table_name`` is interpolated directly into the SQL;
    make sure it never comes from untrusted input.
    """
    with pc as connector:
        cursor = connector.cursor
        cursor.execute("SELECT * FROM {}".format(table_name))
        return cursor.fetchall()
60dedd0ad05a125dd1be39d38f059edeffe991b0
42,127
def time_delta(t1: int, t2: int) -> float:
    """Return the delta between two millisecond timestamps, in hours.

    :param t1: first timestamp (ms)
    :param t2: second timestamp (ms)
    :return: (t2 - t1) expressed in hours
    """
    delta_ms = t2 - t1
    return delta_ms / 3600000
e8606dab047ba6088d8d30b2729c85b03cba1766
42,128
def power(x, y):
    """Return x raised to the integer power y.

    Generalized to accept negative exponents (x**-y == 1 / x**y); the
    previous version recursed forever for y < 0. Non-negative exponents
    are computed iteratively to avoid recursion-depth limits.

    :param x: base
    :param y: integer exponent
    :return: x**y
    """
    if y < 0:
        return 1 / power(x, -y)
    result = 1
    for _ in range(y):
        result *= x
    return result
8136ae0d488420c995fff778917373c0b1abf83c
42,132
def acquire_category(soup): """ Take a BeautifulSoup content of a book page. Return the category of the book. """ table = soup.ul.find_all('a') category = table[2].string return category
ce0f28cab0809959d89f684d1d1e5ba060abb51a
42,133
import torch


def RescaleProbMap(batch_x, sparsity):
    """Rescale a batch of probability maps to a target sparsity.

    Given a prob map x, rescales it so that it obtains the desired
    sparsity: if mean(x) > sparsity then x' = x * sparsity / mean(x);
    if mean(x) < sparsity the same rescaling is applied to (1 - x) and
    the result is flipped back with 1 minus.
    """
    rescaled = []
    for idx in range(len(batch_x)):
        x = batch_x[idx:idx + 1]
        xbar = torch.mean(x)
        r = sparsity / (xbar)
        beta = (1 - sparsity) / (1 - xbar)
        # Blend: select x*r when r <= 1, otherwise the flipped rescaling.
        le = torch.le(r, 1).float()
        rescaled.append(le * x * r + (1 - le) * (1 - (1 - x) * beta))
    return torch.cat(rescaled, dim=0)
73bd378728f2034064366d6edab343847b3b52b0
42,134
def simple(n):
    """Return the simple-majority size for n elements (0 when n < 1).

    Parameters:
        n is int total number of elements
    """
    count = max(0, n)
    return min(count, count // 2 + 1)
f92ef8ca8987ffe764422090dc937246751ee5ec
42,135
import torch


def Softmax(in_tensor):
    """Apply a row-wise softmax to the input tensor.

    in_tensor: pytorch tensor with dimensions [batch, length]

    outputs -> pytorch tensor with the same dimensions as 'in_tensor'
    containing the softmax of 'in_tensor'

    Numerically stabilized: the row maximum is subtracted before the
    exponential (mathematically identical, but prevents exp overflow /
    NaN for large logits, which the previous version produced).
    """
    row_max = torch.max(in_tensor, 1, keepdim=True)[0]
    exps = torch.exp(in_tensor - row_max)
    totals = torch.unsqueeze(torch.sum(exps, 1), 1)
    return torch.div(exps, totals)
d16570525f975e7322c00e6c4486272a11f8795b
42,136
def landeg(gL, gS, J, S, L):
    """Calculate the Lande g factor.

    For fine structure:      landeg(gL, gS, J, S, L)
    For hyperfine structure: landeg(gJ, gI, F, I, J)
    """
    denom = 2 * J * (J + 1)
    orbital_term = gL * (J * (J + 1) - S * (S + 1) + L * (L + 1)) / denom
    spin_term = gS * (J * (J + 1) + S * (S + 1) - L * (L + 1)) / denom
    return orbital_term + spin_term
e9b062a01f346b166ac9789f28d1c3cf057517e9
42,138
def epb(mjd):
    """Convert Modified Julian Date to Besselian Epoch.

    Inputs:
    - mjd   Modified Julian Date (JD - 2400000.5)

    Returns the Besselian Epoch.

    Reference:
    Lieske, J.H., 1979. Astron.Astrophys., 73, 282.

    History:
    P.T.Wallace Starlink February 1984
    2002-07-11 ROwen Converted EPB2D to Python.
    """
    # B1900.0 reference MJD is 15019.81352; tropical year = 365.242198781 days.
    return 1900.0 + (mjd - 15019.81352) / 365.242198781
8757c02acb1a128fc2907fc67891e589160156e6
42,139
def merge(LL, RL):
    """Merge two sorted lists into one sorted list.

    Both inputs are consumed (emptied via pop), preserving the original
    in-place semantics; ties are taken from RL first, keeping the merge
    stable in the same way as before.

    :param LL: sorted list (mutated: emptied)
    :param RL: sorted list (mutated: emptied)
    :return: new merged sorted list

    (A leftover debug ``print('Merging ', c)`` was removed.)
    """
    merged = []
    while len(LL) != 0 and len(RL) != 0:
        if LL[0] < RL[0]:
            merged.append(LL.pop(0))
        else:
            merged.append(RL.pop(0))
    while len(LL) != 0:
        merged.append(LL.pop(0))
    while len(RL) != 0:
        merged.append(RL.pop(0))
    return merged
38eca72f871ed90051c03a25938721a76b6f71d9
42,140
def split_sortby(sort_by):
    """Split the value of sortBy.

    sortBy can have a trailing 'ASC' or 'DESC' (case-insensitive). Returns
    the field name and 'ASC' or 'DESC' as a tuple; the direction defaults
    to 'ASC' when no suffix is present.
    """
    lowered = sort_by.lower()
    if lowered.endswith('desc'):
        return sort_by[:-4], 'DESC'
    if lowered.endswith('asc'):
        return sort_by[:-3], 'ASC'
    return sort_by, 'ASC'
75c6f8ce9ad2edb02c844ccd9c0a2c0c6d22d306
42,143
def fn_add_lags(df, idvar, timevar, varlist, lagorders):
    """Add lagged copies of panel variables to a long-format DataFrame.

    :param df: pandas DataFrame in long/panel format
    :param idvar: name of the entity-identifier column
    :param timevar: name of the time column
    :param varlist: list of column names to lag
    :param lagorders: iterable of lag orders (ints) to create
    :return: DataFrame with the lag columns merged in; rows where any
        lag is missing are dropped, index reset

    NOTE(review): the rename mapping only covers columns literally named
    'y' and 'x' — any other name in varlist keeps its original name and
    will collide on merge. Confirm varlist is ['y', 'x'].
    """
    dfl = df.set_index([idvar, timevar])
    for lag in lagorders:
        # Shift within each entity group (level 0 of the MultiIndex),
        # then flatten back so the result can be merged on id/time.
        df_lagged = dfl.groupby(level=[0])[varlist].shift(lag).reset_index().\
            rename(columns={'y': f'y_l{lag}', 'x': f'x_l{lag}'})
        # dropna removes the leading rows of each panel with no lag value.
        df = df.merge(df_lagged, on=[idvar, timevar]).dropna()
    return df.reset_index(drop=True)
38e35c36b0a0567e589d48fed787801bcfb7f7a8
42,144
def calc_AAIMON(TMD, df_cr, len_query_TMD):
    """Calculates the amino acid identity, membranous over nonmembranous
    (AAIMON) ratio for each homologue.

    TM01_AAIMON = TM01_perc_ident / nonTMD_perc_ident

    Note that there are several ways of calculating the TM percentage
    identity and the nonTMD percentage identity. The number of identical
    residues is easy:

        Number of identical residues = number of pipes in markup
        Percentage identity = Number of identical residues / length

    HOWEVER, the LENGTH can be calculated in different ways:
        - length of query excluding gaps
        - length of query including gaps
        - length of match excluding gaps
        - length of alignment (query excl. gaps + gaps in query + gaps in match)
    This implementation uses the alignment length excluding gaps.

    Parameters
    ----------
    TMD : str
        String denoting transmembrane domain number (e.g. "TM01")
    df_cr : pd.DataFrame
        Dataframe with conservation ratios for a particular TMD (or region).
        Must already contain the '%s_SW_markup_seq', '%s_SW_match_seq',
        '%s_SW_query_num_gaps', '%s_SW_match_num_gaps' columns for this TMD,
        plus 'nonTMD_perc_ident', 'nonTMD_perc_sim_plus_ident',
        'nonTMD_SW_align_len_excl_gaps', 'len_full_match_seq' and
        'norm_factor'.
    len_query_TMD : int
        Length of the query TMD sequence.

    Returns
    -------
    df_cr : pd.DataFrame
        The same dataframe with the extra calculated AAIMON ratio columns.
    """
    # Count identical (and similar) residues in the TMD region for each
    # homologue, by counting markup characters: '|' marks identity, ':'
    # marks similarity. NOTE: '|' must be escaped in the regex used by
    # str.count, hence '\|' (plain '|' would match everything).
    df_cr['%s_SW_num_ident_res' % TMD] = df_cr['%s_SW_markup_seq' % TMD].str.count('\|')
    df_cr['%s_SW_num_sim_res' % TMD] = df_cr['%s_SW_markup_seq' % TMD].str.count(':')
    # Alignment length of the TMD region including gaps
    # (equals len_query_TMD + gaps in query + gaps in match).
    df_cr['%s_SW_align_len' % TMD] = df_cr['%s_SW_match_seq' % TMD].str.len()
    # Alignment length EXCLUDING gaps:
    # align_len - query gaps - match gaps.
    df_cr['%s_SW_align_len_excl_gaps' % TMD] = df_cr['%s_SW_align_len' % TMD] - df_cr['%s_SW_query_num_gaps' % TMD] - df_cr['%s_SW_match_num_gaps' % TMD]
    # Percentage identity of the TMD region: identical residues divided by
    # the gap-free alignment length (the nonTMD percentage identity is
    # calculated the same way elsewhere).
    df_cr['%s_perc_ident' % TMD] = df_cr['%s_SW_num_ident_res' % TMD] / df_cr['%s_SW_align_len_excl_gaps' % TMD]
    # Percentage of similar residues, and similar + identical combined.
    df_cr['%s_perc_sim' % TMD] = df_cr['%s_SW_num_sim_res' % TMD] / df_cr['%s_SW_align_len_excl_gaps' % TMD]
    df_cr['%s_perc_sim_plus_ident' % TMD] = df_cr['%s_perc_ident' % TMD] + df_cr['%s_perc_sim' % TMD]
    # AAIMON: amino acid identity, membranous over nonmembranous.
    df_cr['%s_AAIMON' % TMD] = df_cr['%s_perc_ident' % TMD] / df_cr['nonTMD_perc_ident']
    # AASMON: similarity + identity ratio (based on the matrix used in the
    # SW alignment of SIMAP).
    df_cr['%s_AASMON' % TMD] = df_cr['%s_perc_sim_plus_ident' % TMD] / df_cr['nonTMD_perc_sim_plus_ident']
    # AAIMON normalised by the random AA identity, to exclude identity due
    # to lipophilicity.
    df_cr['%s_AAIMON_n' % TMD] = df_cr['%s_AAIMON' % TMD] / df_cr['norm_factor']
    # Ratios of TMD length to nonTMD length (excl. gaps) and to the full
    # match sequence length.
    df_cr['%s_ratio_len_TMD_to_len_nonTMD' % TMD] = len_query_TMD / df_cr['nonTMD_SW_align_len_excl_gaps']
    df_cr['%s_ratio_len_TMD_to_len_full_match_seq' % TMD] = len_query_TMD / df_cr['len_full_match_seq']
    return df_cr
460a4a3c5d4f53e3971f93b489a91b86460ffa65
42,145
def list_buckets():
    """Return an empty bucket listing (compatibility shim)."""
    return {"buckets": []}
e8b94ecc9a4217485838eba1b580f346a3287957
42,146
import os


def read_columnar_data(filename, number_columns=0, comment_character='#'):
    """Read a file containing a variable number of numeric columns.

    Lines beginning with ``comment_character`` and blank lines are
    ignored; lines whose leading tokens are not parseable as floats are
    skipped silently.

    NOTE(review): with the default number_columns=0 the [0:0] slice makes
    every row empty — callers presumably always pass a positive count;
    confirm.
    """
    if not os.path.exists(filename):
        raise IOError("Data file %s not found" % filename)
    results = []
    with open(filename) as datafile:
        for line in datafile.readlines():
            if line.startswith(comment_character) or line.strip() == '':
                continue
            try:
                row = [float(token) for token in line.split()[0:number_columns]]
            except ValueError:
                # Unparseable line: skip it, as before.
                continue
            results.append(row)
    return results
891043475c0e5c0127a61ddafaba56ea5a5d99be
42,147
def _get_quarterly_avg(series_t): """For a given monthly time-series, compute series of quarterly averages Args: series_t (numpy array): monthly time series """ len_q = int(len(series_t)/3) series_q = series_t.reshape([len_q, 3]).mean(axis=1) return series_q
77311e18d66af67d887d54393f102620fa89d7e9
42,149
from typing import Union
from pathlib import Path
from typing import Tuple


def split_filename(
    filepath: Union[str, Path],
    resolve: bool = False,
) -> Tuple[Path, str, str]:
    """Split a filepath into (directory, base name, extension).

    Double extensions ending in '.gz' are kept together, e.g.
    'a/b.tar.gz' -> (Path('a'), 'b', '.tar.gz').
    """
    fp = Path(filepath)
    if resolve:
        fp = fp.resolve()
    directory = fp.parent
    stem = Path(fp.stem)
    extension = fp.suffix
    if extension == ".gz":
        # Fold the inner suffix into the extension ('.tar' + '.gz').
        extension = stem.suffix + extension
        base = str(stem.stem)
    else:
        base = str(stem)
    return Path(directory), base, extension
80f47eea7249ab22b709db3f02f9e7bde7a984f5
42,150
import torch


def affine_to_linear(x):
    """Convert an NxCxHxW tensor (or CxHxW, treated as N=1) to an
    Nx(C*H*W+1) tensor whose last column is all ones.

    Fix: the ones column is now created on x's device (previously only
    the dtype was matched, so CUDA inputs failed in torch.cat).
    """
    (N, C, H, W) = x.shape if len(x.shape) == 4 else (1, *x.shape)
    ones = torch.ones(N, 1, dtype=x.dtype, device=x.device)
    return torch.cat((x.view(N, C * H * W), ones), dim=1)
be168f7e6f5221b3e2acbbd104a4904dfd81f7b6
42,152
import torch


def perm_gpu_f32(pop_size, num_samples):
    """Use torch.randperm to generate indices on a 32-bit GPU tensor.

    Draws ``num_samples`` distinct indices from [0, pop_size) by taking
    the head of a random permutation generated directly on the GPU, then
    widens to int64 for use as an index tensor.

    NOTE(review): despite the "_f32" name, the permutation is generated
    with dtype torch.int32 (integers, not float32) — confirm whether the
    name or the dtype is the intended one. Requires a CUDA device.
    """
    return torch.randperm(pop_size, dtype=torch.int32, device="cuda")[
        :num_samples
    ].long()
950b07a4d6493fa83cf75c368ac0faf4a3ab44fc
42,153
def variable_value(request, field_name):
    """Check a field in the POST/GET request and return its value.

    Returns '' when the field is absent; for request methods other than
    GET/POST the field name itself is returned (preserved quirk of the
    original implementation).

    Deprecated: it will be replaced by getvar.
    """
    if request.method == 'GET':
        params = request.GET
    elif request.method == 'POST':
        params = request.POST
    else:
        return field_name
    return params[field_name] if field_name in params else ''
76b709c1d26ac0f18638c50602780745c5b35b37
42,154
def _plain(hash_value, password): """Check if ``hash_value`` and ``password`` match, using plain method.""" return hash_value == password
8ce4fae1737e8281b28d171b05175d77566fe459
42,155
def get_final_aggregation(thing_list, operation):
    """Build the HTTP response content for an aggregation query.

    Args:
        thing_list (list): thing descriptions; each carries a ``_query_data``
            value used by the numeric aggregations.
        operation (str): one of COUNT, MIN, MAX, AVG, SUM.

    Returns:
        dict: ``{"operation": ..., "result": ...}``. The result is
        ``"unknown"`` when a non-COUNT operation is applied to an empty list.
    """
    if operation != "COUNT" and not thing_list:
        return {"operation": operation, "result": "unknown"}
    response = {"operation": operation}
    if operation == "COUNT":
        # COUNT never touches _query_data, so don't extract values here.
        response["result"] = len(thing_list)
        return response
    values = [td["_query_data"] for td in thing_list]
    if operation == "MIN":
        response["result"] = min(values)
    elif operation == "MAX":
        response["result"] = max(values)
    elif operation == "AVG":
        response["result"] = sum(values) / len(values)
    elif operation == "SUM":
        response["result"] = sum(values)
    return response
f33f2153a6fd67ef3176cdc752115ca99c96a93a
42,157
from typing import List
import re


def parse_active_site_data_line(line: str) -> List[str]:
    """Parse one active-site data line.

    Args:
        line (str): a line from the active site data file.

    Returns:
        List[str]: the whitespace-separated identifiers followed by the
        sequence.
    """
    stripped = line.strip(">\n")
    identifiers, sequence = re.split(r",\s+", stripped)
    parts = identifiers.split()
    parts.append(sequence)
    return parts
275c33c94a7b6442d2b209abec1ac70ff494a96e
42,160
def get_deep(x, path, default=None):
    """Access a value of a multi-level dict in one go.

    :param x: a multi-level dict
    :param path: dotted string or list/tuple of keys to the desired value
    :param default: value returned when no value exists at path

    Examples:
        x = {'a': {'b': 5}}
        get_deep(x, 'a.b')       returns 5
        get_deep(x, ['a', 'b'])  returns 5
        get_deep(x, 'c', 5)      returns 5
    """
    if path is None or path == '':
        keys = []
    elif type(path) in (list, tuple):
        keys = path
    else:
        keys = path.split('.')
    current = x or {}
    for key in keys:
        try:
            current = current.get(key)
        except TypeError:
            # Intermediate value is not dict-like; treat as missing.
            current = None
        if current is None:
            return default
    return current
87290152f3c8cb7e5bf6daa4cf833fe5bb5b8ee4
42,162
import csv


def CreateFieldIdLookup(f):
    """Build a mapping from field ID to full field name.

    Reads a CSV whose header must be
    ['metric', 'field', 'field_type', 'params', 'field_id'] and keeps only
    rows whose field_type is 'string'.

    Args:
      f: open file-like object containing the CSV.

    Returns:
      A dictionary from field ID -> full field name.

    NOTE: Right now we're only doing single variable analysis for strings,
    so we don't have the "type".
    """
    expected = ['metric', 'field', 'field_type', 'params', 'field_id']
    lookup = {}
    for row_index, row in enumerate(csv.reader(f)):
        if row_index == 0:
            if row != expected:
                raise RuntimeError('Expected CSV header %s' % expected)
            continue
        metric, field, field_type, _, field_id = row
        if field_type != 'string':
            continue
        # Plain metrics are single-variable; metrics with fields carry the
        # field name for association analysis.
        name = '%s.%s' % (metric, field) if field else metric
        lookup[field_id] = name
    return lookup
ef349fb6fb5eb79bbb884c941b935f93caf44d78
42,164
import math


def is_hexagonal(number):
    """
    Check given number to be a hexagonal number.

    Uses exact integer arithmetic (``math.isqrt``) instead of floating-point
    ``sqrt``, so large inputs are classified correctly (the old float
    comparison could misfire once 8*number+1 exceeded float precision).

    :param number: value to be checked to be a hexagonal number.
    :returns: True when given value is a hexagonal number

    see http://en.wikipedia.org/wiki/Hexagonal_number

    >>> is_hexagonal(15)
    True
    >>> is_hexagonal(14)
    False
    """
    if number < 1:
        # Hexagonal numbers are positive: 1, 6, 15, 28, ...
        return False
    # number is hexagonal iff (sqrt(8*number + 1) + 1) / 4 is a whole number.
    discriminant = 8 * number + 1
    root = math.isqrt(discriminant)
    if root * root != discriminant:
        return False
    return (root + 1) % 4 == 0
491bb2d9b5b1455463e18cf96c0a4ab30bce85ab
42,165
def compute_argmax(model, approx_vars):
    """
    Computes argmax of a funsor.

    :param Funsor model: A function of the approximated vars.
    :param frozenset approx_vars: A frozenset of
        :class:`~funsor.terms.Variable` s to maximize.
    :returns: A dict mapping name (str) to point estimate (Funsor), for each
        variable name in ``approx_vars``.
    :rtype: dict
    """
    # No overlap between the model's inputs and the vars to maximize:
    # there is nothing to do.
    if not approx_vars.intersection(model.input_vars):
        return {}
    raise NotImplementedError
63adf8e0ab041ba8ba25d4d90f0d39de411fe3a0
42,166
def similarity_value(elem_a, elem_b):
    """Compare the text values of two XML elements.

    String values are compared after stripping surrounding whitespace;
    non-string values (e.g. None) are compared directly.
    """
    text_a = elem_a.text
    text_b = elem_b.text
    if isinstance(text_a, str) and isinstance(text_b, str):
        return text_a.strip() == text_b.strip()
    return text_a == text_b
e9f37e45e3ccb8f1e3aeb47cf978a3c6716fb53b
42,167
def _all_na_or_values(series, values): """ Test whether every element in the series is either missing or in values. This is fiddly because isin() changes behavior if the series is totally NaN (because of type issues) Example: x = pd.DataFrame({'a': ['x', np.NaN], 'b': [np.NaN, np.NaN]}) x.isin({'x', np.NaN}) Args: series (pd.Series): A data column values (set): A set of values Returns: bool: True or False, whether the elements are missing or in values """ series_excl_na = series[series.notna()] if not len(series_excl_na): out = True elif series_excl_na.isin(values).all(): out = True else: out = False return out
f6c3f05a7dc2ad03047b1529cdcd00f6dd091899
42,168
def remove_gate_from_line(local_qasm_line, gate_symbol, qubit_index):
    """
    Removes the application of a specific gate on a specific qubit.

    Args:
        local_qasm_line: The line from which this call should be removed.
        gate_symbol: The symbol representing the gate
        qubit_index: The index of the target qubit

    Returns:
        The same line of QASM, with the gate removed.
    """
    target = "{} q[{}]".format(gate_symbol, qubit_index)
    result = local_qasm_line
    if target in result:
        # Single-qubit application: strip the whole call, together with a
        # parallel bar on either side or surrounding parallelization braces.
        result = result.replace(target + " | ", "")
        result = result.replace(" | " + target, "")
        result = result.replace("{" + target + "}", "")
        result = result.replace(target, "")
    else:
        # Multi-qubit application: drop only this qubit's index from the list.
        result = result.replace(",{},".format(qubit_index), ",")
        result = result.replace("[{},".format(qubit_index), "[")
        result = result.replace(",{}]".format(qubit_index), "]")
    return result
6a8a1b0cff23c5174dd9e0b4a6cc640506cac933
42,169
def kwargs_from_parsed_args(args):
    """
    Transforms argparse's parsed args object into a dictionary to be passed
    as kwargs, dropping every entry whose value is None.
    """
    kwargs = {}
    for key, value in vars(args).items():
        if value is not None:
            kwargs[key] = value
    return kwargs
8db36b1e151f8a5c6efcda9f0ce52fa9ade0f698
42,171
import torch def _gs1(u: torch.Tensor, v: torch.Tensor, eps: float = 1e-7): """Applies one step of the (modified) Gram-Schmidt process to a vector. :param u: the previous vector to project against. :param v: the current vector to apply the transformation to. :return: the result of one step of the process. """ v = v - (u.dot(v) / u.dot(u)) * u return v / (v.norm() + eps)
cef2d14260eedb7ac720a498d0dec47ca931c2e0
42,172
def _rescale_layout(pos, scale=1): """ Normalize the given coordinate list to the range [0, `scale`]. Parameters ---------- pos : array Coordinate list scale : number The upperbound value for the coordinates range Returns ------- pos : array The rescaled (normalized) coordinates in the range [0, `scale`]. Notes ----- Changes `pos` in place. """ pos -= pos.min(axis=0) pos *= scale / pos.max() return pos
39de1a71f6d7807d2d46f1d3b6aafdd441ff719a
42,173
def custom_matrix(m:int, n:int):
    """Return the m-by-n matrix described in exercise 7.

    Parameters:
    m (int): number of rows of the matrix
    n (int): number of columns of the matrix

    Returns:
    np.ndarray: matrix as described in exercise 7.

    NOTE(review): stub — currently always returns None; the exercise-7
    construction still needs to be implemented.
    """
    return None
03700fce0fc33093be3bcd31a730dedbad30cff1
42,174
def filter_one_letter_word(tweets):
    """Remove one-letter words from every tweet, in place.

    Args:
        tweets: list of tweets, each a list of word strings.

    Returns:
        The same list object, with each tweet keeping only words longer
        than one character.
    """
    # enumerate + comprehension replaces the old range(len)/filter(lambda)
    # combination; each inner list is rebuilt and reassigned in place.
    for index, words in enumerate(tweets):
        tweets[index] = [word for word in words if len(word) > 1]
    return tweets
9a85c8442400bc76c5658821b515c15b34b8b14f
42,175
def active_class(var, prop, active):
    """Tag to return an active class if the var and prop test matches."""
    try:
        if var == prop:
            return active
        return ''
    except Exception:
        # Incomparable values simply yield no class.
        return ''
8b199f594839e1e6e7bc692408631ccbe7e3074a
42,176
def format_json(data_send):
    """Render the player-stats dict into the display string sent to the user."""
    level = float(data_send["level"])
    accuracy = float(data_send["accuracy"])
    total_score = format(int(data_send["total_score"]), ",")
    return "用户名: {}\n世界排名: {}\npp值: {}\n等级: {:.2f}\n总分: {}\n精确度: {:.2f}%\n游玩次数: {}".format(
        data_send["username"],
        data_send["pp_rank"],
        data_send["pp_raw"],
        level,
        total_score,
        accuracy,
        data_send["playcount"],
    )
657c35915caae5d2f80f4af1411d1761f137cada
42,177
def _is_xml(s): """Return ``True`` if string is an XML document.""" return s.lower().strip().startswith('<?xml ')
f11ce0c32915d8dc8b5dcd070644557b1b9f9a4f
42,178
def recordToMessage(record, reqid):
    """
    Format a log record as a list of interesting entries (a message) for the
    daemon-client protocol.
    """
    fields = ('name', 'created', 'levelno', 'message', 'exc_text')
    msg = [getattr(record, field) for field in fields]
    msg.append(reqid)
    if not hasattr(record, 'nonl'):
        # Terminate the message text with a newline unless suppressed.
        msg[3] += '\n'
    return msg
03c6ac362a3d0937583578ae845f3397474d7263
42,180
import os def _get_corpus_file_paths(corpus_path): """Return full paths to corpus files in |corpus_path|.""" return [ os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) ]
9c0203109a9360af94567f353a662998d37ce600
42,181
def find_disconnected(model):
    """
    Return metabolites that are not in any of the reactions.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    """
    disconnected = []
    for met in model.metabolites:
        if len(met.reactions) == 0:
            disconnected.append(met)
    return disconnected
19c606affff99c01c47522d6903b6f8008cee8c6
42,182
def expand_quantized_bins(quantized_bins, reference_bins):
    """Expand a coarse histogram back to the resolution of ``reference_bins``.

    Each quantized bin covers ``len(reference_bins) // len(quantized_bins)``
    consecutive reference bins; its count is spread evenly over the non-zero
    positions in that group, while positions that are zero in the reference
    stay zero. The last quantized bin absorbs any leftover reference bins.

    Args:
        quantized_bins: coarse histogram counts.
        reference_bins: fine-grained histogram providing the zero/non-zero
            layout to expand into.

    Returns:
        list: expanded histogram of ``len(reference_bins)`` entries.
    """
    expanded_quantized_bins = [0] * len(reference_bins)
    # Number of reference bins merged into each quantized bin.
    num_merged_bins = int(len(reference_bins) / len(quantized_bins))
    j_start = 0
    j_end = num_merged_bins
    for idx in range(len(quantized_bins)):
        zero_count = reference_bins[j_start:j_end].count(0)
        num_merged_bins = j_end - j_start
        if zero_count == num_merged_bins:
            # The whole group is zero in the reference: leave it zero.
            avg_bin_ele = 0
        else:
            # Spread this bin's count evenly over the non-zero positions.
            avg_bin_ele = quantized_bins[idx] / (
                num_merged_bins - zero_count + 0.0)
        for idx1 in range(j_start, j_end):
            expanded_quantized_bins[idx1] = (0 if reference_bins[idx1] == 0
                                             else avg_bin_ele)
        j_start += num_merged_bins
        j_end += num_merged_bins
        if (idx + 1) == len(quantized_bins) - 1:
            # Extend the final window so the last bin covers the remainder.
            j_end = len(reference_bins)
    return expanded_quantized_bins
b29ddebf0e43b81fb3f2ab7bf3634998def360d2
42,183
import os
import collections


def folder_traverse(root_dir, ext=('.jpg')):
    """Recursively map every directory under ``root_dir`` to its image files.

    Returns a defaultdict(list) keyed by directory path, listing the file
    names whose suffix matches ``ext``.
    """
    if not os.path.exists(root_dir):
        raise RuntimeError('{0} doesn\'t exist.'.format(root_dir))
    file_structure = collections.defaultdict(list)
    for entry in os.scandir(root_dir):
        if entry.is_dir():
            # Merge the mapping produced for the subdirectory.
            file_structure.update(folder_traverse(entry.path, ext))
        elif entry.is_file() and entry.name.endswith(ext):
            file_structure[os.path.dirname(entry.path)].append(entry.name)
    return file_structure
c67a20310428c7c40e3b6c0be0de3a5ef8a25209
42,184
import math


def calculate_entropy(item_count, total_items):
    """
    Calculate the entropy present in a password/passphrase.

    Assumes item_count items (characters/words) were chosen uniformly from
    total_items possibilities, giving:
        entropy = log2(total_items ** item_count)
                = item_count * log2(total_items)

    Keyword arguments:
    item_count -- Number of items present in the generated word/phrase
                  (password length, or word count of a passphrase)
    total_items -- Number of choices that were available
                   (alphabet size, or dictionary size)
    """
    bits_per_item = math.log2(total_items)
    return item_count * bits_per_item
1dbad575cf721314677598a5731aeac5f569b62f
42,185
def multiline_code(string: str, language=None) -> str:
    """
    Wrap *string* in a Markdown fenced (multiline) code block.

    :param language: Language of the code (default None)
    :param string: The string to process.
    :type string: str
    :return: Formatted string.
    :rtype: str
    """
    lang_tag = language if language else ''
    return f"```{lang_tag}\n {string} \n```"
29e5c14c55328987fd2c51c240c9d99d0bf60c26
42,186
def get_request_ids(events, context):
    """Get request IDs from a set of lambda log events.

    Collects the requestId of every END event; fails if none are found or
    if any id appears twice.
    """
    ids = []
    for event in events:
        if 'extractedFields' not in event:
            continue
        fields = event['extractedFields']
        if fields.get('type') == 'END' and 'requestId' in fields:
            ids.append(fields['requestId'])
    # should always be at least one END event
    assert len(ids) > 0, "No END events found in message stream."
    # shouldn't be any dupes
    assert len(ids) == len(set(ids)), "Found duplicate request ids"
    return ids
c5cc433c497de3f23f48bbf89b078ec399fce27e
42,189
def atoi(text):
    """
    Turn an int string into a number, but leave a non-int string alone.
    """
    if text.isdigit():
        return int(text)
    return text
76b7a3fdd28333bdc30b45ed8a8d8f7ec361fa70
42,190
import os


def logged_graphics(logfile, start='graphics/'):
    """Returns the graphics pack from an 'installed_raws.txt' file"""
    if not os.path.isfile(logfile):
        return ''
    with open(logfile) as f:
        for line in f.readlines():
            if line.startswith(start):
                # Strip the prefix (and whitespace) to get the pack name.
                return line.strip().replace(start, '')
    return ''
23dbc48d7503ee8a8b4a89890a36ba9c556a0a77
42,191
from typing import Tuple def _lex_quoted(header: str) -> Tuple[str, str]: """ >>> _lex_quoted('"abc";a=10') ('"abc"', ';a=10') >>> _lex_quoted('a=10') ('', 'a=10') """ if header[0] != '"': return "", header end_quote_pos = header.find('"', 1) return header[: end_quote_pos + 1], header[end_quote_pos + 1 :]
ecf941475c3b37bc6d3edb246d827ab63be6173b
42,192
def city_country(city, country):
    """Return a text like 'Warsaw, Poland'."""
    formatted = "{}, {}".format(city.title(), country.title())
    return formatted
9b9de4920ca0af8e76c02fcdd7733d340901e3d5
42,193
import torch
from collections import OrderedDict


def retrieve_out_channels(model, size):
    """
    This method retrieves the number of output channels of a specific model.

    Args:
        model (nn.Module): The model for which we estimate the out_channels.
            It should return a single Tensor or an OrderedDict[Tensor].
        size (Tuple[int, int]): The size (wxh) of the input.

    Returns:
        out_channels (List[int]): A list of the output channels of the model.
    """
    was_training = model.training
    model.eval()
    with torch.no_grad():
        # A dummy zero image avoids hard-coding the feature-map sizes.
        device = next(model.parameters()).device
        dummy = torch.zeros((1, 3, size[1], size[0]), device=device)
        features = model(dummy)
        if isinstance(features, torch.Tensor):
            features = OrderedDict([("0", features)])
        out_channels = [feat.size(1) for feat in features.values()]
    if was_training:
        model.train()
    return out_channels
fc4c0caaad0a3f6d7763d4537abea72a877d97a4
42,194
def validate_string(strings, alias_dict):
    """check if the string value is in the aliases list."""
    warnings = []
    for s in strings:
        if alias_dict.get(s, None) is not None:
            warnings.append("WARNING: ALIAS %s USED IN string Field\n" % s)
    # Trailing newline of the last warning is dropped.
    return ''.join(warnings).rstrip()
e82bfdb7ab5c46a516ab42ccc052054b2e15f4e6
42,196
def print_title(title, outf):
    """Prints a title to a file

    The title will be marked up by an underline of equal signs as in the
    Setext style of headers, followed by one blank line.
    """
    print("\n\n%s" % title, file=outf)
    print("=" * len(title), file=outf)
    # Bug fix: the trailing blank line previously went to stdout because
    # ``file=outf`` was missing.
    print("", file=outf)
    return None
097c1dc233a09b9231f6de0f433f09f4d74c849f
42,197
import csv


def load_csv_file(filename):
    """Load an entire CSV file into a list of row lists.

    Args:
        filename: path of the CSV file.

    Returns:
        list: one list of string fields per row.
    """
    # ``with`` already closes the file; the old explicit close() inside the
    # block was redundant.
    with open(filename, 'r') as filehandle:
        return [list(rec) for rec in csv.reader(filehandle, delimiter=',')]
0fea23b4896f909b1c825f1506db080e88740c5d
42,198
def isTie(board):
    """Return True when the game is a tie, i.e. no numbered (empty) cell
    '0'-'8' remains on the board."""
    return all(cell not in board for cell in "012345678")
742a530f7a2c64fff9bae891ff7a224cc8a757c0
42,201
def hypergeometric_expval(n, m, N):
    """
    Expected value of hypergeometric distribution:
    E[X] = n * m / N for ``n`` draws, ``m`` successes, population ``N``.
    """
    expected = 1. * n * m / N
    return expected
f9440637bde88851624bae564f75dfda21b62808
42,203