Dataset schema: content (string, 35 to 416k characters), sha1 (string, 40 characters), id (int64, 0 to 710k). Each record below lists the code content, followed by its sha1 hash and its id.
def kill_process(device, process="tcpdump"):
    """Kill any active process

    :param device: lan or wan
    :type device: Object
    :param process: process to kill, defaults to tcpdump
    :type process: String, Optional
    :return: Console output of the sync sendline command after killing the process
    :rtype: string
    """
    device.sudo_sendline("killall %s" % process)
    device.expect(device.prompt)
    device.sudo_sendline("sync")
    device.expect(device.prompt)
    return device.before
8b892b79c07feff586ddd0576cc18ab92551c322
20,336
def mock_get_globus():
    """Mock get_globus()"""
    class MockGlobusAuth:
        def __init__(self):
            self.authorizer = None

    return MockGlobusAuth()
a823d43b9ddd5d6761819b5f83b4dd035e97bf2e
20,337
def to_wgs84(gdf):
    """Reprojects a GeoDataFrame to WGS84 reference system (EPSG:4326).

    Args:
        gdf (GeoDataFrame): The GeoDataFrame object to be exported.

    Returns:
        The transformed GeoDataFrame.
    """
    if not ('init=epsg:4326' in gdf.crs.to_string()):
        gdf = gdf.to_crs({'init': 'epsg:4326'})  # Set the crs to WGS84
    return gdf
02505da7e0d4a7cb3d78b3ae60633d0dc1af1433
20,338
import json


def read_test_file(path):
    """Read a test file.

    Parameters
    ----------
    path : str
        The path to a test file.

    Returns
    -------
    typing.Tuple[str, typing.List[str]]
    """
    with open(path) as f:
        dct = json.load(f)
    return dct['text'], dct['sentences']
a416a4031ff355134004dde233d741899a35b28b
20,339
import csv


def test_csv(data):
    """Test whether the data is in a valid (semicolon-delimited) CSV format."""
    data.seek(0)
    try:
        _foo = csv.Sniffer().sniff(data.read(), delimiters=';')
        return True
    except csv.Error:
        return False
0807c6340bcaa3f0cea2d5399152d4a7bcf462f9
20,340
def make_xml_name(attr_name):
    """ Convert an attribute name to its XML equivalent by replacing
    all '_' with '-'. CamelCase names are retained as such.

    :param str attr_name: Name of the attribute
    :returns: Attribute name in XML format.
    """
    return attr_name.replace('_', '-')
f1463c4592edd40c20f030cbd9add83a3cf93577
20,342
import typing


def query_bt_info(
    start: typing.Dict[str, typing.Any],
    composition_key: str,
    wavelength_key: str,
    default_composition: str = "Ni"
) -> dict:
    """Query the necessary information for the PDFGetter."""
    if composition_key in start:
        composition = start[composition_key]
        if isinstance(composition, dict):
            composition_str = "".join(["{}{}".format(k, v) for k, v in composition.items()])
        elif isinstance(composition, str):
            composition_str = composition
        else:
            raise ValueError("Cannot parse composition: {}".format(type(composition)))
    else:
        composition_str = default_composition
    if wavelength_key in start:
        wavelength = float(start[wavelength_key])
    else:
        wavelength = None
    return {"composition": composition_str, "wavelength": wavelength}
520f006a9fdf479089230f6af123b2a9e8838de9
20,343
def other(player):
    """ Returns the given player's opponent. """
    if player == 'X':
        return 'O'
    return 'X'
e23ed765c5331ae47d284e71835b6c0897612b5c
20,345
def get_module(module_name: str):
    """ Retrieves a module. """
    try:
        # import importlib
        # module = importlib.import_module(module_name)
        # requires parameter `fromlist` to get the specified module
        module = __import__(module_name, fromlist=[''])
        return module
    except ImportError:
        return None
76d9ebd5b7a8a2450f4a740720152f85bf56ef76
20,346
import re


def check_package_name(package_name):
    """Check that package name matches convention

    Args:
        package_name: The package name to validate

    Returns:
        A boolean determining whether the package name is valid or not
    """
    m = re.match('[a-z0-9_]{3,30}', package_name)
    return (m is not None and m.group(0) == package_name)
28d22f69d3926152aabbb835ada8bb8d31553a01
20,347
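A minimal usage sketch for check_package_name above, assuming the function is in scope:

# Valid: lowercase letters, digits, underscores, 3-30 characters.
assert check_package_name("my_pkg_01") is True
# Invalid: uppercase letters fail, and names shorter than 3 chars never match.
assert check_package_name("MyPkg") is False
assert check_package_name("ab") is False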
import random


def random_distr(l):
    """function that can be used to simulate the terminal symbol of an
    n-gram given the previous values.
    """
    r = random.uniform(0, 1)
    s = 0
    for item, prob in l:
        s += prob
        if s >= r:
            return item
    # Fallback for floating-point rounding: return the last item itself,
    # not the (item, prob) pair.
    return l[-1][0]
88db100e09370da0bf5e9689a763316e6c822840
20,348
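A short sampling sketch for random_distr above, assuming the probabilities sum to 1:

from collections import Counter

# Distribution over terminal symbols as (item, probability) pairs.
dist = [("a", 0.2), ("b", 0.5), ("c", 0.3)]
# Draw many samples; empirical frequencies should approach the weights.
counts = Counter(random_distr(dist) for _ in range(10000))
print(counts)  # roughly Counter({'b': 5000, 'c': 3000, 'a': 2000})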
import random


def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
    """Generate a random IP address. You can also specify an IP address
    prefix if you are interested in local network address generation, etc.

    :param bool ip3: Whether to generate a 3 or 4 group IP.
    :param bool ipv6: Whether to generate IPv6 or IPv4
    :param list prefix: A prefix to be used for an IP (e.g. [10, 0, 1]).
        It must be an iterable with strings or integers.
        Can be left unspecified or empty.
    :returns: An IP address.
    :rtype: str
    :raises: ``ValueError`` if ``prefix`` would lead to no random fields at
        all. This means the length that triggers the ``ValueError`` is 4 for
        regular IPv4, 3 for IPv4 with ip3 and 8 for IPv6. It will be raised
        in any case the prefix length reaches or exceeds those values.
    """
    # Set the lengths of the randomly generated sections
    if ipv6:
        rng = 8
    elif ip3:
        rng = 3
    else:
        rng = 4
    prefix = [str(field) for field in prefix]
    # Prefix reduces number of random fields generated, so subtract its
    # length from rng so the IP address keeps the correct number of fields
    rng -= len(prefix)
    if rng == 0:
        raise ValueError(
            'Prefix {} would lead to no randomness at all'.format(
                repr(prefix)))
    elif rng < 0:
        raise ValueError(
            'Prefix {} is too long for this configuration'.format(
                repr(prefix)))
    random.seed()
    if ipv6:
        # StackOverflow.com questions: generate-random-ipv6-address
        random_fields = [
            '{0:x}'.format(random.randint(0, 2 ** 16 - 1)) for _ in range(rng)]
        ipaddr = ':'.join(prefix + random_fields)
    else:
        random_fields = [str(random.randrange(0, 255, 1)) for _ in range(rng)]
        ipaddr = '.'.join(prefix + random_fields)
        if ip3:
            ipaddr = ipaddr + '.0'
    return ipaddr
173551883ab17f033f043dbbf5fe7f2c8e87c2ff
20,349
import aiohttp


async def msg_port(msg, blog, cid, time_code):
    """Upload a message: text, image, or mixed content."""
    url = blog
    data = {'token': 'qq', 'time_code': time_code, 'msg_type': 'text',
            'mediaId': '1', 'content': msg, 'cid': cid, 'action': 'send_talk'}
    async with aiohttp.ClientSession() as sess:
        async with sess.post(url, data=data) as response:
            if response.status == 200 and await response.text() == '1':
                return 'biubiubiu~发送成功'  # "sent successfully"
            else:
                return '发送失败惹~'  # "send failed"
225d765b9b62af45b29554c313aea8345b290fe9
20,350
def is_composite(_):
    """ Always returns 'N' """
    return "N"
a55cee38e26ab74c9e3b07fdce19cfc7f5b93f0a
20,352
def get_event_types(client, method, token):
    """ Call the client module to fetch event types using the input parameters

    :param client: instance of client to communicate with server
    :param method: Requests method to be used
    :param token: server access token
    :return: alert event types
    """
    eTypeAlias = {}
    params = {'token': token}
    eventtypes_url = r'/api/v2/events/types'
    eventTypes = client.get_event_types(method, eventtypes_url, params)
    if eventTypes:
        for eachone in eventTypes:
            eTypeAlias[eachone['type']] = eachone.get('alias')
    return eTypeAlias
6a9c382dc702d9545b8d6c27760e221a6baaea7a
20,355
import sys
import os


def run_platform_cmake():
    """Runs cmake build configuration """
    if sys.platform == 'win32':
        vs_version_year = '2015'
        if 'VSVERSION' in os.environ:
            vs_version_year = os.environ['VSVERSION']
        if vs_version_year == '2015':
            vs_version_numeral = '14'
        elif vs_version_year == '2017':
            vs_version_numeral = '15'
        return run_cmake('Visual Studio {0} {1}'.format(
            vs_version_numeral, vs_version_year))
ffa69cb7463cd39b483f63aef7d58e258cfd6270
20,356
def determine_layers_below_above(layers, values, elevation):
    """Determine the layers below and above the current elevation

    Args:
        layers [<str>]: All the pressure layers to load for each parameter
        values [<LayerInfo>]: All the interpolated layers information
        elevation <float>: The elevation to determine the layers for

    Returns:
        <str>: Below layer ID
        <str>: Above layer ID
        <int>: Index to the above layer
    """
    index = 0
    index_below = 0
    index_above = len(layers) - 1
    for layer in layers:
        if values[layer].hgt >= elevation:
            index_below = index - 1
            index_above = index
            break
        index += 1

    # Only need to check for the low height condition
    # We should never have a height above our highest elevation
    if index_below < 0:
        index_below = 0
        index_above = 1

    return (layers[index_below], layers[index_above], index_above)
bc0b0846037fd9a415c091264213684ee088174c
20,357
def get_suffix_side(node, matcher):
    """Check if (not) adding char node to right syllable would cause
    creating word suffix. Return 1 if char node should be added to right,
    -1 if to left, 0 if cannot determine."""
    if node.next.syllable is None or node.next.syllable.is_last() is False:
        return 0
    left_score = matcher.is_suffix(str(node.next.syllable))
    right_score = matcher.is_suffix(str(node) + str(node.next.syllable))
    return right_score - left_score
c46ed8605c0eef77059804802e7151b75b0388b0
20,358
import os


def dataset32(user_factory, post_factory, dataset32_model):
    """
    Provides the dataset:
    - 2 users as authors
    - 2 posts, each post authored by each user

    Each object has random shared hash assigned to dataset:
    - author:
      - username: user_{hash}
      - password: user_{hash}
    - post:
      - author: author
      - content: post_{author_N}_{hash}_content
      - image: post_{author_N}_{hash}_image
      - title: post_{author_N}_{hash}_title
    """
    hash_ = os.urandom(4).hex()
    authors = []
    for i in "12":
        username = f"user_{i}_{hash_}"
        author = user_factory(username=username)
        author.set_password(username)
        author.save()
        authors.append(author)
    author1, author2 = authors
    posts = [
        post_factory(
            author=author,
            content=f"post_{i}_{hash_}_content",
            image=f"post_{i}_{hash_}_image",
            title=f"post_{i}_{hash_}_title",
        )
        for i, author in zip("12", authors)
    ]
    dataset = dataset32_model(
        author1=author1,
        author2=author2,
        post1=posts[0],
        post2=posts[1],
    )
    return dataset
eae6c3a76c137517712e11c907a1689329afac6e
20,359
def _sanitize_git_config_value(value: str) -> str:
    """Remove quotation marks and whitespaces surrounding a config value."""
    return value.strip(" \n\t\"'")
8945955e6ac4811a637d9df6eac0f7f29a5e55eb
20,360
def binpack(articles, bin_cap):
    """
    Write your heuristic bin packing algorithm in this function using the
    argument values that are passed

    articles: a dictionary of the items to be loaded into the bins:
        the key is the article id and the value is the article volume
    bin_cap: the capacity of each (identical) bin (volume)

    Your algorithm must return three values as indicated in the return
    statement:

    myUsername: if this is a team assignment then set this variable equal to
        an integer representing your team number; if this is an individual
        assignment then set this variable to a string with your name
    myNickname: name to appear on the leaderboard; to opt out specify ''
    bin_contents: A list of lists, where each sub-list indicates the
        dictionary keys for the items assigned to each bin. Each sublist
        contains the item id numbers (integers) for the items contained in
        that bin. The integers refer to keys in the items dictionary.
    """
    myUsername = 'cango'  # always return this variable as the first item
    myNickname = 'CN'  # name to appear on the leaderboard: to opt out specify ''

    # Sort the items descending by volume
    item_sorted = []
    for item in sorted(articles, key=articles.get, reverse=True):
        item_sorted.append([item, articles[item], ''])

    # Initialize variables
    bin_contents = [[]]
    cap_left = [bin_cap]

    # Iterate through sorted items (first-fit decreasing).
    # If the item is less than or equal to the remaining bin capacity,
    # add the item to that bin.
    for i in range(len(item_sorted)):
        for j in range(len(cap_left)):
            if item_sorted[i][1] <= cap_left[j]:
                bin_contents[j].append(item_sorted[i][0])
                cap_left[j] -= item_sorted[i][1]
                break
            # If the item does not fit in any of the bins, start a new bin
            elif j == len(cap_left) - 1:
                bin_contents.append([item_sorted[i][0]])
                cap_left.append(bin_cap - item_sorted[i][1])
                break

    return myUsername, myNickname, bin_contents
8c4714e8358462d20342ac3d9206b7b0791ce2c4
20,361
def _get_statvfs(fh, result):
    """
    Returns a tuple (f_type, f_bsize, f_blocks, f_bfree, f_bavail, f_files,
    f_ffree, f_fsid, f_namelen, f_frsize)

    The statvfs structure is given in the lines following the current line.
    If the lines contain the information needed, the lines are read and the
    information returned in a tuple. If the line is not of the expected
    format an exception is raised.
    """
    # initialize expected values.
    f_type = ""
    f_bsize = ""
    f_blocks = ""
    f_bfree = ""
    f_bavail = ""
    f_files = ""
    f_ffree = ""
    f_fsid = ""
    f_namelen = ""
    f_frsize = ""
    skip_rest = False

    # if the syscall returned an error, don't parse the statvfs structure
    if result[0] != -1:
        # first line:
        # 2373: bsize=8192 frsize=1024 blocks=8139687 bfree=3830956
        last_file_pos = fh.tell()
        line = fh.readline()
        if (line.find("bsize=") != -1 and line.find("frsize=") != -1 and
                line.find("blocks=") != -1 and line.find("bfree=") != -1):
            # parse the values from the line
            f_bsize = line[line.find("bsize=")+6:line.find("frsize=")].strip()
            f_frsize = line[line.find("frsize=")+7:line.find("blocks=")].strip()
            f_blocks = line[line.find("blocks=")+7:line.find("bfree=")].strip()
            f_bfree = line[line.find("bfree=")+6:].strip()
        else:
            # undo reading so that the next line will be read in the next
            # iteration.
            fh.seek(last_file_pos)
            # if the first statvfs line is missing then so will all the other
            # statvfs lines
            skip_rest = True

        if not skip_rest:
            # second line:
            # 2373: bavail=3749560 files=984256 ffree=797256 favail=797256
            line = fh.readline()
            if (line.find("bavail=") != -1 and line.find("files=") != -1 and
                    line.find("ffree=") != -1 and line.find("favail=") != -1):
                # parse the values from the line
                f_bavail = line[line.find("bavail=")+7:line.find("files=")].strip()
                f_files = line[line.find("files=")+6:line.find("ffree=")].strip()
                f_ffree = line[line.find("ffree=")+6:line.find("favail=")].strip()
            else:
                raise Exception("Unexpected format when translating second line "
                                "of statvfs: " + line)

        if not skip_rest:
            # third line:
            # 2373: fsid=0x780000 basetype=ufs namemax=255
            line = fh.readline()
            if (line.find("fsid=") != -1 and line.find("basetype=") != -1 and
                    line.find("namemax=") != -1):
                # parse the values from the line
                f_fsid = line[line.find("fsid=")+5:line.find("basetype=")].strip()
                f_type = line[line.find("basetype=")+9:line.find("namemax=")].strip()
                f_namelen = line[line.find("namemax=")+8:].strip()
            else:
                raise Exception("Unexpected format when translating third line "
                                "of statvfs: " + line)

    # translate to expected format.
    # Example expected format:
    # 7196  fstatfs(3, {f_type="EXT2_SUPER_MAGIC", f_bsize=4096,
    #        f_blocks=4553183, f_bfree=1919236, f_bavail=1687940,
    #        f_files=1158720, f_ffree=658797,
    #        f_fsid={-1853641883, -1823071587},
    #        f_namelen=255, f_frsize=4096}) = 0
    f_type = "f_type=" + f_type
    f_bsize = "f_bsize=" + f_bsize
    f_blocks = "f_blocks=" + f_blocks
    f_bfree = "f_bfree=" + f_bfree
    f_bavail = "f_bavail=" + f_bavail
    f_files = "f_files=" + f_files
    f_ffree = "f_ffree=" + f_ffree
    f_fsid = "f_fsid=" + f_fsid
    f_namelen = "f_namelen=" + f_namelen
    f_frsize = "f_frsize=" + f_frsize + "}"

    return (f_type, f_bsize, f_blocks, f_bfree, f_bavail, f_files, f_ffree,
            f_fsid, f_namelen, f_frsize)
81b984c54cae16f7975df4047f4c9620acf6a1bb
20,363
import os
import zipfile


def download_data(competition: str = "nlp-getting-started") -> list:
    """To download and unzip from kaggle competition data.

    Args:
        competition (str, optional): Competition name of which data needs to
            be downloaded. Defaults to "nlp-getting-started".

    Returns:
        list: Provides list of all files downloaded.
    """
    # Call kaggle download command to get data from platform.
    os.system(f"kaggle competitions download -c {competition}")
    # Unzip and move into data folder.
    folder = os.path.join(os.getcwd(), "data")
    with zipfile.ZipFile(f"{competition}.zip", "r") as zip_ref:
        if not os.path.isdir("data"):
            os.mkdir(folder)
        zip_ref.extractall(folder)
    # Delete zip file.
    if len(os.listdir(folder)):
        os.remove(f"{competition}.zip")
    return os.listdir(folder)
f551bb3a3b6b5d39e74a94c84f962a0d25278730
20,364
def is_urn(s: str):
    """Test whether s is a UUID string in URN format."""
    return isinstance(s, str) and s.startswith("urn:uuid:")
7f5bbf7dad8e86a687230c29a50f3218198a8286
20,366
def numdim(l):
    """ Returns number of dimensions of the list, assuming it has the same
    depth everywhere. """
    if not isinstance(l, (list, tuple)):
        return 0
    if not isinstance(l[-1], (list, tuple)):
        return 1
    else:
        return 1 + numdim(l[-1])
e5e35e968e944d29b81261781959350eab900b78
20,367
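A minimal check sketch for numdim above, assuming the function is in scope:

assert numdim(5) == 0                   # scalar
assert numdim([1, 2, 3]) == 1           # flat list
assert numdim([[1, 2], [3, 4]]) == 2    # uniformly nested
assert numdim([[[1], [2]]]) == 3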
import os
import sys
import json


def getProblemSet(ds, n_solutions_th, max_n_problems):
    """
    Get list of problems satisfying required criteria:
    - number of solutions is not less than threshold
    - number of problems is not more than required

    Parameters:
    - ds             -- directory with tokenized dataset
    - n_solutions_th -- min number of solutions required for selecting problems
    - max_n_problems -- max number of problems to return
                        If it is None or 0 return all problems with
                        sufficiently many solutions

    Returns:
    Sorted list of pairs: <problem, number of its solutions>,
    sorted according to the number of problem solutions
    If there are more problems with required number of solutions,
    the problems with more solutions are returned
    """
    _problem_data = f"{ds}/problems.json"
    if not os.path.exists(_problem_data):
        sys.exit(f"File {_problem_data} " +
                 "with a list of problems is not found")
    with open(_problem_data, 'r') as _problem_json:
        _all_problems = json.load(_problem_json)
    _problems = [_p for _p in _all_problems.items()
                 if int(_p[1]) >= n_solutions_th]
    if _problems:
        _problems.sort(key=lambda _x: int(_x[1]), reverse=True)
        if max_n_problems:
            _problems = _problems[: max_n_problems]
    return _problems
6932924075996f437db8a2e442ecdb30593c5f8b
20,368
def recommended_spacecharge_mesh(n_particles):
    """
    ! --------------------------------------------------------
    ! Suggested Nrad, Nlong_in settings from:
    ! A. Bartnik and C. Gulliford (Cornell University)
    !
    ! Nrad = 35,  Nlong_in = 75  !28K
    ! Nrad = 29,  Nlong_in = 63  !20K
    ! Nrad = 20,  Nlong_in = 43  !10K
    ! Nrad = 13,  Nlong_in = 28  !4K
    ! Nrad = 10,  Nlong_in = 20  !2K
    ! Nrad = 8,   Nlong_in = 16  !1K
    !
    ! Nrad ~ round(3.3*(n_particles/1000)^(2/3) + 5)
    ! Nlong_in ~ round(9.2*(n_particles/1000)^(0.603) + 6.5)
    !
    """
    if n_particles < 1000:
        # Set a minimum
        nrad = 8
        nlong_in = 16
    else:
        # Prefactors were recalculated from above note.
        nrad = round(3.3e-2 * n_particles ** (2 / 3) + 5)
        nlong_in = round(0.143 * n_particles ** (0.603) + 6.5)
    return {'nrad': nrad, 'nlong_in': nlong_in}
432689a85c26873cb9d255b7f119ccf06fc5301d
20,369
def integration_service(config, pin, global_service_fallback=False):
    """Compute the service name that should be used for an integration
    based off the given config and pin instances.
    """
    if pin.service:
        return pin.service
    # Integrations unfortunately use both service and service_name in their
    # configs :/
    elif "service" in config:
        return config.service
    elif "service_name" in config:
        return config.service_name
    elif global_service_fallback:
        return config.global_config._get_service()
    else:
        return None
6d4b39f96672cdf0d903f2faca8e776f579ec7e1
20,370
def valid(brd, num, pos):
    """
    Compares the suggested value against the row, column, and 3x3 grid to
    identify if entry is valid. If the number is observed in either of these
    categories, the entry is invalid and the test fails. At that point, a new
    value is determined and the process repeats. If the entry is valid, the
    process advances to the next empty position.

    :param brd: Matrix input of the board needed to solve for the Sudoku problem.
    :param num: The entry into the position that is under review for validation.
    :param pos: Tuple row/column position of the position under review by the program.
    :return: True if valid entry, False if invalid entry.
    """
    # Check for row validation
    for i in range(len(brd[0])):
        if brd[pos[0]][i] == num and pos[1] != i:
            return False

    # Check for column validation
    for j in range(len(brd)):
        if brd[j][pos[1]] == num and pos[0] != j:
            return False

    # Check for 3x3 grid validation
    box_x = pos[1] // 3
    box_y = pos[0] // 3
    for i in range(box_y * 3, box_y * 3 + 3):
        for j in range(box_x * 3, box_x * 3 + 3):
            if brd[i][j] == num and (i, j) != pos:
                return False

    return True
3efd7daa532fda71a0e8cd7521eae2f33b80b0b5
20,371
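A small usage sketch for valid above, on a hypothetical mostly-empty board (0 marks an empty cell):

board = [[0] * 9 for _ in range(9)]
board[0][0] = 5                  # same row already holds a 5
print(valid(board, 5, (0, 2)))   # False: row conflict
print(valid(board, 3, (0, 2)))   # True: no conflict anywhere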
def get_snirf_measurement_data(snirf):
    """Returns the acquired measurement data in the SNIRF file."""
    return snirf["nirs"]["data1"]["dataTimeSeries"][:]
2781c241aac66167e94f6873492d62a200081688
20,372
import json


def parse_sw(sw_file):
    """ Parse stopword config. """
    with open(sw_file, "r", encoding="utf-8") as sw_stream:
        sw = json.load(sw_stream)
    assert isinstance(sw["wordlist"], list)
    for word in sw["wordlist"]:
        assert isinstance(word, str)
    stopword = sw["wordlist"]
    return stopword
8dc46a61c195f0ff47bbc825dd816c780ef6f0b5
20,373
def num_ints(lst):
    """
    Returns: the number of ints in the list

    Example: num_ints([1,2.0,True]) returns 1

    Parameter lst: the list to count
    Precondition: lst is a list of any mix of types
    """
    result = 0  # Accumulator
    for x in lst:
        if type(x) == int:
            result = result + 1
    return result
cba6c06bc1618dae1b7a6f515e7f9b42ca88187b
20,375
def _escapeQuotes(key, _dict):
    """
    Safely escape quotes of each item that goes in a row

    @param key: The key of the dictionary to pull from.
    @param _dict: The target dictionary holding the data
    @return:
    """
    value = _dict.get(key, '')
    # if the key exists but has a None value, just use the empty string.
    value = value if value is not None else ''
    value = value if isinstance(value, str) else str(value)
    value = value.replace('"', '""')
    return value
681920670e06098f9b0f43a61c838044593b42c8
20,376
def without_keys(d: dict, *rm_keys):
    """Returns copy of dictionary with each key in rm_keys removed"""
    return {k: v for k, v in d.items() if k not in rm_keys}
256597224426708c38369ba635b4fa1df15591be
20,377
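A usage sketch for without_keys above, with a hypothetical config dict:

cfg = {"host": "localhost", "port": 8080, "debug": True}
print(without_keys(cfg, "debug", "port"))  # {'host': 'localhost'}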
def assemble_CohorteFromFile(fileName):
    """
    -> Assemble discrete cohorte from cytokine data
    -> fileName is the name of the discrete reduced matrix file
       (e.g. DATA/CYTOKINES/RA_quantitativeMatrix_discrete.csv)
    -> add diagnostic in vectors
    -> Write index variable file in PARAMETERS folder
    -> add "pX" prefix to scalar in vector
    -> return a cohorte (array of arrays)
    """
    cohorte = []
    listOfVariable = []
    data = open(fileName, "r")
    fileNameInArray = fileName.split("/CYTOKINES/")
    fileNameInArray = fileNameInArray[1].split("_")
    diagnostic = fileNameInArray[0]
    cmpt = 0
    for line in data:
        line = line.split("\n")
        lineInArray = line[0].split(";")
        vector = []
        if cmpt == 0:
            for variable in lineInArray:
                listOfVariable.append(variable)
        else:
            index = 1
            for scalar in lineInArray:
                newScalar = "p" + str(index) + "_" + scalar
                vector.append(newScalar)
                index += 1
            vector.append(diagnostic)
            cohorte.append(vector)
        cmpt += 1
    data.close()

    # Write indexFile
    indexFile = open("PARAMETERS/" + str(diagnostic) + "_variable_index.csv", "w")
    cmpt = 1
    for variable in listOfVariable:
        indexFile.write("p" + str(cmpt) + ";" + variable + "\n")
        cmpt += 1
    indexFile.close()

    return cohorte
8f51cbfe71790fb757dc514635e7b0773b1a2e58
20,378
def retry_if(predicate):
    """
    Create a predicate compatible with ``with_retry`` which will retry if the
    raised exception satisfies the given predicate.

    :param predicate: A one-argument callable which will be called with the
        raised exception instance only. It should return ``True`` if a retry
        should be attempted, ``False`` otherwise.
    """
    def should_retry(exc_type, value, traceback):
        if predicate(value):
            return None
        # Re-raise with the original traceback (Python 3 replacement for the
        # legacy ``raise exc_type, value, traceback`` statement).
        raise value.with_traceback(traceback)
    return should_retry
e02401ba7eb9af88565e06c0013135bfcf711521
20,379
def list_diff(list1, list2, identical=False):
    """
    API to get the difference between 2 lists

    :param list1:
    :param list2:
    :param identical: if True, return the common elements instead
    :return:
    """
    result = list()
    for value in list1:
        if identical:
            if value in list2:
                result.append(value)
        else:
            if value not in list2:
                result.append(value)
    return result
e5ff6369b07514c91333330676afdf25a81377ad
20,382
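A usage sketch for list_diff above, assuming the function is in scope:

a = [1, 2, 3, 4]
b = [3, 4, 5]
print(list_diff(a, b))                  # [1, 2] -- in a but not in b
print(list_diff(a, b, identical=True))  # [3, 4] -- common to both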
import itertools
from functools import reduce


def rank_compounds(compounds, nst_model, stats_lexicon):
    """Return a list of compounds, ordered according to their ranks.

    Ranking is being done according to the amount of affixes (the fewer the
    higher) and the compound probability which is calculated as follows:

    p((w1, tag1)..(wn, tagn)) = p(w1, tag1) * ... * p(wn, tagn) * p(tag1, ..., tagn)
    e.g. p(clown+bil) = p(clown, NN) * p(bil, NN) * p(NN, NN)
    """
    ranklist = []
    for clist in compounds:
        affixes = [affix[0] for affix in clist[0]]
        for c in clist:
            tags = list(itertools.product(*[affix[2] for affix in c]))
            # Calculate probability score
            word_probs = max(
                reduce(lambda x, y: x * y,
                       [(stats_lexicon.lookup_prob(i)) for i in zip(affixes, t)])
                for t in tags)
            tag_prob = max(nst_model.prob("+".join(t)) for t in tags)
            score = word_probs * tag_prob
            ranklist.append((score, c))
    # Sort according to length and probability
    ranklist.sort(key=lambda x: (len(x[1]), -x[0], x[1]))
    return ranklist
341e02131f708b67e91d5c8de26a77517dc04689
20,385
def format_value(value):
    """Handles side effects of json.load function"""
    if value is None:
        return 'null'
    if type(value) is bool:
        return 'true' if value else 'false'
    if type(value) is str:
        return f'\'{value}\''
    if type(value) is dict \
            or type(value) is list:
        return '[complex value]'
    else:
        return value
ccf3383d39fd1f745a505811b2cf7da335da1ae9
20,386
import requests


def twitter_api_request(url, parameters, token):
    """Returns a list of tweets"""
    res = requests.get(
        url,
        params=parameters,
        auth=token
    )
    tweets = res.json().get('statuses')
    return tweets
67a9a901cca478ac94251165ab76ed1d00b54f42
20,387
def next_pow2(n):
    """ Return first power of 2 >= n.

    >>> next_pow2(3)
    4
    >>> next_pow2(8)
    8
    >>> next_pow2(0)
    1
    >>> next_pow2(1)
    1
    """
    if n <= 0:
        return 1
    n2 = 1 << int(n).bit_length()
    if n2 >> 1 == n:
        return n
    else:
        return n2
a94ee5064ae2f3540b65b03b85ab82290c26addd
20,388
def merge_annotations(interaction_dataframe, columns):
    """Merges the annotations for a protein-protein interaction.

    Given two columns of a protein-protein interaction dataframe, each of
    which contains a type of annotation data, this function returns the
    merged set of those annotations.

    E.g. Column 1: {IPR011333, IPR003131, IPR000210}
         Column 2: {GO:0046872}
         Result: {GO:0046872, IPR011333, IPR003131, IPR000210}

    Parameters
    ----------
    interaction_dataframe : DataFrame
        DataFrame containing protein-protein interactions and annotations.
    columns : list
        A list of annotation columns to merge. Expects the column contents
        to be array-like, not strings.

    Returns
    -------
    pandas Series of sets
        Array containing a set of annotations for each interaction (row).
    """
    # replace NaNs with empty sets
    for i in columns:
        interaction_dataframe.loc[interaction_dataframe[i].isnull(), i] = \
            interaction_dataframe.loc[interaction_dataframe[i].isnull(), i].apply(lambda x: set())

    # join all sets in each supplied column
    # the unpacking operator can accept an array of lists or sets
    merged_annotations = interaction_dataframe[columns].apply(
        lambda x: set().union(*x), axis=1)
    """
    Example usage
    Row entry:
    interpro_xref_A               {IPR029199, IPR026667}
    interpro_xref_B    {IPR027417, IPR001889, IPR013672}
    Name: 1, dtype: object
    Lambda result:
    {'IPR001889', 'IPR013672', 'IPR026667', 'IPR027417', 'IPR029199'}
    """
    return merged_annotations
e5dcacc645e1110c1515f5630cd9197a8b7656bd
20,389
def Conjoin(matrix, stays, goes):
    """
    Given a matrix and a row that stays and a row that goes,
    condense the matrix.
    """
    # Each item in the matrix will have one spot that stays and one spot that
    # goes. Take the average of item[stays] and item[goes] and store it in
    # item stays
    for i in range(len(matrix)):  # For each item in the matrix
        if i == stays or i == goes:
            continue
        # Precondition: stays != goes, goes != i, stays != i
        stay_spot = [stays, i]
        stay_spot.sort()
        # At this point, stay_spot is a tuple representing the spot to stay
        goes_spot = [goes, i]
        goes_spot.sort()
        # At this point, goes_spot is a tuple representing the spot to go.
        # Since dealing with an upper triangular matrix, the sort accounts
        # for this.

        # Create and store the average
        first = matrix[stay_spot[0]][stay_spot[1]]
        second = matrix[goes_spot[0]][goes_spot[1]]
        matrix[stay_spot[0]][stay_spot[1]] = (first + second) / 2.0

    for i in matrix:  # Remove the goes-th column
        i.pop(goes)
    matrix.pop(goes)  # Remove the goes-th row
    return matrix
d83b20cd2bf778adf193506b8ab2fda2c8f4b936
20,390
import re


def policy_set_pattern(policy_set: str) -> re.Pattern:
    """Return a regexp matching the policy set's name."""
    final = policy_set.rsplit("/", maxsplit=1)[-1]
    return re.compile(rf"^{final}_\d+$")
78c278ddf6cd98e4a69c248806334a7190cad1cd
20,392
def get_start_end_interval(ref, query, i=0, j=-1):
    """
    Return the start and end points in the query where the ref contig matches.
    """
    if i >= len(query):
        return i, j
    # If the current reference name matches the current query start,
    # don't change anything!
    if ref == query[i][1]:
        # If the end point hasn't been set before, set it now.
        if j == -1:
            # While the ref contig equals query, increment the end position
            j = 0
            while ref >= query[j][1]:
                j += 1
        while j < len(query) and ref >= query[j][1]:
            j += 1
        return i, j

    # Adjust i forward past query entries that sort before ref
    while i < len(query) and ref > query[i][1]:
        i += 1
    if i == len(query):
        return i, i
    j = i + 1
    # Adjust j: while the ref contig equals query, increment the end position
    while j < len(query) and ref >= query[j][1]:
        j += 1
    return i, j
5490f5bde09fae861661d90eb903bb705d85e4f5
20,393
import math


def angle_between(pos1, pos2):
    """ Computes the angle between two positions. """
    diff = pos2 - pos1
    # return np.arctan2(diff[1], diff[0])
    return math.atan2(diff[1], diff[0])
ee69333fbd188fe977ac64f91275962b34246ed4
20,394
async def handle_update_identity(request, identity):
    """
    Handler for updating an Identity.

    The request is expected to contain at least a partial JSON
    representation of the Identity model object.
    """
    id = request.match_info["id"]
    identity.id = id
    await identity.save()
    return 200, {"data": identity}
74712d4381c82395f9b31d194e2bcc1f5600b3ac
20,395
from pandas import DataFrame


def _is_aligned(frame, other):
    """
    Helper to check if a DataFrame is aligned with another DataFrame or Series.
    """
    if isinstance(other, DataFrame):
        return frame._indexed_same(other)
    else:
        # Series -> match index
        return frame.columns.equals(other.index)
8381ff297a858d32f539f1deeef57f32d9ab0c4d
20,396
def concat_url(num_of_pics, item_type, org):
    """Concatenates and returns our full API url.

    Args:
        num_of_pics ([INT]): [How many pictures wanted, the hitsPerPage in our case]
        item_type ([STR]): [This is what type of item we want from request]
        org ([STR]): [The org we want pictures from]
    """
    endpoint = 'http://www.kulturarvsdata.se/ksamsok/api'
    fields = 'serviceOrganization,thumbnail,itemType'
    endpoint_fields = F'?&x-api=test&method=search&hitsPerPage={num_of_pics}&recordSchema=xml'
    # All the "OR NOT" clauses in the query exclude photos that resemble
    # objects, something we don't want when item_type is photo
    query = F'thumbnailExists="j" AND itemType="{item_type}" AND serviceOrganization="{org}" OR NOT itemSpecification="Dokumentationsbild" OR NOT itemSpecification="ID-bild" OR NOT itemSpecification="Placeringsbild" OR NOT itemSpecification="Presentationsbild" OR NOT itemName="föremålsbild"'
    req_url = F'{endpoint}{endpoint_fields}&query={query}&fields={fields}&startRecord='
    return req_url, query
3214b98e387731251fbe9e4d9cd01e248f2f8b62
20,397
def not_none(elem):
    """Check if an element is not None."""
    return elem is not None
f91a28efc42e3d50515bb783d3f16a64cdcf9490
20,398
def format_weather_header_for_HELP(itype, iunits, city, lat=None):
    """
    Prepare the header for the precipitation, air temperature and
    global solar radiation input weather datafile for HELP. The format of the
    header is defined in the subroutine READIN of the HELP Fortran source code.
    """
    fheader = [['{0:>2}'.format(itype)],    # 3: data was entered by the user.
               ['{0:>2}'.format(iunits)],   # 1 for IP and 2 for SI
               ['{0:<40}'.format(city[:40])],
               ]
    if lat is not None:
        # Append the latitude if the data are solar radiation.
        fheader.append(['{0:>6.2f}'.format(lat)])
    else:
        fheader.append([])
    return fheader
7d848481b7316cef4094c2d24b9978665a1c2e1d
20,399
import os


def is_c_file(fn):
    """ Return true iff fn is the name of a C file.

    >>> is_c_file("a/b/module.c")
    True
    >>> is_c_file("a/b/module.h")
    True
    >>> is_c_file("a/b/module.c~")
    False
    >>> is_c_file("a/b/.module.c")
    False
    >>> is_c_file("a/b/module.cpp")
    False
    """
    fn = os.path.split(fn)[1]
    # Avoid editor temporary files
    if fn.startswith(".") or fn.startswith("#"):
        return False
    ext = os.path.splitext(fn)[1]
    return ext in {".c", ".h", ".i", ".inc"}
5a106c70f034b89737d46261a0973670305f1631
20,400
import os


def get_case_name(case_path, inst_path):
    """extract case name from case path."""
    return case_path.replace(os.path.join(
        inst_path, "cases"), "")[1:].replace("\\", "/")
f585f1ceb469664ac7c5407e69ce1387cd59ecb2
20,401
def __TeardropLength(track, via, hpercent):
    """Computes the teardrop length"""
    n = min(track.GetLength(), (via[1] - track.GetWidth()) * 1.2071)
    n = max(via[1] * (0.5 + hpercent / 200.0), n)
    return n
6f2e0f9980e5d47e4c52b1a7bf4b6f20d62d2e94
20,402
import numpy


def spikelist2spikematrix(DATA, N, N_time, dt):
    """
    Returns a matrix of the number of spikes during simulation.
    Is of the shape of N x N.

    The spike list is a list of tuples (rel time, neuron_id) where the
    location of each neuron_id is given by the NEURON_ID matrix, which is in
    a standardized way [[ (N*i+j) ]] where i and j are line and column
    respectively. For instance, for a 4x4 population:

    [[ 0,  1,  2,  3],
     [ 4,  5,  6,  7],
     [ 8,  9, 10, 11],
     [12, 13, 14, 15]]
    """
    DATA = numpy.array([[int(k), int(v)] for k, v in DATA])
    spikematrix = numpy.zeros((N, N))
    if DATA.size > 0:
        neuron_id = DATA[:, 1]
        for i_id in numpy.unique(neuron_id):
            column = int(numpy.mod(i_id, N))
            line = int(i_id // N)  # integer row index (floor division)
            # Count the spikes fired by this neuron; the original summed the
            # spike indices, which contradicts the docstring.
            spikematrix[line, column] = len(numpy.where(neuron_id == i_id)[0])
    return spikematrix / N_time * dt
f179f69149ba6a57a6e8c01615ceb16988596b7b
20,404
def decode_selected_indices(decode_fn, features):
    """Decode selected indices from features dict."""
    inputs = features.get("inputs", None)
    selected_ids = features.get("selected_ids", None)
    num_inputs = features.get("num_inputs", None)
    if inputs is None or selected_ids is None or num_inputs is None:
        raise ValueError("Insufficient input fields.")
    if len(inputs.shape) != 2:
        raise ValueError("Expected prediction['inputs'] to have two dimensions.")
    return "".join([
        "%d: %s\n" % (i, decode_fn(inputs[i]))
        for i in selected_ids
        if i >= 0 and i < num_inputs
    ])
3e09c349551a740bf994055205463cedf1d25cdd
20,406
def wilight_to_hass_hue(value):
    """Convert wilight hue 1..255 to hass 0..360 scale."""
    return min(360, round((value * 360) / 255, 3))
5a9021185f7bbb9bf1351b2df55207063ee49f9a
20,407
def convert_triggers(events, return_event_ids=False):
    """Function to convert triggers to failed and successful inhibition.

    Trigger codes:
        stop_signal: 11, go: 10, response: 1, stop_signal_only: 12,
        failed_inhibition_response: 2, failed_inhibition: 37,
        successful_inhibition: 35

    Parameters
    ----------
    events : numpy array
        The original events
    return_event_ids : bool
        If true return event_id that matches the new triggers.

    Returns
    -------
    converted_events
        The converted events.
    """
    events_tmp = events.copy()
    for idx, line in enumerate(events_tmp):
        if line[2] == 20:
            if events_tmp[idx + 1][2] == 1:
                events_tmp[idx][2] = 30  # go_before_stop
            elif (events_tmp[idx + 1][2] == 11) and (events_tmp[idx + 2][2] != 1):
                events_tmp[idx][2] = 35  # successful inhibition
            elif (events_tmp[idx + 1][2] == 11) and (events_tmp[idx + 2][2] == 1):
                events_tmp[idx][2] = 37  # failed inhibition
                events_tmp[idx + 2][2] = 2  # failed inhibition response

    event_id = {
        "stop_signal": 11,
        "go": 10,
        "response": 1,
        "stop_signal_only": 12,
        "failed_response": 2,
        "failed_inhibition": 37,
        "successful_inhibition": 35,
    }

    if return_event_ids:
        return (events_tmp, event_id)
    else:
        return events_tmp
69eca81afe81c4f161ee69e3a02f2e5706c1c5ee
20,408
import random


def intRndPct(n, pct=20):
    """
    Randomize an integer

    Parameters
    ----------
    n : int
    pct : int, optional
        Randomization factor in %. The default is 20.

    Returns
    -------
    int
        Randomized integer
    """
    return int(n * (100.0 + random.uniform(-pct, pct)) / 100.0)
5ef784b83d7e9e5c033932ec41d5426b775ece35
20,410
def kind(o):
    """ returns a suitable table name for an object based on the object class """
    n = []
    for c in o.__class__.__name__:
        if c.isalpha() or c == '_':
            if c.isupper() and len(n):
                n.append('_')
            n.append(c.lower())
    return ''.join(n)
420041fd2369fdcdee48ac433a81be2dc07c1563
20,412
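A usage sketch for kind above: CamelCase class names become snake_case table names.

class OrderLineItem:
    pass

print(kind(OrderLineItem()))  # order_line_item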
def user_index_by_id(slack_id, users):
    """
    Returns index of user in users list, based on its Slack id

    :param slack_id:
    :param users:
    :return: Index in users or False
    """
    found_users = [user for user in users if user['id'] == slack_id]
    if len(found_users) == 0:
        # not found
        return False
    elif len(found_users) == 1:
        return users.index(found_users[0])
    else:
        # found more than one, this should never happen
        return False
ec5fb9b382f71f1df4fa7778285679307c634c39
20,413
def isoformat(date):
    """ Convert a datetime object to asmx ISO format """
    return date.isoformat()[:-3] + "Z"
c2c94c1957f251622998af723aab354116b7fd55
20,415
from pathlib import Path
import json


def load_json(path: Path):
    """
    Loads authentication keys/tokens.

    :param path: JSON file to load the keys from
    :return: Dictionary containing the key-file information
    """
    with open(path, mode='r', encoding='utf8') as file:
        return json.load(file)
e7368431970682451669603098d56c1c991750a5
20,416
def normalize_md(txt):
    """Replace newlines *inside paragraphs* with spaces.

    Consecutive lines of text are considered part of the same paragraph
    in Markdown. So this function joins those into a single line to make the
    test robust to changes in text wrapping.

    NOTE: This function doesn't attempt to be 100% grammatically correct
    Markdown! It's just supposed to be "correct enough" for tests to pass. For
    example, when we guard "\n\n" from being converted, we really should be
    guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests.
    """
    # Two newlines in a row should NOT be replaced with a space.
    txt = txt.replace("\n\n", "OMG_NEWLINE")

    # Lists should NOT be replaced with a space.
    txt = txt.replace("\n*", "OMG_STAR")
    txt = txt.replace("\n-", "OMG_HYPHEN")

    # Links broken over two lines should not get an extra space.
    txt = txt.replace("]\n(", "OMG_LINK")

    # Convert all remaining newlines into spaces.
    txt = txt.replace("\n", " ")

    # Restore everything else.
    txt = txt.replace("OMG_NEWLINE", "\n\n")
    txt = txt.replace("OMG_STAR", "\n*")
    txt = txt.replace("OMG_HYPHEN", "\n-")
    txt = txt.replace("OMG_LINK", "](")

    return txt.strip()
3d3383607c7b5957ce75888266326f03f7dc7be2
20,417
def find(node):
    """
    Find the UFSet of the given node by returning the root of this UFSet,
    with path compression.
    """
    if node.parent != node:
        node.parent = find(node.parent)
    return node.parent
5c671b02cf8a7c7df82480a16eab3006139b1d8e
20,418
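A usage sketch for find above, with a minimal hypothetical node type:

class UFNode:
    """Minimal union-find node; each node starts as its own root."""
    def __init__(self):
        self.parent = self

a, b, c = UFNode(), UFNode(), UFNode()
b.parent = a     # union b into a
c.parent = b     # union c into b
root = find(c)   # path compression: c.parent now points straight to a
print(root is a, c.parent is a)  # True True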
import json


def parse_station_info_json(station_info_json):
    """
    Parse the stop-station information along the route.

    :param station_info_json: station information json
    :return: [abbreviated train number, start station name, end station name]
    """
    json_text = json.loads(station_info_json)
    data = json_text['data']['data']
    start_station_name = data[0]['start_station_name']
    station_train_code = data[0]['station_train_code']
    train_class_name = data[0]['train_class_name']
    service_type = data[0]['service_type']
    end_station_name = data[0]['end_station_name']
    # for d in data:
    #     arrive_time = d['arrive_time']
    #     station_name = d['station_name']
    #     start_time = d['start_time']
    #     stopover_time = d['stopover_time']
    #     station_no = d['station_no']
    return [station_train_code, start_station_name, end_station_name]
a9d80a47db9864662c33fadbf623218af2ece04e
20,419
def process_data(df, stable=True):
    """Process raw data into useful files for model."""
    # Fix the category for both
    bat_hand = {'R': 1, 'L': 0, 'B': 2}
    df['bats'] = df['bats'].map(bat_hand)
    pit_hand = {'R': 1, 'L': 0, 'S': 0}
    df['throws'] = df['throws'].map(pit_hand)
    df = df.dropna(subset=['PlayerID'])
    df = df.set_index('PlayerID')
    return df
90d6f91821420b22341ef542f71f9a0521fbb5f8
20,420
def validate_key_parse(data):
    """
    Validate key is a numerical value string and parse it.

    :param dict data: Data to be validated
    """
    validated_data = {int(key): str(value) for key, value in data.items()
                      if key.isdigit()}
    return validated_data if len(validated_data) == len(data) else None
79c973b2892caabd6249366423bc229384f80f5a
20,421
import re


def tsv_string_to_list(s, func=lambda x: x, sep='|^|'):
    """Convert a TSV string from the sentences_input table to a list,
    optionally applying a fn to each element"""
    if s.strip() == "":
        return []
    # Auto-detect separator
    if re.search(r'^\{|\}$', s):
        split = re.split(r'\s*,\s*', re.sub(r'^\{\s*|\s*\}$', '', s))
    else:
        split = s.split(sep)
    # split and apply function
    return [func(x) for x in split]
3bc223b7d685a0245d4dad98eeac81c39a315a77
20,422
def resource_count(entries=[]):
    """
    Count for each type of resource

    count = [{resourceType: instances}, ...]
    :param entries:
    :return: counts: dictionary of counts
    """
    count = {}
    for e in entries:
        if "resourceType" in e:
            if e['resourceType'] in count:
                val = count[e['resourceType']]
                count.update([(e['resourceType'], val + 1)])
            else:
                count.update([(e['resourceType'], 1)])
    return count
2518f016fbdd515e25e005905948b84fdc923c60
20,424
def replicate_config(cfg, num_times=1):
    """Replicate a config dictionary some number of times

    Args:
        cfg (dict): Base config dictionary
        num_times (int): Number of repeats

    Returns:
        (list): List of duplicated config dictionaries, with an added
            'replicate' field.
    """
    # Repeat configuration settings across replicates
    configs = []
    for replicate in range(num_times):
        _config = cfg.copy()
        _config.update({"replicate": replicate})
        configs.append(_config)
    return configs
8b4d43002c22d20c6d2238c2309086a21d560c5c
20,425
import torch


def gradient(func, idx, tensors):
    """Take the gradient of func w.r.t. tensors[idx].

    Args:
        func (Callable): assumed to be the function of *tensors.
        idx (int): index of the tensor w.r.t which the gradient is taken
        tensors (list or tuple): sequence of tensors of sizes [n, di] where
            di is the dims of ith tensor

    Returns:
        [type]: [description]
    """
    # variable to take grad
    X = tensors[idx]
    if X.is_leaf:
        X.requires_grad = True
    func_values = func(*tensors)
    func_sum = torch.sum(func_values)
    Gs = torch.autograd.grad(func_sum, X,
                             retain_graph=True,
                             only_inputs=True,
                             create_graph=True,
                             )
    G = Gs[0]
    # n, dx = X.shape
    # assert G.shape[0] == n
    # assert G.shape[1] == dx
    assert G.shape == X.shape
    return G
a537186507101e63fb47bee9e62b56de309ebb60
20,427
def count_items(column_list):
    """Function to consolidate the count by user type

    Args:
        column_list: The list to perform the consolidation

    Returns:
        Lists with user types and count items
    """
    type_count = {}
    for type_ in set(column_list):  # avoid shadowing the builtin `type`
        type_count[type_] = column_list.count(type_)
    item_types = list(type_count.keys())
    count_items = list(type_count.values())
    return item_types, count_items
e3872c88145aa259402ced51c3783b9a4ea1bab3
20,428
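A usage sketch for count_items above, with hypothetical ride-share data:

rides = ["Subscriber", "Customer", "Subscriber", "Subscriber"]
types, counts = count_items(rides)
print(dict(zip(types, counts)))  # {'Subscriber': 3, 'Customer': 1} (order may vary)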
import shutil


def header(overrides) -> str:
    """Create header for experiment."""
    # Get terminal number of columns
    try:
        columns, _ = shutil.get_terminal_size((80, 20))
    except Exception:  # pylint: disable=broad-except
        columns = 80
    # Join key-values and truncate if needed
    content = ", ".join(f"{key}={value}" for key, value in overrides.items())
    if len(content) >= columns - 2:
        content = content[: columns - 2 - 3] + "." * 3
    content = f"[{content}]"
    # Add padding
    padding = "=" * max((columns - len(content)) // 2, 0)
    return f"{padding}{content}{padding}"
d870bc643c65b9a717e2a8004c6381e3e743e676
20,429
def get_best_algorithm_hyperparameter_onestep(results_df):
    """
    Combines get_best_algorithm_hyperparameter() and get_best_hyperparam_all()
    into one function.

    Parameters
    ==========
    results_df : DataFrame
        DataFrame containing pipeline_ML results.
    """
    # Set up
    average_group_cols = ['algorithm', 'hyperparameters', 'label', 'recall',
                          'ignition_id']
    best_hyperparam_group_cols = ['label', 'recall']
    metric = 'precision_at_recall'

    # Do
    results_df['hyperparameters'] = results_df['hyperparameters'].astype(str)
    average_folds = results_df.groupby(average_group_cols).mean().reset_index()
    best_hyperparam = average_folds.loc[
        average_folds.groupby(best_hyperparam_group_cols)[metric].idxmax()]
    return best_hyperparam
8065a6f51d2b4be557d6fc8816783bb2b18d6ce3
20,430
def get_user_config():
    """Reads the project configuration from the user.

    Returns:
        tuple: Returns a tuple, containing (project_name, is_flask_service)
    """
    project = str(input("""Please give your project name: """))
    flask_service = str(input(
        """Should "%s" contain a Flask service? (y/n) """ % project)
    ).lower().strip()
    if flask_service:
        flask_service = flask_service[0] == "y"
    return project, flask_service
48f89636741887a103ecaa900d749068ac77cb24
20,431
def places(net):
    """ build place vector definition """
    _places = {}
    offset = 0
    for place in net.places:
        _places[place] = {
            'offset': offset,
            'position': net.places[place].position,
            'initial': net.places[place].marking
        }
        offset += 1
    return _places
8ed3c3db0bfe77afb4028c68898b535057d1fc88
20,432
def gen_mock_env_info(*args, **kwargs) -> str:
    """Build and return the mock env_info that exec_command would query."""
    return "env1=xxx\nenv2=xxx\nenv3=xxx"
f7b3835ed151825969e8752ce6a975fe86708f7e
20,433
import argparse


def init_argparse():
    """
    Initialize all possible arguments for the argument parser.

    Returns:
        :py:mod:`argparse.ArgumentParser`: ArgumentParser object with command
        line arguments for this script.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--input', help='Relation input data')
    argparser.add_argument('--indir', help="Directory to partitioned data.")
    argparser.add_argument('--outdir', help='Output data directory')
    argparser.add_argument('--partitions', nargs=3,
                           help="Percentages of training / validation / test set")
    argparser.add_argument('--uniqueness_check', action="store_true",
                           help="Checks the uniqueness of input data triplets.")
    argparser.add_argument('--set_check', action="store_true",
                           help="Checks if relations in the validation and test sets "
                                "appear at least once in the training data.")
    argparser.add_argument('--remove_clones', action='store_true',
                           help="Removes clones while checking for uniqueness.")
    argparser.add_argument('--whole', action='store_true',
                           help="Partition whole dataset or per relation.")
    return argparser
98018f46668d8e87aa3571fbdcb22ca2381d4433
20,434
def in_order(tree):
    """
    Function that performs an iterative inorder traversal of a tree.

    Parameters:
        tree (BinTreeNode): the tree for which inorder is to be performed.

    Returns:
        sortedArr (list): list of all elements in ascending order.
    """
    sortedArr = []
    currentNode = tree
    stack = []
    loopRunning = True
    while loopRunning:
        if currentNode is not None:
            stack.append(currentNode)
            # Keep going till the last left element of the tree
            currentNode = currentNode.left
        else:
            if len(stack) > 0:  # If there are any pending nodes to visit
                currentNode = stack.pop()
                sortedArr.append(currentNode.value)
                currentNode = currentNode.right
            else:
                loopRunning = False
    return sortedArr
2d61a691854a5901b4140f3a22d689709cac7640
20,435
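A usage sketch for in_order above, with a minimal hypothetical BinTreeNode:

class BinTreeNode:
    """Minimal node type assumed by in_order."""
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

# BST:      4
#         /   \
#        2     6
#       / \   /
#      1   3 5
root = BinTreeNode(4,
                   BinTreeNode(2, BinTreeNode(1), BinTreeNode(3)),
                   BinTreeNode(6, BinTreeNode(5)))
print(in_order(root))  # [1, 2, 3, 4, 5, 6]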
def create_table_sql(tablename, fields, geom_type, geom_srid):
    """
    Create a SQL statement for creating a table. Based on a geoJSON
    representation. For simplicity all fields are declared VARCHAR 255
    (not good, I know, patches welcome)
    """
    cols = []
    for field in fields:
        cols.append("%s VARCHAR(255)" % field)

    statement = """CREATE TABLE %s (
        id SERIAL,
        %s,
        geom GEOMETRY(%s, %s),
        PRIMARY KEY (id)
    )
    """ % (tablename, ",\n".join(cols), geom_type, geom_srid)
    return statement
1df5ff0472a0333f6dcc872030e3f253e4aeb940
20,436
import os
import sys
import stat


def get_stdin_data():
    """ Helper function that returns data sent to stdin, or False if nothing is sent """
    # STDIN can only be 3 different types of things ("modes")
    #   1. An interactive terminal device (i.e. a TTY -> sys.stdin.isatty() or stat.S_ISCHR)
    #   2. A (named) pipe (stat.S_ISFIFO)
    #   3. A regular file (stat.S_ISREG)
    # Technically, STDIN can also be other device type like a named unix socket (stat.S_ISSOCK), but we don't
    # support that in gitlint (at least not today).
    #
    # Now, the behavior that we want is the following:
    # If someone sends something directly to gitlint via a pipe or a regular file, read it. If not, read from the
    # local repository.
    # Note that we don't care about whether STDIN is a TTY or not, we only care whether data is via a pipe or regular
    # file.
    # However, in case STDIN is not a TTY, it HAS to be one of the 2 other things (pipe or regular file), even if
    # no-one is actually sending anything to gitlint over them. In this case, we still want to read from the local
    # repository.
    # To support this use-case (which is common in CI runners such as Jenkins and Gitlab), we need to actually attempt
    # to read from STDIN in case it's a pipe or regular file. In case that fails, then we'll fall back to reading
    # from the local repo.
    mode = os.fstat(sys.stdin.fileno()).st_mode
    stdin_is_pipe_or_file = stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
    if stdin_is_pipe_or_file:
        input_data = sys.stdin.read()
        # Only return the input data if there's actually something passed
        # i.e. don't consider empty piped data
        if input_data:
            return str(input_data)
    return False
604adcd395e48038caaa9d01bedbdf9b3efb90cf
20,437
def inherit(cls, *bases):
    """
    Inherits class cls from *bases

    @note: cls needs a __dict__, so __slots__ is tabu

    @param cls: The class to inherit from *bases
    @type cls: C{class}

    @param bases: The base class(es)
    @type bases: C{list}
    """
    newdict = dict([(key, value)
                    for key, value in cls.__dict__.items()
                    if key != '__module__'])
    cls = type(cls.__name__, tuple(bases), newdict)
    setattr(cls, "_%s__decorator_class" % cls.__name__, cls)
    return cls
96fdbd3bf0fbf669d8f25b4f1338c50fbf10de79
20,438
import math


def math_degrees(x):
    """Implement the SQLite3 math built-in 'degrees' via Python.

    Convert value X from radians into degrees.
    """
    try:
        return math.degrees(x)
    except:
        # Mirror SQLite behavior: fall through and return None (NULL)
        # on invalid input.
        pass
19ce54220c093b2b10de5f101dd11c4221a413fe
20,440
def extract_bold_bins(dataf):
    """
    Returns the dataframe filtered to only rows with a BOLD BIN id, i.e.
    excluding NaN values

    :param dataf: Input dataframe
    :return: Filtered dataframe
    """
    # NaN != NaN, so the self-comparison keeps only non-null bold_id rows.
    return dataf.loc[dataf.bold_id == dataf.bold_id]
42637ae77bdc3966a3c95612eced79dd6c9358a8
20,442
import click


def _validate_count(value):
    """
    Validate that count is 4 or 5, because EFF lists only work for these
    numbers of dice.

    :param value: value to validate
    :return: value after it's validated
    """
    # Use `set` ({x, y, z}) here for quickest result
    if value not in {4, 5}:
        raise click.BadParameter(
            'Words in word lists limit number of dice to 4 or 5.'
        )
    return value
79f17c061af41e71f0f2f82d4a6a21fe2e09abfa
20,443
def get_vplex_device_parameters():
    """This method provides the parameters required for the ansible device
    module on VPLEX
    """
    return dict(
        cluster_name=dict(type='str', required=True),
        geometry=dict(type='str', required=False, default='raid-1',
                      choices=['raid-0', 'raid-1', 'raid-c']),
        stripe_depth=dict(type='str', required=False),
        device_name=dict(type='str', required=True),
        extents=dict(type='list', required=False, elements='str'),
        extent_state=dict(type='str', required=False,
                          choices=['present-in-device', 'absent-in-device']),
        new_device_name=dict(type='str', required=False),
        state=dict(type='str', required=True, choices=['present', 'absent']),
        mirror_name=dict(type='str', required=False),
        mirror_state=dict(type='str', required=False,
                          choices=['present-in-device', 'absent-in-device']),
        target_cluster=dict(type='str', required=False),
        transfer_size=dict(type='int', required=False)
    )
f6d0e5892f3b1e770f1ef10b78ec290e14d33e76
20,444
def totalSeconds(td):
    """totalSeconds(td) -> float

    Return the total number of seconds contained in the given Python
    datetime.timedelta object. Python 2.6 and earlier do not have
    timedelta.total_seconds().

    Examples:

    >>> totalSeconds( toLocalTime(86400.123) - toLocalTime(0.003) )
    86400.12
    """
    if hasattr(td, "total_seconds"):
        ts = td.total_seconds()
    else:
        ts = (td.microseconds +
              (td.seconds + td.days * 24 * 3600.0) * 1e6) / 1e6
    return ts
20ef54d7107ec910582a0fb904902788d93a2de4
20,445
import typing


def read_data_from_txt_file(txt_path: str) -> typing.List[str]:
    """Read a single txt file line by line and return the results."""
    with open(txt_path, 'r', encoding='utf-8') as f:
        contents = [i.strip() for i in f]
    return contents
725d4566198c61b383b2ef973dd1f0f66b627ff8
20,446
async def get_ladders(database, platform_id, ladder_ids):
    """Get platform ladders."""
    query = ('select id, platform_id, name from ladders '
             'where platform_id=:platform_id and id = any(:ladder_ids)')
    values = {'platform_id': platform_id, 'ladder_ids': ladder_ids}
    return list(map(dict, await database.fetch_all(query, values=values)))
b1bbdbd51b72c45dd14ac696b3c4b841b39e72e9
20,447
def NLSYMBNDRY(NSPV, ISPV, VSPV, GLK, GLKG, NDF=4):
    """Establish the boundary condition in the global matrices

    Args:
        NSPV (int): Number of primary boundary conditions
        ISPV (array): Position of primary boundary conditions
        VSPV (array): Value of primary boundary conditions
        GLK (array): Global stiffness matrix of the system
        GLKG (array): Global geometrical matrix of the system
        NDF (int, optional): Number of degrees of freedom

    Returns:
        Arrays
    """
    # Primary degrees of freedom
    if NSPV != 0:
        for NB in range(NSPV):
            # Index with the loop variable NB; the original ISPV[NSPV][0]
            # would read past the end of ISPV.
            FDF = (ISPV[NB][0] - 1) * NDF + ISPV[NB][1]
            # Zero the row and column of the constrained DOF and set a 1 on
            # the diagonal. Note GLK[:][FDF] only aliases a row, so numpy
            # slicing is used for the column.
            GLK[:, FDF] = 0
            GLK[FDF, :] = 0
            GLK[FDF, FDF] = 1
            GLKG[:, FDF] = 0
            GLKG[FDF, :] = 0
            GLKG[FDF, FDF] = 1
    return GLK, GLKG
e379a73e93b0d02db7c91d2d69ad55105abc8339
20,448
import torch


def compute_content_loss(a_C, a_G):
    """
    Compute the content cost

    Arguments:
        a_C -- tensor of dimension (1, n_C, n_H, n_W)
        a_G -- tensor of dimension (1, n_C, n_H, n_W)

    Returns:
        J_content -- scalar that you compute using equation 1 above
    """
    m, n_C, n_H, n_W = a_G.shape

    # Reshape a_C and a_G to (m * n_C, n_H * n_W)
    a_C_unrolled = a_C.view(m * n_C, n_H * n_W)
    a_G_unrolled = a_G.view(m * n_C, n_H * n_W)

    # Compute the cost
    J_content = 1.0 / (4 * m * n_C * n_H * n_W) * torch.sum((a_C_unrolled - a_G_unrolled) ** 2)

    return J_content
51894aedd2a7cfce5db3be3b43f8689ecdf78bf6
20,449
def id_sort_key(entity):
    """
    Sort key for ``DiscordEntity``-s.

    Parameters
    ----------
    entity : ``DiscordEntity``
        The discord entity to get identifier of.

    Returns
    -------
    entity_id : `int`
        The entity's identifier.
    """
    return entity.id
320468d39064f49c9a118ca5a3591e68227d9bbe
20,450
def filter_table_from_column(table, column, value):
    """
    Return a view into the 'table' DataFrame, selecting only the rows where
    'column' equals 'value'
    """
    return table[table[column] == value]
2b0642d8a2a7959614ef99f1fdc12148ddd294f7
20,451
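A usage sketch for filter_table_from_column above, with a hypothetical DataFrame:

import pandas as pd

df = pd.DataFrame({"city": ["Oslo", "Bergen", "Oslo"], "pop": [700, 280, 710]})
print(filter_table_from_column(df, "city", "Oslo"))
#    city  pop
# 0  Oslo  700
# 2  Oslo  710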
def strip_specific_magics(source, magic):
    """
    Given the source of a cell, filter out specific cell and line magics.
    """
    filtered = []
    for line in source.splitlines():
        # Check the cell magic (%%) first, and slice off the prefix rather
        # than using str.lstrip, which strips a *set of characters*, not a
        # prefix. The original if/if/else also appended line-magic lines
        # twice.
        if line.startswith(f'%%{magic}'):
            filtered.append(line[len(f'%%{magic}'):].strip(' '))
        elif line.startswith(f'%{magic}'):
            filtered.append(line[len(f'%{magic}'):].strip(' '))
        else:
            filtered.append(line)
    return '\n'.join(filtered)
3c2722b86b6fc8e40c8dd51edd06d7edb28e2945
20,452
def build_docker_package_install(package, version):
    """Outputs formatted dockerfile command to install a specific version of
    an R package into a docker image

    Parameters
    ----------
    package : string
        Name of the R package to be installed
    version : string
        Version number of the desired package
    """
    return 'RUN R -e \"require(\'devtools\');install_version(\'' + \
        package + '\', version=\'' + version + '\', repos=\'http://cran.rstudio.com\')\"\n'
3d323377ce22225fdeea767550eedc57542631df
20,453
def recall_powerlaw_fits_to_full_models():
    """fits may be recomputed by evaluating the .ipynb associated with
    fitting powerlaws to the full models.

    here, w=M*q**m, and Delta_X is the maximum disagreement one could expect
    to observe with 95% confidence. here, Delta_X concerns disagreements
    between statistically independent measurements of X.

    Example Usage:
        wjr = recall_powerlaw_fits_to_full_models()
        print(*wjr)
    """
    # Recall powerlaw fits to full models
    # Fenton-Karma (PBC)
    m, Delta_m, M, Delta_M = 1.8772341309722325, 0.02498750277237229, 5.572315674840435, 0.3053120355191732
    wjr = {
        'fk_pbc': {'m': m, 'Delta_m': Delta_m, 'M': M, 'Delta_M': Delta_M}
    }
    # Luo-Rudy (PBC)
    m, Delta_m, M, Delta_M = 1.6375562704001745, 0.017190912126700632, 16.73559858353835, 0.8465090320196467
    wjr['lr_pbc'] = {'m': m, 'Delta_m': Delta_m, 'M': M, 'Delta_M': Delta_M}
    return wjr
441177ede4f5a9b093522ce8dc799307e714ee0d
20,455
def center_crop(im):
    """assumes im.shape = (x, y, colors)"""
    im_dim = min(im.shape[:2])
    im_x0 = int((im.shape[0] - im_dim) / 2)
    im_y0 = int((im.shape[1] - im_dim) / 2)
    return im[im_x0:im_x0 + im_dim, im_y0:im_y0 + im_dim]
29251fd1a000c5c6ff5d81a6688685d7ac010a76
20,456
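A usage sketch for center_crop above: crop a hypothetical 100x60 RGB image to its 60x60 center.

import numpy as np

im = np.zeros((100, 60, 3))
print(center_crop(im).shape)  # (60, 60, 3)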
def fatorial(num=1, show=False):
    """
    -> Computes the factorial of a number.

    :param num: Number whose factorial is to be computed.
    :param show: (optional) Whether or not to display the calculation.
    :return: the factorial of the number.

    *Developed by Lucas Souza, github.com/lucashsouza*.
    """
    fat = 1
    for cnt in range(num, 0, -1):
        if show:
            print(f'{cnt}', end='')
            if cnt > 1:
                print(' x ', end='')
            else:
                print(' = ', end='')
        fat *= cnt
    return fat
39245c46ae6d9551dcd1fdd9c161575c8e5c21dd
20,457
def get_column_from_file(data_path, column_index, sep=None):
    """
    :param data_path:
    :param column_index:
    :param sep:
    :return:
    """
    with open(data_path, 'r', encoding="utf8") as f:
        contents = f.readlines()
    data = []
    sep_data = []
    for content in contents:
        content = content.strip()
        if content:
            if sep is not None:
                content = content.split(sep)[column_index]
            else:
                content = content.split()[column_index]
            data.append(content)
        else:
            if data:
                sep_data.append(data)
                data = []
    # Flush the final block in case the file does not end with a blank line.
    if data:
        sep_data.append(data)
    return sep_data
a7aa673f594a2f4bd4939330c30971e3f60c7aa4
20,458