content: string (length 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import subprocess

def host_check(host) -> bool:
    """ Check to see if a specific host is alive """
    proc = subprocess.run(
        ['ping', '-W1', '-q', '-c', '2', host],
        stdout=subprocess.DEVNULL)
    return proc.returncode == 0
edca7d859ca284acded0f4021292777952c3fd16
696,860
import typing

def _get_tags(phase: str, param: str) -> typing.List[str]:
    """Get the tag names.

    Parameters
    ----------
    phase
        Name of the phase.
    param
        Name of the parameter.

    Returns
    -------
    typing.List[str]
        Tag names for the phase/parameter combination.
    """
    return [param, phase, "{}_{}".format(phase, param)]
278bb1f9d032c14287bd48a6bcfa9bfd7577f53a
696,861
from typing import Dict
from typing import Union
from typing import List

def add_expenditures_cumulative_count(category_tree: Dict) -> None:
    """Add cumulative_count attribute to categories in category tree."""

    def _get_all_children_expenditure_counts(children: Union[List[Dict], None]) -> int:
        value = 0
        if children:
            for child in children:
                category = child["node"]
                value += category.expenditures_count
                category_children = child.get("children")
                value += _get_all_children_expenditure_counts(category_children)
        return value

    category = category_tree["node"]
    category_children = category_tree.get("children", [])
    children_count = (
        _get_all_children_expenditure_counts(category_children)
        if category_children
        else 0
    )
    category.expenditures_cumulative_count = (
        category.expenditures_count + children_count
    )
    for child in category_children:
        add_expenditures_cumulative_count(child)
1dab9228284f86c89f67c93e39e015e903846eac
696,862
def get_datetime(cube):
    """Extract the time coordinate from a cube in datetime format """
    tcoord = cube.coord('time')
    tcoord_as_datetime = tcoord.units.num2date(tcoord.points)
    return tcoord_as_datetime
e9975950e2e7d32a9845e55e062d11d7fa7440f6
696,863
def fill(template, hostdata, dnsdata=False):
    """Fills a generic template
    Replaces a lot of repeated code"""
    if dnsdata:
        template.names = hostdata['names']
        template.cnames = hostdata['cnames']
        template.mxs = hostdata['mxs']
    template.host = hostdata['host']
    template.interfaces = hostdata['interfaces']
    template.ips = hostdata['ips']
    return template
450d89360ac305c39e26ded1fcdcb53a4ead7ea1
696,864
import binascii

def trim_hash(info_hash):
    """cleans up info hash"""
    if len(info_hash) == 40:
        # hex-encoded hash: convert to the 20 raw bytes
        return binascii.unhexlify(info_hash)
    if len(info_hash) != 20:
        raise TypeError("Infohash not equal to 20 digits", info_hash)
    return info_hash
ffc71b8f2b92d408a4bfc58878ee36493b0f9fcb
696,865
def secure_lookup(data, key1, key2=None):
    """
    Return data[key1][key2] while dealing with data being None
    or key1 or key2 not existing
    """
    if not data:
        return None
    if key1 in data:
        if not key2:
            return data[key1]
        if key2 in data[key1]:
            return data[key1][key2]
    return None
03316d8902572f9ece66229f45fcc68212120fa5
696,866
def make_path(paths: list) -> str:
    """
    Join a list of strings into one path separated by '/'
    -> ['api', 'goods']
    <- "/api/goods"
    """
    if paths == []:
        return '/'
    s = ''
    for i in paths:
        if i == '':
            continue  # skip empty items
        s += "/"
        s += i
    return s
a4e200c228c9f3d8f3ace3fb6cca94df1be16b6d
696,867
import re

def pint2cfunits(value):
    """Return a CF-Convention unit string from a `pint` unit.

    Parameters
    ----------
    value : pint.Unit
        Input unit.

    Returns
    -------
    out : str
        Units following CF-Convention.
    """
    # Print units using abbreviations (millimeter -> mm)
    s = "{:~}".format(value)

    # Search and replace patterns
    pat = r"(?P<inverse>/ )?(?P<unit>\w+)(?: \*\* (?P<pow>\d))?"

    def repl(m):
        i, u, p = m.groups()
        p = p or (1 if i else "")
        neg = "-" if i else ("^" if p else "")
        return "{}{}{}".format(u, neg, p)

    out, n = re.subn(pat, repl, s)
    return out
7527a76393553282e39800dd685fd7138e78b359
696,868
def extract_form_data(response):
    """
    Extract the HTML form information contained in the given response.
    """

    def add_param(element):
        """ Add the info of the given element to params if it has a name """
        nonlocal params
        name = element.attrib.get("name", None)
        value = element.attrib.get("value", "")
        if name:
            params[name] = value

    # find and iterate over all forms contained in the response
    form_data = []
    forms = response.xpath("//form")
    for form in forms:
        action = form.attrib.get("action", None)
        form_id = form.attrib.get("id", None)
        method = form.attrib.get("method", None)

        # only process forms with action and method attribute
        if (action is None) or (not method):
            continue

        # adjust action and method strings
        if action == "#" or action == "":
            action = response.url
        action = action.replace("&amp;", "&")
        action = action.replace("&#038;", "&")
        method = method.upper()

        # extract all the different parameters
        params = {}
        for _input in form.xpath("//input"):
            add_param(_input)
        for select in form.xpath("//select"):
            add_param(select)
        for textarea in form.xpath("//textarea"):
            add_param(textarea)

        # handle the use of form IDs
        if form_id:
            for _input in response.xpath("//input[@form='%s']" % form_id):
                add_param(_input)
            for select in response.xpath("//select[@form='%s']" % form_id):
                add_param(select)
            for textarea in response.xpath("//textarea[@form='%s']" % form_id):
                add_param(textarea)

        # if there is only one form, consider all inputs of the page to be part of this form
        if len(forms) == 1:
            for _input in response.xpath("//input"):
                add_param(_input)
            for select in response.xpath("//select"):
                add_param(select)
            for textarea in response.xpath("//textarea"):
                add_param(textarea)

        form_data.append({"action": action, "method": method, "params": params, "id": form_id})

    return form_data
dd64ff0bc06aad143ff298d4661616363f6a50b1
696,869
def _get_outcome(guess: str, solution: str) -> str:
    """Get outcome string for the given guess / solution combination

    Args:
        guess: the word guessed
        solution: puzzle solution

    Returns:
        5-character string of:
            '0' = letter not present
            '1' = letter present but not in correct position
            '2' = letter present and in correct position
    """
    # We use lists to have mutable objects to work with
    outcome = list("-----")
    guess_list = list(guess)
    solution_list = list(solution)

    # Get 0 and 2 first - this manages multiple occurrences of the same letter
    # whereby a letter in the correct position should take precedence
    # over one not in the correct position
    for position in range(5):
        # Letter not present = 0
        if guess_list[position] not in solution_list:
            outcome[position] = "0"
            guess_list[position] = "-"
        # Letter in correct position = 2
        elif guess_list[position] == solution_list[position]:
            outcome[position] = "2"
            solution_list[position] = "-"
            guess_list[position] = "-"

    # Now mop up remaining letters
    for position in range(5):
        if guess_list[position] != "-":
            if guess_list[position] not in solution_list:
                outcome[position] = "0"
            else:
                outcome[position] = "1"
                solution_list[solution_list.index(guess_list[position])] = "-"

    return "".join(outcome)
843e7d2c38d3bd7e22b508581924ada0416adb84
696,870
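A quick sanity check of the scoring logic above; this is a hedged illustration, the guess/solution pair is an assumed example and the expected string follows from the function as reformatted:

    # guess "crane" vs solution "cigar": 'c' is placed (2), 'r' and 'a' are
    # present elsewhere (1), 'n' and 'e' are absent (0)
    assert _get_outcome("crane", "cigar") == "21100"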
def generate_save_string(dataset, embedding_name, random_state=-1, sample=-1.0):
    """
    To allow for multiple datasets to exist at once, we add this string to identify
    which dataset a run script should load.

    Arguments:
        dataset (str) : name of dataset
        embedding_name (str) : name of pre-trained embedding to use (possible names
                               can be found in possible_embeddings)
        random_state (int) : random state used to split data into train, dev, test (if applicable)
        sample (float) : percentage of possible data used for training
    """
    return "_".join([dataset, embedding_name, str(random_state), str(sample)])
975fc413d86af2b5b3f24f440100efdd4013c478
696,872
def getDiffElements(initialList: list, newList: list) -> list:
    """Returns the elements that differ in the two given lists

    Args:
        initialList (list): The first list
        newList (list): The second list

    Returns:
        list: The list of elements differing between the two given lists
    """
    final = []
    for element in newList:
        if element not in initialList:
            final.append(element)
    return final
a336d5f0c3073f3a656e0b79eaae2b117ba86899
696,873
import struct

def gen_rs_table(radstroke):
    """Generate the Unicode Radical Stroke data."""
    def flag(s):
        if s:
            return 0x80
        return 0
    ranges = []
    entries = []
    num_ranges = radstroke.run_count()
    dofs = num_ranges * 12 + 4
    for base, mapped in radstroke.runs():
        ranges.append(struct.pack(b'=III', base, base + len(mapped) - 1, dofs))
        dofs += len(mapped) * 2
        entries.append(b''.join([struct.pack(b'=BB', r, (a & 0x7f) | flag(s))
                                 for r, s, a in mapped]))
    return b''.join([struct.pack(b'=I', num_ranges)] + ranges + entries)
3ac761b510471a9e6211b07b5eda78db9db5c3e9
696,874
def cpu_xgraph_quantizer(xgraph, **kwargs):
    """ Basic xgraph quantizer """
    return xgraph
bc2b5f78637108207f0c1f7d0350ff01e3325822
696,875
def limit_stop_loss(entry_price: float, stop_price: float, trade_type: str,
                    max_allowed_risk_percentage: int) -> float:
    """
    Limits the stop-loss price according to the max allowed risk percentage.
    (How many percent you're OK with the price going against your position)

    :param entry_price:
    :param stop_price:
    :param trade_type:
    :param max_allowed_risk_percentage:
    :return: float
    """
    risk = abs(entry_price - stop_price)
    max_allowed_risk = entry_price * (max_allowed_risk_percentage / 100)
    risk = min(risk, max_allowed_risk)
    return (entry_price - risk) if trade_type == 'long' else (entry_price + risk)
ad60d370fc44c43ea9398691c271465009f6d96e
696,876
from typing import List
import os

def get_paths_in_folder(directory: str) -> List[str]:
    """Return the files in a given folder.

    :param directory: folder path
    :return: file names in folder
    """
    return [
        path
        for path in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, path))
    ]
962603fa01551f5b71d216e3b6784ea6cd0ae526
696,877
def nearest_x(num, x):
    """
    Returns the number rounded down to the nearest 'x'.
    example: nearest_x(25, 20) returns 20.
    """
    for i in range(x):
        if not (num - i) % x:
            return num - i
5d8a9b6a77b9bec37517ca00fcc1d7a383aaaea1
696,878
def safe_join_existing_lab(lab_id, client):
    """ gets a lab by its ID only if it exists on the server """
    if lab_id in client.get_lab_list():
        return client.join_existing_lab(lab_id)
    return None
fe5ccc6fcb36fd90325a6156b3dff6d199d5ee8a
696,879
def pytest_report_collectionfinish(config, items):
    """Log how many and, if verbose, which items are tested in this shard."""
    msg = "Running {num} items in this shard".format(num=len(items))
    if config.option.verbose > 0:
        msg += ": " + ", ".join([item.nodeid for item in items])
    return msg
e81d3663f60505543a1b554d36e2dee26b7107f9
696,880
def extract_gist_id(gist_string):
    """Extract the gist ID from a url.

    Will also work if simply passed an ID.

    Args:
        gist_string (str): Gist URL.

    Returns:
        string: The gist ID.

    Examples:
        gist_string : Gist url 'https://gist.github.com/{user}/{id}'.
    """
    return gist_string.split("/")[-1]
7c93c5f78cc9c5dd1b1136a82e9359a20d31265f
696,881
import asyncio

def run(coroutine):
    """
    Runs and returns the data from the coroutine passed in.
    This is to only be used in unittesting.

    coroutine : asyncio coroutine
    -> coroutine return
    """
    return asyncio.get_event_loop().run_until_complete(coroutine)
c862608f1abfdd234c5b4ad0be413d14123f1d6e
696,882
import os

def relative_path(full_path, parent_subpath):
    """Get the relative path

    Args:
        full_path: long path, example /a/b/c
        parent_subpath: sub-directory.

    Example:
        In [78]: relative_path("/a/b/c", '/a')
        Out[78]: 'b/c'

        In [79]: relative_path("/a/b/c", '/a/')
        Out[79]: 'b/c'
    """
    full_path = os.path.realpath(full_path)
    assert parent_subpath != ""
    parent_subpath = os.path.realpath(parent_subpath)
    relative = os.path.relpath(full_path, parent_subpath)
    return relative
bd2c21a8763f089e47e587cfe901807df9fd5a4f
696,883
def rec_mc(coins, change, knowned):
    """Solve the 'make change with the fewest coins' problem recursively."""
    min_coins = change
    if change in coins:
        knowned[change] = 1
        return 1
    elif knowned[change] > 0:
        return knowned[change]
    else:
        for i in [c for c in coins if c <= change]:
            num_coins = 1 + rec_mc(coins, change - i, knowned)
            if num_coins < min_coins:
                min_coins = num_coins
                knowned[change] = min_coins
    return min_coins
1480fe4be6f0c14af3a210311b4a7d3b9d592855
696,884
def null_detector(X):
    """
    X is a dataframe
    Function will give the null values in the dataframe
    """
    return X.isnull().sum()
3654ac56534abc92b720b35529c8dc44f5a73eff
696,885
from datetime import datetime

def get_timestamp(detailed=False):
    """
    get_timestamp - returns a timestamp in string format

    detailed : bool, optional, default False, if True, returns a timestamp with seconds
    """
    if detailed:
        return datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    else:
        return datetime.now().strftime("%b-%d-%Y")
264eb48f7c205a187bc08a9b9ad73fdab05a6acb
696,886
import math

def stddev(li, mean):
    """
    Calculate the standard deviation of a set of data.

    >>> round(stddev([3, 3.5, 4], 3.5), 3)
    0.408
    """
    if not li:
        return None
    return math.sqrt(sum((x - mean) ** 2 for x in li) / len(li))
d947d3bdf11841d236d28620682773d18be637f1
696,887
import os

def create_file_list(loc):
    """ create file location for reading functions """
    file_list = os.listdir(loc)
    for i in range(len(file_list)):
        file_list[i] = loc + "/" + file_list[i]
    return file_list
560b658d6b3360308bf1a2111627d957ba45d997
696,888
import string
import random

def get_rand_string(size=6, chars=string.ascii_uppercase + string.digits):
    """generates a random string.

    :param size: length of the string
    :param chars: string of characters to choose from, by default A-Z0-9
    """
    return ''.join(random.choice(chars) for _ in range(size))
655cad254414b265df30e160b6f4859f91680ed1
696,889
def flip(board):
    """Returns horizontal mirror image of board with inverted colors."""
    flipped_board = dict()
    for square, piece in board.items():
        flipped_board[(7 - square[0], square[1])] = piece.swapcase()
    return flipped_board
62e3bbbe33abdd2e2d4e1ce6eae9f992b0fd275a
696,890
def disable_hat_selector_dropdown(acq_state):
    """
    A callback function to disable the HAT selector dropdown when the
    application status changes to configured or running.
    """
    disabled = False
    if acq_state == 'configured' or acq_state == 'running':
        disabled = True
    return disabled
fce32c07f01a88d94a6258ca43c14b0641ab565f
696,891
def paragraph_reversal(p: str) -> str:
    """Reverse the order of sentences in a paragraph.

    This function can run in O(n) time. Because Python requires returning a new
    copy of a string and not a pointer, this function does not technically
    perform the operation in place.

    Arguments:
        p : str
            input paragraph

    Returns:
        str: the paragraph with the order of sentences reversed
    """
    sentence_breaks = ['.', '?', '!']
    sentences = []
    sentence = []
    state = 'normal_text'
    for i in range(len(p)):
        if state == 'normal_text':
            sentence.append(p[i])
            if p[i] in sentence_breaks:
                state = 'punctuation'
        elif state == 'punctuation':
            sentence.append(p[i])
            if p[i].strip() != '':
                state = 'normal_text'
            else:
                state = 'white_space'
        elif state == 'white_space':
            if p[i].strip() == '':
                continue
            elif p[i].upper() == p[i]:
                sentences.append(''.join(sentence))
                sentence = [p[i]]
                state = 'normal_text'
    if sentence[-1].strip() != '':
        sentences.append(' ')
    sentences.append(''.join(sentence))
    for i in range(int(len(sentences) / 2)):
        temp = sentences[i]
        sentences[i] = sentences[len(sentences) - 1 - i]
        sentences[len(sentences) - 1 - i] = temp
    return ''.join(sentences)
e0f8889fe64193ab675c67947db72128e139900b
696,893
import json

def cat_to_name(file):
    """
    Loads a json file with mapping from category to flower name

    Parameters:
        file: name of .json mapping file

    Returns:
        a python dictionary with mapping of categories to flower names
    """
    with open(file, 'r') as f:
        cat_to_name = json.load(f)
    return cat_to_name
4545c9924c160665d984aee88d120f9648bf2f1e
696,894
import os

def groupImageStats(imgFile, outImage, brik=''):
    """ Strip the desired image statistics from the image file

    Specifically, remove those subbricks specified from the supplied image, and
    store them in their own file that can be manipulated more easily later on.

    Params:
        imgFile -- The input 4d file. It can be a subject image file or a group
                   image file, so long as at least 2 subbricks reside within the
                   image. The image should contain the desired path.
                   '/path/to/image/file/4dImage.nii.gz'
                   Optionally, a list of 4d images can be supplied in which case
                   a string will be constructed using a list comprehension.
        brik -- The desired subbrik(s) to be extracted. AFNI conventions for
                specifying subbriks apply.
        outImage -- The desired prefix for the newly created image file. The path
                    name should be included in the image prefix.

    Returns: A string composed of the output image's path and name, in case it is needed.
    """
    if type(imgFile) == list:
        imgFile = ' '.join([x + brik for x in imgFile])
    else:
        imgFile = imgFile + brik
    os.system('3dbucket -prefix ' + outImage + ' ' + imgFile)
    return outImage
2b5433c413fa07971b06565c93e867f99a9358e7
696,895
def list_to_string(lst):
    """ convert a list to string format for config files """
    return ",".join(map(str, lst))
c7eb44b84befb0d00fd72033e9cbcb50951c649c
696,897
def getTime(t):
    """Convert the time format returned by OANDA into a simple time string."""
    return t[11:19]
c5d3cd088a4aaee358cd65b807b387bf034adce2
696,899
def get_blue():
    """
    Get color blue for rendering
    """
    return [0, 0.651, 0.929]
0a045497fb100e9019fe1cad52221ef37431cee0
696,900
def get_secs(t_string):
    """
    From time_string like '00:00:35.660' returns the number of seconds
    with respect to '00:00:00.000'; returned value is float
    """
    hours = t_string[0:2]
    minutes = t_string[3:5]
    secs = t_string[6:8]
    m_secs = t_string[8:12]
    secs_total = int(hours) * 3600 + int(minutes) * 60 + int(secs) + float(m_secs)
    # print(hours, ",", minutes, ",", secs, ",", m_secs)
    # print(secs_total)
    return secs_total
2401adef97845e9d7d900baf882812ebf0acae58
696,901
def confirm(msg=None):
    """ Confirms the removal of the file with a yes or no input """
    # Loop "forever" intended
    while True:
        confirm_input = input(msg if msg is not None else "Purge this directory (yes/no)?")
        if confirm_input.lower() in ["y", "yes"]:
            return True
        if confirm_input.lower() in ["n", "no"]:
            return False
        print("{} is invalid. Please use 'yes' or 'no'".format(confirm_input))
c6023f9fb72b10953afd0dbbea73a4f03db67de2
696,902
def is_kevinshome(ctx):
    """check to see if invoking user is kevinshome"""
    return ctx.author.id == 416752352977092611
13f3bf234affe65c05f2b9cf9af89297ab5ef855
696,903
def get_conf(genotype):
    """Transform a genotype (string) into some variables."""
    architecture, evol_strattegy = genotype.split('--')
    architecture = [[int(x) for x in conn.split('|')]
                    for conn in architecture.split(' ')]
    use_shared, dataset = evol_strattegy.split(' ')
    use_shared, dataset = int(use_shared), int(dataset)
    return architecture, use_shared, dataset
a4cf9353f95eb77283aa616ce66854db85cd3ca0
696,904
def find_alternating_4_cycle(G):
    """
    Returns False if there aren't any alternating 4 cycles.
    Otherwise returns the cycle as [a, b, c, d] where (a, b)
    and (c, d) are edges and (a, c) and (b, d) are not.
    """
    for (u, v) in G.edges():
        for w in G.nodes():
            if not G.has_edge(u, w) and u != w:
                for x in G.neighbors(w):
                    if not G.has_edge(v, x) and v != x:
                        return [u, v, w, x]
    return False
30456f50fbf6da2a98dafe0ec837d5a4ea963b08
696,905
def is_submeter(sensor, dfdaymin, dfdaymax):
    """
    Return True if this sensor is a sub-meter

    sensor = sensor object
    """
    other_sensors = sensor.device.get_sensors(sensortype='electricity')
    other_sensors.remove(sensor)
    if len(other_sensors) == 0:
        print("\n{} - {}: no other sensors, this must be main.".format(sensor.device.key, sensor.description))
        return False
    else:
        print("\n{} - {}: comparing with:".format(sensor.device.key, sensor.description))
        for o in other_sensors:
            # we only check the values for last day
            print("* {}:".format(o.description))
            sensormin = dfdaymin.ix[-1, sensor.key]
            sensormax = dfdaymax.ix[-1, sensor.key]
            try:
                othermin = dfdaymin.ix[-1].dropna()[o.key]
                othermax = dfdaymax.ix[-1].dropna()[o.key]
            except:
                print("  No data found for sensor {}".format(o.description))
                pass
            else:
                if (sensormin <= othermin) and (sensormax <= othermax):
                    print("  {} has lower daily min AND max, so it is a submeter.".format(sensor.description))
                    return True
                else:
                    print("  {} has higher daily min and/or max, we look further.".format(sensor.description))
        else:
            print("All other sensors have no data OR lower daily min and max. {} must be main.".format(sensor.description))
            return False
78ca8017d736d5d9502604b82666bc08bf7ac9c0
696,906
def add_2d(ds):
    """
    Regrid horizontally.

    :param ds: Input xarray dataset
    """
    ds['lat2d'] = ds.lat.expand_dims({'lon': ds.lon}).transpose()
    ds['lon2d'] = ds.lon.expand_dims({'lat': ds.lat})
    return ds
428bca0725b6bea7d146d4501c221ef96e6c28e8
696,907
def gf_crt(U, M, K):
    """Chinese Remainder Theorem.

    Given a set of integer residues `u_0,...,u_n` and a set of
    co-prime integer moduli `m_0,...,m_n`, returns an integer
    `u`, such that `u = u_i mod m_i` for `i = 0,...,n`.

    As an example consider a set of residues `U = [49, 76, 65]`
    and a set of moduli `M = [99, 97, 95]`. Then we have::

       >>> from sympy.polys.galoistools import gf_crt
       >>> from sympy.polys.algebratools import ZZ

       >>> gf_crt([49, 76, 65], [99, 97, 95], ZZ)
       639985

    This is correct result because::

       >>> 639985 % 99
       49
       >>> 639985 % 97
       76
       >>> 639985 % 95
       65
    """
    p, v = K.one, K.zero
    for m in M:
        p *= m
    for u, m in zip(U, M):
        e = p // m
        s, _, _ = K.gcdex(e, m)
        v += e*(u*s % m)
    return v % p
32d4fabf159487c9867ca825fb2e2c550f39b393
696,908
def mosaic_to_horizontal(ModelParameters, forecast_period: int = 0):
    """Take a mosaic template and pull a single forecast step as a horizontal model.

    Args:
        ModelParameters (dict): the json.loads() of the ModelParameters of a mosaic ensemble template
        forecast_period (int): when to choose the model, starting with 0
            where 0 would be the first forecast datestamp, 1 would be the second, and so on
            must be less than forecast_length that the model was trained on.

    Returns:
        ModelParameters (dict)
    """
    if str(ModelParameters['model_name']).lower() != "mosaic":
        raise ValueError("Input parameters are not recognized as a mosaic ensemble.")
    all_models = ModelParameters['series']
    result = {k: v[str(forecast_period)] for k, v in all_models.items()}
    model_result = {
        k: v for k, v in ModelParameters['models'].items() if k in result.values()
    }
    return {
        'model_name': "horizontal",
        'model_count': len(model_result),
        "model_metric": "mosaic_conversion",
        'models': model_result,
        'series': result,
    }
1776062056b9c4d56bbcb8db5972ce62b9eacf68
696,909
def term_to_cat_relevance(term, category, all_categories):
    """
    mi R(t, cj)

    :param term: term from document
    :type term: str
    :param category: category in which we look for
    :type category: Category
    :param all_categories: all categories
    :type all_categories: list
    """
    return (category.terms_quantity[term] /
            sum([cat.terms_quantity[term] for cat in all_categories]))
bfaa677bae99cef2423ebf5d16290dc5e2ff8b07
696,910
def add_log_level(logger, method_name, event_dict):
    """
    Add the log level to the event dict.
    """
    if method_name == "warn":
        # The stdlib has an alias
        method_name = "warning"
    event_dict["level"] = method_name
    return event_dict
c69627fcbf8c7b0ec5890b8752f1327b95bd5854
696,911
def _is_python_file(filename):
    """Check if the input file looks like a Python script

    Returns True if the filename ends in ".py" or if the first line
    contains "python" and "#!", returns False otherwise.
    """
    if filename.endswith('.py'):
        return True
    with open(filename, 'r') as file_handle:
        first_line = file_handle.readline()
    return 'python' in first_line and '#!' in first_line
a6d99166c6b76c4ae0ad5f5036951986fd5a102b
696,912
def left_join(ht1, ht2):
    """
    Takes in two hashtables and returns a dictionary representing
    a left join of the two hashtables.
    """
    join_dict = {}
    for bucket in ht1._array:
        if bucket is not None:
            curr = bucket.head
            while curr:
                key = curr.data[0]
                val = curr.data[1]
                join_dict[key] = [val]
                curr = curr._next
    for key in join_dict:
        if ht2.contains(key):
            val = ht2.get(key)
            join_dict[key].append(val)
        else:
            join_dict[key].append(None)
    return join_dict
a70e2a1d8d4feb6ef1cdd83c2f1ec494e9d4ce5c
696,913
def transpose_report(report):
    """ Splits & transposes the report """
    l_o_l = [list(c for c in code) for code in report.split("\n") if len(code)]
    return list(map(list, zip(*l_o_l)))
baf3a9bdcc1095ec434cf7c9033aa2cc29d1c5b6
696,914
def RANGE(start, end, step=None):
    """
    Generates the sequence from the specified starting number by successively
    incrementing the starting number by the specified step value up to but not
    including the end point.
    See https://docs.mongodb.com/manual/reference/operator/aggregation/range/
    for more details

    :param start: An integer (or valid expression) that specifies the start of the sequence.
    :param end: An integer (or valid expression) that specifies the exclusive upper limit of the sequence.
    :param step: An integer (or valid expression) that specifies the increment value.
    :return: Aggregation operator
    """
    return {'$range': [start, end, step]} if step is not None else {'$range': [start, end]}
3537e1e0cd28ae7d90e469a281ab373312fe075c
696,915
import six

def _unicode_to_str(data):
    """
    Utility function to make json encoded data an ascii string

    Original taken from:
    http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python

    :param data: either a dict, list or unicode string
    :return: ascii data
    """
    if isinstance(data, dict):
        return {_unicode_to_str(key): _unicode_to_str(value)
                for key, value in six.iteritems(data)}
    elif isinstance(data, list):
        return [_unicode_to_str(element) for element in data]
    elif isinstance(data, six.text_type):
        return data.encode('utf-8')
    else:
        return data
ff637b4cad60833de6c448cc3807c4928b2a94da
696,916
import re

def mocked_res_client(*args):
    """Function will be used by the mock to replace resilient client"""

    class MockResponse:
        def __init__(self, *arg):
            pass

        def __contains__(self, key):
            return True if key in self.__dict__.keys() else False

        def _get_data(self):
            return 'X5O!P%@AP[4\\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*'

        def _get_content(self, type):
            response = {
                "incident_id": {u'task_at_id': None, u'vers': 50, u'name': u'incident_eicar.txt', u'task_id': None, u'created': 1544603748680, u'inc_owner': 4, u'task_members': None, u'task_custom': None, u'task_name': None, u'actions': [{u'enabled': True, u'id': 29, u'name': u'Example: ClamAV scan attachment'}], u'inc_name': u'ClamAV', u'creator_id': 4, u'content_type': u'text/plain', u'inc_id': 2095, u'type': u'incident', u'id': 18, u'size': 69},
                "artifact_id": {u'hits': [], u'hash': u'a39aee8e740f0344194a1b46ae62ceb1724e7ac02c2cb9c71c615f17a3bc49a7', u'description': None, u'creator': {u'status': u'A', u'password_changed': False, u'display_name': u'Resilient Sysadmin', u'is_external': False, u'email': u'a@a.com', u'lname': u'Sysadmin', u'create_date': 1514410126812, u'last_login': 1545068067059, u'fname': u'Resilient', u'last_modified_time': 1545068067059, u'locked': False, u'id': 4}, u'inc_owner': 4, u'perms': {u'read': True, u'write': True, u'delete': True}, u'created': 1544603975028, u'relating': None, u'value': u'artifact_eicar.txt', u'properties': None, u'parent_id': None, u'attachment': {u'task_at_id': None, u'vers': 50, u'name': u'artifact_eicar.txt', u'task_id': None, u'created': 1544603975061, u'inc_owner': 4, u'task_members': None, u'task_custom': None, u'task_name': None, u'actions': [], u'inc_name': u'ClamAV', u'creator_id': 4, u'content_type': u'text/plain', u'inc_id': 2095, u'type': u'artifact', u'id': 22, u'size': 69}, u'inc_name': u'ClamAV', u'creator_principal': {u'display_name': u'Resilient Sysadmin', u'type': u'user', u'id': 4, u'name': u'a@a.com'}, u'inc_id': 2095, u'type': 7, u'id': 21, u'actions': [{u'enabled': True, u'id': 50, u'name': u'Example: ClamAV scan artifact attachment'}], u'pending_sources': []},
                "task_id": {u'task_at_id': None, u'vers': 50, u'name': u'task_eicar.txt', u'task_id': 2251251, u'created': 1544604120052, u'inc_owner': 4, u'task_members': None, u'task_custom': True, u'task_name': u'test task', u'actions': [{u'enabled': True, u'id': 29, u'name': u'Example: ClamAV scan attachment'}], u'inc_name': u'ClamAV', u'creator_id': 4, u'content_type': u'text/plain', u'inc_id': 2095, u'type': u'task', u'id': 25, u'size': 69},
            }
            return response[type]

        def get(self, metadata_uri):
            if re.match("^/incidents/[0-9]+/attachments/[0-9]+$", metadata_uri):
                return self._get_content("incident_id")
            elif re.match("^/incidents/[0-9]+/artifacts/[0-9]+$", metadata_uri):
                return self._get_content("artifact_id")
            elif re.match("^/tasks/[0-9]+/attachments/[0-9]+$", metadata_uri):
                return self._get_content("task_id")

        def get_content(self, data_uri):
            return self._get_data().encode()

    return MockResponse(*args)
09ceac1c44c2788534141eec135956defd2b6174
696,917
import re

def _line_type(line, delimiter=None):
    """Interpret a QDP file line

    Parameters
    ----------
    line : str
        a single line of the file

    Returns
    -------
    type : str
        Line type: "comment", "command", or "data"

    Examples
    --------
    >>> _line_type("READ SERR 3")
    'command'
    >>> _line_type(" \\n !some gibberish")
    'comment'
    >>> _line_type("   ")
    'comment'
    >>> _line_type(" 21345.45")
    'data,1'
    >>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
    'data,6'
    >>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
    'data,6'
    >>> _line_type(" 21345.45 ! a comment to disturb")
    'data,1'
    >>> _line_type("NO NO NO NO NO")
    'new'
    >>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
    'new'
    >>> _line_type("N O N NOON OON O")
    Traceback (most recent call last):
        ...
    ValueError: Unrecognized QDP line...
    >>> _line_type(" some non-comment gibberish")
    Traceback (most recent call last):
        ...
    ValueError: Unrecognized QDP line...
    """
    _decimal_re = r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
    _command_re = r'READ [TS]ERR(\s+[0-9]+)+'

    sep = delimiter
    if delimiter is None:
        sep = r'\s+'
    _new_re = rf'NO({sep}NO)+'
    _data_re = rf'({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)'
    _type_re = rf'^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$'
    _line_type_re = re.compile(_type_re)
    line = line.strip()
    if not line:
        return 'comment'
    match = _line_type_re.match(line)

    if match is None:
        raise ValueError(f'Unrecognized QDP line: {line}')
    for type_, val in match.groupdict().items():
        if val is None:
            continue
        if type_ == 'data':
            return f'data,{len(val.split(sep=delimiter))}'
        else:
            return type_
cbc0f5f831a80b28ce9aee01049c90c65b962ff4
696,919
def get_intermediate_objective_suffix(suffix=""):
    """
    Returns dictionary of dictionaries of suffixes to intermediate files.
    Dict contains:
        objective - initial(s) of corresponding objective(s)
        filesuffix - intermediate file/variable suffix
    Args (optional):
        suffix - appends the user defined suffix to the one used by the intermediate file
    """
    intsuffix = [tuple(["comb_weight_R", "EPN", "cwgt_r"]),
                 tuple(["comb_weight_Exp", "EPN", "cwgt_e"]),
                 tuple(["index_exp", "EPN", "ind_e"]),
                 tuple(["index_ret", "EPN", "ind_r"]),
                 tuple(["ups_flowacc", "EPNFG", "flowacc"]),
                 tuple(["dret_flowlen", "EPNFG", "flowlen"]),
                 tuple(["index_cover", "FG", "ind_c"]),
                 tuple(["index_rough", "FG", "ind_r"]),
                 tuple(["comb_weight_ret", "FG", "cwgt_r"]),
                 tuple(["comb_weight_source", "FG", "cwgt_s"]),
                 tuple(["rainfall_depth_index", "F", "rain_idx"]),
                 tuple(["precip_annual_index", "G", "prec_idx"]),
                 tuple(["aet_index", "G", "aet_idx"])]
    inter = dict()
    for sfx in intsuffix:
        filesuffix = '_'.join([sfx[2], suffix]).rstrip('_') + '.tif'
        inter[sfx[0]] = {'objective': sfx[1], 'filesuffix': filesuffix}
    return inter
b00395103d3d21749f4ce3fddc913a24289b4417
696,920
def is_suffixed(suffixes, field, allowed):
    """
    Check field for a suffix.

    Check if a field has a specific suffix where its base is in the allowed list.
    For example if the suffix is 'Modifier', the following situation is valid:

        field = 'characterStrengthModifier'
        allowed = ['characterStrength']

        Returns True
    """
    # If it's a simple string, check for a suffix
    if not isinstance(suffixes, list) and field.endswith(suffixes):
        return field[:-len(suffixes)] in allowed

    # Since there are multiple suffixes, track the combined result:
    result = False

    # If there are multiple suffixes to check, loop over them:
    for suffix in suffixes:
        if field.endswith(suffix):
            result = result or field[:-len(suffix)] in allowed

    # If any suffix was found to have a valid base field, the result will be
    # True, if none were found, it'll be False:
    return result
63cb34ee813289ed1efcdc04a4df5656d9aab29a
696,922
import numpy

def voronoi_finite_polygons_2d(vor, radius=2.):
    """
    https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram

    Reconstruct infinite voronoi regions in a 2D diagram to finite regions.

    Parameters
    ----------
    vor : Voronoi
        Input diagram
    radius : float, optional
        Distance to 'points at infinity'.

    Returns
    -------
    regions : list of tuples
        Indices of vertices in each revised Voronoi regions.
    vertices : list of tuples
        Coordinates for revised Voronoi vertices. Same as coordinates
        of input vertices, with 'points at infinity' appended to the end.
    """
    if vor.points.shape[1] != 2:
        raise ValueError("Requires 2D input")

    new_regions = []
    new_vertices = vor.vertices.tolist()

    center = vor.points.mean(axis=0)
    if radius is None:
        radius = vor.points.ptp().max()

    # Construct a map containing all ridges for a given point
    all_ridges = {}
    for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
        all_ridges.setdefault(p1, []).append((p2, v1, v2))
        all_ridges.setdefault(p2, []).append((p1, v1, v2))

    # Reconstruct infinite regions
    for p1, region in enumerate(vor.point_region):
        vertices = vor.regions[region]

        if all(v >= -0.5 for v in vertices):
            # finite region
            new_regions.append(vertices)
            continue

        # reconstruct a non-finite region
        ridges = all_ridges[p1]
        new_region = [v for v in vertices if v >= 0]

        for p2, v1, v2 in ridges:
            if v2 < 0:
                v1, v2 = v2, v1
            if v1 >= 0:
                # finite ridge: already in the region
                continue

            # Compute the missing endpoint of an infinite ridge
            t = vor.points[p2] - vor.points[p1]  # tangent
            t /= numpy.linalg.norm(t)
            n = numpy.array([-t[1], t[0]])  # normal

            midpoint = vor.points[[p1, p2]].mean(axis=0)
            direction = numpy.sign(numpy.dot(midpoint - center, n)) * n
            far_point = vor.vertices[v2] + direction * radius

            new_region.append(len(new_vertices))
            new_vertices.append(far_point.tolist())

        # sort region counterclockwise
        vs = numpy.asarray([new_vertices[v] for v in new_region])
        c = vs.mean(axis=0)
        angles = numpy.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
        new_region = numpy.array(new_region)[numpy.argsort(angles)]

        # finish
        new_regions.append(new_region.tolist())

    return new_regions, numpy.asarray(new_vertices)
c7e81711f552e5da824ca926ced03e4d3502fee0
696,925
def lorenz(XYZ, t, the=10, r=28, b=8 / 3):
    """
    The Lorenz Attractor.
    x0 = (0,1,0)
    """
    x, y, z = XYZ
    x_dt = the * (y - x)
    y_dt = x * (r - z) - y
    z_dt = x * y - b * z
    return x_dt, y_dt, z_dt
61d7ac1eb0ba1507fe4127a78ae6c83cdaeec7ef
696,926
import glob
import os

def get_image_ids(path: str) -> list:
    """
    Return a list of image IDs based on the contents of the ARD tarballs folder

    Args:
        path: Path to the folder containing the ARD tarballs

    Returns:
        Sorted list of image IDs
    """
    file_list = glob.glob(path + os.sep + "*SR*")
    return sorted([os.path.splitext(os.path.basename(f))[0] for f in file_list])
23ccafb749d38bbfd9b52e0b6570213934163dba
696,927
import time

def time_to_month(t):
    """Timestamp => month string ('YYYYMM')."""
    return time.strftime('%Y%m', time.localtime(t))
b851f332f6e9de13c10e0fc8c0cb6439092147d8
696,928
def ms_timestamp_to_epoch_timestamp(ts: int) -> int:
    """
    Converts a milliseconds timestamp to an epoch timestamp

    :param ts: timestamp in milliseconds
    :return: epoch timestamp in seconds
    """
    return int(ts / 1000)
1c27d9ef053f568bcf9e5e37309b3def5f4c8dd0
696,929
def intersection_line_plane(p0, p1, p_co, p_no, epsilon=1e-9):
    """
    Copied from https://stackoverflow.com/questions/5666222/3d-line-plane-intersection

    p0, p1: Define the line.
    p_co, p_no: define the plane:
        p_co Is a point on the plane (plane coordinate).
        p_no Is a normal vector defining the plane direction;
             (does not need to be normalized).

    Return a Vector or None (when the intersection can't be found).
    """

    def add_v3v3(v0, v1):
        return (v0[0] + v1[0], v0[1] + v1[1], v0[2] + v1[2])

    def sub_v3v3(v0, v1):
        return (v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2])

    def dot_v3v3(v0, v1):
        return ((v0[0] * v1[0]) + (v0[1] * v1[1]) + (v0[2] * v1[2]))

    def len_squared_v3(v0):
        return dot_v3v3(v0, v0)

    def mul_v3_fl(v0, f):
        return (v0[0] * f, v0[1] * f, v0[2] * f)

    u = sub_v3v3(p1, p0)
    dot = dot_v3v3(p_no, u)

    if abs(dot) > epsilon:
        # The factor of the point between p0 -> p1 (0 - 1)
        # if 'fac' is between (0 - 1) the point intersects with the segment.
        # Otherwise:
        #  < 0.0: behind p0.
        #  > 1.0: infront of p1.
        w = sub_v3v3(p0, p_co)
        fac = -dot_v3v3(p_no, w) / dot
        u = mul_v3_fl(u, fac)
        return add_v3v3(p0, u)
    else:
        # The segment is parallel to plane.
        return None
42ea283d5a9798a706e713dda8599c3d33a05cec
696,930
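A minimal usage sketch for the plane-intersection helper above; the input points are an assumption chosen for illustration, not from the source: intersecting the segment (0, 0, -1) to (0, 0, 1) with the z = 0 plane should land on the origin.

    p0, p1 = (0.0, 0.0, -1.0), (0.0, 0.0, 1.0)
    plane_point, plane_normal = (0.0, 0.0, 0.0), (0.0, 0.0, 1.0)
    assert intersection_line_plane(p0, p1, plane_point, plane_normal) == (0.0, 0.0, 0.0)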
import six

def inverse_dict(x):
    """
    Swap keys and values (invert the mapping)
    """
    return {v: k for k, v in six.iteritems(x)}
05239e5ff375fe2b1345f915f5745befc869062d
696,931
def gen_trace_ids_exps(base_id, base_exp=None, group_size=3, group_count=5,
                       block_count=6, group_jump=18, inverse=False,
                       base_exp_group=None, skip=0):
    """ Generates the list of trace_ids to load and plot. Returns a list of
    lists of trace_id.

    Args:
    - base_id: first trace_id in the first list of lists.
    - base_exp: if set, base_exp is added at the beginning of each list.
    - group_size: size of the group of experiments.
    - group_count: number of groups per block of experiments.
    - block_count: number of lists in the returned list of lists.
    - group_jump: number of trace_ids to jump from one group to the next.
    - inverse: if True, the first group is at the end of each row list.
    - base_exp_group: if set, base_exp_group is added at the beginning of each group.
    - skip: number of trace_ids to jump between blocks.

    Returns: a list of block_count lists. Each list may be started by base_exp
    if set. Each list is composed by group_count groups of group_size size.
    If base_exp_group is set, it is added to each group.
    """
    trace_id_rows_colors = []
    for block_i in range(block_count):
        trace_id_row = []
        trace_id_rows_colors.append(trace_id_row)
        if base_exp is not None:
            trace_id_row.append(base_exp)
        group_index_list = list(range(group_count))
        if inverse:
            group_index_list = reversed(group_index_list)
        for group_i in group_index_list:
            if base_exp_group is not None:
                trace_id_row.append(base_exp_group)
            for exp_i in range(group_size):
                trace_id = (base_id + (group_size + skip) * block_i
                            + (group_jump) * group_i + exp_i)
                trace_id_row.append(trace_id)
    return trace_id_rows_colors
3d22c9776b810784fd596f92a21758b446a084d4
696,932
def _convert_to_barycentric(point, simplex, coordinates):
    """
    Converts the coordinates of a point into barycentric coordinates given a simplex.

    Given a 2D point inside a simplex (a line segment or a triangle), find out its
    barycentric coordinates. In the case of the line (1-simplex), this would be the
    point expressed as a linear combination of the two endpoints. In the case of the
    triangle (2-simplex), this would be the point expressed as a linear combination
    of three corner coordinates.

    This method will not work when finding barycentric coordinates of points within
    a triangle or line segment in R^3. It is only meant for finding barycentric
    coordinates of 2D points within 2D line segments or triangles.

    Parameters
    ----------
    point: list of floats, length 2
        The 2D coordinates of the flattened vertex. The z-component should be 0.
    simplex: list of ints, length 2 or 3
        The indices corresponding to coordinates making up the line segment/triangle.
    coordinates: list of pairs of floats
        The 2D coordinate system in which the point and simplex lie.

    Returns
    -------
    list of floats, length 2 or 3
        The lambda values (i.e. the weights used in the linear combination)
        corresponding to the barycentric coordinates of the point in the simplex.
        Length depends on the type of simplex - 2 if a line, 3 if a triangle.
        If all values are between 0 and 1, the point is in the simplex.
    """
    if not len(point) == len(coordinates[0]) == 2:
        raise Exception("_convert_to_barycentric: Invalid coordinate dimensions. "
                        "This method only accepts coordinates in 2D.")

    # initialise result
    result = []

    # if the simplex is a triangle, calculate the barycentric coordinates of the
    # point in the triangle
    if len(simplex) == 3:
        # get coordinates from vertices of simplex
        triangle_coordinates = [coordinates[i] for i in simplex]
        (x_0, y_0), (x_1, y_1), (x_2, y_2) = triangle_coordinates

        # find each of the three weights
        lambda_0 = ((y_1 - y_2) * (point[0] - x_2) + (x_2 - x_1) * (point[1] - y_2)) / \
                   ((y_1 - y_2) * (x_0 - x_2) + (x_2 - x_1) * (y_0 - y_2))
        lambda_1 = ((y_2 - y_0) * (point[0] - x_2) + (x_0 - x_2) * (point[1] - y_2)) / \
                   ((y_1 - y_2) * (x_0 - x_2) + (x_2 - x_1) * (y_0 - y_2))
        lambda_2 = 1 - lambda_0 - lambda_1
        result = [lambda_0, lambda_1, lambda_2]

    # if the simplex is a line segment, find the proportions of each point in the line segment
    elif len(simplex) == 2:
        # since it's linear interpolation, the proportions are the same for both
        # x and y components, so we just use one of them
        x_0, x_1 = coordinates[simplex[0]][0], coordinates[simplex[1]][0]

        # find the two weights
        lambda_1 = (point[0] - x_0) / (x_1 - x_0)
        lambda_0 = 1 - lambda_1
        result = [lambda_0, lambda_1]
    else:
        raise Exception("_convert_to_barycentric: Invalid input simplex. "
                        "This method is only defined for triangles and edges")

    return result
d895325f8a605cefaa8ecd9e2cc4edd07690b932
696,933
def filter_by_price(res, max, min):
    """
    res should be the return value of search (a list of Products as dictionaries)
    """
    filtered_search = []
    for r in res:
        if r["PriceInEuros"] <= max and r["PriceInEuros"] >= min:
            filtered_search.append(r)
    return filtered_search
    # return searched_view.query.filter(searched_view.PriceInEuros == filter_value)
99c2c21b257f817998afd7c8573ceeec5239fd12
696,935
def create_dict(min_count):
    """
    :param min_count: minimum number of occurrences for a word to be kept
    :return:
    """
    input_file = open("text8.txt", "r", encoding='utf-8')
    word_count_sum, sentence_count, word2id_dict, id2word_dict, wordid_frequency_dict, word_freq = 0, 0, {}, {}, {}, {}
    for line in input_file:
        line = line.strip().split()
        word_count_sum += len(line)
        sentence_count += 1
        for i, word in enumerate(line):
            if word_freq.get(word) is None:
                word_freq[word] = 1
            else:
                word_freq[word] += 1
    for i, word in enumerate(word_freq):
        if word_freq[word] < min_count:
            word_count_sum -= word_freq[word]
            continue
        word2id_dict[word] = len(word2id_dict)
        id2word_dict[len(id2word_dict)] = word
        # a word was just added above, so subtract 1 to get its index
        wordid_frequency_dict[len(word2id_dict) - 1] = word_freq[word]
    return word2id_dict, id2word_dict, wordid_frequency_dict
fcc28b9b457a568fabae8bc67c9d2ffba3b76362
696,936
import importlib

def get_net_driver_func(benchmark_name):
    """
    :param benchmark_name: This MUST be the exact name of the actual benchmark
                           (tcp_driver_name === benchmark_name)
    :return: The net_driver function of the given benchmark
    """
    tcp_driver_name = "." + benchmark_name
    net_driver_func = importlib.import_module(tcp_driver_name, package='iot2_tcp_drivers')
    return net_driver_func
235ff43679f2928e989e436d6e89c53d280eaa86
696,937
import base64
import requests
import json
import random

def __getCookies(weibo):
    """ Fetch cookies """
    cookies = []
    loginURL = r'https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)'
    for elem in weibo:
        account = elem.no
        password = elem.psw
        username = base64.b64encode(account.encode('utf-8')).decode('utf-8')
        postData = {
            "entry": "sso",
            "gateway": "1",
            "from": "null",
            "savestate": "30",
            "useticket": "0",
            "pagerefer": "",
            "vsnf": "1",
            "su": username,
            "service": "sso",
            "sp": password,
            "sr": "1440*900",
            "encoding": "UTF-8",
            "cdult": "3",
            "domain": "sina.com.cn",
            "prelt": "0",
            "returntype": "TEXT",
        }
        session = requests.Session()
        r = session.post(loginURL, data=postData)
        jsonStr = r.content.decode('utf-8')
        info = json.loads(jsonStr)
        if info["retcode"] == "0":
            # print("Get Cookie Success!( Account:%s )" % account)
            cookie = session.cookies.get_dict()
            cookies.append(cookie)
        else:
            print("Failed!( Reason:%s )" % info['reason'])
    return random.choice(cookies)
413ed10f3b7e453975772b28c703e5cc24a38d1e
696,938
import logging

def get_logger() -> logging.Logger:
    """
    Create a logging.Logger.

    Returns:
        logger (logging.Logger): an instance of logging.Logger
    """
    logger = logging.getLogger(__name__)
    return logger
233d118748fcddb4eb42aae0acd19fd4ce2d26b7
696,941
from typing import Counter

def is_anagram(word, _list):
    """
    Checks if the given word has its anagram(s) on the given list of words.

    :param word: word
    :param _list: list of words
    :return: a list of found anagrams
    """
    word = word.lower()
    anagrams = []
    for words in _list:
        if word != words.lower():
            if Counter(word) == Counter(words.lower()):
                anagrams.append(words)
    return anagrams
dc6da021ed46e5068f16608fb9f3f78fedeea4a8
696,942
def diagonal(line):
    """Indicates whether or not `line` is diagonal."""
    return (line.p.x != line.q.x) and (line.p.y != line.q.y)
b5c64e983c429023ccd2c7e57bba59511567e2d5
696,943
def cleave(sequence, index):
    """
    Cleaves a sequence in two, returning a pair of items before the index
    and items at and after the index.
    """
    return sequence[:index], sequence[index:]
5f542de88eb4cd7496a2308c9625cc7db766c90c
696,944
def age_to_eye_diameter(age):
    """Calculates the size of an eye given an age

    Args:
        age (float): age of the observer

    Returns:
        D_eye (float): Best diameter of Eyepiece (mm)
    """
    if age <= 20:
        D_eye = 7.5
    elif (age > 20) and (age <= 30):
        D_eye = 7
    elif (age > 30) and (age <= 35):
        D_eye = 6.5
    elif (age > 35) and (age <= 45):
        D_eye = 6
    elif (age > 45) and (age <= 60):
        D_eye = 5.5
    else:
        D_eye = 5.0
    return D_eye
f67154b48ec3e246e6049bfcb89c67b908d896b9
696,945
def fetch_noncompressed_file(name, fido, downloader=None):
    """
    Load an uncompressed file from the data registry
    """
    fname = fido.fido.fetch(name, downloader=downloader)
    return fname
54437726d3b1827bf0014ea10a324181e4a8a973
696,946
def transform_zip(number):
    """Get rid of extended format ZIP code."""
    zip_code = number.split("-")[0]
    return zip_code
9ef27547f501b6f77ffff0198a4965b3082c2c91
696,947
def flatten_list(list_):
    """
    Flattens out nested lists and tuples (tuples are converted to lists for this purpose).

    Example:
        In [1]: flatten_list([1, [[2, 3], [4, 5]], 6])
        Out[1]: [1, 2, 3, 4, 5, 6]
    """
    items = []
    for element in list_:
        if isinstance(element, (list, tuple)):
            items.extend(flatten_list(list(element)))
        else:
            items.append(element)
    return items
c914976b1ded7b43d6a2031ac51aca9299c6a2ce
696,948
def base_to_dec(string, base, digits):
    """Converts strings to decimal numbers via a custom base and digits."""
    if string == '':
        return 0
    return digits.index(string[0]) + base_to_dec(string[1:], base, digits) * base
6f608e280e6f45207b024dddfb768ee1df961edb
696,950
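Worth noting: the recursion above treats the first character as the least significant digit. A small hedged illustration (the call values are assumed, chosen so digit order does not matter):

    # 'ff' in base 16 with hexadecimal digits -> 255
    assert base_to_dec("ff", 16, "0123456789abcdef") == 255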
def vote_pourcentage(string):
    """ (str) -> float

    Counts the total number of 'oui', 'abstention', and the total of words
    in a given string.

    Restrictions: string must contain at least one 'oui' or one 'non'.
    """
    total_yes = 0
    total_votes = 0
    total_abs = 0

    # Count all 'oui', 'abstention', and words.
    for i in string.split():
        total_votes += 1
        if i == 'oui':
            total_yes += 1
        elif i == 'abstention':
            total_abs += 1
        elif i == 'non':
            continue
        # Count anything other than 'oui' or 'non' as 'abstention'
        else:
            total_abs += 1

    return (total_yes / (total_votes - total_abs)) * 100
bfcb80a00d8bfe650a0a50a75b5a86ecb9187f3d
696,951
def jetCollectionString(prefix='', algo='', type=''):
    """
    ------------------------------------------------------------------
    return the string of the jet collection module depending on the
    input values. The default return value will be 'patAK5CaloJets'.

    algo   : indicating the algorithm type of the jet [expected are
             'AK5', 'IC5', 'SC7', ...]
    type   : indicating the type of constituents of the jet [expected
             are 'Calo', 'PFlow', 'JPT', ...]
    prefix : prefix indicating the type of pat collection module
             (expected are '', 'selected', 'clean').
    ------------------------------------------------------------------
    """
    if prefix == '':
        jetCollectionString = 'pat'
    else:
        jetCollectionString = prefix
        jetCollectionString += 'Pat'
    jetCollectionString += 'Jets'
    jetCollectionString += algo
    jetCollectionString += type
    return jetCollectionString
c77c2fd3afcccb1e62e1d92103ce677775fae0e4
696,952
import re

def sanitize_tag_label(label_string):
    """Return label_string slugified: punctuation stripped, uppercased,
    and whitespace/dashes collapsed to single dashes."""
    return re.sub(r'[-\s]+', '-',
                  (re.sub(r'[^\w\s]', '', label_string).strip().upper()))
1fee28a86533a5fda3a34a647c445fdaa73e3c59
696,953
def strip_extension(input_string: str, max_splits: int) -> str:
    """Strip the extension from a string, returning the file name"""
    output = input_string.rsplit(".", max_splits)[0]
    return output
62c388fd1cf11883f9e2b0259fb9ec7632537bd2
696,954
import os

def get_abs_dir(path: str) -> str:
    """Returns absolute path"""
    return os.path.abspath(path)
985184d7c5b4987426f44dee488416d4332cc4ae
696,955
def getVertices(obj, world=False, first=False):
    """Get the vertices of the object."""
    if first:
        print('-----getVertices')
    vertices = []
    if obj.data:
        if world:
            vertices.append([obj.matrix_world * x.co for x in obj.data.vertices])
        else:
            vertices.append([x.co for x in obj.data.vertices])
        # print('+++++', vertices[-1])
    for idx_child, child in enumerate(obj.children):
        vertices.extend(getVertices(child, world=world))
        print(idx_child)
    return vertices
158700e2578aeacbcb74b9695a6165a8611a86a2
696,956
import re

def header(line):
    """ add '>' if the symbol is missing at the end of line """
    line = line.strip()
    if re.match("##.*<.*", line) and line[-1:] != '>':
        line = line + '>'
    return line
b06488ff95d154836d622f55ebb5b26661d1fceb
696,957
def safe_stringify(value):
    """
    Converts incoming value to a unicode string. Convert bytes by decoding,
    anything else has __str__ called.
    Strings are checked to avoid duplications
    """
    if isinstance(value, str):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    return str(value)
c591f0b86f0f73f0ebf7f8253f7ecfd1e979a906
696,958
def normalize_expasy_id(expasy_id: str) -> str:
    """Return a standardized ExPASy identifier string.

    :param expasy_id: A possibly non-normalized ExPASy identifier
    """
    return expasy_id.replace(" ", "")
30107cd75cba116b977a001f3839b1e9f32395ce
696,959
import math

def break_likelihood(datalayer, feature, median_speed):
    """
    Calculate break_likelihood for a point based on point speed & angle between previous & next points

    :param datalayer: gps segment
    :param feature: gps point id to check
    :param median_speed: median speed for gps segment
    :return: category_break: High/Medium/Low break likelihood for point
             category_speed: High/Medium/Low point speed
             category_angle: Wide/Narrow point angle
             line_direction: Quadrant the direction of travel is heading
    """
    prevfeature = datalayer.getFeature(feature - 1)
    feature = datalayer.getFeature(feature)
    a1 = prevfeature.geometry().angleAtVertex(0) * 180 / math.pi
    a2 = feature.geometry().angleAtVertex(0) * 180 / math.pi
    speed = feature.attribute('speed')

    # Set angle = 180 for first point in segment
    try:
        if feature["Segment No"] == prevfeature["Segment No"]:
            angle = abs(180 - abs(a1 - a2))
        else:
            angle = 180
    except:
        angle = 180

    if speed > 10:
        category_speed = 'High'
    elif speed <= median_speed / 2:
        category_speed = 'Zero'
    else:
        category_speed = 'Low'

    if angle > 90:
        category_angle = 'Wide'
        if category_speed == 'Zero' or category_speed == 'High':
            category_break = 'Medium'
        else:
            category_break = 'Low'
    else:
        category_angle = 'Narrow'
        if category_speed == 'Low' or category_speed == 'Zero':
            category_break = 'High'
        else:
            category_break = 'Medium'

    if 0 <= a2 < 90:
        line_direction = 1
    elif 90 <= a2 < 180:
        line_direction = 2
    elif 180 <= a2 < 270:
        line_direction = 3
    else:
        line_direction = 4

    return category_break, category_speed, category_angle, line_direction
481cfd318c4e121db80dbfe9b677251db0f72df5
696,960
def test_learning(learner, X, y):
    """
    Returns predictions for the same examples that were used for learning.
    This is the WRONG way to evaluate model performance!

    Example call:
        res = test_learning(LogRegLearner(lambda_=0.0), X, y)
    """
    c = learner(X, y)
    results = [c(x) for x in X]
    return results
7bc140652b05461d469d1a06dee79e392a041cd0
696,961
def get_excel_column_index(column: str) -> int:
    """
    This function converts an excel column to its respective column index
    as used by pyexcel package.
    viz. 'A' to 0
         'AZ' to 51

    :param column:
    :return: column index of type int
    """
    index = 0
    column = column.upper()
    column = column[::-1]
    for i in range(len(column)):
        index += ((ord(column[i]) % 65 + 1) * (26 ** i))
    return index - 1
a42b05e2c08c3a6611c066fa49153f6dade93728
696,962
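A short check matching the cases named in the docstring above (a hedged illustration, assuming the function as reformatted):

    assert get_excel_column_index("A") == 0
    assert get_excel_column_index("AZ") == 51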
def shift_conv(matchobj):
    """
    Transform '(a<b)' into 'a * 2**b'.
    """
    shift = 1 << int(matchobj.group(2))
    formula = '{}*{}'.format(matchobj.group(1), shift)
    return formula
c17d4c8585e18cc99d9e70848a2f9e776a21cdf6
696,963
def convert_db_dict_into_list(db_dict):
    """Convert dictionary of processes into list of processes."""
    db_list = []
    for key in db_dict.keys():
        assert key == db_dict[key]["@id"]
        db_list.append(db_dict[key])
    return db_list
ce0d678ffef92a66f5ce988bf2b7143248a1a60d
696,964
def join(a, *p):
    """Taken from python posixpath."""
    sep = '/'
    path = a
    if not p:
        # no-op slice; in posixpath this checks that a and p have compatible types
        path[:0] + sep
    for b in p:
        if b.startswith(sep):
            path = b
        elif not path or path.endswith(sep):
            path += b
        else:
            path += sep + b
    return path
5cf0783c995d4da115eae7f5f85f56eb5407f36a
696,965
def est_palindrome3(s: str) -> bool:
    """ ... see above ... """
    i: int
    for i in range(0, len(s) // 2):
        if s[i] != s[-i - 1]:
            return False
    return True
871a169d2e82fe733ef23e8d63f466afe1c76599
696,966
def dbname():
    """
    database name fixture

    :return: test db name
    """
    return 'jhhalchemy_test'
4b5152fbbc2d2c70edfae16d286f69f61159950a
696,967
def calculate_profit(region, country_parameters):
    """
    Estimate npv profit.

    This is treated as the Net Operating Profit After Taxes Margin (NOPAT).
    NOPAT ranges from ~5-15% depending on the context:
    https://static1.squarespace.com/static/54922abde4b0afbec1351c14/t/583c49c8bebafb758437374e
    """
    investment = (
        region['mno_network_cost'] +
        region['administration'] +
        region['spectrum_cost'] +
        region['tax']
    )
    profit = investment * (country_parameters['financials']['profit_margin'] / 100)
    return profit
2791000709e55f7f3640c501779ef5da9a4f4e6a
696,968
def taskname(*items):
    """A task name consisting of items."""
    s = '_'.join('{}' for _ in items)
    return s.format(*items)
c8283ea85818b3e3bac6ec5357439e8c944b05e8
696,969
def add_doc(m):
    """ Grab the module docstring """
    return (('doc', m.groups()[0]), )
d458adb9c58812f5cb5df5913bcc68e3ec256e4c
696,970
import itertools

def generate_addresses(addr: str, mask: str) -> list:
    """generate all possible addresses from floating mask"""
    generator_bits = list()
    addresses = list()
    addr = format(int(addr), '036b')
    for addr_bit, mask_bit in zip(addr, mask):
        if mask_bit == '0':
            generator_bits.append(addr_bit)
        elif mask_bit == '1':
            generator_bits.append('1')
        else:
            generator_bits.append('01')
    for address in itertools.product(*generator_bits):
        addresses.append(int(''.join(address), 2))
    return addresses
bbc49ab230743eaab4a305622b61402992db2225
696,971
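A small hedged example for the floating-mask decoder above; the 36-character mask is an assumption chosen for illustration: with address '2' and a mask that floats a single bit, the function expands to both settings of that bit.

    mask = "0" * 34 + "X0"  # 36-bit mask with one floating bit
    assert generate_addresses("2", mask) == [0, 2]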