content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from dateutil import tz


def local_time(dt):
    """Convert a naive UTC datetime into an aware datetime in the local zone.

    :param dt: A datetime.datetime object (interpreted as UTC), or None.
    :returns: A timezone-aware datetime.datetime in the user's local
        timezone, or None when ``dt`` is None.
    """
    if dt is None:
        return None
    utc_aware = dt.replace(tzinfo=tz.tzutc())
    return utc_aware.astimezone(tz.tzlocal())
73d66c916fe7c849184181faf781715535da3166
31,654
def test_generator_frame_cycle():
    """
    >>> test_generator_frame_cycle()
    ("I'm done",)
    """
    # Records a marker when the generator's finally clause runs, proving
    # the generator is finalized despite the frame reference cycle below.
    testit = []
    def whoo():
        try:
            yield
        except:
            # Swallow the thrown exception and suspend again, so the
            # generator is still alive (pending finalization) afterwards.
            yield
        finally:
            testit.append("I'm done")
    g = whoo()
    next(g)
    # Frame object cycle
    # eval() runs the throw in a frame whose globals reference g, creating a
    # frame <-> generator cycle; after 'del g' drops the last direct
    # reference, finalization must still run the finally clause above.
    eval('g.throw(ValueError)', {'g': g})
    del g
    return tuple(testit)
d965091e4347f636a9c2a93c5a00bdc96b3ddce5
31,656
def get_notes():
    """Read all notes, one per line, from data/notes.txt.

    :return: a list containing all notes (one string per line, without the
        trailing newline)
    """
    # Fix: the original used line[0], which kept only the FIRST CHARACTER of
    # each line, contradicting the documented "all notes" behavior.
    with open('data/notes.txt') as file:
        notes = [line.rstrip('\n') for line in file]
    return notes
bf4a3ec34b484d5e945117252ecb51a63685cb34
31,657
def _lps(maxlen): """ Brute force regular expression pattern for a length prefixed domain name component. """ return BR'|'.join(BR'\x%02x[a-z0-9\-\_]{%d}' % (d, d) for d in range(1, maxlen + 1))
1a527e7466ea2fc9fd2a331b63ece0c610b39ba4
31,658
def read_xml_tree(elt):
    """Recursively convert an ElementTree element into nested dicts.

    Leaf elements are reduced to their text; elements with children become a
    dict keyed by child tag (later siblings with the same tag overwrite
    earlier ones, matching the original behavior).

    :param elt: xml.etree.ElementTree.Element to convert
    :return: nested dict for branch nodes, or the element text for leaves
    """
    # Fix: Element.getchildren() was removed in Python 3.9; iterating the
    # element yields its direct children and is the supported equivalent.
    children = list(elt)
    if children:
        return {child.tag: read_xml_tree(child) for child in children}
    return elt.text
7e9d951ca5bf346fc1b1e7c649d7e5570aa9df81
31,660
def check_build_success():
    """Return a checker asserting the build succeeded with no warnings.

    The returned callable takes the captured status and warning streams and
    asserts "build succeeded" appears in status and warnings are empty.
    """
    def _verify(status, warning):
        status_text = status.getvalue()
        assert "build succeeded" in status_text
        assert warning.getvalue().strip() == ""
    return _verify
6f0affe4ff4bbf1781ef931bb139348b336f884a
31,661
def _getattr(obj, attr): """Try to get the attribute as if the object is a Namespace, but if it fails, get it as it if were a key from a dictionary. This allows us to play nice with both Namespace objects (method='create') as well as dicts (method='yaml') """ try: return getattr(obj, attr) except AttributeError: return obj[attr]
c6f46800ad5fa339fdb3a0924678ade4f77de785
31,662
def _aggregate_counts(child_counts): """Return aggregated node count as int.""" if not child_counts: return 1 elif len(child_counts) == 1: return child_counts[0] elif len(child_counts) == 2: return child_counts[0] * child_counts[1] else: raise ValueError
a4b691341d7283b0800bffd3bc926b9ff71e1bdb
31,663
def clean_10x_adata_var(adata):
    """Tidy ``adata.var``: drop 10x bookkeeping columns, index by gene id.

    Drops the "feature_types" and "genome" columns when present, moves the
    old index into a "gene_symbols" column, re-indexes on "gene_ids" and
    clears the index name.  Returns the (mutated) adata object.
    """
    drop_cols = [c for c in ("feature_types", "genome") if c in adata.var.columns]
    cleaned = adata.var.drop(columns=drop_cols).reset_index()
    cleaned = cleaned.rename(columns={"index": "gene_symbols"}).set_index("gene_ids")
    cleaned.index.name = None
    adata.var = cleaned
    return adata
24887824acd497d0afc321ba3483a7240c969ceb
31,666
def from_datastore(entity):
    """Flatten a datastore entity into a field list.

    Datastore typically returns [Entity{key: (kind, id), prop: val, ...}];
    this returns [name, description, pageCount, author, review].
    Note: when given a list, the LAST element is consumed via list.pop().
    """
    if not entity:
        return None
    if isinstance(entity, list):
        entity = entity.pop()
    fields = ('name', 'description', 'pageCount', 'author', 'review')
    return [entity[f] for f in fields]
45b7b4690258b380bf7c7e69c631cf4f520559e2
31,667
import math


def round_sample(input_dataframe, frac=0.1, min_samples=1):
    """Randomly sample a dataframe, guaranteeing at least ``min_samples`` rows."""
    target = math.floor(len(input_dataframe) * frac)
    if target < min_samples:
        target = min_samples
    return input_dataframe.sample(target)
1e6279a9e069929bdf9fef931b00d9d99a9b6f0c
31,668
def r_e_KimKim(N_s):
    """Return the effective drop radius, computed as (4*N_s)**-0.5."""
    density_term = 4 * N_s
    return density_term ** (-0.5)
0c85c71f29e320809da42e071bccedfff4fbb703
31,670
import time


def time_sorting(sort_fxn, data):
    """Record the run time of each call of a sorting function.

    :param sort_fxn: sorting callable invoked as sort_fxn(vec)
    :param data: a list of vectors to sort
    :returns: per-vector elapsed process times, in order
    """
    durations = []
    previous = time.process_time()
    for vec in data:
        sort_fxn(vec)
        now = time.process_time()
        durations.append(now - previous)
        previous = now
    return durations
5d7f2e83ec2a74202264447e24fb43dbd03ab83c
31,671
def null():
    """Return a generator factory that yields nothing for any input."""
    def _empty(input):
        return iter(())
    return _empty
c1ac07f1270439105350e672c6238c2e0dde46b2
31,672
def GetLostDistributionArr(aprtNodes, bunch_lost):
    """
    Function returns the array with [aptrNode,sum_of_losses]
    The sum_of_losses is a number of particles or the sum of macro sizes
    if the particle attribute "macrosize" is defined.
    """
    lossDist_arr = []
    aprtPos_arr = []
    #--- first we will sort apertures according to the position
    aprtNodes = sorted(aprtNodes, key = lambda x: x.getPosition(), reverse = False)
    for aprt_node in aprtNodes:
        aprtPos_arr.append(aprt_node.getPosition())
        loss_sum = 0.
        lossDist_arr.append([aprt_node,loss_sum])
    # No apertures: nothing to distribute losses over.
    if(len(aprtPos_arr) <= 0): return lossDist_arr
    #-----------------------------------------------------------------
    def indexFindF(pos_arr,pos,ind_start = -1,ind_stop = -1):
        """ This function will find the index of nearest to pos point in pos_arr"""
        # First call: initialize the search bounds to the whole array.
        if(ind_start < 0 or ind_stop < 0):
            ind_start = 0
            ind_stop = len(pos_arr) - 1
            return indexFindF(pos_arr,pos,ind_start,ind_stop)
        # Bounds adjacent (or equal): pick whichever endpoint is closer.
        if(abs(ind_start - ind_stop) <= 1):
            dist0 = abs(pos_arr[ind_start] - pos)
            dist1 = abs(pos_arr[ind_stop] - pos)
            if(dist0 <= dist1): return ind_start
            return ind_stop
        # Recursive binary-search step (pos_arr is sorted above).
        ind_mdl = int((ind_start + ind_stop)/2.0)
        if(pos_arr[ind_start] <= pos and pos <pos_arr[ind_mdl]):
            return indexFindF(pos_arr,pos,ind_start,ind_mdl)
        else:
            return indexFindF(pos_arr,pos,ind_mdl,ind_stop)
    #-----------------------------------------------------------------
    has_macrosize_partAttr = bunch_lost.hasPartAttr("macrosize")
    # Without lost-position bookkeeping there is nothing to distribute.
    if(not bunch_lost.hasPartAttr("LostParticleAttributes")): return lossDist_arr
    macroSize = bunch_lost.macroSize()
    nParticles = bunch_lost.getSize()
    for ind in range(nParticles):
        # Attribute index 0 of "LostParticleAttributes" is the loss position.
        pos = bunch_lost.partAttrValue("LostParticleAttributes",ind,0)
        # Skip losses outside the span covered by the apertures.
        if(pos < aprtPos_arr[0] or pos > aprtPos_arr[len(aprtPos_arr)-1]): continue
        pos_ind = indexFindF(aprtPos_arr,pos)
        if(has_macrosize_partAttr):
            # Per-particle macrosize overrides the bunch-level value.
            macroSize = bunch_lost.partAttrValue("macrosize",ind,0)
            lossDist_arr[pos_ind][1] += macroSize
            continue
        # No macrosize attribute: each lost particle counts as 1.
        lossDist_arr[pos_ind][1] += 1.0
    return lossDist_arr
b239bdf94e779e9c5c67cf0d1b98ab5a64800158
31,673
import os


def get_file_extension(fname):
    """Return the given file's extension as a string, '.' included."""
    return os.path.splitext(fname)[1]
c814410e91e354383d3274d48e1e8f42b63a7e7a
31,675
def caffe_compute(transformed_image, caffe_net=None, output_layers=None):
    """Run a Caffe network on an already-preprocessed input image.

    :param transformed_image: image preprocessed for input into Caffe.
    :param caffe.Net caffe_net: network to run; when None, [] is returned.
    :param list output_layers: names of the layers whose outputs are
        wanted; defaults to the network's standard outputs.
    :return: outputs of the first requested layer (as floats), or [].
    """
    if caffe_net is None:
        return []
    # Fall back to the net's default output blobs when none were requested.
    if output_layers is None:
        output_layers = caffe_net.outputs
    input_name = caffe_net.inputs[0]
    all_outputs = caffe_net.forward_all(blobs=output_layers,
                                        **{input_name: transformed_image})
    return all_outputs[output_layers[0]][0].astype(float)
09d6581a5db5e184092742ae4296cc3310122507
31,676
import os


def get_districts(root_path):
    """Return a generator of district paths.

    Starts from the directory containing all the districts; a district is
    assumed to be any directory in ``root_path``.
    """
    return (
        os.path.join(root_path, name)
        for name in os.listdir(root_path)
        if os.path.isdir(os.path.join(root_path, name))
    )
bb201a746fab9d2a64b5ef2efeeb80835639b40d
31,677
def kstairs(n, k):
    """Count the ways to take n steps choosing 1..k steps at a time.

    >>> kstairs(5, 2)
    8
    >>> kstairs(5, 5)
    16
    >>> kstairs(10, 5)
    464
    """
    if n == 0:
        return 0
    if n <= k:
        # Every composition of n is reachable: 2**(n-1) of them.
        return 2 ** (n - 1)
    return sum(kstairs(n - step, k) for step in range(1, k + 1))
07484fe1186967ba892d3d4e3284962d93a36b91
31,678
import random


def rand(request):
    """agent item python.random[lower, upper]"""
    params = request.params
    if len(params) != 2:
        raise ValueError('Invalid number of parameters')
    lower = int(params[0], 10)
    upper = int(params[1], 10)
    if lower > upper:
        raise ValueError('Incorrect range given')
    return random.randrange(lower, upper)
fb1aa42eb3dbd3dea9615e5fe53fbcd21bcab62b
31,679
def _compute_derived_var(var_key, derived_vars_dict, nc_file):
    """Call the first valid derivation from the derived_vars_dict dict."""
    derived_vars = derived_vars_dict[var_key]
    # store a list of all inputs visited, so if Exception, we get a good msg.
    derived_var_inputs = []
    # get the first function and inputs from the derived_vars_dict dict
    for inputs, func in list(derived_vars.items()):
        derived_var_inputs.append(inputs)
        # tuples with a single string [ex: ('pr')] become just a string ['pr']
        # are all of the variables (inputs) in the nc_file?
        if isinstance(inputs, str) and inputs in nc_file.variables:
            # Single-input derivation: read and squeeze that one variable.
            args = [nc_file(inputs)(squeeze=1)]
            return func(*args)
        elif isinstance(inputs, tuple) and set(inputs).issubset(nc_file.variables):
            # Multi-input derivation: all inputs must be present in the file.
            args = [nc_file(var)(squeeze=1) for var in inputs]
            return func(*args)
    # When nc_file is obs, there is var_key in nc_file, i.e. nc_file(var_key)
    if var_key in nc_file.variables:
        return nc_file(var_key)(squeeze=1)
    raise RuntimeError('None of the variables (%s) are in the file: %s' % (
        derived_var_inputs, nc_file.id))
a77253e33393a1515f58867a5a5e496d421804f7
31,680
def get_corr_reg_name(curr_name: str) -> str:
    """Map outdated PRG region/municipality names to their current names.

    The PRG source data predates several official renames; names without a
    known rename pass through unchanged.
    """
    # Known renames: powiat "JELENIOGORSKI" -> "KARKONOSKI" (since 2021),
    # gmina "SITKOWKA-NOWINY" -> "NOWINY" (since 2021), and
    # gmina "SLUPIA (KONECKA)" -> "SLUPIA KONECKA" (since 2018).
    renames = {
        "JELENIOGORSKI": "KARKONOSKI",
        "SITKOWKA-NOWINY": "NOWINY",
        "SLUPIA (KONECKA)": "SLUPIA KONECKA",
    }
    return renames.get(curr_name, curr_name)
7fb0709868e66e73a0d736f3a0860fefbb0d6cd0
31,681
def este_corect(expresie):
    """Check that all parentheses/brackets in the expression are balanced.

    Returns 1 when every '(' / '[' is closed by a matching ')' / ']' in the
    correct order, 0 otherwise.  Non-bracket characters are ignored.

    Fixes two defects in the original: (1) any non-bracket character popped
    the stack, so e.g. "(a)" was rejected; (2) leftover unclosed openers
    (e.g. "(") were accepted.
    """
    pairs = {')': '(', ']': '['}
    stk = []
    for char in expresie:
        if char == '(' or char == '[':
            stk.append(char)
        elif char in pairs:
            # A closer must match the most recent opener.
            if not stk or stk.pop() != pairs[char]:
                return 0
    # Any opener left on the stack means the expression is unbalanced.
    return 0 if stk else 1
a27881687b0afa69e2a0a8192f79b6b81b176de0
31,682
import os


def api_key():
    """Return the API key from the DF_API_KEY environment variable.

    Raises KeyError when the variable is unset.
    """
    key = os.environ['DF_API_KEY']
    return key
3c9b58f9ddab3553fce8f701aa45f22f56e7364d
31,684
def bin_names_to_coords_filepath(query_bin, ref_bin, results_dir):
    """Prepare a results file path of the form <results_dir>/<query>_to_<ref>.

    :param query_bin: bin number 1 name/filepath (reference sequence)
    :param ref_bin: bin number 2 name/filepath (query sequence)
    :return: string like Acidovora-69x_Ga0081644_to_Acidovorax-79_Ga0081651
    """
    return "{}/{}_to_{}".format(results_dir, query_bin, ref_bin)
946b788acaf776322cc02aaa3b6740f198b61b4d
31,685
from typing import Tuple
import re


def parse_hp_condition(hp_condition: str) -> Tuple[int, int, str]:
    """Parse a string describing HP and status ailment.

    :param hp_condition: '50/200' (current HP=50, max HP=200, no status) or
        '50/200 psn' (when a status ailment is present)
    :return: (current HP, max HP, status) where status is '', 'psn' (poison),
        'tox' (badly poisoned), 'par', 'brn', 'slp', 'frz' or 'fnt' (fainted)
    """
    if hp_condition == '0 fnt':
        # Fainted mons are reported as the literal '0 fnt'; max HP is not
        # given, so 100 is returned as a conventional placeholder.
        return 0, 100, 'fnt'
    m = re.match('^(\\d+)/(\\d+)(?: (psn|tox|par|brn|slp|frz|fnt)|)?$', hp_condition)
    assert m is not None, f"HP_CONDITION '{hp_condition}' cannot be parsed."
    # m[3] is None when no status ailment is present.
    return int(m[1]), int(m[2]), m[3] or ''
cbe9ec75efbae1b144836cc8129fdc3256e0dccf
31,686
def genlen(gen, ldict, name):
    """Track the consumed length of a generator in ldict[name].

    A bit of a hack to get the length of a generator after it has been run:

        ldict = {}
        list(genlen((i for i in range(3)), ldict, 'mylen'))
        assert ldict['mylen'] == 3

    Fix: the original used ``while True: yield next(gen)``, which lets the
    inner StopIteration escape the generator body — a RuntimeError under
    PEP 479 (Python 3.7+).  A for-loop terminates cleanly instead.
    """
    ldict[name] = 0

    def _counting():
        for item in gen:
            yield item
            # Incremented after the consumer resumes us, matching the
            # original's post-yield counting order.
            ldict[name] += 1
    return _counting()
6d5ec86ed71b1fc97f796a8dc72db230f01af7c1
31,688
import win32api
import sys


def clear_dll_directory():
    """
    Push current Dll Directory. There are two cases that can happen related
    to setting a dll directory:

    1: Project is using different python then Desktop, in which case the
       desktop will set the dll directory to none for the project's python
       interpreter. In this case, the following code is redundant and not
       needed.
    2: Desktop is using same python as Project. In which case we need to
       keep the desktop dll directory.

    :returns: the previous DLL directory (so a caller can restore it), or
        None when unset or not on Windows.
    """
    dll_directory = None
    if sys.platform == "win32":
        # This 'try' block will fail silently if user is using
        # a different python interpreter then Desktop, in which
        # case it will be fine since the Desktop will have set
        # the correct Dll folder for this interpreter. Refer to
        # the comments in the method's header for more information.
        # NOTE(review): StandardError exists only on Python 2; on Python 3
        # these handlers raise NameError instead of catching — confirm the
        # intended runtime before porting.
        try:
            # GetDLLDirectory throws an exception if none was set
            try:
                dll_directory = win32api.GetDllDirectory(None)
            except StandardError:
                dll_directory = None
            win32api.SetDllDirectory(None)
        except StandardError:
            pass
    return dll_directory
28c052002a410f25d56359a54a46fa22bbb0717f
31,689
from typing import List
from typing import Dict


def dictify(data, keys: List, val: Dict) -> Dict:
    """Turn a flat :class:`NodeTree` path into a nested dictionary.

    Recursively places ``val`` into ``data`` along the path given by
    ``keys``; decimal keys become ints, all others are lowercased.

    Arguments:
        data (dict): A dictionary to add value to with keys.
        keys (list): A list of keys to traverse along tree and place value.
        val (dict): A value for innermost layer of nested dict.
    """
    head = keys[0]
    head = int(head) if head.isdecimal() else head.lower()
    if len(keys) == 1:
        data[head] = val
        return data
    branch = data[head] if head in data else {}
    data[head] = dictify(branch, keys[1:], val)
    return data
e7c0111f67b7755a6e28d6264d8f7b300e94273c
31,690
def normalize(x, min_x, max_x):
    """Min-max normalize ``x`` into the 0-1 range.

    :param x: Array-like structure: the data to be normalized
    :param min_x: Float-like: the minimum of the data set
    :param max_x: Float-like: the maximum of the data set
    :return: data rescaled so min_x maps to 0 and max_x maps to 1
    """
    span = max_x - min_x
    return (x - min_x) / span
4fbbaf06017cedd14eca3e7f0639d9002b231be4
31,691
import textwrap
import html


def pretty_text(data):
    """Doubly unescape HTML entities in ``data`` and wrap it at 60 columns.

    Returns "" when ``data`` is None.
    """
    if data is None:
        return ""
    # Double unescape handles doubly-encoded entities like '&amp;amp;'.
    unescaped = html.unescape(html.unescape(data))
    return textwrap.fill(unescaped, width=60)
f5baf58394b8578b26ad3cb95aff26867bc5f748
31,692
def calculate_assets(df, choice, transactions):
    """Calculate the worth of the portfolio

    Walks the dataframe row by row, carrying the held quantity forward and
    applying any transaction whose date and ticker match; the asset value
    column is then quantity * closing price.

    Args:
        df ([type]): per-day rows with "Date_y_m_d" plus per-ticker
            close/quantity columns — TODO confirm schema with caller
        choice ([type]): ticker suffix selecting the close/quantity columns
        transactions ([type]): sequence of (date, ticker, quantity-delta)
            tuples — presumably; verify against caller

    Returns:
        [type]: the dataframe with quantity and asset columns filled in
    """
    close_column = "close_" + choice
    quantity_column = "quantity_" + choice
    asset_column = "asset_" + choice
    # Start at row 1: row 0 is assumed to hold the initial quantity.
    for i in range(1, len(df)):
        for j in range(len(transactions)):
            #st.write(f".{df.loc[i, 'Date_y_m_d']}. == .{transactions[j][0]}. | .{choice}. == .{transactions[j][1]}.")
            if df.loc[i, "Date_y_m_d"] == transactions[j][0] and choice ==transactions[j][1]:
                # st.write ("HIT")
                # st.write(f".{df.loc[i, 'Date_y_m_d']}. == .{transactions[j][0]}. | .{choice}. == .{transactions[j][1]}.")
                # Matching transaction: adjust yesterday's quantity by the delta.
                df.loc[i, quantity_column] = df.loc[i-1, quantity_column] + transactions[j][2]
                break
        else:
            # for/else: no transaction matched today, carry quantity forward.
            df.loc[i, quantity_column] = df.loc[i-1,quantity_column]
    df[asset_column] = df[quantity_column] * df[close_column]
    return df
85fea50cb8740799f675695cf836adb153090f6a
31,693
def precision_at_position_1(sort_data):
    """Evaluate precision at rank 1.

    Returns 1 if the top-ranked candidate's label equals 1, else 0.
    """
    top_label = sort_data[0][1]
    return 1 if top_label == 1 else 0
bb07288836ef127c3122a8065a7cf14952c642aa
31,694
def count_recursive(contents_per_bag, bag_list):
    """Count the bags in ``bag_list`` plus every bag nested inside them.

    :param contents_per_bag: per-bag mapping of directly contained bags
    :param bag_list: list of bags to inspect
    :return: total number of bags across all nesting levels
    """
    return sum(
        1 + count_recursive(contents_per_bag, contents_per_bag[bag])
        for bag in bag_list
    )
d48e317ee73bf0f18021d27bc9857a3ad898759c
31,697
def sort_list(doc_name: str, *vart):
    """Return ``doc_name`` when it contains any identifier in ``vart``.

    Matching is case-insensitive; names containing "(old" or "~" never
    match.  Returns None when nothing matches.
    """
    if "(old" in doc_name or "~" in doc_name:
        return None
    lowered = doc_name.lower()
    for ident in vart:
        if ident.lower() in lowered:
            return doc_name
92caeaa253daa4dc1b9fac5acda4e38963a984bf
31,699
def from_homogeneous(vectors, at_infinity):
    """Convert homogeneous vectors back to inhomogeneous coordinates.

    Directions (``at_infinity`` truthy) just drop the last coordinate along
    axis 0; points are divided by their last coordinate first.
    """
    if at_infinity:
        return vectors[:-1, ...]
    normalized = vectors / vectors[-1, ...]
    return normalized[:-1, ...]
002c7fe26fa5100f4b21a373b8506e4960081b4c
31,700
def rotl(x, count):
    """Rotate the 64-bit value ``x`` to the left by ``count`` bits."""
    result = 0
    for bit_index in range(64):
        if (x >> bit_index) & 1:
            result |= 1 << ((bit_index + count) % 64)
    return result
6bba2bf109f9ddd0f9b4c202c3192e14a0cd625f
31,701
from typing import Union import re def _header_line(line: str) -> Union[None, tuple]: """ If it is detected as header line, returns its header level and caption. Otherwise returns `None`. """ m = re.match(r"^(#+)(.+)", line) if m: level = len(m.group(1)) caption = m.group(2) return level, caption return None
0704ff88623eb66caef6a0473047f17a152701f4
31,702
import random


def flip():
    """Flip a coin, returning either 'Heads!' or 'Tails!'."""
    outcomes = ['Heads!', 'Tails!']
    return random.choice(outcomes)
bced383b31c62fef758d8522ecf1fef49d4f5fc1
31,703
from typing import Optional
import re


def get_nb_query_param(nb_url_search: str, param: str) -> Optional[str]:
    """
    Get a url query parameter from the search string.

    Parameters
    ----------
    nb_url_search: str
        The URL search string
    param: str
        The parameter name to search for

    Returns
    -------
    Optional[str]
        value of the query string parameter or None if not found.
    """
    pattern = r"[\\?&]{param}=(?P<val>[^&#]*)".format(param=param)
    match = re.search(pattern, nb_url_search)
    return match["val"] if match else None
28b36c630879a16dbca38a9739fe92b1d54aa5ca
31,704
def _create_padded_message(message): """Pad the message.""" fill_size = len(message) + 16 - len(message) % 16 return message.zfill(fill_size)
fce0e75c10eb7bb9f16513926535436c1337bdcc
31,705
from typing import Tuple


def nmea_coord_to_lla(
    lat_str: str, lat_cardinal: str, lon_str: str, lon_cardinal: str, alt_str: str
) -> Tuple[float, float, float]:
    """Convert an NMEA quintuplet to decimal GPS coordinates.

    e.g. (4811.7605691, N, 01137.1631304, E, 494.6412)

    :param lat_str: latitude in NMEA ddmm.mmm format
    :param lat_cardinal: latitude cardinal (N for north, S for south)
    :param lon_str: longitude in NMEA dddmm.mmm format
    :param lon_cardinal: longitude cardinal (E for east, W for west)
    :param alt_str: altitude in NMEA format
    :return: (latitude, longitude, altitude), latitude/longitude in decimal
        degrees (negative for S/W)
    """
    lat_dec = float(lat_str[:2]) + float(lat_str[2:]) / 60.0
    # The longitude degree part may be 2 or 3 digits; the minutes part is
    # always 2 digits before the decimal point.
    deg_digits = lon_str.index('.') - 2
    lon_dec = float(lon_str[:deg_digits]) + float(lon_str[deg_digits:]) / 60.0
    if lat_cardinal == 'S':
        lat_dec = -lat_dec
    if lon_cardinal == 'W':
        lon_dec = -lon_dec
    return lat_dec, lon_dec, float(alt_str)
1f9a8dc6bb05309957e27eb9ed72df953abca386
31,706
import time


def UnixTimeCurrent():
    """Return the current time as a Unix timestamp (float seconds)."""
    return time.time()
f045d94e64e9eb6bdd564f8773d92dbc410241fa
31,707
def quick_sort(arr):
    """Sort ``arr`` in place with quicksort and return it.

    Uses a Lomuto partition on the last element; local helpers assume the
    whole list should be sorted.
    """
    def _partition(items, lo, hi):
        # Place the pivot (last element) at its final sorted position.
        pivot = items[hi]
        boundary = lo - 1
        for idx in range(lo, hi):
            if items[idx] < pivot:
                boundary += 1
                items[boundary], items[idx] = items[idx], items[boundary]
        items[boundary + 1], items[hi] = items[hi], items[boundary + 1]
        return boundary + 1

    def _sort(items, lo, hi):
        if lo < hi:
            mid = _partition(items, lo, hi)
            _sort(items, lo, mid - 1)
            _sort(items, mid + 1, hi)

    _sort(arr, 0, len(arr) - 1)
    return arr
be060f8855d459b9cad0fc980675f035e81b00d9
31,708
def MinMaxAvg(data):
    """Return the MIN/MAX/AVG of a list of values in a dictionary.

    :param data: non-empty list of data of the same kind
    :type data: int[] or float[]
    :returns: a dictionary { 'min':min,'max':max,'avg':average }
    :rtype: dictionary
    :raises ValueError: if ``data`` is empty

    .. seealso:: Stats
    """
    # Builtins replace the hand-rolled single-pass loop; results are
    # identical for non-empty input (avg uses true division, as before).
    return {'min': min(data), 'max': max(data), 'avg': sum(data) / len(data)}
0373da1ab3dfa36453669d0f6f33b8541e33eb52
31,709
def ansi(code_r, bold=""):
    """ANSI Colours for printing (Because why not?)

    Code can be 30-37. In order of colours, these are black, red, green,
    yellow, blue, magnenta, cyan, and white. After every colour print,
    print ansi(0) to clear colour attributes.
    (Copied from psilib.utils.ansi)
    """
    suffix = ";1m" if bold else "m"
    return "\033" + "[" + str(code_r) + suffix
43bfedf6e1aba69f2b2bacd928701547bdd4c4ad
31,710
def createGeoJSON(features):# [[coord1,cord2,cord3,...], row, column]
    """
    createGeoJSON(features)

    From structure as [[coord1,cord2,cord3,...], row, column] creates a new
    geoJSON used for Surface Unit

    Parameters
    ----------
    features : List
        Structure as [[coord1,cord2,cord3,...], row, column]

    Returns
    -------
    String
        Returns a great formated geojson
    """
    gjson = '{"type":"FeatureCollection", '
    gjson += '"features": ['
    numFeat = len(features)
    for x in range(numFeat):
        feature = features[x]
        #Feature
        gjson += '{ "type":"Feature",'
        gjson += '"geometry":{'
        gjson += '"type": "MultiPolygon", "coordinates": [[['
        # Track the first vertex so the polygon ring can be closed below.
        isFirst = 0
        firstLon = -9999
        firstLat = -9999
        for c in feature[0]:
            lon = c[0]
            lat = c[1]
            if isFirst == 0:
                firstLon = lon
                firstLat = lat
                isFirst = 1
            # NOTE: vertices are emitted as [lat, lon] pairs.
            gjson += '['
            gjson += str(lat)
            gjson += ', '
            gjson += str(lon)
            gjson += '],'
        # Close the ring by repeating the first vertex.
        gjson += '['
        gjson += str(firstLat)
        gjson += ', '
        gjson += str(firstLon)
        gjson += ']'
        gjson += ']]]'
        gjson += "},"
        gjson += '"properties": {'
        # Feature id is the row (when positive) concatenated with the column.
        gjson += '"id" :'
        if(feature[1] > 0):
            gjson += str(feature[1])
        gjson += str(feature[2])
        gjson += '}'
        # Separate features with commas; no comma after the last one.
        if x +1 == numFeat:
            gjson +='}'
        else:
            gjson +='},'
        #End Feature
    gjson += ']'
    gjson += '}'
    return gjson
80c25e5c094d704d4c9d12f5ebdfe556fbe44f71
31,712
from pathlib import Path


def read_html_file(file_path):
    """Read an HTML file's contents; return None when it is not a file."""
    target = Path(file_path)
    if not (target.exists() and target.is_file()):
        return None
    with target.open() as handle:
        return handle.read()
260b56d895a42696ff04badf62d58268b4f24c0a
31,713
def check_inclusion_4(s1: str, s2: str) -> bool:
    """Return True if some permutation of s1 occurs as a substring of s2.

    optimize method 2: maintains a single signed difference array between
    the current window of s2 and s1, plus the count of non-zero entries, so
    each window slide costs O(1).
    """
    len1, len2 = len(s1), len(s2),
    if len1 > len2:
        return False
    # diff[c] = (count of letter c in current s2 window) - (count in s1).
    diff = [0] * 26
    for i in range(len1):
        diff[ord(s1[i]) - ord('a')] -= 1
        diff[ord(s2[i]) - ord('a')] += 1
    # Number of letters whose window count differs from s1's count.
    diff_count = 0
    for cnt in diff:
        if cnt != 0:
            diff_count += 1
    if diff_count == 0:
        return True
    # Slide the window one character at a time across s2.
    for i in range(len1, len2):
        if s2[i] != s2[i - len1]:
            # Outgoing character s2[i - len1] leaves the window.
            if diff[ord(s2[i - len1]) - ord('a')] == 0:
                diff_count += 1
            diff[ord(s2[i - len1]) - ord('a')] -= 1
            if diff[ord(s2[i - len1]) - ord('a')] == 0:
                diff_count -= 1
            # Incoming character s2[i] joins the window.
            if diff[ord(s2[i]) - ord('a')] == 0:
                diff_count += 1
            diff[ord(s2[i]) - ord('a')] += 1
            if diff[ord(s2[i]) - ord('a')] == 0:
                diff_count -= 1
        if diff_count == 0:
            return True
    return False
d471dc1aca049ea07c1203c7c3bff472f6d6a4ac
31,715
def rsa_encrypt(data: int, e: int, n: int) -> int:
    """Encrypt data with the rsa cryptosystem (rsa_fernet_encrypt is more
    secure and supports more data).

    :param data: the plaintext as an integer; must satisfy data < n
    :param e: public key (e) of the other person
    :param n: public key (n) of the other person
    :return: the ciphertext pow(data, e, n)
    :raises OverflowError: if the plaintext does not fit in the modulus
    """
    # Fix: RSA requires plaintext < n. The original only rejected data > n;
    # data == n would encrypt to 0 and be undecryptable, so reject it too,
    # and give the error a descriptive message (was empty).
    if data >= n:
        raise OverflowError('plaintext must be smaller than the modulus n')
    return pow(data, e, n)
58f39297a594a1ed224b0ea09b81e7f988c2fe54
31,716
from typing import List


def is_armstrong(number: int) -> bool:
    """Return True if ``number`` is an Armstrong number.

    An Armstrong number equals the sum of its digits each raised to the
    power of the digit count.
    """
    text = str(number)
    power = len(text)
    total = sum(int(ch) ** power for ch in text)
    return total == number
2c84a53e439368a403f75066e4fbdca119d68f2f
31,718
def quote_identifier(identifier, sql_mode=""):
    """Quote the given identifier with backticks, converting backticks (`)
    in the identifier name with the correct escape sequence (``) unless the
    identifier is quoted (") as in sql_mode set to ANSI_QUOTES.

    Args:
        identifier (str): Identifier to quote.

    Returns:
        str: Returns string with the identifier quoted with backticks.
    """
    if sql_mode == "ANSI_QUOTES":
        escaped = identifier.replace('"', '""')
        return '"{0}"'.format(escaped)
    escaped = identifier.replace("`", "``")
    return "`{0}`".format(escaped)
c032e6e597795b74b3a25c8529074684fc4ab619
31,719
from typing import Counter
import re


def build_wordlist(input_file):
    """Build a wordlist Counter from lines of the corpus file."""
    wordlist = Counter()
    word_pattern = re.compile(r'\w+')
    for line in input_file:
        wordlist.update(word_pattern.findall(line))
    return wordlist
575a6fb872750dc83ac8b6f8b66f4b779962b71d
31,720
def check_max_amplitude(st, min=5, max=2e6):
    """
    Checks that the maximum amplitude of the traces in the stream are ALL
    within a defined range. Only applied to counts/raw data.

    Args:
        st (obspy.core.stream.Stream): Stream of data.
        min (float): Minimum amplitude for the acceptable range.
            Default is 5.
        max (float): Maximum amplitude for the acceptable range.
            Default is 2e6.

    Returns:
        Stream that has been checked for maximum amplitude criteria.
    """
    if not st.passed:
        return st
    for tr in st:
        # Skip traces already converted to physical units; the amplitude
        # check only applies to raw counts.
        if 'remove_response' in tr.getProvenanceKeys():
            continue
        peak = abs(tr.max())
        if peak < float(min) or peak > float(max):
            tr.fail('Failed max amplitude check.')
    return st
3043ac4e29904779578c52dfb1c7b79f1cf13ad1
31,721
def getvalsfromparams(cosmo, **params):
    """TO DO: provide a general function to pass values into cosmo and params.

    Currently a stub that always returns None.
    """
    return None
9ce20795bcfbac8ea78241701acab907af73b387
31,722
def _calc_corr(dbal, benchmark_dbal, window): """ Calculate the rollowing correlation between two returns. Parameters ---------- dbal : pd.Series Strategy daily closing balance indexed by date. benchmark_dbal : pd.Series Benchmark daily closing balance indexed by date. window : int Size of the moving window. This is the number of observations used for calculating the statistic. Returns ------- corr : pd.DataFrame Window size rollowing correlation between `dbal` and `benchmark_dbal`. """ ret = dbal.pct_change() benchmark_ret = benchmark_dbal.pct_change() corr = ret.rolling(window).corr(benchmark_ret) return corr
4ae0ec7184774a6500eecafa91467daee7800ca1
31,724
def iben_tutukov1984(history, al=1):
    """
    CE formalism from `Iben & Tutukov 1984, ApJ, 284, 719
    <https://ui.adsabs.harvard.edu/abs/1984ApJ...284..719I/abstract>`_

    Required history parameters (final values are used):
    - star_1_mass
    - star_2_mass
    - he_core_mass
    - binary_separation

    :param history: ndarray with model parameters
    :param al: alpha CE, the efficiency parameter for the CE formalism
    :return: final separation, final primary mass
    """
    m_primary = history['star_1_mass'][-1]
    m_secondary = history['star_2_mass'][-1]
    m_core = history['he_core_mass'][-1]
    separation = history['binary_separation'][-1]
    final_sep = al * (m_core * m_secondary) / (m_primary ** 2) * separation
    return final_sep, m_core
6eef41463eef83668902c74f748258a39e8b919f
31,725
import os
import re


def convert_frame_format(path):
    """Convert printf-style frame padding like '%04d' into '####' padding."""
    head, tail = os.path.split(path)

    def _to_hashes(match):
        width = int(match.group(2))
        return match.group(1) + "#" * width + match.group(3)

    converted = re.sub("(.*)%([0-9]*)d(.*)", _to_hashes, tail)
    # Joined with a literal '/', matching the original behavior (even when
    # head is empty).
    return head + "/" + converted
5764e3edc74554a14bb1632243469f59409bbb95
31,726
def _count_words(text): """ Count words in a piece of text. """ if isinstance(text, (list, tuple)): text = "\n".join(text) return len(text.split()) if text else 0
473b91262f27400b8ae7c95a819eb35288cb1dc4
31,727
def parse_monitors(monitors):
    """Render monitor dicts as ansible inventory host lines.

    Given a list of dictionaries, returns a list of hosts usable in an
    ansible inventory, with host variables appended. For example::

        [
            {"host": "mon0.host", "interface": "eth0"},
            {"host": "mon1.host", "interface": "enp0s8"},
        ]

    returns::

        ["mon0.host monitor_interface=eth0",
         "mon1.host monitor_interface=enp0s8"]

    The API allows either ``interface`` or ``address``; ``interface`` is
    preferred when both are present, and with neither the bare host is
    emitted.
    """
    lines = []
    for mon in monitors:
        parts = [mon["host"]]
        # 'Either or' situation: prefer the interface form, fall back to
        # the address, and omit the variable entirely when neither exists.
        if 'interface' in mon:
            parts.append("monitor_interface=%s" % mon['interface'])
        elif 'address' in mon:
            parts.append("monitor_address=%s" % mon['address'])
        lines.append(" ".join(parts))
    return lines
5fc05ca5713bb188c022b0f070bf63f97162174f
31,729
import json


def template_json():
    """Return a Template JSON parsed into a dict."""
    raw = """{
    "name": "RVXXX1-T1-ID",
    "subject": "Campaign 1",
    "html": "<html>Body Test</html>",
    "text": "Body Test"
}"""
    return json.loads(raw)
7d9208e30140c9c4d840e544305f60a59dc75573
31,730
import argparse


def parse_args():
    """Parse command-line arguments (video source, qrexec target/domain)."""
    parser = argparse.ArgumentParser()
    required_flags = (
        ('-s', '--video-source', 'Video stream source'),
        ('-t', '--requested-target', 'Qrexec requested target'),
        ('-r', '--remote-domain', 'Qrexec remote domain'),
    )
    for short_opt, long_opt, help_text in required_flags:
        parser.add_argument(short_opt, long_opt, type=str, required=True,
                            help=help_text)
    return parser.parse_args()
f563dad7f27bcdfd35467e9b86f6ba5c95a3d371
31,731
def jump_stats(previous_jumps, chute_altitude):
    """Compare altitude when chute opened with previous successful jumps.

    Return the number of previous jumps and the number of times the current
    jump is better (i.e. the chute opened lower than the previous jump).
    """
    beaten = len([pj for pj in previous_jumps if chute_altitude < pj])
    return len(previous_jumps), beaten
f9b92c1355ca45b4bb8be77c0e446bc71cdae7fc
31,733
from typing import Dict
import os
import json


def get_sample_cr(crd: str) -> Dict:
    """Get a dictionary with a sample of a CRD.

    :param crd: one of 'robco_mission', 'robco_robot', 'robco_robottype'
    :raises ValueError: for any other CRD name
    """
    if crd not in ('robco_mission', 'robco_robot', 'robco_robottype'):
        raise ValueError('There is no sample for CRD "{}"'.format(crd))
    sample_path = '{}/k8s-files/sample_{}.json'.format(
        os.path.dirname(__file__), crd)
    with open(sample_path) as file:
        return json.load(file)
f48056719cf2a13b340d2c53425e36b75cdaf0af
31,734
def random_string(stringLength=8):
    """Return a fixed placeholder string.

    Despite the name, this always returns "123456789"; the length
    parameter is currently ignored.

    Parameters
    ----------
    stringLength : int, optional
        length of the random string, by default 8

    Returns
    -------
    str
        the placeholder string "123456789"
    """
    return "123456789"
954c1e4481dc6584ccef40dfc1d52b8aaeede125
31,736
from operator import xor


def detect_secstruct_clash(i, j, secstruct):
    """
    Detect if an EC pair (i, j) is geometrically impossible given
    a predicted secondary structure

    Based on direct port of the logic implemented in
    choose_CNS_constraint_set.m from original pipeline,
    lines 351-407. Use secstruct_clashes() to annotate
    an entire table of ECs.

    Parameters
    ----------
    i : int
        Index of first position
    j : int
        Index of second position
    secstruct : dict
        Mapping from position (int) to secondary structure
        ("H", "E", "C")

    Returns
    -------
    clashes : bool
        True if (i, j) clashes with secondary structure
    """
    # extract a secondary structure substring
    # start and end are inclusive
    def _get_range(start, end):
        return "".join(
            [secstruct[pos] for pos in range(start, end + 1)]
        )

    # True iff the string consists solely of repetitions of `char`
    # (vacuously True for the empty string)
    def _all_equal(string, char):
        return string == len(string) * char

    # get bigger and smaller of the two positions
    b = max(i, j)
    s = min(i, j)

    # if pair too distant in primary sequence, do
    # not consider for clash
    if b - s >= 15:
        return False

    # get secondary structure in range between pairs
    secstruct_string = _get_range(s, b)

    # part 1: check for clashes based on alpha helices
    # first check for helix between them, or both in a helix
    # (or either one directly next to helix)
    if _all_equal(_get_range(s + 1, b - 1), "H"):
        return True
    # of if just one of them is in a helix
    elif xor(secstruct[s] == "H", secstruct[b] == "H"):
        # look for a long uninterrupted helix run between the two
        # positions; distance thresholds (6, 11) come from the
        # original MATLAB implementation
        h2 = "H" * (b - s - 1)
        h3 = "H" * (b - s - 2)
        if h2 in secstruct_string:
            if b - s > 6:
                return True
        elif h3 in secstruct_string:
            if b - s > 11:
                return True

    # part 2: check for clashes based on beta strands
    if _all_equal(_get_range(s + 1, b - 1), "E"):
        return True
    elif _all_equal(_get_range(s + 2, b - 2), "E"):
        if b - s > 8:
            return True

    # one end in a strand, the other not: look for long strand runs
    if xor(secstruct[s] == "E", secstruct[b] == "E"):
        e2 = "E" * (b - s - 1)
        e3 = "E" * (b - s - 2)
        e4 = "E" * (b - s - 3)
        if e2 in secstruct_string:
            return True
        elif e3 in secstruct_string:
            return True
        elif e4 in secstruct_string:
            if b - s > 8:
                return True

    return False
2591bffd2f21266ace9371fee68f3ac729f3cc9d
31,739
def _cg_update(d, x, Ap, p, r, rsold, eps): """ """ # standard CG update pAp = r.dtype.type(0.0) for f in range(d): pAp += p[f] * Ap[f] # alpha = rsold / pAp alpha = rsold / max(pAp, eps) for f in range(d): x[f] += alpha * p[f] r[f] -= alpha * Ap[f] rsnew = r.dtype.type(0.0) for f in range(d): rsnew += r[f] ** 2 p = r + (rsnew / rsold) * p rsold = rsnew return x, p, r, rsold
e2002c5f1c1822984b94e3daf951c10850f980bf
31,740
def gauss_sum(n):
    """Sum of the integers 1..n via Gauss's closed-form formula."""
    product = n * (n + 1)
    return product // 2
96d611e8975d163f1cc1cdf207f0930b5951d4a1
31,742
def _is_mobi(file_bytes: bytes) -> bool:
    """
    Decide if a file is a MOBI/AZW3 file.

    Checks the PalmDB type/creator magic at offset 0x3C within the first
    78 header bytes.

    From ./se/vendor/kindleunpack/mobi_sectioner.py lines 49-53
    """
    magic = file_bytes[:78][0x3C:0x3C + 8]
    return magic == b"BOOKMOBI" or magic == b"TEXtREAd"
dcd1fb584b5a8c373fc5b453314ccbc9574b66ea
31,743
def find_dataset(filename, dataset_id):
    """
    :param filename : file containing ``name = value`` dataset entries;
        lines starting with '#' are skipped.
    :param dataset_id : dataset to look up, or "all" for the whole mapping.
    :returns: the path for the given dataset id, the full dict for "all",
        or None when the id is unknown.
    """
    entries = {}
    with open(filename) as listing:
        for raw_line in listing:
            if raw_line.startswith("#"):
                continue
            key, _, value = raw_line.partition("=")
            entries[key.strip()] = value.strip()
    if dataset_id == "all":
        return entries
    return entries.get(dataset_id)
60ec4e70f893ef6fb13f3e33ba5bf01b69b6c05b
31,744
from typing import Dict
from typing import List
import collections
import math


def calculate_tf_weight(tokens_dict: Dict[str, List[str]]) -> Dict[str, Dict[str, float]]:
    """Calculate the log-scaled term-frequency weight per document.

    :param tokens_dict: mapping doc_id -> list of tokens in that document
    :returns: mapping doc_id -> {token: 1 + ln(raw frequency)}
    """
    return {
        doc_id: {
            token: 1.0 + math.log(count)
            for token, count in collections.Counter(tokens).items()
        }
        for doc_id, tokens in tokens_dict.items()
    }
699d54a58403341fb82aca86a04c58c6c1735508
31,746
def remove_empty(rec):
    """Drop records whose sequence was blanked out (all 'X') by convert_to_IUPAC.

    Removes the matching header and sequence from ``rec`` and refreshes it
    via ``rec.update()``.
    """
    for header, sequence in rec.mapping.items():
        # An empty sequence also counts as "all X" (matching all() on empty).
        if sequence.count('X') == len(sequence):
            rec.headers.remove(header)
            rec.sequences.remove(sequence)
    rec.update()
    return rec
57616d8fb35f6bf5c74d4271dfe709e87171932a
31,747
def get_first(objs, default=""):
    """Return the first element of a sequence, or ``default`` when empty."""
    return objs[0] if len(objs) > 0 else default
5932027019a52bc037b18fb3d594819c51ab3b75
31,748
import os


def get_default_pp_args():
    """Returns the default arg for the preprocessor in a format:
    -I<path_to_the_fake_includes>
    """
    here = os.path.realpath(os.path.dirname(__file__))
    project_root = os.path.split(os.path.abspath(here))[0]
    fake_includes = os.path.join(project_root, "utils", "fake_libc_include")
    return "-I" + fake_includes
510e1ec97479cb1d1f77b68f2c4180c8a26be0c5
31,751
import time


def timestamp():
    """Return the current unix time, truncated to whole seconds.

    :return: int
    """
    now = time.time()
    return int(now)
4e67a0ebce4d493405cd045e766962ee5d14bbeb
31,753
def get_y_for_x(x: float, gradient: float, y_intercept: float) -> float:
    """Evaluate the linear equation y = mx + c at ``x``."""
    return gradient * x + y_intercept
f2e4226c159e74621ae8301d24ecc5bef85971f4
31,754
import os
import json
import logging
import sys
import subprocess


def getremote(override):
    """Get the remote server from ~/.pull-gitrc or the command line.

    Verify that the remote is reachable, or quit.

    The contents of ~/.pull-gitrc should look like this:

    {"remote": "foo.bar.home"}

    :param override: remote host given on the command line, or falsy to
        fall back to the configuration file
    :returns: the remote host name when it answers a ping
    :raises SystemExit: exit code 1 when no config file exists and no
        override was given; exit code 2 when the remote is unreachable
    """
    if not override:
        # No server on the command line: read it from the config file.
        rcpath = os.environ["HOME"] + os.sep + ".pull-gitrc"
        try:
            with open(rcpath) as rcfile:
                config = json.load(rcfile)
        except FileNotFoundError:
            logging.error("file ~/.pull-gitrc not found and no server provided.")
            sys.exit(1)
        # A missing "remote" key raises KeyError here (not handled).
        remote = config["remote"]
        logging.info(f"using remote '{remote}' from configuration file.")
    else:
        remote = override
        logging.info(f"using remote '{remote}' from command-line.")
    # Reachability probe: one ICMP echo request, all output discarded.
    rv = subprocess.run(
        ["ping", "-c", "1", remote],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if rv.returncode == 0:
        return remote
    logging.error(f"remote {remote} cannot be reached by ICMP echo request.")
    sys.exit(2)
9652c49ccc5e014b85eb2824f46ac7f1f71aa5d9
31,755
def record(*fields):
    """Generate a class that only allows attr names contained in `fields`"""
    class _Record(object):
        def __init__(self, **kwargs):
            for attr_name, attr_value in kwargs.items():
                setattr(self, attr_name, attr_value)

        def __setattr__(self, name, value):
            # Reject any attribute not declared in the field list.
            if name not in fields:
                raise AttributeError("Illegal attribute name {0}".format(name))
            super(_Record, self).__setattr__(name, value)

        def _asdict(self):
            # Snapshot all declared fields into a plain dict.
            return {field: getattr(self, field) for field in fields}

    return _Record
869351e977a5d04dae6b593bec9760278aea94f0
31,756
from typing import Union
from typing import Dict
from typing import Any
from typing import List
import json


def deep_decode(j: Union[Dict[str, Any], List[Any], str]) -> Union[Dict[str, Any], List[Any], str]:
    """Recursively parse every embedded JSON string into Python objects.

    Strings that are not valid JSON are returned unchanged; dicts and
    lists are rebuilt with their values decoded.
    """
    if isinstance(j, dict):
        return {key: deep_decode(value) for key, value in j.items()}
    if isinstance(j, list):
        return [deep_decode(item) for item in j]
    if isinstance(j, str):
        try:
            return deep_decode(json.loads(j))
        except json.decoder.JSONDecodeError:
            return j
    return j
18700e2766d1d3d9266d8af2965e88dbbec30704
31,758
def open_bed_file(bed_file_name):
    """Open a BED file of sequence overlaps and return the CORE rows.

    Each returned row is the whitespace-split fields of a line containing
    the substring "CORE".
    """
    with open(bed_file_name) as bed:
        return [line.split() for line in bed if "CORE" in line]
be061068bc31215627919db46e9e86197c2e76a4
31,762
import math


def regularize(data):
    """Replace every inf/NaN value in the list with zero, in place.

    Useful so downstream analysis never sees non-finite values.
    Returns the same (mutated) list.
    """
    for position, value in enumerate(data):
        # isfinite is False exactly when the value is inf, -inf or NaN
        if not math.isfinite(value):
            data[position] = 0
    return data
ae38fa7a3a1f5bb6bfeba2ca4fbcfd5145f36ee8
31,763
import functools


def log(func):
    """Decorator (no arguments) that announces entry and exit of a call.

    :param func: the function being decorated
    :return: the wrapped function

    functools.wraps copies __name__ and friends from ``func`` onto the
    wrapper, so introspection still reports the original function's name
    instead of 'wrapper'.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        print('begin call %s():' % func.__name__)
        result = func(*args, **kwargs)
        print('end call %s():' % func.__name__)
        return result
    return inner
3bd630339318afff62814466d966807541255d5b
31,764
import re


def finditer_(log_file_path, regex, read_line=True):
    """Collect all matches of ``regex`` in a file.

    Example pattern: '(<property name="(.*?)">(.*?)<\\/property>)'

    :param log_file_path: path of the file to scan
    :param regex: regular expression, searched with re.S
    :param read_line: if True scan line by line (a match cannot span
        lines), otherwise scan the whole file contents at once
    :return: list of matched substrings
    """
    match_list = []
    with open(log_file_path, "r") as f:
        if read_line:
            for line in f:
                for match in re.finditer(regex, line, re.S):
                    match_list.append(match.group())
        else:
            data = f.read()
            for match in re.finditer(regex, data, re.S):
                match_list.append(match.group())
    # NOTE: the redundant f.close() (the `with` block already closes the
    # file) and the leftover debug print of the result were removed.
    return match_list
74f54279a71338ed3135a10a2caae11562bfc922
31,765
def _config_hint_generate(optname, both_env_and_param):
    """Generate HINT language for missing configuration.

    :param optname: option name, dashes allowed (mapped to the env var
        name by upper-casing and replacing '-' with '_')
    :param both_env_and_param: True when the setting can also be supplied
        as a command-line option
    """
    env = optname.replace('-', '_').upper()
    if not both_env_and_param:
        return 'Set the environment variable {0}.'.format(env)
    option = '--' + optname.lower()
    return ('Pass "{0}" or set the environment variable "{1}".'
            .format(option, env))
1a21ce609a4d209f06c6479eb848412be859a598
31,766
from typing import List


def padding_list(someList: List[str], N: int) -> List[str]:
    """Pad the list with N '<s>' markers in front and N '</s>' behind.

    Args:
        someList (List[str]): The list to be padded
        N (int): The number of '<s>' / '</s>' markers to add on each side

    Returns:
        List[str]: A new padded list (the input list itself when N <= 0)
    """
    # The original looped N times, rebuilding the list each pass (O(N^2));
    # a single concatenation produces the same result in O(N + len).
    if N > 0:
        someList = ['<s>'] * N + someList + ['</s>'] * N
    return someList
e8df315d715e5e1e4575b42da5f6ff1691107732
31,767
import logging


def get_logger():
    """Configure basic logging and return the logger named 'root'."""
    log_format = '[%(asctime)-15s %(filename)s:%(lineno)s] %(message)s'
    # basicConfig is a no-op if handlers are already installed.
    logging.basicConfig(format=log_format)
    return logging.getLogger('root')
8910c417e6d15cf92ecfab1226c175a55b2e5108
31,769
import re


def split(re_, str_, flag=0, max_split=0) -> list:
    """Split a string on a regular expression.

    :param re_: regular expression pattern
    :param str_: the string to split
    :param flag: regex flags forwarded to re.split, default 0
    :param max_split: maximum number of splits (0 means unlimited)
    """
    return re.split(re_, str_, maxsplit=max_split, flags=flag)
d1eb55819341a4c210069459aae6c200c401e83a
31,770
import os


def resolve_file(dirname, basename, lookup):
    """
    If a filename is unique, use it. If it is not, check the parent
    directories until disambiguation is achieved.

    :param dirname: directory of the file being resolved
    :param basename: file name used as the key into ``lookup``
    :param lookup: mapping from basename to a list of 2-item entries —
        presumably (path, payload) pairs; TODO confirm against the caller
        that builds it
    :returns: the unique (path, payload) pair, or None when no unique
        match exists
    """
    if basename not in lookup:
        return None
    reference_list = lookup[basename]
    dirname_path = os.path.split(dirname)
    # Pre-split each candidate path so trailing components can be
    # compared one directory level at a time.
    matches = [(os.path.split(match[0]), match[0], match[1])
               for match in reference_list]
    if len(matches) == 1:
        return (matches[0][1], matches[0][2])
    # Walk up the directory tree: on each pass keep only the candidates
    # whose last path component agrees with the current component of
    # ``dirname``, then strip one level from both sides.
    while matches:
        matches = [(os.path.split(match[0][0]), match[1], match[2])
                   for match in matches
                   if os.path.normpath(match[0][-1]) == os.path.normpath(dirname_path[-1])]
        dirname_path = os.path.split(dirname_path[0])
        if len(matches) == 1:
            return (matches[0][1], matches[0][2])
    return None
7f5b69d7ce4c2258f3f6a46c97107f8d8f068d3c
31,771
import re


def _detect_base64(s):
    """Quite an ingenuous function to guess if a string is base64 encoded.

    Returns a falsy value (False or None) when the string cannot be
    base64; a truthy re.Match when it looks like base64.
    """
    # base64 payloads are always a multiple of 4 characters long
    if len(s) % 4 != 0:
        return False
    return re.match('^[A-Za-z0-9+/]+[=]{0,2}$', s)
0c0e4d50c8687e70a4ac423ae88d57146f5f28cc
31,772
from typing import OrderedDict


def create_message(message_id, sender, receivers, date_sent, subject, content):
    """
    Assemble a single email message as an order-preserving mapping.

    @param message_id The ID of the message
    @param sender The sender of the message
    @param receivers The list of receivers from the message
    @param date_sent The date the message was sent
    @param subject The subject of the message
    @param content The body of the email
    @return message The message as an OrderedDict with a fixed key order
    @see https://www.python.org/dev/peps/pep-0372/
    @see https://docs.python.org/dev/whatsnew/2.7.html#pep-372-adding-an-ordered-dictionary-to-collections
    """
    # Insert in the canonical order so serialization stays stable.
    message = OrderedDict()
    message['id'] = message_id
    message['sender'] = sender
    message['receivers'] = receivers
    message['date_sent'] = date_sent
    message['subject'] = subject
    message['content'] = content
    # ('x-gmail-labels', gmail_labels)  # NOTE: Future feature
    return message
11d6ce6b2ad0ef44405b3f68326c95812279eb2f
31,773
def get_latent_and_log_weight_and_log_q(generative_model, guide, obs, obs_id=None, num_particles=1):
    """Samples latent and computes log weight and log prob of inference network.

    Args:
        generative_model: models.GenerativeModel object
        guide: models.Guide object
        obs: tensor of shape [batch_size, num_rows, num_cols]
        obs_id: long tensor of shape [batch_size] (unused here)
        num_particles: int

    Returns:
        latent: tensor of shape [num_particles, batch_size, num_arcs, 2]
        log_weight: tensor of shape [batch_size, num_particles]
        log_q: tensor of shape [batch_size, num_particles]
    """
    # Sample num_particles latents per observation from the guide.
    dist = guide.get_latent_dist(obs)
    latent = guide.sample_from_latent_dist(dist, num_particles)
    # Transpose from [num_particles, batch] to [batch, num_particles].
    log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
    log_q = guide.get_log_prob_from_latent_dist(dist, latent).transpose(0, 1)
    # Importance weight: log p(x, z) - log q(z | x).
    return latent, log_p - log_q, log_q
c37fcc7d028fddd48edd563eb5fcf75067bbcc95
31,774
def check_non_ascii(line):
    """Check whether a line is pure ASCII.

    Params:
        line (unicode)
    Returns:
        True if the line contains no non-ASCII characters
    """
    try:
        line.encode('ascii')
    except UnicodeEncodeError:
        return False
    return True
9e9eebf5623000b27e3b2234ada4c2c5eaafad83
31,775
def az_rate(val):
    """Convert an integer value to a floating point angular rate.

    One count corresponds to 90 degrees divided by 2**16.
    """
    degrees = val * 90.
    return degrees / 2 ** 16
22dfd3cfcc04ec83dc5b6d845eb333de96cd5f7a
31,776
def title():
    """Return a sample title string."""
    sample_title = 'this is a title'
    return sample_title
1a1e9bf8024870812783d851994733f58de79a91
31,777
def cond(condition, expr1, expr2):
    """Marked for deletion.. Python2.5 provides this (the conditional
    expression ``a if c else b``)."""
    return expr1 if condition else expr2
2e46fe73a54ef87ba4e9ccf9071dcb73ebf360bf
31,778
import os


def dsn_from_env():
    """Read DSN from test environment variables.

    For testing postgresql configurations with passwords / nonstandard
    ports, set PGSU_TEST_HOST, PGSU_TEST_PORT, PGSU_TEST_PASSWORD,
    PGSU_TEST_USER and/or PGSU_TEST_DATABASE.

    :returns: Dictionary with (only) the DSN keys whose environment
        variables are set to a non-empty value.
    """
    env_for_key = {
        'host': 'PGSU_TEST_HOST',
        'port': 'PGSU_TEST_PORT',
        'password': 'PGSU_TEST_PASSWORD',
        'user': 'PGSU_TEST_USER',
        'database': 'PGSU_TEST_DATABASE',
    }
    # Keep only entries whose variable is set and non-empty.
    return {
        dsn_key: os.environ[env_var]
        for dsn_key, env_var in env_for_key.items()
        if os.getenv(env_var)
    }
825a3dde56a93b9ac91cc306cb661bb2fd2f86b2
31,779
import token
import tokenize


def strip_docstrings(line_gen):
    """ Strip comments and docstrings from a file.

    Based on code from:
    https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings

    :param line_gen: a readline-style callable, as accepted by
        tokenize.generate_tokens
    :returns: the source as one string, with docstrings replaced by
        "#--" and comments replaced by "##"
    """
    res = []
    # prev_toktype starts as INDENT so a module-level leading string
    # (the module docstring) is also treated as a docstring.
    prev_toktype = token.INDENT
    last_lineno = -1
    last_col = 0
    tokgen = tokenize.generate_tokens(line_gen)
    for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
        if slineno > last_lineno:
            # Crossed onto a new physical line: reset the column tracker.
            last_col = 0
        if scol > last_col:
            # Re-create the whitespace gap before this token.
            res.append(" " * (scol - last_col))
        if toktype == token.STRING and prev_toktype == token.INDENT:
            # Docstring: a string token immediately after an indent.
            res.append("#--")
        elif toktype == tokenize.COMMENT:
            # Comment
            res.append("##\n")
        else:
            res.append(ttext)
        prev_toktype = toktype
        last_col = ecol
        last_lineno = elineno
    return ''.join(res)
ea5774377439484dd6e208521f18dc694eb991ac
31,780
def getelasticpublicip(ip):
    """Gets the PublicIp address of the Elastic IP Address"""
    public_ip = ip["PublicIp"]
    return public_ip
84e4f788cb4a9b765f4b5dfc1a0606004162a4ed
31,781
def commands(ctx):
    """Return a string of Rimworld specific commands.

    Parameters:
        ctx: The context of the message (currently unused; kept so the
            signature matches the other command handlers)

    Returns:
        str: The list of commands
    """
    # The original assigned response = '' and immediately overwrote it;
    # the dead assignment was removed.
    return 'Rimworld Commands: !item, !event, !iteminfo, !eventinfo, !mods'
4fcb162740e97437f414f8925eff97d3032e5319
31,782
def add_timeout_arg(parser):
    """Add the timeout argument to a parser"""
    def _check_timeout(raw_value):
        """Reject timeouts outside the 10-900 second Lambda range"""
        message = 'Value for \'timeout\' must be an integer between 10 and 900'
        try:
            seconds = int(raw_value)
        except ValueError:
            # parser.error() itself exits; the raise mirrors the original.
            raise parser.error(message)
        if 10 <= seconds <= 900:
            return seconds
        raise parser.error(message)

    parser.add_argument(
        '-t', '--timeout',
        required=True,
        help=(
            'The AWS Lambda function timeout value, in seconds. '
            'This should be an integer between 10 and 900.'
        ),
        type=_check_timeout
    )
2b96c763f579650e86e311a2081eab2313f6dc49
31,784