content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def input_block(config, section):
    """Return the input block as a string.

    Each key of ``config[section]`` becomes one line: ``"key value"`` when the
    stripped value is non-empty, otherwise just ``"key"``. Returns '' when the
    section is absent.
    """
    if section not in config:
        return ''
    lines = []
    for key in config[section]:
        value = config[section][key].strip()
        lines.append('{} {}\n'.format(key, value) if value else key + '\n')
    return ''.join(lines)
4f6ff5979c99171b390429bc6e90b325986d9f99
699,620
import os
def get_working_dir():
    """
    Returns the base directory of the cartridge agent.
    :return: Base working dir path
    :rtype : str
    """
    module_dir = os.path.abspath(os.path.dirname(__file__))
    # "/path/to/cartridgeagent/modules/util" split on "modules" leaves the
    # agent root as the first piece.
    return module_dir.split("modules")[0]
74fb1e3c792f51a9245a01353b1dbecb11a39e0c
699,621
import ast
import json
def strtojson(intxt):
    """str to json function

    Tries a Python-literal parse first, falls back to JSON on ValueError,
    and raises on malformed (SyntaxError) input.
    """
    try:
        return ast.literal_eval(intxt)
    except ValueError:
        # e.g. JSON-only tokens like null/true trip literal_eval
        return json.loads(intxt)
    except SyntaxError as ex:
        raise Exception("SyntaxError: Failed to literal eval dict. Err:%s " % ex) from ex
d4b1ba11dd6a064f46d4e07284b54f59072e15ef
699,623
import hashlib
def git_hash_data(data, typ='blob'):
    """Calculate the git-style SHA1 for some data.

    Only supports 'blob' type data at the moment.
    """
    assert typ == 'blob', 'Only support blobs for now'
    header = b'blob %d\0' % len(data)
    return hashlib.sha1(header + data).hexdigest()
b3195de0c04444a811308e8b2b79b5d136095ea8
699,624
def _vector_clock_equal(clock1, clock2): """ Compares two vector clocks, ignoring the timestamp field, which may be skewed. """ clock1_entries = dict((entry.node_id, entry.version) for entry in clock1.entries) clock2_entries = dict((entry.node_id, entry.version) for entry in clock2.entries) return clock1_entries == clock2_entries
2402724d75f7e3b48b05764e056756bdb883ef2e
699,625
def handle_response(response):
    """
    Correct the response datatype if returned incorrectly

    :response Requests.models.Response: a response from the api
    :return: response ready for consumption from wrapper
    """
    # BUG FIX: removed a stray bare `print` expression (a Python-2 leftover
    # that evaluated the builtin as a no-op) that preceded this line.
    data = response.json()
    if data['err_no'] == 0:
        return data['data']
    print(data['err_msg'])
    return data['err_msg']
f18b26d2df8e6abf7625d1ad1153877eae6ecd64
699,626
import os
def get_workdir(iteration, workroot):
    """Find what is the root of the work-tree at a given iteration"""
    if workroot is None:
        return None
    if iteration is None:
        subdir = 'noiter'
    else:
        try:
            # iterable iteration -> dash-joined components
            subdir = '-'.join(str(it) for it in iteration)
        except TypeError:
            # scalar iteration -> plain string
            subdir = str(iteration)
    return os.path.abspath(os.path.join(workroot, subdir))
817bd4d31cb50f9df14b2793c204b11f72b8bc37
699,627
def transpose(table):
    """Returns: copy of table with rows and columns swapped
    Precondition: table is a (non-ragged) 2d List"""
    return [[row[col] for row in table] for col in range(len(table[0]))]
11c74863d8b1941f1a74fb1c054f67b8dd2f22e8
699,628
def num_processes():
    """Return the number of MPI processes.

    Not used for SpiNNaker; always returns 1.
    """
    return 1
51bcd083a57cbce544b52cc1f1ef3772fb0d362c
699,629
def hash_generator(token_to_id, tokens):
    """Generate hash for tokens in 'tokens' using 'token_to_id'.

    Args:
        token_to_id: dict. A dictionary which maps each token to a unique ID.
        tokens: list(str). A list of tokens.

    Returns:
        int. Hash value generated for tokens in 'tokens' using 'token_to_id'.
    """
    hash_val = 0
    base = len(token_to_id) ** (len(tokens) - 1)
    for token in tokens:
        hash_val += token_to_id[token] * base
        # BUG FIX: use floor division. `base /= ...` produced a float, so for
        # any input with more than one token the "int" hash came back float.
        base //= len(token_to_id)
    return hash_val
d9a059e22ab8574fd3c49dfa594cd903edb64d8b
699,630
def pack_varint(data):
    """Pack a VARINT for the protocol."""
    # Number of 7-bit groups needed; every group except the last carries a
    # 0x40 continuation marker.
    last_group = data.bit_length() // 7
    out = []
    for i in range(last_group + 1):
        marker = 0x40 if i != last_group else 0
        out.append(marker + ((data >> (7 * i)) % 128))
    return bytes(out)
1bdb283a787fc6b9e65e278d581b7654088ddf87
699,631
import argparse
def get_args() -> argparse.Namespace:
    """Gets arguments."""
    parser = argparse.ArgumentParser()
    specs = (
        (('-cc', '--common-config'), 'Path to common config file'),
        (('-m', '--method'), 'The used node embedding method'),
        (('-d', '--dim'), 'The node embedding method dimensionality'),
    )
    for flags, help_text in specs:
        parser.add_argument(*flags, required=True, help=help_text)
    return parser.parse_args()
5e9f5c825804572e4f6eeeae5ab0a3aa5b0a0d82
699,632
def getSheetContent(sheet):
    """ Returns two dimensional array of all non empty content of the given sheet.
    Array contains string and double values. """
    cursor = sheet.createCursor()
    # Shrink the cursor to the sheet's used area before reading.
    cursor.gotoStartOfUsedArea(False)
    cursor.gotoEndOfUsedArea(True)
    return list(map(list, cursor.getDataArray()))
9a77c7dadcdf5247f30fa3dc4fa87cc5683cfee2
699,633
def sbas_nav_decode(dwrds: list) -> dict:
    """
    Helper function to decode RXM-SFRBX dwrds for SBAS navigation data.

    :param list dwrds: array of navigation data dwrds
    :return: dict of navdata attributes
    :rtype: dict
    """
    attributes = {"dwrds": dwrds}
    return attributes
215d945ac939a817ae84fa08607b72e0c14a1cc9
699,634
def constant(step, total_train_steps, value=1.0):
    """Constant learning rate (multiplier).

    Args:
        step: a tf.Scalar (ignored)
        total_train_steps: a number (ignored)
        value: a number or tf.Scalar

    Returns:
        a tf.Scalar, the learning rate for the step.
    """
    # Both schedule inputs are accepted only for API uniformity.
    del step, total_train_steps
    return value
53285310764c8d627ae366b2ec8e5ff98339e612
699,635
def encriptar(frase: str, clave: str, s: str = "02468") -> str:
    """Encrypt a phrase by substitution.

    Each (lowercase) letter of the phrase that appears in the key ``clave``
    is replaced by the symbol of ``s`` at the same index.

    :param frase: phrase to encrypt
    :param clave: encryption key
    :param s: substitution symbols
    :return: encrypted phrase
    """
    for letter in frase.lower():
        position = clave.find(letter)
        if position != -1:
            frase = frase.replace(letter, s[position])
    return frase
1e1a4e3369180418184411b4c74fa468976ff872
699,636
def set_image(images, row, issues, pre, overrides={}, verbose=False): """ Update an image based on known issues. Given an image, image metadata, and a set of known issues, determine if any of the known issues apply to the image in question and, if they do, make the appropriate edits to the image. Parameters ---------- images : dict Dict of (SCI, ERR, DQ) np.ndarray images row : abscal.common.exposure_data_table.AbscalDataTable A single-row table containing metadata on the image issues : dict A dictionary containing a set of parameters, along with information to identify files whose parameters should be adjusted. overrides : dict A dictionary containing any parameters whose value is being overridden by the user. verbose : bool Whether or not informational output should be printed. Returns ------- image : tuple Tuple of (SCI, ERR, DQ) np.ndarray images, as edited. """ # print("set_image with {}, {}, {}, {}".format(images, row, issues, overrides)) for issue in issues: # print(issue) # print(issue["column"], type(issue["column"])) # print(row) found = False if issue["column"] in row: if isinstance(issue["column"], str): issue_len = len(issue["value"]) if issue["value"] == row[issue["column"]][:issue_len]: found = True else: if issue["value"] == row[issue["column"]]: found = True if found: if len(issue["x"]) > 1: x1, x2 = issue["x"][0], issue["x"][1] else: x1, x2 = issue["x"][0], issue["x"][0]+1 if len(issue["y"]) > 1: y1, y2 = issue["y"][0], issue["y"][1] else: y1, y2 = issue["y"][0], issue["y"][0]+1 images[issue["ext"]][y1:y2,x1:x2] = issue["value"] if verbose: reason = issue["reason"] source = issue["source"] value = issue["value"] msg = "{}: changed ({}:{},{}:{}) to {} because {} from {}" print(msg.format(pre, y1, y2, x1, x2, value, reason, source)) return images
fae11f006dc93abdc0b04dc0aaf09c2ce4642450
699,637
import os
def makedirs(path, *paths):
    """Join one or more path components, make that directory path (using the
    default mode 0o0777), and return the full path.

    Raise OSError if it can't achieve the result (e.g. the containing
    directory is readonly or the path contains a file); not if the
    directory already exists.
    """
    full_path = os.path.join(path, *paths)
    # MODERNIZED: exist_ok=True replaces the manual errno.EEXIST/isdir dance.
    # An existing *directory* is tolerated; an existing file at the path or a
    # permissions problem still raises OSError, exactly as before.
    os.makedirs(full_path, exist_ok=True)
    return full_path
2645f2cc3b7503da95898c3b6dbb991c8ee24aa4
699,638
def boyer_moore_preprocessing(pattern, alphabet_size=4):
    """
    Bad character rule used by Boyer-Moore algorithm:
    For each character x in the alphabet, let R(x) be the position of the
    right-most occurrence of character x in P. R(x) is defined to be zero
    if x does not occur in P.
    """
    R = [0] * alphabet_size
    # Later occurrences overwrite earlier ones, leaving the rightmost index.
    for position, symbol in enumerate(pattern):
        R[symbol] = position
    return R
1d70891cfe0f0f55579c7a9349fb24d6954379fd
699,639
def max_consecutive_sum(array):
    """
    given an array of numbers (positive, negative, or 0)
    return the maximum sum of consecutive numbers
    """
    # Kadane's algorithm; seeding with max(array) keeps all-negative
    # inputs correct.
    best = max(array)
    current = 0
    for value in array:
        current = max(current, 0) + value
        best = max(best, current)
    return best
32e7322f8936f8399ec2ebe0702dbb10332cb529
699,640
def _feature_importances(clf, FEATURES): """ finds and prints feature importances for given clf """ if hasattr(clf, "feature_importances_"): results = {} params = zip(FEATURES, clf.feature_importances_) for param, importance in sorted(params, key=lambda x: x[1], reverse=True): param = param.replace("_", " ") results[param] = importance print(f'{param:>27} {importance:.3f}') return results else: return False
96e218ec60c0a780624524187274a6a7e1ea61a7
699,641
import signal
import subprocess
def run_process(process_arg_list, on_interrupt=None, working_dir=None):
    """
    This runs a process using subprocess python module but handles SIGINT
    properly. In case we received SIGINT (Ctrl+C) we will send a SIGTERM to
    terminate the subprocess and call the supplied callback.

    @param process_arg_list Is the list you would send to subprocess.Popen()
    @param on_interrupt Is a python callable that will be called in case we
        received SIGINT

    This may raise OSError if the command doesn't exist.

    @return the return code of this process after completion
    """
    assert isinstance(process_arg_list, list)
    # Remember the current SIGINT disposition so it can be restored later.
    old_handler = signal.getsignal(signal.SIGINT)
    process = subprocess.Popen(process_arg_list, cwd=working_dir)
    def handler(signum, frame):
        # Forward Ctrl+C to the child as SIGTERM instead of dying ourselves.
        process.send_signal(signal.SIGTERM)
        # call the interrupted callback
        if on_interrupt:
            on_interrupt()
    # register the signal handler
    # NOTE(review): the handler is installed *after* Popen, so a SIGINT in
    # that narrow window still hits the old handler — confirm acceptable.
    signal.signal(signal.SIGINT, handler)
    rv = process.wait()
    # after the process terminates, restore the original SIGINT handler
    # whatever it was.
    signal.signal(signal.SIGINT, old_handler)
    return rv
75b53dc2b877791f37eb6f01669ea58a423a17e0
699,642
def _mask_for_bits(i): """Generate a mask to grab `i` bits from an int value.""" return (1 << i) - 1
53fc285225632cce34a74536a085cfe0af10300a
699,644
import random
def random_int(min_num=1, max_num=200):
    """
    Return an int inclusively between min_num and max_num.

    :param min_num: lower bound (inclusive)
    :param max_num: upper bound (inclusive)
    :return: {int} a number
    """
    # randrange's upper bound is exclusive, hence the +1 — same distribution
    # as randint(min_num, max_num).
    return random.randrange(min_num, max_num + 1)
0c481f889f4a40e8a72a1efa44244e113f395168
699,646
import os
import sys
def gettestfiles(testdir=None, randomizer=None):
    """Get all test files from the passed test directory. If none is
    passed, use the default sdl test directory.
    """
    if not testdir:
        testdir = os.path.dirname(__file__)
    # Side effect: the test directory is made importable.
    if testdir not in sys.path:
        sys.path.append(testdir)
    suffix = "_test" + os.extsep + "py"
    testfiles = [name for name in os.listdir(testdir) if name.endswith(suffix)]
    if randomizer:
        randomizer.shuffle(testfiles)
    else:
        testfiles.sort()
    return testdir, testfiles
64383d657f074f22d9193e49e3838ee6f4a8c290
699,647
def normalize_prefix(prefix):
    """
    Removes a single leading and a single trailing slash from a URL path
    prefix.

    :param str prefix:
    :rtype: str
    """
    if prefix and prefix[0] == "/":
        prefix = prefix[1:]
    if prefix and prefix[-1] == "/":
        prefix = prefix[:-1]
    return prefix
0db359f10fa213bf638fa3fca5bd58c530faf788
699,648
def checkIfDuplicates_2(listOfElems):
    """ Check if given list contains any duplicates """
    seen = set()
    for elem in listOfElems:
        if elem in seen:
            # Early exit on the first repeat.
            return True
        seen.add(elem)
    return False
a7d9f322faefa4b0b0191ca96097bbf38c61ee3d
699,649
import platform
def is_windows():
    """
    Check and return if running platform is Windows.

    Arguments:
        None
    Returns:
        bool
    """
    # win32_ver() returns all-empty strings on non-Windows platforms.
    win_version = platform.win32_ver()
    return any(win_version)
4c166c57d96c84bafa6750327b7481be584129e8
699,650
async def root():
    """
    Default endpoint for testing if the server is running

    :return: Positive JSON Message
    """
    # BUG FIX: the original returned a *set* literal ({"..."}), which is not
    # JSON-serializable and contradicts the documented "JSON Message".
    # Wrap the text in a dict instead.
    return {"message": "MLDatasetTemplate is Running!"}
17fccde4f21561a5166e39ca43df9e88539e0b2e
699,651
def get_details_format(s: str, lang: str = 'zh-cn'):
    """
    Build the solvusoft file-extension software details URL.

    Parameters
    ----------
    s: Company Name
    lang: Lang

    Returns
    -------
    URL
    """
    template = "http://www.solvusoft.com/%s/file-extensions/software/%s/"
    return template % (lang, s)
4df1c9526febf2eadb9f6fe9d13d8b9615535aa2
699,652
def delete(context, key):
    """Delete a key from the current task context.

    Returns the removed value, or None when the key was absent.
    """
    removed = context.pop(key, None)
    return removed
0b697ede943653ba41e7c50fff86907f93becee1
699,653
def matches_beginning(prefix: str, allowlist_key: str) -> bool:
    """
    :param prefix: the value of the prefix query parameter
    :param allowlist_key: the key from the allowlist
    :return: a bool of whether the prefix can be found at the start of the
        allowlist key. Both values are stripped of leading `/` before
        comparison.
    """
    haystack = prefix.lstrip('/')
    needle = allowlist_key.lstrip('/')
    return haystack.startswith(needle)
ef047dfe16722d98b8fe894d3c400330a2defd74
699,654
def ExecuteFunction(function, *args, **kwargs):
    """Stub method so that it can be used for mocking purposes as well."""
    result = function(*args, **kwargs)
    return result
1b3d30c4053fe7b64f530d1c2e45518473c27b0e
699,655
import torch
def create_tau( fval, gradf, d1x, d2x, smoothing_operator=None ):
    """
    tau = create_tau( fval, gradf, d1x, d2x )

    In:
    fval: torch.FloatTensor of shape B
    gradf: torch.FloatTensor of shape B*C*H*W
    d1x: torch.FloatTensor of shape B*C*H*W
    d2x: torch.FloatTensor of shape B*C*H*W
    smoothing_operator: function
        A self-adjoint smoothing operator sending torch.FloatTensor
        of shape B*2*H*W to torch.FloatTensor of shape B*2*H*W.

    Out:
    tau: torch.FloatTensor of shape B*H*W*2
    """
    B,C,H,W = gradf.shape
    # Sum over color channels
    alpha1 = torch.sum( gradf*d1x, 1).unsqueeze_(1)
    alpha2 = torch.sum( gradf*d2x, 1).unsqueeze_(1)
    # stack vector field components into shape B*2*H*W
    tau = torch.cat([alpha1,alpha2], 1)
    # Smoothing
    if smoothing_operator:
        tau = smoothing_operator( tau )
        # torch can't sum over multiple axes.
        # NOTE: the norm is deliberately taken after the *first* smoothing
        # pass only; the operator is assumed self-adjoint (see docstring).
        norm_squared_alpha = (tau**2).sum(1).sum(1).sum(1)
        # In theory, we need to apply the filter a second time.
        tau = smoothing_operator( tau )
    else:
        # torch can't sum over multiple axes.
        norm_squared_alpha = (tau**2).sum(1).sum(1).sum(1)
    # Scale so that the linearized objective decreases by fval.
    # NOTE(review): divides by norm_squared_alpha with no zero guard —
    # a vanishing gradient field yields NaN/Inf; confirm callers exclude it.
    scale = -fval/norm_squared_alpha
    tau *= scale.view(B,1,1,1)
    # rearrange for compatibility with compose(), B*2*H*W -> B*H*W*2
    return tau.permute( 0, 2, 3, 1 ).detach()
5e086908e432fbc6a34e1ce72bee84a1f467823e
699,656
def format_seconds(delta):
    """
    Given a time delta object, calculate the total number of seconds and
    return it as a string.
    """
    # MODERNIZED: timedelta.total_seconds() (Python 2.7+) computes exactly the
    # same float as the old manual microsecond arithmetic helper did.
    return str(delta.total_seconds())
60248f96a64b04be6480e27aa134c943461a6daa
699,657
def defineShapePerimeter():
    """Define the perimeter and radius of each shape for different sizes"""
    # (small, medium, large) radius ranges per shape.
    bounds = {
        "circle": ([16, 25], [32, 40], [45, 58]),
        "quadrilateral": ([16, 32], [40, 48], [56, 72]),
        "triangle": ([20, 38], [50, 60], [70, 88]),
        "pentagon": ([14, 28], [36, 44], [52, 68]),
        "hexagon": ([14, 28], [35, 44], [49, 64]),
    }
    return {
        shape: {"small": small, "medium": medium, "large": large}
        for shape, (small, medium, large) in bounds.items()
    }
92ad63dcfe4f93fc8910bdb41d52f80169a91c1c
699,658
def scanner_position(n, t):
    """Return position of scanner of range n at time t.

    The scanner bounces between 0 and n-1 with period 2*(n-1).
    """
    half_period = n - 1
    phase = t % (2 * half_period)
    return half_period - abs(phase - half_period)
3651f5997b370e703e09ff1f97c1d7d249cf286c
699,659
import socket
import struct
def discover(service, timeout=2, retries=1):
    """discover pilight servers

    Sends an SSDP M-SEARCH to the standard multicast group and collects at
    most one raw response per retry. Returns the list of raw response bytes.
    """
    # Standard SSDP multicast group/port.
    group = ("239.255.255.250", 1900)
    message = "\r\n".join([
        'M-SEARCH * HTTP/1.1',
        'HOST: {0}:{1}'.format(*group),
        'MAN: "ssdp:discover"',
        'ST: {st}',
        'MX: 3',
        '', ''])
    responses = {}  # pylint: disable=redefined-outer-name
    i = 0
    for _ in range(retries):
        i += 1
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # Kernel-level receive timeout of 10 ms (struct timeval {0, 10000}).
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack('LL', 0, 10000))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Limit multicast propagation to 2 hops.
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
        sock.settimeout(timeout)
        sock.sendto(bytes(message.format(st=service), 'UTF-8'), group)
        while True:
            try:
                # Stores only the *last* response per retry (keyed by i).
                responses[i] = sock.recv(1024+1)
                break
            except socket.timeout:
                break
            except Exception as ex:  # pylint: disable=broad-except
                print("no pilight ssdp connections found")
                print(ex)
                break
        sock.close()
    return list(responses.values())
6e220b66de64b00d18d71883791dd2d1ad5d9a96
699,660
import argparse
def load_arguments():
    """Parse the arguments for the cli_env_autoinstall.py module."""
    parser = argparse.ArgumentParser(
        description="Create a new conda environment for a repo and installs its dependencies.")
    parser.add_argument(
        "folder_input", default="",
        help="Folder containing the source files")
    parser.add_argument(
        "--conda_env", "-n", default="test",
        help="Name of conda environment to create")
    parser.add_argument(
        "--python_version", "-py", default="3.6.7",
        help="Python version to use in the conda environment")
    parser.add_argument(
        "--packages", "-p", default="numpy",
        help="Custom/extra packages to install in conda env, e.g., \"numpy tensorflow\"")
    parser.add_argument(
        "--mode", default="test",
        help="conda environment mode which can be test/ prod /uat")
    return parser.parse_args()
f9e0b2b3bed71bb149478e86729e60cc9fe15af7
699,661
def find_tree_root(tree, key): """Find a root in a tree by it's key :param dict tree: the pkg dependency tree obtained by calling `construct_tree` function :param str key: key of the root node to find :returns: a root node if found else None :rtype: mixed """ result = [p for p in tree.keys() if p.key == key] assert len(result) in [0, 1] return None if len(result) == 0 else result[0]
372bf64f3230fe01d4369ae79bd61edc503b0fd6
699,662
def convert_str_to_list(sequence: str, is_ordered_sequence: bool = True,
                        is_first_term_seq_name: bool = True):
    """
    Convert a string of separated numbers into a list of ints.

    sequence: A string that contains comma (or space) separated numbers
    is_ordered_sequence: True to split on commas, False to split on spaces
    is_first_term_seq_name: True to drop the first term
        (i.e. A01255,1,3,5, ...)
    return: A list of integers (String ---> List)
    """
    cleaned = sequence.strip().strip(",")
    separator = "," if is_ordered_sequence else " "
    terms = cleaned.split(separator)
    if is_first_term_seq_name:
        terms = terms[1:]  # drop the sequence name
    return [int(term) for term in terms]
f73213ae3484e00824920722eb58368bb116886b
699,663
import os
def extract_SENTINEL_date(sen_directory):
    """
    extracts the acquisition date of SENTINEL scenes sorted earlier on
    into a new list

    :return: list of timestamp substrings (characters 8-17 of each filename)
    """
    # The date is assumed to sit at a fixed offset in the scene filename.
    return [filename[8:18] for filename in os.listdir(sen_directory)]
1688953656cbb48a088b5ed89159847cf66a17f7
699,664
import requests
import sys
import re
from bs4 import BeautifulSoup
def souper(url):
    """Turns a given URL into a BeautifulSoup object.

    Exits the process (code 1) on any network failure; returns None when
    Stack Overflow served a captcha page instead of results.
    """
    try:
        html = requests.get(url)
    except requests.exceptions.RequestException:
        print('''Dope was unable to fetch Stack Overflow results. Please check that you are connected to the internet.\n''')
        sys.exit(1)
    # A redirect to ".com/nocaptcha" marks a captcha interstitial.
    if re.search("\.com/nocaptcha", html.url):
        # URL is a captcha page
        return None
    else:
        return BeautifulSoup(html.text, "html.parser")
4701c096b19a093eb16d169270368b7be6fe0344
699,665
def filter_chants_without_notes(chants, logger=None):
    """Exclude all chants without notes"""
    notes_pattern = r'[89abcdefghjklmnopqrs\(\)ABCDEFGHJKLMNOPQRS]+'
    # The `== True` comparison maps NaN (missing volpiano) to False.
    has_notes = chants.volpiano.str.contains(notes_pattern) == True
    return chants[has_notes]
98324a2b9c17d975ebfc7860ad9ca38e65db481e
699,666
def correct_eval_poly(d):
    """Evaluate the polynomial d["poly"] at point d["x"].

    d["poly"] is a list of floats where poly[i] is the coefficient of
    degree i.

    Returns
    -------
    (d, float)
        The input dict unchanged and the value of the polynomial at x.

    Example
    -------
    >>> correct_eval_poly({"poly": [1.0, 1.0], "x": 2})[1]
    3.0
    """
    poly, x = d["poly"], d['x']
    result = 0.0
    power = 1
    for coefficient in poly:
        result += coefficient * power
        power *= x
    return (d, result)
5a0042c0fb28a5fa4891f86b8b8fa70516fbed34
699,667
import torch
def zeros(shape, dtype=None, device = None):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : A list of integers, a tuple of integers, or a 1-D Tensor of
        type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor
    device : str or None
        'cpu', 'gpu', or a value passed through to torch unchanged.

    Returns
    -------
    A Tensor with all elements set to zero.
    """
    if device == 'cpu':
        target = torch.device('cpu')
    elif device == 'gpu':
        # Fall back to CPU when CUDA is unavailable.
        target = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    else:
        target = device
    return torch.zeros(size=shape, dtype=dtype, device=target)
e8e7b18a1f0d2999152504536709388440c56f1a
699,668
import random
def chapter_uid_generator() -> str:
    """Random number generator for Mastroka chapter UIDs.

    Returns a uniformly random 19-digit integer in [1e18, 1e19) as a string.
    """
    uid = random.choice(range(int(1E18), int(1E19)))
    return str(uid)
024ea43a93d3e94576324364375542338b859d13
699,669
import os
import requests
import json
def Geocoding(long, lat):
    """
    Reverse-geocode a coordinate to street-level address info via the Baidu
    geocoder API.

    :param long: longitude
    :param lat: latitude
    :return: (formatted address, full JSON response); the address is ""
        when the API reports a non-zero status.
    """
    # API key is read from the 'baidu' environment variable.
    baidu_key = os.environ.get('baidu', '')
    # NOTE: the Baidu API expects "lat,long" order in the location field.
    url = "http://api.map.baidu.com/geocoder/v2/?location=%s,%s&output=json&pois=1&ak=%s"\
    %(lat, long, baidu_key)
    r = requests.get(url)
    print("Geocoding url ==> %s" % r.url)
    print("Geocoding text ==> %s" % r.text)
    data = r.json()
    if data['status'] == 0:
        return data['result']['formatted_address'], json.dumps(data)
    else:
        return "", json.dumps(data)
01cf7f8bde7a365f70e67a5ef1cc6d2af74d00e5
699,670
from datetime import datetime
def generate_header(header: str = "") -> str:
    """
    Generates a file header.

    Args:
        header: A custom header to insert.

    Returns:
        str: The generated header of a file.
    """
    if header:
        return header
    banner = " Warning generated file ".center(90, "-")
    stamp = datetime.now().isoformat()
    return "\n".join([banner, f"Generated at: {stamp}", "".center(90, "-")])
0821aef1f5f77dcd7b9fb1bcbcbca74749c3ed4a
699,671
def _mbits(num_bytes, duration_ms) -> float: """Return Mbit/s.""" mbits = (num_bytes * 8) / (1024 * 1024) seconds = duration_ms / 1000 if seconds == 0: return 0 return mbits / seconds
8e4a1157ddb5f4d0361a553081aa8c6c3b0d62a6
699,672
def goto_definitions(script):
    """Get definitions for thing under cursor."""
    definitions = script.goto_definitions()
    return definitions
b614c72080b51e7c91100359cd863f9a8deae228
699,673
import argparse
def nstr(value):
    """String converter that checks for contents"""
    stripped = value.strip()
    if stripped:
        return stripped
    raise argparse.ArgumentTypeError("a non-empty, non-whitespace string is expected")
7b395e2384cc90956c5938a5d0717a8789eb8337
699,674
def get_adgroup_df(ads_df, num_days=90):
    """
    Adgroups - Look at the effectiveness of different ad groups in bounce
    and close rate.
    * time windowed - look at the last 1, 7, 14, 28, and 90 days. Look for
      major changes in 1-7 and 7-28

    NOTE(review): `num_days` is accepted but never used in this body —
    confirm whether windowing was meant to be applied here.
    """
    adgroups = ads_df.groupby('adgroup')
    adgroup_df = adgroups.mean()
    # Per-group user count and nav_depth variance (.get tolerates a
    # missing nav_depth column).
    adgroup_df['num_users'] = adgroups.count()['user']
    adgroup_df['nav_depth_var'] = adgroups.var().get('nav_depth')
    adgroup_df = adgroup_df.sort_values('num_users', ascending=False)
    adgroup_df = adgroup_df.drop(columns=['survey_id', 'user', 'is_bot'])
    # Keep only adgroups with a meaningful sample size.
    adgroup_df = adgroup_df.loc[adgroup_df['num_users'] > 10]
    adgroup_df.reset_index(level=0, inplace=True)
    return adgroup_df
1723064da39554cdc6ddea50d69a07ffc65c08b1
699,675
def star(fn):
    """Wrap function to expand input to arguments"""
    def starred(args):
        return fn(*args)
    return starred
4ec8eb075fda1b091cb780ef6af5f42284de96dd
699,676
import argparse def _comma_separated_strings(string): """Parses an input consisting of comma-separated strings.""" error_msg = 'Argument should be a comma-separated list of strings: {}' values = string.split(',') if not values: raise argparse.ArgumentTypeError(error_msg.format(string)) return values
cb552351fc40eb59fe7a60cd4064d25e040f0a7b
699,677
def convert2numeral(item, cls=int, default=None):
    """ Convert an argument to a new type unless it is `None`. """
    try:
        return cls(item)
    except (ValueError, TypeError):
        # Unconvertible values (including None) fall back to the default.
        return default
4c2e741eb0d7bcf6d6450db7c49f14895e0f1b15
699,678
import os
def randomize_queries(queries):
    """
    Formats the results of `query.get_randomize`.

    :type list
    :param queries: Unformatted queries
    :rtype str
    :return Formatted queries
    """
    if not queries:
        return ''
    # Every query, including the last, is terminated by ";<newline>".
    terminator = ';' + os.linesep
    return terminator.join(queries) + terminator
28b8db4ebb5ac476868808457f4f34f9d0418730
699,679
import re
def dna_to_re(seq):
    """ Return a compiled regular expression that will match anything
    described by the input sequence. For example, a sequence that contains
    a 'N' matches any base at that position. """
    # IUPAC ambiguity codes -> regex character classes.
    iupac = {
        'K': '[GT]', 'M': '[AC]', 'R': '[AG]', 'Y': '[CT]',
        'S': '[CG]', 'W': '[AT]', 'B': '[CGT]', 'V': '[ACG]',
        'H': '[ACT]', 'D': '[AGT]', 'X': '[GATC]', 'N': '[GATC]',
    }
    # IMPROVED: str.translate substitutes all codes in one pass, so a
    # replacement can never be re-expanded by a later substitution (the old
    # chain of 12 .replace() calls relied on replacement text containing no
    # ambiguity codes).
    return re.compile(seq.translate(str.maketrans(iupac)))
86afb929b2281f0f875a1f11ca8cf36584b2f895
699,680
from typing import Callable
import time
def create_timer() -> Callable[[], float]:
    """Create a timer function that returns elapsed time since creation
    of the timer function"""
    started_at = time.time()
    return lambda: time.time() - started_at
e97fbb8b6fded209d1e5659f548feac726d9cf04
699,681
import requests
import logging
def get_recent_posts_instagram(handle):
    """Api for basic details

    Arguments:
        handle {str}: username

    Returns:
        json -- returns basic details, or None on a non-200 response or
        any error
    """
    result = None
    try:
        url = "https://www.instagram.com/{}/?__a=1".format(handle)
        response = requests.get(url)
        if (response.status_code != 200):
            return None
        result = response.json()
    except Exception as e:
        # Broad catch is deliberate best-effort: any failure is logged and
        # None is returned instead of raising.
        logging.error("error is {}".format(e))
    return result
a9e60be1d4f31e3c14831eaf64b5eb2ff43d249d
699,682
import os
import logging
import requests
def download_file(filename, url, overwrite=False):
    """
    Check if file exist and download it if necessary.

    Parameters
    ----------
    filename : str
        Full filename with path.
    url : str
        Full URL to the file to download.
    overwrite : boolean (default False)
        If set to True the file will be downloaded even though the file
        exits.

    Returns
    -------
    int
        The HTTP status code of the download request, or 1 when the file
        already existed and no download was attempted.
    """
    if not os.path.isfile(filename) or overwrite:
        if overwrite:
            logging.warning("File {0} will be overwritten.".format(filename))
        else:
            logging.warning("File {0} not found.".format(filename))
            logging.warning("Try to download it from {0}.".format(url))
        req = requests.get(url)
        # NOTE(review): the body is written regardless of the HTTP status;
        # callers must inspect the returned status code.
        with open(filename, "wb") as fout:
            fout.write(req.content)
        logging.info(
            "Downloaded from {0} and copied to '{1}'.".format(url, filename)
        )
        r = req.status_code
    else:
        r = 1
    return r
18a9458cc04eec3f713c21296358e2cc0fe497c3
699,683
def remove_base64(examples):
    """Remove base64-encoded string if "path" is preserved in example."""
    for eg in examples:
        for field in ("audio", "video"):
            # Only swap when the field holds an inline data URI and a file
            # path is available as a substitute.
            if field in eg and eg[field].startswith("data:") and "path" in eg:
                eg[field] = eg["path"]
    return examples
44442b868ff57d57d65f63bc65b681c859a1ca52
699,684
from typing import Mapping
def recursive_update(old_dict, update_dict):
    """
    Update one embed dictionary with another, similar to dict.update(),
    but recursively update dictionary values that are dictionaries as well.

    Based on the answers in
    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    for key, new_value in update_dict.items():
        if isinstance(new_value, Mapping):
            # Recurse into nested mappings instead of replacing wholesale.
            old_dict[key] = recursive_update(old_dict.get(key, {}), new_value)
        else:
            old_dict[key] = new_value
    return old_dict
7f0c4fdca6a58f8416e5f9c2762918fa776d8d9d
699,685
import copy
def merge_dicts(dict1, dict2):
    """Recursively merge two dictionaries.

    Values in dict2 override values in dict1. If dict1 and dict2 contain a
    dictionary as a value, this will call itself recursively to merge these
    dictionaries. This does not modify the input dictionaries (creates an
    internal copy).

    Parameters
    ----------
    dict1: dict
        First dict.
    dict2: dict
        Second dict. Values in dict2 will override values from dict1 in
        case they share the same key.

    Returns
    -------
    return_dict: dict
        Merged dictionaries.
    """
    if not isinstance(dict1, dict):
        raise ValueError(f"Expecting dict1 to be dict, found {type(dict1)}.")
    if not isinstance(dict2, dict):
        raise ValueError(f"Expecting dict2 to be dict, found {type(dict2)}.")
    merged = copy.deepcopy(dict1)
    for key, value in dict2.items():
        if key in dict1 and isinstance(value, dict) and isinstance(dict1[key], dict):
            merged[key] = merge_dicts(dict1[key], dict2[key])
        else:
            merged[key] = value
    return merged
b3dccd6301be21a096bb3d299793b4cf1461c3d9
699,686
def convert_geoId(fips_code):
    """Creates geoId column

    The FIPS code is zero-padded to two digits.
    """
    return f"geoId/{str(fips_code).zfill(2)}"
76c644d3f4d4da292d33b3e61e82eecfe7860434
699,687
def adder_function(args):
    """Dummy function to execute returning a single float."""
    loc, scale = args
    total = loc + scale
    return total
7646bb1acc324f05c92268b78fefaa75283f812a
699,688
import requests
def get_api_results(url, id):
    """Fetch one member record from an external API.

    Args:
        url ([str]): [External API url] -- a format string with one ``{}``
            placeholder for the member id
        id ([int]): [member id]

    Returns:
        [json]: [API request response]
    """
    r = requests.get(url.format(id))
    return r.json()
4dc686c616f3ea9124c866b593d44bdc63e54d1d
699,689
def window_optical_flow(vec, window):
    """ Return pairs of images to generate the optical flow.

    These pairs contain the first and the last image of the optical flow
    according to the size of the window. When `first + window` is not a
    valid id, the last id in `vec` is used instead.

    Parameters:
    -----------
    vec : array_like
        sorted list containing the image ids (type int)
    window : int
        size of the window to generate the optical flow

    Returns:
    --------
    pairs : array_like
        list of (first image, last image) tuples

    Usage:
    ------
    >>> window_optical_flow([0, 1, 2, 3], 2)
    [(0, 2), (1, 3), (2, 3), (3, 3)]
    """
    pairs = []
    for first in vec:
        last = first + window
        pairs.append((first, last if last in vec else vec[-1]))
    return pairs
dde566f6eff6845cb16a1acec95a7bdbc4609b11
699,691
def text_width(text_item):
    """Returns width of Autocad `Text` or `MultiText` object

    Width is the horizontal extent of the item's bounding box.
    """
    lower_left, upper_right = text_item.GetBoundingbox()
    return upper_right[0] - lower_left[0]
001816ff5937d1e286f00088ede076b86590a951
699,692
def get_years_for_valid_fwi_values(df) -> list:
    """ List each year that is sorted

    A year qualifies when it has at least one row with any of the FWI
    validity flags set; the union is returned sorted ascending.
    """
    years = set()
    for flag in ('ffmc_valid', 'bui_valid', 'isi_valid'):
        years.update(df[df[flag]].year.unique().tolist())
    return sorted(years)
41b3725680b322932217c5d3cd97c5922eca5105
699,693
from bs4 import BeautifulSoup
def is_html(string):
    """ Check if string contains html.
    If html, return true, otherwise, return false. """
    soup = BeautifulSoup(string, 'html.parser')
    # bool() is kept deliberately: a Tag's truthiness follows its child
    # count, matching the original behavior exactly.
    return bool(soup.find())
404723e869608ad7949c144c2f11c0bf629b0262
699,694
def load_sig_owners(sig_name):
    """Load the owner list of the specified SIG.

    Reads ``sig/<sig_name>/OWNERS`` and collects every ``- name`` entry,
    rewritten as ``@name``.

    Args:
        sig_name (str): Name of the SIG whose OWNERS file is read.

    Returns:
        list|None: The ``@``-prefixed owner names, or None when the file
        cannot be opened or read.
    """
    owners = []
    owners_file = "sig/{}/OWNERS".format(sig_name)
    try:
        with open(owners_file, 'r') as file_descriptor:
            for line in file_descriptor.readlines():
                if line.strip().startswith('-'):
                    # "- name" entries become "@name" mentions.
                    owners.append(line.replace('- ', '@').strip())
    except IOError as error:
        # FIX: the message contained a "{}" placeholder but was never
        # formatted (print received the template and args separately);
        # format it explicitly so the path appears in the message.
        print("Error: 没有找到文件或读取文件失败 {}. {}".format(owners_file, error))
        return None
    return owners
725cfdceea02a319121770f3ec20c192f641193c
699,695
import codecs


def get_text(filename: str) -> str:
    """Load and return the text of a file, decoded as latin-1 (the
    encoding used by the BBC corpus).

    Uses codecs.open() rather than the builtin open().
    """
    with codecs.open(filename, encoding='latin-1', mode='r') as handle:
        return handle.read()
43d6036ba8c10946d704dee1cd32b1968de5c199
699,696
def summerE(n: int) -> int:
    """Sum of all multiples of 3 or 5 below *n*.

    Uses the arithmetic-series formula with inclusion-exclusion over the
    multiples of 3, 5 and 15, so no multiples are ever generated.
    """
    def series_total(divisor: int) -> int:
        # divisor + 2*divisor + ... up to the largest multiple below n.
        terms = (n - 1) // divisor
        return divisor * terms * (terms + 1) // 2

    return series_total(3) + series_total(5) - series_total(15)
6b4ee36075a5b46bd51c58c3bd44c103061c7cd1
699,697
import requests


def post_data(url, file):
    """Upload a file to an API endpoint.

    :param url: API endpoint url
    :param file: path of the file to upload
    :return: the ``picName`` from the response data on success,
             otherwise False
    """
    # FIX: open the upload handle in a context manager so it is always
    # closed; the original leaked the file object.
    with open(file, "rb") as handle:
        files = {"file": handle}
        s = requests.session()
        r = s.post(url, files=files, verify=False)
    r_json = r.json()
    print('r', r_json)
    if r_json.get('success'):
        return r_json.get('data').get('picName')
    else:
        return False
24e53797698e61789c3233cc1bf150fdbf0485fd
699,698
def evidence_type_number_to_name(num: int) -> str:
    """
    Transforms evidence type number to it's corresponding name
    :param num: The evidence type number (1-based)
    :return: The string name of the evidence type, or 'Unknown' when the
             number is out of range
    """
    supported_types = ['Network', 'Process', 'File', 'Registry', 'Security', 'Image', 'DNS']
    # FIX: guard the lower bound too — num <= 0 previously indexed from
    # the END of the list (e.g. 0 -> 'DNS', -1 -> 'Image') instead of
    # reporting 'Unknown'.
    if 1 <= num <= len(supported_types):
        return supported_types[num - 1]
    return 'Unknown'
1f6a8e57334e08e997a3f86e629df04cb9602594
699,699
def energy(_x, _params):
    """Kinetic and potential energy of a rigid body.

    _x packs the state, in order: q1..q3 (Euler 3-1-2 yaw/lean/pitch
    orienting A), q4..q6 (N-frame displacements of the mass center),
    u1..u3 (A-frame angular-velocity measure numbers) and u4..u6
    (N-frame velocities of the mass center).

    _params packs: m (mass), g (gravitational constant) and I11, I22,
    I33 (principal moments of inertia about A[1..3]).

    Returns [kinetic energy, potential energy].
    """
    q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6 = _x
    m, g, I11, I22, I33 = _params
    # Rotational terms then translational terms, each halved.
    terms = [I11 * u1 ** 2, I22 * u2 ** 2, I33 * u3 ** 2,
             m * u4 ** 2, m * u5 ** 2, m * u6 ** 2]
    ke = sum(t / 2 for t in terms)
    pe = -g * m * q6
    return [ke, pe]
f83a9c292cd370614c04b14d8ba3c96bf0f6143e
699,700
def solution(n):
    """Returns the largest palindrome made from the product of two 3-digit
    numbers which is less than n.

    >>> solution(20000)
    19591
    >>> solution(30000)
    29992
    >>> solution(40000)
    39893
    """
    for candidate in range(n - 1, 10000, -1):
        text = str(candidate)
        if text != text[::-1]:
            continue
        # Candidate is a palindrome: accept it when some 3-digit divisor
        # yields a 3-digit quotient; otherwise try the next number down.
        for divisor in range(999, 99, -1):
            if candidate % divisor == 0 and len(str(candidate // divisor)) == 3:
                return candidate
67e91eb9bbd778dd67bac7a2287f1d333ff49961
699,701
import requests


def get_epoch():
    """Fetch the current epoch list from the 0L explorer."""
    endpoint = "https://0l.interblockcha.in:444/epochs"
    return requests.get(endpoint).json()
f46af8fe10e7b8dec5368b949c1e338a2c02b340
699,702
def select(population, to_retain):
    """Retain the fittest members of the population.

    The sorted population is split into a lower (female) and upper (male)
    half; the best ``to_retain // 2`` members of each half are kept.

    Returns:
        tuple: (selected_males, selected_females)
    """
    ranked = sorted(population)
    per_sex = to_retain // 2
    half = len(ranked) // 2
    females, males = ranked[:half], ranked[half:]
    return males[-per_sex:], females[-per_sex:]
8621bf02dc4ba528b4d7e327826c3d53e893065e
699,703
import os
import yaml


def load_conf(config_path):
    """
    Load the YAML configuration file at *config_path*.

    ``~`` and environment variables in the path are expanded first.  When
    the file is absent a hint is printed and an empty dict is returned.
    :return: the parsed configuration
    """
    expanded = os.path.expandvars(os.path.expanduser(config_path))
    try:
        with open(expanded) as config_file:
            return yaml.load(config_file, Loader=yaml.FullLoader)
    except FileNotFoundError:
        print("No configuration found. To create configuration try \"pingmfa --configure\"")
        return {}
f8c9b3c27ca11286a4b771203eb54826b7bc9a6a
699,704
def axis_slicer(n, sl, axis):
    """
    Build an indexing tuple for an `n`-dimensional array that applies
    slice `sl` on `axis` (negative axes allowed) and leaves every other
    axis fully selected.
    """
    full = slice(None)
    index = [full for _ in range(n)]
    index[axis] = sl
    return tuple(index)
0fdd64be34428da20c79d8c52a22c916cb5afe19
699,705
def zzx_degree(f):
    """Returns leading degree of f in Z[x]: one less than the number of
    coefficients."""
    return -1 + len(f)
f24b966a69c998014a54542d906bbbf62f027126
699,706
import socket
import ssl


def wrap_socket(
    conn: socket.socket,
    keyfile: str,
    certfile: str,
) -> ssl.SSLSocket:
    """Upgrade a server-side socket to TLS.

    The context refuses every legacy protocol up to and including
    TLS 1.1 and does not verify the peer certificate.
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.options |= (
        ssl.OP_NO_SSLv2
        | ssl.OP_NO_SSLv3
        | ssl.OP_NO_TLSv1
        | ssl.OP_NO_TLSv1_1
    )
    context.verify_mode = ssl.CERT_NONE
    context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    return context.wrap_socket(conn, server_side=True)
8981c86922aa28c6aded189ef425e232e93425b0
699,707
import argparse


def get_args(batch_size=64, image_size=32, n_classes=10, max_iter=100000, sample_size=50000):
    """
    Get command line arguments for the Self-Attention GAN (SAGAN) example.

    The function's parameters set the default values of the corresponding
    command line arguments.
    """
    description = "Example of Self-Attention GAN (SAGAN)."
    # FIX: the description was passed positionally, which argparse binds
    # to `prog` (the program name); pass it by keyword so --help shows it
    # as the description.
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-d", "--device-id", type=str, default="0",
                        help="Device id.")
    parser.add_argument("-c", "--context", type=str, default="cudnn",
                        help="Context.")
    parser.add_argument("--type-config", "-t", type=str, default='float',
                        help='Type of computation. e.g. "float", "half".')
    parser.add_argument("--image-size", type=int, default=image_size,
                        help="Image size.")
    parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
                        help="Batch size.")
    parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
                        help="Max iterations.")
    parser.add_argument("--num-generation", "-n", type=int, default=1,
                        help="Number of iterations for generation.")
    parser.add_argument("--save-interval", type=int,
                        default=sample_size // batch_size,
                        help="Interval for saving models.")
    parser.add_argument("--latent", type=int, default=128,
                        help="Number of latent variables.")
    parser.add_argument("--maps", type=int, default=128,
                        help="Number of latent variables.")
    parser.add_argument("--monitor-path", type=str, default="./result/example_0",
                        help="Monitor path.")
    parser.add_argument("--model-load-path", type=str,
                        help="Model load path to a h5 file used in generation and validation.")
    parser.add_argument("--lrg", type=float, default=1e-4,
                        help="Learning rate for generator")
    parser.add_argument("--lrd", type=float, default=1e-4,
                        help="Learning rate for discriminator")
    # FIX: help text was copy-pasted from --lrd; --n-critic is the number
    # of discriminator updates per generator update.
    parser.add_argument("--n-critic", type=int, default=5,
                        help="Number of critic (discriminator) updates per generator update.")
    parser.add_argument("--beta1", type=float, default=0.5,
                        help="Beta1 of Adam solver.")
    parser.add_argument("--beta2", type=float, default=0.9,
                        help="Beta2 of Adam solver.")
    parser.add_argument("--lambda_", type=float, default=10.0,
                        help="Coefficient for gradient penalty.")
    parser.add_argument("--up", type=str,
                        choices=["nearest", "linear", "unpooling", "deconv"],
                        help="Upsample method used in the generator.")
    args = parser.parse_args()
    return args
96065f41d0744ddef244a515b44d9073dfdf94ad
699,708
def is_visible(self, y):
    """Checks whether a given point is within the currently visible area
    of the markdown area.

    Used to handle text which is longer than the specified height of the
    markdown area, and during scrolling.

    :param self: MarkdownRenderer
    :param y: y-coordinate
    :return: boolean
    """
    if self.is_above_area(y):
        return False
    return not self.is_below_area(y)
aa982d8fadf70f970e084ead9be07916d2599217
699,709
import resource


def _IncreaseSoftLimitForResource(resource_name, fallback_value):
    """Sets a new soft limit for the maximum number of open files.

    The soft limit is used for this process (and its children), but the
    hard limit is set by the system and cannot be exceeded.

    We will first try to set the soft limit to the hard limit's value; if
    that fails, we will try to set the soft limit to the fallback_value
    iff this would increase the soft limit.

    Args:
      resource_name: Name of the resource to increase the soft limit for.
      fallback_value: Fallback value to be used if we couldn't set the
          soft value to the hard value (e.g., if the hard value is
          "unlimited").

    Returns:
      Current soft limit for the resource (after any changes we were able
      to make), or -1 if the resource doesn't exist.
    """
    # Get the value of the resource.
    try:
        (soft_limit, hard_limit) = resource.getrlimit(resource_name)
    except (resource.error, ValueError):
        # The resource wasn't present, so we can't do anything here.
        return -1

    # Try to set the value of the soft limit to the value of the hard limit.
    if hard_limit > soft_limit:  # Some OS's report 0 for "unlimited".
        try:
            resource.setrlimit(resource_name, (hard_limit, hard_limit))
            return hard_limit
        except (resource.error, ValueError):
            # We'll ignore this and try the fallback value.
            pass

    # Try to set the value of the soft limit to the fallback value.
    # NOTE: hard_limit may be below fallback_value here (e.g. when the
    # raise-to-hard attempt above failed); setrlimit then raises and we
    # fall into the except branch below.
    if soft_limit < fallback_value:
        try:
            resource.setrlimit(resource_name, (fallback_value, hard_limit))
            return fallback_value
        except (resource.error, ValueError):
            # We couldn't change the soft limit, so just report the current
            # value of the soft limit.
            return soft_limit
    else:
        return soft_limit
aa71fa41f721a612e44c7ce9b65a025f2e9d1bba
699,710
def get_edges(tree):
    """Flatten a url tree into (source page, end page) edges.

    The tree is a tuple ``(url, children)`` where each child is either a
    plain url string (a leaf) or another ``(url, children)`` tuple.
    Only the final path component of each url appears in the edges.

    Parameters
    ----------
    tree : tuple
        Tree of urls.

    Returns
    -------
    list
        List of tuples (source page, end page).
    """
    url, children = tree
    if not isinstance(children, list):
        return []
    source = url.split("/")[-1]
    edges = []
    for child in children:
        if isinstance(child, str):
            edges.append((source, child.split("/")[-1]))
        else:
            edges.append((source, child[0].split("/")[-1]))
            edges.extend(get_edges(child))
    return edges
f0e5d591e1453c6b7507c889a51f9c7064f4be39
699,711
def atom_type():
    """Name of this atom type ('vmhd')."""
    atom = 'vmhd'
    return atom
81f90f2f95729ededd8d1242f4b10ddec8b25fb9
699,712
def asstring(bitArray):
    """
    Return string representation of bit array

    Parameters:
        bitArray (list): an array of bits

    Returns:
        (string): string form of bitArray
    """
    return ''.join(map(str, bitArray))
23df601aa1b66c89428004d2e1e8a5961066c8be
699,713
def is_in_fold_innermost_scope_scope(context):
    """Return True if the current context is within a scope marked @fold."""
    marker = 'fold_innermost_scope'
    return marker in context
6bed2406d28ce17c09c6dd293fe61e7f70fbf4b2
699,714
import collections


def coords_assign(coords, dim, new_name, new_val):
    """Reassign an xray.DataArray-style coord at a given dimension.

    Parameters
    ----------
    coords : collections.OrderedDict
        Ordered dictionary of coord name : value pairs.
    dim : int
        Dimension to change (e.g. -1 for last dimension).
    new_name : string
        New name for coordinate key.
    new_val : any
        New value, e.g. numpy array of values

    Returns
    -------
    new_coords : collections.OrderedDict
        A new ordered dictionary with the entry at `dim` replaced by
        (new_name, new_val); the input dict is left unmodified.
    """
    entries = list(coords.items())
    entries[dim] = (new_name, new_val)
    return collections.OrderedDict(entries)
ed5210ec2f5399aa8302eadc53e515bdd6722307
699,715
import os


def find_devices(device_mapping):
    """Filter a {group: [device files]} mapping down to files that exist.

    Parameters
    ----------
    device_mapping : dict
        Keys are Unix group names; values are lists of device files.

    Returns
    -------
    dict
        Same structure, but only device files present on the system are
        kept; groups whose list becomes empty are dropped.  An empty dict
        is returned when nothing is found.
    """
    present = {}
    for group, devices in device_mapping.items():
        existing = [device for device in devices if os.path.exists(device)]
        if existing:
            present[group] = existing
    return present
5c58a2c67c1ddb43776a54efd1983a365faf2522
699,716
def set_paths():
    """
    Sets directory paths based on the machine being used
    :return: path_to_input, path_to_output, path_to_cov_output,
             path_to_checkpoints and path_to_val
    """
    return (
        "../data/flaskv3/input/",
        "../data/flaskv3/output/",
        "../data/flaskv4/output/",
        "",
        "../validation_101.npz",
    )
b590eaba0b1327dfafd69dbf9777669c7cd5505a
699,718
def add_clusters(data, clusters):
    """
    Attach the cluster predictions to the original data for interpretation.

    Note: the ``cluster`` column is written onto *data* in place; the same
    (mutated) object is returned.

    :param data: DataFrame. The data to receive the cluster predictions.
    :param clusters: List. Cluster labels, one per row of ``data``.
    """
    labelled = data
    labelled["cluster"] = clusters
    return labelled
5a91c9af1bccf6ee76d419ceba3274dcecef7535
699,719
def extract_label_from_txt(filename):
    """Get building type and building function.

    Reads the first line of *filename*, splits it on ';' and returns the
    first field plus the second field minus its trailing character
    (presumably the newline — TODO confirm files always end lines with \\n).
    """
    with open(filename, 'r') as in_file:
        first_line = in_file.readline()
    fields = first_line.split(";")
    return fields[0], fields[1][:-1]
d0ed3ea611d631b4dfef6ed7e6637172ed9740e4
699,720
def compose_gates(cliff, gatelist):
    """
    Add gates to a Clifford object from a list of gates.

    Args:
        cliff: A Clifford class object.
        gatelist: a list of gate strings such as 'h 0' or 'cx 0 1'.

    Returns:
        The same Clifford object with the gates applied.

    Raises:
        ValueError: on an unrecognised gate name (or a non-integer qubit).
    """
    # Single-qubit gates dispatch straight to the bound method.
    one_qubit = {
        'v': cliff.v, 'w': cliff.w, 'x': cliff.x, 'y': cliff.y,
        'z': cliff.z, 'h': cliff.h, 's': cliff.s, 'sdg': cliff.sdg,
    }
    for op in gatelist:
        parts = op.split()
        q1 = int(parts[1])
        if parts[0] == 'cx':
            cliff.cx(q1, int(parts[2]))
        elif parts[0] in one_qubit:
            one_qubit[parts[0]](q1)
        else:
            raise ValueError("Unknown gate type: ", op)
    return cliff
bcbade0ec400b46805f73512a1c2fc64ac866404
699,721
def is_same_hour(dt1, dt2):
    """
    Return True when the two datetime objects fall within the same
    calendar hour (same year, month, day and hour), False otherwise.

    :param dt1: first datetime
    :param dt2: second datetime
    """
    same = (dt1.year == dt2.year
            and dt1.month == dt2.month
            and dt1.day == dt2.day
            and dt1.hour == dt2.hour)
    return same
76e225db9076bf3d2a9816d495aa9959015d8e61
699,722
import math


def color_distance(from_color, to_color):
    """
    Euclidean distance between two RGB colors treated as points in 3-D
    space.
    """
    delta_r = from_color[0] - to_color[0]
    delta_g = from_color[1] - to_color[1]
    delta_b = from_color[2] - to_color[2]
    return math.sqrt(delta_r ** 2 + delta_g ** 2 + delta_b ** 2)
b72e9101682bd498ed21e8fb9f73b8f52401b888
699,723
def convert_percent(s):
    """
    Convert a percentage string to an actual floating point fraction
    - Remove %
    - Divide by 100 to make decimal

    http://pbpython.com/pandas_dtypes.html
    """
    numeric = s.replace('%', '')
    return float(numeric) / 100
161f72166105fb8c915b7dfef8cd1ac62a057ac8
699,724