content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_url_and_type(xml):
    """Return the URL and MIME type of a feed's audio enclosure.

    ``xml`` is a parsed RSS tree (ElementTree element); the first
    channel/item/enclosure element's ``url`` and ``type`` attributes
    are returned as a 2-tuple.
    """
    enclosure = xml.find('channel').find('item').find('enclosure')
    attributes = enclosure.attrib
    return attributes['url'], attributes['type']
5e6feacb59edaaca0192342e4c1424c8d23f2bfe
23,993
import random
import math


def create_training_testing_dataset_for_member_prediction(NodeChildrenFile="", NumOfSelect=100, NumOfChild=10, wsFile="", catPathFile="", unknownWordsFile="", lenUNK=100, TrainPercentage=50, outPutFile=""):
    """Build a train/test split file for taxonomy member prediction.

    Writes one '#'-separated record per selected node:
    ``node#train_children#test_children#false_entities#unknown_words``.

    :param NodeChildrenFile: file of ``node child1 child2 ...`` lines
    :param NumOfSelect: maximum number of node records to emit
    :param NumOfChild: minimum number of known children a node must have
    :param wsFile: file of known words, one per line
    :param catPathFile: file whose columns 2..-2 are taken as each node's
        category path (assumes that column layout — TODO confirm format)
    :param unknownWordsFile: candidate unknown words, one per line
        (entries containing '.' or that are purely digits are skipped)
    :param lenUNK: number of false entities / unknown words kept per record
    :param TrainPercentage: percentage of a node's children used for training
    :param outPutFile: output path prefix; "<NumOfChild>_<TrainPercentage>"
        is appended directly to it
    :return: the actual output file path
    """
    WSCHD = dict()  # node -> its children (only nodes with enough children)
    outPutFile += "_".join([str(NumOfChild), str(TrainPercentage)])
    CATPATHDic = dict()  # node -> category path tokens
    with open(catPathFile, 'r') as pfh:
        for ln in pfh:
            wlst = ln[:-1].split()
            # columns 2..-2 hold the path; first/last columns are dropped
            CATPATHDic[wlst[0]] = wlst[2:-1]
    wsLst = []  # known-word vocabulary
    with open(wsFile, 'r') as ifh:
        for ln in ifh:
            wsLst.append(ln[:-1])
    unkLst = []  # filtered unknown-word candidates
    with open(unknownWordsFile, 'r') as ifh:
        for ln in ifh:
            word = ln[:-1]
            # skip dotted tokens and pure numbers
            if '.' not in word and not word.isdigit():
                unkLst.append(ln[:-1])
    with open(NodeChildrenFile, 'r') as ifh:
        for ln in ifh:
            wlst = ln[:-1].split()
            if len(wlst[1:]) >= NumOfChild:
                WSCHD[wlst[0]] = wlst[1:]
    klst = list(WSCHD.keys())
    random.shuffle(klst)  # random node selection order
    num = 0
    # truncate any previous output before appending records
    open(outPutFile, 'w').close()
    with open(outPutFile, 'a') as ofh:
        while num < NumOfSelect and klst:
            curKey = klst.pop()
            if curKey == "*root*":
                continue
            # keep only children that are known words
            clst = [ele for ele in WSCHD[curKey] if ele in wsLst]
            if len(clst) < NumOfChild:
                continue
            EntitiesInKG = list(CATPATHDic.keys())
            # everything related to this node (path, itself, children, root)
            # must be excluded from the false-entity pool
            allNodesInTree = CATPATHDic[curKey] + [curKey] + clst + ["*root*"]
            falseEntityInKG = list(set(EntitiesInKG).difference(allNodesInTree))
            random.shuffle(clst)
            # split children into train/test by TrainPercentage
            boarder = math.ceil(len(clst) * TrainPercentage/100)
            trainLst = clst[:boarder]
            testLst = clst[boarder:]
            random.shuffle(falseEntityInKG)
            random.shuffle(unkLst)
            tRecord = curKey+"#"+" ".join(trainLst)+"#"+" ".join(testLst)+"#"+\
                " ".join(falseEntityInKG[:lenUNK])+"#"+" ".join(unkLst[:lenUNK])
            ofh.write(tRecord+'\n')
            num += 1
    print(outPutFile, ' is created for training and testing\n')
    return outPutFile
ab772c9aff2f9ff059dae2df9156d00e13f4ec51
23,996
def read_problem(filename):
    """Read a 0hh1 puzzle from file <filename>.

    Returns a problem dictionary with:
    - 'size': number of rows/columns (asserted square)
    - 'variables': dict keyed by (row, column) with value 'r', 'b',
      or 'rb' (both candidates, for an unfilled '.' cell)
    - 'state': initially 'unsolved', to be updated by other methods
    """
    with open(filename, 'r') as fh:
        rows = []
        for line in fh:
            # keep only puzzle characters; '.' means "r or b still possible"
            rows.append(['rb' if ch == '.' else ch for ch in line if ch in '.rb'])
    size = len(rows)
    assert all(len(row) == size for row in rows)
    variables = {(r, c): value
                 for r, row in enumerate(rows)
                 for c, value in enumerate(row)}
    return {'size': size, 'variables': variables, 'state': 'unsolved'}
554f97862ddc56337a0ba786052a28955c10b03a
23,997
import stat


def s_isdev(mode):
    """Return True if ``mode`` describes a device file.

    A device is either a block device or a character device.

    Args:
        mode: the ``st_mode`` field of an ``os.stat()`` result.

    Returns:
        bool: True when the mode is a block or character device.
    """
    # use boolean `or` rather than bitwise `|`: these are predicates,
    # and `or` short-circuits and reads as intended
    return stat.S_ISBLK(mode) or stat.S_ISCHR(mode)
9caea3ac877f8b745c6c24487ad26f105ef99ca1
23,998
def in_dict(obj, key, default=False):
    """Return True if ``key`` exists in ``obj.__dict__``, False if not.

    When ``obj`` has no ``__dict__`` (or it is empty), ``default`` is
    returned instead.
    """
    attrs = getattr(obj, '__dict__', None)
    if not attrs:
        return default
    return key in attrs
905bc7afe9677f5f1dd70a1c6ec296df608cc6c3
24,000
def normalize_min_max(img, a=0.1, b=0.9):
    """Normalize image data with Min-Max scaling to the range [a, b].

    Args:
        img: image data (scalar or array), assumed grayscale with values
            in the uint8 range [0, 255]
        a: lower bound of the target range
        b: upper bound of the target range

    Returns:
        The rescaled image data.
    """
    # x' = a + ((x - x_min) * (b - a)) / (x_max - x_min)
    lower, upper = 0, 255
    return a + ((img - lower) * (b - a)) / (upper - lower)
0772ce27ca4a7cc75df2ae2d40e37f638b43359f
24,002
def itraceToStr(trace):
    """Convert an instruction trace into a comma-separated string.

    Each trace entry is a sequence whose first element is the
    instruction; returns "" for an empty trace.

    (Fixes the docstring typo "Concerts" and replaces the quadratic
    ``+=`` loop plus trailing-comma slice with ``str.join``.)
    """
    return ",".join(str(entry[0]) for entry in trace)
0961655df019737b5812bc4066ce0c898034a032
24,005
import yaml


def load_config(config_path):
    """Load settings from a YAML configuration file.

    :param config_path: Path to yaml configuration file.
    :type config_path: str
    :return: Returns settings as found in configuration file.
    """
    with open(config_path, "r") as stream:
        settings = yaml.safe_load(stream)
    return settings
d39d3030c3ebdc99f085ab430f27caabb5af7299
24,006
import json


def addNodeGroup(browser, token, obj):
    """Create a new node group from the given node group object (a dictionary).

    POSTs ``{"node_group": obj}`` to /api/v2/node_groups.json and returns
    the decoded JSON response.
    """
    # NOTE(review): this unconditional raise makes everything below
    # unreachable — the function currently always raises. Remove it (or
    # implement the guard it stands in for) to enable the API call.
    raise NotImplementedError
    status, data = APICall(browser, token, "POST", "/api/v2/node_groups.json", body=json.dumps({"node_group": obj}))
    return json.loads(data)
3b59784bca44040413e4fdb816a8dd8009971520
24,008
import threading


def wait_for_events(completion_queues, deadline):
    """Wait on several completion queues concurrently.

    Args:
        completion_queues: list of completion queues to wait for events on
        deadline: absolute deadline to wait until

    Returns:
        a sequence of events of length len(completion_queues), in the
        same order as the queues.
    """
    results = [None] * len(completion_queues)
    results_lock = threading.Lock()

    def poll(index, queue):
        # next() blocks until an event arrives or the deadline passes
        event = queue.next(deadline)
        with results_lock:
            results[index] = event

    workers = [threading.Thread(target=poll, args=[idx, queue])
               for idx, queue in enumerate(completion_queues)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return results
fd172ade97f78a538bcd9e6c0c95f5ee2102688c
24,010
def analyse(text):
    """Return a dictionary mapping each character in ``text`` to the
    number of times it occurs.
    """
    counts = {}
    for ch in text:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
5f4cabd36ab090ba3f26dab89851947ac8f70616
24,012
import os
import getpass
import json


def GetUsernameAndPwd():
    """Return the student ID and password, prompting the user or loading
    them from the local ``user_info.json`` cache.

    (Original docstring, translated: "Return the student number and
    password based on user input or local storage.")
    """
    # (translated) Do not pass credentials on the command line the way the
    # mysql client does; prompt for them in-process, Linux style.
    if not os.path.exists("user_info.json"):
        username = input("请输入您的学号: ")
        password = getpass.getpass("请输入您的密码: ")
        print()
        # (translated) persist the credentials for the next run
        user_info = {"username": username, "password": password}
        with open("user_info.json", "w") as f:
            json.dump(user_info, f, indent=4)
    else:
        # cached credentials exist — reuse them without prompting
        with open("user_info.json", "r") as f:
            user_info = json.load(f)
        username = user_info["username"]
        password = user_info["password"]
    return username, password
d14e1f66f7d28f59cda371c95f5c9fa209f63cab
24,013
def _eval_split_partition_fn(example, num_partitions, eval_fraction, all_ids): """Partition function to split into train/eval based on the hash ids.""" del num_partitions example_id = example[0] eval_range = int(len(all_ids) * eval_fraction) for i in range(eval_range): if all_ids[i] == example_id: return 0 return 1
93dd87df83f3fdc0ac2cc7181e4c7cabb6e9dfba
24,015
def norm_vec(lst, idx):
    """Get the normal vector pointing to cube at idx (from previous cube).

    Computes the component-wise difference lst[idx] - lst[idx-1].
    """
    assert len(lst) > 1 and 0 < idx < len(lst)
    current, previous = lst[idx], lst[idx - 1]
    return [c - p for c, p in zip(current, previous)]
76fb86ac29b2cd8b5ceab0ba83b9bb8360914764
24,016
def end_chat(input_list):
    """Function to end the chat.

    Parameters
    ----------
    input_list : list
        Tokens the user typed to the chatbot.

    Returns
    -------
    bool
        True to end the chat (user typed 'quit'), False to continue.
    """
    return 'quit' in input_list
cf9a592af3981706c04aa97f4519a4d58a1d87af
24,017
def parse_accept_language(accept_header):
    """Parse an HTTP Accept-Language header into (locale, q) string pairs.

    Taken from:
    https://siongui.github.io/2012/10/11/python-parse-accept-language-in-http-request-header/
    """
    pairs = []
    for entry in accept_header.split(","):
        if ";" not in entry:
            # no q parameter => quality defaults to 1
            pairs.append((entry.strip(), "1"))
        else:
            parts = entry.split(";")
            locale = parts[0].strip()
            q_value = parts[1].split("=")[1]
            pairs.append((locale, q_value))
    return pairs
fbe32beabecec84a543d9612336943120cf6b8b0
24,018
def next_element(elem):
    """Get sibling elements until we exhaust them.

    Walks ``next_sibling`` links and returns the first sibling that has a
    'name' attribute (a real tag, in BeautifulSoup terms); returns None
    when the chain runs out.
    """
    node = elem
    while node is not None:
        node = node.next_sibling
        if hasattr(node, 'name'):
            return node
    return None
9039a6db533ba5adeb7e767db3b3a49561ef5aa5
24,019
def pair_sum_eff(A, n=0):
    """Return pairs of elements of A that sum to n.

    O(n log n) (sort + two-pointer sweep) and memory efficient; pairs are
    returned as (smaller, larger) tuples in sorted order.
    """
    ordered = sorted(A)
    pairs = []
    lo, hi = 0, len(ordered) - 1
    while lo < hi:
        total = ordered[lo] + ordered[hi]
        if total == n:
            pairs.append((ordered[lo], ordered[hi]))
            lo += 1
            hi -= 1
        elif total < n:
            lo += 1
        else:
            hi -= 1
    return pairs
6faf4f4a20dceb9df13c980546d131379b50701d
24,020
from typing import Iterable
from typing import Sequence
import time
import subprocess


def time_subprocesses(name: str, subprocesses_args: Iterable[Sequence[str]]) -> float:
    """Time several subprocesses running in parallel.

    Launches every argv list in ``subprocesses_args`` concurrently, waits
    for them all, asserts each exits with code 0, and returns the total
    wall-clock duration in seconds.
    """
    print()
    print("> Running: {}".format(name))
    started_at = time.time()
    procs = [subprocess.Popen(args) for args in subprocesses_args]
    for proc in procs:
        assert proc.wait() == 0
    duration = time.time() - started_at
    print("> Completed: {}, duration: {}".format(name, duration))
    print()
    return duration
3e6d954ca78473f319e57282ae5c82a509b0ce7a
24,021
import uuid def _generate_id() -> str: """ Returns a string representation of a UUID for use as an identifier for entities. The identifier will be 32 characters long, each of which will be either a number (0-9) or a lower case letter in the range a-f. See VALID_CHARS, above. :return: A string that can be used to uniquely identify an entity. """ return str(uuid.uuid4()).replace("-", "", 4)
9ad7e8d55caaa8c9acb852b193a5bf54402d0194
24,022
def poly_integral(poly, C=0):
    """Calculate the integral of a polynomial.

    ``poly`` is a list of coefficients where the index is the power of x
    (e.g. f(x) = x^3 + 3x + 5 is ``[5, 3, 0, 1]``); ``C`` is an integer
    integration constant.

    Returns a new, minimal coefficient list for the integral — whole
    number coefficients are represented as ints and trailing zeros are
    trimmed — or None when ``poly`` or ``C`` is invalid.
    """
    # validate: poly must be a non-empty list of numbers, C an int
    if type(poly) != list or len(poly) == 0 or type(C) != int:
        return None
    if not all(isinstance(coef, (int, float)) for coef in poly):
        return None
    itg = [C]
    # integral of a*x^k is a/(k+1) * x^(k+1)
    for power, coef in enumerate(poly, start=1):
        value = coef / power
        itg.append(int(value) if value.is_integer() else value)
    # Trim trailing zero coefficients but always keep the constant term.
    # (The previous sum()-based trim wrongly discarded tails such as
    # [1, -1] whose coefficients merely summed to zero.)
    while len(itg) > 1 and itg[-1] == 0:
        itg.pop()
    return itg
77055a4c0d59411d0acc3d402dc3b55743106d23
24,024
def isint(s):
    """Return True if ``s`` can be converted to an integer.

    Handles both malformed strings (ValueError) and non-convertible
    types such as None (TypeError); the previous version let TypeError
    propagate and bound the result to an unused local.
    """
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
fc734c88031b2d0aa1c60b59d0605276b0215a83
24,025
def is_anagram(word1, word2):
    """Return True if the two strings are anagrams of each other.

    Two words are anagrams if you can rearrange the letters from one to
    spell the other. Spaces are ignored so multi-word phrases compare
    correctly.

    Fixes two bugs in the original: it compared ``word1`` against itself
    (``chars2 = list(word1)``), and ``list.remove(" ")`` raised
    ValueError for inputs containing no space (and removed only the
    first space otherwise).
    """
    return sorted(word1.replace(" ", "")) == sorted(word2.replace(" ", ""))
281fe13616155a755eefd01f09e40ba223e41e0d
24,026
def aws_account_from_infrastructure_access(cluster, access_level, ocm_map):
    """
    Generate an AWS account object from a cluster's awsInfrastructureAccess
    groups and access levels.

    Returns the account dict for the last awsInfrastructureAccess entry
    matching ``access_level``, or None when no entry matches.
    """
    ocm = ocm_map.get(cluster['name'])
    account = None
    for awsAccess in cluster['awsInfrastructureAccess']:
        # keep scanning: a later matching entry overwrites an earlier one
        if awsAccess.get('accessLevel', "") == access_level:
            account = {
                'name': awsAccess['awsGroup']['account']['name'],
                'uid': awsAccess['awsGroup']['account']['uid'],
                'terraformUsername':
                    awsAccess['awsGroup']['account']['terraformUsername'],
                'automationToken':
                    awsAccess['awsGroup']['account']['automationToken'],
                # role ARN resolved through OCM for terraform to assume
                'assume_role':
                    ocm.get_aws_infrastructure_access_terraform_assume_role(
                        cluster['name'],
                        awsAccess['awsGroup']['account']['uid'],
                        awsAccess['awsGroup']['account']['terraformUsername'],
                    ),
                'assume_region': cluster['spec']['region'],
                'assume_cidr': cluster['network']['vpc']
            }
    return account
091866e997f7a1e07da78cb35b8ec10acd639eda
24,028
import csv


def import_csv(csv_file: str):
    """Read a csv file and return a list of lists, one inner list per row.

    References:
        https://realpython.com/python-csv/

    Args:
        csv_file (str): path of the .csv file to read

    Returns:
        list[list[str]]: the parsed rows, e.g.
        ``[['bob', ' 21', ' janitor'], ['alice', ' 22', ' secretary']]``
    """
    # The previous version appended to ``results_list`` whose
    # initialisation (and the final return) were commented out, making the
    # function raise NameError / return None. ``newline=''`` follows the
    # csv module's documented requirement for file objects.
    with open(csv_file, 'r', newline='') as f:
        return list(csv.reader(f))
b9376065f3a560c157b78e3af9574fbd1727c9f1
24,029
def make_lognormal_params_95_ci(lb, ub):
    """Provide mean and standard deviation of a lognormal distribution
    for a 95% confidence interval.

    :param lb: lower bound
    :param ub: upper bound
    :return: (mean, std) — the geometric mean of the bounds and a
        quarter-power spread approximation.

    Reference (exact z-score based derivation, kept for comparison):
    http://broadleaf.com.au/resource-material/lognormal-distribution-summary/
        std = log(v1 / v2) / (z1 - z2)
        mu  = (z2 * log(v1) - z1 * log(v2)) / (z2 - z1)
    with v1 = ub, v2 = lb, z1 = 1.96, z2 = -1.96.
    """
    geometric_mean = (ub * lb) ** (1 / 2)
    spread = (ub / lb) ** (1 / 4)
    return geometric_mean, spread
ea6db5b6478b59687ed39a09e84d5cf189582879
24,030
def val_proto_to_python(msg):
    """
    Converts a `protobuf` `Value` `Message` object into a Python variable.

    Parameters
    ----------
    msg : google.protobuf.struct_pb2.Value
        `protobuf` `Value` `Message` representing a variable.

    Returns
    -------
    one of {None, bool, float, int, str, list, dict}
        Python variable represented by `msg`.
    """
    value_kind = msg.WhichOneof("kind")
    if value_kind == "null_value":
        return None
    elif value_kind == "bool_value":
        return msg.bool_value
    elif value_kind == "number_value":
        # protobuf stores every number as a double; collapse whole
        # numbers back to int for cleaner Python values
        return int(msg.number_value) if msg.number_value.is_integer() else msg.number_value
    elif value_kind == "string_value":
        return msg.string_value
    elif value_kind == "list_value":
        # recurse into each list element
        return [val_proto_to_python(val_msg) for val_msg in msg.list_value.values]
    elif value_kind == "struct_value":
        # recurse into each struct field
        return {key: val_proto_to_python(val_msg) for key, val_msg in msg.struct_value.fields.items()}
    else:
        raise NotImplementedError("retrieved value type is not supported")
4dc710483a599ba16b6e27e0f1af6431c454a632
24,031
def shed(data):
    """Drop the categorical columns that have already been encoded.

    Removes Gender, Education, Married and Self_Employed and returns the
    resulting DataFrame (the input is not modified in place).
    """
    encoded_columns = ['Gender', 'Education', 'Married', 'Self_Employed']
    return data.drop(encoded_columns, axis=1)
a50b8974d09d04d38497d87e3c4c96c05bf973ea
24,032
import hashlib


def file_hash(filename):
    """Calculate the SHA256 checksum of an entire file on disk.

    Args:
        filename (str): Location of file on disk

    Returns:
        str: SHA256 hex digest
    """
    digest = hashlib.sha256()
    with open(filename, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
cebd76ad6dec53056ff829dacb5df6fee639ee10
24,034
def serialize(tag, headers=(), items=()):
    """Return a text corresponding to given collection ``tag``.

    Items are sorted by name. For VADDRESSBOOK only the item bodies are
    emitted; other tags (ie. calendars) are wrapped in BEGIN/END lines
    with the optional ``headers`` placed before the items.
    """
    ordered = sorted(items, key=lambda item: item.name)
    if tag == "VADDRESSBOOK":
        return "\n".join(item.text for item in ordered)
    lines = ["BEGIN:%s" % tag]
    for section in (headers, ordered):
        if section:
            lines.append("\n".join(entry.text for entry in section))
    lines.append("END:%s\n" % tag)
    return "\n".join(lines)
69e827e1e962e0d493645ee068abeb0cb12a432f
24,035
def argsort_list(seq):
    """Return the indices that would sort ``seq``.

    Example: argsort_list([2, 1, 3]) = [1, 0, 2].
    """
    return sorted(range(len(seq)), key=lambda index: seq[index])
25e598b3771f1aae0b28dff83909705f9bd76b3d
24,036
def make_mgt_byte(service, sync=False):
    """Create the management byte according to the protocol.

    :param service: Service code as defined in :obj:`~.Service`
    :param sync: boolean, True if synchronized mode should be used
    :return: integer
    """
    # bits 4-5 are fixed at 0b11 (3 * 16 = 0x30); bit 6 (64) flags sync mode
    byte = service + 3 * 16
    return byte + 64 if sync else byte
2e69845f9fd19d03013f39eb0a74e0b5f0c0ea7e
24,037
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.

    Sublime-Text-style fuzzy matching: every pattern character must appear
    in order in ``instring``; the score rewards adjacency, post-separator
    and camelCase matches and penalizes leading/unmatched letters.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter

    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True  # so that matching first letter gets sep_bonus
    # "best letter" tracks a tentative match whose score may improve if a
    # later occurrence of the same letter scores higher (rematch)
    best_letter, best_lower, best_letter_idx = None, None, None
    best_letter_score = 0
    matched_indices = []
    while s_idx != s_len:
        p_char = pattern[p_idx] if (p_idx != p_len) else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # commit the pending best letter before moving on
            score += best_letter_score
            matched_indices.append(best_letter_idx)
            best_letter, best_lower, best_letter_idx = None, None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            # apply penalty for each letter before the first match
            # using max because penalties are negative (so max = smallest)
            if p_idx == 0:
                score += max(s_idx * lead_penalty, max_lead_penalty)
            # apply bonus for consecutive matches
            if prev_match:
                new_score += adj_bonus
            # apply bonus for matches after a separator
            if prev_sep:
                new_score += sep_bonus
            # apply bonus across camelCase boundaries
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            # update pattern index iff the next pattern letter was matched
            if next_match:
                p_idx += 1
            # update best letter match (may be next or rematch)
            if new_score >= best_letter_score:
                # apply penalty for now-skipped letter
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_idx = s_idx
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    # commit any still-pending best letter at end of string
    if best_letter:
        score += best_letter_score
        matched_indices.append(best_letter_idx)
    return p_idx == p_len, score
5d5f4164f6230161842f37b29f03758753f36ef6
24,038
def add_lists(list1, list2):
    """Add list1 and list2 and remove any duplicates.

    Example:
        list1 = [1, 2, 3, 4]
        list2 = [3, 4, 5, 6]
        add_lists(list1, list2) = [1, 2, 3, 4, 5, 6]

    :param list1: input list 1
    :param list2: input list 2
    :return: concatenation with elements of list2 already present in
        list1 removed
    """
    extras = set(list2) - set(list1)
    return list1 + list(extras)
fc3f024ddbe0de0b6c5cfb92c173fedade0b1f54
24,039
def group(iterable, n_groups):
    """Group a list into a list of lists.

    Splits ``iterable`` into ``n_groups`` round-robin slices — useful to
    hand out work to each worker in an MPI scheme.

    Parameters
    ----------
    iterable : array_like
        A list of elements
    n_groups : int
        The number of groups you'd like to make

    Returns
    -------
    groups : list
        A list of lists, each holding approximately
        ``len(iterable) / n_groups`` elements of ``iterable``.

    See Also
    --------
    interweave : inverse of this operation
    """
    groups = []
    for offset in range(n_groups):
        groups.append(iterable[offset::n_groups])
    return groups
bb981dbc8f1fc8034e2b672652c63d6af7de8f5b
24,040
import os


def get_files(dirpath, suffix=["png"]):
    """Collect files under a directory filtered by extension.

    Args:
        dirpath: directory to walk recursively.
        suffix: list of accepted extensions (the part after the final
            dot), e.g. ``["png", "jpg"]``.

    Returns:
        list of full paths of all matching files under ``dirpath``.
    """
    matches = []
    for root, _dirs, filenames in os.walk(dirpath, topdown=False):
        for filename in filenames:
            if filename.split(".")[-1] in suffix:
                matches.append(os.path.join(root, filename))
    return matches
47f5a2985b5f156d716743491736e5d083c43d8d
24,041
import torch


def device():
    """Return the current CUDA device index when a GPU is available,
    otherwise the CPU torch.device.
    """
    if torch.cuda.is_available():
        return torch.cuda.current_device()
    return torch.device('cpu')
77aad6c53753b550cfd3766ad499be1c9b33e87f
24,042
import os def _baristaProjectFile(directory): """ Returns the filename of the config-json file in the given directory """ return os.path.join(directory, "barista_project.json")
e73092a948ecb8f545130ddb552029abceeafca9
24,043
import subprocess


def coursepid(name):
    """Return the PIDs of processes whose command line matches ``name``.

    Runs ``pgrep -f name`` and parses its stdout; returns a (possibly
    empty) list of ints.
    """
    proc = subprocess.Popen(['pgrep', '-f', name],
                            stdout=subprocess.PIPE, shell=False)
    stdout = proc.communicate()[0]
    return [int(token) for token in stdout.split()]
631aefdc30488120d41780dbf69be501e7082826
24,044
def create_params() -> dict:
    """Build the query parameters for the user lookup request.

    ``max_results`` is capped at 1000 by the API.

    Returns:
        dict: the query parameters.
    """
    params = {
        "user.fields": "created_at",
        "max_results": "1000",
    }
    return params
2520e1e521e20bc361e7414aa3c4432ad6fd718f
24,049
def emails_parse(emails_dict):
    """Parse the output of ``SESConnection.list_verified_emails()`` and
    return the verified addresses as a sorted list.
    """
    return sorted(emails_dict['VerifiedEmailAddresses'])
db5b1935057367bdacd923e1f27557727f1b9a00
24,050
def getSourcefileSetsUsed(runSets):
    """Return the sorted list of svcomp categories (or "Sets") for which
    result sets exist.

    Used to populate the "Category" drop down in index.py.
    """
    return sorted({runset.fileSet for runset in runSets})
6b9f2348723d2079a2604ffc2603509d74f34723
24,052
def create_datafile_url(base_url, identifier, is_filepid):
    """Create the URL of a Datafile.

    Example (file PID):
    https://data.aussda.at/file.xhtml?persistentId=doi:10.11587/CCESLK/5RH5GK

    Parameters
    ----------
    base_url : str
        Base URL of the Dataverse instance
    identifier : str
        Datafile id or persistent identifier (e.g. doi)
    is_filepid : bool
        ``True`` to use the persistent identifier, ``False`` otherwise

    Returns
    -------
    str
        URL of the datafile
    """
    assert isinstance(base_url, str)
    assert isinstance(identifier, str)
    root = base_url.rstrip("/")
    query_key = "persistentId" if is_filepid else "fileId"
    url = "{0}/file.xhtml?{1}={2}".format(root, query_key, identifier)
    assert isinstance(url, str)
    return url
d6deece390b3028f45b27a0f6aa1864688ed97fa
24,053
from colorama import Fore


def ctext(s, color='red'):
    """Colored text support.

    Valid colors: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE,
    RESET, and the LIGHT*_EX variants (LIGHTBLACK_EX, LIGHTRED_EX, ...).
    Unknown colors leave the text uncolored.

    Note
    ----
    * colored text is longer in length and can overlength the screen
      (i.e. make the progress-bar ugly).
    * ctext(x + y + z) is BETTER than ctext(x) + ctext(y) + ctext(z)
      because it saves more space.
    """
    s = str(s)
    try:
        color = str(color).strip()
        color = color.upper()
        # special fix for light color: allow "lightred" as shorthand
        # for the colorama name "LIGHTRED_EX"
        if 'LIGHT' == color[:5] and '_EX' != color[-3:]:
            color = color + '_EX'
        # unknown names fall back to '' (no coloring)
        color = getattr(Fore, color, '')
        return color + s + Fore.RESET
    # NOTE(review): nothing in this try block can raise ImportError
    # (str/getattr never do) — this handler looks vestigial from a time
    # when colorama was imported lazily here; the fallback is dead code.
    except ImportError:
        pass
    return s
13bbb20677c056a8f2e363369db042a66ef3ff54
24,054
def nakpair(list):
    """Naked pair.

    Scans ``list`` (candidate strings for one sudoku unit; NOTE the
    parameter shadows the builtin ``list``) for two cells holding the
    same two candidates. Returns
    ``(True, pair, positions_of_pair, indices_to_prune)`` when a naked
    pair with prunable peers is found, else ``(False, 0, 0, 0)``.
    """
    # collect all two-candidate cells
    pair = []
    for i in list:
        if len(i) == 2:
            pair.append(i)
    # a duplicate among them means a naked pair exists
    if len(set(pair)) < len(pair):
        pairs = []
        for i in range(len(pair)):
            for j in range(i + 1, len(pair)):
                if pair[i] == pair[j]:
                    pairs.append(pair[i])
        for pa in pairs:
            # all cell values other than the pair itself
            other = set(list) - set([pa])
            f = []
            for q in other:
                # any other cell containing either candidate can be pruned;
                # NOTE(review): list.index(q) yields only the FIRST position
                # of q — duplicated cell values elsewhere are not all listed
                if pa[0] in q or pa[1] in q:
                    f.append(list.index(q))
            if len(f) > 0:
                position = [i for i, x in enumerate(list) if x == pa]
                return (True, pa, position, f)
    return (False, 0, 0, 0)
13b54d3a03a363277ab22d502a2d479afc1ce04a
24,055
def pkgsystem_lock():  # real signature unknown; restored from __doc__
    """
    pkgsystem_lock() -> bool

    Acquire the global lock for the package system by using
    /var/lib/dpkg/lock to do the locking. From Python 2.6 on, the
    apt_pkg.SystemLock context manager is available and should be used
    instead.
    """
    # Stub: the real implementation lives in the apt_pkg C extension;
    # this placeholder always reports failure.
    return False
587e804a738b610d5e47469a65b0fb7e99875c0e
24,056
import glob
import os


def get_text_files(directory):
    """Return a list of .txt files in ``directory``.

    Excludes all filenames beginning with "readme" (case-insensitive).

    Args:
        directory: the directory to search.
    """
    candidates = glob.glob(os.path.join(directory, '*.txt'))
    return [path for path in candidates
            if not os.path.basename(path).lower().startswith('readme')]
79fe0d8c5b97dbf99b7fddf7913ffc50f594af0e
24,058
def suffix_all_lines(s, suffix):
    """Return ``s`` with ``suffix`` appended to all lines.

    If the last line is empty, suffix is not appended to it.
    (If s is blank, returns s unchanged.)
    """
    *body, last = s.split('\n')
    pieces = [line + suffix + '\n' for line in body]
    if last:
        pieces.append(last + suffix)
    return ''.join(pieces)
fe43cb9039254a7acf7cb11375356ef2fc1c01b4
24,059
def safe_float(x):
    """Return ``float(x)``, or None when the conversion fails."""
    try:
        result = float(x)
    except (ValueError, TypeError):
        return None
    return result
a492ce03eeb15c4fe192b56dc447a97fff94b03e
24,061
def current_piv(power, voltage):
    """Usage: current_piv(power, voltage)

    Return the current I = P / V.
    """
    return power / voltage
c9ea309d11d3ef696f177c961b26c0177a5ce551
24,063
def string_to_scopes(scopes):
    """Convert a stringified scopes value to a list.

    Args:
        scopes (Union[Sequence, str]): The string of space-separated
            scopes to convert.

    Returns:
        Sequence(str): The separated scopes ([] for falsy input).
    """
    return scopes.split(' ') if scopes else []
ba304ebc17cd52739809d84c0617cf853de6d21b
24,064
import subprocess


def panconv(inputfile):
    """Convert ``inputfile`` to markdown with pandoc.

    Returns pandoc's raw stdout (bytes).
    """
    markdown = subprocess.check_output(['pandoc', '-t', 'markdown', inputfile])
    return markdown
cc2699cda8285a7afceff4a006d8ffc7f27e2990
24,065
def calculate_total_bill(subtotal):
    """(float) -> str

    Apply 13% HST to ``subtotal``, then round to the nearest 5 cents
    using Canada's standard nickel rounding scheme:
    0.01-0.02 rounds down to 0.00; 0.03-0.04 rounds up to 0.05;
    0.06-0.07 rounds down to 0.05; 0.08-0.09 rounds up to 0.10.
    Returns the total formatted to 2 decimal places.

    >>> calculate_total_bill(3.0)
    '3.40'
    >>> calculate_total_bill(6.67)
    '7.55'
    >>> calculate_total_bill(2.05)
    '2.30'
    """
    HST_RATE = 1.13
    with_tax = subtotal * HST_RATE
    nickels = round(float(with_tax) / 0.05)
    rounded = round(0.05 * nickels, 2)
    return format(rounded, '.2f')
6335c9e85e37e6d897eaa48ad09557b5d77d2e1b
24,066
import multiprocessing


def consumer_process_factory(kafka_bootstrap_servers):
    """
    Use this "factory as a fixture" to create a multiprocessing.Process
    running a Kafka consumer polling loop in a test function.

    Parameters
    ----------
    kafka_bootstrap_servers: pytest fixture (str)
        comma-separated list of host:port

    Returns
    -------
    _consumer_process_factory : function(consumer_factory, topics, group_id, consumer_config, document_queue, **kwargs)
        factory function returning a multiprocessing.Process that will run
        a Kafka consumer polling loop
    """
    def _consumer_process_factory(consumer_factory, topics, group_id, consumer_config, document_queue, **kwargs):
        """
        Build (but do not start) a daemon Process running a Kafka consumer.

        Parameters
        ----------
        consumer_factory : function(topics, group_id, consumer_config, **kwargs)
            a factory function (or callable) returning a
            BlueskyConsumer-like object
        topics : list of str, required
            the underlying Kafka consumer will subscribe to these topics
        group_id : str, optional
            group id for the underlying Kafka consumer
        consumer_config : dict, optional
            configuration parameters for the underlying Kafka consumer;
            a short auto-commit default is supplied when None
        document_queue : multiprocessing.Queue
            the underlying Kafka consumer will place documents it
            receives in this queue
        kwargs
            passed through to ``consumer_factory``

        Returns
        -------
        multiprocessing.Process
            unstarted daemon process; the caller starts it.
        """
        if consumer_config is None:
            consumer_config = {
                # it is important to set a short time interval
                # for automatic commits or the Kafka broker may
                # not be notified by the consumer that messages
                # were received before the test ends; the result
                # is that the Kafka broker will try to re-deliver
                # those messages to the next consumer that subscribes
                # to the same topic(s)
                "auto.commit.interval.ms": 100,
            }

        # this function will run in the external process created below
        def start_consumer_with_queue(document_queue_):
            logger = multiprocessing.get_logger()
            logger.warning("constructing consumer process with inter-process queue")

            # process_document callback: forward every (name, doc) pair
            # through the inter-process queue
            def put_document_in_queue(consumer, topic, name, doc):
                logger.warning("BlueskyConsumer putting %s in queue", name)
                document_queue_.put((name, doc))

            # it is important the BlueskyConsumer be
            # constructed in the external process
            bluesky_consumer_ = consumer_factory(
                topics=topics,
                bootstrap_servers=kafka_bootstrap_servers,
                group_id=group_id,
                consumer_config=consumer_config,
                process_document=put_document_in_queue,
                **kwargs,
            )
            # consume messages published by a Kafka broker
            bluesky_consumer_.start()

        # create an external process for the bluesky_kafka.BlueskyConsumer polling loop
        # but do not start it, the client of this function will start the process
        consumer_process = multiprocessing.Process(
            target=start_consumer_with_queue,
            args=(document_queue,),
            daemon=True,
        )
        return consumer_process

    return _consumer_process_factory
90222728439dc071a39afa72b83c66c757b0a2a7
24,069
def pil_to_rgb(pil):
    """Convert the color from a PIL-compatible integer to RGB.

    Parameters:
        pil: a PIL compatible color representation (0xBBGGRR)

    Returns:
        The color as an (r, g, b) tuple, each channel in the range [0...1].

    >>> '(%g, %g, %g)' % pil_to_rgb(0x0080ff)
    '(1, 0.501961, 0)'
    """
    red = pil & 0xff
    green = (pil >> 8) & 0xff
    blue = (pil >> 16) & 0xff
    return tuple(channel / 255.0 for channel in (red, green, blue))
b6c2c3f150ddf2e82120febc676e400582d7db48
24,070
def get_instance(instances, field, value):
    """Given a list of instances (dicts), return the dictionary where
    ``field == value``.

    Raises AssertionError when no instance matches. (Raised explicitly
    rather than via an ``assert`` statement, which would be stripped —
    silently returning None — when Python runs with -O.)
    """
    for inst in instances:
        if inst[field] == value:
            return inst
    raise AssertionError(
        "Value '{}' for field '{}' was not found in instances!".format(value, field))
56b79d142885c54a356709e99d9333476bbc1c43
24,072
def version_number_to_array(version_num):
    """Given a dotted version number string, return its components as a
    list of integers.
    """
    return list(map(int, version_num.split(".")))
b55605238bd0b534c92c7867f5f2257226e8382f
24,076
def get_lr(optimizer):
    """Return the current learning rate from an optimiser.

    * :param optimizer (torch.optim): Optimiser function
    :return (float): learning rate of the first param group
        (None when there are no param groups)
    """
    first_group = next(iter(optimizer.param_groups), None)
    return None if first_group is None else first_group['lr']
d35703b89e07bce65113135c78733b282eebffd4
24,077
def dedup(lst):
    """Remove duplicates from ``lst``, keeping the first occurrence of
    each element and the original order.
    """
    seen = set()
    unique = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
367640c9f4795ddc9ee2d7db1d88755a9b901290
24,078
def qgis_vector_fix_geometries(processing, context, INPUT, OUTPUT):
    """Run the QGIS ``native:fixgeometries`` algorithm.

    ----------
    Notes
    -------
    ``context`` is accepted for signature compatibility but not used.

    Returns:
    -------
    The result dict produced by ``processing.run``.
    """
    params = {"INPUT": INPUT, "OUTPUT": OUTPUT}
    return processing.run("native:fixgeometries", params)
f2865c533840ec1b2fc3e09b832631d9c5334e39
24,079
def cria_coordenada(x, y):
    """
    cria_coordenada: {0, 1, 2} x {0, 1, 2} >>> coordenada

    Build a "coordenada" value from two values: line and column
    (X and Y coordinates).  Raises ValueError for values outside {0, 1, 2}.
    """
    valid = (0, 1, 2)
    if x in valid and y in valid:
        return (x, y)
    raise ValueError("cria_coordenada: argumentos invalidos.")
4f6d7271df8e6b2618e0c8341eed20b4b06af033
24,080
def select_all(conn):
    """
    Query all rows in the ACTOR_SCORE table.

    :param conn: the Connection object (e.g. an sqlite3 connection)
    :return: list of all rows fetched from ACTOR_SCORE

    NOTE(review): the rows are also printed as a side effect; the original
    docstring said "MOVIE table" but the query reads ACTOR_SCORE.
    """
    cur = conn.cursor()
    cur.execute("SELECT * FROM ACTOR_SCORE")

    rows = cur.fetchall()
    print(rows)
    return rows
    # print('rows count : '+str(len(rows)))
1d920ccfaef300b32c7823b280f9a57742ecec23
24,082
def comma(text):
    """Split a string on commas, strip whitespace, drop empty pieces."""
    parts = []
    for piece in text.split(','):
        piece = piece.strip()
        if piece:
            parts.append(piece)
    return tuple(parts)
ebfd1d0fe7be436a93973e4ee14f7565414cd5f3
24,084
import random


def get_random_float(min, max, digits=4):
    """
    Return a uniform random float in [min, max], rounded to *digits* places.

    NOTE: the parameter names shadow the builtins ``min``/``max``; they are
    kept because keyword callers depend on them.

    :param min: lower bound (inclusive)
    :param max: upper bound (inclusive)
    :param digits: number of decimal places to round to
    :return: the rounded random float
    """
    value = random.uniform(min, max)
    return round(value, digits)
287a8f69f5a1d9e213978d064c3a4c34860e66eb
24,085
def calling(cb):
    """Iterfunc which returns its input, but calling callback after every item.

    The returned iterator calls ``cb()`` after it has been asked for a next
    element (and that element exists), but before yielding it.
    """
    def iterfunc(iterator):
        it = iter(iterator)
        while True:
            try:
                item = next(it)
            except StopIteration:
                return
            cb()
            yield item
    return iterfunc
eb025659d93fdb6b0d96424e395ef10720621736
24,086
def bisect(f, left, right):
    """Determine largest value in [left, right] where ``f`` returns true.

    Assumes ``f`` is true on a prefix of the interval and false afterwards.
    If this value lies to the left of the search interval, return
    ``left - 1``; to the right, return ``right + 1``.

    Fix: the endpoint checks used ``is False`` / ``is True`` identity tests,
    which silently misbehave for predicates returning truthy/falsy non-bool
    values (e.g. 0/1); plain truthiness is used now, matching the loop body.
    """
    if not f(left):
        return left - 1
    if f(right):
        return right + 1
    # Invariant: f(left) is truthy, f(right) is falsy.
    while right - left > 1:
        mid = (left + right) // 2
        if f(mid):
            left = mid
        else:
            right = mid
    if f(right):
        return right
    return left
03a1147c2a5a841804cbc6aa2dc85f791d9e9ac2
24,087
def get_column(puzzle: str, col_num: int) -> str:
    """Return column col_num of puzzle.

    Precondition: 0 <= col_num < number of columns in puzzle

    >>> get_column('abcd\\nefgh\\nijkl\\n', 1)
    'bfj'
    """
    rows = puzzle.strip().split('\n')
    return ''.join(row[col_num] for row in rows)
4d4dd5c6d6345786e4ef0f41c4748e47d1231603
24,088
def _convert_boolean(value): """Convert a string to a boolean value the same way the server does. This is called by the transform_parameter_value function and shouldn't be called directly. Args: value: A string value to be converted to a boolean. Returns: True or False, based on whether the value in the string would be interpreted as true or false by the server. In the case of an invalid entry, this returns False. """ if value.lower() in ('1', 'true'): return True return False
0c30185b8745be6b431baab6d6e4331c608c93cb
24,089
import click


def prompt_tag_selection(tags):
    """Prompt user to chose a tag or <HEAD>."""
    # Prompt user to select a tag to export
    tags = sorted(tags, key=lambda t: t.created)

    lines = ['Tag to export: \n\n<HEAD>\t[1]']
    for index, tag in enumerate(tags, start=2):
        lines.append('{}\t[{}]'.format(tag.name, index))
    text_prompt = '\n'.join(lines) + '\n\nTag'

    selection = click.prompt(
        text_prompt, type=click.IntRange(1, len(tags) + 1), default=1
    )
    # Selection 1 means <HEAD>; anything larger maps back into the tag list.
    return tags[selection - 2] if selection > 1 else None
913ebd41f23905acf967f466910966e435d957e1
24,092
def _status_code_for_git_status_line(status_line): """Returns the XY status git status. Git status char ' ' (empty space) is replaced with an underscore per the MergeFile status spec above. See `git status --help` for details. """ assert len(status_line) >= 2, status_line return status_line[:2].replace(" ", "_")
51e802e4f6aded85d5344eba89bb0f20af192d7f
24,093
def _alter_str(data, pos=0, incr=1, num=1): """Alters a string at the given position by incrementing the char""" start = pos assert pos >= 0 assert incr >= 0 assert num >= 0 assert len(data) >= (pos + num) while pos < num + start: data = data[:pos] + chr(ord(data[pos]) + incr) + data[pos+1:] pos += 1 return data
2f3eedceb70486ff2ca9a6171c5f5765157c2ca5
24,094
def open_file(fname):
    """Read <fname> and return its entire contents as a string."""
    with open(fname, "r") as fh:
        return fh.read()
c08c02414fcacdffc7fc1391ee9201a6a0151c5a
24,096
def abi_crs(G, reference_variable="CMI_C01"):
    """
    Get coordinate reference system for the Advanced Baseline Imager (ABI).

    Parameters
    ----------
    G : xarray.Dataset
        An xarray.Dataset to derive the coordinate reference system.
    reference_variable : str
        A variable in the xarray.Dataset to use to parse projection from.

    Returns
    -------
    Three objects are returned
        1. cartopy coordinate reference system
        2. data projection coordinates in x direction
        3. data projection coordinates in y direction
    """
    # Use the reference variable as a 'hook' into the CF projection metadata.
    reference = G.metpy.parse_cf(reference_variable)
    return reference.metpy.cartopy_crs, reference.x, reference.y
b420ea4cda12f1acd4b9af6ecd9c2f70c756f761
24,097
def EncodePublic(pk):
    """
    Encode a public key into bytes.

    :param pk: public-key object; must be convertible via ``bytes(pk)``
        (i.e. expose ``__bytes__`` or be an iterable of ints) — assumption
        based on the delegation below, confirm against the key type used.
    :return: the raw byte encoding of the key.
    """
    return bytes(pk)
a4dcc174f12fa78b3ec425b4129c038d51312fea
24,098
import os


def validate_report_file(report_file_name):
    """ Raise ValueError if report contains path seps """
    base_name = os.path.basename(report_file_name)
    if base_name != report_file_name:
        raise ValueError("Path separators are not allowed: {r}".format(r=report_file_name))
    return report_file_name
bca396f98d32c1d66f59f749daaff90be12941a6
24,099
def find_mode(*c):
    """Return the index of the maximum value of a curve (its mode).

    Returns 0 for an empty curve, and the first index on ties.

    Fix: the running maximum previously started at 0, so any curve whose
    values are all <= 0 always reported index 0; the argmax is now computed
    over the actual values.
    """
    if not c:
        return 0
    return max(range(len(c)), key=c.__getitem__)
41f663ae3cc549eec37c94c768408b2615fae53c
24,100
def _without_otter_metadata(metadata): """ Return a copy of the metadata with all the otter-specific keys removed. """ meta = {k: v for (k, v) in metadata.get('metadata', {}).iteritems() if not (k.startswith("rax:auto_scaling") or k.startswith("rax:autoscale:"))} return {'metadata': meta}
b35a10d3961c1885df4ae9a25b39c639d4f77b9c
24,101
import re


def cv(row, col_name, arg, current_data_model, df, con):
    """
    row[col_name] must contain only values from the appropriate controlled
    vocabulary.

    :param row: mapping-like record; ``row[col_name]`` is the cell to check
    :param col_name: column being validated; also keys into the vocabulary
    :param arg: label used in the error message (vocabulary name)
    :param current_data_model: table of column metadata; ``.loc[col_name].type``
        gives the column type ('Matrix', 'List', 'Dictionary', ...) —
        presumably a pandas DataFrame, confirm against caller
    :param df: unused here; kept for the validator calling convention
    :param con: object exposing ``con.vocab.vocabularies``, a mapping from
        column name to the allowed values
    :return: None when valid (or empty/'None'/nan), otherwise an error string
    """
    vocabulary = con.vocab.vocabularies
    cell_value = str(row[col_name])
    col_type = current_data_model.loc[col_name].type
    # Empty and literal "None" cells are not validated.
    if not cell_value:
        return None
    elif cell_value == "None":
        return None
    cell_values = [cell_value]
    # split Matrix value rows by semicolons unless they are within quotes
    if col_type == 'Matrix':
        cell_values = re.split(';(?=(?:[^"]*"[^"]*")*[^"]*$)', cell_value)
    # split List and Dictionary values by colons unless they are within quotes
    if col_type == 'List' or col_type == 'Dictionary':
        cell_values = re.split(':(?=(?:[^"]*"[^"]*")*[^"]*$)', cell_value)
    cell_values = [c.strip() for c in cell_values]
    # get possible values for controlled vocabulary
    # exclude weird unicode
    possible_values = []
    for val in vocabulary[col_name]:
        try:
            possible_values.append(str(val).lower())
        except UnicodeEncodeError as ex:
            print(val, ex)
    for value in cell_values:
        # only validate the first column in a Matrix row against the vocabulary
        if col_type == 'Matrix':
            value = value.split(':')[0]
        if str(value).lower() == "nan":
            continue
        elif str(value).lower() in possible_values:
            continue
        elif value.lower() == "none":
            continue
        else:
            # Numeric cells may match a vocabulary entry after float
            # normalisation (e.g. "1" vs "1.0").
            # NOTE(review): bare except silently swallows conversion errors;
            # intentional best-effort, left as-is.
            try:
                if str(float(value)) in possible_values:
                    continue
            except:
                pass
        return '"{}" is not in controlled vocabulary for {}'.format(value, arg)
    return None
10a24e990c85f860380609b980656720afb6060b
24,102
def evaluate_criteria(df):
    """
    The evaluation criteria is to select the separator with less standard
    deviation and with mean above zero.
    """
    positive_mean = df[df["mean"] > 0]
    best_row = positive_mean.sort_values(by="std").iloc[0]
    return best_row["name"]
f8eeb03f73b47a5b81612deab6a65a160098e19f
24,104
from typing import List def _get_shard_boundaries(num_examples: int, number_of_shards: int) -> List[int]: """Shard boundaries based on number of number of shards to generate.""" if num_examples == 0: raise AssertionError("No examples were yielded.") if num_examples < number_of_shards: raise AssertionError("num_examples ({}) < number_of_shards ({})".format( num_examples, number_of_shards)) return [ round(num_examples * (float(i) / number_of_shards)) for i in range(1, number_of_shards + 1) ]
62e8bb025661bf37d6987eb7e792d198fd7f3df7
24,105
import pickle


def extract_bsl(beta_,sigma_,lambda_,X,y,nb_class,th_class,nb):
    """
    Extract beta, sigma, lambda of a specific class and pickle them to disk.

    - th_class: the class number extracted (matched against nb_class entries)
    - nb: the experiment number (used in the output file names)
    - X, y: unused here — presumably kept for a shared calling convention,
      confirm against callers.

    Returns (beta_c, sigma_c, lambda_c) for the first matching class, or
    implicitly None when no entry of nb_class equals th_class.
    Side effect: writes beta_E<nb>_<th_class>.pkl, sigma_..., lambda_....
    """
    index = 0
    th = 0
    for c in range(len(nb_class)):
        # Slice out the block of rows belonging to the current class.
        # NOTE(review): the loop advances `index`/`th` together with `c`,
        # so th == c throughout — confirm this is intended.
        beta_c = beta_[c,index:index + nb_class[th],:,:]
        sigma_c = sigma_[c,index:index + nb_class[th],:,:]
        lambda_c = lambda_[c,index:index + nb_class[th],:]
        if nb_class[c] == th_class:
            beta = open('beta_E'+str(nb)+'_'+str(th_class)+'.pkl', 'wb')
            pickle.dump(beta_c, beta)
            sigma = open('sigma_E'+str(nb)+'_'+str(th_class)+'.pkl', 'wb')
            pickle.dump(sigma_c, sigma)
            # NOTE(review): this rebinding shadows the `lambda_` parameter
            # with a file handle; harmless here since the array was already
            # sliced, but worth renaming.
            lambda_ = open('lambda_E'+str(nb)+'_'+str(th_class)+'.pkl', 'wb')
            pickle.dump(lambda_c, lambda_)
            beta.close()
            sigma.close()
            lambda_.close()
            return beta_c,sigma_c,lambda_c
            # NOTE(review): unreachable — the `return` above always exits first.
            break
        index = index + nb_class[th]
        th = th +1
7f728ccdf47286babc36099f18f10d8b4df8d523
24,108
def content_filter(text):
    """
    The simple filter of words.
    Change the values in filter.txt to configurate it.

    filter.txt must contain a Python-literal dict with the keys
    min_word_length, max_word_length, min_count_of_words,
    max_count_of_words and black_list.

    :param text: the string to check
    :return: dict with boolean checks 'length', 'count' and 'black_list'
        (True means the text passes that check)
    """
    import ast  # local import keeps the module's import surface unchanged

    filter_result = {
        'length': True,
        'count': True,
        'black_list': True
    }
    with open('filter.txt', 'r') as f:
        # literal_eval instead of eval: filter.txt is data, not trusted code.
        info = ast.literal_eval(f.read())
    if len(text) < info['min_word_length'] or len(text) > info['max_word_length']:
        filter_result['length'] = False
    words = text.split(' ')
    if len(words) < info['min_count_of_words'] or len(words) > info['max_count_of_words']:
        filter_result['count'] = False
    for word in info['black_list']:
        if word in text:
            filter_result['black_list'] = False
            break
    return filter_result
5008f4a2bd8e56c9972cebd50ced7dc731be8769
24,109
import math


def griewank(phenome):
    """The bare-bones Griewank function."""
    # Quadratic term: sum of x_i^2 / 4000.
    quadratic = sum((phenome[i] ** 2) / 4000.0 for i in range(len(phenome)))
    # Oscillatory term: product of cos(x_i / sqrt(i + 1)).
    product = 1.0
    for i, x in enumerate(phenome):
        product *= math.cos(x / math.sqrt(i + 1.0))
    return quadratic - product + 1.0
b2f5ad697051b3d1e491da9c159aafa2faa1cc6d
24,111
def evaluate_part_one(expression: str) -> int:
    """Solve the expression strictly left to right, ignoring operator precedence."""
    answer = 0
    pending_op = None
    for token in expression.split():
        if not token.isdigit():
            pending_op = token
            continue
        value = int(token)
        if pending_op is None:
            answer = value
        elif pending_op == "+":
            answer += value
        elif pending_op == "*":
            answer *= value
    return answer
b6f5fcbb3adde53377c369d8744b4fec5be9badf
24,112
def tx_vport(b2b_raw_config_vports):
    """Returns a transmit vport (the first port of the raw b2b config)."""
    ports = b2b_raw_config_vports.ports
    return ports[0]
b2b6629a868d1ee75566a2c86acf5e38c75e992c
24,114
import json


def get_hosts_deployed(cls):
    """
    Method to get all the hosts deployed in the cluster
    Args:
        cls: cephadm instance object

    Returns:
        List of the names of hosts deployed in the cluster
    """
    out, _ = cls.shell(args=["ceph", "orch", "host", "ls", "-f", "json"])
    return [entry["hostname"] for entry in json.loads(out)]
221bd1a437bb4a271c1b85af9f80de5cbf99884c
24,116
def initial_status_ok (db, status_id, cat_id, is_simple) :
    """ Allow "escalated" when submitting new issue.
        This is allowed in case the analysis was done in advance.
        The first (submission) message should contain the analysis
        results.
        Allow "open" when submitting new non-cert issue. This is allowed
        in case the analysis and decision about implementation was made
        in advance, the first message should contain the analysis
        results and decision.
        Allow open for simple kind (the ones using simple_transitions).
    """
    status = db.status.get (status_id, 'name')
    # Simple-transition issues may only start in 'open'.
    if is_simple :
        return status == 'open'
    if status == 'escalated' :
        return True
    # Non-simple 'open' requires a valid, non-cert category.
    if status == 'open' and cat_id :
        valid = db.category.get (cat_id, 'valid')
        cert_sw = db.category.get (cat_id, 'cert_sw')
        return bool (valid and not cert_sw)
    return False
f2e28779258a5f1f590e6cf2a2e9a26a415c16b7
24,117
def is_anagram_0(s1, s2):
    """
    This is my first solution, and it's incorrect because this
    method checks palindrome, not anagram.
    """
    if s1 is None or s2 is None:
        return False
    if len(s1) != len(s2):
        return False
    # Compares s1 forward against s2 backward — i.e. a reversed-string check.
    return all(a == b for a, b in zip(s1, reversed(s2)))
a958a29909e3d5ac9e52d77572b1feae4bc87574
24,118
import json


def process_synonyms(genes, synonyms_file):
    """Process a list of genes into a dictionary with synonyms

    The user may select only a subset of genes to look for, rather than the
    entire dictionary. This function creates a new dictionary for the genes
    used by the user.

    Returns:
        dict: Dictionary containing the gene names provided as keys, and the
        synonyms as values (with the canonical symbol appended for entries
        keyed by a synonym).
    """
    with open(synonyms_file) as f:
        gene_to_syns = json.load(f)

    target_to_syns = {}
    targets_lower = [target.lower() for target in genes]

    for symbol, synonyms in gene_to_syns.items():
        if symbol in genes:
            target_to_syns[symbol] = synonyms
        else:
            for synonym in synonyms:
                if synonym.lower() in targets_lower:
                    # Build a fresh list instead of appending into the shared
                    # `synonyms` list: the in-place append mutated the loaded
                    # data and duplicated `symbol` whenever several synonyms
                    # of the same gene matched.
                    target_to_syns[synonym] = synonyms + [symbol]
    return target_to_syns
f2452464eb58e70487c8f0fbf3159dcb8a7a0d75
24,120
from typing import Any

import inspect

from pydantic import BaseModel # noqa: E0611


def _is_basemodel(obj: Any) -> bool:
    """Checks if object is a subclass of BaseModel.

    Args:
        obj (Any): Any object

    Returns:
        bool: Is a subclass of BaseModel.
    """
    if not inspect.isclass(obj):
        return False
    return issubclass(obj, BaseModel)
30027d6e5ce9f685c4b227ef3c2f83d76a963da7
24,121
def get_T(M):
    """Return T when given the augmented matrix [I-P, T].

    M has shape (n, n+1); T is its final column.
    """
    num_rows = M.shape[0]
    return M[:, num_rows]
35ae311abb0864adc96ac0741e44e1069f6a25a1
24,122
import statistics


def median(wm):
    """
    Median Operator

    wm = list of importance weighted membership values
    returns the middle value in the set

    Delegates to :func:`statistics.median`: for an even number of values
    this returns the mean of the two middle values, which may not itself
    be an element of ``wm``.

    :raises statistics.StatisticsError: if ``wm`` is empty.
    """
    return statistics.median(wm)
bee3aa1f17d943ed1951277c17eff958a6b16e12
24,123
import os
import shutil


def rmpath(path):
    """Wrapper for os.remove or shutil.rmtree as appropriate"""
    if os.path.isdir(path):
        return shutil.rmtree(path)
    if os.path.isfile(path):
        return os.remove(path)
    # Nonexistent paths are silently ignored, as before.
a9935d1cc363f8032bd1d3614d374a0f89f12e2e
24,124
from time import localtime, strftime, time


def get_time(is_bracket=True, return_numerical_time=False):
    """Get the string of the current local time."""
    stamp = strftime("%Y-%m-%d %H:%M:%S", localtime())
    if is_bracket:
        stamp = "[{}] ".format(stamp)
    if return_numerical_time:
        return stamp, time()
    return stamp
ba26766ea74cf7467e4b8775fa9a964b0d6aff7f
24,126
def code_huffman(chaine, arbre):
    """Return the binary encoding of *chaine* using the Huffman tree *arbre*.

    For each character, the tree is walked from the root: a "1" is emitted
    when descending into the left subtree, "0" for the right, until the node
    holding the character is reached.

    NOTE(review): relies on the tree API ``get_val``/``get_ag``/``get_ad``
    and on ``c in subtree`` membership tests — presumably the tree type
    implements ``__contains__``; confirm against its definition.
    """
    chaine_retour = ""
    ab1 = arbre
    for c in chaine:
        # Descend until the current node's value is the target character.
        while ab1.get_val() != c:
            # walrus: remember whether the char lies in the left subtree,
            # so the emitted bit and the descent stay consistent.
            chaine_retour += "1" if (gauche := c in ab1.get_ag()) else "0"
            ab1 = ab1.get_ag() if gauche else ab1.get_ad()
        # Restart from the root for the next character.
        ab1 = arbre
    return chaine_retour
0e7abecfaaba2ca97ba58a1849309e7a5cab26cb
24,128
def title(info):
    """Concatenate a page title from the ``info`` dict.

    Expects info['node'] (name, version), info['diagrams'] (prefix,
    blocktimestampsTpsAv) and info['send'] (num_txs).
    """
    version = info['node']['version']
    try:
        # perhaps cut away "-stable-316fc7ec"
        version = version.split("-")[0]
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; best-effort trimming is kept.
        pass

    T = "(%s) %s %s with %d txs: %.1f TPS"
    T = T % (info['diagrams']['prefix'],
             info['node']['name'],
             version,
             info['send']['num_txs'],
             info['diagrams']['blocktimestampsTpsAv'])
    return T
94d1116bfedc2b9f2585ba6479e2cb04a1a05b70
24,130
def has_duplicates(t):
    """Takes an iterable of hashable elements; returns True if any element
    appears more than once, otherwise False.

    Perf fix: membership was tested against a growing *list* (O(n^2));
    a set makes each check O(1).
    """
    seen = set()
    for element in t:
        if element in seen:
            return True
        seen.add(element)
    return False
dc1316d94ed510eaa468feb26a62ae9afd6cfbc2
24,131
def rational_polynomial2(data):
    """Rational polynomial benchmark function.

    .. list-table::
       :widths: 10 50
       :stub-columns: 1

       * - Range
         - :math:`\mathbf{x} \in [0, 6]^2`
       * - Function
         - :math:`f(\mathbf{x}) = \\frac{(x_1 - 3)^4 + (x_2 - 3)^3 - (x_2 - 3)}{(x_2 - 2)^4 + 10}`
    """
    x1, x2 = data[0], data[1]
    numerator = (x1 - 3)**4 + (x2 - 3)**3 - (x2 - 3)
    denominator = (x2 - 2)**4 + 10
    return numerator / denominator
99d7802acf72ee57421d543c3a324958e7bb5281
24,132
import argparse


def use_args():
    """Commandline argument parser"""
    cli = argparse.ArgumentParser(description="Search for something")
    # positional query, then the optional search knobs
    cli.add_argument("query", help="search query")
    cli.add_argument("--tld", default="com",
                     help="""top-level-domain 'com', 'co.in' default is 'com'""")
    cli.add_argument("--pause", type=int, default=2,
                     help="lapse to wait between HTTP requests")
    cli.add_argument("--num", type=int, default=10,
                     help="number of results per page")
    cli.add_argument("--start", type=int, default=0,
                     help="first result to retrieve")
    cli.add_argument("--stop", type=int,
                     help=("last result to retrieve."
                           "Use None to search forever"))
    cli.add_argument("-f", "--file", action='store_true',
                     help="If Output should be to file")
    return cli.parse_args()
1e140238a687bb9fb8f2211bf76cdb1c94dbf344
24,133
def build_id(*elements, delimiter='.'):
    """
    :param elements: values to join; each is converted with ``str``
    :param str delimiter: separator placed between elements
    :return: Strings joined by delimiter "."
    :rtype: str
    """
    parts = [str(element) for element in elements]
    return delimiter.join(parts)
9fd0dce65a9f23937b05691ede3002f987286881
24,134