Columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
import tempfile
import zipfile

def unzip_csar_to_tmp(zip_src):
    """
    Unzip csar package to temp path
    :param zip_src:
    :return:
    """
    dirpath = tempfile.mkdtemp()
    zip_ref = zipfile.ZipFile(zip_src, 'r')
    zip_ref.extractall(dirpath)
    return dirpath
b6be57f4e20c208583cc15dc9d0b997f6b41e2f1
13,669
from typing import Callable
from typing import Any
from typing import List
import inspect

def get_parameters(
    f: Callable[..., Any], allow_args: bool = False, allow_kwargs: bool = False
) -> List[str]:
    """Get names of function parameters."""
    params = inspect.getfullargspec(f)
    if not allow_args and params[1] is not None:
        raise ValueError(f"Function {f.__name__} should not have *args")
    if not allow_kwargs and params[2] is not None:
        raise ValueError(f"Function {f.__name__} should not have **kwargs")
    return params[0]
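# A hedged usage sketch for get_parameters; the helper below is illustrative,
# not part of the original record. With *args present and allow_args=False
# (the default), a ValueError would be raised instead.
def _example(a, b, c=1):
    return a + b + c

assert get_parameters(_example) == ["a", "b", "c"]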
218e187ae504ce18c0d240aee93480e275f4b8a4
13,671
def _convertCtypeArrayToList(listCtype):
    """Returns a normal list from a ctypes list."""
    return listCtype[:]
89a408b796f2aba2f34bc20942574986abd66cd2
13,672
from typing import List

def get_cost_vector(counts: dict) -> List[float]:
    """
    This function simply gives values that represent how far away from our desired goal we are.
    Said desired goal is that we get as close to 0 counts for the states |00> and |11>, and as
    close to 50% of the total counts for |01> and |10> each.
    :param counts: Dictionary containing the count of each state
    :return: List of floats that determine how far the count of each state is from the desired
        count for that state:
        -First element corresponds to |00>
        -Second element corresponds to |01>
        -Third element corresponds to |10>
        -Fourth element corresponds to |11>
    """
    # First we get the counts of each state. Try-except blocks are to avoid errors when the count is 0.
    try:
        a = counts['00']
    except KeyError:
        a = 0
    try:
        b = counts['01']
    except KeyError:
        b = 0
    try:
        c = counts['10']
    except KeyError:
        c = 0
    try:
        d = counts['11']
    except KeyError:
        d = 0
    # We then want the total number of shots to know what proportions we should expect
    totalShots = a + b + c + d
    # We return the absolute value of the difference of each state's observed and desired counts
    # Other systems to determine how far each state count is from the goal exist, but this one is simple and works well
    return [abs(a - 0), abs(b - totalShots / 2), abs(c - totalShots / 2), abs(d - 0)]
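# A hedged usage sketch for get_cost_vector with made-up measurement counts
# (1000 shots total); the ideal result would be [0, 0.0, 0.0, 0].
counts = {'00': 10, '01': 480, '10': 490, '11': 20}
assert get_cost_vector(counts) == [10, 20.0, 10.0, 20]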
b7565de2e47ba99e93b387ba954fdc29f44805e8
13,673
def default_copy(original_content):
    """Copy all fields of the original content object exactly as they are
    and return a new content object which is different only in its pk.

    NOTE: This will only work for very simple content objects. This will
    throw exceptions on one2one and m2m relationships. And it might not
    be the desired behaviour for some foreign keys (in some cases we
    would expect a version to copy some of its related objects as well).
    In such cases a custom copy method must be defined and specified in
    cms_config.py
    """
    content_model = original_content.__class__
    content_fields = {
        field.name: getattr(original_content, field.name)
        for field in content_model._meta.fields
        # don't copy primary key because we're creating a new obj
        if content_model._meta.pk.name != field.name
    }
    return content_model.objects.create(**content_fields)
d0a6fca858e34b8a7ae26ed9693a894cf2b2473f
13,674
def build_normalized_request_string(ts, nonce, http_method, host, port, request_path, ext):
    """Implements the notion of a normalized request string as described in
    http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-02#section-3.2.1"""
    normalized_request_string = \
        ts + '\n' + \
        nonce + '\n' + \
        http_method + '\n' + \
        request_path + '\n' + \
        host + '\n' + \
        str(port) + '\n' + \
        ext + '\n'
    return normalized_request_string
6a7f397738b852116cbaf249c846f58b482fdca1
13,675
def rivers_with_station(stations):
    """Given list of stations (MonitoringStation object), return the names
    of the rivers that are being monitored"""
    # Collect all the names of the rivers in a set to avoid duplicate entries
    rivers = {station.river for station in stations}
    # Return a list for convenience
    return list(rivers)
c099af2eb0f869c6f1e3270ee089f54246779e2d
13,677
import requests
import json

def pinterest_shares(url):
    """pinterest share count"""
    try:
        pinterest_url = 'http://api.pinterest.com/v1/urls/count.json?url=' + url
        # the endpoint returns JSONP, so strip the receiveCount(...) wrapper
        response = requests.get(pinterest_url).text \
            .replace('receiveCount(', '') \
            .replace(')', '')
        json_data = json.loads(response)
        return json_data['count']
    except Exception:
        return 0
097a587778b1b21ceb84664b644cdbc6bb4619c3
13,678
import math

def get_deltas(radians):
    """ gets delta x and y for any angle in radians """
    dx = math.sin(radians)
    dy = -math.cos(radians)
    return dx, dy
032acb537373d0ee18b721f04c95e75bb1572b1b
13,679
from typing import Dict

def mrr(benchmark: Dict, results: Dict, repo: str) -> float:
    """
    Calculate the MRR of the prediction for a repo.
    :param benchmark: dictionary with the real libraries from benchmark.
    :param results: dictionary with the predicted libraries.
    :param repo: full name of the repo.
    :return: float of the MRR.
    """
    true_libraries = benchmark[repo]
    suggestions = results[repo]
    for index, req in enumerate(suggestions):
        if req in true_libraries:
            return 1 / (index + 1)
    return 0
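# A hedged usage sketch for mrr; the repo name and library names are invented.
# The first correct suggestion sits at index 1, so the reciprocal rank is 1/2.
benchmark = {'org/repo': ['numpy', 'scipy']}
results = {'org/repo': ['flask', 'numpy', 'pandas']}
assert mrr(benchmark, results, 'org/repo') == 0.5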
b057c93cc807244e4d5e8e94772877273ac1041c
13,680
def hook_get_load_batch_query(table: str, start_index: int, end_index: int) -> str:
    """Returns the query that loads the BQ data.

    Here it's possible to filter just new customers or other customer groups.

    Args:
        table: A string representing the full path of the BQ table where the
            transactions are located. This table is the prepared new customers
            periodic transactions table which contains a single line per customer.
        start_index: An integer representing the row number where to start
            retrieving data from
        end_index: An integer representing the row number where to stop
            retrieving data at

    Returns:
        A string with the query.

    Example:
        query = '''
            SELECT * FROM (
                SELECT s1.*, ROW_NUMBER() OVER() as rowId
                FROM `{0}` as s1
                LEFT JOIN (
                    SELECT orderId, clientId, date FROM (
                        SELECT orderId, clientId, date,
                            ROW_NUMBER() OVER(PARTITION BY orderId, clientId, date
                                              ORDER BY orderId asc) as rowId
                        FROM `{0}`
                        ORDER BY orderId asc
                    )
                ) s2 on s1.orderId=s2.orderId and s1.clientId=s2.clientId and s1.date=s2.date
            )
            WHERE rowId between {1} and {2}
        '''
        return query.format(table, start_index, end_index)
    """
    del table, start_index, end_index  # Unused by default
    return ""
6a99cf04214a815249a01b6cf5040bdb93b73fb8
13,681
def elf_segment_names(elf):
    """Return a hash of elf segment names, indexed by segment number."""
    table = {}
    seg_names_sect = elf.find_section_named(".segment_names")
    if seg_names_sect is not None:
        names = seg_names_sect.get_strings()[1:]
        for i in range(len(names)):
            table[i] = names[i]
    return table
4af46fb35c9808f8f90509417d52e7ce4eb2770e
13,682
def line_line_closest_points(A1, C1, A2, C2):
    """
    >>> print(line_line_closest_points(Ux, Ux, Uy, Uy))
    (Vec( 0.000000, 0.000000, 0.000000 ), Vec( 0.000000, 0.000000, 0.000000 ))
    >>> print(line_line_closest_points(Ux, Uy, Uy, Uz))
    (Vec( 0.000000, 1.000000, 0.000000 ), Vec( 0.000000, 1.000000, 1.000000 ))
    """
    # Line1: C1 + t1*A1
    # Line2: C2 + t2*A2
    # (Ucase: Vectors, Lcase: Scalars)
    # Finding the points with minimum distance is equivalent to finding
    # Q1(t1) & Q2(t2) such that:
    #   Q2 - Q1 = C2 + t2*A2 - C1 - t1*A1 = k*(A2 x A1)   (x means cross product)
    # Using some tricks and vector properties the solution is:
    C21 = C2 - C1
    M = A2.cross(A1)
    m2 = M.dot(M)
    R = C21.cross(M / m2)
    t1 = R.dot(A2)
    t2 = R.dot(A1)
    Q1 = C1 + t1 * A1
    Q2 = C2 + t2 * A2
    return Q1, Q2
211d19b67fb56d00ae1276ebd62e9e979c4d5295
13,683
def segment_spectrum_batch(spectra_mat, w=50, dw=25):
    """
    Segment multiple raman spectra into overlapping windows

    Args:
        spectra_mat (2D numpy array): array of input raman spectrum
        w (int, optional): length of window. Defaults to 50.
        dw (int, optional): step size. Defaults to 25.

    Returns:
        list of numpy array: list containing arrays of segmented raman spectrum
    """
    return [spectra_mat[:, i:i + w] for i in range(0, spectra_mat.shape[1] - w, dw)]
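# A hedged usage sketch for segment_spectrum_batch with synthetic data.
# With 100 samples, w=50 and dw=25, range(0, 50, 25) yields windows starting
# at 0 and 25 only, each of shape (2, 50).
import numpy as np
mat = np.arange(200).reshape(2, 100)
windows = segment_spectrum_batch(mat)
assert len(windows) == 2 and windows[0].shape == (2, 50)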
956791e2957f4810726d1d94549f79f3d83c8d21
13,684
def readvar(fni, varname, latrange=[], prange=[]):
    """reads variable from netcdf object in varname
    fni is a Dataset variable"""
    tsv = fni.variables[varname]
    ts = tsv[:]
    # try:
    #     ts = np.array(ts, dtype='float32') * float(tsv.scale_factor) + float(tsv.add_offset)
    # except AttributeError:
    #     pass
    try:
        if tsv.calendar == 'noleap':
            ts /= 365.
        if tsv.calendar == '365_day':
            ts /= 365.
        if tsv.calendar == '360_day':
            ts /= 360.
        if tsv.calendar == 'gregorian':
            ts /= 365.24
    except AttributeError:
        pass  # variable has no calendar attribute
    return ts
2c987fa20c76911442cdcbba48f7b215350e0484
13,685
def transform_url(private_urls):
    """
    Transforms URL returned by removing the public/ (name of the local folder with all hugo
    html files) into "real" documentation links for Algolia

    :param private_urls: Array of file links in public/ to transform into doc links.
    :return new_private_urls: A list of documentation URL links that correspond to private doc files.
    """
    new_private_urls = []
    for url in private_urls:
        ## We add /$ to all links in order to make them all "final", in fact
        ## Algolia stop_url parameter uses regex and not "perfect matching" link logic
        new_private_urls.append(url.replace('public/', 'docs.datadoghq.com/') + '/$')
    return new_private_urls
17a5c720103294b7535c76d0e8994c433cfe7de3
13,688
import os
import argparse

def _dependent_globals(this_script, cwd=os.curdir):
    """Compute set of related globals that depend on this script's path."""
    script_dir = os.path.dirname(this_script)
    # This script lives under 'build/rbe', so the path to the root is '../..'.
    default_project_root = os.path.realpath(
        os.path.join(script_dir, '..', '..'))
    # This is the relative path to the project root dir from the build output dir.
    project_root_rel = os.path.relpath(default_project_root, start=cwd)
    # This is the relative path to the build output dir from the project root dir.
    build_subdir = os.path.relpath(cwd, start=default_project_root)
    return argparse.Namespace(
        script_dir=script_dir,
        default_project_root=default_project_root,
        project_root_rel=project_root_rel,
        build_subdir=build_subdir,
        # This is the script that eventually calls 'rewrapper' (reclient).
        generic_remote_action_wrapper=os.path.join(
            script_dir, 'fuchsia-rbe-action.sh'),
        # This command is used to check local determinism.
        check_determinism_command=[
            os.path.join(
                default_project_root, 'build', 'tracer', 'output_cacher.py'),
            '--check-repeatability',
        ],
        # The path to the prebuilt fsatrace in Fuchsia's project tree.
        fsatrace_path=os.path.join(
            project_root_rel, 'prebuilt', 'fsatrace', 'fsatrace'),
        detail_diff=os.path.join(script_dir, 'detail-diff.sh'),
    )
392452de8f0e591c1bedca7b3aad4df359d29367
13,689
def cosine_similarity(intersection_num, all_items_num, first_set_len, second_set_len):
    """
    Count cosine similarity based on intersection data
    :param intersection_num: len of intersection of two sets
    :param all_items_num: num of all items, for example, sum of views
    :param first_set_len: len of first set, for example, views of first video
    :param second_set_len: len of second set, for example, views of second video
    :return: cosine similarity value
    """
    intersection_probability = (intersection_num / all_items_num) ** 2
    first_set_probability = first_set_len / all_items_num
    second_set_probability = second_set_len / all_items_num
    cosine_similarity_value = (
        intersection_probability / (first_set_probability * second_set_probability)
    )
    return cosine_similarity_value
90a8be939730307069f6758fe6a93299d6484b42
13,690
import random

def random_walk(graph, node, steps=8):
    """
    Given a graph and a specific node within the graph, returns a node after
    steps random edge jumps
    :param graph: A networkx graph
    :param node: Any node within the graph
    :param steps: The number of steps within the random walk
    :return: A node within the network
    """
    if steps == 0:
        return node
    else:
        # wrap out_edges in list() so random.choice can index into it
        return random_walk(graph, random.choice(list(graph.out_edges(node)))[1], steps - 1)
ad75cadc8e1df8edf5ebbb030de9b80076ab1f4c
13,691
from typing import Any
from typing import List
from typing import Optional

def get_last_key_from_integration_context_dict(feed_type: str,
                                               integration_context: Optional[List[Any]] = None) -> str:
    """
    To get last fetched key of feed from integration context.
    :param feed_type: Type of feed to get last fetched key.
    :param integration_context: Integration context.
    :return: The last fetched S3 object key for the feed, or '' if not found.
    """
    # avoid a mutable default argument; fall back to an empty list
    feed_context = integration_context or []
    for cached_feed in feed_context:
        cached_key = cached_feed.get(feed_type, '')
        if cached_key:
            return cached_key
    return ''
64eeb53cf00636dfc73866c64c77e2964149209e
13,692
import re

def english_syllables(word):
    """Simple heuristic for the number of syllables in an English word.
    91% agreement with CMUDict."""
    pos = ["[aeiouy]+",
           "[^cgj]eo|[^cgst]ia|ii|[^cgstx]io|io$|[^g]iu|[^qg]ua|[^g]uo",
           "^mc|(s|th)ms?$",
           "[aeiouy]ing"]
    neg = ["[aeiouy]n?[^aeiouy]h?e$",
           "[aeiouy]([^aeiouytd]|tc?h)+ed$",
           "[aeiouy]r?[^aeiouycszxh]h?es$",
           "cally$|[^ei]ely$"]
    return sum(len(re.findall(r, word)) for r in pos) - sum(len(re.findall(r, word)) for r in neg)
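# A hedged sanity check for english_syllables: "banana" contains three separate
# vowel groups and triggers none of the subtraction patterns.
assert english_syllables("banana") == 3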
000a353633c25067472c131c78a8c16eda45b522
13,693
def parse_file(data_dict):
    """
    takes the streptomedb dictionary and returns a list of lists with NPs which is easy to use
    data_dict: strep dictionary created with make_dict()
    """
    key_list = []
    for key in data_dict.keys():
        key_list += [key]
    value_list = []
    for value in data_dict.values():
        value_list += [value]
    value_list = [list(i) for i in zip(*value_list)]
    info_list = [key_list] + value_list
    return info_list
ad1c80729bf38b868c839aef2b9b6d76d1ad17d5
13,694
import math

def _parseSeconds(data):
    """
    Formats a timedelta into the format HH:MM:SS.XXXX
    """
    total_seconds = data.total_seconds()
    hours = math.floor(total_seconds / 3600)
    minutes = math.floor((total_seconds - hours * 3600) / 60)
    seconds = math.floor(total_seconds - hours * 3600 - minutes * 60)
    remainder = total_seconds - 3600 * hours - 60 * minutes - seconds
    return "%02d:%02d:%02d%s" % (
        hours,
        minutes,
        seconds,
        (".%s" % str(round(remainder, 8))[2:]) if remainder > 0 else "",
    )
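# A hedged usage sketch for _parseSeconds; the fractional part is appended
# only when the timedelta has a sub-second remainder.
from datetime import timedelta
assert _parseSeconds(timedelta(hours=1, minutes=2, seconds=3)) == "01:02:03"
assert _parseSeconds(timedelta(seconds=3, microseconds=500000)) == "00:00:03.5"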
b01a66f66dc3cdc930aff29069c865cab5278d08
13,695
import os
import requests

def check_exists(item, rest_url):
    """
    Checks the existence of item in ES
    @param item: item to check
    @param rest_url: rest API endpoint
    @return: True if item exists
    """
    ptype = "container"
    if item.startswith("job"):
        ptype = "job_spec"
    elif item.startswith("hysds_io"):
        ptype = "hysds_io"
    url = os.path.join(rest_url, "{0}/{1}?id={2}".format(
        ptype, "info" if item.startswith("container") else "type", item))
    try:
        r = requests.get(url, verify=False)
        r.raise_for_status()
        return True
    except Exception as e:
        print("Failed to find {0} because of {1}.{2}".format(item, type(e), e))
        return False
f2445ce2926745333fe307ae3a226a428aa2089f
13,698
from pathlib import Path

def generate_filename(candidate: Path) -> Path:
    """Generate a filename which doesn't exist on the disk.

    This is not atomic."""
    orig = candidate
    i = 0
    while candidate.exists():
        stem = orig.stem
        candidate = orig.with_stem(f"{stem}-{i}")
        i += 1
    return candidate
4a304b30aac5091e8a3f59db3d7d293b272b92ef
13,699
from typing import Optional

def code_block(text: str, language: Optional[str]) -> str:
    """Formats text for discord message as code block"""
    return f"```{language or ''}\n" f"{text}" f"```"
c54d5b3e6f456745817efd03b89bd56d7fe4794e
13,700
import os

def get_last_bounce(data_file_name):
    """
    Find the index of the last bounce and return it.
    """
    if not os.path.exists(data_file_name):
        return None
    with open(data_file_name, 'r') as data_file:
        lines = data_file.readlines()
    if len(lines) == 0:
        return None
    line = lines[-1]
    if line.startswith("#"):
        return None
    else:
        count = int(line.split(",")[1]) + 1
        return count
d7a18e4581220f720e160e44896a76e77351e9c0
13,701
def Fredkin_check(function):
    """Decorator to check the arguments of calling Fredkin gate.

    Arguments:
        function {} -- The tested function
    """
    def wrapper(self, control_qubit):
        """Method to initialize Fredkin gate.

        Arguments:
            control_qubit {Qubit} -- Possible values: 0, 1 or 2

        Raises:
            ValueError

        Examples:
            >>> import qvantum
            >>>
            >>> h = qvantum.Fredkin(2)
        """
        if control_qubit == 0 or control_qubit == 1 or control_qubit == 2:
            return function(self, control_qubit)
        else:
            raise ValueError('Invalid input! Use number 0, 1, and 2 to mark the control qubit. ' +
                             '0: 1st qubit is the control. 1: 2nd qubit is the control. 2: 3rd qubit is the ' +
                             'control.')
    return wrapper
7b62bc1998baffcffa896a6a549c3044ebbc9abd
13,703
def current_scopes():
    """
    init security setting
    :return: pending
    """
    return []
2aa8500f0c1487723d74d975cf9e50f92904e1de
13,704
def urlsplit(url):
    """
    Splits a URL like "https://example.com/a/b?c=d&e#f" into a dict:
        {"scheme": "https", "netloc": ["example", "com"], "path": ["a", "b"],
         "query": ["c=d", "e"], "fragment": "f"}
    A trailing slash will result in a correspondingly empty final path component.
    """
    split_on_anchor = url.split("#", 1)
    split_on_query = split_on_anchor[0].split("?", 1)
    split_on_scheme = split_on_query[0].split("://", 1)
    if len(split_on_scheme) <= 1:
        # Scheme is optional
        split_on_scheme = [None] + split_on_scheme[:1]
    split_on_path = split_on_scheme[1].split("/")
    return {
        "scheme": split_on_scheme[0],
        "netloc": split_on_path[0].split("."),
        "path": split_on_path[1:],
        "query": split_on_query[1].split("&") if len(split_on_query) > 1 else None,
        "fragment": split_on_anchor[1] if len(split_on_anchor) > 1 else None,
    }
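# A hedged usage sketch for urlsplit, mirroring the docstring example.
parts = urlsplit("https://example.com/a/b?c=d&e#f")
assert parts["scheme"] == "https"
assert parts["netloc"] == ["example", "com"]
assert parts["path"] == ["a", "b"]
assert parts["query"] == ["c=d", "e"]
assert parts["fragment"] == "f"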
47cd2b556f842dd3ed499f841ff41d16c0747cbc
13,708
def rotate_key(element):
    """Returns a new key-value pair of the same size but with a different key."""
    (key, value) = element
    return key[-1] + key[:-1], value
e9407e87a24571bb17e484d7954764dce38003dc
13,709
import math

def hypotenuse_length(leg_a, leg_b):
    """Find the length of a right triangle's hypotenuse

    :param leg_a: length of one leg of triangle
    :param leg_b: length of other leg of triangle
    :return: length of hypotenuse

    >>> hypotenuse_length(3, 4)
    5.0
    """
    return math.sqrt(leg_a**2 + leg_b**2)
7a59ede73301f86a8b6ea1ad28490b151ffaa08b
13,710
def make_rowcol(string):
    """
    Creates a rowcol function similar to the rowcol function of a view

    Arguments:
        string -- The string on which the rowcol function should hold

    Returns:
        A function similar to the rowcol function of a sublime text view
    """
    rowlens = [len(x) + 1 for x in string.split("\n")]
    rowpos = []
    acc = 0
    for i in rowlens:
        acc += i
        rowpos.append(acc)

    def rowcol(pos):
        last = 0
        for i, k in enumerate(rowpos, 0):
            if pos < k:
                return (i, pos - last)
            last = k
        return (-1, -1)
    return rowcol
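# A hedged usage sketch for make_rowcol: in "ab\ncd", offset 4 is the 'd'
# on row 1, column 1; offsets past the end map to (-1, -1).
rowcol = make_rowcol("ab\ncd")
assert rowcol(4) == (1, 1)
assert rowcol(99) == (-1, -1)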
87a0c162c69dd8f1e4e50f3b40da8772e645772e
13,713
def commonPoints(lines):
    """
    Given a list of lines, return dictionary - vertex -> count, where count
    specifies how many lines share the vertex.
    """
    count = {}
    for l in lines:
        for c in l.coords:
            count[c] = count.get(c, 0) + 1
    return count
1b838ddd4d6a2539b0270cd319a2197e90372c3a
13,716
import os

def fullpath(path):
    """Expands ~ and returns an absolute path"""
    return os.path.abspath(os.path.expanduser(path))
9299d60d67214a19fd0c2739beda4fa3b55ac38a
13,718
import os

def add_meta_and_doc_file(test_name, json_name):
    """ create
        .meta/tests.toml
        .meta/config.json
        .docs/instructions.md (based on
        ../problem-specifications/exercises/[EXERCISE]/description.md)
    for test_name

    Not yet implemented.
    """
    test_dir_name = os.path.dirname(test_name)
    meta_dir = os.path.join(test_dir_name, '.meta')
    doc_dir = os.path.join(test_dir_name, '.docs')
    return None
1181e82b627175c885ae34b01d6e8a811793956f
13,719
def isostring(dt):
    """Convert the datetime to ISO 8601 format with no microseconds. """
    if dt:
        return dt.replace(microsecond=0).isoformat()
db7326e53402c0982514c4516971b4460840aa20
13,720
import base64

def image_encoder(img_path):
    """Encode an image file as a base64 string.

    Parameters
    ----------
    img_path : str
        The path to the image to be encoded.

    Returns
    -------
    encoded_img : str
        A UTF-8 string containing the encoded image.
    """
    with open(img_path, 'rb') as img_file:
        encoded_img = base64.b64encode(img_file.read())
    return encoded_img.decode('utf-8')
7ef4c5c4a0fad340ab50c798e70b0f0c4ceb0d22
13,721
def inject_menu_items() -> dict:
    """Defines items of navbar"""
    return dict(menu_items=[
        {
            'title': '<i class="fa fa-home" aria-hidden="true"></i> Home',
            'url': '/',
            'function_name': 'home',
            'children': None
        },
        {
            'title': '<i class="fa fa-wrench" aria-hidden="true"></i> Manage',
            'children': [{
                'title': '<i class="fa fa-folder-open-o" aria-hidden="true"></i> Ingredient types',
                'function_name': 'ingredient_types'
            }, {
                'title': '<i class="fa fa-lemon-o" aria-hidden="true"></i> Ingredients',
                'function_name': 'ingredients'
            }]
        },
    ])
dfca070335ed11a60cc721056de6a90f62f66749
13,722
def filenameDictForDataPacketType(dataPacketType):
    """ Return a dict of fileDescriptors for a given DataPacket type. """
    foo = dataPacketType(None, None)
    return foo.filenames
6853add9035e3b7aad86a001ee7f46e5e69336a8
13,723
def git_origin_url(git_repo):  # pylint: disable=redefined-outer-name
    """
    A fixture to fetch the URL of the online hosting for this repository.
    Returns None if there is no origin defined for it, or if the target
    directory isn't even a git repository.
    """
    if git_repo is None:
        return None
    try:
        origin = git_repo.remotes.origin
    except ValueError:
        # This local repository isn't linked to an online origin
        return None
    return origin.url
d1f301a31aca6fae2f687d2b58c742ee747c4114
13,724
from typing import Any

def argument(*name_or_flags: Any, **kwargs: Any) -> tuple:
    """Convenience function to properly format arguments to pass to the
    subcommand decorator.
    """
    return (list(name_or_flags), kwargs)
22b2cbda7a3c2e4113c47896d60290b124cf85f1
13,725
import subprocess
import numpy as np

def get_free_gpu():
    """Selects the gpu with the most free memory
    """
    output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free',
                              stdout=subprocess.PIPE, shell=True).communicate()[0]
    output = output.decode("ascii")
    # assumes that it is on the popiah server and the last gpu is not used
    memory_available = [int(x.split()[2]) for x in output.split("\n")[:-2]]
    if not memory_available:
        return
    print("Setting GPU to use to PID {}".format(np.argmax(memory_available)))
    return np.argmax(memory_available)
4432dda1b27cb7c913d9dd20348b9ac86ee39faa
13,728
import os

def read_file(filename):
    """Reading file from filesystem."""
    result = ''
    # Checking if file exists
    if not os.path.isfile(filename):
        # If file does not exist, return suitable information
        return "file '" + filename + "' not found"
    else:
        # If file exists, open it.
        with open(filename, 'r') as file:
            text = file.read().splitlines()
        # If file has more than 2 lines, use readlines to return pretty text.
        if len(text) > 2:
            with open(filename, 'r') as file:
                result = file.readlines()
        # If file contains just one line, return just its value.
        else:
            result = text[0]
    return result
6ef21d08745d302bc1aadeabc0e779a425b1b7f5
13,729
def special_len(tup):
    """
    comparison function that will sort a document:
    (a) according to the number of segments
    (b) according to its longer segment
    """
    doc = tup[0] if type(tup) is tuple else tup
    return (len(doc), len(max(doc, key=len)))
81154c8e1b31dc37cffc43dfad608a5cd5281e4c
13,730
import os

def assemble_paths_list():
    """Returns a list of paths to search for libraries and headers.
    """
    PATHS = [
        ("/usr/lib", "/usr/include"),
        ("/usr/lib64", "/usr/include"),
        ("/usr/X11/lib", "/usr/X11/include"),
        ("/usr/X11R6/lib", "/usr/X11R6/include"),
        ("/usr/local/lib", "/usr/local/include"),
        ("/opt/lib", "/opt/include")
    ]

    ## add path from environment var LD_LIBRARY_PATH
    try:
        ld_lib_path = os.environ["LD_LIBRARY_PATH"]
    except KeyError:
        pass
    else:
        for path in ld_lib_path.split(":"):
            path = path.strip()
            (dir, first) = os.path.split(path)
            include_path = os.path.join(dir, "include")
            # compare against the (lib, include) tuples already collected
            if (path, include_path) not in PATHS:
                PATHS.append((path, include_path))

    ## add paths from /etc/ld.so.conf
    try:
        ld_so_conf = open("/etc/ld.so.conf", "r").readlines()
    except IOError:
        return PATHS
    for path in ld_so_conf:
        path = path.strip()
        (dir, first) = os.path.split(path)
        include_path = os.path.join(dir, "include")
        if (path, include_path) not in PATHS:
            PATHS.append((path, include_path))

    return PATHS
9c2f3c1883f241884c5b3b15ee06c8036252ae26
13,731
def opener(wrapped, instance, args, kwargs):
    """
    opener(yaml.load)("conf.yaml")
    opener(json.load)("conf.json")
    """
    path = args[0]
    with open(path) as f:
        return wrapped(f)
05c42f0e668f2419b4e478e8602c056a8cda634b
13,732
def make_header_lines(all_bits):
    """ Formats header lines """
    lines = []

    # Bit names
    bit_names = ["%d_%d" % (b[0], b[1]) for b in all_bits]
    bit_len = 6
    for i in range(bit_len):
        line = ""
        for j in range(len(all_bits)):
            bstr = bit_names[j].ljust(bit_len).replace("_", "|")
            line += bstr[i]
        lines.append(line)

    return lines
6c48eb125f39ca55ae47c5f6a21732fdb60c9de0
13,733
def _pair_product(p, i, j):
    """Return product of counts of i and j from data."""
    try:
        return sum(p[i].values()) * sum(p[j].values())
    except KeyError:
        return 0
7a7a19d62544ee88f6755c64f08438c0ab95033f
13,735
from typing import Optional

def optional_arg_return(some_str: Optional[str]) -> Optional[int]:
    """Optional type in argument and return value."""
    if not some_str:
        return None  # OK
    # Mypy will infer the type of some_str to be str due to the check against None
    return len(some_str)
8125965fb1fe1f11f4f5045afc8107bcdfc95fc0
13,736
import os

def get_failure_report_path(sample_alignment, report_filename):
    """Returns full path to given report for ExperimentSampleToAlignment.
    """
    return os.path.join(sample_alignment.get_model_data_dir(), report_filename)
2979f8d772633ebbcd1d20895987d52d1efdbf36
13,738
import cProfile
import time
import pstats

def profile(fn):
    """
    Profile the decorated function, storing the profile output in /tmp

    Inspired by
    https://speakerdeck.com/rwarren/a-brief-intro-to-profiling-in-python
    """
    def profiled_fn(*args, **kwargs):
        filepath = "/tmp/%s.profile" % fn.__name__
        prof = cProfile.Profile()
        start = time.time()
        result = prof.runcall(fn, *args, **kwargs)
        duration = time.time() - start
        print("Function ran in %.6f seconds - output written to %s" % (
            duration, filepath))
        prof.dump_stats(filepath)
        print("Printing stats")
        stats = pstats.Stats(filepath)
        stats.sort_stats('cumulative')
        stats.print_stats()
        return result
    return profiled_fn
aaf1711fefee698ff5456e120dcc06cbb8c22a8f
13,739
import asyncio

def _ensure_coroutine_function(func):
    """Return a coroutine function.

    func: either a coroutine function or a regular function

    Note a coroutine function is not a coroutine!
    """
    if asyncio.iscoroutinefunction(func):
        return func
    else:
        async def coroutine_function(evt):
            return func(evt)
        return coroutine_function
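# A hedged usage sketch for _ensure_coroutine_function: a plain callback is
# wrapped so it can be awaited like a native coroutine function.
async def _demo():
    cb = _ensure_coroutine_function(lambda evt: evt * 2)
    assert await cb(21) == 42

asyncio.run(_demo())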
e8271691abe4d4b7b965efe561f137adf77f6b4f
13,740
import os

def load_samples(sample_file_path):
    """Read in sample file, returning a flat list of all samples inside"""
    samples = []
    print("Reading: {}".format(sample_file_path))
    with open(os.path.abspath(sample_file_path), "r") as sample_file:
        for sample in sample_file:
            samples.append(sample.strip())
    return samples
bf854993a9e3b3bac6e7f3804f1a24c4b45ff7bc
13,741
def interpolate_value(x, y, t):
    """Find the value of x: y(x) = t."""
    if t > max(y) or t < min(y):
        raise RuntimeError("t outside of [%f, %f]" % (min(y), max(y)))
    for j in range(1, len(x)):
        x0 = x[j - 1]
        y0 = y[j - 1]
        x1 = x[j]
        y1 = y[j]
        if (y0 - t) * (y1 - t) < 0:
            return x0 + (t - y0) * (x1 - x0) / (y1 - y0)
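# A hedged usage sketch for interpolate_value: y crosses t=5 between the
# first two samples, so linear interpolation gives x=0.5.
assert interpolate_value([0, 1, 2], [0, 10, 20], 5) == 0.5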
cebddb77534a5a09b825fa08c1083c2dc65db5d8
13,742
def pwd(session, *_):
    """Prints the current directory"""
    print(session.env.pwd)
    return 0
0f63b0483453f30b9fbaf5f561cb8eb90f38e107
13,743
def percent_replies(user):
    """
    description:
        en: Percent of comments that are replies.
        de: Prozent der Kommentare, die Antworten sind.
    type: float
    valid: probability
    """
    return sum(1 if c.get('parent_id', '') else 0
               for c in user['comments']) / len(user['comments'])
2e9f78fa41742cbc9a6d6a692b9acf41200d4146
13,744
def list_roles(*args):
    """DEPRECATED: Use list"""
    return list(*args)
397cafd85ab863edf08d12ba647bcb670c371eed
13,745
def feature_list_and_dict(features):
    """
    Assign numerical indices to a global list of features
    :param features: iterable of feature names
    :return: sorted list of features, dict mapping features to their indices
    """
    feature_list = sorted(features)
    feature_dict = {feat: i for i, feat in enumerate(feature_list)}
    return feature_list, feature_dict
11006cdc4871c339cf3936a1734f012f21d92459
13,746
def load_weights(network, filename):
    """
    Loads a model's weights

    filename is the path of the file that the model should be loaded from

    Returns: None (the weights are loaded into network in place)
    """
    network.load_weights(filename)
    return None
583003f0232fdfdf06eb172db97d22440877cb53
13,747
def compute_St(data):
    """
    Given a dataset, computes the variance matrix of its features.
    """
    n_datapoints, n_features = data.shape
    # Computing the 'mean image'. A pixel at position (x,y) in this image is the
    # mean of all the pixels at position (x,y) of the images in the dataset.
    # This corresponds to the 'mu' we have seen in the lectures.
    mu = data.mean(axis=0)  # apply along the rows for each column
    centered_data = data - mu
    # Computing the covariance matrix; use a true matrix product ('@'), since
    # elementwise '*' would broadcast incorrectly for plain ndarrays
    St = (1. / n_datapoints) * (centered_data.T @ centered_data)
    return St
99f55de9c19f7304136e5737d9acba0e6de4d2fd
13,748
def metamodel_to_swagger_type_converter(input_type):
    """
    Converts API Metamodel type to their equivalent Swagger type.
    A tuple is returned. first value of tuple is main type.
    second value of tuple has 'format' information, if available.
    """
    input_type = input_type.lower()
    if input_type == 'date_time':
        return 'string', 'date-time'
    if input_type == 'secret':
        return 'string', 'password'
    if input_type == 'any_error':
        return 'string', None
    if input_type == 'opaque':
        return 'object', None
    if input_type == 'dynamic_structure':
        return 'object', None
    if input_type == 'uri':
        return 'string', 'uri'
    if input_type == 'id':
        return 'string', None
    if input_type == 'long':
        return 'integer', 'int64'
    if input_type == 'double':
        return 'number', 'double'
    if input_type == 'binary':
        return 'string', 'binary'
    return input_type, None
a1f01124546acc3035d3db3329b0194ac65c2f17
13,750
import os
import glob

def version_file_name(file_name):
    """
    Versions file name with .v{i}. Returns new file name with latest i
    """
    file_path = os.path.dirname(file_name)
    file_name = os.path.basename(file_name)
    files = glob.glob(os.path.join(file_path, file_name + '*'))

    def in_list(i, files):
        for f in files:
            if file_name + f".v{i}" in f:
                return True
        return False

    i = 0
    while in_list(i, files):
        i += 1
    return os.path.join(file_path, file_name + f".v{i}")
3de20a26d695dff2679946ae3033f2e7fe29bff1
13,751
def is_instance(arg, types, allow_none=False):
    """
    >>> is_instance(1, int)
    True
    >>> is_instance(3.5, float)
    True
    >>> is_instance('hello', str)
    True
    >>> is_instance([1, 2, 3], list)
    True
    >>> is_instance(1, (int, float))
    True
    >>> is_instance(3.5, (int, float))
    True
    >>> is_instance('hello', (str, list))
    True
    >>> is_instance([1, 2, 3], (str, list))
    True
    >>> is_instance(1, float)
    False
    >>> is_instance(3.5, int)
    False
    >>> is_instance('hello', list)
    False
    >>> is_instance([1, 2, 3], str)
    False
    >>> is_instance(1, (list, str))
    False
    >>> is_instance(3.5, (list, str))
    False
    >>> is_instance('hello', (int, float))
    False
    >>> is_instance([1, 2, 3], (int, float))
    False
    >>> is_instance(None, int)
    False
    >>> is_instance(None, float)
    False
    >>> is_instance(None, str)
    False
    >>> is_instance(None, list)
    False
    >>> is_instance(None, (int, float))
    False
    >>> is_instance(None, (int, float))
    False
    >>> is_instance(None, (str, list))
    False
    >>> is_instance(None, (str, list))
    False
    >>> is_instance(1, int, allow_none=True)
    True
    >>> is_instance(3.5, float, allow_none=True)
    True
    >>> is_instance('hello', str, allow_none=True)
    True
    >>> is_instance([1, 2, 3], list, allow_none=True)
    True
    >>> is_instance(1, (int, float), allow_none=True)
    True
    >>> is_instance(3.5, (int, float), allow_none=True)
    True
    >>> is_instance('hello', (str, list), allow_none=True)
    True
    >>> is_instance([1, 2, 3], (str, list), allow_none=True)
    True
    >>> is_instance(1, float, allow_none=True)
    False
    >>> is_instance(3.5, int, allow_none=True)
    False
    >>> is_instance('hello', list, allow_none=True)
    False
    >>> is_instance([1, 2, 3], str, allow_none=True)
    False
    >>> is_instance(1, (list, str), allow_none=True)
    False
    >>> is_instance(3.5, (list, str), allow_none=True)
    False
    >>> is_instance('hello', (int, float), allow_none=True)
    False
    >>> is_instance([1, 2, 3], (int, float), allow_none=True)
    False
    >>> is_instance(None, int, allow_none=True)
    True
    >>> is_instance(None, float, allow_none=True)
    True
    >>> is_instance(None, str, allow_none=True)
    True
    >>> is_instance(None, list, allow_none=True)
    True
    >>> is_instance(None, (int, float), allow_none=True)
    True
    >>> is_instance(None, (int, float), allow_none=True)
    True
    >>> is_instance(None, (str, list), allow_none=True)
    True
    >>> is_instance(None, (str, list), allow_none=True)
    True
    """
    return (allow_none and arg is None) or isinstance(arg, types)
52149919b010909614c7dc83e189fa3b8a950393
13,752
def derivative_2nd(f, x, h):
    """function to calculate 2nd derivative"""
    h = float(h)
    func = f(x)
    return (f(x + h) - (2 * func) + (f(x - h))) / (h * h)
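# A hedged sanity check for derivative_2nd: for f(x) = x**3 the exact second
# derivative at x=2 is 12, and the central difference lands very close.
approx = derivative_2nd(lambda x: x ** 3, 2, 1e-5)
assert abs(approx - 12) < 1e-3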
6c420bf2cf8cff27fb3e1f022d67d62bf38c9631
13,753
def get_fire_mode(weapon):
    """Returns current fire mode for a weapon."""
    return weapon['firemodes'][weapon['firemode']]
691fc5e9b3ce40e51ab96930086f8d57e5fa6284
13,754
def data_to_str(data, contents_ignore_fields, grab_only_fields):
    """
    desc: recursively converts all data to a single concatenated string
    only works for strings, lists and dicts
    """
    c = ""
    if type(data) == list:
        for d in data:
            c += " " + data_to_str(d, contents_ignore_fields, grab_only_fields)
    elif type(data) == dict:
        for f, v in data.items():
            if len(grab_only_fields) != 0:
                if f in grab_only_fields:
                    c += " " + data_to_str(v, contents_ignore_fields, grab_only_fields)
            elif f not in contents_ignore_fields:
                c += " " + data_to_str(v, contents_ignore_fields, grab_only_fields)
    elif (type(data) == float) or (type(data) == int):
        c += " " + str(data)
    elif type(data) == str:
        c += " " + data
    return c
e66bcea4b40f48166bb256b06f8595604e5d8f62
13,756
def depth_first_ordering(adjacency, root):
    """Compute depth-first ordering of connected vertices.

    Parameters
    ----------
    adjacency : dict
        An adjacency dictionary. Each key represents a vertex
        and maps to a list of neighboring vertex keys.
    root : str
        The vertex from which to start the depth-first search.

    Returns
    -------
    list
        A depth-first ordering of all vertices in the network.

    Notes
    -----
    Return all nodes of a connected component containing 'root' of a network
    represented by an adjacency dictionary.

    This implementation uses a *to visit* stack. The principle of a stack
    is LIFO. In Python, a list is a stack.

    Initially only the root element is on the stack. While there are still
    elements on the stack, the node on top of the stack is 'popped off' and if
    this node was not already visited, its neighbors are added to the stack if
    they hadn't already been visited themselves.

    Since the last element on top of the stack is always popped off, the
    algorithm goes deeper and deeper in the datastructure, until it reaches a
    node without (unvisited) neighbors and then backtracks. Once a new node
    with unvisited neighbors is found, there too it will go as deep as possible
    before backtracking again, and so on. Once there are no more nodes on the
    stack, the entire structure has been traversed.

    Note that this returns a depth-first spanning tree of a connected component
    of the network.

    Examples
    --------
    >>> import compas
    >>> from compas.datastructures import Network
    >>> from compas.topology import depth_first_search as dfs
    >>> network = Network.from_obj(compas.get('lines.obj'))
    >>> print(dfs(network, network.get_any_vertex()))

    See Also
    --------
    *

    """
    adjacency = {key: set(nbrs) for key, nbrs in iter(adjacency.items())}
    tovisit = [root]
    visited = set()
    ordering = []
    while tovisit:
        # pop the last added element from the stack
        node = tovisit.pop()
        if node not in visited:
            # mark the node as visited
            visited.add(node)
            ordering.append(node)
            # add the unvisited nbrs to the stack
            tovisit.extend(adjacency[node] - visited)
    return ordering
fcff465cfaa2e3a500e8d177e6b9e78cc66bc21d
13,757
def encode_dxt1(image):
    """Encode a PIL Image into DXT1 (stored in bytearray)."""
    pixels = image.load()
    # you can access the colours in the image by indexing:
    #   pixels[x, y]
    #   => (red, green, blue)
    # for example, pixels[0, 0] returns (r, g, b) for the top-left pixel
    # for more info, see the PIL docs:
    # http://pillow.readthedocs.org/en/3.0.x/reference/Image.html
    image_data = bytearray()
    # this is just sample_small.png in DXT1, replace
    # the following with your encoding code
    image_data.extend([0xE6, 0x81, 0xE9, 0x48, 0xB5, 0xBD, 0x2F, 0x0B])
    image_data.extend([0xE3, 0xC2, 0xE6, 0x81, 0xB5, 0xAD, 0x2F, 0x0B])
    return image_data
5bc6eae61b57de9e81743e5c72725f7c6f62b34f
13,758
from typing import List
from typing import Tuple
from textwrap import dedent

def get_rt_object_to_complete_texts() -> List[Tuple[str, str]]:
    """Returns a list of tuples of riptable code object text with associated completion text."""
    return [
        (
            dedent(
                '''Dataset({_k: list(range(_i * 10, (_i + 1) * 10)) for _i, _k in enumerate(
                ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "eta", "theta", "iota",
                "kappa", "lambada", "mu", "nu", "xi", "omnicron", "pi"])})'''
            ),
            "dataset.",
        ),
        (
            dedent(
                '''Struct({"alpha": 1, "beta": [2, 3], "gamma": ['2', '3'], "delta": arange(10),
                "epsilon": Struct({
                    "theta": Struct({
                        "kappa": 3,
                        "zeta": 4,
                    }),
                    "iota": 2,
                })
                })'''
            ),
            "struct.",
        ),
        (
            dedent(
                '''Multiset(
                {"ds_alpha": Dataset({k: list(range(i * 10, (i + 1) * 10)) for i, k in enumerate(
                ["alpha", "beta", "gamma", "delta", "epsilon", "zeta"])}),
                "ds_beta": Dataset({k: list(range(i * 10, (i + 1) * 10)) for i, k in enumerate(
                ["eta", "theta", "iota", "kappa", "lambada", "mu"])}),
                })'''
            ),
            "multiset.",
        ),
    ]
007d2291fd922aab5627783897a56cd5fd715f98
13,760
import requests

# The GitLab access token is assumed to be defined at module scope; the
# original snippet imported the stdlib 'token' module here, which cannot
# work as an Authorization credential.
token = ""

def create_mirror(gitlab_repo, github_token, github_user):
    """Creates a push mirror of GitLab repository.

    For more details see:
    https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html#pushing-to-a-remote-repository-core

    Args:
        - gitlab_repo: GitLab repository to mirror.
        - github_token: GitHub authentication token.
        - github_user: GitHub username under whose namespace the mirror will be
          created (defaults to GitLab username if not provided).

    Returns:
        - JSON representation of created mirror.
    """
    url = f'https://gitlab.com/api/v4/projects/{gitlab_repo["id"]}/remote_mirrors'
    headers = {'Authorization': f'Bearer {token}'}

    # If github-user is not provided use the gitlab username
    if not github_user:
        github_user = gitlab_repo['owner']['username']

    data = {
        'url': f'https://{github_user}:{github_token}@github.com/{github_user}/{gitlab_repo["path"]}.git',
        'enabled': True
    }

    try:
        r = requests.post(url, json=data, headers=headers)
        r.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    return r.json()
2a5d7e01ca04a6dcb09d42b5fc85092c3476af0d
13,761
def genelist_mask(candidates, genelist, whitelist=True, split_on_dot=True):
    """Get a mask for genes on or off a list

    Parameters
    ----------
    candidates : pd.Series
        Candidate genes (from matrix)
    genelist : pd.Series
        List of genes to filter against
    whitelist : bool, default True
        Is the gene list a whitelist (True), where only genes on it should
        be kept or a blacklist (False) where all genes on it should be
        excluded
    split_on_dot : bool, default True
        If True, remove part of gene identifier after '.'. We do this by
        default because ENSEMBL IDs contain version numbers after periods.

    Returns
    -------
    passing_mask : ndarray
        boolean array of passing genes
    """
    if split_on_dot:
        candidates = candidates.str.split('.').str[0]
        genelist = genelist.str.split('.').str[0]

    if whitelist:
        mask = candidates.isin(genelist)
    else:
        mask = ~candidates.isin(genelist)

    return mask.values
53e1f80de097311faddd4bfbff636729b076c984
13,762
from datetime import datetime

def writenow():
    """Return utcnow() as ISO format string"""
    return datetime.isoformat(datetime.utcnow())
19a0e4653bfa7cfab2528e81317fd2399ca630d5
13,763
def parse_releases(list):
    """
    Parse releases from a MangaUpdate's search results page.

    Parameters
    ----------
    list : BeautifulSoup
        BeautifulSoup object of the releases section of the search page.

    Returns
    -------
    releases : list of dicts
        List of recent releases found by the search.
        ::

            [
                {
                    'id': 'Series Id',
                    'name': 'Series name',
                    'chp': 'chapter number',
                    'vol': 'number' or None,
                    'date': '02/21/21',  # Date in month/day/year
                    'group': {
                        'name': 'Scanlation Group',
                        'id': 'Scanlation Group Id'
                    }
                }
            ]
    """
    releases = list.find_all("div", class_="text")[:-1]
    results = []
    for i in range(0, len(releases), 5):
        release = {}
        release["date"] = str(releases[i].string)
        series_link = releases[i + 1]
        if series_link.a is None:
            release["name"] = str(series_link.string)
        else:
            release["name"] = str(series_link.a.string)
            release["id"] = series_link.a["href"].replace(
                "https://www.mangaupdates.com/series.html?id=", ""
            )
        vol = releases[i + 2].get_text()
        release["vol"] = vol if vol else None
        release["chp"] = str(releases[i + 3].string)
        release["group"] = {
            "name": releases[i + 4].get_text(),
            "id": releases[i + 4]
            .a["href"]
            .replace("https://www.mangaupdates.com/groups.html?id=", ""),
        }
        results.append(release)
    return results
e7e93130732998b919bbd2ac69b7fc36c20dd62d
13,764
def read_data_with_ref(source_path, target_path, ref_path):
    """Read sentences and parallel sentence references."""
    with open(source_path, "r", encoding="utf-8") as source_file,\
            open(target_path, "r", encoding="utf-8") as target_file:
        source_lines = [l for l in source_file]
        target_lines = [l for l in target_file]
    references = set()
    with open(ref_path, mode="r", encoding="utf-8") as ref_file:
        for l in ref_file:
            i, j = l.split()
            references.add((int(i), int(j)))
    return source_lines, target_lines, references
1ce3d768ddb5d4de0f72cbe0633dfafe20bdc2c0
13,765
def _linkify(value):
    """create link format"""
    out = []
    for link in value:
        out.append(','.join(list(link)))
    return '^'.join(out)
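# A hedged usage sketch for _linkify: each link becomes a comma-joined string,
# and the links are then joined with '^'.
assert _linkify([("a", "b"), ("c", "d")]) == "a,b^c,d"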
1fa2020148ef1fd7ba7f10eecb54836517ff0abd
13,766
import torch

def get_packed_sequence_info(packed):
    """Get `batch_sizes` and `sorted_indices` of `PackedSequence`.

    Args:
        packed (object): Packed sequences. If it contains multiple
            `PackedSequence`s, then only one of them are sampled assuming that
            all of them have same `batch_sizes` and `sorted_indices`.

    Returns:
        Tensor: `PackedSequence.batch_sizes`.
        Tensor: `PackedSequence.sorted_indices`.
    """
    if isinstance(packed, torch.nn.utils.rnn.PackedSequence):
        return packed.batch_sizes, packed.sorted_indices
    if isinstance(packed, tuple):
        for y in packed:
            ret = get_packed_sequence_info(y)
            if ret is not None:
                return ret
    return None
f3540bfe63d5ef72ecb1df20e09dd249310508b7
13,768
def ouverture_image(largeur, hauteur):
    """
    Generates the opening tag describing an SVG image with the given
    dimensions. The parameters are integers.
    Note: the origin is at the top left and the Y axis points downwards.
    """
    balise = "<svg xmlns='http://www.w3.org/2000/svg' version='{version}' " \
             "stroke='{lignes}' stroke-linecap='round' fill='{remplissage}' " \
             "width='{largeur}' height='{hauteur}'>"
    return balise.format(version="1.1",
                         # black lines by default
                         lignes="black",
                         # and no fill
                         remplissage="none",
                         largeur=largeur,
                         hauteur=hauteur)
da53a8d8e45df8d54bb63dcf8f88b2465af486aa
13,770
def list_size_reducer(reduction_factor, your_list):
    """Reduce a list by keeping only every nth element.

    Parameters:
        reduction_factor (int): keep every reduction_factor-th element
        your_list (list): the list to be reduced

    Returns:
        reduced_list (list): the reduced list, starting with the first element
    """
    # input type checking
    assert type(reduction_factor) == int, 'reduction_factor should be an int.'
    assert type(your_list) == list, ('The thing to be reduced needs to be a list.')

    # create new list with every nth point of your_list
    reduced_list = [your_list[0]]
    for i in range(reduction_factor, len(your_list), reduction_factor):
        reduced_list.append(your_list[i])
    return reduced_list
0ed7dbff4a0b27146ffa1f44ce9e517bc4c632b0
13,772
import requests
import json

def request_data(url: str, where_clause: str = "1=1") -> dict:
    """
    Requests data from the ArcGIS api and returns the response in JSON format.
    """
    params = {
        "referer": "https://www.mywebapp.com",
        "user-agent": "python-requests/2.9.1",
        "where": where_clause,
        "outFields": "*",        # all fields
        "returnGeometry": True,  # include geometries
        "f": "json",             # json format
        "cacheHint": True,       # request access via CDN
    }
    r = requests.get(url=url, params=params)
    result = json.loads(r.text)
    return result
61ce82eb38f48805d62cdf8c9927f2607411281e
13,773
def Sqrt(x):
    """Square root function."""
    return x ** 0.5
e726dfad946077826bcc19f44cd6a682c3b6410c
13,774
def hello(friend_name):
    """Says 'Hello!' to a friend."""
    return "Hello, {}!".format(friend_name.title())
706c5a2d3f7ebdf9c7b56e49bb0541655c191505
13,775
def _get_story_duration(story_tag):
    """
    Return the sum of the text time and media time, or 0 if not found
    """
    try:
        metadata = story_tag.find('mosExternalMetadata')
        payload = metadata.find('mosPayload')
    except AttributeError:
        return 0
    try:
        return float(payload.find('StoryDuration').text)
    except AttributeError:
        pass
    try:
        text_time = float(payload.find('TextTime').text)
        media_time = float(payload.find('MediaTime').text)
        return text_time + media_time
    except AttributeError:
        return 0
df7f90eb6a02c0e8f9e6deaa45770fdc03c79a50
13,776
def generate_image(model, landmark, e_vector, device):
    """
    Generator generates an image from landmark and e_vector

    Args:
        model(nn.Module) : Generator model which generates an image from landmark and e_vector.
        landmark(tensor) : Landmark, of type torch.Tensor.
        e_vector(tensor)
        device(int) : Cuda device number

    Return:
        image(ndarray) : Generated image (RGB)
    """
    e_vector = e_vector.to(device)
    image = model(landmark, e_vector)
    image = image.cpu().detach().numpy()
    image = image.transpose(0, 2, 3, 1)
    return image
1c0ee6ca116b663ab0d257d6e7c9a1e7a85aaa37
13,777
import uuid

def set_compose_session(request):
    """Initialize a new "compose" session.

    It is used to keep track of attachments defined with a new message. Each
    new message will be associated with a unique ID (in order to avoid
    conflicts between users).

    :param request: a Request object.
    :return: the new unique ID.
    """
    randid = str(uuid.uuid4()).replace("-", "")
    request.session["compose_mail"] = {"id": randid, "attachments": []}
    return randid
57384af709ce69d648bf85bce0c8a157fa5627c4
13,778
def CreateLessThanOrEqualRegex(number):
    """ Return a regular expression to test whether an integer less than or
        equal to 'number' is present in a given string.
    """
    # In three parts, build a regular expression that matches any number
    # smaller than 'number'.
    # For example, 78356 would give a regular expression that looks like:
    # Part 1
    # (78356|                        # 78356
    # Part 2
    #  7835[0-5]|                    # 78350-78355
    #  783[0-4][0-9]|                # 78300-78349
    #  78[0-2][0-9][0-9]|            # 78000-78299
    #  7[0-7][0-9][0-9][0-9]|        # 70000-77999
    #  [0-6][0-9][0-9][0-9][0-9]|    # 10000-69999
    # Part 3
    #  [0-9][0-9][0-9][0-9]|         # 1000-9999
    #  [0-9][0-9][0-9]|              # 100-999
    #  [0-9][0-9]|                   # 10-99
    #  [0-9])                        # 0-9

    # Part 1: Create an array with all the regexes, as described above.
    # Prepopulate it with the number itself.
    number = str(number)
    expressions = [number]

    # Convert the number to a list, so we can translate digits in it to
    # expressions.
    num_list = list(number)
    num_len = len(num_list)

    # Part 2: Go through all the digits in the number, starting from the end.
    # Each iteration appends a line to 'expressions'.
    for index in range(num_len - 1, -1, -1):
        # Convert this digit back to an integer.
        digit = int(num_list[index])

        # Part 2.1: No processing if this digit is a zero.
        if digit == 0:
            continue

        # Part 2.2: We switch the current digit X by a range "[0-(X-1)]".
        num_list[index] = '[0-%d]' % (digit - 1)

        # Part 2.3: We set all following digits to be "[0-9]".
        # Since we just decremented a digit in a most important position, all
        # following digits don't matter. The possible numbers will always be
        # smaller than before we decremented.
        for next_digit in range(index + 1, num_len):
            num_list[next_digit] = '[0-9]'

        # Part 2.4: Add this new sub-expression to the list.
        expressions.append(''.join(num_list))

    # Part 3: We add all the full ranges to match all numbers that are at least
    # one order of magnitude smaller than the original numbers.
    for index in range(1, num_len):
        expressions.append('[0-9]' * index)

    # All done. We now have our final regular expression.
    regex = '(%s)' % ('|'.join(expressions))
    return regex
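# A hedged usage sketch for CreateLessThanOrEqualRegex; note the pattern
# matches a qualifying integer anywhere in the string, not the whole string.
import re
pattern = CreateLessThanOrEqualRegex(25)
assert pattern == "(25|2[0-4]|[0-1][0-9]|[0-9])"
assert re.search(pattern, "version 17") is not None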
1de3074ede96b2a2bd0a28cb3f0167da7a767d40
13,779
def allowed_attrs(attrs, new=False):
    """Only allow href, target, rel and title."""
    allowed = [
        (None, 'href'),
        (None, 'target'),
        (None, 'rel'),
        (None, 'title'),
        '_text',
    ]
    return dict((k, v) for k, v in attrs.items() if k in allowed)
28f7e310f1ff1cbec58ea505c179494491af7171
13,780
import subprocess

def create_sparse_dmg(name, out_path):
    """Creates a sparseimage (auto resizing read/write image) with the
    specified volume name at out_path."""
    print('Creating disk image (%s)...' % out_path)
    return subprocess.call([
        'hdiutil', 'create',
        '-volname', name,
        '-type', 'SPARSE',
        '-layout', 'GPTSPUD',
        '-fs', 'APFS',
        '-size', '4g',
        out_path,
    ])
3696ba86fe5713df1b219e92d7394685c606f529
13,781
import requests
import json

def get_iam_token(oauth_token):
    """
    Obtain an IAM token.
    @param oauth_token: OAuth token from the Yandex.OAuth service
    @return: (str) - IAM token.
    """
    url = 'https://iam.api.cloud.yandex.net/iam/v1/tokens'
    data = {'yandexPassportOauthToken': oauth_token}
    with requests.post(url, data=json.dumps(data)) as resp:
        if resp.status_code != 200:
            raise RuntimeError("Invalid response received: code: %d, message: %s"
                               % (resp.status_code, resp.text))
        else:
            resp_dict = resp.json()
            return resp_dict['iamToken']
939b4a512d20e5dd71c576f84d5a6ae10fb0e7e9
13,782
import requests

def ack_url(url):
    """Fetch a web page and return the page text."""
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"}
    response = requests.get(url=url, headers=headers)
    return response.text
8fa3f9c2b8983d45523461c0fa28cf9746d572c7
13,783
import curses

def _alternative_left_right(term):
    """_alternative_left_right(T) -> dict

    Return dict of sequences ``term._cuf1``, and ``term._cub1``,
    valued as ``KEY_RIGHT``, ``KEY_LEFT`` when appropriate if available.

    some terminals report a different value for *kcuf1* than *cuf1*,
    but actually send the value of *cuf1* for right arrow key
    (which is non-destructive space).
    """
    keymap = dict()
    if term._cuf1 and term._cuf1 != u' ':
        keymap[term._cuf1] = curses.KEY_RIGHT
    if term._cub1 and term._cub1 != u'\b':
        keymap[term._cub1] = curses.KEY_LEFT
    return keymap
ffdbdbc3327af956c39ca1f806935c5dc651ff9b
13,786
def gfMDS_make_cmd_string(info):
    """
    Purpose:
     Construct the command line options string passed to the MDS C code.
    Usage:
    Author: PRI
    Date: May 2018
    """
    # create the input files string
    in_files = info["in_file_paths"][0]
    for in_file in info["in_file_paths"][1:]:
        in_files = in_files + "+" + in_file
    # get the output base path
    out_base_path = info["out_base_path"]
    # start creating the command list of MDS options
    cmd = ['./mds/bin/gf_mds', '-input=' + in_files, '-output=' + out_base_path,
           '-date=TIMESTAMP', '-rows_min=0']
    # create the target label string
    tofill = info["target_mds"]
    cmd.append('-tofill=' + tofill)
    # create the driver label and tolerance strings
    sw_in = info["drivers_mds"][0]
    cmd.append('-sw_in=' + sw_in)
    sw_int = str(info["tolerances"][0][0]) + ","
    sw_int = sw_int + str(info["tolerances"][0][1])
    cmd.append('-sw_int=' + sw_int)
    if len(info["drivers_mds"][1]) > 0:
        ta = info["drivers_mds"][1]
        cmd.append('-ta=' + ta)
        tat = str(info["tolerances"][1])
        cmd.append('-tat=' + tat)
    if len(info["drivers_mds"][2]):
        vpd = info["drivers_mds"][2]
        cmd.append('-vpd=' + vpd)
        vpdt = str(info["tolerances"][2])
        cmd.append('-vpdt=' + vpdt)
    if info["time_step"] == 60:
        cmd.append('-hourly')
    return cmd
72a446bed8925b6b9ef20ebe21d2eb49982b86c2
13,787
def get_initial_state(inp):
    """
    return tuple of (terminals, initial assignments) where terminals is a
    dictionary: key = target and value is a list of start and end coordinates
    """
    terminals = {}
    assignments = {}
    for row_ind in range(len(inp)):
        for col_ind in range(len(inp[row_ind])):
            current_char = inp[row_ind][col_ind]
            if current_char != "_":
                fields = terminals.get(current_char)
                if fields:
                    fields.append((col_ind, row_ind))
                else:
                    terminals[current_char] = [(col_ind, row_ind)]
                assignments[(col_ind, row_ind)] = current_char
    return (terminals, assignments)
65189f967af0eaa41b914f2c2dd79f7e875fdafd
13,789
def count_digit(value):
    """Count the number of digits in the number passed into this function"""
    digit_counter = 0
    while value > 0:
        digit_counter = digit_counter + 1
        value = value // 10
    return digit_counter
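# A hedged sanity check for count_digit; note that 0 has no digits by this
# definition, since the loop requires value > 0.
assert count_digit(9305) == 4
assert count_digit(0) == 0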
f9b1738804b0a40aa72283df96d2707bcfd7e74c
13,790
def prepare_service(data):
    """Prepare service for catalog endpoint

    Parameters:
        data (Union[str, dict]): Service ID or service definition

    Returns:
        Tuple[str, dict]: str is ID and dict is service

    Transform ``/v1/health/state/<state>``::

        {
            "Node": "foobar",
            "CheckID": "service:redis",
            "Name": "Service 'redis' check",
            "Status": "passing",
            "Notes": "",
            "Output": "",
            "ServiceID": "redis1",
            "ServiceName": "redis"
        }

    to::

        {
            "ID": "redis1",
            "Service": "redis"
        }

    Extract from /v1/health/service/<service>::

        {
            "Node": {...},
            "Service": {
                "ID": "redis1",
                "Service": "redis",
                "Tags": None,
                "Address": "10.1.10.12",
                "Port": 8000
            },
            "Checks": [...]
        }
    """
    if not data:
        return None, {}
    if isinstance(data, str):
        return data, {}

    # from /v1/health/service/<service>
    if all(field in data for field in ("Node", "Service", "Checks")):
        return data["Service"]["ID"], data["Service"]

    # from /v1/health/checks/<service>
    # from /v1/health/node/<node>
    # from /v1/health/state/<state>
    # from /v1/catalog/service/<service>
    if all(field in data for field in ("ServiceName", "ServiceID")):
        return data["ServiceID"], {
            "ID": data["ServiceID"],
            "Service": data["ServiceName"],
            "Tags": data.get("ServiceTags"),
            "Address": data.get("ServiceAddress"),
            "Port": data.get("ServicePort"),
        }

    if list(data) == ["ID"]:
        return data["ID"], {}

    result = {}
    if "Name" in data:
        result["Service"] = data["Name"]
    for k in ("Service", "ID", "Tags", "Address", "Port"):
        if k in data:
            result[k] = data[k]
    return result.get("ID"), result
5af6f0ae150fe21cd41d1ab14aa129d07efa08c5
13,792
def func_1(x: float, a: float, b: float) -> float:
    """
    Test function.
    """
    return x + a + b
776ad7473aa52b16fcd759e376f5f47a51c73013
13,795
import socket

def get6addr(name):
    """Get IPv6 address for name. If necessary, an IPv4 over IPv6 address."""
    try:
        addr = socket.getaddrinfo(name, 'domain', family=socket.AF_INET6)
    except socket.gaierror:
        addr = socket.getaddrinfo(name, 'domain')
        addr6 = '::ffff:' + addr[0][4][0]
        addr = socket.getaddrinfo(addr6, 'domain', family=socket.AF_INET6)
    return addr[0][4]
4ed93d37bf891b80a8a3ad0ff4eee3c1db643733
13,796
import os

def update_fields(base_dict, fields_to_check, print_text, field_required, check_type):
    """
    For a subset of fields in a dictionary, prompts the user to provide updated
    values.

    Parameters
    ----------
    base_dict : Dictionary
        The fiducial dictionary we're updating the fields for. Will contain the
        ``.ini`` file parameters for either ``SAGE`` or ``cifog``.
    fields_to_check : List of strings
        The fields that we will be prompting the user to update.
    print_text : List of strings
        The text associated with the prompt for each field.
    field_required : List of boolean
        Specifies whether each of the fields in ``fields_to_check`` requires a
        user input. If ``False``, then the default value will be used.
    check_type : List of strings
        Some fields are paths to a directory or a file name. ``check_type``
        specifies which of these to check. Can be either ``"dir"`` or ``"file"``

    Returns
    ----------
    updated_dict : Dictionary
        Dictionary with identical data-structure to ``base_cifog_params`` but
        with certain fields updated via user input.
    """
    updated_dict = {}
    for idx in range(len(fields_to_check)):
        # Grab all the relevant info.
        field = fields_to_check[idx]
        text = print_text[idx]
        required = field_required[idx]
        check = check_type[idx]

        if required:
            # If the field is marked as required, keep asking for a field until
            # the user enters one.
            my_field = None
            text_to_print = "{0} (must be given): ".format(text, base_dict[field])
            while not my_field:
                my_field = input("{0}".format(text_to_print))
                if not my_field:
                    print("Must be specified.")
        else:
            # Otherwise just accept the default if they don't enter one.
            text_to_print = "{0} [default: {1}]: ".format(text, base_dict[field])
            my_field = input("{0}".format(text_to_print))
            if not my_field:
                my_field = base_dict[field]

        # Check that the directory path or the file actually exists.
        if check == "dir":
            if not os.path.isdir(my_field):
                print("Path {0} does not exist.".format(my_field))
                raise ValueError
        elif check == "file":
            if not os.path.isfile(my_field):
                print("File {0} does not exist.".format(my_field))
                raise ValueError

        updated_dict[field] = my_field

    return updated_dict
5a79a3d11e6c100f4ed9e5f983f610486e5a402f
13,798
def get_sys_uptime():
    """
    :return: (uptime, idle) in seconds
    """
    f = open("/proc/uptime")
    # %f(uptime) %f(idle), all in seconds
    uptime = f.read()
    f.close()
    uptime, idle = uptime.split(" ")
    return float(uptime), float(idle)
71406d10728820680046a2137129a7410f16cd78
13,799
import json

def parse_fio_output_file(fpath: str) -> dict:
    """
    Read and parse json from fio json outputs
    """
    lines = []
    with open(fpath, 'r') as fiof:
        do_append = False
        for l in fiof:
            if l.startswith('{'):
                do_append = True
            if do_append:
                lines.append(l)
            if l.startswith('}'):
                break
    try:
        return json.loads(''.join(lines))
    except json.decoder.JSONDecodeError:
        return {}
ce4efcd3f0508179971788a2c19a7f278d887a79
13,800