content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def plus97_modulus26_minus97(number):
    """Replicate one of the per-character operations performed by the DGA.

    Maps an ASCII code point into the lowercase range: subtract 97 (ord('a')),
    wrap modulo 26, then add 97 back.

    :param number: integer ASCII code of the character
    :return: the resulting lowercase character (str of length 1)
    """
    offset = (number - 97) % 26
    return chr(offset + 97)
ddf68bb67775fd033d20a6662602a2c0ef34a628
698,737
def browser_to_use(webdriver, browser):
    """Recover the browser to use with the given webdriver instance.

    The browser string is case insensitive and needs to be one of the
    values from BROWSERS_CFG.
    """
    # Supported browsers are listed at:
    # http://selenium.googlecode.com/git/docs/api/
    # py/_modules/selenium/webdriver/common/desired_capabilities.html
    wanted = browser.strip().upper()
    capabilities = getattr(webdriver.DesiredCapabilities(), wanted, None)
    if not capabilities:
        raise ValueError(
            "Unknown browser requested '{0}'.".format(wanted)
        )
    return capabilities
0e6bca7bad48c7c2934b81fce080f848e9bcdb22
698,738
def update_appliance_hostname(
    self,
    ne_pk: str,
    hostname: str,
) -> bool:
    """Add or update hostname on an Edge Connect appliance.

    This operation will take a few seconds to run.

    Swagger section ``vxoaHostname`` — ``GET /hostname/{nePk}``.

    :param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
    :type ne_pk: str
    :param hostname: New hostname to apply to the appliance
    :type hostname: str
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    return self._post(
        "/hostname/{}".format(ne_pk),
        data={"hostname": hostname},
        expected_status=[204],
        return_type="bool",
    )
c08af64efe2e9c1e31faec0845e6bf61606b527b
698,739
def get_max_table_size(final_tables):
    """Compute the maximum number of elements in any generated table.

    Parameters
    ----------
    final_tables : list
        List of the tables generated inside the loop for each bucket.

    Returns
    -------
    int
        Number of elements in the largest table (rows * columns);
        0 when there are no tables.
    """
    return max(
        (len(table) * len(table[0]) for table in final_tables),
        default=0,
    )
a562d6f944a03785034028e8b2e83bdb19a8d9aa
698,740
def next_bigger(n: int) -> int:
    """
    A function that takes a positive integer number and returns the next
    bigger number formed by the same digits. If no bigger number can be
    composed using those digits, return -1.

    Uses the classic "next lexicographic permutation" algorithm, which is
    O(d) in the number of digits instead of enumerating all d! permutations.
    """
    digits = list(str(n))
    # 1. Find the rightmost digit that is smaller than its right neighbour.
    pivot = len(digits) - 2
    while pivot >= 0 and digits[pivot] >= digits[pivot + 1]:
        pivot -= 1
    if pivot < 0:
        # Digits are in non-increasing order: n is already the maximum.
        return -1
    # 2. Find the rightmost digit larger than the pivot and swap.
    swap = len(digits) - 1
    while digits[swap] <= digits[pivot]:
        swap -= 1
    digits[pivot], digits[swap] = digits[swap], digits[pivot]
    # 3. Reverse the (descending) suffix to make it the smallest possible.
    digits[pivot + 1:] = reversed(digits[pivot + 1:])
    return int(''.join(digits))
8800040f3e88054bcacdb432131f8a9e0a872277
698,741
def getItemPos(item, inList):
    """Return the index of the first occurrence of *item* in *inList*.

    Comparison is by equality; returns -1 when the item is absent.
    """
    return next(
        (index for index, value in enumerate(inList) if value == item),
        -1,
    )
a77bc384641881582625d6c48187de8d23e7154b
698,742
import requests
import socket


def _get_response_tuple(url):
    """Probe *url* and report whether it looks broken.

    Arguments:
        url -- a string containing url for testing

    Returns a 2-tuple:
        result[0] -- True when the url is deemed failed (non-200 status
                     or any connection/timeout/HTTP/value error)
        result[1] -- the unchanged url argument
    """
    failure_exceptions = (
        requests.exceptions.ConnectionError,
        requests.exceptions.Timeout,
        requests.exceptions.HTTPError,
        ValueError,
        socket.timeout,
    )
    try:
        status = requests.get(url).status_code
    except failure_exceptions:
        return (True, url)
    return (status != 200, url)
786f7ff4e731b968fc502847d1b96b9f3a8274bc
698,743
import re


def ReplaceResourceZoneWithRegion(ref, args, request):
    """Replaces the request.name 'locations/{zone}' with 'locations/{region}'."""
    del ref, args  # Unused.
    zone_pattern = (
        r'(projects/[-a-z0-9]+/locations/[a-z]+-[a-z]+[0-9])[-a-z0-9]*((?:/.*)?)')
    # Keep the region prefix (group 1) and trailing path (group 2),
    # dropping the zone suffix in between.
    request.name = re.sub(zone_pattern, r'\1\2', request.name)
    return request
97f416e77799c28699b251ccd45a89f29cd34c65
698,744
def get_mac_addr_from_datapath(datapath):
    """Return the MAC address encoded in a datapath ID.

    According to the OpenFlow switch specification, the lower 48 bits of
    the datapath ID contain the MAC address.

    :param datapath: object with an integer ``id`` attribute (datapath ID)
    :return: MAC address string, e.g. ``'00:11:22:33:44:55'``
    """
    mac_addr_int = datapath.id & 0x0000FFFFFFFFFFFF
    # Bug fix: '012x' zero-pads to exactly 12 hex digits; the original
    # '02x' produced a short string (and a wrong MAC) whenever the
    # address had leading zero bytes.
    mac_hex = format(mac_addr_int, '012x')
    return ':'.join(mac_hex[i:i + 2] for i in range(0, 12, 2))
539c0b0a92f3eead947aed65ed62a1e630dcb30f
698,745
def create_empty(val=0x00):
    """Create an empty Userdata dict with fields d1..d14.

    val: value to fill the empty user data fields with (default is 0x00)
    """
    return {"d{}".format(index): val for index in range(1, 15)}
55e614059940b7eda5a0684dd105a60dbc7f1549
698,746
import re


def remove_tags(text):
    """Remove HTML-style tags from *text* and collapse whitespace.

    Strips anything of the form ``<...>``, then squeezes runs of
    whitespace to single spaces and trims the ends.

    :param text: input string possibly containing markup
    :return: cleaned plain-text string
    """
    # Raw strings fix the invalid escape sequence '\s' that raises a
    # SyntaxWarning/DeprecationWarning on modern Python; the pattern
    # value is unchanged.
    cleaned = re.sub(r'<[^>]+>', '', text)
    return re.sub(r'\s+', ' ', cleaned).strip()
5bbce34ca179f871eca6306f7fbe50b5cc1ee893
698,747
def decorator_blank_line_and_sleep(function):
    """Decorator that pauses for an Enter key press before calling
    ``function`` and prints a blank line after it returns.

    NOTE(review): the original docstring claimed this "waits one second",
    but the body calls input("") — it blocks on stdin, it does not sleep.
    Confirm whether time.sleep(1) was intended before relying on either
    behaviour.
    """
    def inner_wrapper(*args, **kwargs):
        input("")  # blocks until the user presses Enter (no prompt text)
        value = function(*args, **kwargs)
        print()  # trailing blank line after the wrapped call
        return value
    return inner_wrapper
1f544c87990c24b0cd82eeefe524cdf0649de69b
698,748
def get_note_type(syllables, song_db) -> list:
    """Categorize each syllable against the song database.

    Parameters
    ----------
    syllables : str
    song_db : db
        Object exposing ``motif``, ``calls`` and ``introNotes`` containers.

    Returns
    -------
    list
        One entry per syllable: 'M' (motif), 'C' (call), 'I' (intro note),
        or None when the syllable is not recognized.
    """
    def classify(syllable):
        if syllable in song_db.motif:
            return 'M'   # motif
        if syllable in song_db.calls:
            return 'C'   # call
        if syllable in song_db.introNotes:
            return 'I'   # intro note
        return None      # unknown syllable

    return [classify(syllable) for syllable in syllables]
621448603581a56af22ec9c559f9854c62073318
698,749
import asyncio


def _get_selector_noop(loop) -> asyncio.AbstractEventLoop:
    """no-op on non-Windows

    Counterpart of a Windows-specific selector helper; on other platforms
    the given event loop is already usable, so it is returned unchanged.
    """
    return loop
b2b53006c66a2900642858a9bc975c54225d8034
698,750
def get_plain_text(values, strip=True):
    """Get the first value in a list of values expected to be plain text.

    If the first value is a dict, its "value" entry is used instead.

    :param list values: a list of values
    :param boolean strip: true if we should strip the plaintext value
    :return: a string, or None when *values* is empty/falsy
    """
    if not values:
        return None
    first = values[0]
    if isinstance(first, dict):
        first = first.get('value', '')
    return first.strip() if strip else first
3c0937d1faefda4ba8649005e29c9961afbadc0a
698,751
import os


def check_wiki_art(artical_name):
    """Check whether an article with the given name exists in ``dict/``.

    Compares case-insensitively against the base names (text before the
    first '.') of the files in the hard-coded ``dict/`` directory.

    :param artical_name: article name to look up
    :return: bool — True if a matching file exists on disk

    NOTE(review): relies on a relative ``dict/`` path, so the result
    depends on the current working directory.
    """
    list_of_text_files = []
    for i in os.listdir('dict/'):
        # keep only the base file name (everything before the first '.')
        list_of_text_files.append(i.split('.')[0])
    # print(list_of_text_files)
    return artical_name.lower() in list(map(str.lower, list_of_text_files))
cbed2a33a2855a50347640dbfe6d2ad9a2e35f18
698,752
def move_polygon(polygon_coords=None, move_coords=None, limit_x=None, limit_y=None):
    """Translate polygon coordinates by an offset, clamped to bounds.

    :param polygon_coords: a list of [x, y] coordinate pairs
    :param move_coords: pair holding the x and y offsets to apply
    :param limit_x: maximum allowed x value (no upper clamp when None)
    :param limit_y: maximum allowed y value (no upper clamp when None)
    :returns: a NEW list of translated, clamped coordinate pairs, or
              False when either input is missing/empty
    :raises TypeError: none

    Bug fix: the original mutated the caller's coordinate pairs in place
    while documenting that it "returns updated" coordinates; this version
    builds fresh pairs and leaves the input untouched.
    """
    if not (polygon_coords and move_coords):
        return False
    dx, dy = move_coords[0], move_coords[1]
    moved = []
    for pair in polygon_coords:
        # Translate, then clamp each axis to [0, limit].
        x = max(pair[0] + dx, 0)
        y = max(pair[1] + dy, 0)
        if limit_x is not None:
            x = min(x, limit_x)
        if limit_y is not None:
            y = min(y, limit_y)
        moved.append([x, y])
    return moved
4f21f5e6e561697edde7e1e3c6baefada16fe7e9
698,753
def validate_request(request): """ Make sure the request is from Twilio and is valid. Ref: https://www.twilio.com/docs/security#validating-requests """ # Forgot where this token if from. Different from above. # From: https://www.twilio.com/user/account/developer-tools/test-credentials # Should probably use TWILIO_AUTH_TOKEN once testing complete. auth_token = '9ea8a4f0ac5fd659fef71719e480c3c0' if 'HTTP_X_TWILIO_SIGNATURE' not in request.META: return 'X_TWILIO_SIGNATURE header is missing ' \ 'from request, not a valid Twilio request.' validator = RequestValidator(auth_token) if not validator.validate( request.build_absolute_uri(), request.POST, request.META['HTTP_X_TWILIO_SIGNATURE']): return 'Twilio request is not valid.'
a83f3065fcebcb8231be4ed5558743fa6d837754
698,754
def mathprod(seq):  # math.prod function is available at python 3.8
    """Return the product of the elements of *seq*.

    Generalized to return 1 (the multiplicative identity, as math.prod
    does) for an empty sequence instead of raising IndexError.

    :param seq: iterable of numbers
    :return: product of all elements, or 1 when empty
    """
    result = 1
    for value in seq:
        result *= value
    return result
b75ff229e4cfaabe42e542367dd27e257f1d33ec
698,755
from typing import List from typing import Any def _split_list_into_chunks(x: List[Any], chunk_size: int = 50) -> List[List[Any]]: """Helper function that splits a list into chunks.""" chunks = [x[i:i + chunk_size] for i in range(0, len(x), chunk_size)] return chunks
b9972b57a18a97fceec78ec1d55156c83d255f9f
698,756
import math


def fix(x):
    """Round toward zero, like MATLAB's ``fix``.

    From http://www.mathworks.com/help/matlab/ref/fix.html:
    B = fix(A) rounds the elements of A toward zero, resulting in an
    array of integers. Negative values round up (ceil), non-negative
    values round down (floor).

    >>> fix(-1.9)
    -1
    >>> fix(3.4)
    3
    """
    return math.ceil(x) if x < 0 else math.floor(x)
d57d4dd7088b2191e63fff16d0000508275f8a09
698,758
import platform


def is_linux():
    """Return True when running on a Linux operating system."""
    system_name = platform.system()
    return system_name.find('Linux') != -1
f88693b702396c1ccb1220c0ea0c7edae78d03e6
698,759
import os


def get_mock_server_mode() -> str:
    """Returns a str representing the mock server mode.

    :return: "threading" or "multiprocessing"
    """
    mode = os.environ.get("BOLT_PYTHON_MOCK_SERVER_MODE")
    # "multiprocessing" was the macOS default until Big Sur 11.1, where it
    # started failing a lot, so the fallback is "threading" again.
    return "threading" if mode is None else mode
ead2a2b4d6ec569bdbc1b75c57c00382b03a52b2
698,760
def reversed_dict(choices):
    """Create a reverse lookup dictionary (value -> key) from key/value pairs."""
    return {value: key for key, value in choices}
85355bac403e8a49e55710eeef979120b1a34788
698,761
def project_detail(list_of_active_project, user_roles):
    """Build the project-details response payload for an organization.

    Args:
        list_of_active_project (list): active project objects.
        user_roles (object): UserOrgRole object (truthy when the caller
            belongs to the organization).

    Returns:
        dict with keys 'org_id', 'is_org_user' and 'project_details'.
    """
    project_details = [
        {'project_id': project.project_id,
         'project_name': project.project_name,
         'project_description': project.project_description}
        for project in list_of_active_project
    ]
    # org_id is taken from the projects themselves (None when there are none).
    org_id = list_of_active_project[-1].org_id if list_of_active_project else None
    return {
        'org_id': org_id,
        'is_org_user': bool(user_roles),
        'project_details': project_details,
    }
6bdf9bdd74e33c0553ac2b804b58c54c351b69f0
698,762
def create_demand_callback(data):
    """Creates callback to get demands at each location.

    The returned callback looks up ``data["demands"]`` by origin node;
    the destination node is accepted but ignored.
    """
    def demand_callback(from_node, to_node):
        del to_node  # demand depends only on the origin node
        return data["demands"][from_node]

    return demand_callback
ceff0d3caaa1e1269fee18e8aa4684afaa7a5e81
698,763
import re


def is_pages(value: str) -> float:
    """Score how much *value* looks like a page-number range.

    Returns 1.0 for "start-end" with start < end, 0.5 when two numbers
    are present but not increasing, and 0.0 when no range is matched.
    """
    match = re.match(r'(\d+)(?:\s+)?[\s\-._/\:]+(?:\s+)?(\d+)', value)
    if not match:
        return 0.0
    first, second = (int(group) for group in match.groups())
    return 1.0 if first < second else 0.5
e816007698f902a4812446bd4270224fbb7ae6fc
698,764
import sys
import sqlite3


def canRunPinger():
    """Return true iff we have the required libraries installed to run a
    pinger (a sufficiently recent interpreter and the sqlite3 module).
    """
    interpreter_ok = sys.version_info[:2] >= (2, 2)
    return interpreter_ok and sqlite3 is not None
3c6167412622504b860c971a7a9e976c59f576be
698,765
def _get_unique_index_values(idf, index_col, assert_all_same=True): """ Get unique values in index column from a dataframe Parameters ---------- idf : :obj:`pd.DataFrame` Dataframe to get index values from index_col : str Column in index to get the values for assert_all_same : bool Should we assert that all the values are the same before returning? If True, only a single value is returned. If False, a list is returned. Returns ------- str, list Values found, either a string or a list depending on ``assert_all_same``. Raises ------ AssertionError ``assert_all_same`` is True and there's more than one unique value. """ out = idf.index.get_level_values(index_col).unique().tolist() if assert_all_same: if len(out) > 1: raise AssertionError(out) return out[0] return out
306a919a547a6d0056a4547daa50e6149d840910
698,766
import os


def is_empty_file(f_name):
    """Function: is_empty_file

    Description: Checks to see if a file is empty.
        NOTE: Returns None if file does not exist.

    Arguments:
        (input) f_name -> File being checked.
        (output) True if the file is empty, False if it has content,
                 None if it does not exist (or is not a regular file).
    """
    if not os.path.isfile(f_name):
        return None
    return os.stat(f_name).st_size == 0
f5f04af0cd721424147188fec98a44e054e8614f
698,767
from typing import Counter


def occurences_counter(filepath, writepath='counts.txt'):
    """Count word occurrences in a dataset file.

    Input:
        filepath: file to open
        writepath: file where the result is written, one "key|count" per line
    Output:
        Counter object with the occurrences found in the opened file
    """
    word_counts = Counter()
    with open(filepath, 'r') as dataset:
        for line in dataset:
            word_counts.update(line.split())
    with open(writepath, encoding="utf-8-sig", mode='w') as output:
        for word, count in word_counts.items():
            # '|' is the field separator — it must not occur in the tokens
            output.write('{}|{}\n'.format(word, count))
    print("counts saved to: " + writepath + "\nOpen it in excel to sort it and have a quick view")
    return word_counts
cb72b7b9bdcbbbe5dfdf5c86e3c43b5fe98ac3ea
698,768
import os


def locate_blockmesh_template():
    """Locate the template blockMeshDict file under the StoveOpt tree.

    Resolves the template path relative to the current working directory
    (assumed to be the StoveOpt master directory).

    Args:
        None

    Returns:
        blockmesh_template (str): full file path where the blockmesh
        template lives

    Bug fix: the path is now assembled with ``os.path.join`` instead of
    hard-coded ``"//"`` separators, which were not portable (Windows) and
    produced doubled slashes on POSIX.
    """
    path_stoveopt_master = os.getcwd()
    blockmesh_template = os.path.join(
        path_stoveopt_master,
        "blockMeshDict_foamfile",
        "template",
        "blockMeshDict_template",
    )
    print("blockmesh template located at:")
    print(blockmesh_template)
    return blockmesh_template
d2fdbdf5890551f1e808217d70076654ce59e96a
698,769
def count_query(query): """ The Google Cloud Datastore API doesn't expose a way to count a query the traditional method of doing a keys-only query is apparently actually slower than this method """ # Largest 32 bit number, fairly arbitrary but I've seen Java Cloud Datastore # code that uses Integer.MAX_VALUE which is this value MAX_INT = 2147483647 # Setting a limit of zero and an offset of max int will make # the server (rather than the client) skip the entities and then # return the number of skipped entities, fo realz yo! iterator = query.fetch(limit=0, offset=MAX_INT) [x for x in iterator] # Force evaluation of the iterator count = iterator._skipped_results while iterator._more_results: # If we have more results then use cursor offsetting and repeat iterator = query.fetch(limit=0, offset=MAX_INT, start_cursor=iterator.next_page_token) [x for x in iterator] # Force evaluation of the iterator count += iterator._skipped_results return count
6ad65b75838048cac0a0360098972427034e6cca
698,770
import torch


def log_importance_weight_matrix(batch_size, dataset_size):
    """
    Calculates a log importance weight matrix

    Parameters
    ----------
    batch_size: int
        number of training images in the batch

    dataset_size: int
        number of training images in the dataset

    Returns a (batch_size, batch_size) tensor of log-weights.
    """
    N = dataset_size
    M = batch_size - 1
    # NOTE(review): this looks like the minibatch stratified-sampling
    # weighting from beta-TCVAE — confirm against the reference
    # implementation before modifying.
    strat_weight = (N - M) / (N * M)
    # Every entry starts at 1/M ...
    W = torch.Tensor(batch_size, batch_size).fill_(1 / M)
    # ... the main diagonal (every (M+1)-th element of the flat view) is 1/N
    W.view(-1)[::M + 1] = 1 / N
    # ... the off-diagonal stripe shifted by one gets the stratification weight
    W.view(-1)[1::M + 1] = strat_weight
    W[M - 1, 0] = strat_weight
    return W.log()
78555bd2e35bae8587602e43698c98bb0f463b21
698,772
def algo2(x, y):
    """Count axis-aligned squares implied by two coordinate lists.

    Builds every positive pairwise difference of the x list (candidate
    widths) and of the y list (candidate heights), then counts how many
    (width, height) pairs are equal — each such pair is a square.

    Args:
        x (list): x coordinates
        y (list): y coordinates

    Returns:
        int: number of squares

    Performance fix: the pair counting now multiplies frequency tables
    (Counter) instead of comparing every width against every height,
    turning an O(W*H) comparison into O(W + H). The result is identical.
    """
    from collections import Counter

    # All positive differences between pairs of x coordinates (widths).
    widths = [x[j] - x[i]
              for i in range(len(x)) for j in range(len(x))
              if x[j] - x[i] > 0]
    print("liste_x:", widths)

    # All positive differences between pairs of y coordinates (heights).
    heights = [y[j] - y[i]
               for i in range(len(y)) for j in range(len(y))
               if y[j] - y[i] > 0]
    print("liste_y:", heights)

    # A square needs one width and one height of equal length: for each
    # value, the number of matching pairs is count_in_x * count_in_y.
    width_counts = Counter(widths)
    height_counts = Counter(heights)
    return sum(count * height_counts[value]
               for value, count in width_counts.items())
d2e1c47f98c912fff3f02f759176b81b479ca659
698,773
def check_board(board):
    """Validate the board format.

    Requirements:
      - the board has exactly 9 rows,
      - every row has exactly 9 elements,
      - every element is an integer between 1 and 9 inclusive.

    Returns True when all requirements hold, otherwise False.
    """
    if len(board) != 9:
        return False
    for row in board:
        if len(row) != 9:
            return False
        for cell in row:
            if cell not in range(1, 10):
                return False
    return True
f5940470c6158bfd500aca40a9f89c715d8592ac
698,774
import re


def remove_url(txt):
    """Replace URLs found in a text string with nothing (i.e. it will remove
    the URL from the string).

    Note that the pattern also strips every character outside
    [0-9A-Za-z space tab], then collapses whitespace to single spaces.

    Parameters
    ----------
    txt : string
        A text string that you want to parse and remove urls.

    Returns
    -------
    The same txt string with url's removed.
    """
    stripped = re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt)
    return " ".join(stripped.split())
8d1b8b89cb65ca7761c093dc388d1f19729137e7
698,775
import time


def findLoadOnBus(mirror, Busnum, Id=None):
    """Find first load on bus unless Id specified
    Note that Ids are typically a strings i.e. '2'

    Side effect: accumulates the time spent searching onto
    mirror.FindTime. Returns the matching Load object, or None (after
    printing a not-found message).
    """
    tic = time.time()
    if not mirror.searchDict:
        # No search index available: linear scan over every load.
        for x in range(len(mirror.Load)):
            if mirror.Load[x].Busnum == Busnum:
                # Return first gen on bus if no Id
                if Id == None:
                    mirror.FindTime += time.time() - tic
                    return mirror.Load[x]
                if Id == mirror.Load[x].Id:
                    mirror.FindTime += time.time() - tic
                    return mirror.Load[x]
    else:
        # Indexed lookup: searchDict is keyed by stringified bus number.
        bnum = str(int(Busnum))
        if bnum in mirror.searchDict:
            # bus found
            if 'Load' in mirror.searchDict[bnum]:
                # bus has Load
                if Id == None:
                    # return first Load if No id
                    mirror.FindTime += time.time() - tic
                    return mirror.searchDict[bnum]['Load'][0]
                else:
                    # find Load with matching ID
                    for bLoad in mirror.searchDict[bnum]['Load']:
                        if Id == bLoad.Id:
                            mirror.FindTime += time.time() - tic
                            return bLoad
    # Nothing matched — report and fall through to return None.
    if Id:
        print("Load on Bus %d with Id '%s' not Found" % (Busnum, Id))
    else:
        print("Load on Bus %d not Found" % Busnum)
    mirror.FindTime += time.time() - tic
    return None
1b81f7495b5ea5fe060d7eead92db10510b76c97
698,776
def release_gamepad_input() -> None:
    """release_gamepad_input() -> None

    (internal)

    Resumes normal gamepad event processing.

    NOTE: stub body — the docstring marks this as internal, and the
    function itself does nothing; the real behaviour presumably lives in
    a host engine binding.
    """
    return None
fe4ea812ba64d50dec7d67c85f93199af8a27b30
698,777
def _bin_str_to_int(bin_str): """ This function returns integere value extracted from binary string of length 16. :param bin_str: :return: """ if len(bin_str) <= 16: return int(bin_str, 2) else: None
42ea2d6858d9e41f0dcffb9711ea287c8d051f07
698,778
def str_to_bytes(s):
    """convert string to byte unit. Case insensitive.

    >>> str_to_bytes('2GB')
    2147483648
    >>> str_to_bytes('1kb')
    1024
    """
    s = s.replace(' ', '')
    # Split a trailing one- or two-letter unit from the numeric part.
    if s[-1].isalpha() and s[-2].isalpha():
        unit, number = s[-2:].upper(), s[:-2]
    elif s[-1].isalpha():
        unit, number = s[-1].upper(), s[:-1]
    else:
        # No unit at all: interpret the whole string as a byte count.
        return float(s)
    multipliers = {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
        'PB': 1024 ** 5,
        'EB': 1024 ** 6,
        'ZB': 1024 ** 7,
        'YB': 1024 ** 8,
    }
    if unit not in multipliers:
        raise ValueError('invalid unit', unit)
    return float(number) * multipliers[unit]
71bef1d7a81fad44a00deb268fba83dac1ed633e
698,779
def select_first_node(g):
    """
    :param g: the graph
    :return: the first node of the node sequence based on which the graph
             will be partitioned — the (first) node with the lowest degree.
    """
    # min() returns the first index achieving the minimum, matching the
    # "first lowest-degree node" contract.
    return min(range(len(g)), key=lambda node: g.degree[node])
769a36437e38b19984f88f510b544c22df984106
698,780
def get_subsequences(sequence):
    """
    Get length k-1 subsequences of length k sequence

    A sequence is a tuple of elements; each element is a tuple of events.
    Each subsequence is produced by deleting exactly one event, iterating
    from the last element/event to the first.

    >>> get_subsequences((('A', 'B'), ('C',)))
    [(('A', 'B'),), (('A',), ('C',)), (('B',), ('C',))]
    >>> get_subsequences((('A', 'B'), ('C',), ('D', 'E')))
    [(('A', 'B'), ('C',), ('D',)), (('A', 'B'), ('C',), ('E',)), (('A', 'B'), ('D', 'E')), (('A',), ('C',), ('D', 'E')), (('B',), ('C',), ('D', 'E'))]
    :rtype : tuple
    :return:
    """
    subseqs = []
    for i in reversed(list(range(0, len(sequence)))):
        element = sequence[i]
        for j in reversed(list(range(0, len(element)))):
            # NOTE(review): `event` is assigned but never used.
            event = element[j]
            if len(element) == 1:
                # Removing the only event removes the whole element.
                subseq = sequence[:i] + sequence[(i + 1):]
            else:
                # Remove just event j from element i.
                subseq = list(sequence)
                subseq[i] = subseq[i][:j] + subseq[i][(j + 1):]
            subseqs.append(tuple(subseq))
    return subseqs
7f25a3339d7b73eda1d56b7c4cb7941adc17adad
698,781
import typing
import os


def find(root_dir: str, name: str) -> typing.Optional[str]:
    """Recurse through *root_dir* and return the full path of the first
    file whose basename equals *name*, or None when no such file exists.
    """
    for dirpath, _, filenames in os.walk(root_dir):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
08657d7a0a20137a283d20af73f39697f0cbdf86
698,782
import sys


def get_keyword_list(file_name):
    """Read the keyword list from a file (one keyword per line, UTF-8).

    Blank lines are skipped. On a decode failure an error message is
    printed and the process exits.

    :param file_name: path of the UTF-8 encoded keyword file
    :return: list of non-empty keyword strings
    """
    with open(file_name, 'rb') as f:
        try:
            lines = f.read().splitlines()
            # utf-8-sig transparently strips a leading BOM if present
            lines = [line.decode('utf-8-sig') for line in lines]
        except UnicodeDecodeError:
            # Bug fix: the original passed file_name as a second argument
            # to print() instead of %-formatting it into the message.
            print(u'%s文件应为utf-8编码,请先将文件编码转为utf-8再运行程序' % file_name)
            sys.exit()
    return [line for line in lines if line]
2d3b2c0feb0fb43dd52cba83ce45ffb6722ad6c4
698,783
def _flatten(coll): """ Flatten list and convert elements to int """ if isinstance(coll, list): return [int(a) for i in coll for a in _flatten(i)] else: return [coll]
a6f1dee42a5c881e4cf4496283f248a230f38465
698,784
def augment_parser(parser):
    """Augment parser from sequential wrapper with parallel wrapper.

    Adds a "Parallel Options" argument group (threads, DRMAA settings,
    restart/rate limits) and returns the same parser.

    :param parser: argparse.ArgumentParser to extend
    :return: the augmented parser
    """
    group = parser.add_argument_group("Parallel Options", "Configuration of parallel execution")
    group.add_argument(
        "--threads",
        type=int,
        default=1,
        help="Number of threads to use for local processing/jobs to spawn concurrently",
    )
    group.add_argument(
        "--use-drmaa",
        action="store_true",
        default=False,
        help="Enables running the parallelization on cluster via DRMAA",
    )
    group.add_argument(
        "--restart-times", type=int, default=5, help="Number of times to restart jobs automatically"
    )
    group.add_argument(
        "--max-jobs-per-second",
        type=int,
        default=10,
        help="Maximal number of jobs to launch per second in DRMAA mode",
    )
    group.add_argument(
        "--max-status-checks-per-second",
        type=int,
        default=10,
        # Bug fix: help text read "status checks for perform for second".
        help="Maximal number of DRMAA status checks to perform per second",
    )
    return parser
481e0edc9e8d90ccbc5f0d71ef201ccd9ac23a01
698,785
def to_float_hours(hours, minutes, seconds):
    """ (int, int, int) -> float

    Return the total number of hours in the specified number
    of hours, minutes, and seconds.

    Precondition: 0 <= minutes < 60  and  0 <= seconds < 60

    >>> to_float_hours(0, 15, 0)
    0.25
    >>> to_float_hours(2, 45, 9)
    2.7525
    >>> to_float_hours(1, 0, 36)
    1.01
    """
    total = hours
    total += minutes / 60
    total += seconds / 3600
    return total
f94f37585929fc45f4b417ab63ca9b7b501a2e57
698,786
def fit_simulaid(phi):
    """Least-squares fit of theta such that phi_i = theta * i + phi_0.

    Solves the 2x2 normal-equation system for the slope theta of a line
    through the points (i, phi_i), i = 0..n.

    Bug fix: the original set a22 = n, but the normal equations require
    a22 = sum(1 for i in 0..n) = n + 1 — that error is what made the
    routine "work only for small numbers of samples". The loop variable
    that shadowed the ``phi`` argument has also been renamed.

    ---
    Parameters:
        phi : sequence of sample values
    ---
    Return:
        theta : fitted slope
    """
    n = len(phi) - 1
    # Normal-equation coefficients for i = 0..n
    a11 = (2 * n + 1) * (n + 1) * n / 6   # sum of i^2
    a21 = n * (n + 1) / 2                 # sum of i
    a12 = a21
    a22 = n + 1                           # sum of 1 (bug fix: was n)
    # Right-hand side
    b1 = sum(i * value for i, value in enumerate(phi))  # sum of i*phi_i
    b2 = sum(phi)                                       # sum of phi_i
    theta = (a22 * b1 - a12 * b2) / (a22 * a11 - a12 * a21)
    return theta
98a9bf064e56a48312170a8c32e032654317d40f
698,787
import difflib def _get_suggestion(provided_string, allowed_strings): """ Given a string and a list of allowed_strings, it returns a string to print on screen, with sensible text depending on whether no suggestion is found, or one or more than one suggestions are found. :param provided_string: the string to compare :param allowed_strings: a list of valid strings :return: A string to print on output, to suggest to the user a possible valid value. """ similar_kws = difflib.get_close_matches(provided_string, allowed_strings) if len(similar_kws) == 1: return "(Maybe you wanted to specify {0}?)".format(similar_kws[0]) elif len(similar_kws) > 1: return "(Maybe you wanted to specify one of these: {0}?)".format( ", ".join(similar_kws)) else: return "(No similar keywords found...)"
ebd1b963116bddebbc340312f2d7a16be36b11c6
698,789
def iso_8601(datetime):
    """Convert a datetime into an iso 8601 string.

    A UTC offset of +00:00 is rendered as the 'Z' suffix; None passes
    through unchanged.
    """
    if datetime is None:
        return None
    value = datetime.isoformat()
    return value[:-6] + 'Z' if value.endswith('+00:00') else value
968b9d9edfe13340e4c2f74fb78c11cb38d6c013
698,790
import os


def to_abs_path(relative_path):
    """Returns a canonical path for the specified path relative to the
    script directory.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.realpath(os.path.join(here, relative_path))
c333c999fce96ba9a05975d4df690c7c86ac0426
698,791
def compute_accuracy(y_pred, y_test):
    """Compute accuracy of prediction.

    Parameters
    ----------
    y_pred : array, shape (N_test)
        Predicted labels.
    y_test : array, shape (N_test)
        True labels.

    Returns the fraction of positions where the labels agree.
    """
    correct = sum(1 for i in range(len(y_pred)) if y_pred[i] == y_test[i])
    return correct / len(y_pred)
a06b4d91f1878288ab4746d1f729040c95b91a91
698,793
def get_request_url(url, *args, **kwargs):
    """Returns url parameter that request will use.

    Extra positional and keyword arguments are accepted (mirroring the
    request call signature) but ignored.
    """
    return url
fde43b531d53e0f3cf80655b0da9895b7a001e13
698,794
import sys


def _load(target, **vars):
    """ Fetch something from a module. The exact behaviour depends on the
    the target string:

    If the target is a valid python import path (e.g. `package.module`),
    the rightmost part is returned as a module object.
    If the target contains a colon (e.g. `package.module:var`) the module
    variable specified after the colon is returned.
    If the part after the colon contains any non-alphanumeric characters
    (e.g. `package.module:func(var)`) the result of the expression
    is returned. The expression has access to keyword arguments supplied
    to this function.

    Example::
    >>> _load('bottle')
    <module 'bottle' from 'bottle.py'>
    >>> _load('bottle:Bottle')
    <class 'bottle.Bottle'>
    >>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar')
    '!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
    """
    # Split off the optional ':suffix'; target becomes None when absent.
    module, target = target.split(":", 1) if ':' in target else (target, None)
    if module not in sys.modules:
        __import__(module)
    if not target:
        return sys.modules[module]
    if target.isalnum():
        # Plain name after the colon: a simple attribute lookup suffices.
        return getattr(sys.modules[module], target)
    # Arbitrary expression: evaluate with the package root bound in scope.
    # SECURITY: eval() on the target string — never pass untrusted input.
    package_name = module.split('.')[0]
    vars[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), vars)
44446d742765ff96f4bbc38546157c5abc3dffa8
698,795
def square_table_while(n):
    """
    Returns: list of squares strictly less than N

    This function creates a list of integer squares 0*0, 1*1, 2*2, ...
    It only adds those squares that are strictly less than N — note the
    loop condition below is k*k < n, so a perfect square equal to n is
    NOT included (the original docstring's "(or equal to)" did not match
    the code).

    Parameter n: the bound on the squares
    Precondition: n >= 0 is a number
    """
    seq = []
    k = 0
    while k*k < n:
        seq.append(k*k)
        k = k+1
    return seq
8eb94d9690f4138fb6759cf73d5a602659571b34
698,796
import os


def val_dir(string: str) -> str:
    """Check that the provided string is an existing directory.

    :param string: path to validate
    :return: the unchanged path when it is a directory
    :raises NotADirectoryError: when the path is not a directory

    Bug fix: the original raised the bare exception class with no
    message; the offending path is now included in the error.
    """
    if os.path.isdir(string):
        return string
    raise NotADirectoryError(string)
092689cc81c40a29c71aa8475f231423f2bc2e3b
698,797
def populate_words(words, oxford_api, words_api=None, allow_messages=True):
    """
    Iterates the words dictionary, populating the data of each object using
    the OxfordAPI and with the optional WordsAPI to be used in the case the
    word is not found in the Oxford dictionary.
    :param words: The dictionary containing the words to be populated.
    :param oxford_api: The OxfordAPI object.
    :param words_api: The WordsAPI object.
    :param allow_messages: Boolean value to indicate if messages should be
        displayed in the console.
    :return: A list of those words that have not been found.
    """
    incomplete_words = []
    i = 0  # progress counter, only used for console messages
    for word in words.values():
        if allow_messages:
            print(i, ': ', word.text)
        i = i + 1
        # Primary lookup via Oxford; fall back to WordsAPI when available.
        success = oxford_api.get_word(word)
        if not success and words_api:
            definitions = words_api.definitions(word.text)
            if definitions and definitions.get('definitions', False):
                for definition in definitions.get('definitions', list()):
                    word.definitions.append(definition.get('definition', None))
    # Words that still have no definitions are reported as incomplete.
    for item in words.items():
        key = item[0]
        word = item[1]
        if len(word.definitions) == 0:
            incomplete_words.append(key)
    return incomplete_words
6f435b2e8d9947a9e7c6ba558aa43e13fe0dcfd6
698,798
import argparse


def read_cmd():
    """Reading command line options."""
    desc = "Program for relinking Khan Content Bakalari NEXT e-learning module."
    cmd_parser = argparse.ArgumentParser(description=desc)
    cmd_parser.add_argument(
        '-s', '--subject', dest='subject', default='root',
        help='Relink content for a given domain/subject.')
    cmd_parser.add_argument(
        '-c', '--content', dest='content', default='video',
        help='Which kind of content should we relink? Options: video')
    return cmd_parser.parse_args()
7015f4d4917fcabf9ee5e9437ac813138db1b874
698,799
def media_artist(self):
    """Artist of current playing media, music track only.

    Simple accessor over state cached on the instance — presumably kept
    up to date elsewhere (Home Assistant media_player pattern).
    """
    return self._media_artist
12e0538850bfb04ae52fe6840b7445423e393687
698,800
def flattenEmailAddresses(addresses):
    """
    Turn a list of email addresses into a comma-delimited string of properly
    formatted, non MIME-encoded email addresses, suitable for use as an
    RFC 822 header

    @param addresses: sequence of L{EmailAddress} instances
    """
    formatted = [address.pseudoFormat() for address in addresses]
    return ', '.join(formatted)
91c235451006747e419cfd1e01eb7151eec952ed
698,801
def split(text):
    """ sometext|split

    Template filter: split *text* on runs of whitespace (str.split with
    no arguments) and return the resulting list.
    """
    return text.split()
5067bad72a36827f221bde6fcd86c3c124f0b12b
698,802
def structural_parameters(keys, columns):
    """
    Gets the structural parameters in a specific order (if catalog is
    generated by the method in tbridge.binning).

    Returns the columns labelled MAGS, R50S, NS and ELLIPS, in that order.
    """
    def column_for(label):
        # Column positions are given by the matching entry in *keys*.
        return columns[keys.index(label)]

    return (column_for("MAGS"), column_for("R50S"),
            column_for("NS"), column_for("ELLIPS"))
3e6fd7c231f31f4fdcb47cd76359411d194f5539
698,803
def poly(a, x, y, order=4):
    """Polynomial evaluation.

    pol = a[i,j] * x**(i-j) * y**j summed over i and j, where i runs from 0
    to order and, for each i, j runs from 0 to i.  The coefficients are
    supplied in the flattened ("triangular") JWST layout
    a[0,0] a[1,0] a[1,1] a[2,0] a[2,1] a[2,2] ...
    containing (order+1)(order+2)/2 entries.

    Parameters
    ----------
    a : array
        float array of polynomial coefficients in flattened arrangement
    x : array
        x pixel position. Can be integer or float or an array
    y : array
        y pixel position in same layout as x positions.
    order : int
        integer polynomial order

    Returns
    -------
    pol : float
        result as described above
    """
    # Exponent pairs (x power, y power) in flattened coefficient order.
    exponents = [(i - j, j) for i in range(order + 1) for j in range(i + 1)]
    pol = 0.0
    for k, (px, py) in enumerate(exponents):
        pol = pol + a[k] * x**px * y**py
    return pol
e5575d4df2ae4cc5f7f1eea467d9b04ebbb97942
698,804
import argparse


def parser(args):
    """Return parsed command line arguments.

    :param args: list of argument strings (e.g. ``sys.argv[1:]``).
    :return: argparse.Namespace with the ``outfiles`` attribute.
    """
    # Renamed local to avoid shadowing the enclosing function's name.
    arg_parser = argparse.ArgumentParser(
        description=(
            'Extracts the convergence indicators of a ONETEP BFGS geometry\n'
            'optimisation calculation from the output file, and compares\n'
            'them to the convergence tolerances. The results are coloured to\n'
            'indicate which parameters are converged and which are not.'),
        formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument(
        'outfiles', metavar='outfile', type=str, nargs='*',
        help='ONETEP output files to be extracted\n'
             'If none is specified then all out files (*.out)\n'
             'in the current directory will be read')
    return arg_parser.parse_args(args)
bef30fbd9278a17e06c68bf7af9d04b877883b1c
698,805
import struct


def short_to_bytes(short_data):
    """
    Pack a signed 16-bit short as little-endian bytes.

    :param short_data: int in [-32768, 32767]
    :return: bytes(); len == 2
    """
    packer = struct.Struct('<h')
    return packer.pack(short_data)
26eeb2de936fa6b434c724ef80c008d93aec0446
698,806
def adjust(new_rows, maxi):
    """
    Pad every row with "-" so that all rows have *maxi* columns, making the
    table CSV compatible.

    Rows shorter than *maxi* are padded in place; every row (padded or
    already full length) is returned, preserving order.

    :param new_rows: list of row lists to normalise.
    :param maxi: target number of columns.
    :return: list of the (possibly padded) rows.
    """
    rows = []
    for row in new_rows:
        missing = maxi - len(row)
        if missing > 0:
            # Extend in place so callers holding a reference see the padding.
            row += ["-"] * missing
        rows.append(row)
    return rows
2151362add7a85c4a236ad9ee936dcc08578534b
698,807
def unflatten_B(t):
    """
    Unflatten a [B*3, ...] tensor into a [B, 3, ...] tensor.

    *t* is the flattened tensor produced from a component batch, which is a
    [B, 3, ...] tensor.
    """
    flat_batch = t.shape[0]
    trailing = t.shape[1:]
    return t.view(flat_batch // 3, 3, *trailing)
77017ca136a942f975dfd498fe3aa8d3d20f67b9
698,808
import torch


def l12_smooth(input_tensor, a=0.05):
    """Smoothed L1/2 norm.

    For |x| < a the absolute value is replaced by a smooth quartic
    approximation (differentiable at zero); the square root of the smoothed
    magnitude is then summed over all elements.

    :param input_tensor: tensor, or list of tensors (summed recursively).
    :param a: smoothing threshold below which the quartic approximation applies.
    :return: scalar tensor with the smoothed L1/2 norm.
    """
    if isinstance(input_tensor, list):
        # Bug fix: forward ``a`` to the recursive calls — it was previously
        # dropped, silently resetting the threshold to the default.
        return sum(l12_smooth(tensor, a) for tensor in input_tensor)

    smooth_abs = torch.where(
        torch.abs(input_tensor) < a,
        torch.pow(input_tensor, 4) / (-8 * a ** 3)
        + torch.square(input_tensor) * 3 / 4 / a
        + 3 * a / 8,
        torch.abs(input_tensor))
    return torch.sum(torch.sqrt(smooth_abs))
f6de684a53605aa3f28cf21f46f90dbb5d325616
698,809
def _dedupe_entities(alerts, ents) -> list: """Deduplicate incident and alert entities.""" alrt_ents = [] for alrt in alerts: if alrt["Entities"]: alrt_ents += [ent.__hash__() for ent in alrt["Entities"]] for ent in ents: if ent.__hash__() in alrt_ents: ents.remove(ent) return ents
9fc56c217d5a6f2ad85c100f3ee5f8a049de0d1c
698,810
def find_components(value):
    """
    Extract the three values which have been combined to form the given
    output: the most significant bits (20 and up), the middle 10 bits and
    the 10 least significant bits.
    """
    mask10 = (1 << 10) - 1
    high = value >> 20
    middle = (value >> 10) & mask10
    low = value & mask10
    return high, middle, low
80a235cafe8ceb37cafb6453f3338e03653bffe1
698,811
def most_energetic(df):
    """Grab most energetic particle from mc_tracks dataframe.

    Keeps, for every ``event_id`` group, the row(s) whose ``energy`` equals
    the group maximum (ties keep all tied rows).

    NOTE(review): ``.reindex()`` with no arguments is effectively a copy of
    the filtered frame; this may have been intended as ``.reset_index()`` —
    confirm with callers before changing.
    """
    idx = df.groupby(["event_id"])["energy"].transform(max) == df["energy"]
    return df[idx].reindex()
031e5a92d9e890743323012227ffbd4048d0052a
698,812
def GetInvalidTypeErrors(type_names, metrics):
    """Check that all of the metrics have valid types.

    Args:
      type_names: The set of valid type names.
      metrics: A list of rappor metric description objects.

    Returns:
      A list of errors about metrics with invalid_types.
    """
    errors = []
    for metric in metrics:
        if metric['type'] not in type_names:
            errors.append('Rappor metric "%s" has invalid type "%s"' % (
                metric['name'], metric['type']))
    return errors
27af3804cb4a857d044ad365cdfee090a0d1ab56
698,813
import textwrap


def indent(multiline_str: str, indented=4):
    """
    Converts a multiline string to an indented string.

    Args:
        multiline_str: string to be converted
        indented: number of spaces used for indentation

    Returns:
        Indented string
    """
    prefix = " " * indented
    return textwrap.indent(multiline_str, prefix)
9fd5f2310ade00071a57731040435428cff88557
698,815
def list_pattern_features(client, patter_name, type_=None, file_=None):
    """List features in a Creo Pattern.

    Args:
        client (obj): creopyson Client.
        patter_name (str): Pattern name.
        `type_` (str, optional): Feature type pattern (wildcards allowed:
            True). Defaults: All feature types.
        `file_` (str, optional): File name. Defaults is the currently
            active model.

    Returns:
        (list:dict): List of feature information
    """
    data = {"patter_name": patter_name}
    if file_ is None:
        # Fall back to the currently active model, when there is one.
        active_file = client.file_get_active()
        if active_file:
            data["file"] = active_file["file"]
    else:
        data["file"] = file_
    if type_:
        data["type"] = type_
    return client._creoson_post("feature", "list_group_features", data, "featlist")
d1923ccdb178a211ecd896c325170a5e247557aa
698,816
def get_binary_mask(x):
    """
    Threshold a numpy array into a binary mask, in place.

    Values >= 0.5 become 1. and values < 0.5 become 0.; *x* is modified in
    place and also returned.

    :param x: numpy array
    :return: the same array, thresholded to {0., 1.}
    """
    above = x >= 0.5
    x[above] = 1.
    x[~above] = 0.
    return x
d63a2cff4d8398c52ea97fff720d78a8d6df66b5
698,817
def is_possible_move(direction, you, snake_heads, occupied, height, width, spacing=1):
    """
    Decide whether moving one step in *direction* is safe for our snake.

    :param direction: up down left or right
    :type direction: str
    :param you: all blocks our body is occupying (head first)
    :type you: list of dict
    :param snake_heads: other snakes' heads; currently unused — only
        referenced by the commented-out head-collision checks below
    :param occupied: all blocks all snakes are occupying
    :type occupied: list of dict
    :param height: height of the board
    :type height: int
    :param width: width of the board
    :type width: int
    :param spacing: how many spaces directly in front of you to look ahead
    :type spacing: int
    :return: boolean
    """
    head_pos = you[0]
    # NOTE(review): starts at 1, presumably skipping our own head at
    # occupied[0] — confirm callers always place it there.
    i = 1
    while i < len(occupied):
        headpos_x = head_pos["x"]
        headpos_y = head_pos["y"]
        taken_x = occupied[i]["x"]
        taken_y = occupied[i]["y"]
        # Signed offsets from our head to the occupied cell.
        dist_y = taken_y - headpos_y
        dist_x = taken_x - headpos_x
        # Each branch fails when an occupied cell lies within ``spacing``
        # cells straight ahead on the same row/column, or when the move
        # would leave the board edge.
        if direction == "up":
            if (abs(dist_y) <= spacing and taken_y < headpos_y and headpos_x == taken_x) or headpos_y == 0:# or possible_head_collision("up", head_pos, snake_heads):
                return False
        if direction == "down":
            if (abs(dist_y) <= spacing and taken_y > headpos_y and headpos_x == taken_x) or headpos_y == height - 1:# or possible_head_collision("down", head_pos, snake_heads):
                return False
        if direction == "right":
            if (abs(dist_x) <= spacing and taken_x > headpos_x and headpos_y == taken_y) or headpos_x == width - 1:# or possible_head_collision("right", head_pos, snake_heads):
                return False
        if direction == "left":
            if (abs(dist_x) <= spacing and taken_x < headpos_x and headpos_y == taken_y) or headpos_x == 0:# or possible_head_collision("left", head_pos, snake_heads):
                return False
        i = i + 1
    return True
01909f9daecd01fd21b13c5a24bc0081ce50b7c9
698,818
def valid_permlink(permlink, allow_empty=False):
    """Returns validated permlink or throws Assert."""
    assert isinstance(permlink, str), "permlink must be string: %s" % permlink
    if allow_empty and permlink == '':
        return permlink
    assert permlink and len(permlink) <= 256, "invalid permlink"
    return permlink
ec5094ed8ac938b85c423218527d92b89ba3f1f3
698,819
def get_answer(offer, user):
    """Return the given user's answer for *offer*, or None for anonymous users."""
    if user.is_authenticated:
        return offer.answers.filter(user=user).first()
    return None
e89da7d11e6e8a01deb8177d3af9c693267fe09a
698,820
import os


def getMusZs(directory=""):
    """
    Searches directory specified and returns a list of redshifts for
    GadgetMUSIC snapshots.

    Assumes file names are of format GadgetMUSIC-NewMDCLUSTER_0001.z0.000...
    Directory must only contain files with names of this format.

    Parameters
    ----------
    directory : str
        Directory to search. If not specified, defaults to current directory.

    Returns
    -------
    zs : list of floats
        Redshifts in descending order

    See also
    --------
    getSnapNumToZMapGX : Gets snapshot number to redshift map for GadgetX files
    getSnapNumToZMapGiz : Gets snapshot number to redshift map for GIZMO files
    """
    files = os.listdir(directory)
    # assuming file name is of format GadgetMUSIC-NewMDCLUSTER_0001.z0.000...
    zs = set()
    for file in files:
        # NOTE(review): fixed offsets — index 30 is the 'z' marker and
        # [31:36] the 5-character redshift. Names shorter than 31 chars
        # raise IndexError here; confirm the naming scheme with callers.
        if file[30] == "z":
            try:
                # A set de-duplicates redshifts repeated across file suffixes.
                zs.add(float(file[31:36]))
            except ValueError:
                # Non-numeric redshift field: skip this file.
                pass
    zs = list(zs)
    # sort list such that in descending order
    zs.sort(reverse=True)
    return zs
8834b8dbae8e07056fd68fe974476d54be64487a
698,821
async def oppio_client_supervisor(opp, aiohttp_client, oppio_stubs):
    """Return an authenticated HTTP client.

    Mints an access token for the stubbed supervisor refresh token and
    builds an aiohttp client against the opp HTTP app with the Bearer
    header pre-set.  Presumably used as a pytest fixture — confirm with
    the surrounding conftest.
    """
    access_token = opp.auth.async_create_access_token(oppio_stubs)
    return await aiohttp_client(
        opp.http.app,
        headers={"Authorization": f"Bearer {access_token}"},
    )
ccc821b17490e8c9163a813d8b9aab03f1467713
698,822
import os
import json


def load_gt(file_name):
    """Load ground truth data, if existent.

    Reads ``mock/<file_name>.gt.json`` relative to the current working
    directory and normalises the id fields of the ``cea``/``cta``/``cpa``
    sections from strings to ints (``None`` values are preserved).

    :param file_name: base name of the ground-truth file (without the
        ``.gt.json`` suffix).
    :return: the processed dict, or None when the file does not exist.
    """
    # NOTE(review): path is resolved against the CWD, not this module's
    # location — callers must run from the project root.
    file_path = os.path.join(os.path.realpath('.'), 'mock', file_name + '.gt.json')
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            # load file
            processed_table = json.load(file)
            # convert ids to numbers (JSON stores them as strings)
            if 'cea' in processed_table:
                for item in processed_table['cea']:
                    item['col_id'] = int(item['col_id']) if item['col_id'] is not None else None
                    item['row_id'] = int(item['row_id']) if item['row_id'] is not None else None
            if 'cta' in processed_table:
                for item in processed_table['cta']:
                    item['col_id'] = int(item['col_id']) if item['col_id'] is not None else None
            if 'cpa' in processed_table:
                for item in processed_table['cpa']:
                    item['sub_id'] = int(item['sub_id']) if item['sub_id'] is not None else None
                    item['obj_id'] = int(item['obj_id']) if item['obj_id'] is not None else None
            return processed_table
    else:
        return None
bec3389d0316f3965fed874aa6668a2a44e27d8d
698,823
import logging
import os


def get_path_contents(folder_path):
    """
    Get the entire contents of a path.

    Returns both files and folders at this path; filenames contained within
    sub-folders are not included.

    :param folder_path: a string containing a valid path to a computer directory
    :return: a list containing names of files and folders in the provided folder path
    """
    logging.info("Retrieving contents of path {}".format(folder_path))
    folder_contents = os.listdir(folder_path)
    logging.info("Contents retrieved. {} items found.".format(len(folder_contents)))
    return folder_contents
bcae9d348e236ebdd3716e1397cbf23eecb9e883
698,824
def leiaDinheiro(msg):
    """
    -> Prompt repeatedly until a valid price is entered.

    Accepts a comma as the decimal separator (e.g. "3,50" -> 3.5).

    :param msg: the prompt message asking for a price
    :return: the valid price as a float
    """
    while True:
        preco = input(msg).strip().replace(',', '.')
        try:
            # Validate by attempting the conversion itself: the old
            # ``isalpha`` check let mixed input such as "12a" through and
            # crashed on the final float() call.
            return float(preco)
        except ValueError:
            print(f'\033[31mERRO: \"{preco}\" é um preço invalido!\033[m')
9712bf74c29f0ac62d6f2326794d0537cc885b5c
698,825
def process_dsn(dsn):
    """
    Take a standard DSN-dict and return the args and kwargs that will be
    passed to the psycopg2 Connection constructor.

    The four standard keys are popped from *dsn* (mutating it in place) and
    folded into a single connection string; whatever remains in *dsn* is
    returned as the kwargs dict.
    """
    host = dsn.pop('host')
    db = dsn.pop('db')
    user = dsn.pop('user')
    passwd = dsn.pop('passwd')
    args = ['host=%s dbname=%s user=%s password=%s' % (host, db, user, passwd)]
    return args, dsn
c73b641cd4e28c5824db6d37d3aec2786c7461ff
698,826
import sys


def Unchecked():
    """Verify if the command "Unchecked" is present. In this case it means
    the action was just unchecked from RoboDK (applicable to checkable
    actions only)."""
    return len(sys.argv) >= 2 and "Unchecked" in sys.argv[1:]
70e288eb5a662f0aca9ebf6f4429b3b8510ea307
698,827
import random def _random_level(): """Returns a random level for the new skiplist node The return value of this function is between 1 and ZSKIPLIST_MAXLEVEL (both inclusive), with a powerlaw-alike distribution where higher levels are less likely to be returned. """ level = 1 while random.random() < 0.25: level += 1 return level
3f5f13defae1140836fedf29c257ed6fc9b68384
698,829
import subprocess


def capture(command):
    """
    Run the input command as a subprocess and capture the subprocess' exit
    code, stdout and stderr.

    Parameters
    ----------
    command : list of str
        Command to be run as subprocess.

    Returns
    -------
    out : str
        Standard output message.
    err : str
        Standard error message.
    exitcode : int
        Exit code.
    """
    with subprocess.Popen(command,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          text=True) as proc:
        out, err = proc.communicate()
    return out, err, proc.returncode
b2d668f4ff125f1793b2688c127276fe9c209651
698,830
from string import Template
import os


def css_renditions(selector=None):
    """
    Returns a (long) string containing all the CSS styles in order to
    support terminal text renditions (different colors, bold, etc) in an
    HTML terminal using the dump_html() function.

    If *selector* is provided, all styles will be prefixed with said
    selector like so::

        ${selector} span.f0 { color: #5C5C5C; }

    Example::

        >>> css_renditions("#gateone").splitlines()[7]
        '#gateone span.f0 { color: #5C5C5C; } /* Black */'

    :raises IOError: when the fallback CSS template cannot be located.
    """
    # Try looking for the fallback CSS template in two locations:
    # * The same directory that holds terminal.py
    # * A 'templates' directory in the same location as terminal.py
    template_name = 'terminal_renditions_fallback.css'
    template_path = os.path.join(os.path.split(__file__)[0], template_name)
    if not os.path.exists(template_path):
        # Try looking in a 'templates' directory
        template_path = os.path.join(
            os.path.split(__file__)[0], 'templates', template_name)
        if not os.path.exists(template_path):
            raise IOError("File not found: %s" % template_name)
    with open(template_path) as f:
        css = f.read()
    # NOTE(review): the template file is re-read from disk on every call;
    # consider caching the contents if this is called frequently.
    renditions_template = Template(css)
    return renditions_template.substitute(selector=selector)
0582fce55be4b9e5a5e617dac02d930159857b20
698,831
import re

import numpy as np


def quac_qasm_transpiler(qiskit_qasm: str) -> str:
    """Converts Qiskit-generated QASM instructions into QuaC-supported
    instructions.

    :param qiskit_qasm: a string with a QASM program
    :return: a string with a modified QuaC-supported QASM program
    """
    quac_qasm = ""
    for line in qiskit_qasm.splitlines():
        # Remove certain types of unsupported operations (substring match).
        if any(instruction in line for instruction in ["measure", "creg", "barrier", "id"]):
            continue

        # Reformat implicit multiplication by pi (e.g. "2pi" -> "2*pi").
        for pi_mult in re.findall(r"[0-9]pi", line):
            line = line.replace(pi_mult, pi_mult[0] + '*pi')

        # Evaluate and inject new parameters (only one parameter set per line).
        instruction_params = re.findall(r"\(.+\)", line)
        if len(instruction_params) == 1:
            instruction_params = instruction_params[0]
        else:
            quac_qasm += line + "\n"
            continue

        # Evaluate pi-based parameter expressions.
        # SECURITY NOTE: the parameters come straight from the QASM text, so
        # ``eval`` runs untrusted input; the namespace is restricted to
        # numpy, but do not feed attacker-controlled QASM through this.
        evaluated_instruction_params = "("
        for parameter in instruction_params[1:-1].split(","):
            parameter = parameter.replace("pi", "np.pi")
            # Bug fix: the original evaluated "np.pi" expressions without
            # ``np`` in scope (NameError); supply numpy explicitly.
            evaluated_instruction_params += str(eval(parameter, {"np": np})) + ","
        evaluated_instruction_params = evaluated_instruction_params[:-1] + ")"
        line = line.replace(instruction_params, evaluated_instruction_params)

        # Add formatted QASM line to final result
        quac_qasm += line + "\n"
    return quac_qasm
1db5932053e00756e80db49b555ee70d3f4d881c
698,832
def moftype(cimtype, refclass):
    """Converts a CIM type name to MOF syntax."""
    # References render as "<class> REF"; every other type passes through.
    return refclass + " REF" if cimtype == 'reference' else cimtype
c09697b9a1fabea1f1529de8ab7ee7eeed4da846
698,834
def shift(arr: list):
    """Remove and return the first element of *arr* (like JS ``Array.shift``).

    Returns None when the list is empty; otherwise the list length is
    reduced by one.
    """
    return arr.pop(0) if arr else None
ff7ea2320c6b9f846b8d047e6a6a76d0cd7ab645
698,835
def handle_status_update(loan_id, new_status): """Handles a status update for an order.""" # lookup order in your system using loan_id # set order status to new_status return ''
d75f6c145346d7fe9c6fd124dc0590fd66b64da6
698,836
def name_fun(n):
    """
    Enumerate all finishing nodes of a best-of-(2n-1) stopping rule.

    A node is a string of '+'/'-' outcomes; a sequence finishes as soon as
    either symbol has occurred *n* times.

    :param n: number of occurrences of one symbol required to stop
    :return: (all finish nodes, nodes with n '-', nodes with n '+')
    """
    output = []
    frontier = ['']
    for _ in range(2 * n - 1):
        next_frontier = []
        for prefix in frontier:
            for symbol in '+-':
                candidate = prefix + symbol
                if candidate.count(symbol) >= n:
                    output.append(candidate)
                else:
                    next_frontier.append(candidate)
        frontier = next_frontier
    neg_symbol = [node for node in output if node.count('-') == n]
    pos_symbol = [node for node in output if node.count('+') == n]
    return output, neg_symbol, pos_symbol
74111c9a31475a764ed98ab5e2cfae6ec894b10c
698,838
import json


def translate_header(df, dictionary, dictionary_type='inline'):
    """Change the headers of a dataframe based on a mapping dictionary.

    Parameters
    ----------
    df : `DataFrame`
        The dataframe to be translated.
    dictionary_type : `str`, default `'inline'`
        The type of dictionary, either `'inline'` or `'file'`.
    dictionary : `dict` or `str`
        The mapping dictionary, or the path of a JSON mapping file.
    """
    if dictionary_type == 'file':
        with open(dictionary, 'r') as handle:
            dictionary = json.load(handle)
    elif dictionary_type != 'inline':
        raise ValueError('dictionary not supported: ' + dictionary_type)
    return df.rename(columns=dictionary)
371443bc112fa2b0892bbcf945e3089aab995122
698,839
def isHit(obbTree, pSource, pTarget):
    """
    Return True when the segment pSource -> pTarget intersects the OBB tree.

    From https://blog.kitware.com/ray-casting-ray-tracing-with-vtk/

    :param obbTree: vtkOBBTree-like object exposing ``IntersectWithLine``.
    :param pSource: segment start point.
    :param pTarget: segment end point.
    :return: bool
    """
    # IntersectWithLine returns 0 for "no intersection".
    return obbTree.IntersectWithLine(pSource, pTarget, None, None) != 0
6c37fcb3ffc6f33d308e9762b1b0c27b2b374ef7
698,840
def rect2pathd(rect):
    """Converts an SVG-rect element to a Path d-string.

    The rectangle will start at the (x,y) coordinate specified by the
    rectangle object and proceed counter-clockwise.

    NOTE(review): in the SVG y-down coordinate system the emitted corner
    order (p0 -> p1 -> p2 -> p3) traces the rect clockwise on screen —
    confirm the intended winding with callers.
    """
    # Missing attributes default to 0, matching the SVG spec defaults.
    x, y = float(rect.get('x', 0)), float(rect.get('y', 0))
    w, h = float(rect.get('width', 0)), float(rect.get('height', 0))
    # NOTE(review): ``in`` assumes *rect* supports attribute-name membership
    # (e.g. a dict of attributes); a bare xml.etree Element would need
    # ``rect.attrib`` here — verify the element type used by callers.
    if 'rx' in rect or 'ry' in rect:
        rx = float(rect.get('rx', 0))
        ry = float(rect.get('ry', 0))
        # Rounded rectangle: four straight edges joined by elliptical arcs.
        d = "M {} {} ".format(x + rx, y)  # right of p0
        d += "L {} {} ".format(x + w - rx, y)  # go to p1
        d += "A {} {} 0 0 1 {} {} ".format(rx, ry, x+w, y+ry)  # arc for p1
        d += "L {} {} ".format(x+w, y+h-ry)  # above p2
        d += "A {} {} 0 0 1 {} {} ".format(rx, ry, x+w-rx, y+h)  # arc for p2
        d += "L {} {} ".format(x+rx, y+h)  # right of p3
        d += "A {} {} 0 0 1 {} {} ".format(rx, ry, x, y+h-ry)  # arc for p3
        d += "L {} {} ".format(x, y+ry)  # below p0
        d += "A {} {} 0 0 1 {} {} z".format(rx, ry, x+rx, y)  # arc for p0
        return d

    # Plain rectangle: four corners, closed with 'z'.
    x0, y0 = x, y
    x1, y1 = x + w, y
    x2, y2 = x + w, y + h
    x3, y3 = x, y + h

    d = ("M{} {} L {} {} L {} {} L {} {} z"
         "".format(x0, y0, x1, y1, x2, y2, x3, y3))
    return d
60827810118abb4de47b12457806199639626aee
698,841
import numpy


def weights(x, y, seeds, influences, decay=2):
    """
    Calculate weights for the data based on the distance to the seeds.

    Use weights to ignore regions of data outside of the target anomaly.

    Parameters:

    * x, y : 1d arrays
        The x and y coordinates of the observations
    * seeds : list
        List of seeds, as returned by :func:`~fatiando.gravmag.harvester.sow`
    * influences : list of floats
        The respective diameter of influence for each seed. Observations
        outside the influence will have very small weights.
        A recommended value is aproximately the diameter of the anomaly
    * decay : float
        The decay factor for the weights. Low decay factor makes the weights
        spread out more. High decay factor makes the transition from large
        weights to low weights more abrupt.

    Returns:

    * weights : 1d array
        The calculated weights
    """
    # Squared distance to each seed, normalised by its influence diameter.
    scaled_sq_dist = numpy.array(
        [((x - seed.x) ** 2 + (y - seed.y) ** 2) / diameter ** 2
         for seed, diameter in zip(seeds, influences)])
    # min along axis=0 keeps, per observation, the nearest seed's distance.
    nearest = scaled_sq_dist.min(axis=0)
    return numpy.exp(-(nearest ** decay))
2841c0740614cda25a157c3ff8a4eb0aa859e16c
698,842
def judge_same_month(input_month_list):
    """
    Check whether all records fall in the same calendar month.

    :param input_month_list: list of dicts, each carrying a ``trade_date``
        date/datetime value.
    :return: "YYYY-MM-01" string when every record shares one month,
        otherwise False. (An empty list also yields False.)
    """
    months = {item["trade_date"].strftime("%Y-%m") for item in input_month_list}
    if len(months) != 1:
        return False
    return months.pop() + "-01"
69082ac16150082bfe6cd9b384fa010a3849e682
698,843
def _create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity): """ Aggregate information of trip elements in a structured dictionary Parameters ---------- temp_trip_stack : list list of dictionary like elements (either pandas series or python dictionary). Contains all elements that will be aggregated into a trip origin_activity : dictionary like Either dictionary or pandas series destination_activity : dictionary like Either dictionary or pandas series Returns ------- dictionary """ # this function return and empty dict if no tripleg is in the stack first_trip_element = temp_trip_stack[0] last_trip_element = temp_trip_stack[-1] # all data has to be from the same user assert origin_activity['user_id'] == last_trip_element['user_id'] # double check if trip requirements are fulfilled assert origin_activity['activity'] == True assert destination_activity['activity'] == True assert first_trip_element['activity'] == False trip_dict_entry = {'user_id': origin_activity['user_id'], 'started_at': first_trip_element['started_at'], 'finished_at': last_trip_element['finished_at'], 'origin_staypoint_id': origin_activity['id'], 'destination_staypoint_id': destination_activity['id'], 'tpls': [tripleg['id'] for tripleg in temp_trip_stack if tripleg['type'] == 'tripleg'], 'spts': [tripleg['id'] for tripleg in temp_trip_stack if tripleg['type'] == 'staypoint']} return trip_dict_entry
f2ddb6c19650c001c714ddeb8372b81ff40f2abe
698,844