content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import logging
import sys
import gc

import tokenizers


def get_bm25_index(args, db, db_opts):
    """Build a BM25 index (inverted word-to-document statistics) over a doc DB.

    Args:
        args: namespace with ``tokenizer`` (tokenizer class name) and
            ``ngram`` (max n-gram length) attributes.
        db: database type identifier understood by ``retriever.get_class``.
        db_opts: keyword arguments used to instantiate the database class.

    Returns:
        Tuple ``(bm25, (DOC2IDX, doc_ids))`` where ``bm25`` is the populated
        BM25Okapi index and ``DOC2IDX`` maps doc ids to positional indexes.
    """
    # Map doc_ids to indexes (module-level so workers could share it).
    global DOC2IDX

    db_class = retriever.get_class(db)
    logging.info("Getting doc ids")
    # NOTE(review): documents are fetched lazily inside the loop, so the DB
    # connection must stay open for the whole tokenization pass.
    with db_class(**db_opts) as doc_db:
        doc_ids = doc_db.get_doc_ids()
        DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}

        tok_class = tokenizers.get_class(args.tokenizer)
        logging.info(f"Documents ({len(doc_ids)}) are being tokenized")
        tok = tok_class()
        tokenized_corpus = []
        bm25 = BM25Okapi()
        for i, doc_id in enumerate(doc_ids):
            tokens = tok.tokenize(retriever.utils.normalize(doc_db.get_doc_text(doc_id)))
            ngrams = tokens.ngrams(n=args.ngram, uncased=True,
                                   filter_fn=retriever.utils.filter_ngram)
            tokenized_corpus.append(ngrams)
            # Flush to the index in batches of 10k docs to cap peak memory.
            if i % 10000 == 0:
                logging.info("%s/%s" % (i, len(doc_ids)))
                bm25.add(tokenized_corpus)
                logging.info(f"sizeof tokenized_corpus {sys.getsizeof(tokenized_corpus)}")
                logging.info(f"sizeof doc_freqs {sys.getsizeof(bm25.doc_freqs)}")
                logging.info(f"sizeof nd {sys.getsizeof(bm25.nd)}")
                logging.info(f"sizeof num_doc {sys.getsizeof(bm25.num_doc)}")
                tokenized_corpus = []
                gc.collect()

    # Flush the final partial batch.
    bm25.add(tokenized_corpus)
    logging.info(f"sizeof tokenized_corpus {sys.getsizeof(tokenized_corpus)}")
    logging.info(f"sizeof doc_freqs {sys.getsizeof(bm25.doc_freqs)}")
    logging.info(f"sizeof nd {sys.getsizeof(bm25.nd)}")
    logging.info(f"num_doc {bm25.num_doc}")
    logging.info("Tokenization done")
    bm25.finish()
    return bm25, (DOC2IDX, doc_ids)
fc6f29099683cb7fc88a3b5079f290d1229e80bc
3,638,600
import torch


def get_bernoulli_sample(probs):
    """Draw one Bernoulli sample per element of ``probs``.

    Args:
        probs (torch.Tensor): per-element probabilities of drawing a 1.

    Returns:
        torch.Tensor: binary samples (0 or 1) with the same shape as probs.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # ceil(p - u) with u ~ U[0,1) is 1 exactly when u < p.
    noise = torch.rand(probs.shape, device=device)
    return torch.ceil(probs - noise)
14c45741d47f5eaff24893471425ddd4de7e2e4b
3,638,601
def angle_load(root, ext='.angle'):
    """
    Load information from the :ref:`Output_angle` file previously created
    by :func:`.angle_save`.

    Args:
        root (str): root name for the file to be loaded
        ext (str, optional): default ".angle" - extension for the file to be
            loaded: name = root + ext

    Returns:
        ndarray(float): 2D array containing degrees and corresponding
        values of adf

    Raises:
        ValueError: if the 'DEGREE ADF' header row is not found (the
            original raised an accidental NameError in this case).
    """
    path = root + ext
    try:
        # Context manager guarantees the handle is closed; the original
        # leaked the open file object on every call.
        with open(path, 'r') as f:
            text = f.readlines()
    except IOError:
        utility.err_file('angle_load', path)
    # Tokenize every line, then scan for the column-header row that marks
    # the beginning of the data section.
    rows = [line.split() for line in text]
    for i, row in enumerate(rows):
        if len(row) > 1 and row == ['DEGREE', 'ADF']:
            return np.array(rows[i + 1:], dtype=float)
    raise ValueError("No 'DEGREE ADF' data header found in {}".format(path))
f1218dc2dc1a6c5ef1c56689111086137b04a786
3,638,602
import subprocess
import locale


def parse_env_file(filename, pattern):
    """Source a shell script and extract variables from it.

    The file is sourced by the shell so substitutions such as ``$()``
    are evaluated; ``set`` then dumps the resulting variables and the
    grep pattern selects the ones of interest.

    Args:
        filename: path of the shell script to source.
        pattern: extended-regex pattern the ``NAME=value`` lines must match.

    Returns:
        dict mapping variable names to their raw string values.
    """
    shell_cmd = 'source {}; set | grep -E "{}"'.format(filename, pattern)
    raw = subprocess.check_output(['sh', '-c', shell_cmd])
    decoded = raw.decode(locale.getpreferredencoding())
    variables = {}
    for entry in decoded.splitlines():
        eq = entry.find('=')
        variables[entry[:eq]] = entry[eq + 1:]
    return variables
d7a3a14bc163066f0232bf7811c397fbb594b45c
3,638,603
def dispatch(intent_request):
    """
    Route an incoming Lex intent to its handler.

    Raises an Exception when the intent has no registered handler.
    """
    intent_name = intent_request['currentIntent']['name']
    logger.debug('dispatch userId={}, intentName={}'.format(
        intent_request['userId'], intent_name))

    # Dispatch table instead of an if/elif chain.
    handlers = {
        'GetAccountDetail': get_balance,
        'GetLoanDetail': get_loan_balance,
        'GetLoanProducts': get_loan_offer,
    }
    if intent_name in handlers:
        return handlers[intent_name](intent_request)
    raise Exception('Intent with name ' + intent_name + ' not supported')
e7803d2ad62feb59619c212cb720e0b25fd29a57
3,638,604
import pickle


def load_newsdata_and_labels():
    """
    Read newsdata pickles and return ``[x_text, y]``: a list of documents
    (each a single whitespace-joined string) and a one-hot-encoded label
    matrix with 5 classes.
    """

    def _iter_pickles(pickle_file):
        # Each file holds many sequentially pickled objects; yield them all.
        with open(pickle_file, "rb") as handle:
            while True:
                try:
                    yield pickle.load(handle)
                except EOFError:
                    return

    labels = list(_iter_pickles("data_own/labels.pkl"))
    texts = list(_iter_pickles("data_own/texts.pkl"))

    # Join each document's word list into one string.
    joined_docs = [' '.join(words) for words in texts]

    # Labels arrive as soft scores; bucket them into 5 one-hot classes.
    # (6 classes are mentioned in the paper but only 5 occur in the data.)
    one_hot = np.zeros((len(labels), 5))
    for row, score in enumerate(labels):
        if score[0] == 1:
            one_hot[row][0] = 1
        elif score[0] == 0.7:
            one_hot[row][1] = 1
        elif score[0] == 0.5:
            one_hot[row][2] = 1
        elif score[1] == 0.7:
            # NOTE(review): index 1 here while every other branch reads
            # index 0 — looks intentional for this dataset, but confirm.
            one_hot[row][3] = 1
        elif score[0] == 0:
            one_hot[row][4] = 1

    return [joined_docs, one_hot]
3838cbd42e5bab898fc969ebe9b4f326c736c773
3,638,605
def CollectUniqueByOrderOfAppearance(dataset: list):
    """
    Collect all unique elements in order of first appearance and return
    them as a list.
    :param dataset:list: dataset list
    """
    try:
        observed = set()
        ordered = []
        for element in dataset:
            if element not in observed:
                observed.add(element)
                ordered.append(element)
        return ordered
    except Exception as ex:
        template = "An exception of type {0} occurred in [ContentSupport.CollectUniqueByOrderOfAppearance]. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
e252d064bf0c525ec1c1781ca6dc915dbc9d46f0
3,638,606
async def list_slot_set_actions(current_user: User = Depends(Authentication.get_current_user_and_bot)):
    """
    Returns list of slot set actions for bot.
    """
    bot_actions = mongo_processor.list_slot_set_actions(current_user.get_bot())
    return Response(data=bot_actions)
f52f1e996b2be38a0e175ca6bfcbfd694dc79240
3,638,607
from PyQt5 import QtGui, QtWidgets, QtCore


def openfile_dialog(file_types="All files (*)", multiple_files=False,
                    file_path='.', caption="Select a file..."):
    """
    Opens a File dialog which is used in open_file() function

    This function uses pyQt5.

    Parameters
    ----------
    file_types : str, optional. Default = all types of files accepted
    multiple_files : bool, optional. Default = False
        Whether or not multiple files can be selected
    file_path: str, optional. Default = '.'
        path to starting or root directory
    caption: str, optional. Default = "Select a file..."
        caption of the open file dialog

    Returns
    -------
    filename : str
        full filename with absolute path and extension

    Notes
    -----
    In jupyter notebooks use ``%gui Qt`` early in the notebook.

    Examples
    --------
    >> import sidpy as sid
    >> filename = sid.io.openfile_dialog()
    >> print(filename)
    """
    # Check whether QT is available.  The original had an *empty* try body
    # (a syntax error); the PyQt import belongs inside it.
    try:
        from PyQt5 import QtWidgets, QtCore  # noqa: F401
    except ImportError:
        raise ModuleNotFoundError('Required package PyQt5 not available')

    # try to find a parent the file dialog can appear on top
    try:
        get_QT_app()
    except:
        pass

    # ``unicode`` was Python-2 only and raised NameError on Python 3.
    for param in [file_path, file_types, caption]:
        if param is not None:
            if not isinstance(param, str):
                raise TypeError('param must be a string')

    parent = None
    if multiple_files:
        func = QtWidgets.QFileDialog.getOpenFileNames
        # NOTE(review): ``options`` normally takes QFileDialog.Options, not a
        # list of window flags — confirm this works on the targeted Qt build.
        fnames, file_filter = func(parent, caption, file_path,
                                   filter=file_types,
                                   options=[QtCore.Qt.WindowStaysOnTopHint])
        if len(fnames) > 0:
            fname = fnames[0]
        else:
            return
    else:
        func = QtWidgets.QFileDialog.getOpenFileName
        fname, file_filter = func(parent, caption, file_path,
                                  filter=file_types)

    if multiple_files:
        return fnames
    else:
        return str(fname)
b96112c5af25350b49bc086820bcea32c228d3c8
3,638,608
def getBlocks(bal: "BKAlignedLayout"):
    """
    Finds all blocks of a given layout.

    :param bal: The layout of which the blocks shall be found
    :return: The blocks of the given layout, keyed by each block's root node
    """
    result = defaultdict(list)
    for current_layer in bal.layeredGraph.layers:
        for vertex in current_layer:
            result[bal.root[vertex]].append(vertex)
    return result
6f40dc209b72747f6960d474d54e1ffedd2fa9a1
3,638,609
def read_mcmc(path_to_file):
    """
    Reads mcmc chain from file

    Parameters
    ----------
    path_to_file: string
        Path to mcmc chain file

    Returns
    ---------
    emcee_table: pandas dataframe
        Dataframe of mcmc chain values with NANs removed
    """
    colnames = ['mhalo_c', 'mstellar_c', 'lowmass_slope', 'highmass_slope',
                'scatter']

    # NOTE(review): relies on module-level globals ``mf_type`` and
    # ``survey`` — confirm they are defined before this is called.
    if mf_type == 'smf' and survey == 'eco':
        emcee_table = pd.read_csv(path_to_file, names=colnames, sep='\s+',
                                  dtype=np.float64)
    else:
        emcee_table = pd.read_csv(path_to_file, names=colnames,
                                  delim_whitespace=True, header=None)

    # Drop comment rows and coerce the leading columns to float (no-op when
    # the file was already read with a float dtype).
    emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
    emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
    emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
    emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)

    # Cases where the last parameter was a NaN and its value was written to
    # the first element of the next line, followed by 4 NaNs for the other
    # parameters.  Write the repaired value back through ``.iat`` so it
    # actually reaches the dataframe — the original mutated the array
    # returned by ``.values``, which may be a throwaway copy.  Iterating to
    # len-1 also avoids the original's IndexError when the last row matched.
    for idx in range(len(emcee_table) - 1):
        row = emcee_table.iloc[idx].values
        if np.isnan(row[4]) and not np.isnan(row[3]):
            emcee_table.iat[idx, 4] = emcee_table.iat[idx + 1, 0]

    # Cases where rows of NANs appear
    emcee_table = emcee_table.dropna(axis='index',
                                     how='any').reset_index(drop=True)
    return emcee_table
f8d0cdd5ea5a7274e81992722db4df29d7664e43
3,638,610
def rotzV(x, theta):
    """Rotate coordinate ``x`` about the local z axis by ``theta`` radians."""
    c, s = np.cos(theta), np.sin(theta)
    rotation = [[c, -s, 0],
                [s,  c, 0],
                [0,  0, 1]]
    return np.dot(rotation, x)
43e4f7a8f93fb2b237da1f6ac3f699bf41e38e0a
3,638,611
import fcntl


def has_flock(fd):
    """
    Checks if fd has flock over it

    Probes with a non-blocking exclusive lock: if the probe fails with
    BlockingIOError some other open file description already holds the lock.

    True if it is locked, False otherwise

    :param fd: file object or descriptor accepted by ``fcntl.flock``
    :return:
    :rtype: bool
    """
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        return True
    else:
        # The probe succeeded, which means we just *acquired* the lock —
        # release it so the check has no side effect (the original leaked
        # the acquired lock).
        fcntl.flock(fd, fcntl.LOCK_UN)
        return False
9ae997d06a12d73a659958bc2f0467ebdf0142b7
3,638,612
def ExtractCodeBySystem(codable_concept, system):
    """Extract code in codable_concept.

    Returns the first code whose coding ``system`` equals the requested one,
    or None when no coding matches.
    """
    for entry in codable_concept.coding:
        has_both = entry.HasField('system') and entry.HasField('code')
        if has_both and entry.system.value == system:
            return entry.code.value
    return None
e672cb3d2c1d8d65e49d00539cdecf6ee03d1143
3,638,613
def add_item(data, type):
    """
    Add an item to the data in ranked order

    Repeatedly prompts on the console for a new item (a line of text),
    uses ``search_for_spot`` (a binary-search helper) to find its ranked
    position, and inserts it.  An empty line ends the loop.

    Args:
        data  The original list of items
        type  A label describing the type of items being ranked

    Returns
        A new list containing the new item
    """
    clear_console()
    print_header(' A D D I T E M')
    prompt_message = "\nAdd something new for {type}, leave blank to quit ==> ".format(type=type.lower())
    entry = input(prompt_message)
    while entry != "":
        position = search_for_spot(entry, data, 0, len(data) - 1)
        data.insert(position, entry)
        entry = input(prompt_message)
    return data
0662fb19725e0985043d7ea75bad4c5fa55d921f
3,638,614
from typing import List def _all_steps_multiples_of_min_step(rows: np.ndarray) -> bool: """ Are all steps integer multiples of the smallest step? This is used in determining whether the setpoints correspond to a regular grid Args: rows: the output of _rows_from_datapoints Returns: The answer to the question """ steps: List[np.ndarray] = [] for row in rows: # TODO: What is an appropriate precision? steps += list(np.unique(np.diff(row).round(decimals=15))) steps = np.unique(steps) remainders = np.mod(steps[1:]/steps[0], 1) # TODO: What are reasonable tolerances for allclose? asmoms = bool(np.allclose(remainders, np.zeros_like(remainders))) return asmoms
73c4d63cdfa96610ce21e18424663c01fd395e27
3,638,615
import subprocess
import os
import configparser


def black_config(
    config: c2cciutils.configuration.ChecksBlackConfigurationConfig,
    full_config: c2cciutils.configuration.Configuration,
    args: Namespace,
) -> bool:
    """
    Check the black configuration.

    config is like:
      properties: # dictionary of properties to check

    Arguments:
        config: The check section config
        full_config: All the CI config
        args: The parsed command arguments
    """
    del full_config, args

    # The check only applies when the repository contains python files.
    tracked = subprocess.check_output(["git", "ls-files"]).decode().strip().split("\n")
    has_python = any(
        os.path.isfile(name)
        and magic.from_file(name, mime=True) == "text/x-python"  # type: ignore
        for name in tracked
    )
    if not has_python:
        return True

    if not os.path.exists("pyproject.toml"):
        c2cciutils.error(
            "black_config",
            "The file 'pyproject.toml' with a section tool.black is required",
            "pyproject.toml",
        )
        return False

    config_parser = configparser.ConfigParser()
    config_parser.read("pyproject.toml")
    if "tool.black" not in config_parser.sections():
        c2cciutils.error(
            "black_config",
            "The 'tool.black' section is required in the 'pyproject.toml' file",
            "pyproject.toml",
        )
        return False

    if isinstance(config, dict):
        for key, value in config.get("properties", {}).items():
            if config_parser.get("tool.black", key) != str(value):
                c2cciutils.error(
                    "black_config",
                    f"The property '{key}' should have the value, '{value}', "
                    f"but is '{config_parser.get('tool.black', key)}'",
                    "pyproject.toml",
                )
                return False
    return True
7cee55e29ec18656abbe785fc63dae69354024a8
3,638,616
def get_full_test_names(testargs, machine, compiler):
###############################################################################
    """
    Return full test names in the form:
    TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS
    Testmods are optional

    Testargs can be categories or test names and support the NOT symbol '^'

    >>> get_full_test_names(["cime_tiny"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(['ERS.f19_g16_rx1.A', 'NCK.f19_g16_rx1.A', 'PEA_P1_M.f45_g37_rx1.A'], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']

    >>> get_full_test_names(["cime_tiny", "^NCK.f19_g16_rx1.A"], "melvin", "gnu")
    ['ERS.f19_g16_rx1.A.melvin_gnu']
    """
    expect(machine is not None, "Must define a machine")
    expect(compiler is not None, "Must define a compiler")
    e3sm_test_suites = get_test_suites()

    selected = set()
    excluded = set()
    for raw_arg in testargs:
        # remove any whitespace in name
        arg = raw_arg.strip()
        if arg.startswith("^"):
            excluded.add(arg[1:])
        elif arg in e3sm_test_suites:
            selected.update(get_test_suite(arg, machine, compiler))
        else:
            try:
                selected.add(CIME.utils.get_full_test_name(arg, machine=machine, compiler=compiler))
            except Exception:
                if "." not in arg:
                    expect(False, "Unrecognized test suite '{}'".format(arg))
                else:
                    raise

    for neg in excluded:
        if neg in e3sm_test_suites:
            selected -= set(get_test_suite(neg, machine, compiler))
        else:
            fullname = CIME.utils.get_full_test_name(neg, machine=machine, compiler=compiler)
            selected.discard(fullname)

    return list(sorted(selected))
05fd19a412172195c2365b0c002c442ceabf7946
3,638,617
def record_or_not(record_mode, line, start_block, end_block):
    """
    Toggle the recording state based on block markers in ``line``.

    Turns recording on when ``start_block`` appears while off, and off when
    ``end_block`` appears while on; otherwise the state is unchanged.
    """
    if not record_mode and start_block in line:
        return True
    if record_mode and end_block in line:
        return False
    return record_mode
2b3952ab7fa3aa23ccbd712dee0aa06083b7b5f5
3,638,618
def compute_angle_stats(vec_mat, unit='deg'):
    """
    Get mean and standard deviation of the angles between the successive
    vectors used in the reconstruction.

    Returns (mean, std), in degrees when ``unit == 'deg'``, radians otherwise.
    """
    # NOTE(review): the loop bound uses shape[1] while *rows* are indexed —
    # presumably the matrix is square; confirm with callers.
    angle_list = []
    for k in range(vec_mat.shape[1] - 1):
        u = vec_mat[k] / np.linalg.norm(vec_mat[k])
        v = vec_mat[k + 1] / np.linalg.norm(vec_mat[k + 1])
        cos_angle = np.dot(u, v)
        offset = np.pi if cos_angle < 0 else 0
        angle_list.append(np.arccos(cos_angle) + offset)
    angle_arr = np.asarray(angle_list)
    if unit == 'deg':
        angle_arr *= 180 / np.pi
    return (np.mean(angle_arr), np.std(angle_arr))
d1701a3eca12e41b05a38433a7561f811aceb02c
3,638,619
def id_test_data(value):
    """Generate a readable id string from a test-case object."""
    return "action={} return={}".format(value.action_name, value.return_code)
47649b7302ef2f3ad046fc1c7b3fc18da2687921
3,638,620
def kneeJointCenter(frame, hip_JC, delta, vsk=None):
    """Calculate the knee joint center and axis.

    Takes in a dictionary of marker names to x, y, z positions, the hip
    axis and pelvis axis.  Calculates the knee joint axis and returns the
    knee origin and axis.

    Markers used: RTHI, LTHI, RKNE, LKNE, hip_JC
    Subject Measurement values used: RightKneeWidth, LeftKneeWidth

    Knee joint center: Computed using Knee Axis Calculation [1]_.

    Parameters
    ----------
    frame : dict
        dictionaries of marker lists.
    hip_JC : array
        An array of hip_JC containing the x,y,z axes marker positions of
        the hip joint center.
    delta : float, optional
        The length from marker to joint center, retrieved from subject
        measurement file.  (Unused here; kept for interface compatibility.)
    vsk : dict, optional
        A dictionary containing subject measurements.

    Returns
    -------
    R, L, axis : array
        Returns an array that contains the knee axis center in a 1x3 array
        of xyz values, which is then followed by a 2x3x3 array composed of
        the knee axis center x, y, and z axis components. The xyz axis
        components are 2x3 arrays consisting of the axis center in the
        first dimension and the direction of the axis in the second
        dimension.

    References
    ----------
    .. [1] Baker, R. (2013). Measuring walking : a handbook of clinical
       gait analysis. Mac Keith Press.
    """
    # Global values: half the measured knee width plus a fixed
    # marker-thickness offset (mm).
    mm = 7.0
    R_delta = (vsk['RightKneeWidth'] / 2.0) + mm
    L_delta = (vsk['LeftKneeWidth'] / 2.0) + mm

    # REQUIRED MARKERS: RTHI, LTHI, RKNE, LKNE, hip_JC
    RTHI = frame['RTHI']
    LTHI = frame['LTHI']
    RKNE = frame['RKNE']
    LKNE = frame['LKNE']
    R_hip_JC = hip_JC[1]
    L_hip_JC = hip_JC[0]

    # Determine the position of the knee joint centers.
    R = findJointC(RTHI, R_hip_JC, RKNE, R_delta)
    L = findJointC(LTHI, L_hip_JC, LKNE, L_delta)

    def _knee_axis(jc, hip, kne, left):
        """Build the normalized knee axis frame, translated to origin ``jc``.

        (ref. Clinical Gait Analysis hand book, Baker 2013).  The left and
        right sides are mirror images, differing only in the cross-product
        order used for the x axis.
        """
        # Z axis is the thigh bone: from knee JC toward hip JC.
        axis_z = hip - jc
        # X axis is perpendicular to the plane determined by the KJC, HJC
        # and KNE markers; cross order is flipped on the left side.
        seg = kne - hip
        axis_x = cross(seg, axis_z) if left else cross(axis_z, seg)
        # Y completes the right-handed frame.
        axis_y = cross(axis_z, axis_x)
        # Normalize each axis and add the origin back to the vector.
        frame_axes = []
        for a in (axis_x, axis_y, axis_z):
            a = np.asarray(a)
            frame_axes.append(a / norm3d(a) + jc)
        return np.asarray(frame_axes)

    Raxis = _knee_axis(R, R_hip_JC, RKNE, left=False)
    Laxis = _knee_axis(L, L_hip_JC, LKNE, left=True)
    axis = np.asarray([Raxis, Laxis])
    return [R, L, axis]
1f4ab38ef90c79c964587551e05ce32ccf482e53
3,638,621
def uniform(low: float = 0.0, high: float = 1.0, size: tp.Optional[SIZE_TYPE] = None):
    """
    Draw samples from a uniform distribution on [low, high).
    """
    if high < low:
        raise ValueError("high must not be less than low")
    base = _draw_and_reshape(size, rand)
    return base * (high - low) + low
da79a086a9c129a88e45c11407ca0d3993104f1a
3,638,622
import math


def toInt():
    """Build a transform that casts the current value to Int (floor).

    The returned callable follows the transform-function signature and
    returns None when the value is missing or not parseable as a number.
    """
    def transform_function(current_value: object, record: dict,
                           complete_transform_schema: dict,
                           custom_variables: dict):
        if current_value is None:
            return None
        try:
            # Accept comma decimal separators ("3,7" -> "3.7").
            normalized = current_value.replace(",", ".")
            return int(math.floor(float(normalized)))
        except (AttributeError, ValueError, TypeError, OverflowError):
            # Not a string / not numeric / non-finite — treat as missing.
            # (The original bare ``except`` also swallowed KeyboardInterrupt.)
            return None
    return transform_function
0f58cb6c85ca4015696c7fef2d9378c0466c2422
3,638,623
import json


def public_upload(request):
    """Public form to upload missing images

    :param request: current user request
    :type request: django.http.request
    :return: rendered response
    :rtype: HttpResponse
    """
    upload_success = False
    if request.method == "POST":
        document = Document.objects.get(id=request.POST.get("inputDocument", None))
        if document:
            uploaded_image = request.FILES.get("inputFile", None)
            if uploaded_image:
                DocumentImage(
                    document=document,
                    image=uploaded_image,
                    name=document.word,
                    confirmed=False,
                ).save()
                upload_success = True

    # Documents still lacking an image, and the training sets they belong to.
    missing_images = Document.objects.values_list(
        "id", "word", "article", "training_sets"
    ).filter(document_image__isnull=True)
    training_sets = (
        TrainingSet.objects.values_list("id", "title")
        .filter(documents__document_image__isnull=True)
        .distinct()
    )
    context = {
        "documents": json.dumps(list(missing_images)),
        "training_sets": json.dumps(list(training_sets)),
        "upload_success": upload_success,
    }
    return render(request, "public_upload.html", context)
857b558566a52dc2b192efc3b1441a0da11a649c
3,638,624
import pathlib
import re


def load_classes(fstem):
    """Load all classes from a python file.

    Splits the generated file into a header (everything before the first
    class), the per-class bodies, and the trailing ``update_forward_refs``
    lines, fixing ``List[Any]`` annotations to their concrete forward
    reference along the way.
    """
    all_classes = []
    header = []
    forward_refs = []
    class_text = None
    done_header = False
    fname = pathlib.Path('trestle/oscal/tmp') / (fstem + '.py')
    # Hoisted out of the loop: pattern that locates the forward-referenced
    # element type of an Optional[Union[..., List[Any]]] annotation.
    any_pattern = re.compile(r'.*Optional\[Union\[([^,]+),.*List\[Any\]')
    with open(fname, 'r', encoding='utf8') as infile:
        for raw in infile.readlines():
            if raw.find('.update_forward_refs()') >= 0:
                # collect forward references
                forward_refs.append(raw)
                continue
            if raw.find(class_header) == 0:
                # start of new class: flush the one we were accumulating
                done_header = True
                if class_text is not None:
                    all_classes.append(class_text)
                class_text = ClassText(raw, fstem)
                continue
            if not done_header:
                # still in header
                header.append(raw.rstrip())
                continue
            # this may not be needed
            refs = any_pattern.findall(raw)
            if len(refs) == 1:
                logger.info(f'Replaced Any with {refs[0]} in {fstem}')
                original_line = raw
                raw = raw.replace('List[Any]', f'List[{refs[0]}]')
                logger.info(f'{original_line} -> {raw}')
            class_text.add_line(raw.rstrip())
    all_classes.append(class_text)  # don't forget final class
    # force all oscal versions to the current one
    all_classes = constrain_oscal_version(all_classes)
    return all_classes
da4baaed8d849b90c51200b778bdde9a47d58cc4
3,638,625
def findAllSubstrings(string, substring):
    """
    Returns a list of all substring starting positions in string or an empty
    list if substring is not present in string.

    :param string: a template string
    :param substring: a string, which is looked for in the ``string`` parameter.

    :returns: a list of substring starting positions in the template string
    """
    positions = []
    offset = string.find(substring)
    while offset != -1:
        positions.append(offset)
        # Advance by one (not len(substring)) so overlapping matches count.
        offset = string.find(substring, offset + 1)
    return positions
d9af07047c9ac6572dff430e07117220b0587847
3,638,626
def build_optimizer(name, lr=0.001, **kwargs):
    """Get an optimizer for TensorFlow high-level API Estimator.

    Args:
        name (str): Optimizer name.
        lr (float): Learning rate.
        kwargs (dictionary): Optimizer arguments.

    Returns:
        tf.train.Optimizer
    """
    if name == 'Momentum':
        # Momentum needs a default momentum value when none is supplied.
        kwargs.setdefault('momentum', 0.9)
        return tf.train.MomentumOptimizer(learning_rate=lr, **kwargs)

    factories = {
        'Adadelta': tf.train.AdadeltaOptimizer,
        'Adagrad': tf.train.AdagradOptimizer,
        'Adam': tf.train.AdamOptimizer,
        'Ftrl': tf.train.FtrlOptimizer,
        'RMSProp': tf.train.RMSPropOptimizer,
        'SGD': tf.train.GradientDescentOptimizer,
    }
    if name not in factories:
        raise ValueError(
            """Optimizer name should be either 'Adadelta', 'Adagrad', 'Adam', 'Ftrl', 'Momentum', 'RMSProp', or 'SGD'"""
        )
    return factories[name](learning_rate=lr, **kwargs)
c04c9348f26fdf25951f362a693cb70765133756
3,638,627
import yaml


def generate_pruning_config(model_name, sparsity, begin_step=0, end_step=-1,
                            schedule='ConstantSparsity',
                            granularity='BlockSparsity',
                            respect_submatrix=False,
                            two_over_four_chin=False,
                            ch_share=True,
                            path=None):
    """Generate a model pruning config out of sparsity configuration.

    Arguments:
        model_name: A `str`. 'mnist', 'resnet56' (CIFAR-10), 'resnet50'
            (ImageNet), or 'mobilenetV1'.
        sparsity: A `dict`. Keys are `str` representing layer names (or
            possibly a regexp pattern), and values are sparsity (must be
            convertible to float).
        begin_step: Step at which to begin pruning. `0` by default.
        end_step: Step at which to end pruning. `-1` by default. `-1`
            implies continuing to prune till the end of training
            (available only for 'ConstantSparsity' schedule).
        schedule: 'ConstantSparsity' or 'PolynomialDecay'.
        granularity: 'ArayaMag', 'BlockSparsity', 'ChannelPruning',
            'KernelLevel', or 'QuasiCyclic'.
        respect_submatrix: A `bool`. Whether or not to mask weight tensors
            submatrix-wise.
        two_over_four_chin: A `bool`. Whether or not to realize the
            two-out-of-four sparsity pattern along input channels. Defaults
            to `False`, in which case the pattern follows output channels.
        ch_share: A `bool`. Whether or not to share masks across layers.
        path: `None` or a `str`. If `str`, saves the config as a YAML file.

    Returns:
        A ModelPruningConfig instance.
    """
    def _schedule_config(raw_sparsity):
        target = float(raw_sparsity)
        cfg = dict(begin_step=begin_step, end_step=end_step, frequency=100)
        if schedule == 'ConstantSparsity':
            cfg['target_sparsity'] = target
        elif schedule == 'PolynomialDecay':
            cfg['initial_sparsity'] = 0.
            cfg['final_sparsity'] = target
            cfg['power'] = 3
        else:
            raise ValueError
        return pruning_base_configs.PruningScheduleConfig(
            class_name=schedule, config=cfg)

    def _granularity_config(raw_sparsity):
        target = float(raw_sparsity)
        cfg = dict()
        if granularity in ('ArayaMag', 'QuasiCyclic'):
            cfg['gamma'] = int(1 / (1.0 - target))
            if respect_submatrix:
                cfg['respect_submatrix'] = True
        elif granularity == 'BlockSparsity':
            cfg['block_size'] = [1, 1]
            cfg['block_pooling_type'] = 'AVG'
        elif granularity == 'ChannelPruning':
            cfg['ch_axis'] = -1
        elif granularity == 'KernelLevel':
            cfg['ker_axis'] = [0, 1]
        elif granularity == 'TwoOutOfFour':
            cfg['block_axis'] = -2 if two_over_four_chin else -1
            if respect_submatrix:
                cfg['respect_submatrix'] = True
        else:
            raise ValueError
        return pruning_base_configs.PruningGranularityConfig(
            class_name=granularity, config=cfg)

    def _pruning_config(raw_sparsity):
        return pruning_base_configs.PruningConfig(
            pruning_schedule=_schedule_config(raw_sparsity),
            pruning_granularity=_granularity_config(raw_sparsity))

    model_pruning_config = pruning_base_configs.ModelPruningConfig(
        model_name=model_name, pruning=[])
    for layer_name, layer_sparsity in sparsity.items():
        model_pruning_config.pruning.append(
            pruning_base_configs.LayerPruningConfig(
                layer_name=layer_name,
                pruning=[
                    pruning_base_configs.WeightPruningConfig(
                        weight_name='kernel',
                        pruning=_pruning_config(layer_sparsity),
                    )
                ],
            ))

    if granularity == 'ChannelPruning' and ch_share:
        if model_name.startswith('resnet'):
            model_pruning_config.share_mask = _get_resnet_share_mask(model_name)

    if path:
        # Serialize via tf.io.gfile so GCS-style paths work too
        # (inlined from params_dict.save_params_dict_to_yaml).
        with tf.io.gfile.GFile(path, 'w') as out:
            yaml.dump(model_pruning_config.as_dict(), out,
                      default_flow_style=False)

    return model_pruning_config
42f566ce574b53d6effafcec18984c201fba7f92
3,638,628
def collect_FR_dev(stim_array,stim_dt,sim_dt,spikemon,n,return_spikes=False): """ get all firing rates for a given spikemon stim_array: array of stimulation time/strengths, e.g., [0,0,0,0,1,0,0,0,1,0,0] stim_dt: time interval of stimulation sim_dt: time interval of simulation spikemon_t: time array from spike monitor returns: spikelist: (n,len(stim_start_times)) matrix of spike counts. """ #print 'spikemon.i min',np.amin(spikemon.i),np.amax(spikemon.i) spikemon_t = spikemon # get all stim start times (index position*stim_dt) stim_start_times = np.where(stim_array!=0)[0]*stim_dt # preallocate firing rate array and standard deviation FR_array = np.zeros(len(stim_start_times)) dev_array = np.zeros(len(stim_start_times)) spikelist = np.zeros((n,len(stim_start_times))) for i in range(len(stim_start_times)): FR_array[i],dev_array[i],spikelist[:,i] = get_FR_dev(stim_start_times[i],stim_start_times[i]+stim_dt,sim_dt,spikemon,n) #print 'type',type(dev_array) return FR_array,dev_array,spikelist
05914a68085112e0b1b4353dbd6434fb3e59c7c8
3,638,629
import json def assertDict(s): """ Assert that the input is a dictionary. """ if isinstance(s,str): try: s = json.loads(s) except: raise AssertionError('String "{}" cannot be json-decoded.'.format(s)) if not isinstance(s,dict): raise AssertionError('Variable "{}" is not a dictionary.'.format(s)) return s
302defb4e1eecc9a6171cda0401947e3251be585
3,638,630
import math def _consolidate_subdivide_geometry(geometry, max_query_area_size): """ Consolidate and subdivide some geometry. Consolidate a geometry into a convex hull, then subdivide it into smaller sub-polygons if its area exceeds max size (in geometry's units). Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to consolidate and subdivide max_query_area_size : float max area for any part of the geometry in geometry's units: any polygon bigger will get divided up for multiple queries to API Returns ------- geometry : Polygon or MultiPolygon """ # let the linear length of the quadrats (with which to subdivide the # geometry) be the square root of max area size quadrat_width = math.sqrt(max_query_area_size) if not isinstance(geometry, (Polygon, MultiPolygon)): raise TypeError("Geometry must be a shapely Polygon or MultiPolygon") # if geometry is a MultiPolygon OR a single Polygon whose area exceeds the # max size, get the convex hull around the geometry if isinstance(geometry, MultiPolygon) or ( isinstance(geometry, Polygon) and geometry.area > max_query_area_size ): geometry = geometry.convex_hull # if geometry area exceeds max size, subdivide it into smaller sub-polygons if geometry.area > max_query_area_size: geometry = _quadrat_cut_geometry(geometry, quadrat_width=quadrat_width) if isinstance(geometry, Polygon): geometry = MultiPolygon([geometry]) return geometry
32fbafcf3066cc73c022fc1482030d35387107c2
3,638,631
import logging def deleteAllPatientes(): """Delete all patient record Raises: HTTPException: raises if there is any error in underlying CRUD operation. Returns: null: success response if all the records are successfully deleted """ logging.debug("Router: /patient/all") logging.debug(f"Router deleteAllPatientes") try: return PatientController.delete_all_patients() except PatientException as response_error: raise HTTPException(response_error.status_code, response_error.error_detail)
95eea9cd316c19dd6e73e560e45187ca6b96d06e
3,638,632
import ast from typing import Tuple from typing import List from typing import Any def get_function_args(node: ast.FunctionDef) -> Tuple[List[Any], List[Any]]: """ This functon will process function definition and will extract all arguments used by a given function and return all optional and non-optional args used by the function. Args: node: Function node containing function that needs to be analyzed Returns: (non_optional_args, optional_args): named function args """ assert ( type(node) == ast.FunctionDef ), "Incorrect node type. Expected ast.FunctionDef, got {}".format(type(node)) total_args = len(node.args.args) default_args = len(node.args.defaults) optional_args = [] non_optional_args = [] # Handle positional args for i in range(total_args): if i + default_args < total_args: non_optional_args.append(node.args.args[i].arg) else: optional_args.append(node.args.args[i].arg) # Handle named args for arg in node.args.kwonlyargs: optional_args.append(arg.arg) return non_optional_args, optional_args
a4fe9dccedd5684050a7d5e7949e384dd4021035
3,638,633
def svn_client_copy3(*args): """ svn_client_copy3(svn_commit_info_t commit_info_p, char src_path, svn_opt_revision_t src_revision, char dst_path, svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t """ return apply(_client.svn_client_copy3, args)
336aaec7d6c2a44f22d6b1164316f16b4fd5f53f
3,638,634
import json import copy def delete(server = None, keys = None): """ Marks an entity or entities as deleted on the server. Until an entity is permanently deleted (an administrative operation, not available through the RESTful API), it can still be accessed, but will not turn up in search results. :param server: a :class:`~pyCoalesce.coalesce_request.CoalesceServer` object or the URL of a Coalesce server :param keys: a UUID key of the entity to be deleted, or an iterable of such keys. Each key can be an instance of the :class:`uuid.UUID` class, or any string or integer that could serve as input to the :class:`UUID <uuid.UUID>` class constructor. :returns: ``True`` if the returned status code is 204 (indicating a successful deletion), ``False`` (with a warning) in the unlikely event that the server returns another status code in the 200's. (Any value outside the 200's will cause an exception.) """ if isinstance(server, str): server_obj = CoalesceServer(server) else: server_obj = server # Figure out whether we have one key or an iterable of them, check the # validity of each, and transform them into a JSON array. if keys: # Test for a single key--a list of keys or a JSON array as a string # will cause "_test_key" to throw an error. try: key_str = _test_key(keys) except TypeError: # "keys" is probably a list of keys. keys_list = [_test_key(key) for key in keys] keys_str = json.dumps(keys_list) except ValueError: # "keys" is probably a JSON array of keys. json.loads(keys) # Make sure that "keys" is valid JSON. 
keys_str = keys else: keys_str = '["' + key_str + '"]' operation = "delete" try: API_URL = _construct_URL(server_obj = server_obj, operation = operation) except AttributeError as err: raise AttributeError(str(err) + '\n.This error can occur if the ' + 'argument "server" is not either a URL or a ' + 'CoalesceServer object.') method = OPERATIONS[operation]["method"] headers = copy(server_obj.base_headers) headers["Content-type"] = "application/json" # Submit the request. response = get_response(URL = API_URL, method = method, data = keys_str, headers = headers, delay = 1, max_attempts = 4) # Check for the appropriate status code. status = response.status_code if status == 204: success = True else: warn("The API server returned an unexpected status code, " + status + ". However, the entity might have been deleted on the server, " + "or might be deleted after a delay.", UnexpectedResponseWarning) success = False return success
b3662ba85b1aacfca6034da5d5e198a5ffada2fa
3,638,635
import collections import csv def ParseMemCsv(f): """Compute summary stats for memory. vm5_peak_kib -> max(vm_peak_kib) # over 5 second intervals. Since it uses the kernel, it's accurate except for takes that spike in their last 4 seconds. vm5_mean_kib -> mean(vm_size_kib) # over 5 second intervals """ peak_by_pid = collections.defaultdict(list) size_by_pid = collections.defaultdict(list) # Parse columns we care about, by PID c = csv.reader(f) for i, row in enumerate(c): if i == 0: continue # skip header # looks like timestamp, pid, then (rss, peak, size) _, pid, _, peak, size = row if peak != '': peak_by_pid[pid].append(int(peak)) if size != '': size_by_pid[pid].append(int(size)) mem_by_pid = {} # Now compute summaries pids = peak_by_pid.keys() for pid in pids: peaks = peak_by_pid[pid] vm5_peak_kib = max(peaks) sizes = size_by_pid[pid] vm5_mean_kib = sum(sizes) / len(sizes) mem_by_pid[pid] = (vm5_peak_kib, vm5_mean_kib) return mem_by_pid
5d10a0d0ac5ab3d3e99ff5fd4c9ca6cd0b74656b
3,638,636
def index_containing_substring(list_str, substring): """For a given list of strings finds the index of the element that contains the substring. Parameters ---------- list_str: list of strings substring: substring Returns ------- index: containing the substring or -1 """ for i, s in enumerate(list_str): if substring in s: return i return -1
2816899bc56f6b2c305192b23685d3e803b420df
3,638,637
import pycountry import gettext import six def _localized_country_list_inner(locale): """ Inner function supporting :func:`localized_country_list`. """ if locale == 'en': countries = [(country.name, country.alpha_2) for country in pycountry.countries] else: pycountry_locale = gettext.translation('iso3166-1', pycountry.LOCALES_DIR, languages=[locale]) if six.PY2: countries = [(pycountry_locale.gettext(country.name).decode('utf-8'), country.alpha_2) for country in pycountry.countries] else: countries = [(pycountry_locale.gettext(country.name), country.alpha_2) for country in pycountry.countries] countries.sort() return [(code, name) for (name, code) in countries]
c67b107d10b8bc2426de39f11c30e886b5fc2894
3,638,638
def ingest_questions(questions: dict, assignment: Assignment): """ questions: [ { sequence: int questions: [ { q: str // what is 2*2 a: str // 4 }, ] }, ... ] response = { rejected: [ ... ] ignored: [ ... ] accepted: [ ... ] } :param questions: :param assignment: :return: """ question_shape = {"questions": {"q": str, "a": str}, "sequence": int} if questions is None: return # Iterate over questions rejected, ignored, accepted = [], [], [] for question_sequence in questions: shape_good, err_path = _verify_data_shape(question_sequence, question_shape) if not shape_good: # Reject the question if the shape is bad and continue rejected.append( { "question": question_sequence, "reason": "could not verify data shape " + err_path, } ) continue pool = question_sequence["pool"] for question in question_sequence["questions"]: # Check to see if question already exists for the current # assignment exists = AssignmentQuestion.query.filter( AssignmentQuestion.assignment_id == assignment.id, AssignmentQuestion.question == question["q"], ).first() if exists is not None: # If the question exists, ignore it and continue ignored.append({"question": question, "reason": "already exists"}) continue # Create the new question from posted data assignment_question = AssignmentQuestion( assignment_id=assignment.id, question=question["q"], solution=question["a"], pool=pool, ) db.session.add(assignment_question) accepted.append({"question": question}) # Commit additions db.session.commit() return accepted, ignored, rejected
d72370bcaa5cf1f5017eda827cca6dd011ac36d0
3,638,639
from datetime import datetime def render_book_template(book_id): """ Find a specific book in the database. Locate the associated reviews (sorted by score and date). Create the purchase url. Check whether the user has saved the book to their wishlist. """ # Find the book document in the database this_book = mongo.db.books.find_one({"_id": ObjectId(book_id)}) # Find the reviews that relate to that book this_book_reviews = list( mongo.db.reviews.find({"book_id": ObjectId(book_id)}) ) # Sort by review score and then by date added sorted_book_reviews = sorted( this_book_reviews, key=lambda b: (-b["review_score"], -b["review_date"]), ) # Create the book purchase url # by adding the book title and author to the url this_book_title = this_book["title"].replace(" ", "+") this_book_author = this_book["authors"][0].replace(" ", "+") book_purchase_url = ( "https://www.amazon.com/s?tag=falsetag&k=" + this_book_title + "+" + this_book_author ) # Create a list of users who have reviewed this book already reviewers = [] for book_review in this_book_reviews: # Convert floats to datetime format in each book review book_review["review_date"] = datetime.datetime.fromtimestamp( book_review["review_date"] ).strftime("%a, %b %d, %Y") # Add reviewers to the reviewers list reviewers.append(book_review["created_by"]) bookmark = False purchased = False # If the session cookie exists then the user is logged in if session: # Grab the session user's wishlist from the database wishlist = mongo.db.users.find_one({"username": session["user"]})[ "wishlist" ] # Check to see whether the current user # has already saved this book to their wishlist # If so, remove the bookmark if this_book["_id"] in wishlist: bookmark = True # Check and see whether the current user has reviewed this book # If they have presumably they don't want to purchase the book if session["user"] in reviewers: purchased = True return render_template( "view_book.html", this_book=this_book, this_book_reviews=sorted_book_reviews, 
book_purchase_url=book_purchase_url, reviewers=reviewers, bookmark=bookmark, purchased=purchased, )
d30b30b79b102b1c08404bc00c69b1f22ccebc6a
3,638,640
def iatan2(y,x): """One coordinate must be zero""" if x == 0: return 90 if y > 0 else -90 else: return 0 if x > 0 else 180
a0b18b61d7ffadf864a94299bc4a3a0aacd7c65a
3,638,641
import torch def fuse_bn_sequential(model): """ This function takes a sequential block and fuses the batch normalization with convolution :param model: nn.Sequential. Source resnet model :return: nn.Sequential. Converted block """ if not isinstance(model, torch.nn.Sequential): return model stack = [] for m in model.children(): if isinstance(m, torch.nn.BatchNorm2d): if isinstance(stack[-1], torch.nn.Conv2d): bn_st_dict = m.state_dict() conv_st_dict = stack[-1].state_dict() # BatchNorm params eps = m.eps mu = bn_st_dict['running_mean'] var = bn_st_dict['running_var'] gamma = bn_st_dict['weight'] if 'bias' in bn_st_dict: beta = bn_st_dict['bias'] else: beta = torch.zeros(gamma.size(0)).float().to(gamma.device) # Conv params W = conv_st_dict['weight'] if 'bias' in conv_st_dict: bias = conv_st_dict['bias'] else: bias = torch.zeros(W.size(0)).float().to(gamma.device) denom = torch.sqrt(var + eps) b = beta - gamma.mul(mu).div(denom) A = gamma.div(denom) bias *= A A = A.expand_as(W.transpose(0, -1)).transpose(0, -1) W.mul_(A) bias.add_(b) stack[-1].weight.data.copy_(W) if stack[-1].bias is None: stack[-1].bias = torch.nn.Parameter(bias) else: stack[-1].bias.data.copy_(bias) else: stack.append(m) if len(stack) > 1: return torch.nn.Sequential(*stack) else: return stack[0]
6d31cd2cd73e8dc91098b7f9cc7f70ce3b81a3b9
3,638,642
def get_baseconf_settings( baseconf_settings_filename = None ): """ Returns the basic configuration settings as a parameter structure. :param baseconf_settings_filename: loads the settings from the specified filename, otherwise from the default filename or in the absence of such a file creates default settings from scratch. :return: parameter structure """ # These are the parameters for the general I/O and example cases baseconf_params = pars.ParameterDict() baseconf_params[('baseconf',{},'determines if settings should be loaded from file and visualization options')] if baseconf_settings_filename is not None: print( 'Loading baseconf configuration from: ' + baseconf_settings_filename ) baseconf_params.load_JSON( baseconf_settings_filename ) return baseconf_params else: print( 'Using default baseconf settings from config_parser.py') baseconf_params['baseconf'][('load_default_settings_from_default_setting_files',False,'if set to True default configuration files (in settings directory) are first loaded')] baseconf_params['baseconf'][('load_settings_from_file',True,'if set to True configuration settings are loaded from file')] baseconf_params['baseconf'][('save_settings_to_file',True,'if set to True configuration settings are saved to file')] if not baseconf_params['baseconf']['load_default_settings_from_default_setting_files']: print('HINT: Only compute_settings.json and baseconf_settings.json will be read from file by default.') print('HINT: Set baseconf.load_default_settings_from_default_setting_files to True if you want to use the other setting files in directory settings.') print('HINT: Otherwise the defaults will be as defined in config_parser.py.') return baseconf_params
0b0b829f4923072431b8e73c7fd70e732f17dc30
3,638,643
from blog import blog, user, api,auth import os def create_app(test_config=None): """Create and configure an instance of the Flask application.""" app = Flask(__name__, instance_relative_config=True) app.logger.debug('app.instance_path = %s', app.instance_path) app.config.from_mapping( SECRET_KEY="$%px0vz%84j2y9ztqg^8k8_!8*-372g85z73(art-z#+5l5h1w'",) if test_config is None: # load the instance config, if it exists, when not testing app.config.from_pyfile("config.py", silent=True) else: # load the test config if passed in app.config.update(test_config) # ensure the instance folder exists try: os.makedirs(app.instance_path) except OSError: pass @app.route("/hello") def hello(): return "Hello, World!" # register the database commands app.register_blueprint(blog.bp) app.register_blueprint(user.bp) app.register_blueprint(api.bp) app.register_blueprint(auth.bp) app.add_url_rule("/", endpoint="index") return app
1f4ca85eccf4a5ab226d84470d90ccd0569d16aa
3,638,644
def subtract_background(image, background_image): """Subtracts background image from a specified image. Returns ------- bs_image : np.ndarray of type np.int | shape = [image.shape] Background-subtracted image. """ image = image.copy().astype(np.int) background = background_image.copy().astype(np.int) bs_image = image - background return bs_image.astype(np.int)
c136f78c1f355c2031ef60ca17fb0bd6fc63c94e
3,638,645
import math def batch_genomes(genomes, num_batches, order): """ Populates 2D numpy array with len(rows)==num_batches in {order} major order. Using col is for when you know you are using X number of nodes, and want- to evenly distribute genomes across each node Use row when you want to fill each node, i.e. you give each node 16 cores- and would rather have 3 at 32 and 1 at 16 than 4 at 28 """ total_genomes = len(genomes) # num_batches designates number of rows in col major order # but number of cols in row major order genomes_per_batch = math.ceil(total_genomes/num_batches) batches = np.empty([num_batches, genomes_per_batch], dtype=object) if order == 'col': for i, genome in enumerate(genomes): batches[i%num_batches][i//num_batches] = genome elif order == 'row': for i, genome in enumerate(genomes): batches[i//genomes_per_batch][i%genomes_per_batch] = genome else: raise Exception("Order must be specified as 'col' or 'row'") return batches
00fea150ea20ae886fd72f099a1c0bd4216ba987
3,638,646
def single_gate_params(gate, params=None): """Apply a single qubit gate to the qubit. Args: gate(str): the single qubit gate name params(list): the operation parameters op['params'] Returns: a tuple of U gate parameters (theta, phi, lam) """ if gate == 'U' or gate == 'u3': return (params[0], params[1], params[2]) elif gate == 'u2': return (np.pi/2, params[0], params[1]) elif gate == 'u1': return (0., 0., params[0]) elif gate == 'id': return (0., 0., 0.)
153459403639103cdfa9502a26797e9c536ba112
3,638,647
def check_copr_build(build_id: int) -> bool: """ Check the copr_build with given id and refresh the status if needed. Used in the babysit task. :param build_id: id of the copr_build (CoprBuildModel.build.id) :return: True if in case of successful run, False when we need to retry """ logger.debug(f"Getting copr build ID {build_id} from DB.") builds = CoprBuildModel.get_all_by_build_id(build_id) if not builds: logger.warning(f"Copr build {build_id} not in DB.") return True copr_client = CoprClient.create_from_config_file() build_copr = copr_client.build_proxy.get(build_id) if not build_copr.ended_on: logger.info("The copr build is still in progress.") return False logger.info(f"The status is {build_copr.state!r}.") for build in builds: if build.status != "pending": logger.info( f"DB state says {build.status!r}, " "things were taken care of already, skipping." ) continue chroot_build = copr_client.build_chroot_proxy.get(build_id, build.target) event = CoprBuildEvent( topic=FedmsgTopic.copr_build_finished.value, build_id=build_id, build=build, chroot=build.target, status=( COPR_API_SUCC_STATE if chroot_build.state == COPR_SUCC_STATE else COPR_API_FAIL_STATE ), owner=build.owner, project_name=build.project_name, pkg=build_copr.source_package.get( "name", "" ), # this seems to be the SRPM name timestamp=chroot_build.ended_on, ) job_configs = get_config_for_handler_kls( handler_kls=CoprBuildEndHandler, event=event, package_config=event.get_package_config(), ) for job_config in job_configs: CoprBuildEndHandler( package_config=event.package_config, job_config=job_config, data=EventData.from_event_dict(event.get_dict()), copr_event=event, ).run() return True
792b25d6a15d25d0bae3ccdb942d95512139b70b
3,638,648
import sys def net_to_graph(net): """ Convert Net object from parse_net_file to graph represented (as per dijkstra.py) as dict of dicts where G[v][w] for any v,w is cost of edge from v to w. Here v and w are just integers (node numbers). Parameters: net (in/OUT) - Net object as returned by parse_net_file() duplicate entries in the links list are removed Return value: graph (dict of dicts) as described above """ sys.stderr.write('[debug]: net_to_graph edges = ' + str(len(net.links))+'\n') netgraph = dict((i, {}) for i in xrange(1,net.num_nodes+1)) delete_links = {} # dict { (initnode,termnode):seen } to delete after first for link in net.links: if (netgraph.has_key(link.init_node) and netgraph[link.init_node].has_key(link.term_node)): sys.stderr.write('WARNING: duplicate link %d -> %d\n' % (link.init_node, link.term_node)) sys.stderr.write(' using first link only\n') delete_links[(link.init_node, link.term_node)] = False else: netgraph[link.init_node][link.term_node] = link.cost # now rebuild net.links without duplicate links (this happend on # Berlin data) net_links_copy = [] for link in net.links: # net_links_copy.append(copy.deepcopy(link)) # neither copy.copy() nor copy.deepcopy() actually seem to work at all # whether on lists or the objects in the list, have to do it manually. 
copylink = Link() copylink.init_node = link.init_node copylink.term_node = link.term_node copylink.capacity = link.capacity copylink.length = link.length copylink.free_flow_time = link.free_flow_time copylink.B = link.B copylink.power = link.power copylink.speed_limit = link.speed_limit copylink.toll = link.toll copylink.linktype = link.linktype copylink.cost = link.cost net_links_copy.append(copylink) net.links = [] for link in net_links_copy: if (not delete_links.has_key((link.init_node, link.term_node)) or not delete_links[(link.init_node, link.term_node)]): net.links.append(link) delete_links[(link.init_node,link.term_node)] = True if net.num_links != len(net.links): sys.stderr.write('WARNING: had %d links, now %d after deleting duplicates\n' % (len(net_links_copy), len(net.links))) net.num_links = len(net.links) return netgraph
3f27a53eb0a0c49deddb5d88af4e1bdb999ca716
3,638,649
import time def runTests(data, targets, pipeline, parameters): """ Perform grid search with specified pipeline and parameters on data training set with targets as labels Evaluate performance based on precision and print parameters for best estimator grid search object is returned for further analysis""" grid_search = GridSearchCV(pipeline, parameters, verbose=1, cv=10, scoring='precision') print("Performing grid search...") print("pipeline:", [name for name, _ in pipeline.steps]) print("parameters:") print(parameters) t0 = time() grid_search.fit(data, targets) print("done in %0.3fs" % (time() - t0)) print() print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) return grid_search
122d1330444dd72aa064cd263cb7f405bf2bf9ba
3,638,650
def isMultipleTagsInput(item): """ Returns True if the argument datatype is not a column or a table, and if it allows lists and if it has no permitted value. This function is used to check whether the argument values have to be delimited by the null character (returns True) or not. :param item: Table argument. """ return item.get('datatype', 'STRING') in ['STRING','DOUBLE','INTEGER','DRIVER','SQLEXPR', 'LONG']\ and item.get('allowsLists', False)\ and not item.get('permittedValues', [])
f7710902e27962fc8df55bc75be2d5d404144aeb
3,638,651
def imagenet_vgg_compression(compression_config, var_config, overall_model, muVes, strategy, optimizer, verbose=True): """ :param compression_config: :param var_config: :param overall_model: :param muVes: :param strategy: :param optimizer: :param verbose: :return: """ cc = compression_config # Get information about the layers in the original network all_layers = [] n_full = {} last_conv = "" for l in overall_model.layers: # note: the input layer is not included in overall_model.layers if 'conv' in l.name or 'fc' in l.name or 'output' in l.name: n_full[l.name] = l.output_shape[-1] if 'conv' in l.name: last_conv = l.name all_layers.append(l.name) # Verify necessary config constraints cc_keys = list(cc.keys()) cc_values = list(cc.values()) assert cc_keys == list(n_full.keys()), 'compression config must have keys for all compute layers (conv, fc, output)' assert cc_values[0][0] is None, 'currently we are not considering compression of the network input' for index in range(len(cc_keys)): if cc_keys[index] in ['output']: continue if cc_values[index][1] is not None: assert cc_values[index+1][0] is not None, 'to kill outputs the next layer must perform PCA compression' assert cc_values[-1][1] is None, 'can not kill columns in the output layer' assert len(cc_keys)-1 == len(muVes), 'PCA for each compute layer (except conv1) should already be computed' # Go through the layers and create the (possibly) compressed layers # TODO: model creation inside distribute strategy with strategy.scope(): compressed_model = tf.keras.Sequential() compressed_model.add(tf.keras.layers.InputLayer(input_shape=overall_model.layers[0].input_shape[1:], name='input')) mu_c, V_c, W_n = None, None, None for index in range(len(cc_keys)): prev_layer = cc_keys[index-1] if index-1 >= 0 else None curr_layer = cc_keys[index] next_layer = cc_keys[index+1] if index+1 <= len(cc_keys)-1 else None n_c = n_full[curr_layer] W_c = overall_model.get_layer(curr_layer).weights[0] if W_n is None else W_n b_c = 
overall_model.get_layer(curr_layer).weights[1] W_n = overall_model.get_layer(next_layer).weights[0] if next_layer is not None else None mu_p, V_p = mu_c, V_c # previous layer could have modified mu, V if it killed columns conv_c = True if 'conv' in curr_layer else False # generic iteration if cc[curr_layer][0] is not None and cc[prev_layer][1] is None: mu_p, V_p, e_p = muVes[index-1] # want output of previous layer (input to this layer), recall offset if cc[curr_layer][1] is not None: mu_c, V_c, e_c = muVes[index] # want mu, V output of this layer (input to next layer), recall offset num, ut = decode_cc(cc[curr_layer][1]) n_c, W_c, b_c, mu_c, V_c, W_n = tf_kill_outputs(W_c, b_c, mu_c, V_c, W_n, num, num_as_threshold=ut, conv=conv_c, conv_to_dense=curr_layer == last_conv, verbose=verbose, prefix=' {}'.format(curr_layer)) else: mu_c, V_c = None, None # add layer activation = None if curr_layer == 'output' else 'relu' if conv_c and cc[curr_layer][0] is not None: W_c_p, b_c_p = tf_transform_conv_weights(mu_p, V_p, W_c, b_c) compressed_model.add(Conv2DPCALayer(int(n_c), 3, mu_p, V_p, kernel_initializer=cift(W_c_p), bias_initializer=cift(b_c_p), activation='relu', name=curr_layer)) elif conv_c: compressed_model.add(tf.keras.layers.Conv2D(int(n_c), 3, padding='same', kernel_initializer=cift(W_c), bias_initializer=cift(b_c), activation='relu', name=curr_layer)) elif cc[curr_layer][0] is not None: W_c_p, b_c_p = tf_transform_dense_weights(mu_p, V_p, W_c, b_c) compressed_model.add(DensePCALayer(int(n_c), mu_p, V_p, kernel_initializer=cift(W_c_p), bias_initializer=cift(tf.squeeze(b_c_p)), activation=activation, name=curr_layer)) else: compressed_model.add(tf.keras.layers.Dense(int(n_c), kernel_initializer=cift(W_c), bias_initializer=cift(b_c), activation=activation, name=curr_layer)) # add extra layers if necessary c_i = all_layers.index(curr_layer) n_i = all_layers.index(next_layer) if next_layer is not None else len(all_layers) while c_i < n_i - 1: c_i += 1 if 'mp' in 
all_layers[c_i]: compressed_model.add(tf.keras.layers.MaxPooling2D((2, 2), name=all_layers[c_i])) elif 'flatten' in all_layers[c_i]: compressed_model.add(tf.keras.layers.Flatten(name=all_layers[c_i])) # TODO: do you want to add dropout back in, does dropout scaling affect compression?? elif 'dropout' in all_layers[c_i] and var_config['add_dropout_to_compressed_model'] is True: compressed_model.add(tf.keras.layers.Dropout(0.5)) elif 'softmax' in all_layers[c_i]: compressed_model.add(tf.keras.layers.Softmax(name=all_layers[c_i])) # Optimizer # TODO: fix this compressed_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=optimizer, metrics=['accuracy']) # compressed_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), # optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) return compressed_model
27d52928046af28f4243e667887958e679e8655d
3,638,652
def remove_url(url: str = Form(...)): """ Remove url from the url json file :param url: api url in the format: http://ip:port/ :return: ApiResponse """ try: payload = helpers.parse_json(url_config_path) except Exception as e: return ApiResponse(success=False, error=e) if url in payload['urls']: payload['urls'].remove(url) helpers.write_json(payload, url_config_path) return ApiResponse(data={"url removed successfully"}) else: return ApiResponse(success=False, error="url is not present in config file")
c7c218926c2992df19b3988987fca8cf2bbff3d1
3,638,653
def load_CSVdata(messages_filepath, categories_filepath): """ Load and merge datasets messages and categories Inputs: Path to the CSV file containing messages Path to the CSV file containing categories Output: dataframe with merged data containing messages and categories """ #reading messages and categories messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) #merge datasets df = pd.merge(messages,categories,on='id') return df
e216c09ca403545fbc1152a25e966efeb4baeefc
3,638,654
def accept_invite(payload, user): """ Accepts an invite args: payload, user ret: response """ try: invite = Invites.get(payload['invite'])[0] except: return Message( Codes.NOT_FOUND, { 'message': 'There isn\'t any active invite with the given id.' } ) if user['id'] != invite[1]: return Message( Codes.FORBIDDEN, { 'message': 'This invitation was sent to another user.' } ) UsersGroups.insert(user['id'], invite[2]) Invites.close(invite[0]) return Message( Codes.SUCCESS, { 'message': 'You have successfully joined this group.' } )
bff423eeec7f7f527771934de0f32ede0f528948
3,638,655
from typing import Tuple from typing import Dict from typing import List import re def clean_status_output( input: str, ) -> Tuple[bool, Dict[str, str], List[Dict[str, str]]]: # example input """ # Health check: # - dns: rename /etc/resolv.conf /etc/resolv.pre-tailscale-backup.conf: device or resource busy 100.64.0.1 test_domain_1 omnet linux - 100.64.0.2 test_network_1 omnet linux active; relay "syd", tx 1188 rx 1040 """ up = False peers: List[Dict[str, str]] = [] host: Dict[str, str] = {} if "Tailscale is stopped." in input: return up, host, peers elif "unexpected state: NoState" in input: return up, host, peers count = 0 for line in str(input).split("\n"): matches = re.match(r"^\d.+", line) if matches is not None: try: stat_parts = re.split(r"(\s+)", matches.string) entry = {} entry["ip"] = stat_parts[0] entry["hostname"] = stat_parts[2] entry["network"] = stat_parts[4] entry["os"] = stat_parts[6] connection_info_parts = matches.string.split(entry["os"]) entry["connection_info"] = "n/a" connection_info = "" if len(connection_info_parts) > 1: connection_info = connection_info_parts[1].strip() entry["connection_info"] = connection_info entry["connection_status"] = "n/a" if "active" in connection_info: entry["connection_status"] = "active" if "idle" in connection_info: entry["connection_status"] = "idle" entry["connection_type"] = "n/a" if "relay" in connection_info: entry["connection_type"] = "relay" if "direct" in connection_info: entry["connection_type"] = "direct" if count == 0: host = entry count += 1 up = True else: peers.append(entry) except Exception as e: print("Error parsing tailscale status output", e) pass return up, host, peers
bbf100514373595948b0691dff857deb5772f019
3,638,656
def test_tensor_method_mul(): """test_tensor_method_mul""" class Net(Cell): def __init__(self): super(Net, self).__init__() self.sub = P.Sub() def construct(self, x, y): out = x * (-y) return out.transpose() net = Net() x = ms.Tensor(np.ones([5, 3], np.float32)) y = ms.Tensor(np.ones([8, 5, 3], np.float32)) _executor.compile(net, x, y)
89866ebd9311e0bac0e3324b01545507b986751f
3,638,657
def _get_top_artists(session: Session, limit=100): """Gets the top artists by follows of all of Audius""" top_artists = ( session.query(User) .select_from(AggregateUser) .join(User, User.user_id == AggregateUser.user_id) .filter(AggregateUser.track_count > 0, User.is_current) .order_by(desc(AggregateUser.follower_count), User.user_id) .limit(limit) .all() ) return helpers.query_result_to_list(top_artists)
ae6a45e7190995fc35daf62236e73c4bd5c6235f
3,638,658
import os def get_imagenet_lmdb(train_transform, val_transform, test_transform, CONFIG): """ Load lmdb imagenet dataset https://github.com/Fangyh09/Image2LMDB """ train_path = os.path.join(CONFIG.dataset_dir, "train_lmdb", "train.lmdb") val_path = os.path.join(CONFIG.dataset_dir, "val_lmdb", "val.lmdb") test_path = os.path.join(CONFIG.dataset_dir, "test_lmdb", "test.lmdb") train_data = ImageFolderLMDB(train_path, train_transform, None) val_data = ImageFolderLMDB(val_path, val_transform, None) test_data = ImageFolderLMDB(test_path, test_transform, None) return train_data, val_data, test_data
8d3b66f9436db2d072e097d98cb44693c3bf001b
3,638,659
def _get_other_locations(): """Returns all locations except convention venues.""" if 'all' not in location_cache.keys(): conv_venue = LocationType.objects.get(name='Convention venue') location_cache['all'] = Location.objects.exclude(loc_type=conv_venue) return location_cache['all']
a34bf432529a31bc013988c394230e55b01ac21b
3,638,660
import torch def _check_cuda_version(): """ Make sure that CUDA versions match between the pytorch install and torchvision install """ if not _HAS_OPS: return -1 _version = torch.ops.torchvision._cuda_version() if _version != -1 and torch.version.cuda is not None: tv_version = str(_version) if int(tv_version) < 10000: tv_major = int(tv_version[0]) tv_minor = int(tv_version[2]) else: tv_major = int(tv_version[0:2]) tv_minor = int(tv_version[3]) t_version = torch.version.cuda t_version = t_version.split('.') t_major = int(t_version[0]) t_minor = int(t_version[1]) if t_major != tv_major or t_minor != tv_minor: raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. " "PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. " "Please reinstall the torchvision that matches your PyTorch install." .format(t_major, t_minor, tv_major, tv_minor)) return _version
d86e209d10514060f0c15bff9ea28df6b2054480
3,638,661
def largest(layer,field): """largest(layer,field) Returns the largest area significant class in the study area. """ theitems = [] rows = arcpy.SearchCursor(layer) for row in rows: theitems.append(row.getValue(field)) del rows theitems.sort() max1= theitems[-1] return max1
ff6433a5fef48550e902317384dec746136063dc
3,638,662
import asyncio import random async def double_up(ctx): """ 「ダブルアップチャンス!」を開始します。 """ depth = 1 # 現在の階層 HOLE = "\N{HOLE}\N{VARIATION SELECTOR-16}" LEFT_ARROW = "\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}" RIGHT_ARROW = "\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}" TOP_ARROW = "\N{UPWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}" emojis = [LEFT_ARROW,RIGHT_ARROW] # 通常の穴選択用絵文字リスト final_emojis = [LEFT_ARROW,TOP_ARROW,RIGHT_ARROW] # 最後の穴選択用絵文字リスト def gold_check(msg): # 掛け金の入力チェック用 return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.isdecimal() embed = discord.Embed(title="ダブルアップ",description=f"{ctx.author.mention} 掛け金を入力してください。",color=0x0000ff) await ctx.send(embed=embed) try: gold_msg = await bot.wait_for("message",check=gold_check,timeout=30.0) except asyncio.TimeoutError: embed = discord.Embed(title="エラー", description=f"{ctx.author.mention} 掛け金の正常な入力が確認されませんでした。コマンドの処理を終了します。", color=0xff0000) await ctx.send(embed=embed) return gold = int(gold_msg.content) embed = discord.Embed(title=f"どちらの穴に入るか選ぼう!(このテキストのリアクションをタッチして選択)({depth}回目)", description=f"{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}",color=0x00ff00) embed.set_footer(text=f"掛け金:{gold * 2} G") game_msg = await ctx.send(embed=embed) # ゲーム用メッセージ。以降はこれを編集してゲームを表現する。 while depth < 5: await game_msg.edit(embed=embed) for emoji in emojis: await game_msg.add_reaction(emoji) # 穴選択用絵文字でリアクションする def hole_check(reaction,user): # 穴の入力チェック用 react_msg = reaction.message are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel # メッセージの同一性 return are_same_msgs and user == ctx.author and str(reaction.emoji) in emojis try: hole_react,user = await bot.wait_for("reaction_add",check=hole_check,timeout=30.0) except asyncio.TimeoutError: embed = discord.Embed(title="エラー", description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。", color=0xff0000) await ctx.send(embed=embed) return if random.randrange(2) == 0: # 2分の1の確率ではずれを引く embed = 
discord.Embed(title=f"はずれー!!", description=f"{ctx.author.mention} 懲りずに、また挑戦してみてね!",color=0x00ff00) await ctx.send(embed=embed) return depth += 1 gold *= 2 await hole_react.remove(user) embed = discord.Embed(title=f"当たり!次の穴を選んでね!({depth}回目)", description=f"{ctx.author.mention}\n{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}", color=random.randrange(0xffffff)) embed.set_footer(text=f"次の掛け金:{gold * 2} G") embed = discord.Embed(title=f"当たり!次の穴が最後!({depth}回目)", description=f"{HOLE}\t{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{TOP_ARROW}\t{RIGHT_ARROW}", color=random.randrange(0xffffff)) embed.set_footer(text=f"掛け金:{gold * 2} G") await game_msg.edit(embed=embed) await game_msg.clear_reactions() # 最後は中間にもう一つ穴が追加されるので、全てのリアクションを削除しておく for emoji in final_emojis: await game_msg.add_reaction(emoji) def hole_check_final(reaction,user): # 最後の穴の入力チェック用 react_msg = reaction.message are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel return are_same_msgs and user == ctx.author and str(reaction.emoji) in final_emojis try: await bot.wait_for("reaction_add",check=hole_check_final,timeout=30.0) except asyncio.TimeoutError: embed = discord.Embed(title="エラー", description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。", color=0xff0000) await ctx.send(embed=embed) return if random.randrange(3) != 2: # 3分の2の確率ではずれを引く embed = discord.Embed(title="はずれ。", description=f"{ctx.author.mention} 君たちは一体今までにいくら貢いだんだろうね",color=0x00ff00) await ctx.send(embed=embed) return gold *= 2 embed = discord.Embed(title="おめでとう!", description=f"{ctx.author.mention} **{gold}** G入手したよ!\n達成できたのは今回で…何回目だったっけ",color=0x0000ff) await ctx.send(embed=embed)
3ec1394d681fcb0e626e98b11bd815dddbe64254
3,638,663
def read_version(file_contents): """Read the project setting from pyproject.toml.""" data = tomlkit.loads(file_contents) details = data["tool"]["poetry"] return details["version"]
7255c199463437765d21658f792a54049bbb45ee
3,638,664
from datetime import datetime def schedule_time(check_start_time, check_end_time, time_duaration=7) -> dict: """ Returns dictionary of earliest available time within the next week """ all_busy_events = get_busy_events() for d in range(1,time_duaration): # Increment by one day throughout the week check_day = datetime.today().date() + timedelta(d) if all_busy_events: # ! still something wrong is_day_free = [] is_time_overlapping = False is_time_free = True for start,end in [event for event in all_busy_events if event[0].date() == check_day]: is_time_overlapping = is_time_between(check_start_time, check_end_time, start.time()) and is_time_between(check_start_time, check_end_time, end.time()) is_time_free = not is_time_between(start.time(), end.time(), check_start_time) and not is_time_between(start.time(), end.time(), check_end_time) is_day_free.append(is_time_free) if all(is_day_free) and not is_time_overlapping: appointment_start = datetime.combine(check_day, check_start_time) appointment_end = datetime.combine(check_day, check_end_time) return {"start": appointment_start, "end": appointment_end} else: # Schedule time for tomorrrow if no busy events within the next week return {"start": datetime.combine(check_day, check_start_time), "end": datetime.combine(check_day, check_end_time)}
858387e8d07634df7a143b3ee500e648ed54abd6
3,638,665
import array def _to_array(value): """When `value` is a plain Python sequence, return it as a NumPy array.""" if not hasattr(value, 'shape') and hasattr(value, '__len__'): return array(value) else: return value
3bf185f34c51dc2042bdb05138b0febd9e89b421
3,638,666
def dict_pix_to_deg(input_dict, changeN): """Convert pix to deg for a given dictionary format, changeN is 1 or 2, to let the function works for the first or both elements of the tuple""" dict_deg = {} for key, values in input_dict.items(): new_display = [] for display in values: new_posi = [] for posi in display: new_posi.append(__pix_to_deg_tuple(posi, changeN)) new_display.append(new_posi) dict_deg.update({key: new_display}) return dict_deg
03c49e113c8805c4d675899f3c61e3ae00bd7681
3,638,667
def remove_container_name_from_blob_path(blob_path, container_name): """ Get the bit of the filepath after the container name. """ # container name will often be part of filepath - we want # the blob name to be the bit after that if not container_name in blob_path: return blob_path blob_name_parts = [] filepath_parts = split_filepath(blob_path) container_name_found = False for path_part in filepath_parts: if container_name_found: blob_name_parts.append(path_part) if path_part == container_name: container_name_found = True if len(blob_name_parts) == 0: return "" return "/".join(blob_name_parts)
e02807abebdf3a193efcabee1dda3f733a780dd5
3,638,668
from typing import Dict from typing import List def _complex_ar_from_dict(dic: Dict[str, List]) -> np.ndarray: """Construct complex array from dictionary of real and imaginary parts""" out = np.array(dic["real"], dtype=complex) out.imag = np.array(dic["imag"], dtype=float) return out
a3938619f84c2dcbec5c9ac90c0064ea380346c4
3,638,669
def endpoint(url_pattern, method="GET"): """ :param url_pattern: :param method: :param item: :return: """ def wrapped_func(f): @wraps(f) def inner_func(self, *args, **kwargs): """ :param self: :param args: :param kwargs: :return: """ func_params = translate_params(f, *args, **kwargs) params = translate_special_params(func_params, self.special_attributes_map) response = None if method == "GET": response = self._get(url_pattern, params=params) elif method == "POST": response = self._post(url_pattern, params=params) if response: try: if response.headers["Content-Type"] == "application/json": return response.json() else: return response.text except Exception as e: return response.content return inner_func return wrapped_func
23b68a1440e96eac27926f2a37d96cb74a568734
3,638,670
def elastic_transform( image, alpha, sigma, alpha_affine, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, random_state=None, approximate=False, ): """Elastic deformation of images as described in [Simard2003]_ (with modifications). Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5 .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for Convolutional Neural Networks applied to Visual Document Analysis", in Proc. of the International Conference on Document Analysis and Recognition, 2003. """ if random_state is None: random_state = np.random.RandomState(1234) height, width = image.shape[:2] # Random affine center_square = np.float32((height, width)) // 2 square_size = min((height, width)) // 3 alpha = float(alpha) sigma = float(sigma) alpha_affine = float(alpha_affine) pts1 = np.float32( [ center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size], center_square - square_size, ] ) pts2 = pts1 + random_state.uniform( -alpha_affine, alpha_affine, size=pts1.shape ).astype(np.float32) matrix = cv2.getAffineTransform(pts1, pts2) image = cv2.warpAffine( image, matrix, (width, height), flags=interpolation, borderMode=border_mode ) if approximate: # Approximate computation smooth displacement map with a large enough kernel. # On large images (512+) this is approximately 2X times faster dx = random_state.rand(height, width).astype(np.float32) * 2 - 1 cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx) dx *= alpha dy = random_state.rand(height, width).astype(np.float32) * 2 - 1 cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy) dy *= alpha else: dx = np.float32( gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha ) dy = np.float32( gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha ) x, y = np.meshgrid(np.arange(width), np.arange(height)) mapx = np.float32(x + dx) mapy = np.float32(y + dy) return cv2.remap(image, mapx, mapy, interpolation, borderMode=border_mode)
e60754cc898d83051164180b1d07ac0e4c688946
3,638,671
import re from orangecontrib.xoppy.util.xoppy_xraylib_util import f0_xop def crystal_atnum(list_AtomicName, unique_AtomicName, unique_Zatom,list_fraction, f0coeffs): """ To get the atom and fractional factor in diffierent sites list_AtomicName: list of all atoms in the crystal unique_AtomicName: list of unique atomicname in the list unique_Zatom: list of unique atomic number list_fraction: list of unique fractial factor return: num_e, fract, n_atom, list of number of electrons for atom with same fractional factor, and corresponding fractional factor, atomic number """ num_e = [] fract = [] n_atom = [] n_ATUM = [] for k,x in enumerate(unique_AtomicName): tmp1 = re.search('(^[a-zA-Z]*)',x) if tmp1.group(0) == x: #AtomicName only, without valence info (i.e., B, Y, O) f0 = f0_xop(unique_Zatom[k]) else: #f0 = f0_xop(0,AtomicName=x) f0 = f0coeffs[x] icentral = int(len(f0)/2) F000 = f0[icentral] for i in range(icentral): F000 += f0[i] a=[list_fraction[i] for i,v in enumerate(list_AtomicName) if v==x] fac = list(set(a)) for y in fac: n = a.count(y) num_e.append(F000) fract.append(y) n_atom.append(n) n_ATUM.append(unique_Zatom[k]) return num_e.copy(), fract.copy(), n_atom.copy(),n_ATUM.copy()
f9805890971e1c2e6696084fad5f8b9071999046
3,638,672
def integrate(name, var): """ given filename and var, generate profile """ d = vtk.vtkExodusIIReader() d.SetFileName(name) d.UpdateInformation() d.SetPointResultArrayStatus(var,1) d.Update() blocks = d.GetOutput().GetNumberOfBlocks() data = d.GetOutput() # range to integrate at height = 0.804380714893 thresh = 0.004 rmin = 0.0 rmax = 1.0 nr = 10 dr = (rmax-rmin)/nr rint = np.zeros(nr) rn = np.ones(nr) for j in xrange(blocks): blk = data.GetBlock(0).GetBlock(j) pts = blk.GetNumberOfPoints() pt_data = blk.GetPointData().GetArray(var) for i in xrange(pts): # gather x,y,z location z,y,x = blk.GetPoint(i) # gather point scalar value u = pt_data.GetValue(i) # now, find all values near the target height # (convert to cylindrical) if(abs(z - height) < thresh): r = np.sqrt((x)**2 + (y)**2) fr = np.floor(r/dr) rint[fr] += u rn [fr] += 1 return rint/rn
116993d18c4430f6ce0e7dacba8b73ef3a03f689
3,638,673
def mediate(timer: TimerBase, decimals: int | None) -> int: """If the start function doesn't have decimals defined, then use the decimals value defined when the Timer() was initiated.""" return timer.decimals if decimals is None else validate_and_normalise(decimals)
030ec62071bc4c2bc41ae30c5eb8212b36e0359a
3,638,674
def calculate_n_inputs(inputs, config_dict): """ Calculate the number of inputs for a particular model. """ input_size = 0 for input_name in inputs: if input_name == 'action': input_size += config_dict['prior_args']['n_variables'] elif input_name == 'state': input_size += config_dict['misc_args']['state_size'] elif input_name == 'reward': input_size += 1 elif input_name in ['params', 'grads']: if config_dict['approx_post_args']['constant_scale']: input_size += config_dict['prior_args']['n_variables'] else: input_size += 2 * config_dict['prior_args']['n_variables'] return input_size
78d750ff4744d872d696dcb454933c868b0ba41e
3,638,675
from typing import Sequence from typing import Tuple def clustering( adata: ad.AnnData, resolutions: Sequence[float], clustering_method: str = "leiden", cell_type_col: str = "cell_types", batch_col: str = "batch_indices" ) -> Tuple[str, float, float]: """Clusters the data and calculate agreement with cell type and batch variable. This method cluster the neighborhood graph (requires having run sc.pp. neighbors first) with "clustering_method" algorithm multiple times with the given resolutions, and return the best result in terms of ARI with cell type. Other metrics such as NMI with cell type, ARi with batch are logged but not returned. (TODO: also return these metrics) Args: adata: the dataset to be clustered. adata.obsp shouhld contain the keys 'connectivities' and 'distances'. resolutions: a list of leiden/louvain resolution parameters. Will cluster with each resolution in the list and return the best result (in terms of ARI with cell type). clustering_method: Either "leiden" or "louvain". cell_type_col: a key in adata.obs to the cell type column. batch_col: a key in adata.obs to the batch column. Returns: best_cluster_key: a key in adata.obs to the best (in terms of ARI with cell type) cluster assignment column. best_ari: the best ARI with cell type. best_nmi: the best NMI with cell type. """ assert len(resolutions) > 0, f'Must specify at least one resolution.' 
if clustering_method == 'leiden': clustering_func: function = sc.tl.leiden elif clustering_method == 'louvain': clustering_func: function = sc.tl.louvain else: raise ValueError("Please specify louvain or leiden for the clustering method argument.") _logger.info(f'Performing {clustering_method} clustering') assert cell_type_col in adata.obs, f"{cell_type_col} not in adata.obs" best_res, best_ari, best_nmi = None, -inf, -inf for res in resolutions: col = f'{clustering_method}_{res}' clustering_func(adata, resolution=res, key_added=col) ari = adjusted_rand_score(adata.obs[cell_type_col], adata.obs[col]) nmi = normalized_mutual_info_score(adata.obs[cell_type_col], adata.obs[col]) n_unique = adata.obs[col].nunique() if ari > best_ari: best_res = res best_ari = ari if nmi > best_nmi: best_nmi = nmi if batch_col in adata.obs and adata.obs[batch_col].nunique() > 1: ari_batch = adjusted_rand_score(adata.obs[batch_col], adata.obs[col]) _logger.info(f'Resolution: {res:5.3g}\tARI: {ari:7.4f}\tNMI: {nmi:7.4f}\tbARI: {ari_batch:7.4f}\t# labels: {n_unique}') else: _logger.info(f'Resolution: {res:5.3g}\tARI: {ari:7.4f}\tNMI: {nmi:7.4f}\t# labels: {n_unique}') return f'{clustering_method}_{best_res}', best_ari, best_nmi
c94efe865ca2ee52c7dfeeef40e46bc20786e284
3,638,676
from datetime import datetime async def login_swagger(form_data: OAuth2PasswordRequestForm, db: AsyncIOMotorClient) -> LoginUserReplyModel: """ Login route, returns Bearer Token. SWAGGER FRIENDLY. Due to the swagger Api not letting me add otp as a required parameter the otp needs to be added to the the end of the password ex. 'passwordotpotp' .. no space just right after and otp is always 6 digits TODO find way to modify swagger to let me add otp separately, no login2 needed """ password = form_data.password[:-6] # exclude the last 6 digits otp = form_data.password[-6:] # include only the last 6 digits user: UserModelDB = await get_user_by_email(form_data.username, db) # username is email is_user_auth = authenticate_user(user, password=password, otp=otp) if not is_user_auth: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect Authentication Data", headers={"WWW-Authenticate": "Bearer"}, ) # create access token access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token(user_id=str(user.id), expires_delta=access_token_expires) # update db with last_login time and set the user to is_active=True await db["users"].update_one({"email": form_data.username}, {"$set": { "lastLogin": datetime.now().strftime("%m/%d/%y %H:%M:%S"), "isActive": "true" }}) reply = LoginUserReplyModel( user=ShowUserModel.parse_obj(user), access_token=access_token, token_type="Bearer" ) return reply
b6452d28570ebbd3b09c52690da34ea6b7d5d1a6
3,638,677
def coords(lat: float, lon: float, alt: float = None ) -> str: """Turn longitude, latitude into a printable string.""" txt = "%2.4f%s" % (abs(lat), "N" if lat>0 else "S") txt += " %2.4f%s" % (abs(lon), "E" if lon>0 else "W") if alt: txt += " %2.0fm" % alt return txt
c5768e03c55d5f567695056d78108812014b9ef4
3,638,678
def chromosome_to_smiles(): """Wrapper function for simplicity.""" def sc2smi(chromosome): """Generate a SMILES string from a list of SMILES characters. To be customized.""" silyl = "([Si]([C])([C])([C]))" core = chromosome[0] phosphine_1 = ( "(P(" + chromosome[1] + ")(" + chromosome[2] + ")(" + chromosome[3] + "))" ) phosphine_2 = ( "(P(" + chromosome[4] + ")(" + chromosome[5] + ")(" + chromosome[6] + "))" ) smiles = "{0}{1}{2}{3}".format(core, phosphine_1, phosphine_2, silyl) return smiles return sc2smi
793995484c46295977f1d312c4fa11f69bca6c84
3,638,679
def softmax_edges(graph, feat): """Apply batch-wise graph-level softmax over all the values of edge field :attr:`feat` in :attr:`graph`. Parameters ---------- graph : DGLGraph The graph. feat : str The feature field. Returns ------- tensor The tensor obtained. Examples -------- >>> import dgl >>> import torch as th Create two :class:`~dgl.DGLGraph` objects and initialize their edge features. >>> g1 = dgl.DGLGraph() # Graph 1 >>> g1.add_nodes(2) >>> g1.add_edges([0, 1], [1, 0]) >>> g1.edata['h'] = th.tensor([[1., 0.], [2., 0.]]) >>> g2 = dgl.DGLGraph() # Graph 2 >>> g2.add_nodes(3) >>> g2.add_edges([0, 1, 2], [1, 2, 0]) >>> g2.edata['h'] = th.tensor([[1., 0.], [2., 0.], [3., 0.]]) Softmax over edge attribute :attr:`h` in a batched graph. >>> bg = dgl.batch([g1, g2], edge_attrs='h') >>> dgl.softmax_edges(bg, 'h') tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.]) [0.7311, 0.5000], # [0.5000, 0.5000] = softmax([0., 0.]) [0.0900, 0.3333], # [0.0900, 0.2447, 0.6652] = softmax([1., 2., 3.]) [0.2447, 0.3333], # [0.3333, 0.3333, 0.3333] = softmax([0., 0., 0.]) [0.6652, 0.3333]]) Softmax over edge attribute :attr:`h` in a single graph. >>> dgl.softmax_edges(g1, 'h') tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.]) [0.7311, 0.5000]]), # [0.5000, 0.5000] = softmax([0., 0.]) Notes ----- If the input graph has batch size greater then one, the softmax is applied at each example in the batch. """ return _softmax_on(graph, 'edges', feat)
f5dafccca3c487756deeb37f534ef178cf1de75f
3,638,680
def command_result_processor_parameter_required(command_line_parameter): """ Command result message processor if a parameter stays unsatisfied. Parameters ---------- command_line_parameter : ``CommandLineParameter`` Respective command parameter. Returns ------- message : `str` """ message_parts = [] message_parts.append('Parameter: ') message_parts.append(repr(command_line_parameter.name)) message_parts.append(' is required.\n') return ''.join(message_parts)
fed1b7af60018cb5638e021365ae754477b7a241
3,638,681
def randdirichlet(a): """ Python implementation of randdirichlet.m using randomgamma fucnction :param a: vector of weights (shape parameters to the gamma distribution) """ try: x = rand.randomgamma(a) except ValueError: a[a == 0] += 1e-16 x = rand.randomgamma(a) x /= x.sum(axis=0) return x
c825fb81c07337231f49437a2bea7ddd5a40234f
3,638,682
def home(request): """ This is the home page request """ return render(request, 'generator/home.html')
ad8b69871d484c16583752d029fec2970084e698
3,638,683
def print_insn_mnem(ea): """ Get instruction mnemonics @param ea: linear address of instruction @return: "" - no instruction at the specified location @note: this function may not return exactly the same mnemonics as you see on the screen. """ res = ida_ua.ua_mnem(ea) if not res: return "" else: return res
4c60e853356217c2fbfdd21047429c729b57f10f
3,638,684
def setdim(P, dim=None): """ Adjust the dimensions of a polynomial. Output the results into Poly object Args: P (Poly) : Input polynomial dim (int) : The dimensions of the output polynomial. If omitted, increase polynomial with one dimension. If the new dim is smaller then P's dimensions, variables with cut components are all cut. Examples: >>> x,y = chaospy.variable(2) >>> P = x*x-x*y >>> print(chaospy.setdim(P, 1)) q0^2 """ P = P.copy() ldim = P.dim if not dim: dim = ldim+1 if dim==ldim: return P P.dim = dim if dim>ldim: key = np.zeros(dim, dtype=int) for lkey in P.keys: key[:ldim] = lkey P.A[tuple(key)] = P.A.pop(lkey) else: key = np.zeros(dim, dtype=int) for lkey in P.keys: if not sum(lkey[ldim-1:]) or not sum(lkey): P.A[lkey[:dim]] = P.A.pop(lkey) else: del P.A[lkey] P.keys = sorted(P.A.keys(), key=sort_key) return P
610138c1d1a13112d35583d758cac43c1e296d18
3,638,685
def extract_file_from_zip(zipfile, filename): """ Returns the compressed file `filename` from `zipfile`. """ raise NotImplementedError() return None
dc7b1e5a196a019d1fd2274155e0404b03b09702
3,638,686
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height, image_width): """Transforms the box masks back to full image masks. Embeds masks in bounding boxes of larger masks whose shapes correspond to image shape. Args: box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width]. boxes: A tf.float32 tensor of size [num_masks, 4] containing the box corners. Row i contains [ymin, xmin, ymax, xmax] of the box corresponding to mask i. Note that the box corners are in normalized coordinates. image_height: Image height. The output mask will have the same height as the image height. image_width: Image width. The output mask will have the same width as the image width. Returns: A tf.float32 tensor of size [num_masks, image_height, image_width]. """ # TODO(rathodv): Make this a public function. def reframe_box_masks_to_image_masks_default(): """The default function when there are more than 0 box masks.""" def transform_boxes_relative_to_boxes(boxes, reference_boxes): boxes = tf.reshape(boxes, [-1, 2, 2]) min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1) max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1) transformed_boxes = (boxes - min_corner) / (max_corner - min_corner) return tf.reshape(transformed_boxes, [-1, 4]) box_masks_expanded = tf.expand_dims(box_masks, axis=3) num_boxes = tf.shape(box_masks_expanded)[0] unit_boxes = tf.concat( [tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1) reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes) return tf.image.crop_and_resize( image=box_masks_expanded, boxes=reverse_boxes, box_ind=tf.range(num_boxes), crop_size=[image_height, image_width], extrapolation_value=0.0) image_masks = tf.cond( tf.shape(box_masks)[0] > 0, reframe_box_masks_to_image_masks_default, lambda: tf.zeros([0, image_height, image_width, 1], dtype=tf.float32)) return tf.squeeze(image_masks, axis=3)
b8fb1d255b01bfbd8f89dbf1a31c797342279e0f
3,638,687
import torch def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None): """ Args: cls_scores: (N, num_class) box_preds: (N, 7 + C) nms_config: score_thresh: Returns: """ pred_scores, pred_labels, pred_boxes = [], [], [] for k in range(cls_scores.shape[1]): if score_thresh is not None: scores_mask = (cls_scores[:, k] >= score_thresh) box_scores = cls_scores[scores_mask, k] cur_box_preds = box_preds[scores_mask] else: box_scores = cls_scores[:, k] cur_box_preds = box_preds selected = [] if box_scores.shape[0] > 0: box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])) boxes_for_nms = cur_box_preds[indices] keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)( boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config ) selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]] pred_scores.append(box_scores[selected]) pred_labels.append(box_scores.new_ones(len(selected)).long() * k) pred_boxes.append(cur_box_preds[selected]) pred_scores = torch.cat(pred_scores, dim=0) pred_labels = torch.cat(pred_labels, dim=0) pred_boxes = torch.cat(pred_boxes, dim=0) return pred_scores, pred_labels, pred_boxes
a0451c3769b4415e7e7d184d43f6b8f121b651b1
3,638,688
def check_str_length(str_to_check, limit=MAX_LENGTH): """Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count. """ str_bytes = str_to_check.encode(UTF8) str_len = len(str_bytes) truncated_byte_count = 0 if str_len > limit: truncated_byte_count = str_len - limit str_bytes = str_bytes[:limit] result = str(str_bytes.decode(UTF8, errors='ignore')) return (result, truncated_byte_count)
73ae59241a18c5398c041c2c332c932831a39c55
3,638,689
async def create_or_update( hub, ctx, name, resource_group, prefix_length, sku="standard", public_ip_address_version="IPv4", zones=None, **kwargs, ): """ .. versionadded:: 4.0.0 Creates or updates a static or dynamic public IP prefix. :param name: The name of the public IP prefix. :param resource_group: The resource group of the public IP prefix. :param prefix_length: An integer representing the length of the Public IP Prefix. This value is immutable once set. If the value of the ``public_ip_address_version`` parameter is "IPv4", then possible values include 28, 29, 30, 31. If the value of the ``public_ip_address_version`` parameter is "IPv6", then possible values include 124, 125, 126, 127. :param sku: The name of a public IP prefix SKU. Possible values include: "standard". Defaults to "standard". :param public_ip_address_version: The public IP address version. Possible values include: "IPv4" and "IPv6". Defaults to "IPv4". :param zones: A list of availability zones that denotes where the IP allocated for the resource needs to come from. CLI Example: .. code-block:: bash azurerm.network.public_ip_prefix.create_or_update test_name test_group test_length """ if "location" not in kwargs: rg_props = await hub.exec.azurerm.resource.group.get( ctx, resource_group, **kwargs ) if "error" in rg_props: log.error("Unable to determine location from resource group specified.") return { "error": "Unable to determine location from resource group specified." } kwargs["location"] = rg_props["location"] if sku: sku = {"name": sku.lower()} result = {} netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs) try: prefix_model = await hub.exec.azurerm.utils.create_object_model( "network", "PublicIPPrefix", prefix_length=prefix_length, sku=sku, public_ip_address_version=public_ip_address_version, zones=zones, **kwargs, ) except TypeError as exc: result = { "error": "The object model could not be built. 
({0})".format(str(exc)) } return result try: prefix = netconn.public_ip_prefixes.create_or_update( resource_group_name=resource_group, public_ip_prefix_name=name, parameters=prefix_model, ) prefix.wait() result = prefix.result().as_dict() except CloudError as exc: await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs) result = {"error": str(exc)} except SerializationError as exc: result = { "error": "The object model could not be parsed. ({0})".format(str(exc)) } return result
08fb4db3f585ec8844e28e5fa90867f588cdb91a
3,638,690
def _get_job_name(job_label: str = None) -> str: """Returns Beam runner job name. Args: job_label: A user defined string that helps define the job. Returns: A job name compatible with apache beam runners, including a time stamp to insure uniqueness. """ job_name = 'tfrecorder-' + common.get_timestamp() if job_label: job_label = job_label.replace('_', '-') job_name += '-' + job_label return job_name
3786c532109c880ec9255b82a6a2442cf8414780
3,638,691
def all_not_none(*args): """Shorthand function for ``all(x is not None for x in args)``. Returns True if all `*args` are not None, otherwise False.""" return all(x is not None for x in args)
2d063f39e253a78b28be6857df08d8f386d8eb4a
3,638,692
def weighted_photon_spec(eng): """ Returns the weighted photon spectrum from positronium annihilation. This assumes 3/4 ortho- and 1/4 para-, normalized to a single annihilation. Parameters ---------- eng : ndarray The energy abscissa. Returns ------- Spectrum The resulting photon :class:`.Spectrum` object. """ return 3/4*ortho_photon_spec(eng) + 1/4*para_photon_spec(eng)
39b5301d5e49f070d1128d678f4f0672b32c275d
3,638,693
def get_for_tag(app_name): """ Retorna a tag for customizada para listar registros no template list.html :param app_name: Nome do app que está sendo criado :type app_name: str """ return "{% for " + app_name + " in " + app_name + "s %}"
12399a148262893047bf21c20e784bfb33373c29
3,638,694
def _reloadFn(*args): """Placeholder callback function for :func:`_handleSIGHUP`.""" return True
261a54f52e4e448671b8625dae4fbc67116bd546
3,638,695
def interpolation_lagrange_matrix(old_grid, new_grid): """ Evaluate lagrange matrix to interpolate state and control values from the solved grid onto the new grid. Parameters ---------- old_grid : <GridData> GridData object representing the grid on which the problem has been solved. new_grid : <GridData> GridData object representing the new, higher-order grid. Returns ------- ndarray The lagrange interpolation matrix. """ L_blocks = [] D_blocks = [] for iseg in range(old_grid.num_segments): i1, i2 = old_grid.subset_segment_indices['all'][iseg, :] indices = old_grid.subset_node_indices['all'][i1:i2] nodes_given = old_grid.node_stau[indices] i1, i2 = new_grid.subset_segment_indices['all'][iseg, :] indices = new_grid.subset_node_indices['all'][i1:i2] nodes_eval = new_grid.node_stau[indices] L_block, D_block = lagrange_matrices(nodes_given, nodes_eval) L_blocks.append(L_block) D_blocks.append(D_block) L = block_diag(*L_blocks) D = block_diag(*D_blocks) return L, D
ce219dbfa65842ad275fe22f1122474b865fcfb0
3,638,696
def optimize_shim(coils, unshimmed, mask, mask_origin=(0, 0, 0), bounds=None): """ Optimize unshimmed volume by varying current to each channel Args: coils (numpy.ndarray): X, Y, Z, N coil map unshimmed (numpy.ndarray): 3D B0 map mask (numpy.ndarray): 3D integer mask used for the optimizer (only consider voxels with non-zero values). mask_origin (tuple): Mask origin if mask volume does not cover unshimmed volume bounds (list): List of ``(min, max)`` pairs for each coil channels. None is used to specify no bound. Returns: numpy.ndarray: Coefficients corresponding to the coil profiles that minimize the objective function (coils.size) """ # cmap = plt.get_cmap('bone') # cmap.set_bad('black') # mag_fig, mag_ax = plt.subplots(1, 1) # plotter_mag = Slice_Plotter(mag_ax, np.transpose((unshimmed), axes=(1, 0, 2)), f'Unshimmed Full', cmap=cmap) # mag_fig.canvas.mpl_connect('scroll_event', plotter_mag.onscroll) # plt.show(block=True) # plt.close() mask_range = tuple([slice(mask_origin[i], mask_origin[i] + mask.shape[i]) for i in range(3)]) mask_vec = mask.reshape((-1,)) # Least squares solver N = coils.shape[3] # Reshape coil profile: X, Y, Z, N --> [mask.shape], N # --> N, [mask.shape] --> N, mask.size --> mask.size, N coil_mat = np.reshape(np.transpose(coils[mask_range], axes=(3, 0, 1, 2)), (N, -1)).T coil_mat = coil_mat[mask_vec != 0, :] # masked points x N unshimmed = unshimmed[mask_range] unshimmed_vec = np.reshape(unshimmed, (-1,)) # mV unshimmed_vec = unshimmed_vec[mask_vec != 0] # mV' # Set up output currents and optimize if bounds is not None: bounds = np.asarray(bounds) currents_0 = np.zeros(N) currents_sp = opt.least_squares(shim_residuals, currents_0, args=(unshimmed_vec, coil_mat), bounds=bounds) currents = currents_sp.x residuals = np.asarray(currents_sp.fun) return (currents, residuals)
f9e5d88e4a54222c841b3a2a66254675218207c4
3,638,697
def _log(x1): """closure of log for zero arguments, sign-protected""" with np.errstate(divide="ignore", invalid="ignore"): x1 = np.where(np.abs(x1) > 0.001, x1, 1) return np.where(x1 < -1, np.log(np.abs(x1)) * np.sign(x1), np.log(np.abs(x1)))
bcc782434b1e38749096d56e6b78df287a14eeb2
3,638,698
def whats_the_meaning_of_life(n_cores=23): """Answers the question about the meaning of life. You don't even have to ask the question, it will figure it out for you. Don't use more cores than available to mankind. Parameters ---------- n_cores: int [default: 23] The number of CPU cores to use. Returns ------- int The type of the expected answer is of course an integer. """ return 42
9b42257161ad3063bd7d8faddb6e385aa5586bf0
3,638,699