content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def singer_map(pop, rate):
    """Compute one iteration of the singer map.

    Arguments
    ---------
    pop: float
        current population value at time t
    rate: float
        growth rate parameter values

    Returns
    -------
    float
        scalar result of singer map at time t+1
    """
    # Quartic polynomial terms of the singer map, summed then scaled by
    # the growth-rate parameter.
    terms = (7.86 * pop,
             -23.31 * pop ** 2,
             28.75 * pop ** 3,
             -13.3 * pop ** 4)
    return rate * sum(terms)
84aba1d96304b67fba1b4a0e7a909e23121a3d6b
3,630,500
def initialize_embedding_from_dict(vector_map, dim, vocabulary, zero_init=False, standardize=False):
    """
    Initialize a numpy matrix from pre-existing vectors with indices
    corresponding to a given vocabulary. Words in vocabulary not in vectors
    are initialized using a given function.

    Note: words missing from ``vector_map`` are inserted into it as a side
    effect (the dict is mutated in place).

    :param vector_map: dictionary from words to numpy arrays
    :param dim: dimensionality of vectors
    :param vocabulary: dictionary from words to corresponding indices
    :param zero_init: use the zero initializer (instead of random normal)
        for words not in vector_map
    :param standardize: set word embedding values to have standard deviation of 1
    :return: numpy matrix with rows corresponding to vectors
    """
    init_fn = zero_initializer if zero_init else random_normal_initializer
    emb = np.zeros([len(vocabulary), dim], dtype=np.float32)
    for word, index in vocabulary.items():
        if word not in vector_map:
            vector_map[word] = init_fn(word, dim)
        emb[index] = vector_map[word]
    if standardize:
        # Scale the whole matrix so its global standard deviation is 1.
        emb = emb / np.std(emb)
    return emb
0af00d66f8c14909e5e447f2b8eb4bd68c551c97
3,630,501
def setup_kfolds(
        data_dict,
        ordered_chrom_keys,
        num_examples_per_file,
        k):
    """Given k, split the files as equally as possible across k folds.

    Ported to Python 3 (the original used ``xrange`` and print statements,
    which are NameError/SyntaxError under py3); the dead ``if False`` debug
    block was removed.

    Args:
        data_dict: mapping of chrom key -> 3 parallel lists of file names.
        ordered_chrom_keys: chrom keys, ordered largest-first.
        num_examples_per_file: example counts, parallel to ordered_chrom_keys.
        k: number of folds.

    Returns:
        Tuple of (kfolds, examples_per_fold): fold index -> [list, list, list]
        of concatenated file lists, and fold index -> total example count.
    """
    # set up kfolds dict
    kfolds = {fold: [[], [], []] for fold in range(k)}
    examples_per_fold = {fold: 0 for fold in range(k)}

    # seed each fold with one of the k first (largest) files
    for fold in range(k):
        chrom_key = ordered_chrom_keys[fold]
        for part in range(3):
            kfolds[fold][part] += data_dict[chrom_key][part]
        examples_per_fold[fold] += num_examples_per_file[fold]

    # greedy balancing: go from biggest to smallest remaining file, always
    # filling the fold that currently has the fewest examples (ties break
    # to the lowest fold index, matching the original scan order).
    for i in range(k, len(data_dict)):
        fill_k = min(range(k), key=lambda fold: examples_per_fold[fold])
        chrom_key = ordered_chrom_keys[i]
        for part in range(3):
            kfolds[fill_k][part] += data_dict[chrom_key][part]
        examples_per_fold[fill_k] += num_examples_per_file[i]

    return kfolds, examples_per_fold
78035e0e21a9a31cc134721846ded14ccc9528b7
3,630,502
def get_wall_status_data_by_simplified_calculation_no_01() -> pd.DataFrame:
    """
    Build every combination of parameters for a wall with a ventilated air
    layer, evaluate simplified calculation method No. 1 (simplified
    determinant form), and return a DataFrame holding the results.

    :param: none
    :return: DataFrame of all parameter combinations plus computed state values
    """
    # Build the exhaustive (brute-force) parameter combination list
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2',
                      'l_h', 'l_w', 'l_d', 'angle', 'v_a', 'l_s',
                      'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)

    # Fixed values
    h_out = global_number.get_h_out()

    # Accumulators for the computed results
    theta_sat = []               # sol-air (equivalent outdoor) temperature [deg C]
    theta_1_surf = []            # surface temperature of face 1 facing the air layer [deg C]
    theta_2_surf = []            # surface temperature of face 2 facing the air layer [deg C]
                                 # (original comment said "face 1" twice; presumably face 2)
    theta_as_ave = []            # mean temperature of the air layer [deg C]
    effective_emissivity = []    # effective emissivity [-]
    h_cv = []                    # convective heat transfer coefficient of the air layer [W/(m2*K)]
    h_rv = []                    # radiative heat transfer coefficient of the air layer [W/(m2*K)]
    q_room_side = []             # room-side surface heat flux [W/m2]

    # Error-log configuration: numpy floating-point errors inside the
    # errstate scope below are routed to the Log handler.
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):
        # If an error occurs within this scope, it is written to the log
        for row in df.itertuples():
            print(row[0])
            # Pack this row's parameters for the vent-wall model
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # Get the state values of the ventilated air layer
            temps, h_cv_buf, h_rv_buf, r_i_buf = vws.get_vent_wall_temperature_by_simplified_calculation_no_01(parm=parms, h_out=h_out)
            theta_1_surf.append(temps[0])
            theta_2_surf.append(temps[2])
            theta_as_ave.append(temps[1])
            effective_emissivity.append(htc.effective_emissivity_parallel(emissivity_1=row.emissivity_1, emissivity_2=row.emissivity_2))
            h_cv.append(h_cv_buf)
            h_rv.append(h_rv_buf)
            # Compute the sol-air temperature
            theta_sat_buf = epf.get_theta_SAT(theta_e=row.theta_e, a_surf=row.a_surf, j_surf=row.j_surf, h_out=h_out)
            theta_sat.append(theta_sat_buf)
            # Compute the room-side surface heat flux
            q_room_side.append(epf.get_heat_flow_room_side_by_vent_layer_heat_resistance(r_i=r_i_buf, theta_2=temps[2], theta_r=row.theta_r))

    # Append the computed results to the DataFrame
    df['theta_sat'] = theta_sat
    df['theta_1_surf'] = theta_1_surf
    df['theta_2_surf'] = theta_2_surf
    df['theta_as_ave'] = theta_as_ave
    df['effective_emissivity'] = effective_emissivity
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['q_room_side'] = q_room_side
    return df
dafe25b6b484c0116da0f7837555a3b29cc5b068
3,630,503
def get_audio_embedding(audio, sr, model=None, input_repr="mel256",
                        content_type="music", embedding_size=6144,
                        center=True, hop_size=0.1, batch_size=32,
                        verbose=True):
    """
    Computes and returns L3 embedding for given audio data.

    Embeddings are computed for 1-second windows of audio.

    Parameters
    ----------
    audio : np.ndarray [shape=(N,) or (N,C)] or list[np.ndarray]
        1D numpy array of audio data or list of audio arrays for multiple
        inputs.
    sr : int or list[int]
        Sampling rate, or list of sampling rates.
        If not 48kHz audio will be resampled.
    model : keras.models.Model or None
        Loaded model object. If a model is provided, then `input_repr`,
        `content_type`, and `embedding_size` will be ignored.
        If None is provided, the model will be loaded using the provided
        values of `input_repr`, `content_type` and `embedding_size`.
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for model. Ignored if `model` is
        a valid Keras model.
    content_type : "music" or "env"
        Type of content used to train the embedding model. Ignored if
        `model` is a valid Keras model.
    embedding_size : 6144 or 512
        Embedding dimensionality. Ignored if `model` is a valid Keras model.
    center : boolean
        If True, pads beginning of signal so timestamps correspond
        to center of window.
    hop_size : float
        Hop size in seconds.
    batch_size : int
        Batch size used for input to embedding model
    verbose : bool
        If True, prints verbose messages.

    Returns
    -------
    embedding : np.ndarray [shape=(T, D)] or list[np.ndarray]
        Array of embeddings for each window or list of such arrays for
        multiple audio clips.
    timestamps : np.ndarray [shape=(T,)] or list[np.ndarray]
        Array of timestamps corresponding to each embedding in the output
        or list of such arrays for multiple audio clips.
    """
    # --- input validation ---
    if model is not None and not isinstance(model, keras.models.Model):
        # Fixed typo in message: previously said 'keras.model.Models'.
        raise OpenL3Error('Invalid model provided. Must be of type '
                          'keras.models.Model'
                          ' but got {}'.format(str(type(model))))

    if str(input_repr) not in ("linear", "mel128", "mel256"):
        raise OpenL3Error('Invalid input representation "{}"'.format(input_repr))

    if str(content_type) not in ("music", "env"):
        raise OpenL3Error('Invalid content type "{}"'.format(content_type))

    if embedding_size not in (6144, 512):
        # Fixed copy-paste bug: message previously said 'Invalid content type'.
        raise OpenL3Error('Invalid embedding size "{}"'.format(embedding_size))

    if verbose not in (0, 1):
        raise OpenL3Error('Invalid verbosity level {}'.format(verbose))

    # Normalize the audio input to a list, remembering the original form so
    # the output shape matches (single array in -> single array out).
    if isinstance(audio, np.ndarray):
        audio_list = [audio]
        list_input = False
    elif isinstance(audio, list):
        audio_list = audio
        list_input = True
    else:
        err_msg = 'audio must be type list[np.ndarray] or np.ndarray. Got {}'
        raise OpenL3Error(err_msg.format(type(audio)))

    # A single sample rate is broadcast across all audio inputs.
    if isinstance(sr, Real):
        sr_list = [sr] * len(audio_list)
    elif isinstance(sr, list):
        sr_list = sr
    else:
        err_msg = 'sr must be type list[numbers.Real] or numbers.Real. Got {}'
        raise OpenL3Error(err_msg.format(type(sr)))

    if len(audio_list) != len(sr_list):
        err_msg = 'Mismatch between number of audio inputs ({}) and number of' \
                  ' sample rates ({})'
        raise OpenL3Error(err_msg.format(len(audio_list), len(sr_list)))

    # Get embedding model
    if model is None:
        model = load_audio_embedding_model(input_repr, content_type,
                                           embedding_size)

    embedding_list = []
    ts_list = []

    # Collect all audio windows in a single array so the model runs one
    # batched predict call instead of one call per clip.
    batch = []
    file_batch_size_list = []
    for audio, sr in zip(audio_list, sr_list):
        x = _preprocess_audio_batch(audio, sr, hop_size=hop_size,
                                    center=center)
        batch.append(x)
        file_batch_size_list.append(x.shape[0])

    batch = np.vstack(batch)

    # Compute embeddings
    batch_embedding = model.predict(batch,
                                    verbose=1 if verbose else 0,
                                    batch_size=batch_size)

    # Split the batched result back into per-clip embeddings + timestamps.
    start_idx = 0
    for file_batch_size in file_batch_size_list:
        end_idx = start_idx + file_batch_size
        embedding = batch_embedding[start_idx:end_idx, ...]
        ts = np.arange(embedding.shape[0]) * hop_size
        embedding_list.append(embedding)
        ts_list.append(ts)
        start_idx = end_idx

    if not list_input:
        return embedding_list[0], ts_list[0]
    else:
        return embedding_list, ts_list
2a2448c46cbe31c0631ccd049eca796e94a5f973
3,630,504
import json def format_rpc_response(data, exception=None): """ Formats a response from a RPC Manager. It provides the data and/or a serialized exception so it can be re-created by the caller. :param Any data: A JSON Serializable object. :param Exception exception: An Exception object :return str: JSON Response. """ exception_data = None if exception: args = exception.__getargs__() if hasattr(exception, "__getargs__") else exception.args kwargs = exception.__getkwargs__() if hasattr(exception, "__getkwargs__") else {} if kwargs is None: kwargs = {} try: module = exception.__module__ except: module = None exception_data = { 'exception': type(exception).__name__, 'message': str(exception), 'args': args, "kwargs": kwargs, 'module': module, } return json.dumps({ 'data': data, 'exception': exception_data })
c900e2512fd486c91789ab4312883061553a2fb1
3,630,505
def AbsoluteError(U, Uold):
    """Return absolute error.

    Absolute error is calculated to track the changes or deviation in the
    numerical solution. This is done by subtracting the solution obtained
    at the current time step from the solution at the previous time step
    over the interior of the domain (boundary points are excluded). The
    computed error is a scalar floating-point value.

    It is a good practice to observe errors during simulation as it helps
    in identifying whether the solution is converging or diverging, and the
    value is used to stop the simulation once the set convergence criterion
    is met. This error must not be confused with the ERROR TERM which is
    used for calculating accuracy of the numerical method.
    """
    # Interior slice (1:-1 along every axis). Generalized to any rank,
    # which also fixes the UnboundLocalError previously raised for arrays
    # that were neither 1-D nor 2-D.
    interior = tuple(slice(1, -1) for _ in range(U.ndim))
    return abs(Uold[interior] - U[interior]).sum()
179cbe81cee92a01e12509b42f6a8800a6c05976
3,630,506
import os


def scene_filename():
    """Construct a safe scene filename, using 'untitled' instead of ''"""
    # Stem of the .blend file path, e.g. '/a/b/scene.blend' -> 'scene'.
    stem = os.path.splitext(os.path.basename(bpy.data.filepath))[0]
    # An unsaved file has an empty path; substitute a placeholder name.
    return bpy.path.clean_name(stem or 'untitled')
685177d19345f3e3e8f57761f4f7f7a675bd4920
3,630,507
from datetime import datetime
import os


def init():
    """Create a new cycle and return its identifier."""
    # The identifier is the current timestamp rendered with the
    # module-level `idffmt` pattern; a matching directory is created.
    cycle_id = datetime.now().strftime(idffmt)
    os.mkdir(directory(cycle_id))
    return cycle_id
f93fcf76e120de34fbd2aa7fb32b00c7a0f13add
3,630,508
import os
from datetime import datetime


def convsub_photometry_to_ismphot_database(convsubfits,
                                           convsubphot,
                                           photreftype,
                                           subtracttype,
                                           kernelspec,
                                           lcapertures,
                                           projectid=None,
                                           field=None,
                                           ccd=None,
                                           overwrite=False,
                                           database=None):
    """
    This inserts the ISM photometry from a single convsub FITS into the DB.

    If projectid, field, ccd are not provided, gets them from the FITS
    file. Also gets the photreftype from the filename of the
    convolved-subtracted photometry iphot file.

    NOTE(review): this function appears unfinished and cannot run as-is:
    - the body references `subtractiontype`, but the parameter is named
      `subtracttype` (NameError);
    - `iphotpath` is read but never assigned (presumably it should be
      derived from `convsubphot`);
    - `iphotobjects` is iterated but never populated;
    - the ism_photometry insert loop is a stub (see TODO below).
    Returns a 2-tuple (convsubfits, success_flag).
    """
    # open a database connection (use the caller's if given, and remember
    # whether we own it so we only close what we opened)
    if database:
        cursor = database.cursor()
        closedb = False
    else:
        database = pg.connect(user=PGUSER,
                              password=PGPASSWORD,
                              database=PGDATABASE,
                              host=PGHOST)
        cursor = database.cursor()
        closedb = True

    # start work here
    try:

        # figure out the projectid, field, ccd, photreftype
        # first, figure out the input frame's projid, field, and ccd
        frameelems = get_header_keyword_list(convsubfits,
                                             ['object', 'projid'])
        felems = FRAMEREGEX.findall(
            os.path.basename(convsubfits)
        )

        if not (projectid and field and ccd):
            field, ccd, projectid = (frameelems['object'],
                                     int(felems[0][2]),
                                     frameelems['projid'])

        convsubdir = os.path.abspath(os.path.dirname(convsubfits))

        # NOTE(review): `iphotpath` is undefined at this point (NameError).
        if not os.path.exists(iphotpath):
            print('ERR! %sZ: expected iphot %s for '
                  'convsub FITS %s does not exist, '
                  'not processing...' %
                  (datetime.utcnow().isoformat(),
                   iphotpath,
                   convsubfits))
            return (convsubfits, False)

        # find the frame's original FITS file (unsubtracted calibrated frame)
        originalfitsbasename = '%s-%s_%s.fits' % (felems[0][0],
                                                  felems[0][1],
                                                  felems[0][2])
        originalfitspath = os.path.join(convsubdir, originalfitsbasename)

        if not os.path.exists(originalfitspath):
            # NOTE(review): '%ERR! sZ' is a typo for 'ERR! %sZ' — the %s
            # placeholder is misplaced, so this print raises/garbles.
            print('%ERR! sZ: expected original FITS %s '
                  'for convsub FITS %s does not exist, '
                  'not processing...' %
                  (datetime.utcnow().isoformat(),
                   originalfitspath,
                   convsubfits))
            return (convsubfits, False)

        # figure out the frame's info from the original frame's header
        # NOTE(review): get_header_keyword_list presumably returns a
        # list/dict, yet framerjd is later bound as a scalar query param —
        # confirm intended value.
        framerjd = get_header_keyword_list(originalfitspath, ['JD',''])

        # also get some metadata from the frameheader

        # now open the accompanying iphot file, and stream the photometry to the
        # database
        with open(convsubphot,'rb') as infd:

            # prepare the statement
            query = ("insert into ism_photometry ("
                     "frameutcdt, objectid, framekey, photkey, "
                     "xcc, ycc, xic, yic, bgv, bge, fsv, fdv, fkv, "
                     "ifl_000, ife_000, irm_000, ire_000, irq_000, "
                     "ifl_001, ife_001, irm_001, ire_001, irq_001, "
                     "ifl_002, ife_002, irm_002, ire_002, irq_002"
                     ") values ("
                     "%s, %s, %s, %s, "
                     "%s, %s, %s, %s, %s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s, %s, %s"
                     ")")

            # prepare the input params
            # TODO: finish this
            # NOTE(review): the parsed lines are never inserted — the
            # photometry streaming is incomplete.
            for line in infd:
                parsedline = parse_iphot_line(line)

        # update the iphotfiles table file with all of this info. if there's a
        # uniqueness conflict, i.e. this same combination exists, then overwrite
        # if told to do so
        if overwrite:

            print('WRN! %sZ: overwriting existing photometry info in DB for %s' %
                  (datetime.utcnow().isoformat(), convsubfits))

            query = ("insert into iphotfiles "
                     "(projectid, field, ccd, photreftype, convsubtype, "
                     "isactive, iphotfilepath, framerjd, framefilepath) "
                     "values ("
                     "%s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s"
                     ") on conflict on constraint iphotfiles_pkey "
                     "do update "
                     "set projectid = %s, field = %s, ccd = %s, "
                     "photreftype = %s, convsubtype = %s, "
                     "isactive = %s, iphotfilepath = %s, framerjd = %s, "
                     "framefilepath = %s, entrytimestamp = current_timestamp")
            # NOTE(review): `subtractiontype` below is undefined — the
            # parameter is named `subtracttype` (NameError).
            params = (projectid, field, ccd, photreftype, subtractiontype,
                      True, iphotpath, framerjd, originalfitspath,
                      projectid, field, ccd, photreftype, subtractiontype,
                      True, iphotpath, framerjd, originalfitspath)

        else:

            query = ("insert into iphotfiles "
                     "(projectid, field, ccd, photreftype, convsubtype, "
                     "isactive, iphotfilepath, framerjd, framefilepath) "
                     "values ("
                     "%s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s"
                     ")")
            params = (projectid, field, ccd, photreftype, subtractiontype,
                      True, iphotpath, framerjd, originalfitspath)

        # execute the query to insert the object
        cursor.execute(query, params)
        database.commit()

        # update the iphotobjects table with all of these objects. if there's a
        # uniqueness conflict, i.e. this same combination exists, then overwrite
        # if told to do so
        if overwrite:

            query = ("insert into iphotobjects "
                     "(projectid, field, ccd, photreftype, convsubtype, "
                     "isactive, objectid, iphotfilepath, iphotfileline) "
                     "values ("
                     "%s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s"
                     ") on conflict on constraint iphotobjects_pkey "
                     "do update set "
                     "projectid = %s, field = %s, ccd = %s, photreftype = %s, "
                     "convsubtype = %s, isactive = %s, objectid = %s, "
                     "iphotfilepath = %s, iphotfileline = %s, "
                     "entrytimestamp = current_timestamp")

        else:

            query = ("insert into iphotobjects "
                     "(projectid, field, ccd, photreftype, convsubtype, "
                     "isactive, objectid, iphotfilepath, iphotfileline) "
                     "values ("
                     "%s, %s, %s, %s, %s, "
                     "%s, %s, %s, %s"
                     ")")

        # execute statements for all of the iphot objects
        # NOTE(review): `iphotobjects` is never assigned anywhere above
        # (NameError); presumably it should come from the parsed iphot lines.
        for ind, objectid in enumerate(iphotobjects):

            if overwrite:
                params = (projectid, field, ccd, photreftype, subtractiontype,
                          True, objectid, iphotpath, ind,
                          projectid, field, ccd, photreftype, subtractiontype,
                          True, objectid, iphotpath, ind,)
            else:
                params = (projectid, field, ccd, photreftype, subtractiontype,
                          True, objectid, iphotpath, ind)

            cursor.execute(query, params)

        database.commit()

        print('%sZ: convsub FITS %s with iphot %s and %s objects '
              'inserted into DB OK' %
              (datetime.utcnow().isoformat(),
               convsubfits, iphotpath,
               len(iphotobjects))
              )

        # return True if everything succeeded
        returnval = (convsubfits, True)

    # catch the overwrite = False scenario
    except pg.IntegrityError as e:

        database.rollback()

        message = ('failed to insert photometry from %s '
                   'into DB because it exists already '
                   'and overwrite = False' % convsubfits)
        print('EXC! %sZ: %s\n%s' %
              (datetime.utcnow().isoformat(), message, format_exc())
              )
        returnval = (convsubfits, False)

    # if everything goes wrong, exit cleanly
    except Exception as e:

        database.rollback()

        message = 'failed to insert photometry from %s into DB' % convsubfits
        print('EXC! %sZ: %s\nexception was: %s' %
              (datetime.utcnow().isoformat(), message, format_exc())
              )
        returnval = (convsubfits, False)
        raise

    finally:
        # always release the cursor; close the connection only if we own it
        cursor.close()
        if closedb:
            database.close()

    return returnval
609a6b9b8bfb2189efa0d0271d4c7261495fb0c0
3,630,509
import typing


def safe_redirect(endpoint: str, **params: str | bool | None) -> typing.RouteReturn | None:
    """Redirect to a given endpoint, unless we are already serving it.

    Guards against infinite redirection loops by refusing to redirect to
    the endpoint of the current request. Unless explicitly disabled, two
    URL parameters are propagated automatically:

    * ``next``, allowing to go back to the original request later if
      necessary (see :func:`tools.utils.redirect_to_next`); pass
      ``next=None`` to disable this behavior;
    * ``doas``, preserving doas mode through the redirection
      (see :attr:`flask.g.doas`).

    Args:
        endpoint (str): The endpoint to redirect to (e.g. ``"main.index"``)
        **params: URL parameters to pass to :func:`flask.url_for`

    Returns:
        The redirection response, or ``None`` if unsafe.
    """
    if endpoint == flask.request.endpoint:
        # Already here: redirecting to ourselves would loop forever.
        return None
    if "next" not in params:
        params["next"] = flask.request.endpoint
    elif params["next"] is None:
        # Explicit opt-out: next=None means "add no next parameter at all".
        del params["next"]
    # Preserve doas mode across the redirect when it is active
    # (missing attribute on flask.g is treated the same as doas disabled).
    if getattr(flask.g, "doas", False) and "doas" not in params:
        params["doas"] = flask.g.pceen.id
    return flask.redirect(flask.url_for(endpoint, **params))
9cca685ae4e7476be6759d9b4721d7f3e1b7b89f
3,630,510
import scipy


def interpolate_with_ot(p0, p1, tmap, interp_frac, size):
    """
    Interpolate between two cell populations at a given fraction, using a
    known transport map from p0 to p1.

    Parameters
    ----------
    p0 : 2-D array
        The genes of each cell in the source population
    p1 : 2-D array
        The genes of each cell in the destination population
    tmap : 2-D array
        A transport map from p0 to p1
    interp_frac : float
        The fraction at which to interpolate
    size : int
        The number of cells in the interpolated population

    Returns
    -------
    p05 : 2-D array
        An interpolated population of 'size' cells
    """
    # Densify any sparse inputs, then promote everything to float64.
    if scipy.sparse.isspmatrix(p0):
        p0 = p0.toarray()
    if scipy.sparse.isspmatrix(p1):
        p1 = p1.toarray()
    p0 = np.asarray(p0, dtype=np.float64)
    p1 = np.asarray(p1, dtype=np.float64)
    tmap = np.asarray(tmap, dtype=np.float64)

    if p0.shape[1] != p1.shape[1]:
        raise ValueError("Unable to interpolate. Number of genes do not match")
    if p0.shape[0] != tmap.shape[0] or p1.shape[0] != tmap.shape[1]:
        raise ValueError(
            "Unable to interpolate. Tmap size is {}, expected {}".format(
                tmap.shape, (len(p0), len(p1))
            )
        )

    n_src = len(p0)
    n_dst = len(p1)

    # Assume growth is exponential and retrieve growth rate at interp_frac.
    # If all column sums are equal this changes nothing; it only matters
    # when the sums differ.
    weights = tmap / np.power(tmap.sum(axis=0), 1.0 - interp_frac)
    weights = weights.flatten(order="C")
    weights = weights / weights.sum()

    # Sample (source, destination) index pairs from the flattened coupling.
    draws = np.random.choice(n_src * n_dst, p=weights, size=size)
    rows = draws // n_dst
    cols = draws % n_dst

    # Linear interpolation between each sampled pair of cells.
    return np.asarray(
        p0[rows] * (1 - interp_frac) + p1[cols] * interp_frac,
        dtype=np.float64,
    )
419635d2f6db9a8b95eada62221b1242ceecb80f
3,630,511
import functools


def db_session(func) -> Session:
    """Decorator: inject a pooled ORM session as the first argument.

    Opens a Session bound to the pooled engine, passes it as the first
    positional argument to ``func``, and returns the connection to the
    pool once the call finishes. No transaction handling is performed.
    """
    @functools.wraps(func)
    def _inner(*args, **kwargs):
        with Session(DbHandler().eng) as session:
            return func(session, *args, **kwargs)

    return _inner
1ea1f3850b4c062c91b2a33302a23040c9d2d72b
3,630,512
import torch


def get_attention(preds, temp):
    """
    Compute spatial and channel attention maps from a feature tensor.

    Debug ``print`` statements from the original were removed, and the
    undefined ``F`` alias was replaced with ``torch.softmax`` (identical
    behavior to ``torch.nn.functional.softmax``).

    Args:
        preds: feature tensor of shape (N, C, H, W).
        temp: softmax temperature; larger values flatten the attention.

    Returns:
        Tuple (S_attention, C_attention): spatial attention of shape
        (N, H, W) and channel attention of shape (N, C).
    """
    N, C, H, W = preds.shape

    value = torch.abs(preds)

    # Mean magnitude over channels -> (N, 1, H, W)
    fea_map = value.mean(axis=1, keepdim=True)
    # Spatial attention: softmax over all H*W locations, rescaled so the
    # map sums to H*W (mean attention of 1 per location).
    S_attention = (H * W * torch.softmax(
        (fea_map / temp).view(N, -1), dim=1)).view(N, H, W)

    # Mean magnitude over both spatial dims -> (N, C)
    channel_map = value.mean(axis=2, keepdim=False).mean(
        axis=2, keepdim=False)
    # Channel attention: softmax over channels, rescaled to sum to C.
    C_attention = C * torch.softmax(channel_map / temp, dim=1)

    return S_attention, C_attention
0c257ca2fbc17a5e56702c15a138567e7ed81bec
3,630,513
import logging


def select_best_haplotype_match(all_matches):
    """Returns the best HaplotypeMatch among all_matches.

    The best matching HaplotypeMatch is the one with the lowest
    match_metrics score. Ties are broken by keeping the first match in the
    input order; every equivalently-scored match is logged.

    Args:
      all_matches: iterable[HaplotypeMatch]. An iterable of HaplotypeMatch
        objects we want to select the best match from.

    Returns:
      The best matching HaplotypeMatch object.
    """
    # Sort once and scan the sorted list for ties. The original scanned
    # `all_matches` a second time, which crashed with IndexError when
    # given a one-shot iterator (sorted() had already exhausted it).
    sorted_matches = sorted(all_matches, key=lambda x: x.match_metrics)
    best = sorted_matches[0]
    # sorted() is stable, so equal-score matches keep their input order.
    equivalents = [
        f for f in sorted_matches if f.match_metrics == best.match_metrics
    ]

    # redacted
    if len(equivalents) > 1:
        for i, f in enumerate(equivalents):
            extra_info = 'best' if i == 0 else i
            logging.warning('Equivalent match to best: %s [%s]', f, extra_info)

    return equivalents[0]
0e40fef830055e5cd297b0f00672d8b0caedc62e
3,630,514
def get_tokens_list_from_column_list(column_name_list: list, delimiter: str = '!!') -> list:
    """Return the unique tokens found across a list of column names.

    Args:
        column_name_list: The list of column name strings.
        delimiter: delimiter seperating tokens within single column name string.

    Returns:
        A list of tokens present in the list of column names, in first-seen
        order with duplicates removed.
    """
    # dict.fromkeys gives order-preserving deduplication across the
    # flattened stream of tokens.
    all_tokens = (token
                  for column_name in column_name_list
                  for token in column_name.split(delimiter))
    return list(dict.fromkeys(all_tokens))
66e2c3c280188d2cc3e8df35e0112095f3244918
3,630,515
def solve(task: str, preamble_length=25) -> int:
    """What is the first invalid number?"""
    # One integer per line of puzzle input.
    numbers = list(map(int, task.strip().split("\n")))
    return first_invalid(numbers, preamble_length)
e574aeaba225010e0538d0298a198e37d2272e74
3,630,516
def schedule(course_list):
    """
    Given a list of courses, return a dictionary of the possible schedules

    ['BT 353','CS 135','HHS 468','BT 181','CS 146','CS 284'] -->
    {1: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12011,11995,10482,10487', 'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353C', 'HHS 468EV', 'CS 146B', 'CS 284RA')"},
     2: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12011,11995,10482,12166', 'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353C', 'HHS 468EV', 'CS 146B', 'CS 284RB')"},
     3: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12012,11995,10482,10487', 'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353D', 'HHS 468EV', 'CS 146B', 'CS 284RA')"},
     4: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12012,11995,10482,12166', 'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353D', 'HHS 468EV', 'CS 146B', 'CS 284RB')"}}
    """
    # NOTE(review): Python 2 code ('print' statement, urllib.urlretrieve);
    # this function will not run under Python 3.
    print course_list
    # Download this semester's course catalog XML to a temp file.
    url = 'https://web.stevens.edu/scheduler/core/' + SEMESTER + '/' + SEMESTER + '.xml'
    urllib.urlretrieve(url, 'courses.xml')
    tree = etree.parse('courses.xml')
    os.remove('courses.xml')  # temp file no longer needed once parsed
    root = tree.getroot()
    # Successive normalization/cleanup passes over the XML tree
    # (helpers defined elsewhere in this module).
    root = cleanupCourses(root, course_list)
    root = cleanupElements(root)
    root = fixSpacing(root)
    root = fixTimeFormat(root)
    call_numbers = getCallNums(root)
    big_dict = getBigDict(root)
    # Enumerate all conflict-free section combinations.
    all_combos = findAllCombos(big_dict, call_numbers)
    return all_combos
8c9c5db6e49b1ae106e7d83a38dde0fc2e7617e7
3,630,517
def get_repository_instance(conf=None):
    """
    Helper function to get a database Repository model instance based on CLA configuration.

    :param conf: Same as get_database_models().
    :type conf: dict
    :return: A Repository model instance based on configuration specified.
    :rtype: cla.models.model_interfaces.Repository
    """
    # Look up the configured Repository class, then instantiate it.
    repository_cls = get_database_models(conf)['Repository']
    return repository_cls()
33b49af8dd1db0af572b6ddc0d3ad5d919e06eb8
3,630,518
import operator


def _setup_RepeatingContainer_special_names(repeating_class):
    """This function is run when the module is imported--users should not
    call this function directly. It assigns magic methods and special
    attribute names to the RepeatingContainer class. This behavior is
    wrapped in a function to help keep the module-level namespace clean.
    """
    # Dunder names whose lookup is forwarded to every contained object
    # (e.g. container.__add__ yields a container of each obj.__add__).
    special_names = """
        getitem missing setitem delitem lt le eq ne gt ge add sub mul
        matmul truediv floordiv mod pow lshift rshift and xor or div
    """.split()

    def repeating_getattr(self, name):
        # Build a new container of the named attribute from each contained
        # object, carrying the key labels over.
        repeating = self.__class__(getattr(obj, name) for obj in self._objs)
        repeating._keys = self._keys
        return repeating

    for name in special_names:
        dunder = '__{0}__'.format(name)
        # NOTE(review): installed as a *property*, so the dunder access
        # itself returns a container of per-object attributes. `partial`
        # is presumably functools.partial imported at module level — confirm.
        method = partial(repeating_getattr, name=dunder)
        setattr(repeating_class, dunder, property(method))

    # When a reflected method is called on a RepeatingContainer itself, the
    # original (unreflected) operation is re-applied to the individual objects
    # contained in the container. If these new calls are also reflected, they
    # will act on the individual objects--rather than on the container as a
    # whole.
    reflected_special_names = """
        radd rsub rmul rmatmul rtruediv rfloordiv rmod rpow rlshift
        rrshift rand rxor ror rdiv
    """.split()

    def repeating_reflected_method(self, other, name):
        unreflected_op = name[1:]  # Slice-off 'r' prefix.
        # Map e.g. 'radd' -> operator.add and apply it with `other` on the
        # left-hand side against every contained object.
        operation = getattr(operator, unreflected_op)
        repeating = self.__class__(operation(other, obj) for obj in self._objs)
        repeating._keys = self._keys
        return repeating

    for name in reflected_special_names:
        dunder = '__{0}__'.format(name)
        # `partialmethod` (presumably functools.partialmethod — confirm)
        # binds `self` when the dunder is looked up on the class.
        method = partialmethod(repeating_reflected_method, name=name)
        setattr(repeating_class, dunder, method)
83909835d62599f190e26d3e72c40a9759cfbbf4
3,630,519
def set_up_nircam():
    """
    Return a configured instance of the NIRCam simulator on JWST.

    Sets up the Lyot stop and filter from the configfile, turns off science
    instrument (SI) internal WFE and zeros the OTE.

    :return: Tuple of NIRCam instance, and its OTE
    """
    nc = webbpsf.NIRCam()
    nc.include_si_wfe = False
    nc.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
    nc.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
    # Make the OTE adjustable, then start from a zeroed mirror state.
    # https://github.com/spacetelescope/webbpsf/blob/96537c459996f682ac6e9af808809ca13fb85e87/webbpsf/opds.py#L1125
    nc, ote = webbpsf.enable_adjustable_ote(nc)
    ote.zero(zero_original=True)
    return nc, ote
b994561fe00f34e704f4ffc06f162da1bc060425
3,630,520
def _dens0(S,T): """Density of seawater at zero pressure""" # --- Define constants --- a0 = 999.842594 a1 = 6.793952e-2 a2 = -9.095290e-3 a3 = 1.001685e-4 a4 = -1.120083e-6 a5 = 6.536332e-9 b0 = 8.24493e-1 b1 = -4.0899e-3 b2 = 7.6438e-5 b3 = -8.2467e-7 b4 = 5.3875e-9 c0 = -5.72466e-3 c1 = 1.0227e-4 c2 = -1.6546e-6 d0 = 4.8314e-4 # --- Computations --- # Density of pure water SMOW = a0 + (a1 + (a2 + (a3 + (a4 + a5*T)*T)*T)*T)*T # More temperature polynomials RB = b0 + (b1 + (b2 + (b3 + b4*T)*T)*T)*T RC = c0 + (c1 + c2*T)*T return SMOW + RB*S + RC*(S**1.5) + d0*S*S
a0df8ba385c18fbb7f51088cac2ec842bdef308f
3,630,521
import logging


def parse_log_level(x):
    """Identify log level in config file"""
    # Only these five names are recognized; anything else (including
    # lowercase spellings and NOTSET) maps to None.
    recognized = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
    levels = {name: getattr(logging, name) for name in recognized}
    return levels.get(x)
28e599c124c0b375659d3eed2a94b27f3b65e8b2
3,630,522
import warnings
import ctypes


def convolve(array, kernel, boundary='fill', fill_value=0.,
             nan_treatment='interpolate', normalize_kernel=True, mask=None,
             preserve_nan=False, normalization_zero_tol=1e-8):
    """
    Convolve an array with a kernel.

    This routine differs from `scipy.ndimage.convolve` because it includes a
    special treatment for ``NaN`` values. Rather than including ``NaN`` values
    in the array in the convolution calculation, which causes large ``NaN``
    holes in the convolved array, ``NaN`` values are replaced with
    interpolated values using the kernel as an interpolation function.

    Parameters
    ----------
    array : `~astropy.nddata.NDData` or array-like
        The array to convolve. This should be a 1, 2, or 3-dimensional array
        or a list or a set of nested lists representing a 1, 2, or
        3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
        the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
    kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
        The convolution kernel. The number of dimensions should match those
        for the array, and the dimensions should be odd in all directions. If
        a masked array, the masked values will be replaced by ``fill_value``.
    boundary : str, optional
        A flag indicating how to handle boundaries:

        * `None`
            Set the ``result`` values to zero where the kernel
            extends beyond the edge of the array.
        * 'fill'
            Set values outside the array boundary to ``fill_value`` (default).
        * 'wrap'
            Periodic boundary that wrap to the other side of ``array``.
        * 'extend'
            Set values outside the array to the nearest ``array`` value.
    fill_value : float, optional
        The value to use outside the array when using ``boundary='fill'``
    normalize_kernel : bool, optional
        Whether to normalize the kernel to have a sum of one.
    nan_treatment : {'interpolate', 'fill'}
        interpolate will result in renormalization of the kernel at each
        position ignoring (pixels that are NaN in the image) in both the
        image and the kernel.
        'fill' will replace the NaN pixels with a fixed numerical value
        (default zero, see ``fill_value``) prior to convolution
        Note that if the kernel has a sum equal to zero, NaN interpolation
        is not possible and will raise an exception.
    preserve_nan : bool
        After performing convolution, should pixels that were originally NaN
        again become NaN?
    mask : None or ndarray
        A "mask" array. Shape must match ``array``, and anything that is
        masked (i.e., not 0/`False`) will be set to NaN for the convolution.
        If `None`, no masking will be performed unless ``array`` is a masked
        array. If ``mask`` is not `None` *and* ``array`` is a masked array, a
        pixel is masked of it is masked in either ``mask`` *or*
        ``array.mask``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different than zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is "1e-8".

    Returns
    -------
    result : `numpy.ndarray`
        An array with the same dimensions and as the input array,
        convolved with kernel. The data type depends on the input
        array type. If array is a floating point type, then the
        return array keeps the same data type, otherwise the type
        is ``numpy.float``.

    Notes
    -----
    For masked arrays, masked values are treated as NaNs. The convolution
    is always done at ``numpy.float`` precision.
    """
    if boundary not in BOUNDARY_OPTIONS:
        raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
    if nan_treatment not in ('interpolate', 'fill'):
        raise ValueError("nan_treatment must be one of 'interpolate','fill'")

    # OpenMP support is disabled at the C src code level, changing this will
    # have no effect.
    n_threads = 1

    # Keep refs to originals
    passed_kernel = kernel
    passed_array = array

    # The C routines all need float type inputs (so, a particular
    # bit size, endianness, etc.). So we have to convert, which also
    # has the effect of making copies so we don't modify the inputs.
    # After this, the variables we work with will be array_internal, and
    # kernel_internal. However -- we do want to keep track of what type
    # the input array was so we can cast the result to that at the end
    # if it's a floating point type. Don't bother with this for lists --
    # just always push those as float.
    # It is always necessary to make a copy of kernel (since it is modified),
    # but, if we just so happen to be lucky enough to have the input array
    # have exactly the desired type, we just alias to array_internal

    # Copy or alias array to array_internal
    array_internal = _copy_input_if_needed(passed_array, dtype=float, order='C',
                                           nan_treatment=nan_treatment, mask=mask,
                                           fill_value=np.nan)
    array_dtype = getattr(passed_array, 'dtype', array_internal.dtype)
    # Copy or alias kernel to kernel_internal
    kernel_internal = _copy_input_if_needed(passed_kernel, dtype=float, order='C',
                                            nan_treatment=None, mask=None,
                                            fill_value=fill_value)

    # Make sure kernel has all odd axes
    if has_even_axis(kernel_internal):
        raise_even_kernel_exception()

    # If both image array and kernel are Kernel instances
    # constrain convolution method
    # This must occur before the main alias/copy of ``passed_kernel`` to
    # ``kernel_internal`` as it is used for filling masked kernels.
    # NOTE(review): the kernel copy above has already happened at this point,
    # so the ordering claim in the previous comment looks stale -- confirm
    # against upstream astropy, where this check precedes the copies.
    if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
        warnings.warn("Both array and kernel are Kernel instances, hardwiring "
                      "the following parameters: boundary='fill', fill_value=0,"
                      " normalize_Kernel=True, nan_treatment='interpolate'",
                      AstropyUserWarning)
        boundary = 'fill'
        fill_value = 0
        normalize_kernel = True
        nan_treatment = 'interpolate'

    # -----------------------------------------------------------------------
    # From this point onwards refer only to ``array_internal`` and
    # ``kernel_internal``.
    # Assume both are base np.ndarrays and NOT subclasses e.g. NOT
    # ``Kernel`` nor ``np.ma.maskedarray`` classes.
    # -----------------------------------------------------------------------

    # Check dimensionality
    if array_internal.ndim == 0:
        raise Exception("cannot convolve 0-dimensional arrays")
    elif array_internal.ndim > 3:
        raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
                                  'arrays at this time')
    elif array_internal.ndim != kernel_internal.ndim:
        raise Exception('array and kernel have differing number of '
                        'dimensions.')

    array_shape = np.array(array_internal.shape)
    kernel_shape = np.array(kernel_internal.shape)
    pad_width = kernel_shape//2

    # For boundary=None only the center space is convolved. All array indices
    # within a distance kernel.shape//2 from the edge are completely ignored
    # (zeroed). E.g. (1D list) only the indices
    # len(kernel)//2 : len(array)-len(kernel)//2 are convolved. It is
    # therefore not possible to use this method to convolve an array by a
    # kernel that is larger (see note below) than the array - as ALL pixels
    # would be ignored leaving an array of only zeros.
    # Note: For even kernels the correctness condition is
    # array_shape > kernel_shape. For odd kernels it is:
    # array_shape >= kernel_shape OR array_shape > kernel_shape-1 OR
    # array_shape > 2*(kernel_shape//2). Since the latter is equal to the
    # former two for even lengths, the latter condition is complete.
    if boundary is None and not np.all(array_shape > 2*pad_width):
        raise KernelSizeError("for boundary=None all kernel axes must be smaller than array's - "
                              "use boundary in ['fill', 'extend', 'wrap'] instead.")

    # NaN interpolation significantly slows down the C convolution
    # computation. Since nan_treatment = 'interpolate', is the default
    # check whether it is even needed, if not, don't interpolate.
    # NB: np.isnan(array_internal.sum()) is faster than
    # np.isnan(array_internal).any()
    nan_interpolate = (nan_treatment == 'interpolate') and np.isnan(array_internal.sum())

    # Check if kernel is normalizable
    if normalize_kernel or nan_interpolate:
        kernel_sum = kernel_internal.sum()
        kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)

        if kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero:
            raise ValueError("The kernel can't be normalized, because its sum is "
                             "close to zero. The sum of the given kernel is < {}"
                             .format(1. / MAX_NORMALIZATION))

    # Mark the NaN values so we can replace them later if interpolate_nan is
    # not set
    if preserve_nan or nan_treatment == 'fill':
        initially_nan = np.isnan(array_internal)
        if nan_treatment == 'fill':
            array_internal[initially_nan] = fill_value

    # Avoid any memory allocation within the C code. Allocate output array
    # here and pass through instead.
    result = np.zeros(array_internal.shape, dtype=float, order='C')

    embed_result_within_padded_region = True
    array_to_convolve = array_internal
    if boundary in ('fill', 'extend', 'wrap'):
        embed_result_within_padded_region = False
        if boundary == 'fill':
            # This method is faster than using numpy.pad(..., mode='constant')
            array_to_convolve = np.full(array_shape + 2*pad_width,
                                        fill_value=fill_value,
                                        dtype=float, order='C')
            # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of
            # [pad_width[0]:-pad_width[0]] to account for when the kernel has
            # size of 1 making pad_width = 0.
            if array_internal.ndim == 1:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0]] = array_internal
            elif array_internal.ndim == 2:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
                                  pad_width[1]:array_shape[1]+pad_width[1]] = array_internal
            else:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
                                  pad_width[1]:array_shape[1]+pad_width[1],
                                  pad_width[2]:array_shape[2]+pad_width[2]] = array_internal
        else:
            np_pad_mode_dict = {'fill': 'constant', 'extend': 'edge', 'wrap': 'wrap'}
            np_pad_mode = np_pad_mode_dict[boundary]
            pad_width = kernel_shape // 2

            if array_internal.ndim == 1:
                np_pad_width = (pad_width[0],)
            elif array_internal.ndim == 2:
                np_pad_width = ((pad_width[0],), (pad_width[1],))
            else:
                np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))

            array_to_convolve = np.pad(array_internal, pad_width=np_pad_width,
                                       mode=np_pad_mode)

    # Hand everything to the C extension; shapes go over as size_t arrays.
    _convolveNd_c(result, array_to_convolve,
                  array_to_convolve.ndim,
                  np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'),
                  kernel_internal,
                  np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'),
                  nan_interpolate, embed_result_within_padded_region,
                  n_threads)

    # So far, normalization has only occured for nan_treatment == 'interpolate'
    # because this had to happen within the C extension so as to ignore
    # any NaNs
    if normalize_kernel:
        if not nan_interpolate:
            result /= kernel_sum
    elif nan_interpolate:
        result *= kernel_sum

    if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
        warnings.warn("nan_treatment='interpolate', however, NaN values detected "
                      "post convolution. A contiguous region of NaN values, larger "
                      "than the kernel size, are present in the input array. "
                      "Increase the kernel size to avoid this.", AstropyUserWarning)

    if preserve_nan:
        result[initially_nan] = np.nan

    # Convert result to original data type
    array_unit = getattr(passed_array, "unit", None)
    if array_unit is not None:
        result <<= array_unit

    if isinstance(passed_array, Kernel):
        if isinstance(passed_array, Kernel1D):
            new_result = Kernel1D(array=result)
        elif isinstance(passed_array, Kernel2D):
            new_result = Kernel2D(array=result)
        else:
            raise TypeError("Only 1D and 2D Kernels are supported.")
        new_result._is_bool = False
        new_result._separable = passed_array._separable
        if isinstance(passed_kernel, Kernel):
            new_result._separable = new_result._separable and passed_kernel._separable
        return new_result
    elif array_dtype.kind == 'f':
        # Try to preserve the input type if it's a floating point type
        # Avoid making another copy if possible
        try:
            return result.astype(array_dtype, copy=False)
        except TypeError:
            return result.astype(array_dtype)
    else:
        return result
b4ec7a5e8f8e7b1ddd9fd14f50efda96ffb548fd
3,630,523
from pathlib import Path
from typing import Optional


def get_k_best_and_worst_performing(val_metrics_csv: Path, test_metrics_csv: Path, k: int,
                                    prediction_target: str = MetricsDict.DEFAULT_HUE_KEY) -> Optional[Results]:
    """
    Get the top "k" best predictions (i.e. correct classifications where the model was the most certain) and the
    top "k" worst predictions (i.e. misclassifications where the model was the most confident).

    :param val_metrics_csv: Path to the CSV file with the validation metrics.
    :param test_metrics_csv: Path to the CSV file with the test metrics.
    :param k: Number of rows to keep from each of the four categories.
    :param prediction_target: The prediction target (hue) to analyse.
    :return: A ``Results`` tuple with the four ranked-and-truncated dataframes, or ``None``
        when no classification results could be extracted.
    """
    results = get_correct_and_misclassified_examples(val_metrics_csv=val_metrics_csv,
                                                     test_metrics_csv=test_metrics_csv,
                                                     prediction_target=prediction_target)
    if results is None:
        return None

    output_column = LoggingColumns.ModelOutput.value

    def _top_k(df, ascending: bool):
        # Rank by model output: descending surfaces the most confident positive
        # predictions, ascending the most confident negative predictions.
        return df.sort_values(by=output_column, ascending=ascending).head(k)

    # Renamed from `sorted` to avoid shadowing the builtin of the same name.
    ranked = Results(true_positives=_top_k(results.true_positives, ascending=False),
                     true_negatives=_top_k(results.true_negatives, ascending=True),
                     false_positives=_top_k(results.false_positives, ascending=False),
                     false_negatives=_top_k(results.false_negatives, ascending=True))
    return ranked
5aaa2510f72649be280859793e0959def07d6e0a
3,630,524
def flow_experiment_from_csv(filename):
    """
    Initialise a flow experiment from a formatted .csv file.

    Parameters
    ----------
    filename: str
        Name of a formatted configuration file.

    Returns
    -------
    experiment: Classes.FlowExperiment
    """
    with open(filename, "r", encoding="utf-8") as file:
        raw_lines = file.read().split("\n")

    # Build a key -> value map from the first two comma-separated fields of
    # every line that contains at least two fields.
    entries = {}
    for raw in raw_lines:
        fields = raw.split(",")
        if len(fields) > 1:
            entries[fields[0]] = fields[1]

    experiment = Classes.FlowExperiment(entries["Exp_code"])
    experiment.reactor_volume = float(entries["Reactor_volume"])
    experiment.reactor_volume_unit = entries["Reactor_volume_unit"]
    return experiment
9e37a4392eb3f96dfbd3b8947badb9009e252513
3,630,525
def get_int(value, allow_sign=False):
    """Convert a value to an integer.

    Args:
        value: String value to convert.
        allow_sign: If True, negative values are allowed.

    Return:
        int(value) if possible.
    """
    # Trailing dots are stripped so inputs such as "0." (passed via [count])
    # still convert cleanly.
    try:
        number = int(value.rstrip("."))
    except ValueError:
        raise StringConversionError("Could not convert '%s' to int" % (value))

    if number < 0 and not allow_sign:
        raise StringConversionError("Negative numbers are not supported.")
    return number
a50b23cac634d4cf414fa18a68f5a926c0702735
3,630,526
from typing import Union


def calculate_weights(
        performance: Union['xr.DataArray', None],
        independence: Union['xr.DataArray', None],
        performance_sigma: Union[float, None],
        independence_sigma: Union[float, None]) -> 'xr.DataArray':
    """Calculate normalized weights for each model N.

    Parameters
    ----------
    performance : array_like, shape (N,) or None
        Array specifying the model performance. None is mutually exclusive
        with independence being None.
    independence : array_like, shape (N, N) or None
        Array specifying the model independence. None is mutually exclusive
        with performance being None.
    performance_sigma : float or None
        Sigma value defining the form of the weighting function for the
        performance. Can be one only if performance is also None.
    independence_sigma : float or None
        Sigma value defining the form of the weighting function for the
        independence. Can be one only if independence is also None.

    Returns
    -------
    weights : ndarray, shape (N,)
    """
    if performance is None:
        numerator = 1
    else:
        numerator = np.exp(-((performance / performance_sigma)**2))

    if independence is None:
        denominator = 1
    else:
        # The diagonal contributes exp(0) = 1, so this sum is equal to
        # 1 + sum over i != j.
        similarity = np.exp(-((independence / independence_sigma)**2))
        denominator = similarity.sum('perfect_model_ensemble')

    weights = numerator / denominator
    weights /= weights.sum()  # normalize to unit sum

    weights.name = 'weight'
    weights.attrs['variable_group'] = 'weight'  # used in barplot
    weights.attrs['units'] = '1'

    return weights
2a5e578b80fb4cff7e8d90ce0d8c7a98d777342f
3,630,527
def validate_state(state):
    """
    State validation rule.

    Property: LifecyclePolicy.State

    Returns the state unchanged when valid; raises ValueError otherwise.
    """
    VALID_STATES = ("ENABLED", "DISABLED")

    if state in VALID_STATES:
        return state
    raise ValueError("State must be one of : %s" % ", ".join(VALID_STATES))
5dcc3d2c8bf9242d8090aef0933f26d2ffa1821d
3,630,528
def load_variable_config(project_config):
    """Extract the variable configuration out of the project configuration.

    Args:
        project_config (dict-like): Project configuration.

    Returns:
        dict: Variable dictionary with name: [levels] (single level will
            have a list containing None.)
    """
    # 2D variables carry no vertical levels, encoded as a single-entry
    # [None] list so downstream code can iterate levels uniformly.
    variables = {name: [None] for name in project_config['variables_2d']}

    # 3D variables bring their own list of levels.
    variables.update(project_config['variables_3d'])

    return variables
37caccfa5f9c3a724e61233610c3e4a3e9938695
3,630,529
from typing import Union
from typing import List


def absolute_simulations_distance_for_tables(
        simulation_dfs: Union[List[pd.DataFrame], pd.DataFrame],
        gt_simulation_dfs: Union[List[pd.DataFrame], pd.DataFrame]):
    """Compute absolute normalized distance between simulations.

    Parameters
    ----------
    simulation_dfs:
        PEtab simulation tables proposed by the tool under review.
    gt_simulation_dfs:
        Ground truth simulation tables.

    Returns
    -------
    distance:
        The normalized absolute distance, averaged over all table pairs.
    """
    # Accept single tables for convenience and promote them to lists.
    if isinstance(simulation_dfs, pd.DataFrame):
        simulation_dfs = [simulation_dfs]
    if isinstance(gt_simulation_dfs, pd.DataFrame):
        gt_simulation_dfs = [gt_simulation_dfs]

    per_table = [
        absolute_simulations_distance_for_table(sim_df, gt_df)
        for sim_df, gt_df in zip(simulation_dfs, gt_simulation_dfs)
    ]
    return sum(per_table) / len(per_table)
e7090bc60bc9da4aed070e7c0af78e91aad7504d
3,630,530
def _experiment_fn(run_config, hparams):
    """Outputs `Experiment` object given `output_dir`.

    Args:
        run_config: `EstimatorConfig` object of run configuration.
        hparams: `HParams` object that contains hyperparameters.

    Returns:
        `Experiment` object
    """
    estimator = learn.Estimator(
        model_fn=model_fn, config=run_config, params=hparams)

    # In the OOM smoke-test mode run a single train/eval step only.
    if FLAGS.oom_test:
        train_steps, eval_steps = 1, 1
    else:
        train_steps, eval_steps = FLAGS.num_train_steps, FLAGS.num_eval_steps

    data = _get_data(hparams.data)

    return learn.Experiment(
        estimator=estimator,
        train_input_fn=_get_train_input_fn(data),
        eval_input_fn=_get_eval_input_fn(data),
        train_steps=train_steps,
        eval_steps=eval_steps,
        eval_delay_secs=FLAGS.num_eval_delay_secs)
aebf6b7586ece3a995f29b8f7f52283e3d29a478
3,630,531
import torch


def cross_op_torch(r):
    """
    Return the cross-product operator of ``r`` as a matrix, i.e. for an
    input vector r in R^3 build rX in R^{3x3} such that
    rX @ v == cross(r, v).

    A leading batch dimension is supported: input (B, 3) yields (B, 3, 3).
    """
    if r.dim() > 1:
        skew = torch.zeros(r.shape[0], 3, 3).to(r)
    else:
        skew = torch.zeros(3, 3).to(r)

    # Fill the strict upper triangle, then antisymmetrize.
    skew[..., 0, 1] = -r[..., 2]
    skew[..., 0, 2] = r[..., 1]
    skew[..., 1, 2] = -r[..., 0]

    return skew - skew.transpose(-1, -2)
04f926f00f6ed58bee3feae80ef573f5a8822d20
3,630,532
from datetime import datetime


def apply_internal(user_id, job_id, resume, comment):
    """
    Basic logic for applying to internal job postings.

    Validates the inputs, rejects duplicate applications for the same job,
    then records both a generic ``Application`` row and an ``Inhouse`` row
    linking it to the job.

    Arguments:
    `user_id`: ID of the user applying
    `job_id`: ID of the job a user is applying for
    `resume`: Handy tool for applying to jobs
    `comment`: Free-text comment stored with the application

    Returns a dict with a "status" message on validation failure or
    duplicate, otherwise the list of all the user's applications as dicts.
    """
    # Input validation: each failure short-circuits with a status message.
    if not user_id:
        return {"status": "Please double check your authentication token, no user ID found."}
    if not resume:
        return {"status": "Please enter a resume!"}
    if not job_id:
        return {"status": "Please make sure you are applying to a job with a non-null ID!"}
    # Duplicate check: look for an existing Inhouse record linking any of the
    # user's applications to this job.
    for application in Application.query.filter_by(user_id=user_id):
        inhouse = Inhouse.query.filter_by(application_id=application.id, job_id=job_id).first()
        if inhouse:
            return {"status": f"Already found an application for this job for the user {user_id}"}
    generic_application = Application(date=str(datetime.now()), user_id=user_id,
                                      is_inhouse_posting=True, status="Applied",
                                      resume=resume, comment=comment)
    db.session.add(generic_application)
    # Commit first so generic_application.id is populated for the Inhouse row.
    db.session.commit()
    inhouse_application = Inhouse(application_id=generic_application.id, job_id=job_id)
    db.session.add(inhouse_application)
    db.session.commit()
    user_applications = Application.query.filter_by(user_id=user_id).all()
    return [application.to_dict() for application in user_applications]
af7ebb2e9a31c4ee41514d6330a416018e85999e
3,630,533
def check_acls(user, obj, acl_type):
    """Check ACLs.

    Supported lookups: 'moz_contact', 'admin', 'reviewer',
    'senior_reviewer'. Any other value raises.
    """
    if acl_type == 'moz_contact':
        try:
            return user.email in obj.addon.get_mozilla_contacts()
        except AttributeError:
            # obj may be a comm thread whose addon hangs off .thread.
            return user.email in obj.thread.addon.get_mozilla_contacts()
    if acl_type == 'admin':
        return acl.action_allowed_user(user, 'Admin', '%')
    if acl_type == 'reviewer':
        return acl.action_allowed_user(user, 'Apps', 'Review')
    if acl_type == 'senior_reviewer':
        return acl.action_allowed_user(user, 'Apps', 'ReviewEscalated')
    raise Exception('Invalid ACL lookup.')
6d00906484479c918280e92cc61168aa5959e066
3,630,534
def variable_labels():
    """Dictionaries that contain Variables objects.

    Maps a variable name to a ``Variable`` carrying its 1D binning
    (``hist1d``) and a LaTeX axis label.  Several keys are aliases that
    point at the same ``Variable`` instance.
    """
    # Shared LaTeX fragments used when composing labels.
    _phi = r'$\phi$'
    _eta = r'$\eta$'
    _T = r'$_\text{T}$ [GeV]'
    _mass = 'Mass [GeV]'

    variables = {}

    # --- Large-R jet kinematics and substructure ---
    variables['ljet_C2'] = Variable(binning=hist1d(10, 0., 0.6), label=r'Large-R Jet C$_2^{\beta\text{=1}}$')
    variables['ljet_D2'] = Variable(binning=hist1d(20, 0., 5.0), label=r'Large-R Jet D$_2^{\beta\text{=1}}$')
    variables['ljet_d12'] = Variable(binning=hist1d(20, 0., 125.), label=r'Large-R Jet $\sqrt{\text{d}_{\text{12}}}$ [GeV]')
    variables['ljet_d23'] = Variable(binning=hist1d(12, 0., 60.), label=r'Large-R Jet $\sqrt{\text{d}_{\text{23}}}$ [GeV]')
    variables['ljet_eta'] = Variable(binning=hist1d(20, -3., 3.), label=r'Large-R Jet '+_eta)
    variables['ljet_phi'] = Variable(binning=hist1d(20, -2., 2.), label=r'Large-R Jet $\phi$')
    variables['ljet_m'] = Variable(binning=hist1d(40, 0., 400.), label=r'Large-R Jet '+_mass)
    variables['ljet_pt'] = Variable(binning=hist1d(14,200., 1500.), label=r'Large-R Jet p'+_T)
    variables['ljet_tau1'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet $\tau_{\text{1}}$')
    variables['ljet_tau2'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet $\tau_{\text{2}}$')
    variables['ljet_tau3'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet $\tau_{\text{3}}$')
    variables['ljet_tau21'] = Variable(binning=hist1d(11, 0., 1.1), label=r'Large-R Jet $\tau_{\text{21}}$')
    variables['ljet_tau32'] = Variable(binning=hist1d(11, 0., 1.1), label=r'Large-R Jet $\tau_{\text{32}}$')
    variables['ljet_charge'] = Variable(binning=hist1d(80, -5., 5.),label=r'Large-R Jet Charge')
    variables['ljet_SDmass'] = Variable(binning=hist1d(40,0.,400.), label=r'Large-R Jet Softdrop Mass [GeV]')
    variables['ljet_softDropMass'] = variables['ljet_SDmass']  # alias

    # --- BEST tagger scores ---
    variables['ljet_BEST_t'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(top)')
    variables['ljet_BEST_w'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(W)')
    variables['ljet_BEST_z'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(Z)')
    variables['ljet_BEST_h'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(H)')
    variables['ljet_BEST_j'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(jet)')
    variables['ljet_BEST_t_j'] = Variable(binning=hist1d(10, 0., 1.0), label=r'Large-R Jet BEST(top/(top+jet))')

    # DeepAK8 tagger outputs, one entry per score index.
    for i in range(16):
        variables['ljet_deepAK8_{0}'.format(i)] = Variable(binning=hist1d(10,0,1), label=r'Large-R Jet DeepAK8[{0}]'.format(i))

    # --- Subjet variables, split by lepton charge (Qpos/Qneg) ---
    variables['ljet_subjet_0_charge_Qpos'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 0 charge')
    variables['ljet_subjet_0_bdisc_Qpos'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 b-disc.')
    variables['ljet_subjet_1_charge_Qpos'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 1 charge')
    variables['ljet_subjet_1_bdisc_Qpos'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 b-disc.')
    variables['ljet_subjet_0_charge_Qneg'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 0 charge')
    variables['ljet_subjet_0_bdisc_Qneg'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 b-disc.')
    variables['ljet_subjet_1_charge_Qneg'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 1 charge')
    variables['ljet_subjet_1_bdisc_Qneg'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 b-disc.')

    # --- Subjet variables (charge-inclusive) ---
    variables['ljet_subjet0_charge'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 0 charge')
    variables['ljet_subjet0_bdisc'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 b-disc.')
    variables['ljet_subjet0_mass'] = Variable(binning=hist1d(20, 0,200),label=r'Large-R Jet Subjet 0 '+_mass)
    variables['ljet_subjet0_mrel'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 Relative '+_mass)
    variables['ljet_subjet0_ptrel'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 Relative p'+_T)
    variables['ljet_subjet0_tau21'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 $\tau_{\text{21}}$')
    variables['ljet_subjet0_tau32'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 0 $\tau_{\text{32}}$')
    variables['ljet_subjet1_charge'] = Variable(binning=hist1d(50,-5,5), label=r'Large-R Jet Subjet 1 charge')
    variables['ljet_subjet1_bdisc'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 b-disc.')
    variables['ljet_subjet1_mass'] = Variable(binning=hist1d(20, 0,200),label=r'Large-R Jet Subjet 1 '+_mass)
    variables['ljet_subjet1_mrel'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 Relative '+_mass)
    variables['ljet_subjet1_ptrel'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 Relative p'+_T)
    variables['ljet_subjet1_tau21'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 $\tau_{\text{21}}$')
    variables['ljet_subjet1_tau32'] = Variable(binning=hist1d(10, 0,1), label=r'Large-R Jet Subjet 1 $\tau_{\text{32}}$')
    variables['ljet_subjets_deltaQ'] = Variable(binning=hist1d(100,-10,10), label=r'$\Delta$Q (Large-R Jet Subjets)')
    variables['ljet_contain'] = Variable(binning=hist1d(11,-5.5,5.5), label=r'Large-R Jet Containment')

    # --- Small-R jets and multiplicities ---
    variables['jet_pt'] = Variable(binning=hist1d(10, 25., 500), label=r'Small-R Jet p'+_T)
    variables['jet_eta'] = Variable(binning=hist1d(10, -2.5, 2.5), label=r'Small-R Jet '+_eta)
    variables['jet_bdisc'] = Variable(binning=hist1d(10, 0., 1.), label=r'Small-R Jet b-disc.')
    variables['btags_n'] = Variable(binning=hist1d( 6, -0.5, 5.5), label=r'Number of b-tags')
    variables['n_btags'] = variables['btags_n']  # alias
    variables['n_jets'] = Variable(binning=hist1d(11, -0.5,10.5), label=r'Number of Small-R Jets')
    variables['n_ljets'] = Variable(binning=hist1d( 6, -0.5, 5.5), label=r'Number of Large-R Jets')

    # --- Leptons ---
    variables['lep_eta'] = Variable(binning=hist1d(10,-2.5, 2.5),label=r'Lepton '+_eta)
    variables['lep_pt'] = Variable(binning=hist1d(10, 25., 300.),label=r'Lepton p'+_T)
    variables['mu_pt'] = Variable(binning=hist1d(10, 25., 300.),label=r'Muon p'+_T)
    variables['mu_eta'] = Variable(binning=hist1d(10,-2.5, 2.5),label=r'Muon '+_eta)
    variables['mu_phi'] = Variable(binning=hist1d(10,-2.5, 2.5),label=r'Muon '+_phi)
    variables['mu_ptrel'] = Variable(binning=hist1d(50, 0, 500),label=r'Muon p$_\text{T}^\text{rel}$')
    variables['mu_drmin'] = Variable(binning=hist1d(10, 0, 5),label=r'Muon $\Delta$R$_\text{min}$')
    variables['el_pt'] = Variable(binning=hist1d(10, 25., 300.),label=r'Electron p'+_T)
    variables['el_eta'] = Variable(binning=hist1d(10,-2.5, 2.5),label=r'Electron '+_eta)
    variables['el_phi'] = Variable(binning=hist1d(10,-2.5, 2.5),label=r'Electron '+_phi)
    variables['el_ptrel'] = Variable(binning=hist1d(50, 0, 500),label=r'Electron p$_\text{T}^\text{rel}$')
    variables['el_drmin'] = Variable(binning=hist1d(10, 0, 5),label=r'Electron $\Delta$R$_\text{min}$')

    # --- Angular separations and combined objects ---
    variables['deltaR_lep_ak4'] = Variable(binning=hist1d(10,0, 5), label=r'$\Delta$R(lepton,AK4)')
    variables['pTrel_lep_ak4'] = Variable(binning=hist1d(10,0,100), label=r'p$_\text{T}^\text{rel}$(lepton,AK4)')
    variables['deltaR_lep_ak8'] = Variable(binning=hist1d(10,0, 5), label=r'$\Delta$R(lepton,AK8)')
    variables['deltaR_ak4_ak8'] = Variable(binning=hist1d(10,0, 5), label=r'$\Delta$R(AK4,AK8)')
    variables['ljet_jet_m'] = Variable(binning=hist1d(50,0.,5000.), label=r'Large-R Jet + Small-R Jet '+_mass)
    variables['ljet_jet_deltaR'] = Variable(binning=hist1d(10,0.,5.), label=r'$\Delta$R(Large-R Jet,Small-R Jet)')

    # --- Neutrino and event-level quantities ---
    variables['nu_phi'] = Variable(binning=hist1d(64,-3.2, 3.2), label=r'$\nu$ '+_phi)
    variables['nu_eta'] = Variable(binning=hist1d(10,-2.5, 2.5), label=r'$\nu$ '+_eta)
    variables['nu_pt'] = Variable(binning=hist1d(30, 0, 600.), label=r'$\nu$ p'+_T)
    variables['ht'] = Variable(binning=hist1d(50, 0., 5000.), label=r'H'+_T)
    variables['HT'] = variables['ht']  # alias
    variables['mtw'] = Variable(binning=hist1d(12, 0., 120.), label=r'm$_\text{T}^\text{W}$ [GeV]')
    variables['mlb'] = Variable(binning=hist1d(32, 0., 800.), label=r'm$_{\ell\text{b}}$')
    variables['mass_lb'] = variables['mlb']  # alias
    variables['met_met'] = Variable(binning=hist1d(50, 0., 500.), label=r'E$_{\text{T}}^{\text{miss}}$ [GeV]')
    variables['met_phi'] = Variable(binning=hist1d( 6,-3.2, 3.2), label=r'$\phi^{\text{miss}}$ [GeV]')

    # --- ttbar system, plus aliases for alternative naming conventions ---
    ttbar = r"\text{t}\bar{\text{t}}"
    variables['mtt'] = Variable(binning=hist1d(25,0,5000),label=r'm$_{%s}$ [GeV]'%ttbar)
    variables['pttt'] = Variable(binning=hist1d(10,0, 500),label=r'p$_{\text{T},%s }$ [GeV]'%ttbar)
    variables['ytt'] = Variable(binning=hist1d(10,0, 5),label=r'y$_{%s}$ [GeV]'%ttbar)
    variables['beta'] = Variable(binning=hist1d(10,0, 1),label=r'$\beta_{z,%s}$ [GeV]'%ttbar)
    variables['dy'] = Variable(binning=hist1d(12,-3, 3),label=r'$\Delta|\text{y}|$')
    variables['dyres'] = Variable(binning=hist1d(12,-3, 3),label=r'$\Delta|\text{y}|$ Resolution')
    variables['deltay'] = variables['dy']
    variables['mttbar'] = variables['mtt']
    variables['pTttbar'] = variables['pttt']
    variables['yttbar'] = variables['ytt']
    variables['betatt'] = variables['beta']
    variables['betattbar'] = variables['beta']

    return variables
38cf74c3c4a1a136adfa81ee8ff675062caf1f7e
3,630,535
def _calcDistance(fiberMatrix1, fiberMatrix2): """ *INTERNAL FUNCTION* Computes average Euclidean distance INPUT: fiberMatrix1 - 3D matrix containing fiber spatial infomration fiberMatrix2 - 3D matrix containing fiber spatial information for comparison OUTPUT: Average Euclidean distance of sample points """ return np.mean(np.linalg.norm(np.subtract(fiberMatrix1, fiberMatrix2), axis=0), axis=1)
d50fef85c6a682c093ba62394103d525d23f58b7
3,630,536
def find_suppliers(client, framework, supplier_ids=None, map_impl=map, dry_run=False):
    """Return supplier details for suppliers with framework interest

    :param client: data api client
    :type client: dmapiclient.DataAPIClient
    :param dict framework: framework
    :param supplier_ids: list of supplier IDs to limit return to
    :type supplier_ids: Optional[List[Union[str, int]]]
    """
    logger.debug(f"fetching records for {len(supplier_ids) if supplier_ids else 'all'} suppliers")

    # Lazily fetch supplier details (returns a generator).
    supplier_records = find_suppliers_with_details_and_draft_service_counts(
        client,
        framework["slug"],
        supplier_ids,
        map_impl=map_impl,
    )

    # Reuse the CSV-report helper from another script to filter and flatten
    # the supplier details; only the rows are needed here.
    lot_slugs = tuple(lot["slug"] for lot in framework["lots"])
    _, rows = get_csv_rows(
        supplier_records,
        framework["slug"],
        framework_lot_slugs=lot_slugs,
        count_statuses=("submitted",),
        dry_run=dry_run,
        include_central_supplier_details=True,
    )
    return rows
8343b53249d392a8cddae8d1ca1069736cd2cf9d
3,630,537
from typing import Dict
def filter_topology(model: Dict[str, str], operator: str, value: str, component: str):
    """Check whether a model matches the user's topology filter.

    The model's "topology" field is a comma-separated list of
    "component:amount" pairs. The first pair naming *component* is compared
    against *value* using the operator looked up in RANGE_OPERATORS.

    :param model: model information obtained from jenkins
    :param operator: operator to filter the topology with
    :param value: value to use in the comparison
    :param component: component of the topology to filter
    :returns: whether the model satisfies the user input
    :rtype: bool
    """
    for segment in model["topology"].split(","):
        if component not in segment:
            continue
        _, amount = segment.split(":")
        return RANGE_OPERATORS[operator](float(amount), float(value))
    # Component not present in the topology at all.
    return False
c4e26a89a271e5f70b0ca1afae47a78a3e6acca0
3,630,538
def getAuctionPrice(the_auction: models.DutchAuction, bid: Transaction[BidParameter, TezlandDutchAuctionsStorage]):
    """Return the auction's current price in mutez.

    More or less pasted from the dutch auction contract: the price decays
    linearly (in granularity-sized intervals) from start_price to end_price
    between start_time and end_time.
    """
    granularity = int(bid.storage.granularity)
    op_now = bid.data.timestamp

    # Before the auction opens, the price is pinned at the start price.
    if op_now <= the_auction.start_time:
        return the_auction.start_price

    # After it closes, the price is pinned at the end price.
    if op_now >= the_auction.end_time:
        return the_auction.end_price

    # Linear decay in between. Make 100% sure the math checks out (overflow,
    # abs, etc) - intervals should be validated as non-negative at creation.
    duration = abs(the_auction.end_time - the_auction.start_time) // granularity
    elapsed = abs(op_now - the_auction.start_time) // granularity
    # NOTE: division by zero if auction duration <= granularity.
    mutez_per_interval = (the_auction.start_price - the_auction.end_price) // duration.seconds
    time_deduction = mutez_per_interval * elapsed.seconds
    return the_auction.start_price - time_deduction
4503fe0212935a2b37a15000448dd0fe8b5674a4
3,630,539
def __all_paths_between_acceptance_states(Dfa):
    """Generates for each front acceptance state a copy of the complete graph
    which can be reached inside 'Dfa' starting from it until the next
    acceptance state.

    RETURNS: List of DFAs containing a tail for each found acceptance states.
    """
    def _get_branch(Dfa, acceptance_si):
        # Clone the sub-graph reachable from 'acceptance_si'.
        result = Dfa.clone_subset(acceptance_si, Dfa.get_successors(acceptance_si))
        # Clone acceptance state as init state, which does not accept.
        # Take over all transitions of the acceptance state.
        new_state = result.get_init_state().clone()
        new_state.set_acceptance(False)
        result.set_new_init_state(new_state)
        # Original acceptance only remains in place, if it is the target of a
        # transition (i.e. a loop back into it exists); otherwise drop it.
        if not result.has_transition_to(acceptance_si):
            result.delete_state(acceptance_si)
        return result
    # One branch DFA per acceptance state of the input DFA.
    return [
        _get_branch(Dfa, acceptance_si)
        for acceptance_si in Dfa.acceptance_state_index_list()
    ]
136ea0a999bff3b930ad4e05107042da36262d13
3,630,540
def hello():
    """
    An op definition. This example op outputs a single string.

    For more hints about writing Dagster ops, see our documentation overview
    on Ops: https://docs.dagster.io/concepts/ops-jobs-graphs/ops
    """
    greeting = "Hello, Dagster!"
    return greeting
cf701323e751122823f22bad864f7b1f0d700a97
3,630,541
def vtk_clean_polydata(surface):
    """
    Clean surface by merging duplicate points, and/or removing unused points
    and/or removing degenerate cells.

    Args:
        surface (vtkPolyData): Surface model.

    Returns:
        cleanSurface (vtkPolyData): Cleaned surface model.
    """
    cleaner = vtk.vtkCleanPolyData()
    cleaner.SetInputData(surface)
    cleaner.Update()
    return cleaner.GetOutput()
340d1e9377b378ff007a6fd115508e7c25890de2
3,630,542
def custom_exception_handler(exc, context):
    """
    A custom exception handler that makes sure errors are returned with a fixed
    format.

    Django, django-rest-framework, and the jwt framework all throw exceptions in
    slightly different ways. This has to be caught and each type of exception has
    to be converted to the common error format.

    This function is called whenever there is an uncaught exception in the code.
    It doesn't handle responses with error codes. As this is the last line of
    defense, it will check every possibility, and log all errors, so that they
    can be easily found and fixed.
    """
    # Let django-rest-framework do its handling. This might return None
    # The response variable will be ignored at the end, so don't set the
    # status code or data directly on that.
    response = exception_handler(exc, context)
    # Define variables that can be modified during handling, and will be
    # converted to a Response at the end. We default to status code 400.
    # NOTE(review): the default here is the string '400', not the int 400 —
    # presumably Response accepts both; confirm this is intentional.
    status_code = response.status_code if response else '400'
    error = Error()
    try:
        if isinstance(exc, ValidationError):
            # It's a rest framework ValidationError
            error.error_type = ErrorType.FORM_ERROR
            if isinstance(exc.detail, dict):
                # Non-field errors go into 'errors'; everything else is keyed
                # by field name in 'field_errors'.
                error.errors = [ErrorEntry.factory(obj) for obj in exc.detail.pop('non_field_errors', [])]
                error.field_errors = {key: [ErrorEntry.factory(obj) for obj in exc.detail[key]] for key in exc.detail}
            elif isinstance(exc.detail, list):
                error.errors = [ErrorEntry.factory(obj) for obj in exc.detail]
            else:
                logger.error('Unable to parse detail attribute (%s) on exception %s', exc.detail, repr(exc))
                return server_error()
        elif isinstance(exc, InvalidToken):
            # It's an error from simplejwt
            error.error_type = ErrorType.JWT_ERROR
            if isinstance(exc.detail, dict):
                error.errors = [ErrorEntry.factory(exc.detail['detail'])]
            else:
                logger.error('Unable to parse detail attribute (%s) on exception %s', exc.detail, repr(exc))
                return server_error()
        elif isinstance(exc, DjangoValidationError):
            # It's a django ValidationError
            error.error_type = ErrorType.FORM_ERROR
            status_code = status.HTTP_400_BAD_REQUEST
            # The ValidationError might, or might not, have the code attribute set.
            # It might also be None
            error_code = exc.code if hasattr(exc, 'code') and exc.code else 'invalid'
            try:
                # Validation errors are usually dicts with the field name as the key
                message_dict = exc.message_dict
                error.errors = [ErrorEntry(code=error_code, detail=message) for message in message_dict.pop('non_field_errors', [])]
                error.field_errors = {key: [ErrorEntry(code=error_code, detail=message) for message in messages] for key, messages in message_dict.items()}
            except AttributeError:
                # If an argument with an invalid format is used in a queryset `filter()`
                # we only get a list of errors, not a dict.
                error.errors = [ErrorEntry(code=error_code, detail=message) for message in exc]
        elif isinstance(exc, Http404):
            # It's a Http404 exception (probably from `get_or_404`)
            status_code = status.HTTP_404_NOT_FOUND
            error.errors = [ErrorEntry(detail=str(exc), code='not_found')]
        elif hasattr(exc, 'detail'):
            # Generic DRF-style exception carrying a 'detail' attribute.
            if isinstance(exc.detail, ErrorDetail):
                error.errors = [ErrorEntry.factory(exc.detail)]
            elif isinstance(exc.detail, list) and all(isinstance(detail, ErrorDetail) for detail in exc.detail):
                error.errors = [ErrorEntry.factory(item) for item in exc.detail]
            else:
                logger.error('Unable to parse detail attribute (%s) on exception %s', exc.detail, repr(exc))
                return server_error()
        else:
            # It's an unknown exception, so we don't know how to get the data from it.
            # Return a 500, log the data, and hope someone sees it.
            logger.error('Unable to handle exception: %s', repr(exc))
            return server_error()
    except Exception as e:
        # There was an exception while processing the exception.
        # Return a 500, log the data, and hope someone sees it.
        logger.error('Exception (%s) occurred during handling of exception: %s', repr(e), repr(exc))
        return server_error()
    # Convert the Error object into a JSON response, and return it
    serializer = ErrorSerializer(error)
    return Response(data=serializer.data, status=status_code)
fbb3e8934851b53ea0e771b39d0d5e0fc992ff3e
3,630,543
def fetch_svc(k8s_host, **kwargs):
    """
    Fetch named service definition from Kubernetes (output: dict)

    Expects 'namespace' and 'service_name' in kwargs; optional
    'k8s_api_headers' are forwarded to the API request. Aborts with 422
    unless the service is of type NodePort.
    """
    pass_headers = {}
    if 'k8s_api_headers' in kwargs:
        pass_headers.update(kwargs.pop('k8s_api_headers'))

    url = '{}/{}/namespaces/{}/{}/{}'.format(
        k8s_host,
        K8S_API['services'],
        kwargs['namespace'],
        'services',
        kwargs['service_name']
    )
    svc = req('GET', url, pass_headers)

    # Only NodePort services can be exposed the way this tool expects.
    if svc['spec']['type'] != 'NodePort':
        abort(422, 'Only services of type NodePort are supported')

    return svc
c302f0cbaf7026356e9f6f9adae2820bb9d17ae5
3,630,544
def mentionable():
    """
    Report whether there is something mentionable to notify about.

    :return: boolean
        True, if there is something mentionable to notify about
        False, if not
    """
    # TODO: real detection logic still needs to be implemented.
    return False
d1dac607efb512771677aa7e8dd42a2c21251833
3,630,545
def cmp_ver(a, b):
    """Compare versions in the form 'a.b.c'.

    Returns a negative/zero/positive number like a classic cmp().
    Components beyond the shorter version are ignored (zip truncates).
    """
    for left, right in zip(split_ver(a), split_ver(b)):
        diff = left - right
        if diff:
            return diff
    return 0
d774235354c613cec15e2d438bc6a1e60e678ae7
3,630,546
import os
import fnmatch
def findFileTypes(wpath, type ='*.txt', verbose=False):
    """Recursively find files under *wpath* whose names match *type*.

    Args:
        wpath: root directory to walk.
        type: fnmatch-style file name pattern (default '*.txt').
        verbose: if True, print progress and the number of matches.

    Returns:
        dict with keys 'fullName' (list of full paths) and 'fileName'
        (list of base names), in sorted os.walk order.
    """
    if verbose:
        print(' getting file list')
    listFile = []
    fileName = []
    # NOTE: the previous implementation special-cased len(dirnames) == 0
    # with a branch identical to the else-branch; collapsed into one loop.
    # The unused 'listPath' accumulator was dropped.
    for dirpath, dirnames, filenames in sorted(os.walk(wpath)):
        for f in filenames:
            if fnmatch.fnmatch(f, type):
                file = os.path.join(dirpath, f)
                if os.path.isfile(file):
                    listFile.append(file)
                    fileName.append(f)
    if verbose:
        print((' nfiles = %i' % (len(listFile))))
    return {'fullName': listFile, 'fileName': fileName}
b424d8bb93bfa5540847a308de9259c5ba14a048
3,630,547
import argparse def _parse_args(argv=None): """Parse command-line args.""" def _positive_int(value): """Define a positive integer ArgumentParser type.""" value = int(value) if value <= 0: raise argparse.ArgumentTypeError( "Value must be positive, {} was passed.".format(value)) return value parser = argparse.ArgumentParser() parser.add_argument( "--file_pattern", required=True, help="File pattern for amazon qa files on Google cloud storage.", ) parser.add_argument( "--output_dir", required=True, help="Output directory to write the dataset on Google cloud storage.", ) parser.add_argument( "--max_words", type=_positive_int, default=59, help="Maximum number of words a Q or A can have to be included.", ) parser.add_argument( "--min_words", type=_positive_int, default=4, help="Minimum number of words a Q or A must have to be included.", ) parser.add_argument( "--train_split", default=0.9, type=float, help="The proportion of data to put in the training set.", ) parser.add_argument( "--num_shards_test", default=10, type=_positive_int, help="The number of shards for the test set.", ) parser.add_argument( "--num_shards_train", default=100, type=_positive_int, help="The number of shards for the train set.", ) return parser.parse_known_args(argv)
abb5d64089e200592f057ee1356d135328196dab
3,630,548
def delete_buckets(buckets) -> list:
    """Deletes all buckets from a list

    Args:
        buckets (list): A list of s3 buckets

    Returns:
        A list of terminated buckets (plus any error strings produced)
    """
    terminated_buckets = []
    for bucket_info in buckets:
        bucket_name = bucket_info["Name"]
        # Whitelisted buckets are left untouched.
        if helpers.check_in_whitelist(bucket_name, WHITELIST_NAME, is_global=True):
            continue
        s3_bucket = boto3.resource(BOTO3_NAME).Bucket(bucket_name)
        # There is a much easier to do this then with pagination. Not sure if it works.
        try:
            # A bucket must be emptied before it can be deleted.
            s3_bucket.objects.all().delete()
            s3_bucket.delete()
        except ClientError as error:
            error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME, bucket_name)
            print(error_string)
            terminated_buckets.append(error_string)
        terminated_buckets.append(bucket_name)
    return terminated_buckets
17c1eaf5b277a3343fd4d0403150b571fa462ee9
3,630,549
def box_fusion(
        bounding_boxes,
        confidence_score,
        labels,
        mode='wbf',
        image_size=None,
        weights=None,
        iou_threshold=0.5):
    """Fuse (ensemble) detection boxes with WBF or NMS.

    Args:
        bounding_boxes: list of boxes of same image [[box1, box2,...],[...]]
            if ensembling many models; a single-element list if one model.
            Boxes are [x1, y1, x2, y2].
        confidence_score: per-model lists of box scores.
        labels: per-model lists of box class labels.
        mode: 'wbf' (weighted boxes fusion) or 'nms'.
        image_size: int or [w, h]; when given, boxes are normalized to
            [0, 1] before fusion and scaled back to pixels afterwards.
        weights: optional per-model weights.
        iou_threshold: IoU threshold for the fusion algorithm.

    Returns:
        (boxes, scores, classes) as numpy arrays.

    Raises:
        ValueError: if `mode` is not 'wbf' or 'nms'.
    """
    if image_size is not None:
        # Normalize pixel coordinates to [0, 1] as the fusion
        # implementations require.
        if isinstance(image_size, int):
            image_size = [image_size, image_size]
        normalized_boxes = []
        for ens_boxes in bounding_boxes:
            if isinstance(ens_boxes, list):
                ens_boxes = np.array(ens_boxes)
            ens_boxes[:, 0] = ens_boxes[:, 0]*1.0/image_size[0]
            ens_boxes[:, 1] = ens_boxes[:, 1]*1.0/image_size[1]
            ens_boxes[:, 2] = ens_boxes[:, 2]*1.0/image_size[0]
            ens_boxes[:, 3] = ens_boxes[:, 3]*1.0/image_size[1]
            normalized_boxes.append(ens_boxes)
        normalized_boxes = np.array(normalized_boxes)
    else:
        normalized_boxes = bounding_boxes.copy()

    if mode == 'wbf':
        picked_boxes, picked_score, picked_classes = weighted_boxes_fusion(
            normalized_boxes,
            confidence_score,
            labels,
            weights=weights,
            iou_thr=iou_threshold,
            conf_type='avg',  # [nms|avg]
            skip_box_thr=0.0001)
    elif mode == 'nms':
        picked_boxes, picked_score, picked_classes = nms(
            normalized_boxes,
            confidence_score,
            labels,
            weights=weights,
            iou_thr=iou_threshold)
    else:
        # BUG FIX: an unknown mode previously fell through and crashed
        # later with a NameError on 'picked_boxes'.
        raise ValueError("mode must be 'wbf' or 'nms', got %r" % (mode,))

    if image_size is not None:
        # Scale fused boxes back to pixel coordinates.
        result_boxes = []
        for ens_boxes in picked_boxes:
            ens_boxes[0] = ens_boxes[0]*image_size[0]
            ens_boxes[1] = ens_boxes[1]*image_size[1]
            ens_boxes[2] = ens_boxes[2]*image_size[0]
            ens_boxes[3] = ens_boxes[3]*image_size[1]
            result_boxes.append(ens_boxes)
    else:
        # BUG FIX: 'result_boxes' was previously undefined when
        # image_size is None, raising a NameError at the return.
        result_boxes = picked_boxes

    return np.array(result_boxes), np.array(picked_score), np.array(picked_classes)
59622e278e6805b1a726871bec089a90a68c1d4f
3,630,550
def get_worker_class(global_conf, message):
    """Returns class of worker needed to do message's work.

    Looks up 'worker-<type>' in global_conf and imports the dotted
    'class' path it specifies. Raises RuntimeError for unknown types.
    """
    worker_type = 'worker-%s' % (message.body['worker_type'])
    if worker_type not in global_conf:
        raise RuntimeError("Invalid worker type '%s'" % (worker_type))
    dotted_path = global_conf[worker_type]['class']
    module_path, class_name = dotted_path.rsplit('.', 1)
    module = __import__(module_path, fromlist=[module_path])
    return getattr(module, class_name)
3f975caf97827fcfaf7d74141ea651c302e4781c
3,630,551
def is_valid_widget(widget):
    """
    Checks if a widget is a valid in the backend
    :param widget: QWidget
    :return: bool, True if the widget still has a C++ object, False otherwise
    """
    if widget is None:
        return False

    # Houdini does not include the Shiboken library by default, so any
    # failure here is treated as "valid".
    # TODO: When Houdini app class implemented, add cleaner way
    try:
        return bool(shiboken.isValid(widget))
    except Exception:
        return True
62f01d4e5be2a29c2cb2fc3bd96c98ae18a6477b
3,630,552
from typing import Dict
import os
import yaml
def get_builtin_configs() -> Dict[str, ClientConfig]:
    """
    Return a mapping of preconfigured clients, loaded from the
    builtin_clients.yml file that ships next to this module.
    """
    yml_path = os.path.join(os.path.dirname(__file__), "builtin_clients.yml")
    with open(yml_path, "r", encoding="utf-8") as handle:
        raw_configs = yaml.safe_load(handle)
    return {
        name: ClientConfig(**data)
        for name, data in raw_configs.items()
    }
0aee618d7f376207c69abd4a8e7e4e2858d63d0d
3,630,553
def isConsistant(spectrum,kmer):
    """Checks whether a given kmer is consistent with a given spectrum or not.

    INPUT:
        spectrum: array-like. The spectrum required to check the given kmer against.
        kmer: string. The given kmer required to check its consistency.
    OUTPUT:
        bool. The consistency of the kmer against the spectrum.
    """
    # The kmer is consistent when its linear spectrum is a sub-multiset
    # of the target spectrum.
    kmer_spectrum = linear_spectrum(kmer)
    return subset_spectrum(spectrum, kmer_spectrum)
0403fdcb324d40d10bdc4555f3d8eb26faebe8ed
3,630,554
def validate_java_file(java_file):
    """Validates a java file.

    Args:
        java_file: a (file_status, java_file_path) pair; file_status "A"
            marks a newly added file (which must carry a copyright header).

    Returns:
        a list of errors (empty when the file is valid or unparseable).
    """
    file_status, java_file_path = java_file
    with open(java_file_path, "r") as fp:
        contents = fp.read()
    # An empty file is reported immediately without attempting a parse.
    if not contents:
        return ["[ERROR] Errors exist in " + java_file_path, "\t- File is empty"]
    try:
        tree = javalang.parse.parse(contents)
    except javalang.parser.JavaSyntaxError:
        # Files javalang cannot parse are skipped rather than failed.
        print("Javalang failed to parse '%s'. Skipping file..." % java_file_path)
        return []
    errors = []
    # API packages get stricter validation rules.
    is_api = tree.package and api_pattern.search(tree.package.name) is not None
    for path, class_declaration in tree.filter(javalang.tree.ClassDeclaration):
        # OSGi service implementations are validated with their own rules.
        is_osgi = "OsgiServiceImpl" in [annotation.name for annotation in class_declaration.annotations]
        errors.extend(validate_class(class_declaration, is_api, is_osgi))
    for path, interface_declaration in tree.filter(javalang.tree.InterfaceDeclaration):
        errors.extend(validate_interface(interface_declaration, is_api))
    for path, enum_declaration in tree.filter(javalang.tree.EnumDeclaration):
        errors.extend(validate_enum(enum_declaration, is_api))
    for path, import_declaration in tree.filter(javalang.tree.Import):
        errors.extend(validate_import(import_declaration))
    errors.extend(validate_print_statements(contents))
    if file_status == "A":
        errors.extend(validate_copyright_statement(contents))
    # Prefix the error list with a header naming the offending file.
    if errors:
        errors.insert(0, "[ERROR] Errors exist in " + java_file_path)
    return errors
a0bde1c13ef1d7b5056fe47fefe05bfd8bbcf7b1
3,630,555
def convertVoltage(raw_voltage):
    """Convert a raw ADC count to a voltage string with 3 decimals.

    The ADC maps roughly: ground reads 1, 1.8 V reads 4095.
    """
    volts = (raw_voltage/4095)*1.8
    return "%.3f" % volts
4f404ff02449a231521f80a2b9a4ae443880e1b3
3,630,556
def fast_ica(image, components):
    """Reconstruct an image from Fast ICA compression using specific number of components to use

    Args:
        image: PIL Image, Numpy array or path of 3D image
        components: Number of components used for reconstruction

    Returns:
        Reconstructed image

    Example:

    >>> from PIL import Image
    >>> import numpy as np
    >>> from ipfml.processing import reconstruction
    >>> image_values = Image.open('./images/test_img.png')
    >>> reconstructed_image = reconstruction.fast_ica(image_values, 25)
    >>> reconstructed_image.shape
    (200, 200)
    """
    lab_img = transform.get_LAB_L(image)
    lab_img = np.array(lab_img, 'uint8')

    # BUG FIX: n_components was hard-coded to 50, silently ignoring the
    # `components` argument documented above.
    ica = FastICA(n_components=components)

    # run ICA on image and reconstruct it from the independent components
    # (the previous redundant ica.fit() call before fit_transform was removed)
    image_ica = ica.fit_transform(lab_img)
    restored_image = ica.inverse_transform(image_ica)

    return restored_image
49ade88a8d1fe6a7addf8eb35be326088b916846
3,630,557
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Iterable
def solve_nr(
        f: Callable[[float], float],
        df: Callable[[float], float],
        estimate: float,
        eps: Optional[float]=1.0e-6,
        max_num_iter=100,
        throw_if_failed_converge=True,
        return_vector=False) -> Union[Tuple[float, int], Iterable[float]]:
    """
    Solves f(x) = 0 using Newton-Raphson method

    :param f: function of x to solve
    :param df: derivative of f(x)
    :param estimate: initial estimate
    :param eps: max absolute error; if None, will continue calculating until max_num_iter is reached
    :param max_num_iter: Max number of iterations
    :param throw_if_failed_converge: if True, will throw if fails to converge in max_num_iter (unless eps is None)
    :param return_vector: if true, returns vector of all iterated values
    :return: x, number of iterations; or vector of all iterations if return_vector is True
    """
    if max_num_iter < 1:
        raise ValueError('max_num_iter must be at least 1')

    history = [] if return_vector else None
    x = estimate
    n = 0
    converged = False

    for n in range(1, max_num_iter + 1):
        if history is not None:
            history.append(x)
        # Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k)
        step = f(x) / df(x)
        x -= step
        # Stop once the step size drops below the tolerance (if one is set).
        if eps and (abs(step) < eps):
            converged = True
            break

    if not converged and throw_if_failed_converge and eps:
        raise RuntimeError('Failed to converge in %i iterations' % n)

    if history is not None:
        return history
    return x, n
c6ab8b6bb27f8b9be9c31fe7cbd58300637d9fef
3,630,558
import json
def get_data(source):
    """Load JSON data (pegawai, jadwal, judul, liburan) from *source* path."""
    with open(source, 'r') as handle:
        return json.load(handle)
964efdabcbd21486985bbc9189c5d07dbc800dd6
3,630,559
import os
def load_testsets_by_path(path):
    """ load testcases from file path
    @param path: path could be in several type
        - absolute/relative file path
        - absolute/relative folder path
        - list/set container with file(s) and/or folder(s)
    @return testcase sets list, each testset is corresponding to a file
        [
            testset_dict_1,
            testset_dict_2
        ]
    """
    if isinstance(path, (list, set)):
        # Recurse over each unique entry and flatten the results.
        testsets = []
        for file_path in set(path):
            testset = load_testsets_by_path(file_path)
            if not testset:
                continue
            testsets.extend(testset)
        return testsets

    # Normalize to an absolute path so the cache key is stable.
    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)

    if path in testcases_cache_mapping:
        # Cache hit: this path was already loaded.
        return testcases_cache_mapping[path]

    if os.path.isdir(path):
        # Folder: load every file it contains.
        files_list = utils.load_folder_files(path)
        testcases_list = load_testsets_by_path(files_list)
    elif os.path.isfile(path):
        try:
            testset = load_test_file(path)
            # Only keep test sets that actually define testcases or an api.
            if testset["testcases"] or testset["api"]:
                testcases_list = [testset]
            else:
                testcases_list = []
        except exception.FileFormatError:
            # Malformed test file: treated as empty rather than fatal.
            testcases_list = []
    else:
        logger.log_error(u"file not found: {}".format(path))
        testcases_list = []

    testcases_cache_mapping[path] = testcases_list
    return testcases_list
2bed9249d297734cfcf37de42a958890941dc647
3,630,560
def unpack_request(data):
    """Take a buffer and return a pair of the RequestHeader and app data."""
    # The application payload begins after the message type and the
    # fixed-size request header.
    payload_offset = MSG_TYPE_SIZE + REQUEST_HEADER_SIZE
    header = unpack_request_header(data)
    return (header, data[payload_offset:])
ed0e2baf9aa510e6460eb3a9b4e5e95295389bfe
3,630,561
def basic_pl_stats(degree_sequence):
    """Fit a discrete power law to a degree sequence.

    :param degree_sequence: degree sequence of individual nodes
    :return: (alpha, sigma) of the fitted power law
    """
    fit = Fit(degree_sequence, discrete=True)
    return (fit.alpha, fit.sigma)
a079169a328547a2f43eb6ed9431202895cd75f9
3,630,562
def merge_config_dictionaries(*dicts):
    """
    Merges n dictionaries of configuration data.

    Later dictionaries override keys from earlier ones; non-dict
    arguments are silently ignored.

    :param dicts: dictionaries to merge
    :return dict: the merged configuration
    """
    # BUG FIX: `*dicts` always collects the arguments into a *tuple*, so
    # the previous `isinstance(dicts, list)` guard was always False and
    # the function unconditionally returned {}. Iterate directly instead.
    res_dict = {}
    for dictionary in dicts:
        if isinstance(dictionary, dict):
            res_dict.update(dictionary)
    return res_dict
c9711e897d5c7caa47a21f3e901025c91862327f
3,630,563
def op_catalog_size() -> int:
    """
    Return number of entries in the operational-data catalog
    @return: integer
    """
    total = 0
    for entries in _op_catalog.values():
        total += len(entries)
    return total
844519ec9afa3b06138128f5559acc2090ad4452
3,630,564
import json
def worker_recv_export_job(request):
    """Handle a Worker process (cluster node) request to claim the export
    job identified by ``taskSettingId`` in the JSON request body."""
    try:
        datas = json.loads(request.body.decode())
        taskSettingId = int(datas["params"]["taskSettingId"])
        # Multiple Worker processes may race for the same job; the filtered
        # UPDATE below claims it atomically so only one Worker can win.
        updateRows = PlExportJob.objects.filter(task_setting_id=taskSettingId,status=1,req_stop=0).update(
            status = 2, # mark the job as running
            worker_name = datas["params"]["workerName"],
            run_time = timezone.now()
        )
        if updateRows <= 0:
            # No claimable record: it does not exist, was cancelled, or was
            # already claimed by another Worker.
            return response(-3, message="符合领取条件的数据库记录不存在,或取消,或已被领取")
        # Return the export job details; the Worker operates based on them.
        result = response(0, data={
            "exportJob": model_to_dict(PlExportJob.objects.get(task_setting_id=taskSettingId))
        })
    except ObjectDoesNotExist:
        result = response(-1, message="数据库记录不存在")
    except DatabaseError:
        result = response(-2, message="查询数据异常。")
    return result
084526b0021e7fb31d5edb041fde2c734729a5cc
3,630,565
def add_host_log_history(
        host_id, exception_when_existing=False,
        filename=None, user=None,
        session=None, **kwargs
):
    """Add a host log history record for the given host id."""
    # Resolve the host first so the history row references its real id.
    target_host = _get_host(host_id, session=session)
    return utils.add_db_object(
        session, models.HostLogHistory, exception_when_existing,
        target_host.id, filename, **kwargs
    )
511213a6f00660c8e5921cf23974842e413e1725
3,630,566
import os
import json
def process_schools(schools_data, url):
    """
    Calculate number of wheelchair-accessible and total stops for each school.

    type: str
    :param: schools_data: path to the school data json file
    :type: str
    :param: url: currently, this value is https://api-v3.mbta.com/stops

    Returns a JSON string of schools (only those with coordinates), each
    annotated with 'stops', 'wheelchairs' and 'ratio'.
    """
    if not os.path.exists(schools_data):
        raise Exception("Can't process a file that does not exist.")
    with open(schools_data) as f:
        schools = json.load(f)
    stops = get_stops(url)
    new_schools = []
    for school in schools:
        # Schools without coordinates are skipped entirely.
        if school.get('lat') and school.get('lng'):
            school['stops'] = 0
            school['wheelchairs'] = 0
            p1 = {'lat': school.get('lat'), 'lon': school.get('lng')}
            for stop in stops:
                attributes = stop.get('attributes')
                p2 = {'lat': attributes.get('latitude'), 'lon': attributes.get('longitude')}
                # (1.609344)/2.0 km is half a mile; count stops inside that radius.
                if get_distance(p1, p2) <= (1.609344)/2.0:
                    school['stops'] += 1
                    # wheelchair_boarding > 0 indicates some accessibility.
                    if attributes.get('wheelchair_boarding') > 0:
                        school['wheelchairs'] += 1
            if school['stops'] > 0:
                school['ratio'] = school['wheelchairs']/school['stops']
            else:
                school['ratio'] = 0
            new_schools.append(school)
    return json.dumps(new_schools)
64bd2fe4319caeba56345dbe34261cc341ae94e1
3,630,567
import os
def load_fiveplates_priority(platerun, filling_scheme):
    """Load the fiveplates priority table for a platerun/filling scheme.

    Raises FileNotFoundError when the priority file does not exist.
    Returns an astropy Table with columns 'program' (indexed) and 'order'.
    """
    priority_file = paths.fiveplates_priority(platerun, filling_scheme)
    if not priority_file.exists():
        raise FileNotFoundError(os.fspath(priority_file))

    priority_table = Table.read(os.fspath(priority_file),
                                format='ascii.no_header')
    first_column = priority_table.colnames[0]
    # Record the original row order before renaming the program column.
    priority_table['order'] = list(range(len(priority_table)))
    priority_table.rename_column(first_column, 'program')
    priority_table.add_index('program')
    return priority_table
dd55b8536b33673b40e8b6aaf0170b118bc903d4
3,630,568
def get_main_window():
    """Return the tkinter root window that Porcupine is using."""
    root = _root
    if root is None:
        raise RuntimeError("Porcupine is not running")
    return root
8c4ff2126f3f9c3367214ff1fe186106e15e7f57
3,630,569
def calculate_cluster_spatial_enrichment(all_data, dist_mats, fovs=None,
                                         bootstrap_num=1000, dist_lim=100):
    """Spatial enrichment analysis based on cell phenotypes to find significant interactions between
    different cell types, looking for both positive and negative enrichment. Uses bootstrapping to
    permute cell labels randomly.

    Args:
        all_data: data including points, cell labels, and cell expression matrix for all markers
        dist_mats: A dictionary that contains a cells x cells matrix with the euclidian distance between
            centers of corresponding cells for every fov
        fovs: patient labels to include in analysis. If argument is none, default is all labels used
        bootstrap_num: number of permutations for bootstrap. Default is 1000
        dist_lim: cell proximity threshold. Default is 100

    Returns:
        values: a list with each element consisting of a tuple of closenum and closenumrand for each
            point included in the analysis
        stats: an Xarray with dimensions (points, stats, number of markers, number of markers) The included
            stats variables are:
            z, muhat, sigmahat, p, h, adj_p, and marker_titles for each point in the analysis"""

    # Hard-coded column names of the expression matrix.
    fov_col = "SampleID"
    cell_type_col = "cell_type"
    flowsom_col = "FlowSOM_ID"
    cell_label_col = "cellLabelInImage"

    # Setup input and parameters
    if fovs is None:
        fovs = list(set(all_data[fov_col]))
        num_fovs = len(fovs)
    else:
        num_fovs = len(fovs)
    values = []

    # Error Checking
    if not np.isin(fovs, all_data[fov_col]).all():
        raise ValueError("Points were not found in Expression Matrix")

    # Extract the names of the cell phenotypes
    pheno_titles = all_data[cell_type_col].drop_duplicates()
    # Extract the columns with the cell phenotype codes
    pheno_codes = all_data[flowsom_col].drop_duplicates()
    # Get the total number of phenotypes
    pheno_num = len(pheno_codes)

    # Subset matrix to only include the columns with the patient label, cell label, and cell phenotype
    fov_cluster_data = all_data[[fov_col, cell_label_col, flowsom_col]]

    # Create stats Xarray with the dimensions (points, stats variables,
    # number of markers, number of markers)
    stats_raw_data = np.zeros((num_fovs, 7, pheno_num, pheno_num))
    coords = [fovs, ["z", "muhat", "sigmahat", "p_pos", "p_neg", "h", "p_adj"], pheno_titles, pheno_titles]
    dims = ["points", "stats", "pheno1", "pheno2"]
    stats = xr.DataArray(stats_raw_data, coords=coords, dims=dims)

    for i in range(0, len(fovs)):
        # Subsetting expression matrix to only include patients with correct label
        patient_ids = fov_cluster_data.iloc[:, 0] == fovs[i]
        fov_data = fov_cluster_data[patient_ids]
        # Subset the distance matrix dictionary to only include the distance matrix for the correct point
        dist_mat = dist_mats[str(fovs[i])]

        # Get close_num and close_num_rand
        close_num, pheno1_num, pheno2_num = spatial_analysis_utils.compute_close_cell_num(
            dist_mat=dist_mat, dist_lim=dist_lim, num=pheno_num, analysis_type="Cluster",
            fov_data=fov_data, pheno_codes=pheno_codes)
        # Bootstrap: recompute the close-cell counts under random label permutations.
        close_num_rand = spatial_analysis_utils.compute_close_cell_num_random(
            pheno1_num, pheno2_num, dist_mat, pheno_num, dist_lim, bootstrap_num)
        values.append((close_num, close_num_rand))

        # Get z, p, adj_p, muhat, sigmahat, and h
        stats_xr = spatial_analysis_utils.calculate_enrichment_stats(close_num, close_num_rand)
        stats.loc[fovs[i], :, :] = stats_xr.values
    return values, stats
62cef02346e1461c03a46dad81cb5814010cc8c7
3,630,570
import calendar
import pytz
def epoch(dt):
    """
    Returns the epoch timestamp of a timezone-aware datetime object.
    """
    # Convert to UTC first, then interpret the time tuple as UTC seconds.
    utc_timetuple = dt.astimezone(pytz.utc).timetuple()
    return calendar.timegm(utc_timetuple)
027ea75bf75b6bb6b4da14b2bed1afc363a9121a
3,630,571
def main():
    """Implements the main method running this smoke test."""
    scenario_id = OpenStackSmokeTestScenario.DEFAULT_TEST_ID
    defaults = {
        'TEST_STACK': str(scenario_id),
        'TEST_APP': 'openstack-smoketest' + scenario_id
    }
    return citest.base.TestRunner.main(
        parser_inits=[OpenStackSmokeTestScenario.initArgumentParser],
        default_binding_overrides=defaults,
        test_case_list=[OpenStackSmokeTest])
02e542c2337f9582e38bedec6067c4290d0317f6
3,630,572
def is_pandas_series(value):
    """
    Check if an object is a Pandas Series.

    :param value: object to test
    :return: True if *value* is a pandas.Series, else False
    """
    # Docstring previously (incorrectly) said "DataFrame"; the check is
    # and always was for pd.Series.
    return isinstance(value, pd.Series)
3ea667302f4a60f68569555c650a297e6b7b3a18
3,630,573
from typing import Union
import copy
import numpy
def copy_visibility(vis: Union[Visibility, BlockVisibility], zero=False) -> Union[
    Visibility, BlockVisibility]:
    """Copy a visibility, deep-copying its data array.

    :param vis: Visibility or BlockVisibility
    :param zero: if True, zero out the copied 'vis' data
    :returns: Visibility or BlockVisibility
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis
    duplicate = copy.copy(vis)
    # Only the data array is deep-copied; everything else is shared.
    duplicate.data = numpy.copy(vis.data)
    if isinstance(vis, Visibility):
        duplicate.cindex = vis.cindex
        duplicate.blockvis = vis.blockvis
    if zero:
        duplicate.data['vis'][...] = 0.0
    return duplicate
451d0d365611d3538d8bc3e9d90fc82771df8290
3,630,574
def set_produce_compilation_cache(enabled: bool) -> dict:
    """Forces compilation cache to be generated for every subresource script.

    Parameters
    ----------
    enabled: bool
            **Experimental**
    """
    params = {"enabled": enabled}
    return {"method": "Page.setProduceCompilationCache", "params": params}
3d2dd7fa6c8d04713ace26c666d9b00407a5a586
3,630,575
import argparse
def get_args(strInput=None):
    """
    Collect arguments from command-line, or from strInput if given (only used
    for debugging)
    """
    # NOTE: argparse.FileType previously used mode 'rU'; the 'U' flag was
    # deprecated and removed in Python 3.11, making every open fail with
    # ValueError. Plain 'r' gives the same universal-newline behaviour on
    # Python 3.
    parser = argparse.ArgumentParser(description="This program allows you to run the randomoverlaps3.py script against "
                                                 "multiple variant files automatically for the given UCE files. You "
                                                 "must pass at least one UCE file to the script to run. The script will"
                                                 " use the appropriate genome spacing files for each type, which must be"
                                                 " in the same directory")
    parser.add_argument('file', type=argparse.FileType('r'),
                        help="A file containing a list of paths to the files you want to process, separated by "
                             "newlines")
    parser.add_argument('-c', '--cluster', type=int, help="The cluster size (kb)")
    parser.add_argument('-o', '--output', help="Output file for results [WARNING: Will overwrite any file with the "
                                               "same name in the current directory]")
    parser.add_argument('-a', '--all', type=argparse.FileType('r'),
                        help="A file containing [a]ll UCEs (exonic + intronic + intergenic)")
    parser.add_argument('-e', '--exonic', type=argparse.FileType('r'),
                        help="A file containing [e]xonic UCEs")
    parser.add_argument('-i', '--intronic', type=argparse.FileType('r'),
                        help="A file containing [i]ntronic UCEs")
    parser.add_argument('-t', '--intergenic', type=argparse.FileType('r'),
                        help="A file containing in[t]ergenic UCEs")
    parser.add_argument('-d', '--debug', action='store_true',
                        help="Set logging level of randomoverlaps3.py to debug")
    if strInput:
        return parser.parse_args(strInput.split())
    else:
        return parser.parse_args()
2e8f65a07e9f61a452a96699f94950f0117e86fa
3,630,576
def raw_input(prompt=None):  # real signature unknown; restored from __doc__
    """
    raw_input([prompt]) -> string

    Stub of the Python 2 builtin: documented to read a line from standard
    input (stripping the trailing newline, raising EOFError on EOF, and
    printing *prompt* first if given).  This restored signature performs
    no I/O and simply returns the empty string.
    """
    return ""
ad09db4416e3705a34e4fc88c7df569693608c80
3,630,577
import zlib
import os


def get_checksum32(oqparam):
    """
    Build an unsigned 32 bit integer from the input files of the calculation.

    :param oqparam: an object with an ``inputs`` dict mapping keys to file
        paths (or, for the 'source' key, to a list of paths/strings)
    :returns: running Adler-32 checksum over all input files
    :raises ValueError: if a non-'source' path does not exist
    """
    # NB: using adler32 & 0xffffffff is the documented way to get a checksum
    # which is the same between Python 2 and Python 3
    checksum = 0
    for key in sorted(oqparam.inputs):
        fname = oqparam.inputs[key]
        if not fname:
            continue
        elif key == 'source':  # list of fnames and/or strings
            for f in fname:
                # 'with' guarantees the handle is closed even if read fails
                with open(f, 'rb') as fh:
                    checksum = zlib.adler32(fh.read(), checksum) & 0xffffffff
        elif os.path.exists(fname):
            with open(fname, 'rb') as fh:
                checksum = zlib.adler32(fh.read(), checksum) & 0xffffffff
        else:
            raise ValueError('%s does not exist or is not a file' % fname)
    return checksum
c7aaf8d6aeefaa4b3f97dcac535c959b4ba06579
3,630,578
import asyncio


def create_future(*, loop):
    """
    Helper for `create a new future`_ with backward compatibility for
    Python 3.4, whose event loops lack ``create_future()``.

    .. _create a new future: https://goo.gl/YrzGQ6
    """
    try:
        new_future = loop.create_future()
    except AttributeError:  # pre-3.5.2 loop: fall back to the Future class
        new_future = asyncio.Future(loop=loop)
    return new_future
1708ac124c46fa81b7ff3ca1d7b685e4835cd53a
3,630,579
def log_sum_exp(mat, axis=0):
    """
    Numerically stable log-sum-exp along a chosen axis.

    The per-row maximum is factored out before exponentiating, so exp is
    never applied to a positive number and each reduction contains at
    least one exp(0) term.  For a 2d array and axis=1 this returns
    log(sum_j exp(mat[i, j])).
    """
    peak = mat.max(axis=axis)
    shifted = mat - np.expand_dims(peak, axis=axis)
    return peak + np.log(np.exp(shifted).sum(axis=axis))
a72536b03e58eede19e6d18846b44fb1454891cb
3,630,580
def msd(n_x, yr, min_support):
    """Compute the Mean Squared Difference similarity between all pairs of
    users (or items).

    Only **common** users (or items) are taken into account:

    .. math ::
        \\text{msd}(u, v) = \\frac{1}{|I_{uv}|} \\cdot
        \\sum\\limits_{i \\in I_{uv}} (r_{ui} - r_{vi})^2

    The MSD-similarity is then ``1 / (msd + 1)`` (the ``+ 1`` avoids
    dividing by zero).  Pairs with fewer than ``min_support`` common
    ratings get similarity 0.

    :param n_x: number of users (or items)
    :param yr: dict mapping each y to a list of (x, rating) pairs
    :param min_support: minimum number of common ratings required
    :returns: symmetric (n_x, n_x) similarity matrix
    """
    min_sprt = min_support
    sq_diff = np.zeros((n_x, n_x), np.double)
    # np.int was removed in NumPy 1.24; use the explicit fixed-width dtype.
    freq = np.zeros((n_x, n_x), np.int64)
    sim = np.zeros((n_x, n_x), np.double)

    # Accumulate squared rating differences and co-rating counts.
    for y, y_ratings in yr.items():
        for xi, ri in y_ratings:
            for xj, rj in y_ratings:
                sq_diff[xi, xj] += (ri - rj)**2
                freq[xi, xj] += 1

    for xi in range(n_x):
        sim[xi, xi] = 1  # completely arbitrary and useless anyway
        for xj in range(xi + 1, n_x):
            if freq[xi, xj] < min_sprt:
                # Was 'sim[xi, xj] == 0' (a no-op comparison); assign instead.
                sim[xi, xj] = 0
            else:
                # return inverse of (msd + 1) (+ 1 to avoid dividing by zero)
                sim[xi, xj] = 1 / (sq_diff[xi, xj] / freq[xi, xj] + 1)
            sim[xj, xi] = sim[xi, xj]

    return sim
867989ef28cbce2e4235cb2704ab39cab0eae5f3
3,630,581
import argparse


def create_parser():
    """
    Build the command-line parser (with 'info' and 'convert' subcommands)
    for the Zeus Z80 assembler file converter.
    """
    def _add_zeus_file(subparser):
        # Both subcommands take the same binary Zeus input file argument.
        subparser.add_argument(
            'zeus_file', metavar='zeus-file', type=argparse.FileType('rb', 0),
            help="Input file with Zeus Z80 assembler (usually FILENAME.$C)")

    parser = argparse.ArgumentParser(
        description="Zeus Z80 assembler files converter")
    parser.add_argument(
        '-v', '--verbose', help="Increase output verbosity",
        action='store_true')

    subparsers = parser.add_subparsers(help="Available commands")
    subparsers.required = False

    info_parser = subparsers.add_parser(
        'info',
        help="Show information about the specified Zeus Z80 assembler file")
    _add_zeus_file(info_parser)
    info_parser.set_defaults(func=show_info)

    convert_parser = subparsers.add_parser(
        'convert', help="Convert Zeus Z80 assembler file to a plain text file")
    _add_zeus_file(convert_parser)
    convert_parser.add_argument(
        'output_file', metavar='output-file', type=argparse.FileType('w'),
        help="Path to the output file")
    convert_parser.add_argument(
        '--include-code', dest='include_code', action='store_true',
        help="Include original code in the output file")
    convert_parser.set_defaults(func=convert_file)

    return parser
995b7b2280c13ed5c750186966c4ddffff6944d1
3,630,582
def wrap_deepmind(env, episode_life=True, resize=True, grayscale=True, width=84,
                  height=84, scale=False, clip_rewards=True, frame_stack=True,
                  stack=4):
    """Configure environment for DeepMind-style Atari.

    Wrappers are applied in the standard order: episodic life, fire-reset
    (only for games whose action set contains 'FIRE'), gray/resize,
    scaling, reward clipping, then frame stacking.
    """
    env = EpisodicLifeEnv(env) if episode_life else env
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = GrayResizeEnv(env, resize=resize, grayscale=grayscale, width=width,
                        height=height)
    env = ScaleEnv(env) if scale else env
    env = ClipRewardEnv(env) if clip_rewards else env
    env = StackEnv(env, stack=stack) if frame_stack else env
    return env
83229995b9be22721e386e8162dde49516d4c0b5
3,630,583
def annotate_intersection(sv, elements, filetype='gtf'):
    """
    Intersect SV intervals with genomic elements and annotate each overlap.

    Parameters
    ----------
    sv : pbt.BedTool
        SV breakpoints and CNV intervals
    elements : pbt.BedTool
        Gencode annotations (filetype='gtf') or noncoding elements
    filetype : str
        'gtf' for Gencode input, anything else for noncoding BED input

    Returns
    -------
    pd.DataFrame
        One row per overlap with name/svtype/gene/element/hit/disruption.
    """
    # The first six BED columns describe the SV itself.
    n_bed_fields = 6
    overlaps = sv.intersect(elements, wa=True, wb=True)

    records = []
    for hit in overlaps.intervals:
        variant = hit.fields[:n_bed_fields]
        element = hit.fields[n_bed_fields:]
        variant_id = variant[3]
        svtype = variant[4]

        if filetype == 'gtf':
            # Gencode attributes live in the last GTF column.
            gencode_fields = split_gencode_fields(element[-1])
            gene_name = gencode_fields['gene_name']
            element_type = element[2]
        else:
            gene_name = element[3]
            element_type = 'noncoding'

        hit_type = intersection_type(variant, element, filetype)
        disrupt_type = disruption_type(hit_type, svtype)
        records.append((variant_id, svtype, gene_name, element_type,
                        hit_type, disrupt_type))

    columns = ['name', 'svtype', 'gene_name', 'element_type',
               'hit_type', 'disrupt_type']
    return pd.DataFrame.from_records(records, columns=columns)
4a4b395438c3d1f2c8e1c7fdaccbd3acadbfc6d3
3,630,584
def analyzeGHP(ghp):
    """Analyze this libLF.GitHubProject

    Unpacks the project tarball, instruments its sources so regex usages
    are logged, runs the project's test suite, and collects the regexes
    observed at runtime.

    Returns: (testsPassed, libLF.RegexUsage[])
    """
    regexOutFile = getRegexOutputFileName()
    libLF.log("{}/{} will use dyno regex file {}".format(ghp.owner, ghp.name, regexOutFile))

    libLF.log("Untarring")
    projDir = unpackTarball(ghp)
    libLF.log("Untarred to {}".format(projDir))

    libLF.log("Running preprocessing stage")
    preprocessProject(ghp, projDir)

    libLF.log("Finding source files")
    srcFiles = getSourceFiles(ghp, projDir)
    libLF.log("source files: {}".format(srcFiles))

    libLF.log("Instrumenting source files")
    instrumentSourceFiles(ghp, srcFiles, regexOutFile)

    libLF.log("Running test suite")
    suitePassed = runTestSuite(ghp, projDir)
    if suitePassed:
        libLF.log("Application test suite succeeded")
    else:
        libLF.log("Application test suite failed")

    # Regexes are only collected when the suite passed, unless the
    # module-level REQUIRE_TESTS_PASS flag disables that requirement.
    collected = []
    if suitePassed or not REQUIRE_TESTS_PASS:
        libLF.log("Retrieving regexes from {}".format(regexOutFile))
        collected = retrieveRegexes(regexOutFile)

    libLF.log("Cleaning up untarDir {}".format(projDir))
    cleanUp(projDir, regexOutFile)

    return suitePassed, collected
acc0303706ae87f9be643adb9512e32a16b1cfb3
3,630,585
import re


def convert(name):
    """
    Convert a camelCase identifier to snake_case.

    :param name: the camelCase string to convert
    """
    # First pass splits before an Uppercase-lowercase run (e.g. 'Case'),
    # the second splits a lowercase/digit followed by an uppercase letter.
    first_pass = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    second_pass = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
6a2177023e2f4cdc495aa0525790aeb40ea9d8b8
3,630,586
def normalize_inputs(py_dict: dict) -> dict:
    """Return a new dict with every numpy array value wrapped in a ``Tensor``.

    Non-array values are passed through unchanged; the input dict itself
    is not modified.  (The previous docstring claimed this produced
    "contiguous numpy arrays", which did not match the behavior.)
    """
    return {k: (Tensor(v) if isinstance(v, np.ndarray) else v)
            for k, v in py_dict.items()}
e82d877f98478b2483dca2d6ca8026dba5d87e10
3,630,587
def get_task_definition_arns():
    """List all task definition ARNs.

    NOTE(review): this returns the raw ``list_task_definitions`` response
    dict (ARNs live under its ``taskDefinitionArns`` key), and the API
    paginates -- confirm callers expect the dict and handle pagination.
    """
    ecs_client = get_client("ecs")
    return ecs_client.list_task_definitions()
77b016f0f6911870a5f48edcd57fa90e596e1e8b
3,630,588
def clean_column(df, column):
    """
    Return the non-null values of one dataframe column as a list.

    Each cell is kept as-is; only NaN/None entries are dropped.

    :params:
    --------
        :df dataframe(): containing the column
        :column str(): in which column the text is located

    :returns:
    ---------
        :list(): of str containing the cleaned text
    """
    non_null_values = df[column].dropna().values
    return list(non_null_values)
095a854c452f87b9a960eabb81ace5c18814f266
3,630,589
def get_sheet():
    """
    Get selected sheet or active view if sheet
    :return: Sheet
    :rtype: DB.ViewSheet
    """
    selection = get_selected_by_cat(DB.BuiltInCategory.OST_Sheets, as_list=True)
    if not selection:
        # Nothing selected: fall back to the active view, which must be a sheet.
        active = doc.ActiveView
        if active.ViewType != DB.ViewType.DrawingSheet:
            raise ScriptError("ActiveView is not sheet")  # FIXME
        return active
    if len(selection) > 1:
        raise ScriptError("Please select only one sheet")  # FIXME
    return selection[0]
a82eb2558707c51d22e6262ad89d7520a922a207
3,630,590
from typing import Optional
from typing import Iterable


def augment_cost_function(
    cost_function: CostFunction,
    cost_function_augmentations: Optional[Iterable[FunctionAugmentation]] = None,
    gradient_augmentations: Optional[Iterable[FunctionAugmentation]] = None,
):
    """Augment a function and its gradient.

    Args:
        cost_function: a function to be augmented.
        cost_function_augmentations: augmentations to be applied to
            cost_function itself, in left-to-right order.
        gradient_augmentations: augmentations to be applied to a gradient.
            If the cost_function has no `gradient` attribute, this argument
            is ignored.

    Returns:
        A function with all augmentations applied to it.  The returned
        object keeps every attribute of cost_function (and of its
        gradient, when present); exact behaviour depends on the
        augmentations used and their order.
    """
    if hasattr(cost_function, "gradient"):
        # Augment the gradient first so it can be attached as an override.
        overrides = {
            "gradient": _augment_function(
                cost_function.gradient, gradient_augmentations, {}
            )
        }
    else:
        overrides = {}
    return _augment_function(cost_function, cost_function_augmentations, overrides)
3cf0e1e51d63ccff75f7438ae052a2832fd9b9e3
3,630,591
def edit_name(request, name, editable_authorities):
    """View to edit an existing Name object.

    GET renders bound forms for ``name`` and its inline name-part /
    name-note forms; POST either deletes the name (``submit_delete``),
    saves every valid form, or re-renders with errors.
    ``editable_authorities`` restricts the selectable NameType and
    NamePartType records to authorities the user may edit.
    """
    # Much of the code here is a duplicate or close copy of the code
    # in create_name.
    # Fixed number of blank "new" inline forms to offer on the page.
    number_name_part_forms = 2
    number_name_note_forms = 1
    name_part_forms = []
    name_note_forms = []
    assertion = name.assertion
    authority_records = get_authority_records(assertion.entity,
                                              editable_authorities)
    extra_data = {}
    context_data = {}
    extra_data['name_types'] = NameType.objects.filter(
        authority__in=editable_authorities)
    name_part_types = NamePartType.objects.filter(
        authority__in=editable_authorities)
    if request.method == 'POST':
        # Deletion short-circuits everything else.
        if request.POST.get('submit_delete'):
            return HttpResponseRedirect(reverse(delete_object,
                kwargs={'model_name': 'name', 'object_id': name.id}))
        post_data = request.POST.copy()
        # NameType is limited by the Authority.
        name_form = NameForm(authority_records, extra_data, instance=name,
                             data=post_data)
        # Re-bind the form once the chosen authority is known, so the
        # type choices are narrowed to that authority.
        if name_form.is_valid():
            authority_record = name_form.cleaned_data['authority_record']
            extra_data['name_types'] = NameType.objects.filter(
                authority=authority_record.authority)
            name_form = NameForm(authority_records, extra_data, instance=name,
                                 data=post_data)
            if name_form.is_valid():
                # NamePartType is limited by the Authority and Language.
                language = name_form.cleaned_data['language']
                name_part_types = NamePartType.objects\
                    .filter(system_name_part_type__languages=language)\
                    .filter(authority=authority_record.authority)
    else:
        # GET: prefill from the existing assertion.
        post_data = None
        initial_data = {'authority_record': assertion.authority_record.id,
                        'is_preferred': assertion.is_preferred}
        extra_data['name_type_choices'] = NameType.objects.filter(
            authority=assertion.authority_record.authority)
        name_form = NameForm(authority_records, extra_data, instance=name,
                             initial=initial_data)
        extra_data['name_part_type_choices'] = NamePartType.objects.filter(
            authority=assertion.authority_record.authority)
    # Create the inline forms.
    extra_data['name_part_types'] = name_part_types
    name_part_type_select_ids = []
    # Instanced name part inline forms.
    for name_part in name.name_parts.all():
        prefix = 'name_part_%d' % (name_part.id)
        name_part_form = NamePartForm(extra_data, prefix=prefix,
                                      instance=name_part, data=post_data)
        name_part_forms.append(name_part_form)
        # This is a gross hack. There really ought to be a way to get
        # the ID of the widget.
        name_part_type_select_ids.append(
            'id_%s' % (name_part_form.add_prefix('name_part_type')))
    # New name part inline forms.
    for i in range(1, number_name_part_forms + 1):
        prefix = 'name_part_new_%d' % (i)
        if post_data is not None:
            # Tie the blank form's submitted data to this name.
            post_data['%s-name' % (prefix)] = name.id
        name_part_form = NamePartForm(extra_data, prefix=prefix,
                                      data=post_data)
        name_part_form.is_new_form = True
        name_part_forms.append(name_part_form)
        # This is a gross hack. There really ought to be a way to get
        # the ID of the widget.
        name_part_type_select_ids.append(
            'id_%s' % (name_part_form.add_prefix('name_part_type')))
    # Instance name note inline forms.
    for name_note in name.notes.all():
        prefix = 'name_note_%d' % (name_note.id)
        name_note_form = NameNoteForm(prefix=prefix, instance=name_note,
                                      data=post_data)
        name_note_forms.append(name_note_form)
    # New name note inline forms.
    for i in range(1, number_name_note_forms + 1):
        prefix = 'name_note_new_%d' % (i)
        if post_data is not None:
            post_data['%s-name' % (prefix)] = name.id
        name_note_form = NameNoteForm(prefix=prefix, data=post_data)
        name_note_form.is_new_form = True
        name_note_forms.append(name_note_form)
    if post_data is not None:
        # Validate every form before saving anything, so a single invalid
        # form aborts the whole save.  ('creations' is built but unused.)
        form_data = {'saves': [], 'creations': [], 'deletions': []}
        has_errors = False
        for form in [name_form] + name_part_forms + name_note_forms:
            if form.is_new_form:
                # Blank extra forms only matter if the user filled them in.
                if form.has_post_data():
                    if form.is_valid():
                        if not form.cleaned_data.get('delete'):
                            form_data['saves'].append(form)
                    else:
                        has_errors = True
                        break
            elif form.is_valid():
                if form.cleaned_data.get('delete'):
                    form_data['deletions'].append(form)
                else:
                    form_data['saves'].append(form)
            else:
                has_errors = True
                break
        if not has_errors:
            for form in form_data['saves']:
                form.save()
            for form in form_data['deletions']:
                form.instance.delete()
            # Save changes to the PropertyAssertion.
            assertion.is_preferred = name_form.cleaned_data.get('is_preferred')
            assertion.authority_record = name_form.cleaned_data.get(
                'authority_record')
            assertion.save()
            kw_args = {}
            if request.POST.get('submit_continue'):
                # "Save and continue editing" stays on this name.
                kw_args['model_name'] = 'name'
                kw_args['object_id'] = name.id
            else:
                kw_args['model_name'] = 'entity'
                kw_args['object_id'] = assertion.entity.id
            return HttpResponseRedirect(reverse(edit_model_object,
                                                kwargs=kw_args))
    context_data['assertion'] = assertion
    context_data['authority_records'] = authority_records
    context_data['name'] = name
    context_data['name_form'] = name_form
    # Create mappings between authority ID and name (part) types, for
    # use by the JavaScript select changer.
    context_data['name_type_map'] = map_by_authority(extra_data['name_types'])
    context_data['name_part_type_map'] = map_by_authority(name_part_types)
    context_data['name_part_type_select_ids'] = name_part_type_select_ids
    context_data['name_part_forms'] = name_part_forms
    context_data['name_note_forms'] = name_note_forms
    context_data['dates'] = assertion.dates.all()
    context_data['edit_form'] = True
    return render(request, 'eats/edit/edit_name.html', context_data)
075d6004f2415beaa5e09b529b9df6a0e77b7f89
3,630,592
import math


def create_convolutional_autoencoder_model_2d(input_image_size,
                                              number_of_filters_per_layer=(32, 64, 128, 10),
                                              convolution_kernel_size=(5, 5),
                                              deconvolution_kernel_size=(5, 5)
                                              ):
    """
    Function for creating a 2-D symmetric convolutional autoencoder model.

    Builds an autoencoder based on the specified array defining the
    number of filters in the encoding branch; the last entry is the size
    of the dense bottleneck.  Ported from the Keras python implementation
    here:

            https://github.com/XifengGuo/DEC-keras

    Arguments
    ---------
    input_image_size : tuple
        A tuple defining the shape of the 2-D input image

    number_of_filters_per_layer : tuple
        A tuple defining the number of filters in the encoding branch
        (the final element is the bottleneck dimension)

    convolution_kernel_size : tuple or scalar
        Kernel size for convolution

    deconvolution_kernel_size : tuple or scalar
        Kernel size for deconvolution

    Returns
    -------
    Keras models
        A convolutional encoder and autoencoder Keras model.

    Example
    -------
    >>> autoencoder, encoder = create_convolutional_autoencoder_model_2d((128, 128, 3))
    >>> autoencoder.summary()
    >>> encoder.summary()
    """
    activation = 'relu'
    strides = (2, 2)

    number_of_encoding_layers = len(number_of_filters_per_layer) - 1
    # Each strided conv halves the spatial size, so the overall reduction
    # factor is 2 ** number_of_encoding_layers.
    factor = 2 ** number_of_encoding_layers

    # NOTE(review): only dimension 0 is checked for divisibility by the
    # reduction factor -- confirm non-square inputs behave as intended.
    padding = 'valid'
    if input_image_size[0] % factor == 0:
        padding = 'same'

    inputs = Input(shape = input_image_size)

    encoder = inputs

    for i in range(number_of_encoding_layers):
        local_padding = 'same'
        kernel_size = convolution_kernel_size
        # The deepest encoding layer uses the divisibility-dependent
        # padding and a kernel reduced by 2 in each dimension.
        if i == (number_of_encoding_layers - 1):
            local_padding = padding
            kernel_size = tuple(np.array(convolution_kernel_size) - 2)

        encoder = Conv2D(filters=number_of_filters_per_layer[i],
                         kernel_size=kernel_size,
                         strides=strides,
                         activation=activation,
                         padding=local_padding)(encoder)

    encoder = Flatten()(encoder)
    # Dense bottleneck: the final entry of number_of_filters_per_layer.
    encoder = Dense(units=number_of_filters_per_layer[-1])(encoder)

    autoencoder = encoder

    penultimate_number_of_filters = \
        number_of_filters_per_layer[number_of_encoding_layers-1]
    input_image_size_factored = ((math.floor(input_image_size[0] / factor)),
                                 (math.floor(input_image_size[1] / factor)))
    number_of_units_for_encoder_output = (penultimate_number_of_filters *
                                          input_image_size_factored[0] *
                                          input_image_size_factored[1])

    # Expand the bottleneck back to the pre-flatten feature-map volume.
    autoencoder = Dense(units=number_of_units_for_encoder_output,
                        activation=activation)(autoencoder)
    autoencoder = Reshape(target_shape=(*input_image_size_factored,
                                        penultimate_number_of_filters))(autoencoder)

    for i in range(number_of_encoding_layers, 1, -1):
        local_padding = 'same'
        kernel_size = convolution_kernel_size
        if i == number_of_encoding_layers:
            local_padding = padding
            kernel_size = tuple(np.array(deconvolution_kernel_size) - 2)

        autoencoder = Conv2DTranspose(filters=number_of_filters_per_layer[i-2],
                                      kernel_size=kernel_size,
                                      strides=strides,
                                      activation=activation,
                                      padding=local_padding)(autoencoder)

    # Final transpose conv restores the input channel count; no activation.
    autoencoder = Conv2DTranspose(input_image_size[-1],
                                  kernel_size=deconvolution_kernel_size,
                                  strides=strides,
                                  padding='same')(autoencoder)

    autoencoder_model = Model(inputs=inputs, outputs=autoencoder)
    encoder_model = Model(inputs=inputs, outputs=encoder)

    return([autoencoder_model, encoder_model])
8bc7ad876f67591a81fb14299d6beb09c4e0cf65
3,630,593
from gdsfactory.pdk import GENERIC, get_active_pdk
from typing import Dict
from typing import Callable
import importlib
import warnings


def _from_yaml(
    conf,
    routing_strategy: Dict[str, Callable] = routing_strategy_factories,
    label_instance_function: Callable = add_instance_label,
) -> Component:
    """Returns component from YAML decorated with cell for caching and autonaming.

    Args:
        conf: dict.
        routing_strategy: for each route.
        label_instance_function: to label each instance.
    """
    c = Component()
    instances = {}
    routes = {}

    # Optional top-level sections of the YAML netlist.
    placements_conf = conf.get("placements")
    routes_conf = conf.get("routes")
    ports_conf = conf.get("ports")
    connections_conf = conf.get("connections")
    instances_dict = conf["instances"]
    pdk = conf.get("pdk")
    c.info = conf.get("info", {})

    # Activate the requested PDK (process design kit), if any.
    if pdk and pdk == "generic":
        GENERIC.activate()

    elif pdk:
        module = importlib.import_module(pdk)
        pdk = getattr(module, "PDK")
        # NOTE(review): at this point pdk has been rebound to the getattr
        # result, so the message interpolates None rather than the module
        # name -- confirm the intended wording.
        if pdk is None:
            raise ValueError(f"'from {pdk} import PDK' failed")
        pdk.activate()

    pdk = get_active_pdk()

    # Instantiate every component reference declared in the netlist.
    for instance_name in instances_dict:
        instance_conf = instances_dict[instance_name]
        component = instance_conf["component"]
        settings = instance_conf.get("settings", {})
        component_spec = {"component": component, "settings": settings}
        component = pdk.get_component(component_spec)
        ref = c << component
        instances[instance_name] = ref

    placements_conf = dict() if placements_conf is None else placements_conf

    connections_by_transformed_inst = transform_connections_dict(connections_conf)
    components_to_place = set(placements_conf.keys())
    components_with_placement_conflicts = components_to_place.intersection(
        connections_by_transformed_inst.keys()
    )
    # Warn when an instance is positioned both via a connection and via an
    # explicit x/y placement; the two mechanisms conflict.
    for instance_name in components_with_placement_conflicts:
        placement_settings = placements_conf[instance_name]
        if "x" in placement_settings or "y" in placement_settings:
            warnings.warn(
                f"YAML defined: ({', '.join(components_with_placement_conflicts)}) "
                "with both connection and placement. Please use one or the other.",
            )

    all_remaining_insts = list(
        set(placements_conf.keys()).union(set(connections_by_transformed_inst.keys()))
    )

    # place() consumes entries from all_remaining_insts until done.
    while all_remaining_insts:
        place(
            placements_conf=placements_conf,
            connections_by_transformed_inst=connections_by_transformed_inst,
            instances=instances,
            encountered_insts=[],
            all_remaining_insts=all_remaining_insts,
        )

    for instance_name in instances_dict:
        label_instance_function(
            component=c, instance_name=instance_name, reference=instances[instance_name]
        )

    if routes_conf:
        for route_alias in routes_conf:
            route_names = []
            ports1 = []
            ports2 = []
            routes_dict = routes_conf[route_alias]
            # Reject unknown keys before mutating routes_dict below.
            for key in routes_dict.keys():
                if key not in valid_route_keys:
                    raise ValueError(
                        f"{route_alias!r} key={key!r} not in {valid_route_keys}"
                    )

            settings = routes_dict.pop("settings", {})
            routing_strategy_name = routes_dict.pop("routing_strategy", "get_bundle")
            if routing_strategy_name not in routing_strategy:
                routing_strategies = list(routing_strategy.keys())
                raise ValueError(
                    f"{routing_strategy_name!r} not in routing_strategy {routing_strategies}"
                )

            if "links" not in routes_dict:
                raise ValueError(
                    f"You need to define links for the {route_alias!r} route"
                )
            links_dict = routes_dict["links"]

            for port_src_string, port_dst_string in links_dict.items():
                # A ':' marks a port *range* spec:
                #   "inst,port:start:stop" on both sides of the link.
                if ":" in port_src_string:
                    src, src0, src1 = [s.strip() for s in port_src_string.split(":")]
                    dst, dst0, dst1 = [s.strip() for s in port_dst_string.split(":")]
                    instance_src_name, port_src_name = [
                        s.strip() for s in src.split(",")
                    ]
                    instance_dst_name, port_dst_name = [
                        s.strip() for s in dst.split(",")
                    ]

                    src0 = int(src0)
                    src1 = int(src1)
                    dst0 = int(dst0)
                    dst1 = int(dst1)

                    # Ranges may run up or down; expand to explicit names.
                    if src1 > src0:
                        ports1names = [
                            f"{port_src_name}{i}" for i in range(src0, src1 + 1, 1)
                        ]
                    else:
                        ports1names = [
                            f"{port_src_name}{i}" for i in range(src0, src1 - 1, -1)
                        ]

                    if dst1 > dst0:
                        ports2names = [
                            f"{port_dst_name}{i}" for i in range(dst0, dst1 + 1, 1)
                        ]
                    else:
                        ports2names = [
                            f"{port_dst_name}{i}" for i in range(dst0, dst1 - 1, -1)
                        ]

                    if len(ports1names) != len(ports2names):
                        raise ValueError(f"{ports1names} different from {ports2names}")

                    route_names += [
                        f"{instance_src_name},{i}:{instance_dst_name},{j}"
                        for i, j in zip(ports1names, ports2names)
                    ]

                    instance_src = instances[instance_src_name]
                    instance_dst = instances[instance_dst_name]

                    for port_src_name in ports1names:
                        if port_src_name not in instance_src.ports:
                            raise ValueError(
                                f"{port_src_name!r} not in {list(instance_src.ports.keys())}"
                                f"for {instance_src_name!r} "
                            )
                        ports1.append(instance_src.ports[port_src_name])

                    for port_dst_name in ports2names:
                        if port_dst_name not in instance_dst.ports:
                            raise ValueError(
                                f"{port_dst_name!r} not in {list(instance_dst.ports.keys())}"
                                f"for {instance_dst_name!r}"
                            )
                        ports2.append(instance_dst.ports[port_dst_name])

                else:
                    # Single-port link: "instance,port" on both sides.
                    instance_src_name, port_src_name = port_src_string.split(",")
                    instance_dst_name, port_dst_name = port_dst_string.split(",")

                    instance_src_name = instance_src_name.strip()
                    instance_dst_name = instance_dst_name.strip()
                    port_src_name = port_src_name.strip()
                    port_dst_name = port_dst_name.strip()

                    if instance_src_name not in instances:
                        raise ValueError(
                            f"{instance_src_name!r} not in {list(instances.keys())}"
                        )
                    if instance_dst_name not in instances:
                        raise ValueError(
                            f"{instance_dst_name!r} not in {list(instances.keys())}"
                        )

                    instance_src = instances[instance_src_name]
                    instance_dst = instances[instance_dst_name]

                    if port_src_name not in instance_src.ports:
                        raise ValueError(
                            f"{port_src_name!r} not in {list(instance_src.ports.keys())} for"
                            f" {instance_src_name!r} "
                        )
                    if port_dst_name not in instance_dst.ports:
                        raise ValueError(
                            f"{port_dst_name!r} not in {list(instance_dst.ports.keys())} for"
                            f" {instance_dst_name!r}"
                        )

                    ports1.append(instance_src.ports[port_src_name])
                    ports2.append(instance_dst.ports[port_dst_name])
                    route_name = f"{port_src_string}:{port_dst_string}"
                    route_names.append(route_name)

            routing_function = routing_strategy[routing_strategy_name]
            route_or_route_list = routing_function(
                ports1=ports1,
                ports2=ports2,
                **settings,
            )
            # FIXME, be more consistent
            if isinstance(route_or_route_list, list):
                for route_name, route_dict in zip(route_names, route_or_route_list):
                    c.add(route_dict.references)
                    routes[route_name] = route_dict.length
            elif isinstance(route_or_route_list, Route):
                # NOTE(review): route_name here is the last name produced by
                # the links loop -- confirm this is intended for single routes.
                c.add(route_or_route_list.references)
                routes[route_name] = route_or_route_list.length
            else:
                raise ValueError(f"{route_or_route_list} needs to be a Route or a list")

    if ports_conf:
        if not hasattr(ports_conf, "items"):
            raise ValueError(f"{ports_conf} needs to be a dict")
        for port_name, instance_comma_port in ports_conf.items():
            if "," in instance_comma_port:
                # "instance,port" string: re-export that instance port.
                instance_name, instance_port_name = instance_comma_port.split(",")
                instance_name = instance_name.strip()
                instance_port_name = instance_port_name.strip()
                if instance_name not in instances:
                    raise ValueError(
                        f"{instance_name!r} not in {list(instances.keys())}"
                    )
                instance = instances[instance_name]

                if instance_port_name not in instance.ports:
                    raise ValueError(
                        f"{instance_port_name!r} not in {list(instance.ports.keys())} for"
                        f" {instance_name!r} "
                    )
                c.add_port(port_name, port=instance.ports[instance_port_name])
            else:
                # Otherwise treated as keyword arguments for add_port.
                c.add_port(**instance_comma_port)

    c.routes = routes
    c.instances = instances
    return c
fd50e13c6082dd24ee263d4b84bc5afbe9dcd231
3,630,594
def to_volume(data):
    """Ensure that data is a numpy 3D array.

    2D input gains a leading singleton axis; 4D input must have a leading
    axis of length 1, which is squeezed away.  Any other rank raises
    RuntimeError.
    """
    assert isinstance(data, np.ndarray)
    rank = data.ndim
    if rank == 3:
        volume = data
    elif rank == 2:
        volume = data[np.newaxis, ...]
    elif rank == 4:
        assert data.shape[0] == 1
        volume = np.squeeze(data, axis=0)
    else:
        raise RuntimeError("data must be a numpy 3D array")
    assert volume.ndim == 3
    return volume
d816dc16a1bdd27437d8c907c2dd8b18902edd80
3,630,595
def search_down(*args):
    """
    search_down(sflag) -> bool

    Is the 'SEARCH_DOWN' bit set?

    @param sflag (C++: int)
    """
    # Thin SWIG wrapper: all the work happens in the native module.
    result = _ida_search.search_down(*args)
    return result
c931b2152e8d5cf803aea9b1228fbd38dc8acc20
3,630,596
def assign_number_to_top_categories(paths):
    """Assign numbers to the top categories returned by split_path for
    consistency.

    Each distinct top-category name receives a stable 1-based number in
    order of first appearance; the returned iterator yields paths whose
    head is replaced by (number, name).
    """
    numbering = {}

    def renumber(path):
        top_name = path[0][1]
        # First appearance gets the next free number; later appearances reuse it.
        assigned = numbering.setdefault(top_name, len(numbering) + 1)
        return [(assigned, top_name)] + path[1:]

    return map(renumber, paths)
0027986bd9097819b76ef9358f3fb0b491456b48
3,630,597
def instrument_parameters_odim5(radar, odim_file):
    """
    Builds the dictionary 'instrument_parameters' in the radar instance,
    using the parameter metadata in the input odim5 file.

    Parameters
    ----------
    radar : Radar
        Py-ART radar structure
    odim_file : str
        Complete path and filename of input file

    Returns
    -------
    radar : Radar
        Py-ART radar structure with added 'instrument_parameters'
        dictionary.
    """
    nyquist, prt, prt_mode, prt_ratio, prf_flag = _get_prf_pars_odimh5(
        odim_file, nrays=radar.nrays, nsweeps=radar.nsweeps,
        sw_start_end=radar.get_start_end)

    # Attach the metadata dictionaries directly to the radar object.
    radar.instrument_parameters = {
        'nyquist_velocity': {
            'units': 'meters_per_second',
            'comments': 'Unambiguous velocity',
            'meta_group': 'instrument_parameters',
            'long_name': 'Nyquist velocity',
            'data': nyquist},
        'prt': {
            'units': 'seconds',
            'comments': 'Pulse repetition time. For staggered prt, also see prt_ratio.',
            'meta_group': 'instrument_parameters',
            'long_name': 'Pulse repetition time',
            'data': prt},
        'prt_ratio': {
            'units': 'unitless',
            'meta_group': 'instrument_parameters',
            'long_name': 'Pulse repetition frequency ratio',
            'data': prt_ratio},
        'prt_mode': {
            'comments': 'Pulsing mode Options are: "fixed", "staggered", "dual". Assumed "fixed" if missing.',
            'meta_group': 'instrument_parameters',
            'long_name': 'Pulsing mode',
            'units': 'unitless',
            'data': prt_mode},
        'prf_flag': {
            'units': 'unitless',
            'comments': 'PRF used to collect ray. 0 for high PRF, 1 for low PRF.',
            'meta_group': 'instrument_parameters',
            'long_name': 'PRF flag',
            'data': prf_flag},
    }
    return radar
185838c40a8d8f41dd16dcd92bdb9082396a772f
3,630,598
def topodstostep_DecodeVertexError(*args):
    """
    * Decode a vertex-translation error code into a readable string.
      (Previous docstring was a copy-paste error describing an unrelated
      surface operation.)

    :param E:
    :type E: TopoDSToStep_MakeVertexError
    :rtype: Handle_TCollection_HAsciiString

    """
    # Thin SWIG wrapper around the native OpenCASCADE binding.
    return _TopoDSToStep.topodstostep_DecodeVertexError(*args)
6604b4f834d07ccb5ded4ebd8c573301e72b21ea
3,630,599