content
stringlengths
22
815k
id
int64
0
4.91M
def get_charges_single_serial(path_to_cif, create_cif=False, path_to_output_dir='.', add_string='_charged', use_default_model=True, path_to_pickle_obj='dummy_string'):
    """Compute the partial charges for a single CIF file.

    Returns an ASE atoms object updated with the estimated charges included
    as ``atoms.info['_atom_site_charge']`` and the feature vectors in
    ``atoms.info['features']``. Features for the CIF are calculated in
    serial using NumPy. Options are included for using a different pickled
    sklearn model and for writing an output CIF with the new charges.

    :type path_to_cif: string
    :param path_to_cif: path to the CIF file used as input
    :type create_cif: bool
    :param create_cif: whether to output a new CIF file with the charges added
    :type path_to_output_dir: string
    :param path_to_output_dir: path to the output directory for creating the
        new CIF file
    :type add_string: string
    :param add_string: a string added to the filename to distinguish the
        output CIF file from the original one
    :type use_default_model: bool
    :param use_default_model: whether to use the pre-trained model. If set to
        False you can set the path to a different pickle file using
        'path_to_pickle_obj'.
    :type path_to_pickle_obj: string
    :param path_to_pickle_obj: path to a pickle file containing the
        scikit-learn model one wants to use. Only used if use_default_model
        is set to False.
    :rtype: ase atoms object with charges and features in ``atoms.info``
    """
    import numpy as np
    import joblib
    import os

    # Resolve the model path, then load it once (the original duplicated the
    # joblib.load call in both branches).
    print("Loading the model...")
    if use_default_model:
        # Pre-trained random-forest model bundled with the package.
        this_dir, this_filename = os.path.split(__file__)
        path_to_pickle_obj = os.path.join(this_dir, "data", "Model_RF_DDEC.pkl")
    model = joblib.load(path_to_pickle_obj)

    data = get_features_from_cif_serial(path_to_cif)
    features = data.info['features']

    print("Estimating charges for {}...".format(path_to_cif))
    charges = model.predict(features)

    # Adjust the charges for neutrality: the total excess charge is removed
    # proportionally to each atom's charge magnitude.
    charges_adj = charges - np.sum(charges) * np.abs(charges) / np.sum(np.abs(charges))
    data.info['_atom_site_charge'] = charges_adj.tolist()

    if np.any(np.abs(charges - charges_adj) > 0.2):
        print("WARNING: Some charges were adjusted by more than 0.2 to maintain neutrality!")

    if create_cif:
        print('Writing new cif file...')
        path_to_cif = os.path.abspath(path_to_cif)
        old_name = os.path.basename(path_to_cif)
        # os.path.splitext is robust for filenames that contain extra dots,
        # unlike the original split('.')[-2].
        new_name = os.path.splitext(old_name)[0] + add_string + '.cif'
        path_to_output_dir = os.path.abspath(path_to_output_dir)
        path_to_output_cif = os.path.join(path_to_output_dir, new_name)
        write_cif(path_to_output_cif, data)
    return data
5,330,900
def matchlist(page=1):
    """Render the paginated match list view, newest matches first."""
    matches_query = Match.query.order_by(Match.id.desc())
    page_obj = matches_query.paginate(
        page, current_app.config['MATCHES_PER_PAGE'], False)
    return render_template(
        'matchlist.html',
        matches=page_obj.items,
        pagination=page_obj,
    )
5,330,901
def post_page_files(current_user, pid):
    """Update the files attached to a site page.

    Accepts multipart uploads under the ``file[]`` field, validates each
    file's content type and size, stores accepted files under a fresh UUID
    filename, and records their metadata on ``page.files``. Returns a JSON
    ``Response`` describing the outcome (HTTP 200 on success/partial
    success, 422 when no files were sent).
    """
    try:
        page = SitePages.query.get(pid)
        if request.files.getlist('file[]'):
            page_files = request.files.getlist('file[]')
            # Names of files rejected for size/type, reported back to the user.
            na_files = []
            for pfile in page_files:
                fsize_b = get_fsize(pfile)
                # Alternatively the file could be read into memory up to a
                # size cap, but that breaks the later save because the
                # stream would already be consumed:
                # MAX_FILE_SIZE = 1024 * 1024 + 1
                # file_bytes = file.read(MAX_FILE_SIZE)
                # Size converted with power=2 — presumably megabytes; confirm
                # against formatBytes.
                fsize_mb = formatBytes(fsize_b, power=2)['number']
                # Reject anything that is not docx/odt/pdf/zip/doc or is
                # larger than 10 (MB).
                if ((pfile.content_type not in [
                        'application/vnd.openxmlformats-officedocument'
                        '.wordprocessingml.document',
                        'application/vnd.oasis.opendocument.text',
                        'application/pdf',
                        'application/zip',
                        'application/msword'
                        ]) or (fsize_mb > 10)):
                    na_files.append("«" + pfile.filename + "»")
                else:
                    fsize = formatBytes(fsize_b)
                    extension = pfile.filename.split(".")[-1]
                    separator = ' '
                    # Store under a unique name; the original name is kept
                    # only in the metadata record.
                    new_file_name = uuid.uuid1().hex + '.' + extension
                    ud = {
                        "fid": str(uuid.uuid4().hex),
                        "name": separator.join(pfile.filename.split(".")[:-1]),
                        "size": str(fsize['number']) + ' ' + fsize['measure'],
                        "fname": new_file_name,
                        "extension": extension
                    }
                    page.files.append(ud)
                    pfile.save(
                        os.path.join(
                            current_app.config['CMS_PAGE_FILES'],
                            new_file_name))
            # page.files is a mutated JSON column — SQLAlchemy needs an
            # explicit dirty flag to persist in-place changes.
            flag_modified(page, 'files')
            db.session.commit()
            rtext = 'Файлы добавлены!'
            rtype = 'success'
            if na_files:
                rtext = 'Файлы добавлены, но файлы: '
                separator = ', '
                rtext = rtext + separator.join(na_files)
                rtext = rtext + ' были проигнорированы, т.к. ' \
                    'либо превышен размер, либо не подходящий формат файла.'
                rtype = 'warning'
            response = Response(
                response=json.dumps({'type': rtype, 'text': rtext}),
                status=200,
                mimetype='application/json'
            )
        else:
            response = Response(
                response=json.dumps({'type': 'danger',
                                     'text': 'Вы не отправили'
                                             ' ни одного файла!'}),
                status=422,
                mimetype='application/json'
            )
        return response
    except Exception:
        # Delegate error formatting (with optional debug flag) to the
        # shared handler.
        response = server_error(request.args.get("dbg"))
        return response
5,330,902
def get_data(filename: str) -> pd.DataFrame:
    """Load the CSV file at ``filename`` (e.g. south_sudan_data.csv) into a DataFrame."""
    return pd.read_csv(filename)
5,330,903
def train(
        net: Net,
        trainset: Fer2013Dataset,
        testset: Fer2013Dataset,
        pretrained_model: dict={}):
    """Main training loop and optimization setup.

    Trains ``net`` with SGD + cross-entropy for 20 epochs starting from
    the epoch recorded in ``pretrained_model`` (key ``'epoch'``, default 0).
    Saves model state whenever validation accuracy improves.
    """
    # NOTE(review): mutable default argument ``pretrained_model={}`` — only
    # read via .get() here, so harmless, but worth cleaning up.
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=32, shuffle=True)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    best_test_acc = 0

    def status_update(outputs: Variable, labels: Variable):
        """Print train, validation accuracies along with current loss."""
        nonlocal best_test_acc
        train_acc = evaluate(outputs, labels)
        # Validation runs on the whole test set in a single forward pass.
        test_acc = evaluate(net(testset.X), testset.Y)
        # ``i`` is at least 99 here (callers fire every 100 batches), so the
        # running_loss / i division is safe.
        print('[%d, %5d] loss: %.2f train acc: %.2f val acc: %.2f' %
              (epoch + 1, i + 1, running_loss / i, train_acc, test_acc))
        if test_acc > best_test_acc:
            # Checkpoint only on validation improvement.
            best_test_acc = test_acc
            save_state(epoch, net, optimizer)

    start_epoch = pretrained_model.get('epoch', 0)
    for epoch in range(start_epoch, start_epoch + 20):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs = Variable(data['image'].float())
            labels = Variable(data['label'].long())
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            # NOTE(review): ``loss.data[0]`` and ``Variable`` are legacy
            # (pre-0.4) PyTorch APIs — confirm the pinned torch version.
            running_loss += loss.data[0]
            if i % 100 == 99:
                status_update(outputs, labels)
5,330,904
def test_biblary_file_get(get_bibliography, client):
    """Test the :class:`biblary.views:BiblaryFileView` view ``GET`` method.

    Stores a file for the first bibliography entry, fetches it through the
    ``file`` URL route, and checks status, payload and download headers.
    """
    with get_bibliography() as bibliography:
        content = b'some-content'
        file_type = FileType.MANUSCRIPT
        # Use any existing entry; the first one is as good as any.
        entry = list(bibliography.values())[0]
        url_kwargs = {
            'file_type': file_type.value,
            'identifier': entry.identifier,
        }
        # Put the file into storage so the view has something to serve.
        bibliography.storage.put_file(content, entry, file_type)
        url = reverse('file', kwargs=url_kwargs)
        response = client.get(url)
        assert response.status_code == 200
        assert response.content == content
        assert response.headers['Content-Type'] == 'application/pdf'
        assert response.headers['Content-Disposition'] == f'attachment; filename="{file_type.value}.pdf"'
5,330,905
def add_contact_annotation(annotations, matches):
    """Convert each match dict into a TextContactAnnotation and append it
    to the ``annotations`` list in place."""
    for m in matches:
        span_start = int(m['start'])
        span_end = int(m['end'])
        annotation = TextContactAnnotation(
            start=span_start,
            length=span_end - span_start,
            text=m['word'],
            contact_type=CONTACT_TYPES[m['entity_group']],
            confidence=float(m['score'] * 100),
        )
        annotations.append(annotation)
5,330,906
def digital_PCR( primer_mappings ):
    """ Makes a "digital" PCR by looking at the mappings of primers and predict
    which will produce products, and more important multiple products

    For every LEFT/RIGHT primer pair, scans all genomic mapping positions
    and records product sizes for compatible placements (same chromosome,
    opposite strands, correct orientation, size within
    config.MAX_PRODUCT_SIZE). Returns the nested ``products`` dict:
    products[left_primer][right_primer] -> list of product records.
    """
    primer_names = sorted(primer_mappings.keys())
    nr_primer_names = len( primer_names )
    mappings = {}
    products = {}
    # for i in range(0, nr_primer_names):
    for primer1 in primer_names:
        # primer1 = primer_names[ i ]
        # Skip the full-sequence pseudo-entry and anything that is not a
        # LEFT primer — outer loop iterates LEFT primers only.
        if ( primer1 == 'FULLSEQ'):
            continue
        if ( not re.search(r'LEFT', primer1 )):
            continue
        mappings[ primer1 ] = {}
        products[ primer1 ] = {}
        for primer2 in primer_names:
            # for j in range(0, nr_primer_names):
            # primer2 = primer_names[ j ]
            # Inner loop iterates RIGHT primers only.
            if ( primer2 == 'FULLSEQ'):
                continue
            if ( not re.search(r'RIGHT', primer2 )):
                continue
            mappings[ primer1 ][ primer2 ] = []
            products[ primer1 ][ primer2 ] = []
            multiple_products = 0
            # print " -- %s vs %s" % (primer1, primer2)
            # Cross every mapping position of primer1 with every mapping
            # position of primer2.
            for chr_index1 in range(0, len(primer_mappings[ primer1 ][ 'CHR' ])):
                for chr_index2 in range(0, len(primer_mappings[ primer2 ][ 'CHR' ])):
                    chr1 = primer_mappings[ primer1 ][ 'CHR' ][ chr_index1 ]
                    chr2 = primer_mappings[ primer2 ][ 'CHR' ][ chr_index2 ]
                    pos1 = int( primer_mappings[ primer1 ][ 'POS' ][ chr_index1 ] )
                    pos2 = int( primer_mappings[ primer2 ][ 'POS' ][ chr_index2 ] )
                    strand1 = primer_mappings[ primer1 ][ 'STRAND' ][ chr_index1 ]
                    strand2 = primer_mappings[ primer2 ][ 'STRAND' ][ chr_index2 ]
                    # The primers map to different chromosomes
                    if ( chr1 != chr2 ):
                        continue
                    # the primer are on the same strand.
                    if ( strand1 == strand2 ):
                        continue
                    # Calculate the product size, and check if it is in a doable range
                    product_size = ( pos2 - pos1 )
                    if ( product_size < 0 or product_size > config.MAX_PRODUCT_SIZE):
                        continue
                    # Make sure that the strand is in the right orientation.
                    if ( pos1 < pos2 and strand1 != 'plus' and strand2 != 'minus'):
                        continue
                    elif( pos1 > pos2 and strand1 != 'minus' and strand2 != 'plus'):
                        continue
                    print "%s -- %s %s:%d:%s -> %s:%d:%s ==>> %d bp" %( primer1, primer2, chr1, pos1, strand1, chr2, pos2, strand2, product_size)
                    mappings[ primer1 ][ primer2 ].append( product_size )
                    products[ primer1 ][ primer2 ].append( {'chr' : chr1,
                                                           'start_pos': pos1,
                                                           'end_pos': pos2,
                                                           'size': product_size} )
                    # Bail out early on promiscuous pairs: more than 5
                    # products means the pair is not usable anyway.
                    if ( len(products[ primer1 ][ primer2 ]) > 5):
                        print "Got more than 5 hits... %s -- %s " % ( primer1, primer2 )
                        multiple_products = 1
                        break
                if ( multiple_products ):
                    break
    # pp.pprint( products )
    # pp.pprint( mappings )
    return products
5,330,907
def database_find_user_salt(username:str)->str:
    """
    Finds a users salt from there username

    Parameter:
        username (str): username selected by the user

    Returns:
        salt (str): The users salt from the database

    Example:
        >>> username = 'andrew'
        >>> database_find_user_salt(username)
        '0fYst66bDGTBi97El1rOzdbP0su8NOoAqNyYuekUb4Rav9WyYw6zOtjTqzhTHcn'
    """
    # Parameterized query via the module-level cursor; safe against SQL
    # injection. fetchone() returns None for an unknown username, so the
    # [0] subscript raises TypeError in that case.
    database_cursor.execute(
        "SELECT salt FROM passwords WHERE username=:username",
        {"username": username})
    return database_cursor.fetchone()[0]
5,330,908
def createSimulate(netParams=None, simConfig=None, output=False):
    """
    Wrapper that creates a network from the given parameters and runs the
    simulation in one call.

    Parameters
    ----------
    netParams : <``None``?>
        Network parameters passed through to ``sim.create``.
        **Default:** ``None``

    simConfig : <``None``?>
        Simulation configuration passed through to ``sim.create``.
        **Default:** ``None``

    output : bool
        Whether to return the created network objects and simulation data.
        **Default:** ``False`` returns nothing.
    """
    from .. import sim
    # NOTE(review): ``rxd`` is unpacked here but not included in the return
    # tuple below — confirm that is intentional.
    (pops, cells, conns, stims, rxd, simData) = sim.create(netParams, simConfig, output=True)
    sim.simulate()
    if output:
        return (pops, cells, conns, stims, simData)
5,330,909
def lang_add(cursor, lang, trust):
    """Create a procedural language in the database, TRUSTED when requested."""
    template = 'CREATE TRUSTED LANGUAGE "%s"' if trust else 'CREATE LANGUAGE "%s"'
    # NOTE: the language name is interpolated into the statement directly;
    # callers must supply a vetted identifier.
    cursor.execute(template % lang)
    return True
5,330,910
def interpolate(
    a_x,
    a_q2,
    padded_x,
    s_x,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    Basic Bicubic Interpolation inside the subgrid

    Four Neighbour Knots selects grid knots around each query point to
    make the interpolation: 4 knots on the x axis and 4 knots on the q2
    axis are needed for each point, plus the pdf fvalues there.

    Default bicubic interpolation performs the interpolation itself

    Parameters
    ----------
        a_x: tf.tensor of shape [None]
            query of values of log(x)
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)
        padded_x: tf.tensor of shape [None]
            value for all the knots on the x axis
            padded with one zero at the beginning and one at the end to
            avoid out of range errors when querying points near boundaries
        s_x: tf.tensor of shape []
            size of x knots tensor without padding
        padded_q2: tf.tensor of shape [None]
            value for all the knots on the q2 axis
            padded with one zero at the beginning and one at the end to
            avoid out of range errors when querying points near boundaries
        s_q2: tf.tensor of shape []
            size of q2 knots tensor without padding
        actual_padded: tf.tensor of shape [None,None]
            pdf values: first axis is the flattened padded (q2,x) grid,
            second axis is needed pid column (dimension depends on the query)

    Returns
    -------
        Result of default_bicubic_interpolation — the interpolated pdf
        values for each query point.
    """
    # Locate the 4x4 neighbourhood of knots (and their pdf values) around
    # each (x, q2) query point...
    x_bins, q2_bins, corn_x, corn_q2, pdf_vals = four_neighbour_knots(
        a_x, a_q2, padded_x, padded_q2, actual_padded
    )
    # ...then run the bicubic interpolation over that neighbourhood.
    return default_bicubic_interpolation(
        a_x, a_q2, x_bins, q2_bins, corn_x, corn_q2, pdf_vals, s_x, s_q2
    )
5,330,911
def sobel_mag_thresh(img, ksize, min_thresh, max_thresh):
    """
    Apply Sobel filter along x-axis, y-axis and calculate the magnitude,
    and returns a binary image according to the given thresholds.
    """
    # NOTE(review): the function body is missing — as written this contains
    # only the docstring and implicitly returns None. The implementation
    # (presumably Sobel x/y gradients, magnitude scaling, then thresholding
    # into a binary image) appears to have been lost; recover it from the
    # original source before use.
5,330,912
def setting():
    """ SMS settings for the messaging framework

    REST controller for the SMS settings record: labels the handler field,
    sets CRUD strings, and routes the post-update page to the settings form
    matching the chosen outgoing SMS handler.
    """
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]
    table.outgoing_sms_handler.label = T("Outgoing SMS handler")
    table.outgoing_sms_handler.comment = DIV(DIV(_class="tooltip",
        _title="%s|%s" % (T("Outgoing SMS Handler"),
                          T("Selects what type of gateway to use for outbound SMS"))))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Edit SMS Settings"),
        msg_record_modified = T("SMS settings updated")
    )

    def prep(r):
        # Pre-processing hook for the REST request.
        if r.http == "POST":
            # Go to the details page for the chosen SMS Gateway
            outgoing_sms_handler = request.post_vars.get("outgoing_sms_handler",
                                                         None)
            if outgoing_sms_handler == "WEB_API":
                s3mgr.configure(tablename,
                                update_next = URL(f="api_settings",
                                                  args=[1, "update"]))
            elif outgoing_sms_handler == "SMTP":
                s3mgr.configure(tablename,
                                update_next = URL(f="smtp_to_sms_settings",
                                                  args=[1, "update"]))
            elif outgoing_sms_handler == "MODEM":
                s3mgr.configure(tablename,
                                update_next = URL(f="modem_settings",
                                                  args=[1, "update"]))
            elif outgoing_sms_handler == "TROPO":
                s3mgr.configure(tablename,
                                update_next = URL(f="tropo_settings",
                                                  args=[1, "update"]))
            else:
                # No (or unknown) handler chosen: stay on this settings form.
                s3mgr.configure(tablename,
                                update_next = URL(args=[1, "update"]))
        return True
    response.s3.prep = prep

    # The settings record is a singleton: no deletion, no list-add.
    s3mgr.configure(tablename, deletable=False, listadd=False)
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
5,330,913
def shutdown(proceed: bool = False) -> None:
    """Gets confirmation and turns off the machine.

    Args:
        proceed: Boolean value whether or not to get confirmation.

    Raises:
        KeyboardInterrupt: After the shutdown command is issued, to unwind
            the running assistant.
    """
    if not proceed:
        # Ask the user aloud and listen for a spoken reply.
        speaker.say(f"{choice(confirmation)} turn off the machine?")
        speaker.runAndWait()
        converted = listener(3, 3)
    else:
        converted = 'yes'
    if converted != 'SR_ERROR':
        if any(word in converted.lower() for word in keywords.ok()):
            stop_terminal()
            # macOS-only: shut down through AppleScript / System Events.
            call(['osascript', '-e', 'tell app "System Events" to shut down'])
            raise KeyboardInterrupt
        else:
            # Anything other than an affirmative keeps the machine running.
            speaker.say("Machine state is left intact sir!")
            return
5,330,914
def strip_path():
    """Pre-routing hook that makes "/x/y/z/" resolve the same as "/x/y/z"."""
    path = request.environ['PATH_INFO']
    request.environ['PATH_INFO'] = path.rstrip('/')
5,330,915
def _cachegetter(
    attr: str,
    cachefactory: Callable[[], _CacheT] = WeakKeyDictionary,  # WeakKeyDictionary best for properties
) -> Callable[[_CIT], _CacheT]:
    """Returns a safer attrgetter which constructs the missing object with cachefactory

    May be used for normal methods, classmethods and properties, as default
    factory is a WeakKeyDictionary (good for storing weak-refs for self or
    cls). It may also safely be used with staticmethods, if first parameter
    is an object on which the cache will be stored.

    Better when used with key getter. If it's a tuple, you should use
    e.g. cachefactory=dict

    Example usage with cachetools.cachedmethod:

    class Foo:
        @property
        @cachedmethod(_cachegetter("__bar_cache"))
        def bar(self) -> _RT:
            return 2+3
    """
    def cachegetter(cls_or_obj: _CIT) -> _CacheT:
        # Lazily create the cache on first access and pin it to the target.
        existing = getattr(cls_or_obj, attr, None)
        if existing is not None:
            return existing
        fresh = cachefactory()
        setattr(cls_or_obj, attr, fresh)
        return fresh

    return cachegetter
5,330,916
def __gt__(x1: array, x2: array, /) -> array:
    """
    Note: __gt__ is a method of the array object.
    """
    # Specification stub for the ``x1 > x2`` comparison; per the annotations
    # it takes two arrays (positional-only) and returns an array.
    # Intentionally unimplemented here.
    pass
5,330,917
def change_db_path(new_path: Path, cfg: TodoConfig) -> ErrMsg:
    """Move the todo database to ``new_path`` and update the saved config.

    ``new_path`` must be either a non-existent file path or an existing
    directory; it must not be an existing file. Returns an empty string on
    success, otherwise an error message.
    """
    new_path = new_path.resolve()
    if new_path.is_dir():
        # Inside an existing directory, keep the default database filename.
        new_path = new_path.joinpath(todo_db_name)
    if new_path.exists():
        return f"{new_path} already exists."

    old_path = cfg["db_path"]
    # Copy first; only remove the original after the config has been
    # persisted, so a failure mid-way never loses the database.
    shutil.copyfile(old_path, new_path)

    cfg["db_path"] = new_path.__str__()
    with open(todo_cfg_path, "w", encoding="utf-8") as f:
        json.dump(cfg, f, indent=4, ensure_ascii=False)

    os.remove(old_path)
    return ""
5,330,918
def format_time(time):
    """Format a duration given in seconds as ``DDd HHh MMm SSs``.

    The original docstring claimed a datetime input, but the divmod chain
    below clearly treats the argument as a number of seconds.

    Args:
        time: duration in seconds (int or float).

    Returns:
        A zero-padded string such as ``'01d 02h 03m 04s'``.
    """
    m, s = divmod(time, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    return '{:02d}d {:02d}h {:02d}m {:02d}s'.format(int(d), int(h), int(m), int(s))
5,330,919
def egarch_recursion_python(
    parameters: Float64Array,
    resids: Float64Array,
    sigma2: Float64Array,
    p: int,
    o: int,
    q: int,
    nobs: int,
    backcast: float,
    var_bounds: Float64Array,
    lnsigma2: Float64Array,
    std_resids: Float64Array,
    abs_std_resids: Float64Array,
) -> Float64Array:
    """
    Compute variance recursion for EGARCH models

    Parameters
    ----------
    parameters : ndarray
        Model parameters
    resids : ndarray
        Residuals to use in the recursion
    sigma2 : ndarray
        Conditional variances with same shape as resids
    p : int
        Number of symmetric innovations in model
    o : int
        Number of asymmetric innovations in model
    q : int
        Number of lags of the (transformed) variance in the model
    nobs : int
        Length of resids
    backcast : float
        Value to use when initializing the recursion
    var_bounds : 2-d array
        nobs by 2-element array of upper and lower bounds for conditional
        variances for each time period
    lnsigma2 : ndarray
        Temporary array (overwritten) with same shape as resids
    std_resids : ndarray
        Temporary array (overwritten) with same shape as resids
    abs_std_resids : ndarray
        Temporary array (overwritten) with same shape as resids

    Returns
    -------
    sigma2 : ndarray
        The conditional variances, computed in place.
    """
    for t in range(nobs):
        # ``loc`` walks through ``parameters``: intercept, then p symmetric
        # terms, o asymmetric terms, and q variance lags, in that order.
        loc = 0
        lnsigma2[t] = parameters[loc]
        loc += 1
        # Symmetric terms: |z_{t-1-j}| recentered by E|z| for a standard
        # normal (SQRT2_OV_PI).
        for j in range(p):
            if (t - 1 - j) >= 0:
                lnsigma2[t] += parameters[loc] * (
                    abs_std_resids[t - 1 - j] - SQRT2_OV_PI
                )
            loc += 1
        # Asymmetric (leverage) terms: signed standardized residuals.
        for j in range(o):
            if (t - 1 - j) >= 0:
                lnsigma2[t] += parameters[loc] * std_resids[t - 1 - j]
            loc += 1
        # Lagged log-variance terms; pre-sample values use the backcast.
        for j in range(q):
            if (t - 1 - j) < 0:
                lnsigma2[t] += parameters[loc] * backcast
            else:
                lnsigma2[t] += parameters[loc] * lnsigma2[t - 1 - j]
            loc += 1
        # Cap log-variance to avoid overflow in exp below.
        if lnsigma2[t] > LNSIGMA_MAX:
            lnsigma2[t] = LNSIGMA_MAX
        sigma2[t] = np.exp(lnsigma2[t])
        # Enforce per-period variance bounds; above the upper bound the
        # excess is damped logarithmically rather than hard-clipped.
        if sigma2[t] < var_bounds[t, 0]:
            sigma2[t] = var_bounds[t, 0]
            lnsigma2[t] = np.log(sigma2[t])
        elif sigma2[t] > var_bounds[t, 1]:
            sigma2[t] = var_bounds[t, 1] + np.log(sigma2[t]) - np.log(var_bounds[t, 1])
            lnsigma2[t] = np.log(sigma2[t])
        # Standardized residuals feed the recursion at later time steps.
        std_resids[t] = resids[t] / np.sqrt(sigma2[t])
        abs_std_resids[t] = np.abs(std_resids[t])

    return sigma2
5,330,920
def watermark(start_img, argument, filename):
    """
    Watermarks `start_img` with the logo and position specified in
    `argument`, if any is specified. Default is colors and bottom right
    corner. Then saves the resulting image to `filename` in the same folder
    the script runs from.
    """
    white_bg = Image.open("bg.png")
    # Must calculate ratio from white background, the size looks best that way.
    ratio = calculate_ratio(start_img, white_bg)
    logo = Image.open("logo-" + get_overlay_color(argument) + ".png")
    if get_overlay_color(argument) == "white":
        # White logo needs the background inverted to stay visible.
        white_bg = invert_bg(white_bg)
    # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow releases
    # (replaced by Image.LANCZOS) — confirm the pinned Pillow version.
    white_bg = white_bg.resize(new_overlay_size(white_bg, ratio), Image.ANTIALIAS)
    logo = logo.resize(new_overlay_size(logo, ratio), Image.ANTIALIAS)
    valid_pos_white = valid_overlay_positions(start_img, white_bg)
    valid_pos_logo = valid_overlay_positions(start_img, logo)
    # Transpose the background if another corner that bottom right is selected.
    if get_overlay_position(argument) == "tl":
        white_bg = white_bg.transpose(Image.ROTATE_180)
    elif get_overlay_position(argument) == "tr":
        white_bg = white_bg.transpose(Image.FLIP_TOP_BOTTOM)
    elif get_overlay_position(argument) == "bl":
        white_bg = white_bg.transpose(Image.FLIP_LEFT_RIGHT)
    selected_pos_white = valid_pos_white.get(get_overlay_position(argument))
    selected_pos_logo = valid_pos_logo.get(get_overlay_position(argument))
    """if argument: position = positions.get(argument[0]) or positions['br'] #bottom right is default else: position = positions['br'] #bottom right is default"""
    # Paste background first, then the logo on top; the third argument uses
    # each overlay's own alpha channel as the paste mask.
    start_img.paste(white_bg, selected_pos_white, white_bg)
    start_img.paste(logo, selected_pos_logo, logo)
    start_img.save(filename)
5,330,921
def polpair_tuple2int(polpair, x_orientation=None):
    """
    Convert a tuple pair of polarization strings/integers into a pol-pair
    integer.

    The polpair integer is formed by adding 20 to each standardized
    polarization integer (see polstr2num and AIPS memo 117) and then
    concatenating them. For example, polarization pair
    ('pI', 'pQ') == (1, 2) == 2122.

    Parameters
    ----------
    polpair : tuple, length 2
        A length-2 tuple containing a pair of polarization strings
        or integers, e.g. ('XX', 'YY') or (-5, -5).

    x_orientation: str, optional
        Orientation in cardinal direction east or north of X dipole.
        Default keeps polarization in X and Y basis.

    Returns
    -------
    polpair : int
        Integer representation of polarization pair.
    """
    # Recursive evaluation for a sequence of pairs.
    # Bug fix: propagate x_orientation into the recursive calls (it was
    # silently dropped before).
    if isinstance(polpair, (list, np.ndarray)):
        return [polpair_tuple2int(p, x_orientation=x_orientation) for p in polpair]

    # Check types
    assert isinstance(polpair, tuple), "pol must be a tuple"
    assert len(polpair) == 2, "polpair tuple must have 2 elements"

    # Convert strings to ints if necessary.
    # Bug fix: ``np.str`` was removed in NumPy >= 1.24; isinstance(str) also
    # covers np.str_ scalars, which are str subclasses.
    pol1, pol2 = polpair
    if isinstance(pol1, str):
        pol1 = polstr2num(pol1, x_orientation=x_orientation)
    if isinstance(pol2, str):
        pol2 = polstr2num(pol2, x_orientation=x_orientation)

    # Convert to polpair integer
    ppint = (20 + pol1) * 100 + (20 + pol2)
    return ppint
5,330,922
def compute_features(df):
    """Compute ReScore features.

    For every spectrum in ``df`` (grouped via ``df_to_dict``), compares
    target and predicted b/y-ion intensities — both in the stored log space
    and un-logged ("normal") space — and collects similarity/difference
    statistics into one row per spectrum. Returns a DataFrame with one
    column per entry in ``feature_names`` plus ``spec_id`` and ``charge``.
    """
    preds_dict = df_to_dict(df)

    rescore_features = []
    spec_ids = []
    charges = []

    # Column order here must match the order of entries built into ``feats``
    # inside the loop below.
    feature_names = [
        "spec_pearson_norm",
        "ionb_pearson_norm",
        "iony_pearson_norm",
        "spec_mse_norm",
        "ionb_mse_norm",
        "iony_mse_norm",
        "min_abs_diff_norm",
        "max_abs_diff_norm",
        "abs_diff_Q1_norm",
        "abs_diff_Q2_norm",
        "abs_diff_Q3_norm",
        "mean_abs_diff_norm",
        "std_abs_diff_norm",
        "ionb_min_abs_diff_norm",
        "ionb_max_abs_diff_norm",
        "ionb_abs_diff_Q1_norm",
        "ionb_abs_diff_Q2_norm",
        "ionb_abs_diff_Q3_norm",
        "ionb_mean_abs_diff_norm",
        "ionb_std_abs_diff_norm",
        "iony_min_abs_diff_norm",
        "iony_max_abs_diff_norm",
        "iony_abs_diff_Q1_norm",
        "iony_abs_diff_Q2_norm",
        "iony_abs_diff_Q3_norm",
        "iony_mean_abs_diff_norm",
        "iony_std_abs_diff_norm",
        "dotprod_norm",
        "dotprod_ionb_norm",
        "dotprod_iony_norm",
        "cos_norm",
        "cos_ionb_norm",
        "cos_iony_norm",
        "spec_pearson",
        "ionb_pearson",
        "iony_pearson",
        "spec_spearman",
        "ionb_spearman",
        "iony_spearman",
        "spec_mse",
        "ionb_mse",
        "iony_mse",
        "min_abs_diff_iontype",
        "max_abs_diff_iontype",
        "min_abs_diff",
        "max_abs_diff",
        "abs_diff_Q1",
        "abs_diff_Q2",
        "abs_diff_Q3",
        "mean_abs_diff",
        "std_abs_diff",
        "ionb_min_abs_diff",
        "ionb_max_abs_diff",
        "ionb_abs_diff_Q1",
        "ionb_abs_diff_Q2",
        "ionb_abs_diff_Q3",
        "ionb_mean_abs_diff",
        "ionb_std_abs_diff",
        "iony_min_abs_diff",
        "iony_max_abs_diff",
        "iony_abs_diff_Q1",
        "iony_abs_diff_Q2",
        "iony_abs_diff_Q3",
        "iony_mean_abs_diff",
        "iony_std_abs_diff",
        "dotprod",
        "dotprod_ionb",
        "dotprod_iony",
        "cos",
        "cos_ionb",
        "cos_iony",
    ]

    # Suppress RuntimeWarnings about invalid values
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for spec_id, preds in preds_dict.items():
            spec_ids.append(spec_id)
            charges.append(preds["charge"])

            # Create numpy arrays
            target_b = np.array(preds["target"]["B"])
            target_y = np.array(preds["target"]["Y"])
            target_all = np.concatenate([target_b, target_y])
            prediction_b = np.array(preds["prediction"]["B"])
            prediction_y = np.array(preds["prediction"]["Y"])
            prediction_all = np.concatenate([prediction_b, prediction_y])

            # Un-log the intensities — presumably the stored values are
            # log2(intensity + 0.001); confirm against the pipeline that
            # wrote them.
            target_b_unlog = 2 ** target_b - 0.001
            target_y_unlog = 2 ** target_y - 0.001
            target_all_unlog = 2 ** target_all - 0.001
            prediction_b_unlog = 2 ** prediction_b - 0.001
            prediction_y_unlog = 2 ** prediction_y - 0.001
            prediction_all_unlog = 2 ** prediction_all - 0.001

            # Calculate absolute differences
            abs_diff_b = np.abs(target_b - prediction_b)
            abs_diff_y = np.abs(target_y - prediction_y)
            abs_diff_all = np.abs(target_all - prediction_all)
            abs_diff_b_unlog = np.abs(target_b_unlog - prediction_b_unlog)
            abs_diff_y_unlog = np.abs(target_y_unlog - prediction_y_unlog)
            abs_diff_all_unlog = np.abs(target_all_unlog - prediction_all_unlog)

            # Add features
            feats = np.array(
                [
                    # spec_id,
                    # preds['charge'],
                    # Features between spectra in log space
                    pearsonr(target_all, prediction_all)[0],  # Pearson all ions
                    pearsonr(target_b, prediction_b)[0],  # Pearson b ions
                    pearsonr(target_y, prediction_y)[0],  # Pearson y ions
                    mse(target_all, prediction_all),  # MSE all ions
                    mse(target_b, prediction_b),  # MSE b ions
                    mse(target_y, prediction_y),  # MSE y ions
                    np.min(abs_diff_all),  # min_abs_diff_norm
                    np.max(abs_diff_all),  # max_abs_diff_norm
                    np.quantile(abs_diff_all, 0.25),  # abs_diff_Q1_norm
                    np.quantile(abs_diff_all, 0.5),  # abs_diff_Q2_norm
                    np.quantile(abs_diff_all, 0.75),  # abs_diff_Q3_norm
                    np.mean(abs_diff_all),  # mean_abs_diff_norm
                    np.std(abs_diff_all),  # std_abs_diff_norm
                    np.min(abs_diff_b),  # ionb_min_abs_diff_norm
                    np.max(abs_diff_b),  # ionb_max_abs_diff_norm
                    np.quantile(abs_diff_b, 0.25),  # ionb_abs_diff_Q1_norm
                    np.quantile(abs_diff_b, 0.5),  # ionb_abs_diff_Q2_norm
                    np.quantile(abs_diff_b, 0.75),  # ionb_abs_diff_Q3_norm
                    np.mean(abs_diff_b),  # ionb_mean_abs_diff_norm
                    np.std(abs_diff_b),  # ionb_std_abs_diff_norm
                    np.min(abs_diff_y),  # iony_min_abs_diff_norm
                    np.max(abs_diff_y),  # iony_max_abs_diff_norm
                    np.quantile(abs_diff_y, 0.25),  # iony_abs_diff_Q1_norm
                    np.quantile(abs_diff_y, 0.5),  # iony_abs_diff_Q2_norm
                    np.quantile(abs_diff_y, 0.75),  # iony_abs_diff_Q3_norm
                    np.mean(abs_diff_y),  # iony_mean_abs_diff_norm
                    np.std(abs_diff_y),  # iony_std_abs_diff_norm
                    np.dot(target_all, prediction_all),  # Dot product all ions
                    np.dot(target_b, prediction_b),  # Dot product b ions
                    np.dot(target_y, prediction_y),  # Dot product y ions
                    np.dot(target_all, prediction_all)
                    / (
                        np.linalg.norm(target_all, 2)
                        * np.linalg.norm(prediction_all, 2)
                    ),  # Cos similarity all ions
                    np.dot(target_b, prediction_b)
                    / (
                        np.linalg.norm(target_b, 2)
                        * np.linalg.norm(prediction_b, 2)
                    ),  # Cos similarity b ions
                    np.dot(target_y, prediction_y)
                    / (
                        np.linalg.norm(target_y, 2)
                        * np.linalg.norm(prediction_y, 2)
                    ),  # Cos similarity y ions
                    # Same features in normal space
                    pearsonr(target_all_unlog, prediction_all_unlog)[
                        0
                    ],  # Pearson all ions
                    pearsonr(target_b_unlog, prediction_b_unlog)[0],  # Pearson b ions
                    pearsonr(target_y_unlog, prediction_y_unlog)[0],  # Pearson y ions
                    spearmanr(target_all_unlog, prediction_all_unlog)[
                        0
                    ],  # Spearman all ions
                    spearmanr(target_b_unlog, prediction_b_unlog)[0],  # Spearman b ions
                    spearmanr(target_y_unlog, prediction_y_unlog)[0],  # Spearman y ions
                    mse(target_all_unlog, prediction_all_unlog),  # MSE all ions
                    mse(target_b_unlog, prediction_b_unlog),  # MSE b ions
                    mse(target_y_unlog, prediction_y_unlog),  # MSE y ions,
                    0
                    if np.min(abs_diff_b_unlog) <= np.min(abs_diff_y_unlog)
                    else 1,  # Ion type with min absolute difference
                    0
                    if np.max(abs_diff_b_unlog) >= np.max(abs_diff_y_unlog)
                    else 1,  # Ion type with max absolute difference
                    np.min(abs_diff_all_unlog),  # min_abs_diff
                    np.max(abs_diff_all_unlog),  # max_abs_diff
                    np.quantile(abs_diff_all_unlog, 0.25),  # abs_diff_Q1
                    np.quantile(abs_diff_all_unlog, 0.5),  # abs_diff_Q2
                    np.quantile(abs_diff_all_unlog, 0.75),  # abs_diff_Q3
                    np.mean(abs_diff_all_unlog),  # mean_abs_diff
                    np.std(abs_diff_all_unlog),  # std_abs_diff
                    np.min(abs_diff_b_unlog),  # ionb_min_abs_diff
                    np.max(abs_diff_b_unlog),  # ionb_max_abs_diff_norm
                    np.quantile(abs_diff_b_unlog, 0.25),  # ionb_abs_diff_Q1
                    np.quantile(abs_diff_b_unlog, 0.5),  # ionb_abs_diff_Q2
                    np.quantile(abs_diff_b_unlog, 0.75),  # ionb_abs_diff_Q3
                    np.mean(abs_diff_b_unlog),  # ionb_mean_abs_diff
                    np.std(abs_diff_b_unlog),  # ionb_std_abs_diff
                    np.min(abs_diff_y_unlog),  # iony_min_abs_diff
                    np.max(abs_diff_y_unlog),  # iony_max_abs_diff
                    np.quantile(abs_diff_y_unlog, 0.25),  # iony_abs_diff_Q1
                    np.quantile(abs_diff_y_unlog, 0.5),  # iony_abs_diff_Q2
                    np.quantile(abs_diff_y_unlog, 0.75),  # iony_abs_diff_Q3
                    np.mean(abs_diff_y_unlog),  # iony_mean_abs_diff
                    np.std(abs_diff_y_unlog),  # iony_std_abs_diff
                    np.dot(
                        target_all_unlog, prediction_all_unlog
                    ),  # Dot product all ions
                    np.dot(target_b_unlog, prediction_b_unlog),  # Dot product b ions
                    np.dot(target_y_unlog, prediction_y_unlog),  # Dot product y ions
                    np.dot(target_all_unlog, prediction_all_unlog)
                    / (
                        np.linalg.norm(target_all_unlog, 2)
                        * np.linalg.norm(prediction_all_unlog, 2)
                    ),  # Cos similarity all ions
                    np.dot(target_b_unlog, prediction_b_unlog)
                    / (
                        np.linalg.norm(target_b_unlog, 2)
                        * np.linalg.norm(prediction_b_unlog, 2)
                    ),  # Cos similarity b ions
                    np.dot(target_y_unlog, prediction_y_unlog)
                    / (
                        np.linalg.norm(target_y_unlog, 2)
                        * np.linalg.norm(prediction_y_unlog, 2)
                    ),  # Cos similarity y ions
                ],
                dtype=np.float64,
            )
            rescore_features.append(feats)

    rescore_features = np.vstack(rescore_features)
    rescore_features = pd.DataFrame(rescore_features, columns=feature_names)
    rescore_features["spec_id"] = spec_ids
    rescore_features["charge"] = charges

    return rescore_features
5,330,923
def collection_basic(commodities) -> CommodityCollection:
    """Build a small, fixed commodity collection used for side-effects testing."""
    item_keys = [
        "9999_80_1",
        "9999.10_80_2",
        "9999.20_80_2",
    ]
    return create_collection(commodities, item_keys)
5,330,924
def do_js_minimization(test_function, get_temp_file, data, deadline, threads, cleanup_interval, delete_temp_files): """Javascript minimization strategy.""" # Start by using a generic line minimizer on the test. # Do two line minimizations to make up for the fact that minimzations on bots # don't always minimize as much as they can. for _ in range(2): data = do_line_minimization(test_function, get_temp_file, data, deadline, threads, cleanup_interval, delete_temp_files) tokenizer = AntlrTokenizer(JavaScriptLexer) current_minimizer = js_minimizer.JSMinimizer( test_function, max_threads=threads, deadline=deadline, cleanup_function=process_handler.cleanup_stale_processes, single_thread_cleanup_interval=cleanup_interval, get_temp_file=get_temp_file, delete_temp_files=delete_temp_files, tokenizer=tokenizer.tokenize, token_combiner=tokenizer.combine, progress_report_function=functools.partial(logs.log)) # Some tokens can't be removed until other have, so do 2 passes. try: for _ in range(2): data = current_minimizer.minimize(data) except minimizer.AntlrDecodeError: data = do_line_minimization(test_function, get_temp_file, data, deadline, threads, cleanup_interval, delete_temp_files) # FIXME(mbarbella): Improve the JS minimizer so that this is not necessary. # Sometimes, lines that could not have been removed on their own can now be # removed since they have already been partially cleaned up. return do_line_minimization(test_function, get_temp_file, data, deadline, threads, cleanup_interval, delete_temp_files)
5,330,925
def printMessage(output_format, message):
    """Print ``message``, wrapped as XML when ``output_format`` is "xml"."""
    if output_format != "xml":
        print(message)
    else:
        print(cli_xml.createMessage(message))
5,330,926
def percent_list(part_list, whole_list):
    """Return ``(len(whole_list), share)`` where ``share`` is the percentage
    of the whole NOT accounted for by ``part_list``, rounded to 2 decimals.
    An empty whole list yields ``(0, 0)``."""
    total = len(whole_list)
    if not total:
        return (total, 0)
    covered = 100 * float(len(part_list)) / float(total)
    return (total, round(100 - covered, 2))
5,330,927
def test_graphs():
    """Verify all devices have the correct number of qubits with each degree."""
    test_edges = compute_edges(TestDevice._interaction_patterns)
    for q in (0, 1, 2):
        assert degree(test_edges, q) == 2

    aspen_edges = compute_edges(Aspen._interaction_patterns)
    for qubit in range(16):
        expected = 3 if qubit in {1, 2, 13, 14} else 2
        assert degree(aspen_edges, qubit) == expected

    rochester_edges = compute_edges(Rochester._interaction_patterns)
    degree_one = {51, 52}
    degree_three = {9, 11, 13, 21, 23, 25, 32, 34, 36, 44, 46, 48}
    for qubit in range(53):
        if qubit in degree_one:
            assert degree(rochester_edges, qubit) == 1
        elif qubit in degree_three:
            assert degree(rochester_edges, qubit) == 3
        else:
            assert degree(rochester_edges, qubit) == 2
5,330,928
def corr_heatmap(df):
    """Display a correlation heatmap over every feature of a dataframe.

    :param df: a pandas dataframe
    :return: None (shows the plot as a side effect)
    """
    figure_size = (16, 16)
    plt.figure(figsize=figure_size)
    sns.heatmap(df.corr())
    plt.show()
5,330,929
def disp_calc_helper_NB(adata, min_cells_detected):
    """Estimate per-gene mean and dispersion (method of moments, NB model).

    Parameters
    ----------
    adata
        AnnData-like object; ``.raw`` (if present) or ``.X`` holds the counts,
        ``.uns`` may carry ``lowerDetectedLimit``, ``.obs`` may carry
        ``Size_Factor``.  # assumes X/raw are 2-D cell x gene arrays — TODO confirm
    min_cells_detected
        Genes detected in fewer cells than this are dropped.

    Returns
    -------
    pd.DataFrame with columns ``mu``, ``disp`` and ``gene_id`` for the genes
    passing the detection filter.
    """
    rounded = adata.raw.astype('int') if adata.raw is not None else adata.X
    lowerDetectedLimit = adata.uns['lowerDetectedLimit'] if 'lowerDetectedLimit' in adata.uns.keys() else 1
    nzGenes = (rounded > lowerDetectedLimit).sum(axis=0)
    nzGenes = nzGenes > min_cells_detected

    # maybe we should normalized by Size_Factor anymore if we always normalize
    # the data after calculating size factor?
    x = rounded[:, nzGenes] / adata.obs['Size_Factor'][:, None] if adata.raw is not None else adata.X[:, nzGenes]

    xim = np.mean(1 / adata.obs['Size_Factor']) if 'Size_Factor' in adata.obs.columns else 1

    f_expression_mean = x.mean(axis=0)

    # For NB: Var(Y) = mu * (1 + mu / k) -- solve the moment equations for 1/k.
    f_expression_var = np.mean((x - f_expression_mean) ** 2, axis=0)

    disp_guess_meth_moments = f_expression_var - xim * f_expression_mean
    disp_guess_meth_moments = disp_guess_meth_moments / np.power(f_expression_mean, 2)

    res = pd.DataFrame({"mu": f_expression_mean.squeeze(),
                        "disp": disp_guess_meth_moments.squeeze()})
    # BUG FIX: capture the zero-mean mask BEFORE overwriting 'mu' with None.
    # Previously the second `res.loc[res['mu'] == 0, 'disp'] = None` could
    # never match because those rows' 'mu' had already been set to NaN.
    zero_mu = res['mu'] == 0
    res.loc[zero_mu, 'mu'] = None
    res.loc[zero_mu, 'disp'] = None
    res.loc[res['disp'] < 0, 'disp'] = 0

    res['gene_id'] = adata.var_names[nzGenes]

    return res
5,330,930
def prGreen(skk):
    """Prints Green Text to Console"""
    # ANSI escape 92 = bright green, 00 resets the terminal color.
    print(f"\033[92m{skk} \033[00m")
5,330,931
def update_dependency_options(value):
    """Handle Node dependencies.

    The default value is created upon instantiation of a ZnTrackOption; if a
    new class is created via Instance.load() it does not automatically load
    the default_value Nodes, so we must do this manually here by calling
    update_options recursively on every Node found.
    """
    if isinstance(value, Node):
        value._update_options()
    if isinstance(value, (list, tuple)):
        for member in value:
            update_dependency_options(member)
5,330,932
def print_results(request):
    """Renders the results url, which is a placeholder copy of the root url
    of query interface, where any results are rendered alongside the table
    headers.
    """
    # BUG FIX: query_results was only assigned inside the POST branch, so a
    # plain GET request raised UnboundLocalError at render time.
    query_results = []
    if request.method == "POST":
        form = MetadataForm(request.POST)
        if form.is_valid():
            query_results = Metadata.objects.filter(
                instrument__icontains=form.data["instrument"])
    # A fresh (empty) form is always rendered back to the user.
    form = MetadataForm()
    return render(request, "query.html",
                  {"data": query_results, "queryform": form, "render_table": True})
5,330,933
def capacity_rule(mod, g, p):
    """
    The capacity of projects of the *gen_ret_bin* capacity type is a
    pre-specified number for each of the project's operational periods
    multiplied with 1 minus the binary retirement variable.
    """
    remaining_fraction = 1 - mod.GenRetBin_Retire[g, p]
    return mod.gen_ret_bin_capacity_mw[g, p] * remaining_fraction
5,330,934
def tag_evidence_subtype(
    evidence: Evidence,
) -> Tuple[str, Optional[str]]:
    """Return the (type, subtype) of an evidence object.

    The type is the source API; the subtype is typically the extraction rule
    or database from which the statement was generated (for biopax, the
    database name). Returns (type, None) when the source is not handled.
    """
    source_api = evidence.source_api
    annotations = evidence.annotations

    subtype = None
    if source_api == 'biopax':
        subtype = annotations.get('source_sub_id')
    elif source_api in ('reach', 'eidos'):
        if 'found_by' in annotations:
            from indra.sources.reach.processor import determine_reach_subtype
            if source_api == 'reach':
                subtype = determine_reach_subtype(annotations['found_by'])
            elif source_api == 'eidos':
                subtype = annotations['found_by']
        else:
            logger.debug('Could not find found_by attribute in reach '
                         'statement annotations')
    elif source_api == 'geneways':
        subtype = annotations['actiontype']

    return (source_api, subtype)
5,330,935
def block_deconv_k4s2p1_BN_RELU(in_channel_size, out_channel_size, leaky = 0):
    """Build a ConvTranspose2d(k=4, s=2, p=1, no bias) -> BatchNorm2d ->
    activation block; the activation is ReLU, or LeakyReLU with negative
    slope ``leaky`` when ``leaky`` is non-zero. All layers are in-place
    where applicable.
    """
    activation = (nn.ReLU(inplace=True) if leaky == 0
                  else nn.LeakyReLU(leaky, inplace=True))
    return nn.Sequential(
        nn.ConvTranspose2d(in_channel_size, out_channel_size,
                           kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(out_channel_size),
        activation,
    )
5,330,936
def get_one_organization_by_name(ctx, org_name):
    """Get one Atlas Organization by name.

    Prints "None" if no organization bearing the given name exists."""
    organization = cmd.get_one_organization_by_name(
        client=ctx.obj.client, organization_name=org_name)
    pprint(organization)
5,330,937
def main(
    scope: str,
    secret_name: str,
    secret_value: str
):
    """Store a secret in a Databricks secret scope, creating the scope first
    if it does not already exist.

    Parameters
    ----------
    scope : str
        Scope to use
    secret_name : str
        Name of the secret
    secret_value : str
        Value of the secret
    """
    configuration = Configuration(file_load=True)
    resource_client = ResourceClient(
        host=configuration.DATABRICKS_HOST,
        personal_token=configuration.DATABRICKS_TOKEN
    )
    secrets = Secret(client=resource_client)

    # Create the scope only when it is missing.
    if scope not in secrets.scopes():
        secrets.add_scope(scope=scope)

    secrets.add_secret(
        scope=scope,
        secret_name=secret_name,
        secret_value=secret_value
    )
5,330,938
def demo_super_fast_representative_crop(image, crop_size=64000, display: bool = True):
    """
    Demo for self-supervised denoising using camera image with synthetic noise
    """
    Log.enable_output = True
    Log.set_log_max_depth(5)

    # Normalise and add synthetic Gaussian noise.
    image = normalise(image.astype(numpy.float32))
    image += 0.1 * normal(size=image.shape, scale=0.1)

    def _crop_once():
        return super_fast_representative_crop(
            image, crop_size=crop_size, display_crop=False
        )

    with lsection(f"Computing crop for image of shape: {image.shape}"):
        crop = _crop_once()

    if display:
        import napari

        viewer = napari.Viewer()
        viewer.add_image(image, name='image')
        viewer.add_image(crop, name='crop')
        napari.run()

    lprint(f"Crop size requested: {crop_size} obtained: {crop.size}")
    # The crop is allowed to deviate by a factor of two from the request.
    assert crop.size >= int(crop_size * 0.5) and crop.size <= int(crop_size * 2)
5,330,939
async def start_response(writer, content_type='text/html', status=None,
                         headers=None, exception=None):
    """
    Low level HTTP response. Writes HTTP response.

    :param writer: stream writer the response is written to
    :param content_type: value of the Content-Type header
    :param status: HTTP status code; defaults to ``exception.status`` when an
        exception is given, otherwise 200
    :param headers: optional mapping of extra header name -> value
    :param exception: optional exception carrying a ``status`` attribute
    """
    # BUG FIX: `headers={}` was a mutable default argument.
    if headers is None:
        headers = {}
    if exception and status is None:
        status = exception.status
    elif status is None:
        status = 200
    # BUG FIX: the fallback reason phrase must be bytes -- %b raises
    # TypeError for str, so an unknown status code previously crashed here.
    reason = HTTP_STATUS.get(status, b'NA')
    writer.write(b'HTTP/1.1 %b %b\r\n' % (bytes(str(status), 'ascii'), reason))
    writer.write(b'Content-Type: %b\r\n' % bytes(content_type, 'ascii'))
    for name, value in headers.items():
        # Normalise header values to bytes before writing.
        if isinstance(value, int):
            value = str(value)
        if isinstance(value, str):
            value = bytes(value, 'ascii')
        writer.write(b'%b: %b\r\n' % (bytes(name, 'ascii'), value))
    writer.write(b'\r\n')
    await writer.drain()
5,330,940
def encrypt_uid(user):
    """Obfuscate a user id by XOR-ing it with a configured mask and
    base36-encoding the result."""
    xor_mask = htk_setting('HTK_USER_ID_XOR')
    return int_to_base36(user.id ^ xor_mask)
5,330,941
def createNewVarName(varType):
    """Return a fresh, unique variable name.

    Note: ``varType`` is currently unused in the generated name; uniqueness
    comes from a counter stored on the function itself.
    """
    next_id = createNewVarName.counter + 1
    createNewVarName.counter = next_id
    return "v_{}".format(next_id)
5,330,942
def initialize_parameters(n_a, n_x, n_y):
    """
    Initialize parameters with small random values

    Returns:
    parameters -- python dictionary containing:
        Wax -- Weight matrix multiplying the input, shape (n_a, n_x)
        Waa -- Weight matrix multiplying the hidden state, shape (n_a, n_a)
        Wya -- Weight matrix relating hidden-state to output, shape (n_y, n_a)
        ba -- Hidden bias, shape (n_a, 1)
        by -- Output bias, shape (n_y, 1)
    """
    scale = 0.01  # keep initial weights small
    parameters = {
        "Wax": np.random.randn(n_a, n_x) * scale,  # input to hidden
        "Waa": np.random.randn(n_a, n_a) * scale,  # hidden to hidden
        "Wya": np.random.randn(n_y, n_a) * scale,  # hidden to output
        "ba": np.zeros((n_a, 1)),                  # hidden bias
        "by": np.zeros((n_y, 1)),                  # output bias
    }
    return parameters
5,330,943
def enable_console_log():
    """Enable console logging for all the new loggers and add console
    handlers to all the existing loggers."""
    # pylint: disable=global-statement
    global LOG_TO_CONSOLE
    LOG_TO_CONSOLE = True
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(DEFAULT_LOG_FMT))
    _map_logger(lambda existing_logger: existing_logger.addHandler(handler))
5,330,944
def test_getPeakPositions():
    """
    test getPeakPositions function that returns a pandas dataframe.
    Check for shape of dataframe and column names
    """
    # Restrict to the standard human chromosomes.
    chroms = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
              'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
              'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
              'chrX', 'chrY']
    # Two stranded tasks sharing the SAME peaks.bed file (deliberate, see the
    # drop_duplicates check below).
    tasks = {
        "task0_plus": {
            "strand": 0,
            "task_id": 0,
            "signal": "tests/test_data/single_task/"
                      "stranded_with_controls/task0/plus.bw",
            "control": "tests/test_data/single_task/"
                       "stranded_with_controls/task0/control_plus.bw",
            "peaks": "tests/test_data/single_task/"
                     "stranded_with_controls/task0/peaks.bed"},
        "task0_minus": {
            "strand": 1,
            "task_id": 0,
            "signal": "tests/test_data/single_task/"
                      "stranded_with_controls/task0/minus.bw",
            "control": "tests/test_data/single_task/"
                       "stranded_with_controls/task0/control_minus.bw",
            "peaks": "tests/test_data/single_task/"
                     "stranded_with_controls/task0/peaks.bed"}
    }

    # read the chrom sizes into a dataframe and filter rows from
    # unwanted chromosomes
    chrom_sizes = pd.read_csv('tests/GRCh38_EBV.chrom.sizes', sep='\t',
                              header=None, names=['chrom', 'size'])
    chrom_sizes = chrom_sizes[chrom_sizes['chrom'].isin(chroms)]

    # get peak positions for each task as one dataframe
    peaks_df = sequtils.getPeakPositions(tasks, chroms, chrom_sizes, flank=128,
                                         drop_duplicates=False)

    # check if columns match
    columns = ['chrom', 'pos']
    assert all([a == b for a, b in zip(columns, peaks_df.columns)])

    # check if the shape matches
    assert peaks_df.shape == (48, 2)

    # get peak positions for each task as one dataframe, this time
    # drop duplicates. Since we are using the same peaks.bed file
    # the total number of peak position should be reduced by half
    peaks_df = sequtils.getPeakPositions(tasks, chroms, chrom_sizes, flank=128,
                                         drop_duplicates=True)

    # check if columns match
    columns = ['chrom', 'pos']
    assert all([a == b for a, b in zip(columns, peaks_df.columns)])

    # check if the shape matches
    assert peaks_df.shape == (24, 2)
5,330,945
def secure_request(request, ssl: bool):
    """Attach security-related HTTP headers to a response object.

    :param request: response-like object exposing a mutable ``headers`` mapping
    :param ssl: when True, also add HTTPS-only headers (expect-ct, CSP
        upgrade, HSTS)
    :return: the same object, mutated in place
    """
    # request.headers['Content-Security-Policy'] = "script-src 'self' cdnjs.cloudflare.com ; "
    always_on = {
        'Feature-Policy': "geolocation 'none'; microphone 'none'; camera 'self'",
        'Referrer-Policy': 'no-referrer',
        'x-frame-options': 'SAMEORIGIN',
        'X-Content-Type-Options': 'nosniff',
        'X-Permitted-Cross-Domain-Policies': 'none',
        'X-XSS-Protection': '1; mode=block',
    }
    for header_name, header_value in always_on.items():
        request.headers[header_name] = header_value

    if ssl:
        request.headers['expect-ct'] = 'max-age=60, enforce'
        request.headers["Content-Security-Policy"] = "upgrade-insecure-requests"
        request.headers['Strict-Transport-Security'] = "max-age=60; includeSubDomains; preload"
    return request
5,330,946
def mock_invalid_login_data():
    """Mock invalid login data."""
    target = "homeassistant.components.yessssms.notify.YesssSMS.login_data_valid"
    with patch(target, return_value=False):
        yield
5,330,947
def f_mean(data: pd.DataFrame, tags=None, batch_col=None, phase_col=None):
    """
    Feature: mean

    The arithmetic mean for the given tags in ``tags``, for each unique batch
    in the ``batch_col`` indicator column, and within each unique phase, per
    batch, of the ``phase_col`` column.
    """
    base_name = "mean"
    prepared, tags, output, _ = _prepare_data(data, tags, batch_col, phase_col)
    # Map each tag to its feature-column name, e.g. "temp" -> "temp_mean".
    rename_map = {tag: tag + "_" + base_name for tag in tags}
    feature_names = list(rename_map.values())
    output = prepared.mean()
    return output.rename(columns=rename_map)[feature_names]
5,330,948
def AtariConvInit(kernel_shape, rng, dtype=jnp.float32):
    """The standard init for Conv layers and Atari: uniform samples in
    ``[-std, std]`` where ``std = 1 / sqrt(fan_in * kernel_area)``."""
    filter_height, filter_width, fan_in, _ = kernel_shape
    bound = 1 / jnp.sqrt(fan_in * filter_height * filter_width)
    return random.uniform(rng, kernel_shape, dtype, minval=-bound, maxval=bound)
5,330,949
def extract_fields_from_nest(nest):
    """Extract fields and the corresponding values from a nest if it's
    either a ``namedtuple`` or ``dict``.

    Args:
        nest (nest): a nested structure

    Returns:
        Iterable: an iterator that generates ``(field, value)`` pairs,
        with fields sorted before being returned.

    Raises:
        AssertionError: if the nest is neither ``namedtuple`` nor ``dict``.
    """
    assert is_namedtuple(nest) or isinstance(nest, dict), \
        "Nest {} must be a dict or namedtuple!".format(nest)
    if isinstance(nest, dict):
        for field in sorted(nest.keys()):
            yield field, nest[field]
    else:
        for field in sorted(nest._fields):
            yield field, getattr(nest, field)
5,330,950
def masked_equal(x: numpy.ndarray, value: int):
    # API stub -- intentionally unimplemented; the docstring records how many
    # times this signature was observed in downstream packages.
    """
    usage.dask: 5
    usage.scipy: 4
    """
    ...
5,330,951
def scrape(url):
    """
    Scrapes a url and returns the html using the proper User Agent
    """
    UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
    # NOTE(review): a previous `urllib.quote(url.encode('utf-8'))` call here
    # discarded its return value and therefore had no effect; removed as dead
    # code. If escaping is actually desired, the quoted result must be passed
    # to Request with a `safe` set that preserves the URL structure.
    req = urllib2.Request(url=url, headers={'User-Agent': UA})
    hdl = urllib2.urlopen(req)
    # BUG FIX: close the response handle instead of leaking it.
    try:
        html = hdl.read()
    finally:
        hdl.close()
    return html
5,330,952
def _get_prefixed_values(data, prefix): """Collect lines which start with prefix; with trimming""" matches = [] for line in data.splitlines(): line = line.strip() if line.startswith(prefix): match = line[len(prefix):] match = match.strip() matches.append(match) return matches
5,330,953
def build_k5_graph():
    """Makes a new K5 graph.

    Ref: http://mathworld.wolfram.com/Pentatope.html"""
    graph = UndirectedGraph()

    # K5 has 5 nodes
    for _ in range(5):
        graph.new_node()

    # K5 has 10 edges (every pair of nodes is connected); kept in the
    # original a..j creation order.
    edge_pairs = [(1, 2), (2, 3), (3, 4), (4, 5), (5, 1),
                  (1, 3), (1, 4), (2, 4), (2, 5), (3, 5)]
    for u, v in edge_pairs:
        graph.new_edge(u, v)

    return graph
5,330,954
def bytes_to_msg(seq, standard="utf-8"):
    """Decode bytes to text."""
    decoded = seq.decode(standard)
    return decoded
5,330,955
def test_parses(article):
    """Verify we can parse the document."""
    readability_marker = 'id="readabilityBody"'
    assert readability_marker in article.readable
5,330,956
def cross_validate(
    estimator,
    input_relation: Union[str, vDataFrame],
    X: list,
    y: str,
    metric: Union[str, list] = "all",
    cv: int = 3,
    pos_label: Union[int, float, str] = None,
    cutoff: float = -1,
    show_time: bool = True,
    training_score: bool = False,
    **kwargs,
):
    """
---------------------------------------------------------------------------
Computes the K-Fold cross validation of an estimator.

Parameters
----------
estimator: object
    Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
    Relation to use to train the model.
X: list
    List of the predictor columns.
y: str
    Response Column.
metric: str/list, optional
    Metric used to do the model evaluation. It can also be a list of metrics.
        all: The model will compute all the possible metrics.
    For Classification:
        accuracy    : Accuracy
        auc         : Area Under the Curve (ROC)
        best_cutoff : Cutoff which optimised the ROC Curve prediction.
        bm          : Informedness = tpr + tnr - 1
        csi         : Critical Success Index = tp / (tp + fn + fp)
        f1          : F1 Score
        logloss     : Log Loss
        mcc         : Matthews Correlation Coefficient
        mk          : Markedness = ppv + npv - 1
        npv         : Negative Predictive Value = tn / (tn + fn)
        prc_auc     : Area Under the Curve (PRC)
        precision   : Precision = tp / (tp + fp)
        recall      : Recall = tp / (tp + fn)
        specificity : Specificity = tn / (tn + fp)
    For Regression:
        aic    : Akaike's information criterion
        bic    : Bayesian information criterion
        max    : Max error
        mae    : Mean absolute error
        median : Median absolute error
        mse    : Mean squared error
        msle   : Mean squared log error
        r2     : R-squared coefficient
        r2a    : R2 adjusted
        rmse   : Root-mean-squared error
        var    : Explained variance
cv: int, optional
    Number of folds.
pos_label: int/float/str, optional
    The main class to be considered as positive (classification only).
cutoff: float, optional
    The model cutoff (classification only).
show_time: bool, optional
    If set to True, the time and the average time will be added to the report.
training_score: bool, optional
    If set to True, the training score will be computed with the validation score.

Returns
-------
tablesample
    An object containing the result. For more information, see
    utilities.tablesample.
    """
    # Normalise a single predictor name into a list.
    if isinstance(X, str):
        X = [X]
    check_types(
        [
            ("X", X, [list],),
            ("input_relation", input_relation, [str, vDataFrame],),
            ("y", y, [str],),
            ("metric", metric, [str, list],),
            ("cv", cv, [int, float],),
            ("cutoff", cutoff, [int, float],),
        ]
    )
    if isinstance(input_relation, str):
        input_relation = vdf_from_relation(input_relation, cursor=estimator.cursor)
    if cv < 2:
        raise ParameterError("Cross Validation is only possible with at least 2 folds")
    # Pick the full metric list matching the estimator's category.
    if category_from_model_type(estimator.type)[0] == "regressor":
        all_metrics = [
            "explained_variance",
            "max_error",
            "median_absolute_error",
            "mean_absolute_error",
            "mean_squared_error",
            "root_mean_squared_error",
            "r2",
            "r2_adj",
            "aic",
            "bic",
        ]
    elif category_from_model_type(estimator.type)[0] == "classifier":
        all_metrics = [
            "auc",
            "prc_auc",
            "accuracy",
            "log_loss",
            "precision",
            "recall",
            "f1_score",
            "mcc",
            "informedness",
            "markedness",
            "csi",
        ]
    else:
        raise Exception(
            "Cross Validation is only possible for Regressors and Classifiers"
        )
    if metric == "all":
        final_metrics = all_metrics
    elif isinstance(metric, str):
        final_metrics = [metric]
    else:
        final_metrics = metric
    # One column per fold will be added to these result dictionaries.
    result = {"index": final_metrics}
    if training_score:
        result_train = {"index": final_metrics}
    total_time = []
    # Optional progress bar over the folds.
    if verticapy.options["tqdm"] and (
        "tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])
    ):
        from tqdm.auto import tqdm

        loop = tqdm(range(cv))
    else:
        loop = range(cv)
    for i in loop:
        # Drop any leftover model from a previous fold before refitting.
        try:
            estimator.drop()
        except:
            pass
        random_state = verticapy.options["random_state"]
        # NOTE(review): random.randint is called with float bounds
        # (-10e6, 10e6); on Python 3 non-integer args raise — confirm this
        # path is exercised / intended to be int(1e7) bounds.
        random_state = (
            random.randint(-10e6, 10e6) if not (random_state) else random_state + i
        )
        train, test = input_relation.train_test_split(
            test_size=float(1 / cv), order_by=[X[0]], random_state=random_state
        )
        start_time = time.time()
        estimator.fit(
            train, X, y, test,
        )
        total_time += [time.time() - start_time]
        if category_from_model_type(estimator.type)[0] == "regressor":
            if metric == "all":
                result["{}-fold".format(i + 1)] = estimator.regression_report().values[
                    "value"
                ]
                if training_score:
                    # Re-score against the training relation itself.
                    estimator.test_relation = estimator.input_relation
                    result_train[
                        "{}-fold".format(i + 1)
                    ] = estimator.regression_report().values["value"]
            elif isinstance(metric, str):
                result["{}-fold".format(i + 1)] = [estimator.score(metric)]
                if training_score:
                    estimator.test_relation = estimator.input_relation
                    result_train["{}-fold".format(i + 1)] = [estimator.score(metric)]
            else:
                result["{}-fold".format(i + 1)] = [estimator.score(m) for m in metric]
                if training_score:
                    estimator.test_relation = estimator.input_relation
                    result_train["{}-fold".format(i + 1)] = [
                        estimator.score(m) for m in metric
                    ]
        else:
            # Classifier: resolve / validate the positive label first.
            if (len(estimator.classes_) > 2) and (pos_label not in estimator.classes_):
                raise ParameterError(
                    "'pos_label' must be in the estimator classes, it must be the main class to study for the Cross Validation"
                )
            elif (len(estimator.classes_) == 2) and (
                pos_label not in estimator.classes_
            ):
                pos_label = estimator.classes_[1]
            try:
                if metric == "all":
                    result["{}-fold".format(i + 1)] = estimator.classification_report(
                        labels=[pos_label], cutoff=cutoff
                    ).values["value"][0:-1]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train[
                            "{}-fold".format(i + 1)
                        ] = estimator.classification_report(
                            labels=[pos_label], cutoff=cutoff
                        ).values[
                            "value"
                        ][
                            0:-1
                        ]
                elif isinstance(metric, str):
                    result["{}-fold".format(i + 1)] = [
                        estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
                    ]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train["{}-fold".format(i + 1)] = [
                            estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
                        ]
                else:
                    result["{}-fold".format(i + 1)] = [
                        estimator.score(m, pos_label=pos_label, cutoff=cutoff)
                        for m in metric
                    ]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train["{}-fold".format(i + 1)] = [
                            estimator.score(m, pos_label=pos_label, cutoff=cutoff)
                            for m in metric
                        ]
            except:
                # Fallback: score without the pos_label argument.
                if metric == "all":
                    result["{}-fold".format(i + 1)] = estimator.classification_report(
                        cutoff=cutoff
                    ).values["value"][0:-1]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train[
                            "{}-fold".format(i + 1)
                        ] = estimator.classification_report(cutoff=cutoff).values[
                            "value"
                        ][
                            0:-1
                        ]
                elif isinstance(metric, str):
                    result["{}-fold".format(i + 1)] = [
                        estimator.score(metric, cutoff=cutoff)
                    ]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train["{}-fold".format(i + 1)] = [
                            estimator.score(metric, cutoff=cutoff)
                        ]
                else:
                    result["{}-fold".format(i + 1)] = [
                        estimator.score(m, cutoff=cutoff) for m in metric
                    ]
                    if training_score:
                        estimator.test_relation = estimator.input_relation
                        result_train["{}-fold".format(i + 1)] = [
                            estimator.score(m, cutoff=cutoff) for m in metric
                        ]
    # Clean up the last fitted model.
    try:
        estimator.drop()
    except:
        pass
    # Aggregate per-metric values across all folds.
    n = len(final_metrics)
    total = [[] for item in range(n)]
    for i in range(cv):
        for k in range(n):
            total[k] += [result["{}-fold".format(i + 1)][k]]
    if training_score:
        total_train = [[] for item in range(n)]
        for i in range(cv):
            for k in range(n):
                total_train[k] += [result_train["{}-fold".format(i + 1)][k]]
    result["avg"], result["std"] = [], []
    if training_score:
        result_train["avg"], result_train["std"] = [], []
    for item in total:
        result["avg"] += [statistics.mean([float(elem) for elem in item])]
        result["std"] += [statistics.stdev([float(elem) for elem in item])]
    if training_score:
        for item in total_train:
            result_train["avg"] += [statistics.mean([float(elem) for elem in item])]
            result_train["std"] += [statistics.stdev([float(elem) for elem in item])]
    # Append mean and stdev of per-fold fit times to the time column.
    total_time += [
        statistics.mean([float(elem) for elem in total_time]),
        statistics.stdev([float(elem) for elem in total_time]),
    ]
    result = tablesample(values=result).transpose()
    if show_time:
        result.values["time"] = total_time
    if training_score:
        result_train = tablesample(values=result_train).transpose()
        if show_time:
            result_train.values["time"] = total_time
    if training_score:
        return result, result_train
    else:
        return result
5,330,957
def plot_tsne(embedding, labels, phase="train"):
    """Function to plot tsne

    Args:
        embedding (float Tensor): Embedding of data. Batch Size x Embedding Size
        labels (int): Ground truth.
        phase (str, optional): Is the plot for train data or validation data
            or test data? Defaults to "train".

    Raises:
        Exception: if ``phase`` is not one of "train"/"val"/"test".
    """
    # Project embeddings down to 2-D.
    X_tsne = TSNE(n_components=2).fit_transform(embedding)
    tsne_x = X_tsne[:, 0]
    tsne_y = X_tsne[:, 1]
    # Sort points (and labels) by label so the legend groups cleanly.
    tsne_x = sort_together([labels, tsne_x])[1]
    tsne_y = sort_together([labels, tsne_y])[1]
    labels = sort_together([labels, labels])[1]
    # Plotly marker symbol codes, one per class.
    sym = [0, 1, 4, 24, 5, 3, 17, 13, 26, 20]
    # CIFAR-10 class-id -> name mapping.
    classes = {
        0: "plane",
        1: "car",
        2: "bird",
        3: "cat",
        4: "deer",
        5: "dog",
        6: "frog",
        7: "horse",
        8: "ship",
        9: "truck",
    }
    class_label = [classes[i] for i in labels]
    df = pd.DataFrame(
        list(zip(tsne_x, tsne_y, class_label)), columns=["x", "y", "Class"]
    )
    fig = px.scatter(
        df,
        x="x",
        y="y",
        color="Class",
        symbol="Class",
        symbol_sequence=sym,
        hover_name=class_label,
        labels={"color": "Class"},
    )
    # Log to Weights & Biases when enabled (g is a module-level config/state
    # object -- assumed; TODO confirm).
    if g.wandb_log:
        if phase == "train":
            wandb.log({"t-SNE": fig, "epoch": g.epoch_global})
        elif phase == "val":
            wandb.log({"t-SNE Eval": fig, "epoch": g.epoch_global})
        elif phase == "test":
            wandb.log({"t-SNE Test": fig, "epoch": g.epoch_global})
        else:
            raise Exception("Invalid data split!!")
    # Also save a PNG locally when offline logging is enabled.
    if g.log_offline:
        if phase == "train":
            fig.write_image(f"{g.log_dir}/metrics/tsne.png")
        elif phase == "val":
            fig.write_image(f"{g.log_dir}/metrics/tsneEval.png")
        elif phase == "test":
            fig.write_image(f"{g.log_dir}/metrics/tsneTest.png")
        else:
            raise Exception("Invalid data split!!")
5,330,958
def bonferroni_correction(pvals):
    """
    Bonferroni correction.

    Reference: http://en.wikipedia.org/wiki/Bonferroni_correction
    """
    count = len(pvals)
    corrected = []
    for p_value in pvals:
        # Scale by the number of tests, capping at 1.0.
        corrected.append(min(p_value * count, 1.0))
    return corrected
5,330,959
def to_r4(fhir_json: JsonObj, opts: Namespace, ifn: str) -> JsonObj:
    """ Convert the FHIR Resource in "o" into the R4 value notation
    :param fhir_json: FHIR resource
    :param opts: command line parser arguments
    :param ifn: input file name
    :return: reference to "o" with changes applied.  Warning: object is NOT
        copied before change
    """
    server = opts.fhirserver  # If absent, the FILE becomes the base of the context

    def is_choice_element(name):
        # TODO: we really do need to be a lot more clever if this is to scale in the
        # longer term. For now, we assume that valueX is a choice unless it is an
        # exception
        return name.startswith(VALUE_KEY) and name[VALUE_KEY_LEN:] and \
            name[VALUE_KEY_LEN].isupper() and \
            name[VALUE_KEY_LEN:] not in ['Set']

    def map_element(element_key: str, element_value: Any, container_type: str,
                    path: List[str], container: JsonObj,
                    id_map: Optional[Dict[str, str]] = None,
                    in_container: bool = False) -> None:
        """ Transform element_value into the R4 RDF json structure
        :param element_key: Key for element value
        :param element_value: Element itself. Can be any JSON object
        :param container_type: The type of the containing resource
        :param path: The path from the containing resource down to the element
            excluding element_key
        :param container: Dictionary that contains key/value
        :param id_map: Map from local resource to URI if inside a bundle
        :param in_container: True means don't tack the resource type onto the
            identifier
        """
        if element_key.startswith('@'):  # Ignore JSON-LD components
            return
        # Choice elements (valueX) contribute their own path, pushed by the caller.
        if not is_choice_element(element_key):
            path.append(element_key)
        if path == ['Coding', 'system']:
            add_type_arc(container)
        inner_type = local_name(getattr(container, RESOURCETYPE_KEY, None))
        if isinstance(element_value, JsonObj):
            # Inner object -- process each element
            dict_processor(element_value, resource_type, path, id_map)
        elif isinstance(element_value, list):
            # List -- process each member individually
            container[element_key] = list_processor(element_key, element_value,
                                                    resource_type, path, id_map)
        # We have a primitive JSON value
        elif element_key == RESOURCETYPE_KEY and not element_value.startswith('fhir:'):
            container[element_key] = 'fhir:' + element_value
            container['@context'] = f"{opts.contextserver}{element_value.lower()}.context.jsonld"
        elif element_key == ID_KEY:
            # Internal ids are relative to the document
            if in_container or getattr(container, RESOURCETYPE_KEY, None) is None:
                relative_id = '#' + element_value
            else:
                relative_id = element_value if element_value.startswith('#') else \
                    ((inner_type or container_type) + '/' + element_value)
            container_id = id_map.get(relative_id, relative_id) if id_map else relative_id
            if not hasattr(container, '@id'):
                # Bundle ids have already been added elsewhere
                container['@id'] = container_id
            container[element_key] = to_value(element_value)
        elif element_key not in [NODEROLE_KEY, INDEX_KEY, DIV_KEY]:
            # Convert most other nodes to value entries
            container[element_key] = to_value(element_value)
            if not isinstance(element_value, list):
                add_type_arcs(element_key, container[element_key], container,
                              path, opts, server, id_map)
        if not is_choice_element(element_key):
            path.pop()

    def dict_processor(container: JsonObj, resource_type: Optional[str] = None,
                       path: List[str] = None,
                       id_map: Optional[Dict[str, str]] = None,
                       in_container: bool = False) -> None:
        """ Process the elements in container
        :param container: JSON dictionary to be processed
        :param resource_type: type of resource that container appears in
        :param path: Full path from the base resource type to the actual element
        :param id_map: Map from local resource to URI if inside a bundle
        :param in_container: If True then don't tack they type onto the identifier
        """
        # Rule: Whenever we find an embedded resourceType, we assume that we've
        # encountered a brand new resource. Update the passed resource type
        # (example: container is Observation, we're processing the subject node
        # and the inner resourceType is Patient)
        #
        # Note: If there isn't a declared resourceType, it may be able to be
        # extracted from the URL if the URL matches the predefined FHIR structure
        if hasattr(container, RESOURCETYPE_KEY):
            resource_type = container[RESOURCETYPE_KEY]
            path = [resource_type]
            # If we've got bundle, build an id map to use in the interior
            id_map = bundle_urls(container)  # Note that this will also assign ids to bundle entries
            # Add any contained resources to the contained URL map
            add_contained_urls(container, id_map)

        # Process each of the elements in the dictionary
        # Note: use keys() and re-look up to prevent losing the JsonObj
        # characteristics of the values
        for k in [k for k in as_dict(container).keys() if not k.startswith('_')]:
            if is_choice_element(k):
                map_element(k, container[k], resource_type, [k[VALUE_KEY_LEN:]],
                            container, id_map, in_container)
            else:
                map_element(k, container[k], resource_type, path, container,
                            id_map, in_container)

        # Merge any extensions (keys that start with '_') into the base
        # This happens when either:
        #   A) there is only an extension and no base
        #   B) there is a base, but it isn't a JSON object
        for ext_key in [k for k in as_dict(container).keys() if k.startswith('_')]:
            base_key = ext_key[1:]
            ext_value = container[ext_key]
            del(container[ext_key])
            if not hasattr(container, base_key):
                container[base_key] = ext_value  # No base -- move the extension in
            elif not isinstance(container[base_key], JsonObj):
                container[base_key] = to_value(container[base_key])  # Base is not a JSON object
                container[base_key]['extension'] = ext_value['extension'] \
                    if isinstance(ext_value, JsonObj) else ext_value
            else:
                container[base_key]['extension'] = ext_value['extension']
            map_element(base_key, ext_value, EXTENSION_RESOURCE_TYPE,
                        [EXTENSION_RESOURCE_TYPE], container, id_map)

    def list_processor(list_key: str, list_object: List[Any], resource_type: str,
                       path: List[str] = None,
                       id_map: Optional[Dict[str, str]] = None) -> List[Any]:
        """ Process the elements in the supplied list adding indices and doing an
        iterative transformation on the interior nodes
        :param list_key: JSON key at the start of the list
        :param list_object: List to be processed
        :param resource_type: The type of resource containing the list
        :param path: JSON path to list element. Head of path is the root
            resource type
        :param id_map: Map from local resource to URI if inside a bundle
        :return Ordered list of entries
        """
        def list_element(entry: Any, pos: int) -> Any:
            """ Add a list index to list element "e"
            :param entry: Element in a list
            :param pos: position of element
            :return: adjusted object
            """
            if isinstance(entry, JsonObj):
                dict_processor(entry, resource_type, path, id_map,
                               list_key == CONTAINED_KEY)
                if getattr(entry, INDEX_KEY, None) is not None and \
                        '_' not in opts.fsv.flat_path(path):
                    print(f'{ifn} - problem: "{list_key}: {opts.fsv.flat_path(path)}" element {pos} already has an index')
                else:
                    entry.index = pos  # Add positioning
                if list_key == CODING_KEY:
                    add_type_arc(entry)
            elif isinstance(entry, list):
                print(f"{ifn} - problem: {list_key} has a list in a list")
            else:
                entry = to_value(entry)
                add_type_arcs(list_key, entry, entry, path, opts, server, id_map)
                entry.index = pos
            return entry

        return [list_element(list_entry, pos)
                for pos, list_entry in enumerate(list_object)]

    # =========================
    # Start of to_r4 base code
    # =========================
    # Do the recursive conversion
    resource_type = fhir_json[RESOURCETYPE_KEY]  # Pick this up before it processed for use in context below
    dict_processor(fhir_json)

    # Add nodeRole
    fhir_json['nodeRole'] = "fhir:treeRoot"

    # Traverse the graph adjusting relative URL's
    adjust_urls(fhir_json)

    # Add the "ontology header"
    hdr = JsonObj()
    if '@id' in fhir_json:
        hdr["@id"] = fhir_json['@id'] + ".ttl"
        hdr["owl:versionIRI"] = (opts.versionbase +
                                 ('' if opts.versionbase[-1] == '/' else '') +
                                 hdr['@id']) if opts.versionbase else hdr["@id"]
        hdr["owl:imports"] = "fhir:fhir.ttl"
        hdr["@type"] = 'owl:Ontology'
        fhir_json["@included"] = hdr
    else:
        print(f"{ifn} does not have an identifier")

    # Fill out the rest of the context
    if opts.addcontext:
        fhir_json['@context'] = [f"{opts.contextserver}{resource_type.lower()}.context.jsonld"]
        fhir_json['@context'].append(f"{opts.contextserver}root.context.jsonld")
        local_context = JsonObj()
        local_context["nodeRole"] = JsonObj(**{"@type": "@id", "@id": "fhir:nodeRole"})
        if server:
            local_context["@base"] = server
        local_context['owl:imports'] = JsonObj(**{"@type": "@id"})
        local_context['owl:versionIRI'] = JsonObj(**{"@type": "@id"})
        fhir_json['@context'].append(local_context)

    return fhir_json
5,330,960
def make_fib():
    """Build a stateful generator of Fibonacci numbers.

    Each call to the returned function yields the next Fibonacci number,
    starting from 0.

    >>> fib = make_fib()
    >>> fib()
    0
    >>> fib()
    1
    >>> fib()
    1
    >>> fib()
    2
    >>> fib()
    3
    >>> fib2 = make_fib()
    >>> fib() + sum([fib2() for _ in range(5)])
    12
    """
    # Hold the (current, next) pair in a mutable cell so the inner
    # function can advance it on every call.
    state = {'pair': (0, 1)}

    def next_fib():
        current, upcoming = state['pair']
        state['pair'] = (upcoming, current + upcoming)
        return current

    return next_fib
5,330,961
def find_issue(case):
    """Find the issue sentence for a given case.

    :param case: case file name; ".txt" is appended when missing
    :return: "The issue is " followed by the whitespace-normalized text
        found after a line starting with "THE ISSUE"
    """
    if ".txt" not in case:
        case += ".txt"
    issue, switch = "", False
    # Fix: the original opened the file and never closed it; a context
    # manager guarantees the handle is released.  errors="replace"
    # tolerates malformed bytes in the case files.
    with codecs.open(os.path.join(BASE_DIR, case), encoding="utf-8",
                     errors="replace") as f:
        for line in f:
            if line.startswith("THE ISSUE"):
                switch = True
            elif switch and line.strip() != "" and line == line.upper():
                # An all-uppercase, non-blank line marks the next section
                # heading: stop collecting.
                break
            elif switch:
                issue += line
    return "The issue is " + " ".join(issue.split())
5,330,962
def render_injected(http_resp, extra_html):
    """
    render_injected(http_resp, extra_html) -> HttpResponse

    Inject ``extra_html`` into the content of ``http_resp``, immediately
    before the closing body tag.  ``extra_html`` can be a string or an
    object with an ``html`` method/field.
    """
    assert isinstance(http_resp, HttpResponse)

    # Only HTML responses are modified.
    if 'text/html' not in http_resp.get('content-type', ''):
        return http_resp

    # Find the last closing body tag (either case) to anchor the injection.
    body = http_resp.content
    insert_at = -1
    for closing_tag in ('</body>', '</BODY>'):
        insert_at = body.rfind(closing_tag)
        if insert_at != -1:
            break
    if insert_at == -1:
        # No </body>: nothing to anchor to, return unchanged.
        return http_resp

    # Resolve the extra html: attribute, possibly callable.
    if hasattr(extra_html, 'html'):
        extra_html = extra_html.html
        if callable(extra_html):
            extra_html = extra_html()

    http_resp.content = body[:insert_at] + extra_html + body[insert_at:]
    return http_resp
5,330,963
def prepare_image_folders(args):
    """
    Prepares directory structure given below.

    data/
        prepared/       -> 1400
            train/      -> 896 total
                bear/       -> 224
                elephant/   -> 224
                leopard/    -> 224
                zebra/      -> 224
            test/       -> 280 total
                bear/       -> 70
                elephant/   -> 70
                leopard/    -> 70
                zebra/      -> 70
            val/        -> 224 total
                bear/       -> 56
                elephant/   -> 56
                leopard/    -> 56
                zebra/      -> 56
    """
    prep_path = os.path.join(cfg.DATA_DIR, 'prepared')

    # Recreate train/val/test folders, one subfolder per category.
    for split in ('train', 'val', 'test'):
        split_path = os.path.join(prep_path, split)
        if os.path.exists(split_path):
            shutil.rmtree(split_path)  # remove if already exists
        for category in cfg.CATEGORIES:
            os.makedirs(os.path.join(split_path, category))

    for category in cfg.CATEGORIES:
        category_path = os.path.join(cfg.DATA_DIR, 'animals', category)
        file_names = list(os.listdir(category_path))

        # 80/20 split into train/test, then 80/20 of train into train/val.
        train_names, test_names = train_test_split(
            file_names, test_size=0.2, random_state=args.seed)
        train_names, val_names = train_test_split(
            train_names, test_size=0.2, random_state=args.seed)

        print("{:<10s} -> Divided into {} train / {} val / {} test"
              .format(category, len(train_names), len(val_names),
                      len(test_names)), end='')

        # Copy every file of each split into its destination folder.
        split_map = {'train': train_names, 'val': val_names, 'test': test_names}
        for split, names in split_map.items():
            for file_name in names:
                shutil.copyfile(
                    os.path.join(category_path, file_name),
                    os.path.join(prep_path, split, category, file_name))

        print(" -> Copied!")
5,330,964
def _extract_username(filename):
    """Return username (if found) from the cifs credentials file.

    :param filename: path to a credentials file containing "key=value" lines
    :return: stripped username string, or None when the file is missing or
        contains no username line
    """
    if not os.path.exists(filename):
        logger.warning("Cifs credentials file %s does not exist", filename)
        return None
    # Fix: the original opened the file without ever closing it; a context
    # manager releases the handle deterministically.
    with open(filename) as cred_file:
        for line in cred_file:
            if ("username" in line) and ("=" in line):
                return line.split("=")[1].strip()
    return None
5,330,965
def check_call(*popenargs, **kwargs):
    """Call a process and check result code.

    Note: This catches the error, and makes it nicer, and an error exit. So
    this is for tooling only.

    Note: We use same name as in Python stdlib, violating our rules to make
    it more recognizable what this does.
    """
    try:
        subprocess.check_call(*popenargs, **kwargs)
    except OSError:
        # Most likely the executable is not present on this system.
        message = "Error, failed to execute '%s'. Is it installed?" % popenargs[0]
        general.sysexit(message)
5,330,966
def wait_for_tasks_to_complete(
        table_service, batch_client, entity_pk, entity_rk, job_id):
    """
    Returns when all tasks in the specified job reach the Completed state.

    Polls the Azure Batch job every 5 seconds, mirroring per-task progress
    into 'AnalysisQueryEntity' table rows and overall progress into the
    'AnalysisEntity' row identified by (entity_pk, entity_rk).
    """
    while True:
        entity = table_service.get_entity(
            'AnalysisEntity', entity_pk, entity_rk)
        tasks = batch_client.task.list(job_id)
        # NOTE(review): task.list likely returns a paged iterator; it is
        # consumed by three comprehensions plus the for-loop below — confirm
        # it is re-iterable, otherwise later passes would see no tasks.
        # The 'JobManager' task is excluded from the progress bookkeeping.
        incomplete_tasks = [task for task in tasks
                            if task.id != 'JobManager'
                            and task.state != batchmodels.TaskState.completed]
        complete_tasks = [task for task in tasks
                          if task.id != 'JobManager'
                          and task.state == batchmodels.TaskState.completed]
        # A completed task counts as failed on a non-zero exit code or any
        # scheduling error.
        failed_tasks = [task for task in complete_tasks
                        if task.execution_info.exit_code != 0
                        or task.execution_info.scheduling_error is not None]
        # Per-task query rows share the analysis entity's RowKey as their
        # partition key.
        queries = table_service.query_entities(
            'AnalysisQueryEntity',
            filter="PartitionKey eq '{}'".format(entity.RowKey))
        current_batch_count = 0
        updateBatch = TableBatch()
        for task in tasks:
            matching_queries = [q for q in queries if q.RowKey == task.id]
            if not matching_queries:
                print('Could not find query {}'.format(task.id))
                continue
            query = matching_queries[0]
            update = False
            state = get_query_state(task)
            if query._State != state:
                query._State = state
                update = True
            # Record start/end timestamps exactly once per task.
            if task.state == batchmodels.TaskState.running:
                if not hasattr(query, 'StartTime'):
                    query.StartTime = task.execution_info.start_time
                    update = True
            if task.state == batchmodels.TaskState.completed:
                if not hasattr(query, 'EndTime'):
                    query.EndTime = task.execution_info.end_time
                    update = True
            if update:
                updateBatch.update_entity(query)
                current_batch_count += 1
            # Azure table batches are capped at 100 operations; flush at 99.
            if current_batch_count == 99:
                table_service.commit_batch('AnalysisQueryEntity', updateBatch)
                current_batch_count = 0
                updateBatch = TableBatch()
        if current_batch_count > 0:
            table_service.commit_batch('AnalysisQueryEntity', updateBatch)
        all_tasks_complete = not incomplete_tasks
        any_failures = len(failed_tasks) > 0
        entity.CompletedTasks = len(complete_tasks)
        entity._State = get_analysis_state(all_tasks_complete, any_failures)
        if not incomplete_tasks:
            # Everything finished: stamp the end time and stop polling.
            entity.EndTime = datetime.datetime.utcnow()
            table_service.update_entity('AnalysisEntity', entity)
            return
        else:
            table_service.update_entity('AnalysisEntity', entity)
        time.sleep(5)
5,330,967
def mock_gitlab_api_projects(save=None, mergerequests_list=None):
    """A pseudo mock"""
    def get(*args, **kwargs):
        # Fabricate a Project whose save / mergerequests.list behave as
        # injected by the caller.
        project = Mock('gitlab.v4.objects.Project')
        project.save = save
        project.mergerequests = Mock(
            'gitlab.v4.objects.ProjectMergeRequestManager')
        project.mergerequests.list = mergerequests_list
        return project

    # Patch away the real Gitlab client while the API wrapper is built.
    with patch('gitlab.Gitlab'):
        gitlab = GitlabAPI(uri=None, token=None, insecure=False)
        gitlab.api.projects = Mock('gitlab.v4.objects.ProjectManager')
        gitlab.api.projects.get = get
        return gitlab.api
5,330,968
def main():
    """See module docstring at the top of this file."""
    # Configure the blob detector: shape filters off, one large blob expected.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByColor = False
    params.filterByConvexity = False
    params.filterByInertia = False
    params.maxArea = 50000.0
    params.minThreshold = 1
    params.maxThreshold = 255
    params.minDistBetweenBlobs = 200
    detector = cv2.SimpleBlobDetector_create(params)

    input_dir = os.path.normpath(sys.argv[1]) + os.sep
    output_dir = input_dir + 'cropped/'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    cropped_num = 0
    for filename in sorted(os.listdir(input_dir)):
        if not filename.endswith('JPG'):
            continue
        print(filename)
        img = cv2.imread(input_dir + filename)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        keypoints = detector.detect(gray)
        # Only images with exactly one detected feature are cropped.
        if len(keypoints) != 1:
            print('found ' + str(len(keypoints)) + ' features in ' + filename)
            continue
        cx, cy = keypoints[0].pt
        img_cropped = img[
            int(cy) - PIXELS_FROM_CENTER : int(cy) + PIXELS_FROM_CENTER,
            int(cx) - PIXELS_FROM_CENTER : int(cx) + PIXELS_FROM_CENTER
        ]
        cv2.imwrite(output_dir + '%04d.jpg' % cropped_num, img_cropped)
        cropped_num += 1
5,330,969
def safety(session: Session) -> None:
    """Scan PROD dependencies for insecure packages."""
    # Production-only scan: dev dependencies are deliberately excluded.
    install_with_constraints(
        session,
        include_dev=False,
        callback=safety_check,
        packages=["safety"],
    )
5,330,970
def text_in_bytes(text, binary_data, encoding="utf-8"):
    """Return True if the text can be found in the decoded binary data."""
    decoded = binary_data.decode(encoding)
    return text in decoded
5,330,971
def RASGeo2Shp(RAS_geo_file, output_folder):
    """
    Extracts centerline and cross-sections from a HEC-RAS geometry file
    into two ESRI shapefiles.

    Parameters
    ----------
    RAS_geo_file : str
        Path to the HEC-RAS geometry file to parse.
    output_folder : str
        Folder that will receive the generated shapefiles.

    Returns
    -------
    Two shapefiles, one containing centerlines ("*_CL.shp") and another
    containing cross-sections ("*_XS.shp"); the function itself returns
    None.
    """
    try:
        g_filename = os.path.basename(RAS_geo_file).split(".")[0]
        out_file_Xs = os.path.join(output_folder, g_filename + "_XS.shp")
        out_file_CL = os.path.join(output_folder, g_filename + "_CL.shp")
        # Avoid clobbering existing outputs by appending a counter suffix.
        ctr = 1
        while(os.path.exists(out_file_Xs)):
            logging.warning("Output Xs file already exists: renaming file")
            out_file_Xs = os.path.join(output_folder, g_filename + str(ctr) + "_XS.shp")
            ctr = ctr + 1
        ctr = 1
        while(os.path.exists(out_file_CL)):
            logging.warning("Output CL file already exists: renaming file")
            out_file_CL = os.path.join(output_folder, g_filename + str(ctr) + "_CL.shp")
            ctr = ctr + 1
        # LOAD RAS GEOMTERY AND GET CRS
        RAS_geo_obj = prg.ParseRASGeo(RAS_geo_file)
        logging.info("Extracting projection system")
        # The EPSG code is read from the "GIS Projection Zone=<code>" text
        # entry of the parsed geometry list.
        epsg_code = [item.strip().split('=')[1] for item in RAS_geo_obj.geo_list
                     if type(item) == str if "GIS Projection Zone" in item][0]
        # CREATE SHAPEFILES (cross-section layer: id + river + reach fields)
        layerFields = qgis.core.QgsFields()
        layerFields.append(qgis.core.QgsField('Xs_ID', QVariant.Double))
        layerFields.append(qgis.core.QgsField('River', QVariant.String))
        layerFields.append(qgis.core.QgsField('Reach', QVariant.String))
        Xs_file_writer = qgis.core.QgsVectorFileWriter(
            out_file_Xs, 'UTF-8', layerFields, QgsWkbTypes.LineStringZM,
            QgsCoordinateReferenceSystem('EPSG:' + epsg_code), 'ESRI Shapefile')
        # Centerline layer: river + reach fields only.
        layerFields_CL = qgis.core.QgsFields()
        layerFields_CL.append(qgis.core.QgsField('River', QVariant.String))
        layerFields_CL.append(qgis.core.QgsField('Reach', QVariant.String))
        CL_file_writer = qgis.core.QgsVectorFileWriter(
            out_file_CL, 'UTF-8', layerFields_CL, QgsWkbTypes.LineStringZM,
            QgsCoordinateReferenceSystem('EPSG:' + epsg_code), 'ESRI Shapefile')
        # LOAD XS in CREATED SHAPEFILE
        for Xs in RAS_geo_obj.get_cross_sections():
            logging.info("Processing cross-section: " + str(Xs.header.station.value)
                         + " River: " + Xs.river + " Reach: " + Xs.reach)
            # make polyline from cutline
            cutline_feat = QgsFeature()
            cutline_x = [float(x[0]) for x in Xs.cutline.points]
            cutline_y = [float(x[1]) for x in Xs.cutline.points]
            cutline_point_list = [QgsPoint(cutline_x[i], cutline_y[i])
                                  for i in range(len(cutline_x))]
            cutline_feat.setGeometry(QgsGeometry.fromPolyline(cutline_point_list))
            # Add 3D points to cutline: Z carries elevation, M carries station.
            Xs_pt_list = []
            # get first pt separately (coincides with the cutline start)
            Xs_pt = QgsPoint(cutline_x[0], cutline_y[0])
            Xs_pt.addZValue()
            Xs_pt.setZ(Xs.sta_elev.points[0][1])
            Xs_pt.addMValue()
            Xs_pt.setM(Xs.sta_elev.points[0][0])
            Xs_pt_list.append(Xs_pt)
            # loop through rest of the points, interpolating the XY position
            # along the cutline at each station distance
            for sta_elev in Xs.sta_elev.points[1:-1]:
                Xs_pt = QgsPoint(cutline_feat.geometry().interpolate(sta_elev[0]).asPoint())
                Xs_pt.addZValue()
                Xs_pt.setZ(sta_elev[1])
                Xs_pt.addMValue()
                Xs_pt.setM(sta_elev[0])
                Xs_pt_list.append(Xs_pt)
                # print(sta_elev)
            # get last pt separately (coincides with the cutline end)
            Xs_pt = QgsPoint(cutline_x[-1], cutline_y[-1])
            Xs_pt.addZValue()
            Xs_pt.setZ(Xs.sta_elev.points[-1][1])
            Xs_pt.addMValue()
            Xs_pt.setM(Xs.sta_elev.points[-1][0])
            Xs_pt_list.append(Xs_pt)
            Xs_feat = QgsFeature()
            Xs_feat.setGeometry(QgsGeometry.fromPolyline(Xs_pt_list))
            Xs_feat.setAttributes([Xs.header.station.value, Xs.river, Xs.reach])
            Xs_file_writer.addFeature(Xs_feat)
        # LOAD REACHES INTO CL SHAPEFILE
        for cur_CL in RAS_geo_obj.get_reaches():
            CL_feat = QgsFeature()
            CL_x = [float(x[0]) for x in cur_CL.geo.points]
            CL_y = [float(x[1]) for x in cur_CL.geo.points]
            CL_point_list = [QgsPoint(CL_x[i], CL_y[i]) for i in range(len(CL_x))]
            CL_feat.setGeometry(QgsGeometry.fromPolyline(CL_point_list))
            CL_feat.setAttributes([cur_CL.header.river_name, cur_CL.header.reach_name])
            CL_file_writer.addFeature(CL_feat)
        # Deleting the writers flushes and closes the shapefiles.
        del(Xs_file_writer)
        del(CL_file_writer)
        logging.info("Extraction complete for: " + RAS_geo_file.split("\\")[-1])
    except:
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt) and hides the cause — consider
        # `except Exception:` with logging.exception instead.
        logging.error("Error in extracting geometry")
5,330,972
def make_auth(sub, tenant=None):
    """
    Prepare an almost-valid JWT token header, suitable for consumption by
    our identity middleware (needs sub and optionally mender.tenant claims).

    The token carries a valid base64-encoded payload between a bogus header
    and a bogus signature — enough for the identity middleware to pick the
    identity and database; with no gateway in the test setup the signature
    is never verified. When 'tenant' is given, the 'mender.tenant' claim is
    added.
    """
    claims = {"sub": sub}
    if tenant is not None:
        claims["mender.tenant"] = tenant
    encoded_claims = b64encode(json.dumps(claims).encode("utf-8")).decode()
    token = ".".join(["bogus_header", encoded_claims, "bogus_sign"])
    return {"Authorization": "Bearer " + token}
5,330,973
def available_mem(cores, mem, fmtstring=True):
    """Calculate available memory for a process.

    Params:
      cores (int): number of cores
      mem (str): set memory as string with conversion (M, G, g)
      fmtstring (bool): return memory as formatted string
    """
    mem_str = str(mem)
    # Pull the unit suffix off the request; falls back to "G" only when the
    # string does not start with digits (a digits-only value yields "").
    prefix = "G"
    match = re.match("[0-9]+([a-zA-Z]*)", mem_str)
    if match:
        prefix = match.group(1)
    per_core_request = int(re.sub("[a-zA-Z]*", "", mem_str))
    usable_cores = min(cores, available_cpu_count())
    # Cap at whichever is smaller: system memory per core, or the request.
    total = min(usable_cores * mem_per_core(prefix),
                usable_cores * per_core_request)
    return "{}{}".format(total, prefix) if fmtstring else total
5,330,974
def normalize_v(v):
    """
    Normalize velocity to [-1, 1].
    Ref: https://github.com/microsoft/AirSim-Drone-Racing-VAE-Imitation/blob/e651be52ff8274c9f595e88b13fe42d51302403d/racing_utils/dataset_utils.py#L20
    """
    # Input ranges per component (v_x, v_y, v_z, v_yaw), each mapped
    # linearly onto [-1, 1].  The array is modified in place.
    component_ranges = ((-1, 7), (-3, 3), (-3, 3), (-1, 1))
    if len(v.shape) == 1:
        # 1D vector of velocities
        for i, (lo, hi) in enumerate(component_ranges):
            v[i] = 2.0 * (v[i] - lo) / (hi - lo) - 1.0
    elif len(v.shape) == 2:
        # 2D batch of velocity vectors
        for i, (lo, hi) in enumerate(component_ranges):
            v[:, i] = 2.0 * (v[:, i] - lo) / (hi - lo) - 1.0
    else:
        raise Exception('Error in data format of V shape: {}'.format(v.shape))
    return v

# Note: The version used in Shuang's code base is below, which should be equivalent to the above version.
# self.targets[:, 0] = 2. * (self.targets[:, 0] + 1.) / (7. + 1.) - 1.
# self.targets[:, 1] = 2. * (self.targets[:, 1] + 3.) / (3. + 3.) - 1.
# self.targets[:, 2] = 2. * (self.targets[:, 2] + 3.) / (3. + 3.) - 1.
# self.targets[:, 3] = 2. * (self.targets[:, 3] + 1.) / (1. + 1.) - 1.
5,330,975
def test_sftp_fetcher_load_system_keys_fails(tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
    """Test the sftp fetcher when SSHClient loading of system host keys fails."""
    def failing_load_system_host_keys():
        # Simulate the OS refusing access to the system host keys.
        raise OSError('stuff')

    fetcher = cache.FetcherFactory.get_fetcher(
        tmp_trestle_dir, 'sftp://username:password@some.host/path/to/file.json')
    monkeypatch.setattr(SSHClient, 'load_system_host_keys', failing_load_system_host_keys)
    # The failure must surface as a TrestleError from the cache update.
    with pytest.raises(err.TrestleError):
        fetcher._update_cache()
5,330,976
def volume_restricted_metadata_delete(context, volume_id, key):
    """Delete the given restricted metadata item.

    Thin pass-through to the active database backend (IMPL).

    :param context: request context forwarded to the backend
    :param volume_id: ID of the volume whose metadata item is removed
    :param key: metadata key to delete
    """
    IMPL.volume_restricted_metadata_delete(context, volume_id, key)
5,330,977
def allele_counts_dataframe(read_evidence_generator):
    """
    Creates a DataFrame containing number of reads supporting the ref vs.
    alt alleles for each variant.
    """
    # Map ReadEvidence field names onto the output column names; the
    # DataFrameBuilder takes the length of these fields' values.
    column_names = {
        "ref_reads": "num_ref_reads",
        "alt_reads": "num_alt_reads",
        "other_reads": "num_other_reads",
    }
    # Fragment counts derive from the distinct read names per allele.
    fragment_columns = {
        "num_ref_fragments": lambda _, x: len(x.ref_read_names),
        "num_alt_fragments": lambda _, x: len(x.alt_read_names),
        "num_other_fragments": lambda _, x: len(x.other_read_names),
    }
    return dataframe_from_generator(
        element_class=ReadEvidence,
        variant_and_elements_generator=read_evidence_generator,
        rename_dict=column_names,
        extra_column_fns=fragment_columns)
5,330,978
def validate_rule_paths(sched: schedule.Schedule) -> schedule.Schedule:
    """A validator to be run after schedule creation to ensure each path
    contains at least one rule with an expression or value. A ValueError
    is raised when this check fails."""
    for path in sched.unfold():
        # Only final paths need a rule carrying an expression or value.
        if not path.is_final:
            continue
        if not list(path.rules_with_expr_or_value):
            raise ValueError(
                "No expression or value specified along the path {}."
                .format(path)
            )
    return sched
5,330,979
def update_template(src):
    """
    Updates existing templates
    """
    # Replace the installed template: drop the old copy, then install anew.
    remove(os.path.basename(src))
    add_template(src)
5,330,980
def spiralcontrolpointsvert(
        x: int, y: int,
        step: int,
        growthfactor: float,
        turns: int):
    """Return a list of 2D vertices along a path defined by a square spiral.

    Args:
        x, y: int centerpoint coordinates
        step: int step increment
        growthfactor: float multiplier to step increment to make
                      exponential spirals
        turns: number of turns of the spiral

    Returns:
        list of vertices of the spiral list[[x: int, y: int]]
    """
    vertices = [[x, y]]
    inc = step
    for _ in range(turns):
        # One full turn is four legs; each leg is longer than the last by inc.
        x += step
        vertices.append([x, y])
        step += inc
        y += step
        vertices.append([x, y])
        step += inc
        x -= step
        vertices.append([x, y])
        step += inc
        y -= step
        vertices.append([x, y])
        step += inc
        # Growing inc itself makes the spiral expand exponentially.
        inc *= growthfactor
    return vertices
5,330,981
def determine_current_taxid(given_taxid):
    """Determine NCBI's current taxonomic ID given an (old) taxonomic ID

    Args:
        given_taxid: previously used NCBI taxonomic ID

    Returns:
        most current NCBI taxonomic ID
    """
    taxid = given_taxid
    redirect_target = None
    attempts = 0
    # Follow at most 5 redirects to avoid infinite looping (a parsed
    # "AkaTaxId" of 0 means no further redirect).
    while redirect_target != 0 and attempts <= 5:
        raw_xml = urlopen_with_tries(ncbi_taxonomy_url(taxid))
        redirect_target = parse_taxonomy_xml_for_aka_taxid(raw_xml)
        if redirect_target != 0:
            logger.warning("Taxid %d is being redirected to taxid %d" %
                           (taxid, redirect_target))
            taxid = redirect_target
        else:
            logger.debug("Taxid %d did not need to be redirected" % (taxid))
        attempts += 1
    return taxid
5,330,982
def validate_dependencies():
    """Validate external dependencies.

    This function does NOT have to exist. If it does exist the runtime will
    call and execute it during api initialization.

    The purpose of this function is to verify that external dependencies
    required to auto-generate a problem are properly installed and configured
    on this system. Some common tasks that may be performed are checking that
    a certain program is installed (such as 'javac') and that it is
    executable. You may also want to verify that template files that the
    generator modifies exist in the templates/ directory.

    If any dependency check fails the function should print out the
    respective error message and return False. If all checks pass correctly
    the function should return True.

    If the function does not exist the API initializer will assume that all
    dependencies are met and will add the generator to the pre-fetched
    generator list assuming there is an auto-generated problem in the
    database that has the given generator set for it's 'generator' field.

    The following code demonstrates how to check that the java compiler
    (javac) is present on the system and can be executed by the current user.
    """
    # Fix: the original used Python 2 `print` statements, which are a syntax
    # error under Python 3 (the rest of this codebase uses py3 features such
    # as f-strings).  Converted to print() calls, valid in both.
    print("DEPENDENCY CHECK - TEMPLATE.py (TEMPLATE)")

    # This should have scope across the entire module but doesn't for
    # template purposes.
    javac_path = "/usr/bin/javac"

    if not os.path.exists(javac_path):
        print("ERROR - TEMPLATE - The specified java compiler (%s) does not appear to exist." % javac_path)
        return False

    if not os.access(javac_path, os.X_OK):
        print("ERROR - TEMPLATE - javac is not executable by the python runtime.")
        return False

    return True
5,330,983
def find_closest_vertex(desired_hop, available_vertices):
    """
    Find the closest downstream (greater than or equal) vertex in the
    available vertices. If nothing exists, then return -1.

    Keyword arguments:
    desired_hop -- float representing the desired hop location
    available_vertices -- np array of available vertices in model

    Returns:
    vertex -- the closest available vertex that is >= desired_hop
    """
    # The nearest downstream vertex is simply the minimum of the subset at
    # or beyond the desired hop — no full sort required.
    downstream = available_vertices[available_vertices >= desired_hop]
    return downstream.min() if downstream.size else -1
5,330,984
def second_smallest(numbers):
    """Find second smallest element of numbers.

    Duplicates count separately, so the second smallest of [1, 1, 2] is 1.
    Returns inf when numbers has fewer than two elements.
    """
    smallest = float('inf')
    runner_up = float('inf')
    for value in numbers:
        if value <= smallest:
            # New minimum: previous minimum becomes the runner-up.
            runner_up = smallest
            smallest = value
        elif value < runner_up:
            runner_up = value
    return runner_up
5,330,985
def calc_sft_ccs_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data: bool = False):
    """Calculate structure factor tensor in CCS (X||a*, Z||c) based on the
    information given in dictionary. Output information is written in the
    same dictionary.

    :param dict_crystal: crystal description; must define
        "unit_cell_parameters" and one of the symmetry-element sets
        ("reduced_symm_elems"/"centrosymmetry"/"translation_elems",
        "full_symm_elems", or "full_mcif_elems")
    :param dict_in_out: must define "index_hkl"; may define
        "flag_only_orbital"; also receives intermediate results
    :param flag_use_precalculated_data: reuse data cached in dict_in_out
    :return: (sft_ccs, dder) — the 9 x n_hkl complex tensor and a dict of
        derivatives
    :raises AttributeError: when required keys are absent
    """
    dict_crystal_keys = dict_crystal.keys()
    dict_in_out_keys = dict_in_out.keys()
    # Required crystal keys.
    necessary_crystal_keys = set(["unit_cell_parameters", ])
    diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
    if len(diff_set_crystal) != 0:
        raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")
    # The symmetry can be supplied in one of three equivalent forms.
    flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
    flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
    flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0
    if not(flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
        raise AttributeError("The symmetry elements have to be defined.")
    necessary_in_out_keys = set(["index_hkl", ])
    diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
    if len(diff_set_in_out) != 0:
        raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
    index_hkl = dict_in_out["index_hkl"]
    # Without magnetic form-factor parameters the tensor is identically zero.
    non_zero_keys = set(["mag_atom_lande_factor", "mag_atom_kappa", "mag_atom_j0_parameters", "mag_atom_j2_parameters"])
    diff_set_crystal = non_zero_keys.difference(set(dict_crystal_keys))
    if len(diff_set_crystal) != 0:
        sft_ccs = numpy.zeros((9, index_hkl.shape[-1]), dtype=complex)
        dder = {}
        return sft_ccs, dder
    if "flag_only_orbital" in dict_in_out_keys:
        flag_only_orbital = dict_in_out["flag_only_orbital"]
    else:
        flag_only_orbital = False
    # Normalize the three symmetry representations into one common form:
    # (reduced_symm_elems, centrosymmetry, centrosymmetry_position,
    # translation_elems).
    if flag_reduced_symm_elems:
        reduced_symm_elems = dict_crystal["reduced_symm_elems"]
        centrosymmetry = dict_crystal["centrosymmetry"]
        if centrosymmetry:
            centrosymmetry_position = dict_crystal["centrosymmetry_position"]
        else:
            centrosymmetry_position = None
        translation_elems = dict_crystal["translation_elems"]
    elif flag_full_symm_elems:
        full_symm_elems = dict_crystal["full_symm_elems"]
        reduced_symm_elems = full_symm_elems
        centrosymmetry = False
        centrosymmetry_position = None
        translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
    elif flag_full_mcif_elems:
        full_mcif_elems = dict_crystal["full_mcif_elems"]
        # Only the first 13 rows carry the spatial part of the mcif elements.
        reduced_symm_elems = full_mcif_elems[:13]
        centrosymmetry = False
        centrosymmetry_position = None
        translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
    unit_cell_parameters = dict_crystal["unit_cell_parameters"]
    # Select the parameters of the magnetically parameterized atoms and apply
    # the site symmetry constraints (sc_fract / sc_b) to fractional positions.
    atom_para_index = dict_crystal["atom_para_index"]
    atom_para_fract_xyz = dict_crystal["atom_fract_xyz"][:, atom_para_index]
    atom_para_sc_fract = dict_crystal["atom_site_sc_fract"][:, atom_para_index]
    atom_para_sc_b = dict_crystal["atom_site_sc_b"][:, atom_para_index]
    atom_para_fract_xyz = calc_m_v(
        atom_para_sc_fract, numpy.mod(atom_para_fract_xyz, 1),
        flag_m=False, flag_v=False)[0] + atom_para_sc_b
    atom_para_occupancy = dict_crystal["atom_occupancy"][atom_para_index]
    atom_para_b_iso = dict_crystal["atom_b_iso"][atom_para_index]
    atom_beta = dict_crystal["atom_beta"]
    # Apply anisotropic displacement symmetry constraints when available.
    if "atom_site_aniso_sc_beta" in dict_crystal_keys:
        atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
        atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
        atom_sc_beta = numpy.zeros((6,)+atom_beta.shape, dtype=float)
        atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
        atom_beta = (atom_sc_beta*numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
    atom_para_beta = atom_beta[:, atom_para_index]
    mag_atom_para_index = dict_crystal["mag_atom_para_index"]
    atom_para_lande_factor = dict_crystal["mag_atom_lande_factor"][mag_atom_para_index]
    atom_para_kappa = dict_crystal["mag_atom_kappa"][mag_atom_para_index]
    atom_para_j0_parameters = dict_crystal["mag_atom_j0_parameters"][:, mag_atom_para_index]
    atom_para_j2_parameters = dict_crystal["mag_atom_j2_parameters"][:, mag_atom_para_index]
    atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
    atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
    # Refinement flags: True when any component of the group is refined, so
    # calc_sft_ccs knows which derivatives to produce.
    flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
    flag_atom_para_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"][:, atom_para_index])
    flag_atom_para_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"][atom_para_index])
    flag_atom_para_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"][atom_para_index])
    flag_atom_para_beta = numpy.any(dict_crystal["flags_atom_beta"][:, atom_para_index])
    flag_atom_para_susceptibility = numpy.any(dict_crystal["flags_atom_para_susceptibility"])
    flag_atom_para_lande_factor = numpy.any(dict_crystal["flags_mag_atom_lande_factor"][mag_atom_para_index])
    flag_atom_para_kappa = numpy.any(dict_crystal["flags_mag_atom_kappa"][mag_atom_para_index])
    # Delegate the actual tensor computation.
    sft_ccs, dder = calc_sft_ccs(
        index_hkl, reduced_symm_elems, centrosymmetry, centrosymmetry_position,
        translation_elems, unit_cell_parameters, atom_para_fract_xyz,
        atom_para_occupancy, atom_para_susceptibility, atom_para_b_iso,
        atom_para_beta, atom_para_lande_factor, atom_para_kappa,
        atom_para_j0_parameters, atom_para_j2_parameters, atom_para_sc_chi,
        dict_in_out=dict_in_out, flag_only_orbital=flag_only_orbital,
        flag_unit_cell_parameters=flag_unit_cell_parameters,
        flag_atom_para_fract_xyz=flag_atom_para_fract_xyz,
        flag_atom_para_occupancy=flag_atom_para_occupancy,
        flag_atom_para_susceptibility=flag_atom_para_susceptibility,
        flag_atom_para_b_iso=flag_atom_para_b_iso,
        flag_atom_para_beta=flag_atom_para_beta,
        flag_atom_para_lande_factor=flag_atom_para_lande_factor,
        flag_atom_para_kappa=flag_atom_para_kappa,
        flag_use_precalculated_data=flag_use_precalculated_data)
    return sft_ccs, dder
5,330,986
def main():
    """Main"""
    text = 'Scrolling ASCII text in console.'
    font = ImageFont.load_default()
    # font = ImageFont.truetype('arial.ttf', 16)

    space_width = get_text_size(font, ' ')[0]   # width of one space char
    text_height = get_text_size(font, text)[1]  # pixel height of the text

    # Resize the console to fit the rendered text.
    cols = 100
    lines = int(text_height * 1.25)
    init_console(cols, lines)

    # Pad with enough leading spaces that the text starts off-screen.
    text = ' ' * int(cols / space_width + 1) + text

    index = 0
    try:
        # Redraw the shifted text forever until Ctrl-C.
        while True:
            clear_console()
            print(text_to_ascii_text(font, text[index:], cols, lines))
            index += 1
            if index > len(text):
                index = 0
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
5,330,987
def normalize_mesh(mesh, in_place=True):
    """Rescales vertex positions to lie inside unit cube."""
    # Uniform scale so the largest bounding-box extent becomes 1.
    extent = mesh.bounds[1, :] - mesh.bounds[0, :]
    scale = 1.0 / np.max(extent)
    scaled_vertices = (mesh.vertices - mesh.centroid) * scale
    # Mutate the input mesh or a copy, depending on in_place.
    scaled_mesh = mesh if in_place else mesh.copy()
    scaled_mesh.vertices = scaled_vertices
    scaled_mesh.fix_normals()
    return scaled_mesh
5,330,988
def assert_almost_equal(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["orth.laguerre(1)"]
):
    """
    usage.scipy: 1
    """
    # NOTE(review): appears to be an auto-generated API-usage stub recording
    # one observed call signature (seen once in scipy's usage); the body is
    # intentionally empty — confirm against the stub generator.
    ...
5,330,989
def basic(stocks):
    """Basic report of stocks.

    Builds per-stock growth tables (annual YoY, quarterly YoY and QoQ) and
    writes them, together with basic company info, into "orange.xls".
    Sheet names and index labels are Chinese and must stay as-is (they are
    part of the produced workbook).

    :param stocks: iterable of stock codes
    """
    # Get basic info from stocks; index the table by stock code ("股票代码").
    basics = pd.DataFrame([get_basic_info(code) for code in stocks])
    basics.set_index(['股票代码'], inplace=True)
    year_yoy_list = []
    quarter_yoy_list = []
    quarter_qoq_list = []
    for code in stocks:
        # get annual report of stock
        annual_report = get_annual_report(code)
        # YoY: percentage change of the most recent annual column.
        year_yoy = pct_change(annual_report, axis=1).iloc[:, -1]
        # format: tag rows with "(%)" and express the change in percent.
        year_yoy.rename(index=lambda x: x + '(%)', inplace=True)
        year_yoy = (year_yoy * 100).round(2)
        year_yoy.loc['股票代码'] = code
        year_yoy.loc['年报时间'] = str(year_yoy.name)[:10]
        # append the growth rows plus the absolute level-0 figures.
        year_yoy_list.append(
            pd.concat([year_yoy, get_level0_report(annual_report.iloc[:, -1])]))
        # get quarterly results
        quarterly_results = get_quarterly_results(code)
        # Skip the YoY comparison when the latest quarter is Q4 (annual data
        # already covers it).
        if str(quarterly_results.columns[-1])[4:10] != '-12-31':
            # measure='YoY': compare with the same quarter one year back.
            quarter_yoy = pct_change(
                quarterly_results, periods=4, axis=1).iloc[:, -1]
            # YoY
            quarter_yoy.rename(index=lambda x: x + '(%)', inplace=True)
            quarter_yoy = (quarter_yoy * 100).round(2)
            quarter_yoy.loc['股票代码'] = code
            quarter_yoy.loc['季报时间'] = str(quarter_yoy.name)[:10]
            quarter_yoy_list.append(
                pd.concat([
                    quarter_yoy,
                    get_level0_report(quarterly_results.iloc[:, -1])
                ]))
        # Skip the QoQ comparison when the latest quarter is Q1 (no previous
        # quarter within the year).
        if str(quarterly_results.columns[-1])[4:10] != '-03-31':
            # measure='QoQ': compare with the immediately preceding quarter.
            quarter_qoq = pct_change(
                quarterly_results, periods=1, axis=1).iloc[:, -1]
            # QoQ
            quarter_qoq.rename(index=lambda x: x + '(%)', inplace=True)
            quarter_qoq = (quarter_qoq * 100).round(2)
            quarter_qoq.loc['股票代码'] = code
            quarter_qoq.loc['季报时间'] = str(quarter_qoq.name)[:10]
            quarter_qoq_list.append(
                pd.concat([
                    quarter_qoq,
                    get_level0_report(quarterly_results.iloc[:, -1])
                ]))
    # Write all sheets into one workbook.
    writer = ExcelWriter("orange.xls")
    basics.to_excel(writer, "基本信息")
    year_yoys = pd.DataFrame(year_yoy_list)
    year_yoys.set_index(['股票代码', '年报时间'], inplace=True)
    year_yoys.to_excel(writer, "年报对比")
    if quarter_yoy_list:
        quarter_yoys = pd.DataFrame(quarter_yoy_list)
        quarter_yoys.set_index(['股票代码', '季报时间'], inplace=True)
        quarter_yoys.to_excel(writer, "季报同比")
    if quarter_qoq_list:
        quarter_qoqs = pd.DataFrame(quarter_qoq_list)
        quarter_qoqs.set_index(['股票代码', '季报时间'], inplace=True)
        quarter_qoqs.to_excel(writer, "季报环比")
    writer.save()
5,330,990
def test_file_long_format(accelize_drm, conf_json, cred_json, async_handler,
                          request, log_file_factory):
    """Test logging file long format.

    Configures the DRM manager to log at full verbosity with the long
    formatter, emits one info-level message, and checks it appears in the
    log file with the expected long-format layout.
    """
    driver = accelize_drm.pytest_fpga_driver[0]
    async_cb = async_handler.create()
    async_cb.reset()
    msg = 'This is a message'
    regex_long = REGEX_FORMAT_LONG % msg
    # Log level 2 (info), one file, long format.
    logfile = log_file_factory.create(2, 1, LOG_FORMAT_LONG)
    conf_json.reset()
    conf_json['settings'].update(logfile.json)
    conf_json['settings']['log_verbosity'] = 6
    conf_json.save()
    with accelize_drm.DrmManager(
        conf_json.path, cred_json.path,
        driver.read_register_callback,
        driver.write_register_callback,
        async_cb.callback
    ) as drm_manager:
        # Emit the probe message at info level.
        drm_manager.set(log_message_level=2)
        drm_manager.set(log_message=msg)
    log_content = logfile.read()
    m = search(regex_long, log_content, MULTILINE)
    assert m is not None
    # Group 1 of the long-format regex captures the level name.
    assert m.group(1) == 'info'
    async_cb.assert_NoError()
    logfile.remove()
5,330,991
def test_roundtrip(tmpdir: Path):
    """
    Check that we can write DL1+DL2 info to files and read them back

    Parameters
    ----------
    tmpdir : temp directory fixture
    """
    output_path = Path(tmpdir / "events.DL1DL2.h5")
    # Small simulated dataset, restricted to 4 telescopes to keep the test fast.
    source = EventSource(
        get_dataset_path("gamma_LaPalma_baseline_20Zd_180Az_prod3b_test.simtel.gz"),
        max_events=20,
        allowed_tels=[1, 2, 3, 4],
    )
    calibrate = CameraCalibrator(subarray=source.subarray)

    events = []

    # Write images (no parameters) with integer transforms so the read-back
    # tolerance checks below (atol=0.1 / 0.01) match the chosen scales.
    with DataWriter(
        event_source=source,
        output_path=output_path,
        write_parameters=False,
        write_images=True,
        transform_image=True,
        image_dtype="int32",
        image_scale=10,
        transform_peak_time=True,
        peak_time_dtype="int16",
        peak_time_scale=100,
        write_stereo_shower=True,
        write_mono_shower=True,
    ) as write:
        write.log.level = logging.DEBUG
        for event in source:
            calibrate(event)
            write(event)
            # NOTE(review): dummy DL2 info is attached *after* write(event) is
            # called — presumably the writer flushes DL2 containers later or
            # keeps a reference to the event; confirm the intended ordering.
            generate_dummy_dl2(event)
            # deepcopy so later mutations don't affect the comparison baseline
            events.append(deepcopy(event))
        write.write_simulation_histograms(source)
        assert DataLevel.DL1_IMAGES in write.datalevels
        assert DataLevel.DL1_PARAMETERS not in write.datalevels
        assert DataLevel.DL2 in write.datalevels

    assert output_path.exists()

    # check we can get the subarray description:
    sub = SubarrayDescription.from_hdf(output_path)
    assert sub.num_tels > 0

    # check a few things in the output just to make sure there is output. For a
    # full test of the data model, a verify tool should be created.
    with tables.open_file(output_path) as h5file:
        images = h5file.get_node("/dl1/event/telescope/images/tel_001")
        assert len(images) > 0
        # dtypes must match the image_dtype / peak_time_dtype requested above
        assert images.col("image").dtype == np.int32
        assert images.col("peak_time").dtype == np.int16
        assert images.col("image").max() > 0.0

        # check that DL2 info is there
        dl2_energy = h5file.get_node("/dl2/event/subarray/energy/ImPACTReconstructor")
        assert np.allclose(dl2_energy.col("energy"), 10)
        assert np.count_nonzero(dl2_energy.col("tel_ids")[0]) == 3

        dl2_tel_energy = h5file.get_node(
            "/dl2/event/telescope/energy/HillasReconstructor/tel_001"
        )
        assert np.allclose(dl2_tel_energy.col("energy"), 10)
        # per-telescope tables must not carry the subarray-level tel_ids column
        assert "tel_ids" not in dl2_tel_energy

    # make sure it is readable by the event source and matches the images
    for event in EventSource(output_path):
        for tel_id, dl1 in event.dl1.tel.items():
            original_image = events[event.count].dl1.tel[tel_id].image
            read_image = dl1.image
            # atol bounded by the inverse of image_scale=10
            assert np.allclose(original_image, read_image, atol=0.1)

            original_peaktime = events[event.count].dl1.tel[tel_id].peak_time
            read_peaktime = dl1.peak_time
            # atol bounded by the inverse of peak_time_scale=100
            assert np.allclose(original_peaktime, read_peaktime, atol=0.01)
5,330,992
def _updateKeyword(key,inhdr,outhdr,default='UNKNOWN'): """ Safely updates keyword key in outhdr from value in inhdr. Uses value given by 'default' if keyword is not found in input header. """ try: _keyw = inhdr[key] except KeyError: _keyw = default outhdr[key] = _keyw
5,330,993
def get_run_name():
    """
    Return a unique, human-readable name for the current run.

    The name combines a timestamp (e.g. ``Jan01-12-30-45``) with the host
    name, separated by an underscore.
    """
    timestamp = datetime.now().strftime('%b%d-%H-%M-%S')
    host = socket.gethostname()
    return f'{timestamp}_{host}'
5,330,994
def process_request(identifier, browser, document_type='Annual Return', num_doc=1,
                    status_df=None):
    """
    Search ICRIS for the passed identifier, analyze the returned documents,
    and cart the documents depending on whether we purchased the document
    before.

    Parameters
    ----------
    identifier : str
        Name or Companies Registry Number of the company to purchase
        documents for
    browser : selenium.webdriver.remote.webdriver.WebDriver
        An instance of Selenium WebDriver
    document_type : str, optional
        Type of document to be purchased, default `Annual Return`
    num_doc : int, optional
        Number of documents of type `document_type` to be purchased
    status_df : pandas.DataFrame
        Dataframe object to append data related to the status of the
        operations to

    Returns
    -------
    status_df : pandas.DataFrame
        Dataframe object containing information about the status of the
        carting operations (identifier, document type, cart status,
        cart number, exception traceback or 'None')
    """
    if status_df is None:
        status_df = pd.DataFrame()

    # Initialize up front so the failure path never hits a NameError
    # (the original relied on a try/except NameError to recover cart_status).
    cart_number = 0
    cart_status = False
    exception = 'None'

    try:
        try:
            # Check if there were no matches for the passed identifier
            companies = CompaniesIndexPage(browser)
            companies.NO_MATCHES()
            raise Exception(f"No matches found for identifier: {identifier}")
        except NoSuchElementException:
            # NO_MATCHES element absent means matches exist: continue normally.
            pass

        main_menu, search, companies, info, doc_index = init_webpages(browser)
        main_menu.navigate_to_search_page()

        # Digit-only identifiers are CR numbers; anything else is a name.
        if identifier.isdigit():
            search.crNo_search(identifier)
            try:
                companies.choose_number(identifier)
            except TimeoutError:
                raise Exception(f"No companies found for company number {identifier}")
        else:
            search.name_search(identifier)
            try:
                companies.choose_name(identifier)
            except TimeoutError:
                raise Exception(f"No companies found for company name {identifier}")

        info.proceed()
        doc_index.list_documents()
        cart_status, cart_number = doc_index.index_and_cart(document_type, num_doc)
    except Exception:
        # Record the traceback in the status row instead of propagating.
        exception = traceback.format_exc(7)

    row = pd.Series([identifier, document_type, str(cart_status).upper(),
                     cart_number, exception])
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # backward-compatible equivalent of append(row, ignore_index=True).
    status_df = pd.concat([status_df, row.to_frame().T], ignore_index=True)
    return status_df
5,330,995
def create_blackboard():
    """
    Create a blackboard populated with a spread of value types (string,
    tuple, custom object, None) so pretty-printing tests get full coverage.
    """
    Blackboard.clear()
    client = Client(name="Tester")
    keys = {"foo", "some_tuple", "nested", "nothing"}
    # Register READ access for every key first, then WRITE access,
    # matching the original registration order.
    for access in (py_trees.common.Access.READ, py_trees.common.Access.WRITE):
        for key in keys:
            client.register_key(
                key=key,
                access=access
            )
    client.foo = "bar"
    client.some_tuple = (1, "bar")
    client.nested = Nested()
    client.nothing = None
    return client
5,330,996
def adminRoomDelete(*args, **kwargs):
    """Delete a room and all of its user-room relations."""
    params = kwargs['params']
    room_uuid = params['room_uuid']
    # Remove the room record itself, then the user memberships pointing at it.
    Room().delete({Room.room_uuid == room_uuid})
    UserRoomRelation().delete({UserRoomRelation.room_uuid == room_uuid})
    return BaseController().successData()
5,330,997
def test_representation(target, expected_str):
    """Ensure ``MigrationNotInPlan`` renders the expected message."""
    error = exceptions.MigrationNotInPlan(target)
    assert str(error) == expected_str
5,330,998
def who_is_it(image_path, database, model, threshold=0.85):
    """
    Identify the person on an image by comparing its face encoding against
    a database of known encodings.

    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of
                the person on the image
    model -- your Inception model instance in Keras
    threshold -- maximum L2 distance accepted as a match; larger minimum
                 distances are reported as "Unknown" (default 0.85, which
                 preserves the previous hard-coded behavior)

    Returns:
    min_dist -- the minimum distance between image_path encoding and the
                encodings from the database
    identity -- string, the name prediction for the person on image_path,
                or "Unknown" if no encoding is within `threshold`
    """
    # Compute the target encoding for the query image.
    encoding = create_encoding(image_path, model)

    # Start above any realistic encoding distance; initializing identity
    # guards against a NameError if the database is empty.
    min_dist = 100
    identity = "Unknown"

    # Find the closest database encoding by L2 distance.
    for name, db_enc in database.items():
        dist = np.linalg.norm(encoding - db_enc)
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > threshold:
        print("Not in the database.")
        print("distance", min_dist)
        identity = "Unknown"
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
5,330,999