content
stringlengths
22
815k
id
int64
0
4.91M
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization.

    Args:
        **kwargs: Forwarded unchanged to the ``VGG`` constructor.

    Returns:
        A ``VGG`` model whose feature layers follow configuration 'B'
        with batch-norm enabled.
    """
    return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
5,325,700
def lr_insight_wr():
    """Return 5-fold cross validation scores r2, mae, rmse"""
    # Preprocess: scale everything except the flag column, then KNN-impute.
    preprocess = Pipeline([('scaler', t.MyScaler(dont_scale='for_profit')),
                           ('knn', t.KNNKeepDf())])
    preprocess.fit(X_raw)
    features = preprocess.transform(X_raw)

    model = LinearRegression()
    model.fit(features, y)
    cv = cross_validate(model, features, y,
                        scoring=['r2', 'neg_mean_squared_error',
                                 'neg_mean_absolute_error'],
                        return_train_score=True)

    def _mean_rmse(neg_mse_scores):
        # cross_validate reports negated MSE; take per-fold RMSE, then average.
        return np.mean([np.sqrt(abs(score)) for score in neg_mse_scores])

    summary = {
        'train_r2': [cv['train_r2'].mean()],
        'train_rmse': [_mean_rmse(cv['train_neg_mean_squared_error'])],
        'train_mae': [abs(cv['train_neg_mean_absolute_error'].mean())],
        'test_r2': [cv['test_r2'].mean()],
        'test_rmse': [_mean_rmse(cv['test_neg_mean_squared_error'])],
        'test_mae': [abs(cv['test_neg_mean_absolute_error'].mean())],
    }
    return pd.DataFrame(summary, index=['LR'])
5,325,701
def complete_list_value(exe_context, return_type, field_asts, info, result):
    """
    Complete a list value by completing each item in the list with the
    inner type.

    Returns the list of completed item values, or a ``Promise`` of that
    list when any completed item is itself thenable.
    """
    # Fix: ``collections.Iterable`` was removed in Python 3.10; the ABC
    # has lived in ``collections.abc`` since Python 3.3.
    from collections.abc import Iterable

    assert isinstance(result, Iterable), \
        ('User Error: expected iterable, but did not find one ' +
         'for field {}.{}.').format(info.parent_type, info.field_name)
    item_type = return_type.of_type
    completed_results = []
    contains_promise = False
    index = 0
    # Copy the path so each item gets its own response-path suffix.
    path = info.path[:]
    for item in result:
        info.path = path + [index]
        completed_item = complete_value_catching_error(
            exe_context, item_type, field_asts, info, item)
        if not contains_promise and is_thenable(completed_item):
            contains_promise = True
        completed_results.append(completed_item)
        index += 1
    return Promise.all(completed_results) if contains_promise else completed_results
5,325,702
def projection_ERK(rkm, dt, f, eta, deta, w0, t_final):
    """Explicit Projection Runge-Kutta method.

    Integrates w' = f(w) from t = 0 to ``t_final`` with fixed step ``dt``
    using the explicit RK method ``rkm``, then after each step projects the
    solution back onto the level set eta(w) = eta(w0) by moving along the
    gradient direction deta(w).

    Parameters
    ----------
    rkm : Runge-Kutta method object with ``A``, ``b``, ``__num__()`` and
        ``len()`` (NodePy-style) -- TODO confirm exact type against caller
    dt : float -- time step
    f : callable -- right-hand side f(w)
    eta : callable -- invariant functional to preserve
    deta : callable -- gradient of ``eta``
    w0 : array-like -- initial condition
    t_final : float -- final time

    Returns
    -------
    tt : 1-D array of time points
    ww : 2-D array; column ``j`` is the solution at ``tt[j]``
    """
    rkm = rkm.__num__()
    w = np.array(w0)    # current value of the unknown function
    t = 0               # current time
    ww = np.zeros([np.size(w0), 1])    # values at each time step
    ww[:, 0] = w.copy()
    tt = np.zeros(1)    # time points for ww
    tt[0] = t
    b = rkm.b
    s = len(rkm)
    y = np.zeros((s, np.size(w0)))    # stage values
    F = np.zeros((s, np.size(w0)))    # stage derivatives
    eta0 = eta(w0)      # invariant value to be preserved
    while t < t_final and not np.isclose(t, t_final):
        if t + dt > t_final:
            # shorten the last step to land exactly on t_final
            dt = t_final - t
        # explicit RK stages
        for i in range(s):
            y[i, :] = w.copy()
            for j in range(i):
                y[i, :] += rkm.A[i, j] * dt * F[j, :]
            F[i, :] = f(y[i, :])
        w = w + dt * sum([b[i] * F[i] for i in range(s)])
        t += dt
        # Projection: find lamda so that eta(w + deta(w)*lamda) == eta0.
        # NOTE(review): the stopping test uses dlam, not abs(dlam); a
        # negative first correction exits immediately -- confirm intended.
        lamda = 0
        dlam = 10
        while dlam > 1.e-14:
            dg = deta(w)
            # +1e-16 guards against division by a vanishing gradient norm
            dlam = -(eta(w + dg * lamda) - eta0) / (np.dot(dg, dg) + 1.e-16)
            lamda += dlam
        w = w + dg * lamda
        tt = np.append(tt, t)
        ww = np.append(ww, np.reshape(w.copy(), (len(w), 1)), axis=1)
    return tt, ww
5,325,703
def integer(name, value):
    """Validate that the value represents an integer

    :param name: Name of the argument
    :param value: A value representing an integer
    :returns: The value as an int, or None if value is None
    :raises: InvalidParameterValue if the value does not represent an
             integer
    """
    if value is None:
        return None
    try:
        converted = int(value)
    except (ValueError, TypeError):
        raise exception.InvalidParameterValue(
            _('Expected an integer for %s: %s') % (name, value))
    return converted
5,325,704
def generate_Euler_Maruyama_propagators():
    """
    Importer function that returns two functions:

    1. a kernel propagator (K) -- a batched ULA move
    2. the matching kernel ratio calculator -- the Euler-Maruyama
       log proposal ratio
    """
    return ULA_move, Euler_Maruyama_log_proposal_ratio
5,325,705
def test_save_works(client):
    """Save should work."""
    # Tag the shared fixture payload so this test's message is unique.
    proto_reminder['message'] = 'test_save_works'
    response = client.post('/api/reminders', json=proto_reminder)
    assert response.status_code == 201
    assert response.content_type == 'application/json'
    # The stored message must be echoed back in the response body.
    assert proto_reminder['message'].encode() in response.data
5,325,706
def get_vtx_neighbor(vtx, faces, n=1, ordinal=False, mask=None):
    """
    Get one vertex's n-ring neighbor vertices.

    Parameters
    ----------
    vtx : integer
        a vertex's id
    faces : numpy array
        the array of shape [n_triangles, 3]
    n : integer
        specify which ring should be got
    ordinal : bool
        True: return only the n-th ring
        False: return the full n-ring neighborhood
    mask : 1-D numpy array
        specify an area where the ROI is in.

    Return
    ------
    neighbors : set
        neighbors of the vtx
    """
    ring = _get_vtx_neighbor(vtx, faces, mask)   # cumulative neighborhood
    frontier = ring.copy()                       # most recently added ring
    for step in range(n - 1):
        expanded = set()
        for neighbor in frontier:
            expanded |= _get_vtx_neighbor(neighbor, faces, mask)
        if step == 0:
            # the seed vertex is not its own neighbor
            expanded.discard(vtx)
        frontier = expanded - ring
        ring |= frontier
    return frontier if ordinal else ring
5,325,707
def assert_value_in_cell_is_equal(test_case, expected_cell, actual_cell):
    """
    Assert that two spreadsheet cells hold equal values.

    Numeric-looking strings and numeric types are compared as ``Decimal``,
    floats with ``assertAlmostEqual``, and everything else (including
    non-numeric strings) with plain equality.

    :param TestCase test_case:
    :param Cell expected_cell:
    :param Cell actual_cell:
    :return:
    """
    expected_val = expected_cell.value
    actual_val = actual_cell.value
    # Fix 1: the original condition tested `expected_val.isnumeric()` twice.
    # Fix 2: non-numeric strings previously fell through WITHOUT any
    # assertion (silent pass); they now use the plain-equality branch.
    if isinstance(expected_val, str) and expected_val.isnumeric():
        test_case.assertEqual(
            Decimal(expected_val), Decimal(actual_val)
        )
    elif isinstance(expected_val, (int, Decimal)):
        test_case.assertEqual(
            Decimal(expected_val), Decimal(actual_val)
        )
    elif isinstance(expected_val, float):
        test_case.assertAlmostEqual(float(expected_val), float(actual_val))
    else:
        test_case.assertEqual(
            expected_val, actual_val,
            "value in cell ({}.{}) is not equal".format(expected_cell.row,
                                                        expected_cell.column)
        )
5,325,708
def run_from_text(text: str, n_merges: int=sys.maxsize) -> Tuple[str, int, Optional[List[BpePerformanceStatsEntry]]]:
    """
    Run byte-pair-encoding merges over the characters of ``text``,
    delegating to :func:`run`; performs at most ``n_merges`` merges
    (all possible merges by default).

    >>> def run_and_get_merges(text: str):
    ...     return [(m, occ) for m, occ, _ in run_from_text(text)]
    >>> run_and_get_merges("a")
    []
    >>> run_and_get_merges("ab")
    [('a b', 1)]
    >>> run_and_get_merges("abcdbc")
    [('b c', 2), ('a bc', 1), ('abc d', 1), ('abcd bc', 1)]
    >>> run_and_get_merges("aaa")
    [('a a', 1), ('aa a', 1)]
    >>> run_and_get_merges("aaaa")
    [('a a', 2), ('aa aa', 1)]
    >>> run_and_get_merges("aaaaa")
    [('a a', 2), ('aa aa', 1), ('aaaa a', 1)]
    >>> run_and_get_merges("aaaaaa")
    [('a a', 3), ('aa aa', 1), ('aaaa aa', 1)]
    >>> run_and_get_merges("aaaaaab")
    [('a a', 3), ('aa aa', 1), ('aaaa aa', 1), ('aaaaaa b', 1)]
    >>> run_and_get_merges("aaaaaaaa")
    [('a a', 4), ('aa aa', 2), ('aaaa aaaa', 1)]
    >>> run_and_get_merges("there|is|a|thin|tooth|in|the|tooth")
    [('t h', 5), ('th e', 2), ('| i', 2), ('n |', 2), ('t o', 2), ('to o', 2), ('too th', 2), \
('the r', 1), ('ther e', 1), ('there |i', 1), ('there|i s', 1), ('there|is |', 1), ('there|is| a', 1), \
('there|is|a |', 1), ('there|is|a| th', 1), ('there|is|a|th i', 1), ('there|is|a|thi n|', 1), \
('there|is|a|thin| tooth', 1), ('there|is|a|thin|tooth |i', 1), ('there|is|a|thin|tooth|i n|', 1), \
('there|is|a|thin|tooth|in| the', 1), ('there|is|a|thin|tooth|in|the |', 1), \
('there|is|a|thin|tooth|in|the| tooth', 1)]
    """
    return run(iter(text), n_merges)
5,325,709
def load_line_delimited_json(filename):
    """Load data from the file that is stored as line-delimited JSON.

    Blank lines are skipped; any line that fails to parse is logged with
    its line number and the error is re-raised.

    Parameters
    ----------
    filename : str

    Returns
    -------
    dict
    """
    records = []
    with open(filename) as handle:
        for line_number, raw_line in enumerate(handle):
            stripped = raw_line.strip()
            if not stripped:
                continue
            try:
                records.append(json.loads(stripped))
            except Exception:
                logger.exception("Failed to decode line number %s in %s",
                                 line_number, filename)
                raise
    logger.debug("Loaded data from %s", filename)
    return records
5,325,710
def tolower(x: StringOrIter) -> StringOrIter:
    """Convert strings to lower case

    Args:
        x: A string or vector of strings

    Returns:
        Converted strings
    """
    chars = as_character(x)
    if is_scalar(chars):
        return chars.lower()
    # vector input: lower-case element-wise, preserving the Array type
    return Array([element.lower() for element in chars])
5,325,711
def initialize_course(header, stu):
    """
    Helper function for initialize. This function takes in the request
    header and uses either the UQ SSO header or default values to add the
    student and course pairing into the database.

    :param header: a Http Request received by the initialize view
    :param stu: an instance of the student to pair the courses with
    :return: Nothing
    """
    sem = 1
    year = 2021
    mode = 'EXTERNAL'
    # Initialize UQ if needed
    if len(Institution.objects.filter(name="University of Queensland")) == 0:
        UQ = Institution(name="University of Queensland")
        UQ.save()
    courses = []
    try:
        groups = json.loads(header['X-Kvd-Payload'])['groups']
        for g in groups:
            match = re.search(r'uq:[a-zA-Z]{4}[0-9]{4}_*_*', g)
            if match:
                # e.g. 'uq:COMP3400_...' -> 'COMP3400'
                course_code = match.string.split("uq:")[1].split("_")[0]
                print(course_code)
                courses.append(course_code)
    except (KeyError, TypeError, ValueError):
        # Fix: narrowed from a bare `except:`. Covers a missing/non-dict
        # header (KeyError/TypeError) and malformed JSON (ValueError is
        # the base of json.JSONDecodeError); falls back to a default
        # course list for local development.
        courses = ['COMS4507', "STAT3007", "COMP3400", "COMP4403"]
    for course in courses:
        if len(Course.objects.filter(name=course, mode=mode,
                                     semester=sem, year=year)) == 0:
            # course not already in database
            print("saving course...")
            UQ = Institution.objects.get(name="University of Queensland")
            course_obj = Course(name=course, mode=mode, semester=sem,
                                year=year, institution=UQ)
            course_obj.save()
        course_obj = Course.objects.filter(name=course, mode=mode,
                                           semester=sem, year=year)[0]
        print(course_obj)
        # SAVE STUDENT COURSES
        if len(StudentCourse.objects
               .filter(student=stu, course=course_obj)) == 0:
            print("saving studentCourse...")
            stu_course = StudentCourse(student=stu, course=course_obj)
            stu_course.save()
5,325,712
def MWA_Tile_analytic(za, az, freq=100.0e6, delays=None, zenithnorm=True, power=False,
                      dipheight=config.DIPOLE_HEIGHT, dip_sep=config.DIPOLE_SEPARATION,
                      delay_int=config.DELAY_INT, jones=False, amps=None):
    """
    gainXX,gainYY=MWA_Tile_analytic(za, az, freq=100.0e6, delays=None,
    zenithnorm=True, power=True, dipheight=0.278, dip_sep=1.1,
    delay_int=435.0e-12)

    if power=False, then gains are voltage gains - should be squared for
    power; otherwise are power.
    za is zenith-angle in radians
    az is azimuth in radians, phi=0 points north
    freq in Hz, height, sep in m
    delays should be a numpy array of size (2,16), although a (16,) list
    or a (16,) array will also be accepted
    amps: optional per-dipole amplitudes, shape (16,); defaults to ones
    jones: if True, return the 2x2 Jones matrix instead of the two gains
    """
    theta = za
    phi = az
    # wavelength in meters
    lam = C / freq
    # --- normalise the delays argument to a (16,) array ---
    if (delays is None):
        delays = 0
    if (isinstance(delays, float) or isinstance(delays, int)):
        delays = delays * numpy.ones((16))
    if (isinstance(delays, numpy.ndarray) and len(delays) == 1):
        delays = delays[0] * numpy.ones((16))
    if isinstance(delays, list):
        delays = numpy.array(delays)
    assert delays.shape == (2, 16) or delays.shape == (16,), "Delays %s have unexpected shape %s" % (delays, delays.shape)
    if len(delays.shape) > 1:
        # (2,16) input: use the first polarization's delays only
        delays = delays[0]
    if amps is None:
        amps = numpy.ones((16))
    # direction cosines (relative to zenith) for direction az,za
    projection_east = numpy.sin(theta) * numpy.sin(phi)
    projection_north = numpy.sin(theta) * numpy.cos(phi)
    # projection_z = numpy.cos(theta)
    if dip_sep == config.DIPOLE_SEPARATION:
        # standard separation: use the precomputed dipole positions
        dipole_north = DIPOLE_NORTH
        dipole_east = DIPOLE_EAST
        # dipole_z = DIPOLE_Z
    else:
        # compute dipole position within the tile using a custom dipole separation value
        dipole_north = dip_sep * numpy.array([1.5, 1.5, 1.5, 1.5, 0.5, 0.5, 0.5, 0.5,
                                              -0.5, -0.5, -0.5, -0.5, -1.5, -1.5, -1.5, -1.5])
        dipole_east = dip_sep * numpy.array([-1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5,
                                             -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5])
        # dipole_z = dip_sep * numpy.zeros(dipole_north.shape)
    # loop over dipoles
    array_factor = 0.0
    for k in range(16):
        # relative dipole phase for a source at (theta,phi)
        phase = amps[k] * numpy.exp((1j) * 2 * math.pi / lam *
                                    (dipole_east[k] * projection_east
                                     + dipole_north[k] * projection_north
                                     # + dipole_z[k] * projection_z
                                     - delays[k] * C * delay_int))
        array_factor += phase / 16.0
    ground_plane = 2 * numpy.sin(2 * math.pi * dipheight / lam * numpy.cos(theta))
    # make sure we filter out the bottom hemisphere
    ground_plane *= (theta <= math.pi / 2)
    # normalize to zenith
    if (zenithnorm):
        # print "Normalisation factor (analytic) = %.4f" % (2*numpy.sin(2*math.pi*dipheight/lam))
        ground_plane /= 2 * numpy.sin(2 * math.pi * dipheight / lam)
    # response of the 2 tile polarizations
    # gains due to forshortening
    dipole_ns = numpy.sqrt(1 - projection_north * projection_north)
    dipole_ew = numpy.sqrt(1 - projection_east * projection_east)
    # voltage responses of the polarizations from an unpolarized source
    # this is effectively the YY voltage gain
    gain_ns = dipole_ns * ground_plane * array_factor
    # this is effectively the XX voltage gain
    gain_ew = dipole_ew * ground_plane * array_factor
    if jones:
        # Calculate Jones matrices
        dipole_jones = numpy.array([[numpy.cos(theta) * numpy.sin(phi), 1 * numpy.cos(phi)],
                                    [numpy.cos(theta) * numpy.cos(phi), -numpy.sin(phi)]])
        j = dipole_jones * ground_plane * array_factor
        # print "dipole_jones = %s" % (dipole_jones)
        # print "ground_plane = %s , array_factor = %s" % (ground_plane,array_factor)
        # Use swapaxis to place jones matrices in last 2 dimensions
        # insead of first 2 dims.
        if len(j.shape) == 4:
            j = numpy.swapaxes(numpy.swapaxes(j, 0, 2), 1, 3)
        elif len(j.shape) == 3:
            # 1-D
            j = numpy.swapaxes(numpy.swapaxes(j, 1, 2), 0, 1)
        else:
            # single value
            pass
        return j
    if power:
        return numpy.real(numpy.conj(gain_ew) * gain_ew), numpy.real(numpy.conj(gain_ns) * gain_ns)
    return gain_ew, gain_ns
5,325,713
def traindata():
    """Generate Plots in the traindata page.

    Args:
        None

    Returns:
        render_template(render_template): Render template for the plots
    """
    # read data and create visuals
    features = read_data_csv("./data/features_data.csv")
    sample_table = data_table(
        drop_cols=["Unnamed: 0", "FeatureVector", "ScaledFeatures"],
        num_cols=["Days", "UpPerSong", "DownPerSong", "SongsPerHour"],
        title="Transformed Dataset - Sample Records",
    )
    figures = [sample_table, heat_map(features)]

    # encode plotly graphs in JSON
    figure_ids = [f"graph-{index}" for index in range(len(figures))]
    graphJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template("traindata.html", ids=figure_ids, graphJSON=graphJSON)
5,325,714
def import_file(file_path, title, source_mime_type, dest_mime_type):
    """Imports a file with conversion to the native Google document format.

    Expects the env var GOOGLE_APPLICATION_CREDENTIALS to be set for
    credentials.

    Args:
        file_path (str): Path to file to import
        title (str): The title of the document to create
        source_mime_type (str): Original mime type of file
        dest_mime_type (str): Mime type to convert to

    Returns:
        str: The ID of the new file in drive
    """
    credentials, _ = auth.default()
    drive = build('drive', 'v3', credentials=credentials)
    metadata = {
        'name': title,
        'mimeType': dest_mime_type,
    }
    media = MediaFileUpload(file_path, mimetype=source_mime_type)
    created = drive.files().create(body=metadata,
                                   media_body=media,
                                   fields='id').execute()
    return created.get('id')
5,325,715
def init_prop_sheet(target: T.Target, prefix=''):
    """
    Populate ``target._properties`` with property objects declared on the
    target's PropSheet mixin.

    References:
        https://stackoverflow.com/questions/2611892/how-to-get-the-parents-of-a
        -python-class
    """
    assert all((
        hasattr(target, 'qid'),
        # hasattr(target, 'name'),
        hasattr(target, '_properties'),
    ))
    bases = target.__class__.__bases__
    # target must have 2+ bases, and its last mixin base must be a PropSheet
    assert len(bases) > 1 and issubclass(bases[-1], PropSheet)
    for prop_name, constructor in _get_all_props(bases[-1]):
        if prefix:
            qualified = prefix + '.' + prop_name   # e.g. 'anchors.top'
        else:
            qualified = prop_name                  # e.g. 'width'
        # noinspection PyProtectedMember
        target._properties[prop_name] = constructor(target.qid, qualified)
5,325,716
def test_extract_requested_slot_from_text_with_not_intent(): """Test extraction of a slot value from text with certain intent """ # noinspection PyAbstractClass class CustomFormAction(FormAction): def slot_mappings(self): return {"some_slot": self.from_text(not_intent='some_intent')} form = CustomFormAction() tracker = Tracker('default', {'requested_slot': 'some_slot'}, {'text': 'some_text', 'intent': {'name': 'some_intent', 'confidence': 1.0}}, [], False, None, {}, 'action_listen') slot_values = form.extract_requested_slot(CollectingDispatcher(), tracker, {}) # check that the value was extracted for correct intent assert slot_values == {} tracker = Tracker('default', {'requested_slot': 'some_slot'}, {'text': 'some_text', 'intent': {'name': 'some_other_intent', 'confidence': 1.0}}, [], False, None, {}, 'action_listen') slot_values = form.extract_requested_slot(CollectingDispatcher(), tracker, {}) # check that the value was not extracted for incorrect intent assert slot_values == {'some_slot': 'some_text'}
5,325,717
def test_dewpoint():
    """Test dewpoint calculation."""
    # 6.112 mbar is the saturation vapor pressure at 0 degC, so the
    # computed dewpoint should be ~0 degC (checked to 2 decimal places).
    assert_almost_equal(dewpoint(6.112 * units.mbar), 0. * units.degC, 2)
5,325,718
def stop():
    """Stop ckan service (gunicorn)."""
    # Delegate to the shared service-control helper with the 'stop' action.
    control('stop')
5,325,719
def _get_info_file_path():
    """Get path to info file for the current process.

    As with `_get_info_dir`, the info directory will be created if it
    does not exist.
    """
    # One info file per process, keyed by PID.
    filename = "pid-%d.info" % os.getpid()
    return os.path.join(_get_info_dir(), filename)
5,325,720
def create_generic_io_object(ioclass, filename=None, directory=None,
                             return_path=False, clean=False):
    """
    Create an io object in a generic way that can work with both
    file-based and directory-based io objects.

    If filename is None, create a filename.
    If directory is not None and path is not an absolute path already,
    use the file from the given directory.
    If return_path is True, return the full path of the file along with
    the io object as ``(ioobj, path)``. Default is False.
    If clean is True, try to delete existing versions of the file before
    creating the io object. Default is False.
    """
    filename = get_test_file_full_path(ioclass, filename=filename,
                                       directory=directory, clean=clean)
    try:
        # actually create the object
        if ioclass.mode == 'file':
            ioobj = ioclass(filename=filename)
        elif ioclass.mode == 'dir':
            ioobj = ioclass(dirname=filename)
        else:
            ioobj = None
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit propagate untouched; print the offending path for
        # easier debugging, then re-raise.
        print(filename)
        raise
    # return the full path if requested, otherwise don't
    if return_path:
        return ioobj, filename
    return ioobj
5,325,721
def select_channels(img_RGB):
    """
    Returns the R' and V* channels for a skin lesion image.

    R' is the red channel normalised by the per-pixel channel sum of the
    [0, 1]-scaled image; V* is the per-pixel maximum over the colour axis
    of the raw image.

    Args:
        img_RGB (np.array): The RGB image of the skin lesion
    """
    scaled = img_RGB / 255.0
    channel_sum = scaled[..., 0] + scaled[..., 1] + scaled[..., 2]
    red_normalised = scaled[..., 0] / channel_sum
    value_channel = np.max(img_RGB, axis=2)
    return (red_normalised, value_channel)
5,325,722
def upper_case(string):
    """
    Return its argument converted to upper case.

    :param string: str
    :return: str
    """
    uppered = string.upper()
    return uppered
5,325,723
def repositoryDefinitions():
    """ Load repositoryDefinitions page """
    details = wmc.repository.get_definition_details()
    # pretty-print the JSON (trailing space preserved from the original)
    pretty = json.dumps(details, indent=4) + " "
    msg = Markup(JSONtoHTML(pretty))
    return render_template('repositoryDefinitions.html', data=msg)
5,325,724
def available_adapter_names():
    """Return a string list of the available adapters."""
    names = []
    for adapter in plugins.ActiveManifest().adapters:
        names.append(str(adapter.name))
    return names
5,325,725
def parse_user_arguments(*args, **kwds):
    """
    Parses the arguments of the program

    :return: the ``argparse.Namespace`` holding the parsed options
    """
    parser = argparse.ArgumentParser(
        description = "Generate the profiles of the input drug",
        epilog = "@oliva's lab 2017")
    # drug name: used for BIANA lookup when targets are missing/insufficient
    parser.add_argument('-d','--drug_name',dest='drug_name',action = 'store',
                        help = """ Name of the drug. If you do not provide targets for this drug or the number of targets is not large enough, the program will use this name to search for targets in BIANA database. If targets are provided, this field will be only used for naming purposes and will be completely optional. If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between double quotes. """)
    parser.add_argument('-t','--targets',dest='targets',action = 'store',
                        help = 'Input file with the targets of the drug. Each target must be separated by a newline character.')
    parser.add_argument('-pt','--proteins_type_id',dest='proteins_type_id',action = 'store', default='geneid',
                        help = 'Input the type of ID of the targets introduced / proteins of the network. It must be the same! (default is geneid).')
    # NOTE(review): the help string below starts with a stray '"' after the
    # opening triple quote -- likely a typo in the original; left untouched.
    parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
                        help = """" Input file with a protein-protein interaction network in SIF format. If not introduced, the program will create a network of expansion using the targets as center and expanding as many neighbors as specified in the parameter radius. """)
    parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
                        help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing: - Different numbers that will be the threshold values separated by newline characters.
                        For example, a file called "top_threshold.list" containing: 0.1 0.5 1 5 10 """)
    parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
                        help = """Define the workspace directory where the data directory and the results directory will be created""")
    options=parser.parse_args()
    return options
5,325,726
def apply_d2v_rel_corpus():
    """
    Evaluate the doc2vec feature vectors on the smaller corpus for
    cited/random and relevant/irrelevant labellings.

    Loads precomputed pair labellings and patent texts from
    ``human_eval/corpus_info``, infers doc2vec features with a model
    trained on the full patent corpus, then plots score distributions and
    AUCs for both labellings.
    """
    # load text
    #pat_corpus = PatentCorpus()
    #pat_corpus.mode = 'd2v'
    #list(pat_corpus)
    combis = np.load('human_eval/corpus_info/combis.npy')
    # first element of each pair is the target patent id
    target_ids = list(set([comb[0] for comb in combis]))
    #pat_corpus = np.load('human_eval/doc2vec/corpus.npy')
    #pat_corpus = [gensim.models.doc2vec.TaggedDocument(a[0], a[1]) for a in pat_corpus if a[1][0] not in target_ids]
    ## Plot AUC
    # load model trained on entire patent corpus
    # NOTE(review): pkl.load on a text-mode handle; Python 3 requires
    # open(..., 'rb') for pickle -- confirm which interpreter runs this.
    model = pkl.load(open("../doc2vec/models/full_pat_corpus_dm_50_min5_iter18.model"))
    #model = pkl.load(open("../doc2vec/models/full_pat_corpus_dm_50_min5_iter18.model"))
    #model = train_doc2vec(pat_corpus)
    # get doc2vec feature vectors
    single_pat_corpus = np.load('human_eval/corpus_info/single_pat_corpus.npy').item()
    patfeats_d2v = infer_patfeats(single_pat_corpus, model)
    #patfeats_d2v = corpus_to_patfeats(model, single_pat_corpus, [])
    #patfeats_d2v = make_doc2vec_corpus(model, single_pat_corpus, target_ids)
    pat_ids = np.load('human_eval/corpus_info/pat_ids.npy')
    binary_label_pairs = np.load('human_eval/corpus_info/binary_label_pairs.npy').item()
    human_label_pairs = np.load('human_eval/corpus_info/human_label_pairs.npy').item()
    # split pairs into same-label ("sim") and different-label ("diff") sets
    binary_sim_combis, binary_diff_combis = group_combis(binary_label_pairs)
    human_sim_combis, human_diff_combis = group_combis(human_label_pairs)
    for simcoef in ['linear']:
        binary_scores = calc_simcoef_distr(patfeats_d2v, ['random', 'cited'],
                                           {'cited': binary_sim_combis,
                                            'random': binary_diff_combis},
                                           simcoef)
        human_scores = calc_simcoef_distr(patfeats_d2v, ['irrelevant', 'relevant'],
                                          {'relevant': human_sim_combis,
                                           'irrelevant': human_diff_combis},
                                          simcoef)
        # calc_auc returns the AUC as its third element
        binary_auc = calc_auc(binary_scores['cited'], binary_scores['random'])[2]
        human_auc = calc_auc(human_scores['relevant'], human_scores['irrelevant'])[2]
        plot_score_distr('human_eval', simcoef, ['random', 'cited'],
                         {'cited': binary_scores['cited'],
                          'random': binary_scores['random']},
                         binary_auc, ['cited'],
                         histdir='doc2vec_full50_rel_corp', bins=20)
        plot_score_distr('human_eval', simcoef, ['irrelevant', 'relevant'],
                         {'relevant': human_scores['relevant'],
                          'irrelevant': human_scores['irrelevant']},
                         human_auc, ['relevant'],
                         histdir='doc2vec_full50_rel_corp', bins=20)
5,325,727
def error(msg, **kwargs):
    """log level error"""
    # Delegate to the generic logger at the 'error' severity.
    severity = LEVELS['error']
    log(severity, msg, **kwargs)
5,325,728
def createNewLetterSession(letter):
    """
    Create the next session folder for *letter* and return its path.

    The session id is (current max id + 1), zero-padded to three digits.

    :param letter: letter whose gestures-database folder is extended
    :return: path ("gestures_database/<letter>/<id>/") of the new folder
    """
    base = "gestures_database/" + letter + "/"
    last = -1
    # Fix: only the immediate children of the letter folder are session
    # ids; the original walked recursively and crashed with ValueError on
    # any non-numeric nested folder name.
    for entry in os.listdir(base):
        if entry.isdigit() and os.path.isdir(os.path.join(base, entry)):
            last = max(last, int(entry))
    # Create the next data folder for the current session
    path = base + str(last + 1).zfill(3)
    os.mkdir(path)
    return path + "/"
5,325,729
def count_sort(seq):
    """
    Perform count sort and return the sorted sequence without affecting
    the original.

    :param seq: sequence of integers
    :return: a new sorted list
    """
    if not seq:
        # Fix: min()/max() raise ValueError on an empty sequence.
        return []
    counts = defaultdict(list)
    for elem in seq:
        counts[elem].append(elem)
    result = []
    # walk the full value range so equal elements stay grouped in order
    for value in range(min(seq), max(seq) + 1):
        result.extend(counts[value])
    return result
5,325,730
def detect_overrides(cls, obj):
    """
    Return the names of methods declared on ``cls`` that ``obj`` overrides.

    For each function or classmethod defined in ``cls.__dict__``, the
    underlying function object is compared by identity with the function
    behind the bound method found on ``obj``; a mismatch means the method
    was overridden somewhere below ``cls``.

    :param cls: base class providing the reference implementations
    :param obj: instance to inspect for overriding methods
    :return: set of overridden method names
    """
    res = set()
    for key, value in cls.__dict__.items():
        if isinstance(value, classmethod):
            # unwrap the classmethod descriptor to its plain function
            value = getattr(cls, key).__func__
        if isinstance(value, (FunctionType, classmethod)):
            meth = getattr(obj, key)
            # identity check: a different __func__ means it was overridden
            if meth.__func__ is not value:
                res.add(key)
    return res
5,325,731
def morningCalls():
    """localhost:8080/morningcalls"""
    # Open a Safra web-service session and fetch the morning calls.
    return APIRequest.WebServiceSafra().listMorningCalls()
5,325,732
def resize_image(img, h, w):
    """Resize *img* to (h, w) with nearest-neighbour interpolation.

    :param img: source image array
    :param h: target height in pixels
    :param w: target width in pixels
    :return: the resized image
    """
    # cv2.resize takes the size as (width, height)
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST)
5,325,733
def train_one_epoch(train_loader, model, criterion, optimizer, epoch, writer):
    """
    Run one train epoch.

    :param train_loader: DataLoader yielding (inputs, target) batches
    :param model: the network under training (CUDA is used for inputs)
    :param criterion: loss function
    :param optimizer: optimizer stepped once per batch
    :param epoch: current epoch index (for console and TensorBoard labels)
    :param writer: TensorBoard SummaryWriter for scalar logging
    """
    batch_time = AverageMeter()   # per-batch wall time
    data_time = AverageMeter()    # time spent waiting on the data loader
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    # print/log roughly args.print_freq / args.log_freq times per epoch
    print_period = (len(train_loader) // args.print_freq) + 1
    log_period = (len(train_loader) // args.log_freq) + 1

    end = time.time()
    for i, (inputs, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input_var = inputs.cuda()
        target = target.cuda()
        if args.half:
            # half-precision inputs when requested
            input_var = input_var.half()

        # compute output
        output = model(input_var)
        loss = criterion(output, target)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_period == print_period-1:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'DL {data_time.val:.3f}\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1))
        if i % log_period == log_period-1:
            # fractional epoch on the x-axis for intra-epoch points
            writer.add_scalar("Prec1/train", top1.avg, epoch + i/len(train_loader))
            writer.add_scalar("Loss/train", losses.avg, epoch + i/len(train_loader))

    print('Epoch: [{0}][done]\t'
          'Time {batch_time.val:.3f}\t'
          'DL {data_time.val:.3f}\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
              epoch, batch_time=batch_time, data_time=data_time,
              loss=losses, top1=top1))
    writer.add_scalar("Prec1/train", top1.avg, epoch+1)
    writer.add_scalar("Loss/train", losses.avg, epoch+1)
5,325,734
def view_vwap(
    s_ticker: str,
    ohlc: pd.DataFrame,
    offset: int = 0,
    s_interval: str = "",
    export: str = "",
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Plots EMA technical indicator

    Parameters
    ----------
    s_ticker : str
        Ticker
    ohlc : pd.DataFrame
        Dataframe of prices
    offset : int
        Offset variable
    s_interval : str
        Interval of data
    export : str
        Format to export data
    external_axes : Optional[List[plt.Axes]], optional
        External axes (3 axes are expected in the list), by default None
    """
    # drop timezone info so mplfinance and the date grouping agree
    ohlc.index = ohlc.index.tz_localize(None)
    ohlc["Day"] = [idx.date() for idx in ohlc.index]
    # restrict to the most recent day only -- VWAP is an intraday indicator
    day_df = ohlc[ohlc.Day == ohlc.Day[-1]]
    df_vwap = overlap_model.vwap(day_df, offset)

    candle_chart_kwargs = {
        "type": "candle",
        "style": theme.mpf_style,
        "volume": True,
        "xrotation": theme.xticks_rotation,
        "scale_padding": {"left": 0.3, "right": 1.2, "top": 0.8, "bottom": 0.8},
        "update_width_config": {
            "candle_linewidth": 0.6,
            "candle_width": 0.8,
            "volume_linewidth": 0.8,
            "volume_width": 0.8,
        },
        "warn_too_much_data": 10000,
    }
    # This plot has 2 axes
    if external_axes is None:
        # let mplfinance create the figure and axes
        candle_chart_kwargs["returnfig"] = True
        candle_chart_kwargs["figratio"] = (10, 7)
        candle_chart_kwargs["figscale"] = 1.10
        candle_chart_kwargs["figsize"] = plot_autoscale()
        candle_chart_kwargs["addplot"] = mpf.make_addplot(
            df_vwap, width=theme.line_width
        )
        fig, ax = mpf.plot(day_df, **candle_chart_kwargs)
        fig.suptitle(
            f"{s_ticker} {s_interval} VWAP",
            x=0.055,
            y=0.965,
            horizontalalignment="left",
        )
        lambda_long_number_format_y_axis(day_df, "Volume", ax)
        theme.visualize_output(force_tight_layout=False)
    else:
        if len(external_axes) != 3:
            logger.error("Expected list of three axis items.")
            console.print("[red]Expected list of 3 axis items./n[/red]")
            return
        (ax1, ax2, ax3) = external_axes
        candle_chart_kwargs["ax"] = ax1
        candle_chart_kwargs["volume"] = ax2
        candle_chart_kwargs["addplot"] = mpf.make_addplot(
            df_vwap, width=theme.line_width, ax=ax3
        )
        mpf.plot(day_df, **candle_chart_kwargs)

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
        "VWAP",
        df_vwap,
    )
5,325,735
def _parse_pars(pars) -> Dict:
    """
    Take a dictionary of parameters, converting values to the required
    type and providing defaults for missing values.

    Args:
        pars: Parameters dictionary.

    Returns:
        Dictionary of converted (and optionally validated) parameters.
    """
    # Fallback to default for missing values and perform conversion.
    for key in PARAM_CONVERSION:
        if pars.get(key) is None:
            pars[key] = PARAM_CONVERSION[key][1]
            # _logger.warning(f"No value found for parameter '{key}'. Using "f"default value {pars[key]}.")
        else:
            convert = PARAM_CONVERSION[key][0]
            if convert:
                try:
                    pars[key] = convert(pars[key])
                except ValueError as e:
                    _logger.error(
                        f"Unable to convert '{key}': {pars[key]} to "
                        f"expected type {convert.__name__}.")
                    raise e
    # Fallback to default for missing paths.
    for path_key in DEFAULT_TO_OBS_DIR:
        if pars.get(path_key) is None:
            pars[path_key] = pars[OBS_DIR]
    return pars
5,325,736
def _get_base_class_names_of_parent_and_child_from_edge(schema_graph, current_location):
    """Return the base class names of a location and its parent from last edge information."""
    edge_direction, edge_name = _get_last_edge_direction_and_name_to_location(current_location)
    edge_element = schema_graph.get_edge_schema_element_or_raise(edge_name)
    if edge_direction == INBOUND_EDGE_DIRECTION:
        # traversing against the edge: parent sits at the "out" end
        parent_base_class_name = edge_element.base_out_connection
        child_base_class_name = edge_element.base_in_connection
    elif edge_direction == OUTBOUND_EDGE_DIRECTION:
        parent_base_class_name = edge_element.base_in_connection
        child_base_class_name = edge_element.base_out_connection
    else:
        raise AssertionError(
            "Expected edge direction to be either inbound or outbound."
            "Found: edge {} with direction {}".format(edge_name, edge_direction)
        )
    return parent_base_class_name, child_base_class_name
5,325,737
def projection(
    v: GridVariableVector,
    solve: Callable = solve_fast_diag,
) -> GridVariableVector:
    """Apply pressure projection to make a velocity field divergence free."""
    grid = grids.consistent_grid(*v)
    pressure_bc = boundaries.get_pressure_bc_from_velocity(v)

    # zero initial guess for the pressure solve
    initial_guess = grids.GridArray(jnp.zeros(grid.shape), grid.cell_center, grid)
    initial_guess = grids.GridVariable(initial_guess, pressure_bc)

    pressure = grids.GridVariable(solve(v, initial_guess), pressure_bc)
    pressure_gradient = fd.forward_difference(pressure)

    # subtract the pressure gradient component-wise from the velocity
    return tuple(
        grids.GridVariable(u.array - grad, u.bc)
        for u, grad in zip(v, pressure_gradient))
5,325,738
def a07_curve_function(curve: CustomCurve):
    """Computes the embedding degree (with respect to the generator order) and its complement"""
    if curve.q().nbits() > 300:
        # field too large: the embedding-degree computation is infeasible
        return {"embedding_degree_complement": None, "complement_bit_length": None}
    order = curve.order()
    degree = curve.embedding_degree()
    complement = ZZ(euler_phi(order) / degree)
    return {
        "embedding_degree_complement": complement,
        "complement_bit_length": complement.nbits(),
    }
5,325,739
def _horizontal_metrics_from_coordinates(xcoord, ycoord):
    """Return horizontal scale factors computed from arrays of projection coordinates.

    Parameters
    ----------
    xcoord : xarray dataarray
        x-coordinate array (plane_x_coordinate or projection_x_coordinate);
        dimension order is assumed to be ('y', 'x').
    ycoord : xarray dataarray
        y-coordinate array (plane_y_coordinate or projection_y_coordinate);
        dimension order is assumed to be ('y', 'x').

    Return
    ------
    e1 : xarray dataarray
        Grid cell width along x (cell_x_size_at_*_location).
    e2 : xarray dataarray
        Grid cell width along y (cell_y_size_at_*_location).
    """
    # Centered first-order derivatives of the projection coordinates with
    # respect to the grid indices (j, i).
    grad_y_j, grad_y_i = _horizontal_gradient(ycoord)
    grad_x_j, grad_x_i = _horizontal_gradient(xcoord)
    # Cell sizes follow from the Euclidean norm of the coordinate gradient
    # taken along each grid direction.
    e1 = sqrt(grad_x_i ** 2. + grad_y_i ** 2.)
    e2 = sqrt(grad_x_j ** 2. + grad_y_j ** 2.)
    return e1, e2
5,325,740
def ultosc(
    df,
    high,
    low,
    close,
    ultosc,
    time_period_1=7,
    time_period_2=14,
    time_period_3=28,
):
    """
    The Ultimate Oscillator (ULTOSC) by Larry Williams is a momentum oscillator
    that incorporates three different time periods to improve the overbought
    and oversold signals.

    Parameters:
        df (pd.DataFrame): DataFrame which contain the asset information.
        high (string): the column name for the period highest price of the asset.
        low (string): the column name for the period lowest price of the asset.
        close (string): the column name for the closing price of the asset.
        ultosc (string): the column name for the ultimate oscillator values.
        time_period_1 (int): The first time period for the indicator. By default, 7.
        time_period_2 (int): The second time period for the indicator. By default, 14.
        time_period_3 (int): The third time period for the indicator. By default, 28.

    Returns:
        df (pd.DataFrame): Dataframe with ultimate oscillator of the asset calculated.
    """
    # Previous close is needed for both the true range and the true low.
    df[ultosc + "previous_close"] = df[close].shift(1)
    df = trange(df, high, low, close, ultosc + "_true_range")
    # The first row has no previous close; drop incomplete rows and re-index.
    df = df.dropna().reset_index(drop=True)
    # True low: the lower of the period low and the previous close.
    df[ultosc + "_true_low"] = df[[low, ultosc + "previous_close"]].min(axis=1)
    # Buying pressure: close minus true low.
    df[ultosc + "_close-tl"] = df[close] - df[ultosc + "_true_low"]
    # Moving averages of buying pressure (a*) and true range (b*) over the
    # three time periods.
    df = sma(df, ultosc + "_close-tl", ultosc + "_a1", time_period_1)
    df = sma(df, ultosc + "_true_range", ultosc + "_b1", time_period_1)
    df = sma(df, ultosc + "_close-tl", ultosc + "_a2", time_period_2)
    df = sma(df, ultosc + "_true_range", ultosc + "_b2", time_period_2)
    df = sma(df, ultosc + "_close-tl", ultosc + "_a3", time_period_3)
    df = sma(df, ultosc + "_true_range", ultosc + "_b3", time_period_3)
    a1_b1 = df[ultosc + "_a1"] / df[ultosc + "_b1"]
    a2_b2 = df[ultosc + "_a2"] / df[ultosc + "_b2"]
    a3_b3 = df[ultosc + "_a3"] / df[ultosc + "_b3"]
    # Weighted average: the shortest period gets weight 4, then 2, then 1.
    df[ultosc] = 100 * ((4 * a1_b1) + (2 * a2_b2) + a3_b3) / 7.0
    # Drop all intermediate helper columns.
    df.drop(
        [
            ultosc + "_true_range",
            ultosc + "previous_close",
            ultosc + "_true_low",
            ultosc + "_close-tl",
            ultosc + "_a1",
            ultosc + "_b1",
            ultosc + "_a2",
            ultosc + "_b2",
            ultosc + "_a3",
            ultosc + "_b3",
        ],
        axis=1,
        inplace=True,
    )
    df = df.dropna().reset_index(drop=True)
    return df
5,325,741
def mating(child_id, parent1, parent2, gt_matrix):
    """Mate two parents to produce a named child.

    Derives the child's genotype from the parents' phased genotypes,
    records the child on both parents, and builds the child Person with
    its parentage set.

    Returns a tuple of (child Person, child genotype).
    """
    genotype = phase_parents(parent1, parent2, gt_matrix)
    for parent in (parent1, parent2):
        parent.add_children(child_id)
    offspring = Person(child_id)
    offspring.set_parents(parent1.get_name(), parent2.get_name())
    return offspring, genotype
5,325,742
def fix_bathy(infile, mindep):
    """
    Simple script to enforce minimum depth and fix the longitudes.
    Run this on the file produced by agrif_create_bathy.exe.

    :param infile: path to the netCDF bathymetry file; modified in place.
    :param mindep: minimum allowed depth (same units as the Bathymetry
        variable — presumably meters; confirm against the file).
    """
    with nc.Dataset(infile, 'r+') as f:
        # Enforce minimum bathymetry
        bm = f.variables['Bathymetry'][:]
        # Only wet cells (depth > 0) shallower than the minimum are reset;
        # land cells (depth == 0) are left untouched.
        idx = (bm > 0) & (bm < mindep)
        if np.any(idx):
            md = np.min(bm[idx])
            # NOTE(review): "{:3f}" is fixed-point with a minimum field width
            # of 3; "{:.3f}" (3 decimal places) was probably intended — confirm.
            print("Min depth {:3f} m, resetting to {:3f} m".format(md, mindep))
            bm[idx] = mindep
            f.variables['Bathymetry'][:] = bm
        # Enforce nav_lon to be in [-180,180] and not [0,360]
        lon = f.variables['nav_lon'][:]
        if np.any(lon > 180):
            lon[lon > 180] -= 360
            f.variables['nav_lon'][:] = lon
            # Keep the valid-range attributes consistent with the new values.
            f.variables['nav_lon'].valid_min = np.min(lon)
            f.variables['nav_lon'].valid_max = np.max(lon)
5,325,743
def chunks(l, n):
    """Yield successive n-sized chunks from l.

    Works on any iterable (including one-shot generators), yielding lists
    of at most ``n`` items; the final chunk may be shorter when the input
    length is not a multiple of ``n``.

    :param l: iterable to split
    :param n: chunk size; must be a positive integer
    """
    # islice consumes exactly n items per pass; an empty slice means the
    # iterator is exhausted. This replaces the manual buffer/counter logic.
    from itertools import islice
    iterator = iter(l)
    while True:
        chunk = list(islice(iterator, n))
        if not chunk:
            return
        yield chunk
5,325,744
def _tmp(
        generator_reconstructed_encoded_fake_data,
        encoded_random_latent_vectors,
        real_data,
        encoded_real_data,
        generator_reconstructed_encoded_real_data,
        alpha=0.7,
        scope="anomaly_score",
        add_summaries=False):
    """anomaly score.

    See https://arxiv.org/pdf/1905.11034.pdf for more details

    Args:
        generator_reconstructed_encoded_fake_data: generator output for the
            encoded fake data (tensor; shape assumed compatible with
            real_data — confirm with caller).
        encoded_random_latent_vectors: encoder output for the random latent
            vectors.
        real_data: batch of real samples.
        encoded_real_data: encoder output for the real samples.
        generator_reconstructed_encoded_real_data: generator reconstruction
            of the encoded real samples.
        alpha: weight of the reconstruction term in the combined score.
        scope: name scope for the ops and summary tags.
        add_summaries: if True, emit scalar summaries for each loss term.

    Returns:
        4-tuple (anomaly_score, gen_rec_loss, real_to_orig_dist,
        gen_rec_loss_predict); note the trailing comma in the return makes
        this an explicit tuple.
    """
    with tf.name_scope(scope):
        # Squared reconstruction error of the fake path, summed over the
        # last two axes (assumes at least rank-2 samples — confirm).
        gen_rec_loss = tf.math.reduce_sum(
            tf.math.pow(real_data - generator_reconstructed_encoded_fake_data, 2),
            axis=[-2, -1])
        # Per-example reconstruction error of the real path (last axis only).
        gen_rec_loss_predict = tf.math.reduce_sum(
            tf.math.pow(real_data - generator_reconstructed_encoded_real_data, 2),
            axis=[-1])
        # Distance between the encoded real data and the original latents.
        real_to_orig_dist = tf.math.reduce_sum(
            tf.math.pow(encoded_real_data - encoded_random_latent_vectors, 2),
            axis=[-2, -1])
        # real_to_orig_dist_predict = tf.math.reduce_sum(
        #     tf.math.pow(encoded_real_data, 2), axis=[-1])
        # Convex combination of the two terms weighted by alpha.
        anomaly_score = (gen_rec_loss_predict * alpha) + ((1 - alpha) * real_to_orig_dist)
        if add_summaries:
            tf.summary.scalar(name=scope + "_gen_rec_loss", data=gen_rec_loss,
                              step=None, description=None)
            tf.summary.scalar(name=scope + "_orig_loss", data=real_to_orig_dist,
                              step=None, description=None)
            tf.summary.scalar(name=scope, data=anomaly_score,
                              step=None, description=None)
    return anomaly_score, gen_rec_loss, real_to_orig_dist, gen_rec_loss_predict,
5,325,745
def print_success(message: str):
    """Print a success message in green, but only when verbose mode is on.

    :param message: message to print
    :type message: str
    """
    global verbose
    if not verbose:
        return
    # KGRN / KNRM are module-level terminal color escape codes.
    print("%s%s%s" % (KGRN, message, KNRM))
5,325,746
def get_key_metrics_fig(confirmed_ser, recovered_ser, deaths_ser, metric_type):
    """
    Return key metrics graph object figure.

    Parameters
    ----------
    confirmed_ser: pandas.Series
        Confirmed pandas series objects with index=dates, values=number of cases
    recovered_ser: pandas.Series
        Recovered pandas series objects with index=dates, values=number of cases
    deaths_ser: pandas.Series
        Deaths pandas series objects with index=dates, values=number of cases
    metric_type: str
        One of ['cumulative', 'new']

    Raises
    ------
    ValueError
        If metric_type is not 'cumulative' or 'new'. (Previously an invalid
        value fell through and crashed later with a NameError on `mode`.)
    """
    if metric_type == 'cumulative':
        mode = 'number+delta'
    elif metric_type == 'new':
        mode = 'number'
    else:
        raise ValueError(
            "metric_type must be 'cumulative' or 'new', got {!r}".format(metric_type))

    fig = go.Figure()
    # (series, title, color, grid column) — one indicator per metric.
    specs = [
        (confirmed_ser, 'Confirmed', 'blue', 0),
        (recovered_ser, 'Recovered', 'green', 1),
        (deaths_ser, 'Deaths', 'red', 2),
    ]
    for ser, title, color, column in specs:
        if metric_type == 'cumulative':
            # Show the day-over-day change below the headline number.
            delta = {
                'reference': ser.values[-2],
                'relative': False,
                'position': "bottom",
                'valueformat': ">,d",
                'increasing.color': color,
                'increasing.symbol': '+',
            }
        else:
            delta = None
        fig.add_trace(go.Indicator(
            mode=mode,
            value=ser.values[-1],
            number={
                "valueformat": ">,d",
                'font': {
                    'size': 60,
                    'color': color,
                }
            },
            domain={'row': 0, 'column': column},
            title={
                'text': title,
                'font': {
                    'size': 24,
                    'color': color,
                }
            },
            delta=delta))

    fig.update_layout(
        grid={'rows': 1, 'columns': 3},
        autosize=True,
        # width=500,
        height=300,
        # margin={'t': 100, 'b': 100, 'l': 0, 'r': 0}
    )
    return fig
5,325,747
def cms_post_popup(r):
    """ Customized Map popup for cms_post resource - style like the cards - currently unused

        :param r: the S3Request for the cms_post record
        :returns: a web2py DIV helper with the rendered card markup
    """
    record = r.record
    pkey = "cms_post.id"
    # Construct the item ID
    map_id = "default_map" # @ToDo: provide the map_id as a var in order to be able to support multiple maps
    record_id = record[pkey]
    item_id = "%s-%s" % (map_id, record_id)
    item_class = "thumbnail"
    db = current.db
    table = db.cms_post
    # Represented (human-readable) field values for display.
    series = table.series_id.represent(record.series_id)
    date = table.date.represent(record.date)
    body = record.body
    location_id = record.location_id
    location = table.location_id.represent(location_id)
    location_url = URL(c="gis", f="location", args=[location_id])
    author_id = record.created_by
    author = table.created_by.represent(author_id)
    # Resolve the author's person record to link to their HRM profile.
    s3db = current.s3db
    ltable = s3db.pr_person_user
    ptable = db.pr_person
    query = (ltable.user_id == author_id) & \
            (ltable.pe_id == ptable.pe_id)
    row = db(query).select(ptable.id,
                           limitby=(0, 1)
                           ).first()
    if row:
        person_url = URL(c="hrm", f="person", args=[row.id])
    else:
        person_url = "#"
    author = A(author,
               _href=person_url,
               )
    # Resolve the author's organisation for the logo/avatar and org link.
    utable = db.auth_user
    otable = db.org_organisation
    query = (utable.id == author_id) & \
            (otable.id == utable.organisation_id)
    row = db(query).select(otable.id,
                           otable.name,
                           otable.logo,
                           limitby=(0, 1)
                           ).first()
    if row:
        organisation_id = row.id
        organisation = row.name
        org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
        logo = URL(c="default", f="download", args=[row.logo])
    else:
        organisation_id = 0
        organisation = ""
        org_url = ""
        logo = ""
    avatar = IMG(_src=logo,
                 _height=50,
                 _width=50,
                 _style="padding-right:5px;",
                 _class="media-object")
    avatar = A(avatar,
               _href=org_url,
               _class="pull-left",
               )
    # Edit Bar (shown only to users with the relevant permissions)
    permit = current.auth.s3_has_permission
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="cms", f="post",
                               args=[record_id, "update.popup"],
                               #vars={"refresh": listid,
                               #      "record": record_id}
                               ),
                     _class="s3_modal",
                     _title=T("Edit %(type)s") % dict(type=T(series)),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-remove-sign"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    # NOTE: edit_bar is built but currently commented out of the card header below.
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Dropdown of available documents
    dtable = db.doc_document
    query = (table.doc_id == dtable.doc_id) & \
            (dtable.deleted == False)
    documents = db(query).select(dtable.file)
    if documents:
        doc_list = UL(_class="dropdown-menu",
                      _role="menu",
                      )
        retrieve = db.doc_document.file.retrieve
        for doc in documents:
            filename = doc.file
            try:
                doc_name = retrieve(filename)[0]
            except IOError:
                # File missing on disk — fall back to the "NONE" label.
                doc_name = current.messages["NONE"]
            doc_url = URL(c="default", f="download",
                          args=[filename])
            doc_item = LI(A(I(_class="icon-file"),
                            " ",
                            doc_name,
                            _href=doc_url,
                            ),
                          _role="menuitem",
                          )
            doc_list.append(doc_item)
        docs = DIV(A(I(_class="icon-paper-clip"),
                     SPAN(_class="caret"),
                     _class="btn dropdown-toggle",
                     _href="#",
                     **{"_data-toggle": "dropdown"}
                     ),
                   doc_list,
                   _class="btn-group attachments dropdown pull-right",
                   )
    else:
        docs = ""
    # Card icon is derived from the series name (e.g. "Incident" -> icon-incident).
    icon = series.lower().replace(" ", "_")
    card_label = TAG[""](I(_class="icon icon-%s" % icon),
                         SPAN(" %s" % T(series),
                              _class="card-title"))
    # Type cards
    if series == "Incident":
        # Apply additional highlighting for Incidents
        item_class = "%s disaster" % item_class
    # Render the item
    item = DIV(DIV(card_label,
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   #edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(body,
                           DIV(author,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               docs,
               _class=item_class,
               _id=item_id,
               )
    return item
5,325,748
def pr_curve(results: List[TrecEvalResults]) -> plt:
    """
    Create a precision-recall graph from trec_eval results.
    :param results: A list of TrecEvalResults files.
    :return: a matplotlib plt object
    """
    names = [r.run_id for r in results]
    # Standard 11-point recall levels: 0.0, 0.1, ..., 1.0.
    recall = np.arange(0, 1.1, 0.1)
    # One row of interpolated-precision values per run; the trec_eval key
    # for each level is e.g. 'iprec_at_recall_0.30'.
    iprec = [
        [r.results['iprec_at_recall_%.2f' % level] for level in recall]
        for r in results
    ]
    mpl.rc('xtick', labelsize=35)
    mpl.rc('ytick', labelsize=35)
    plt.xlabel('Recall', fontsize=35)
    plt.ylabel('Interpolated Precision', fontsize=35)
    for curve in iprec:
        plt.plot(recall, curve, linewidth=10)
    plt.legend(names, fontsize=35)
    return plt
5,325,749
def test_6_1_4_etc_shadow_isfile(host):
    """
    CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
    Tests if /etc/shadow is a file

    :param host: testinfra host fixture used to inspect the target system
    """
    # ETC_SHADOW is a module-level constant (presumably "/etc/shadow" — confirm).
    assert host.file(ETC_SHADOW).is_file
5,325,750
def plotData(datalist, part = "real", progressive = True, color = None, clip = False, tcutoff = None):
    """Plot real or imaginary parts of a given list of functions.

    arguments:
    datalist (list of tuples, each tuple of form (xlist,ylist)): data to plot;
        xlist should be real numbers, ylist can be complex
    part (str): "real" or "imag"; determines which part of ylist we use
    progressive (bool): if True, alphalevel starts at 0 and increases linearly
        as we go through datalist
    color (str): a color supported by axes.plot; if None then gets set to "blue"
        or "red" for real/imag parts respectively; if "rainbow" then color
        varies with hue as we go through datalist
    clip (bool): remove first and last items before plotting
    tcutoff (float or None): if given, drop points with abs(t) > tcutoff

    returns: the matplotlib figure containing the plot
    """
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    alphalevel = 0.0
    # Default curve color depends on which component is plotted.
    if color is None:
        if part == "real":
            color = "blue"
        if part == "imag":
            color = "red"
    # (Fixed: removed dead red/green/blue assignments from the old "rainbow"
    # branch; rainbow colors are computed per curve via HSV below.)
    # iterate over list of functions
    n = len(datalist)
    for k, data in enumerate(datalist):
        if tcutoff is not None:
            # Keep only the points within the time cutoff.
            tlist, zlist = zip(*[[t, z] for t, z in zip(*data) if abs(t) <= tcutoff])
        else:
            tlist, zlist = data
        if clip:
            tlist = tlist[1:-1]
            zlist = zlist[1:-1]
        if part == "real":
            ylist = [z.real for z in zlist]
        if part == "imag":
            ylist = [z.imag for z in zlist]
        # if "progressive" is set then vary alphalevel as we go through datalist
        if progressive:
            alphalevel += 1.0 / n
            alphalevel = min(alphalevel, 1.0)
        else:
            alphalevel = 1.0
        if color == "rainbow":
            hue = float(k) / n
            currentcolor = colors.hsv_to_rgb([hue, 0.8, 0.8])
        else:
            currentcolor = color
        # now plot the points
        axes.plot(tlist, ylist, color=currentcolor, alpha=alphalevel)
    return fig
5,325,751
def expand_word_wrap(base_url, name, tokens):
    """Expand the ``word-wrap`` legacy property.

    See http://www.w3.org/TR/css3-text/#overflow-wrap
    """
    parsed = overflow_wrap(tokens)
    if parsed is not None:
        yield 'overflow-wrap', parsed
    else:
        # Tokens did not parse as a valid overflow-wrap value.
        raise InvalidValues
5,325,752
def train_and_test(model, dataset, robustness_tests=None, base_config_dict=None, save_model=True):
    """
    Train a recommendation model and run robustness tests.

    Args:
        model (str): Name of model to be trained.
        dataset (str): Dataset name; must match the dataset's folder name located in 'data_path' path.
        base_config_dict: Configuration dictionary. If no config passed, takes default values.
        save_model (bool): Determines whether or not to externally save the model after training.
        robustness_tests (dict): Configuration dictionary for robustness tests.

    Returns:
        dict with the plain test result plus one entry per robustness test
        ('distributional_test_result', 'transformation_test_result',
        'sparsity_test_result', 'slice_test_result'); entries for tests that
        were not configured are None.
    """
    config_dict = get_config_dict(robustness_tests, base_config_dict)
    config = Config(model=model, dataset=dataset, config_dict=config_dict)
    init_seed(config['seed'], config['reproducibility'])
    logger = getLogger()
    # NOTE(review): assumes a stale second handler from a previous run sits at
    # index 1; fragile if the handler layout differs — confirm.
    if len(logger.handlers) != 0:
        logger.removeHandler(logger.handlers[1])
    init_logger(config)
    logger.info(config)
    # dataset filtering
    dataset = create_dataset(config)
    logger.info(dataset)
    # dataset splitting (also yields the per-test robustness datasets)
    train_data, valid_data, test_data, robustness_testing_data = data_preparation(config, dataset, save=True)
    for robustness_test in robustness_testing_data:
        if robustness_testing_data[robustness_test] is not None:
            logger.info(set_color('Robustness Test', 'yellow') + f': {robustness_test}')
    # model loading and initialization
    model = get_model(config['model'])(config, train_data).to(config['device'])
    logger.info(model)
    # trainer loading and initialization
    trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
    # model training
    best_valid_score, best_valid_result = trainer.fit(
        train_data, valid_data, saved=save_model, show_progress=config['show_progress']
    )
    # model evaluation
    test_result = trainer.evaluate(test_data, load_best_model=save_model, show_progress=config['show_progress'])
    logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}')
    logger.info(set_color('test result', 'yellow') + f': {test_result}')
    test_result_transformation, test_result_sparsity, \
        test_result_slice, test_result_distributional_slice = None, None, None, None
    # Slice test: evaluate the trained model on a data slice.
    if robustness_testing_data['slice'] is not None:
        test_result_slice = trainer.evaluate(robustness_testing_data['slice'],
                                             load_best_model=save_model,
                                             show_progress=config['show_progress'])
        logger.info(set_color('test result for slice', 'yellow') + f': {test_result_slice}')
    # Distributional slice test: same model, distributionally-shifted slice.
    if robustness_testing_data['distributional_slice'] is not None:
        test_result_distributional_slice = trainer.evaluate(robustness_testing_data['distributional_slice'],
                                                            load_best_model=save_model,
                                                            show_progress=config['show_progress'])
        logger.info(set_color('test result for distributional slice', 'yellow') + f': '
                    f'{test_result_distributional_slice}')
    # Transformation on the test set: evaluate the trained model on it.
    if robustness_testing_data['transformation_test'] is not None:
        test_result_transformation = trainer.evaluate(robustness_testing_data['transformation_test'],
                                                      load_best_model=save_model,
                                                      show_progress=config['show_progress'])
        logger.info(set_color('test result for transformation on test', 'yellow') + f': {test_result_transformation}')
    # Transformation on the training set: retrain from scratch on the
    # transformed training data, then evaluate on the original test set.
    # NOTE(review): this overwrites test_result_transformation from the
    # 'transformation_test' branch above when both are configured — confirm.
    if robustness_testing_data['transformation_train'] is not None:
        transformation_model = get_model(config['model'])(config, robustness_testing_data['transformation_train']).to(
            config['device'])
        logger.info(transformation_model)
        transformation_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, transformation_model)
        best_valid_score_transformation, best_valid_result_transformation = transformation_trainer.fit(
            robustness_testing_data['transformation_train'], valid_data, saved=save_model,
            show_progress=config['show_progress'])
        test_result_transformation = transformation_trainer.evaluate(test_data, load_best_model=save_model,
                                                                     show_progress=config['show_progress'])
        logger.info(
            set_color('best valid for transformed training set', 'yellow') + f': {best_valid_result_transformation}')
        logger.info(set_color('test result for transformed training set', 'yellow') + f': {test_result_transformation}')
    # Sparsity test: retrain on a sparsified training set, evaluate on the
    # original test set.
    if robustness_testing_data['sparsity'] is not None:
        sparsity_model = get_model(config['model'])(config, robustness_testing_data['sparsity']).to(config['device'])
        logger.info(sparsity_model)
        sparsity_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, sparsity_model)
        best_valid_score_sparsity, best_valid_result_sparsity = sparsity_trainer.fit(
            robustness_testing_data['sparsity'], valid_data, saved=save_model,
            show_progress=config['show_progress'])
        test_result_sparsity = sparsity_trainer.evaluate(test_data, load_best_model=save_model,
                                                         show_progress=config['show_progress'])
        logger.info(set_color('best valid for sparsified training set', 'yellow') + f': {best_valid_result_sparsity}')
        logger.info(set_color('test result for sparsified training set', 'yellow') + f': {test_result_sparsity}')
    # Tear down logging so repeated calls don't accumulate handlers.
    logger.handlers.clear()
    shutdown()
    del logger
    return {
        'test_result': test_result,
        'distributional_test_result': test_result_distributional_slice,
        'transformation_test_result': test_result_transformation,
        'sparsity_test_result': test_result_sparsity,
        'slice_test_result': test_result_slice
    }
5,325,753
def total_cost(content_cost, style_cost, alpha, beta):
    """Return a tensor representing the total cost.

    The total is the weighted sum of the two components: ``alpha`` scales
    the content cost and ``beta`` scales the style cost.
    """
    weighted_content = alpha * content_cost
    weighted_style = beta * style_cost
    return weighted_content + weighted_style
5,325,754
def create_command_using_pip_action(
        num_bash_entries=10, uninstall_use_creation_time=False, skip=0):
    """Create commands using latest pip action.

    Inspects the most recent undoable pip command in the shell history and
    builds the shell command that reverses it.
    """
    history = get_valid_pip_history(num_bash_entries)[skip:]
    assert history, 'No undoable pip commands.'
    latest_command = history[0]
    latest_action = latest_command.split()[1]
    undo_command = ''
    if uninstall_use_creation_time:
        # Undo the most recent install(s) by package creation time.
        undo_command = 'pip uninstall -y {}'.format(get_uninstall_candidates())
    elif latest_action == 'install':
        undo_command = create_command_using_packages(
            get_pip_command_packages(latest_command))
    elif latest_action == 'uninstall':
        # Undo an uninstall by reinstalling the removed packages.
        undo_command = 'pip install {}'.format(get_reinstall_candidates())
    elif latest_action == 'download':
        # Undo a download by removing the downloaded files.
        undo_command = 'rm {}'.format(get_file_candidates())
    assert undo_command, 'No undoable pip commands.'
    return undo_command
5,325,755
def extract_word_pos_sequences(form, unknown_category, morpheme_splitter=None,
                               extract_morphemes=False):
    """Return the unique word-based pos sequences, as well as (possibly) the
    morphemes, implicit in the form.

    :param form: a form model object
    :param morpheme_splitter: callable that splits a strings into its morphemes
        and delimiters
    :param str unknown_category: the string used in syntactic category strings
        when a morpheme-gloss pair is unknown
    :param extract_morphemes: if True, also collect (pos, (morpheme, gloss))
        tuples. (NOTE(review): the original docstring described a
        ``morphology`` parameter that does not exist in the signature —
        this flag appears to replace it; confirm.)
    :returns: 2-tuple: (set of pos/delimiter sequences, list of morphemes as
        (pos, (mb, mg)) tuples).
    """
    if not form.syntactic_category_string:
        return None, None
    morpheme_splitter = morpheme_splitter or get_morpheme_splitter()
    pos_sequences = set()
    morphemes = []
    # The three strings are word-aligned: category, morpheme break, gloss.
    sc_words = form.syntactic_category_string.split()
    mb_words = form.morpheme_break.split()
    mg_words = form.morpheme_gloss.split()
    for sc_word, mb_word, mg_word in zip(sc_words, mb_words, mg_words):
        pos_sequence = tuple(morpheme_splitter(sc_word))
        # Only keep sequences where every morpheme's category is known.
        if unknown_category not in pos_sequence:
            pos_sequences.add(pos_sequence)
            if extract_morphemes:
                # The splitter output alternates morpheme/delimiter, so [::2]
                # selects the morphemes (and their categories) only.
                morpheme_sequence = morpheme_splitter(mb_word)[::2]
                gloss_sequence = morpheme_splitter(mg_word)[::2]
                for pos, morpheme, gloss in zip(pos_sequence[::2],
                                                morpheme_sequence,
                                                gloss_sequence):
                    morphemes.append((pos, (morpheme, gloss)))
    return pos_sequences, morphemes
5,325,756
def download_hmi(date: datetime, outdir=None, resolution='1k'):
    """
    Downloads hourly HMI files for the date selected if not already
    available locally.

    :param date: day (UTC assumed — confirm) for which to fetch the 24
        hourly magnetogram images.
    :param outdir: destination directory; defaults to ./solar_images under
        the current working directory. Created if missing.
    :param resolution: image resolution tag used in the JSOC filename,
        e.g. '1k' or '4k'.
    """
    if outdir is None:
        outdir = pathlib.Path.cwd() / 'solar_images'
    else:
        outdir = pathlib.Path(outdir)
    if not outdir.exists():
        # Make our directory structure if it doesn't exist yet
        print(f"Making directory: {outdir}")
        outdir.mkdir(parents=True, exist_ok=True)

    base_url = "http://jsoc.stanford.edu/data/hmi/images/"
    # Files are stored in YYYY/MM/DD directories, with
    # specific times in the filenames after that.
    # This is the alternative request to search for nearest that gets
    # forwarded to the below url
    # ("http://jsoc.stanford.edu/cgi-bin/hmiimage.pl"
    #  "?Year=2017&Month=09&Day=06&Hour=00&Minute=00"
    #  "&Kind=_M_color_&resolution=4k")
    # url = ("http://jsoc.stanford.edu/data/hmi/images/"
    #        "2017/09/06/20170906_000000_M_color_4k.jpg")
    dt = timedelta(hours=1)
    # One image per hour of the requested day.
    for timestep in range(24):
        t = date + timestep*dt
        url_dir = t.strftime("%Y/%m/%d/")
        fname = t.strftime(f"%Y%m%d_%H0000_M_color_{resolution}.jpg")
        url = base_url + url_dir + fname
        download_path = outdir / fname
        if download_path.exists():
            # We have already downloaded this file, so no need to repeat
            print(f"Already downloaded, skipping: {fname}")
            continue
        # Make the download request
        try:
            req = urllib.request.urlopen(url)
        except urllib.error.HTTPError:
            # Just ignore the URL not found errors for now
            print(f"Could not download {url}")
            continue
        # Write out the response to our local file
        with open(download_path, "wb") as f:
            f.write(req.read())
        print(f"Downloaded {url}")
5,325,757
def model_test(Py, Px_y, testDataArr, testLabelArr):
    """Evaluate the naive Bayes model on a test set.

    Args:
        Py: prior probability distribution.
        Px_y: conditional probability distribution.
        testDataArr: test-set samples.
        testLabelArr: test-set labels.

    Returns:
        Classification accuracy in [0, 1].
    """
    # Count the samples whose predicted label disagrees with the truth.
    mistakes = sum(
        1 for i in range(len(testDataArr))
        if NaiveBayes(Py, Px_y, testDataArr[i]) != testLabelArr[i]
    )
    return 1 - (mistakes / len(testDataArr))
5,325,758
def test_more():
    """Test on more than one file.

    Runs the word-count program over two fixture files and compares its
    combined output (per-file counts plus a total line) against the
    expected table.
    """
    expected = (' 1 9 45 ../inputs/fox.txt\n'
                ' 17 118 661 ../inputs/sonnet-29.txt\n'
                ' 18 127 706 total')
    try:
        output = subprocess.check_output(["python3", prg, fox, sonnet])
    except subprocess.CalledProcessError as e:
        # Fixed: e.output is bytes — concatenating it to a str raised a
        # TypeError that masked the real failure.
        print("Command error: " + e.output.decode("utf-8"))
        sys.exit(e.returncode)
    assert output.decode("utf-8").rstrip() == expected
5,325,759
def int2c2e(shortname: str, wrapper: LibcintWrapper, other: Optional[LibcintWrapper] = None) -> torch.Tensor: """ 2-centre 2-electron integrals where the `wrapper` and `other1` correspond to the first electron, and `other2` corresponds to another electron. The returned indices are sorted based on `wrapper`, `other1`, and `other2`. The available shortname: "ar12" """ # don't really care, it will be ignored rinv_pos = torch.zeros(1, dtype=wrapper.dtype, device=wrapper.device) # check and set the others otherw = _check_and_set(wrapper, other) return _Int2cFunction.apply( *wrapper.params, rinv_pos, [wrapper, otherw], IntorNameManager("int2c2e", shortname))
5,325,760
def send_group_membership_request(request, group_id, template='group_send_request.html'):
    """
    Send membership request to the administrator of a private group.

    On GET, render an empty request form. On a valid POST, save the
    request and redirect to the group list. On an invalid POST, re-render
    the bound form so validation errors are shown (previously the code
    fell through and built a fresh unbound form, silently discarding the
    errors).
    """
    if request.method == 'POST':
        form = GroupMembershipRequestForm(request.POST)
        if form.is_valid():
            group = Group.objects.get(pk=group_id)
            form.save(user=request.user, group=group)
            return redirect('group:group_list')
    else:
        form = GroupMembershipRequestForm()
    return render(request, template, {'form': form})
5,325,761
def arg_parse():
    """Parse command-line arguments for the CIMIS extract/convert tool.

    Base all default folders from script location
        scripts: ./pymetric/tools/cimis
        tools:   ./pymetric/tools
        output:  ./pymetric/cimis

    Returns:
        argparse.Namespace with start/end dates, ascii/img folder paths
        (absolute when the folders exist), stats/overwrite flags and the
        logging level in ``loglevel``.
    """
    # Derive the default folder layout relative to the script location.
    script_folder = sys.path[0]
    code_folder = os.path.dirname(script_folder)
    project_folder = os.path.dirname(code_folder)
    cimis_folder = os.path.join(project_folder, 'cimis')
    asc_folder = os.path.join(cimis_folder, 'input_asc')
    img_folder = os.path.join(cimis_folder, 'input_img')

    parser = argparse.ArgumentParser(
        description='CIMIS extract/convert',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--start', required=True, type=_utils.valid_date,
        metavar='YYYY-MM-DD', help='Start date')
    parser.add_argument(
        '--end', required=True, type=_utils.valid_date,
        metavar='YYYY-MM-DD', help='End date')
    parser.add_argument(
        '--ascii', default=asc_folder, metavar='PATH',
        help='Input ascii root folder path')
    parser.add_argument(
        '--img', default=img_folder, metavar='PATH',
        help='Output IMG raster folder path')
    parser.add_argument(
        '--stats', default=False, action="store_true",
        help='Compute raster statistics')
    parser.add_argument(
        '-o', '--overwrite', default=False, action="store_true",
        help='Force overwrite of existing files')
    # store_const: -d switches loglevel from INFO to DEBUG.
    parser.add_argument(
        '-d', '--debug', default=logging.INFO, const=logging.DEBUG,
        help='Debug level logging', action="store_const", dest="loglevel")
    args = parser.parse_args()

    # Convert relative paths to absolute paths (only when the folder exists).
    if args.ascii and os.path.isdir(os.path.abspath(args.ascii)):
        args.ascii = os.path.abspath(args.ascii)
    if args.img and os.path.isdir(os.path.abspath(args.img)):
        args.img = os.path.abspath(args.img)
    return args
5,325,762
def _dataset_type_dir(signer):
    """Returns the directory name of the corresponding dataset type.

    There is a `TFRecord` file written for each of the 25 signers. The
    files of the first 17 signers (indices 0-16) form the train split,
    those of the next 4 (17-20) the validation split, and those of the
    last 4 (21-24) the test split.

    Arguments:
        signer: The index of the signer.

    Returns:
        The directory name of the corresponding dataset type.
    """
    if signer <= 16:
        return DatasetType.TRAIN.value
    if signer <= 20:
        return DatasetType.VALIDATION.value
    return DatasetType.TEST.value
5,325,763
def _merge_blanks(src, targ, verbose=False):
    """Read parallel corpus 2 lines at a time. Merge both sentences if only
    either source or target has blank 2nd line. If both have blank 2nd lines,
    then ignore.
    Returns tuple (src_lines, targ_lines), arrays of strings sentences.

    :param src: path to the source-language file (read as bytes)
    :param targ: path to the target-language file (read as bytes)
    :param verbose: if True, print each merge as it happens
    """
    merges_done = []  # array of indices of rows merged
    sub = None  # replace sentence after merge
    with open(src, 'rb') as src_file, open(targ, 'rb') as targ_file:
        src_lines = src_file.readlines()
        targ_lines = targ_file.readlines()
    print("src: %d, targ: %d" % (len(src_lines), len(targ_lines)))
    print("=" * 30)
    for i in range(0, len(src_lines) - 1):
        # Look at each row together with the row that follows it.
        s = src_lines[i].decode().rstrip()
        s_next = src_lines[i + 1].decode().rstrip()
        t = targ_lines[i].decode().rstrip()
        t_next = targ_lines[i + 1].decode().rstrip()
        # A lone period on the target side is treated as a blank line.
        if t == '.':
            t = ''
        if t_next == '.':
            t_next = ''
        if (len(s_next) == 0) and (len(t_next) > 0):
            # Source's next line is blank but target's is not: fold the
            # target continuation into the current target row.
            targ_lines[i] = "%s %s" % (t, t_next)  # assume it has punctuation
            targ_lines[i + 1] = b''
            src_lines[i] = s if len(s) > 0 else sub
            merges_done.append(i)
            if verbose:
                print("t [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i]))
                print()
        elif (len(s_next) > 0) and (len(t_next) == 0):
            # Symmetric case: fold the source continuation into row i.
            src_lines[i] = "%s %s" % (s, s_next)  # assume it has punctuation
            src_lines[i + 1] = b''
            targ_lines[i] = t if len(t) > 0 else sub
            merges_done.append(i)
            if verbose:
                print("s [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i]))
                print()
        elif (len(s) == 0) and (len(t) == 0):
            # both blank -- remove
            merges_done.append(i)
        else:
            # No merge: keep the stripped text (or the `sub` placeholder
            # for an empty side).
            src_lines[i] = s if len(s) > 0 else sub
            targ_lines[i] = t if len(t) > 0 else sub
    # handle last line
    s_last = src_lines[-1].decode().strip()
    t_last = targ_lines[-1].decode().strip()
    if (len(s_last) == 0) and (len(t_last) == 0):
        merges_done.append(len(src_lines) - 1)
    else:
        src_lines[-1] = s_last
        targ_lines[-1] = t_last
    # remove empty sentences
    # NOTE(review): for merge cases this deletes index i (the row holding
    # the merged text) while the blanked row at i+1 survives — verify this
    # is the intended cleanup and not an off-by-one.
    for m in reversed(merges_done):
        del src_lines[m]
        del targ_lines[m]
    print("merges done: %d" % len(merges_done))
    return (src_lines, targ_lines)
5,325,764
def get_line(file_path, line_rule):
    """Return the lines of a file between the start and end line of a rule.

    :param file_path: path of the file to read
    :param line_rule: range spec of the form ``"<start>,<end>]"`` (e.g.
        ``"3,10]"``); both bounds are 1-based and inclusive
    :return: list of the matching lines (each keeps its trailing newline)
    """
    start_str, end_str = line_rule.split(',')
    s_line = int(start_str)
    e_line = int(end_str[:-1])  # strip the trailing ']' of the rule
    result = []
    # errors='ignore' keeps behaviour tolerant of broken encodings; the
    # context manager fixes the handle leak of the original version.
    with codecs.open(file_path, "r", encoding='utf-8', errors='ignore') as file:
        for line_number, line in enumerate(file, start=1):
            if line_number > e_line:
                break  # no need to scan the rest of the file
            if line_number >= s_line:
                result.append(line)
    return result
5,325,765
def save_with_metadata_temp(metadata, data, dir=None):
    """Save the data and metadata to "test_temp.csv" at the end of the test.

    Layout: the metadata rows come first, then a blank separator row, then
    a header row of column names, then the data rows (columns transposed
    to rows via ``transpone``).

    :param metadata: iterable of rows written verbatim at the top
    :param data: mapping of column name -> list of column values
    :param dir: optional output directory; defaults to the current
        working directory
    """
    # Fixed: identity comparison with None (was `dir == None`).
    if dir is None:
        filename = "test_temp.csv"
    else:
        filename = os.path.join(dir, "test_temp.csv")
    fieldnames = [*data]
    row_data = transpone(list(data.values()))
    # newline='' is required so the csv module controls line endings itself.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, 'excel')
        writer.writerows(metadata)
        writer.writerow([])  # blank separator row between metadata and data
        writer.writerow(fieldnames)
        writer.writerows(row_data)
5,325,766
def dlls_setup(base_address):
    """Find the pattern for loading dynamically DLLs by using LoadLibrary API,
    set the name for the retrieved dll.

    For every LoadLibraryA call site found, the stack variable that receives
    the module handle is renamed to ``h<DllName>`` in the IDA stack frame.

    Args:
        base_address -- the address for Oski procsSetup function
    """
    """LoadLibrary pattern
    mov eax, aBcryptdll
    push eax ; lpLibFileName
    call LoadLibraryA
    mov [ebp+hModule], eax
    """
    # Mnemonic sequence that identifies a LoadLibraryA call site.
    pattern = ['mov', 'push', 'call', 'mov']
    # All instruction addresses in the function containing base_address.
    dism_addr_list = list(FuncItems(base_address))
    pFunc = idaapi.get_func(base_address)
    pFrame = ida_frame.get_frame(base_address)  # stack frame of the function
    counter_dlls = 0
    for i in range(0, len(dism_addr_list)-len(pattern)):
        if check_pattern(dism_addr_list[i:i + len(pattern)], pattern):
            # Third instruction of the pattern must be the LoadLibrary call.
            if "LoadLibrary" in print_operand(dism_addr_list[i + 2], 0):
                # The leading 'mov' references the DLL-name string; take its
                # first data xref and read the name from the comment there.
                dll_string_addr = list(DataRefsFrom(dism_addr_list[i]))[0]
                dll_name = get_cmt(dll_string_addr,0)
                # rename stack variable (the one written by the final 'mov')
                inst = DecodeInstruction(dism_addr_list[i + 3])
                offset = (idaapi.calc_stkvar_struc_offset(pFunc, inst, 0))
                name = sanitize_string(dll_name, 'dll')
                ida_struct.set_member_name(pFrame, offset, 'h%s' % name)
                counter_dlls += 1
    print("%s dynamic dlls" % counter_dlls)
5,325,767
def files_in(directory: Path) -> Iterable[Path]:
    """Recursively yield every file that lives beneath *directory*.

    Directories that do not exist (or vanish mid-scan) are silently skipped.
    """
    try:
        for entry in directory.iterdir():
            if entry.is_dir():
                yield from files_in(entry)
            else:
                yield entry
    except FileNotFoundError:
        pass
5,325,768
def _generate_odd_sequence(sequence_id: int, start_value: int, k_factor: int, max_iterations: int):
    """
    This method generates a Collatz sequence containing only odd numbers.

    :param sequence_id: ID of the sequence.
    :param start_value: The integer value to start with. The value must be a
        natural number > 0. If an even number is handed over, the next odd
        number will be used as start value.
    :param k_factor: The factor by which odd numbers are multiplied in the
        sequence.
    :param max_iterations: The maximum number of iterations performed
        before the method exits.
    :return: The Collatz sequence as a pandas data frame.
    """
    odds = commons.odd_collatz_sequence(start_value, k_factor, max_iterations=max_iterations)
    # v_i are the current odd members; "v_i+" their successors (shift by one).
    next_odds = odds[1:]
    odds.pop()

    collatz_frame = pd.DataFrame({"v_i": odds})
    collatz_frame["sequence_id"] = sequence_id
    collatz_frame["sequence_len"] = len(collatz_frame)
    collatz_frame["n"] = collatz_frame.index + 1
    collatz_frame["k_factor"] = k_factor
    collatz_frame["v_1"] = start_value
    # "kv_i+1" is k*v_i + 1, the value before the trailing twos are divided out.
    collatz_frame["kv_i+1"] = collatz_frame["v_i"].apply(
        commons.next_collatz_number, args=(k_factor,))
    collatz_frame["v_i+"] = next_odds
    collatz_frame["terminal"] = collatz_frame["v_i+"] == 1
    collatz_frame["cycle"] = collatz_frame["v_i+"] == collatz_frame["v_1"]
    # Logs
    collatz_frame["v_i_log2"] = collatz_frame["v_i"].apply(log2)
    collatz_frame["kv_i+1_log2"] = collatz_frame["kv_i+1"].apply(log2)
    collatz_frame["v_i+_log2"] = collatz_frame["v_i+"].apply(log2)
    # Binary strings
    collatz_frame["v_1_bin"] = collatz_frame["v_1"].apply(commons.to_binary)
    collatz_frame["v_i_bin"] = collatz_frame["v_i"].apply(commons.to_binary)
    # Mods
    collatz_frame["v_i_mod4"] = collatz_frame["v_i"] % 4
    collatz_frame["kv_i+1_mod4"] = collatz_frame["kv_i+1"] % 4
    collatz_frame["v_i+_mod4"] = collatz_frame["v_i+"] % 4
    # Alpha: number of trailing zero bits of kv_i+1, i.e. divisions by two.
    collatz_frame["alpha_i"] = collatz_frame["kv_i+1"].apply(commons.trailing_zeros)
    collatz_frame["alpha_i"] = collatz_frame["alpha_i"].astype('int64')
    collatz_frame["alpha_i_max"] = log2(k_factor) + \
        collatz_frame["v_i"].apply(log2)
    collatz_frame["alpha_i_max"] += (1 + 1 / (k_factor * collatz_frame["v_i"])).apply(log2)
    # Round result here to avoid loss of precision errors
    collatz_frame["alpha_i_max"] = collatz_frame["alpha_i_max"].round(9)
    collatz_frame["alpha"] = collatz_frame["alpha_i"].cumsum()
    collatz_frame["alpha_cycle"] = (log2(k_factor) * collatz_frame["n"]).astype('int64') + 1
    collatz_frame["alpha_max"] = log2(start_value) + (collatz_frame["n"] * log2(k_factor))
    collatz_frame["alpha_max"] = collatz_frame["alpha_max"].astype('int64') + 1
    # Beta: per-step multiplicative growth factor (1 + 1/(k*v_i)).
    collatz_frame["beta_i"] = 1 + 1 / (k_factor * collatz_frame["v_i"])
    collatz_frame["beta"] = collatz_frame["beta_i"].cumprod()
    # Lambda: growth of the binary representation length per step.
    collatz_frame["bin_len"] = collatz_frame["v_i_log2"].astype('int64') + 1
    collatz_frame["next_bin_len"] = collatz_frame["kv_i+1_log2"].astype('int64') + 1
    collatz_frame["bin_diff"] = collatz_frame["next_bin_len"] - collatz_frame["bin_len"]
    collatz_frame["lambda_i"] = collatz_frame["bin_diff"]
    collatz_frame.loc[collatz_frame["lambda_i"] < 0, "lambda_i"] = 0
    collatz_frame["lambda"] = collatz_frame["lambda_i"].cumsum()
    collatz_frame["lambda_i_min"] = int(log2(k_factor))
    collatz_frame["lambda_i_max"] = int(log2(k_factor) + 1)
    collatz_frame["lambda_hyp"] = (collatz_frame["n"] * log2(k_factor))
    collatz_frame["lambda_min"] = collatz_frame["lambda_hyp"].astype('int64')
    collatz_frame["lambda_max"] = collatz_frame["lambda_hyp"].astype('int64') + 2
    # Omega: difference between binary growth (lambda) and shrink (alpha).
    collatz_frame["omega_i"] = collatz_frame["lambda_i"] - collatz_frame["alpha_i"]
    collatz_frame["omega"] = collatz_frame["lambda"] - collatz_frame["alpha"]
    collatz_frame["omega_i_max"] = collatz_frame["lambda_i_max"] - 1
    collatz_frame["omega_max"] = collatz_frame["lambda_max"] - collatz_frame["n"]
    # Select and rename columns for the public result frame (greek names
    # abbreviated: alpha->a, beta->b, lambda->l, omega->o).
    result_frame = collatz_frame[[
        "sequence_id", "sequence_len", "n", "k_factor",
        "v_1", "v_i", "kv_i+1", "v_i+",
        "v_i_log2", "v_i+_log2", "kv_i+1_log2",
        "v_i_mod4", "kv_i+1_mod4", "v_i+_mod4",
        "v_1_bin", "v_i_bin",
        "terminal", "cycle",
        "alpha_i", "alpha_i_max", "alpha", "alpha_cycle", "alpha_max",
        "beta_i", "beta",
        "bin_len", "next_bin_len",
        "lambda_i", "lambda_i_min", "lambda_i_max",
        "lambda", "lambda_min", "lambda_max",
        "omega_i", "omega_i_max",
        "omega", "omega_max"]]

    result_frame.columns = [
        "sequence_id", "sequence_len", "n", "k",
        "v_1", "v_i", "kv_i+1", "v_i+",
        "v_i_log2", "v_i+_log2", "kv_i+1_log2",
        "v_i_mod4", "kv_i+1_mod4", "v_i+_mod4",
        "v_1_bin", "v_i_bin",
        "terminal", "cycle",
        "a_i", "a_i_max", "a", "a_cycle", "a_max",
        "b_i", "b",
        "bin_len", "next_bin_len",
        "l_i", "l_i_min", "l_i_max",
        "l", "l_min", "l_max",
        "o_i", "o_i_max",
        "o", "o_max"]

    return result_frame
5,325,769
def find_initcpio_features(partitions, root_mount_point):
    """
    Returns a tuple (hooks, modules, files) needed to support the
    given @p partitions (filesystems types, encryption, etc) in the target.

    :param partitions: (from GS)
    :param root_mount_point: (from GS)

    :return 3-tuple of lists
    """
    hooks = ["base", "udev", "autodetect", "modconf", "block", "keyboard",
             "keymap", "consolefont"]
    modules = []
    files = []

    swap_uuid = ""
    uses_btrfs = False
    uses_lvm2 = False
    encrypt_hook = False
    openswap_hook = False
    unencrypted_separate_boot = False

    # Fix: detect_plymouth() probes the target system and was previously
    # called twice; call once and reuse the result.
    plymouth_detected = detect_plymouth()

    # It is important that the plymouth hook comes before any encrypt hook
    if plymouth_detected:
        hooks.append("plymouth")

    for partition in partitions:
        if partition["fs"] == "linuxswap" and not partition.get("claimed", None):
            # Skip foreign swap
            continue

        if partition["fs"] == "linuxswap":
            swap_uuid = partition["uuid"]
            if "luksMapperName" in partition:
                openswap_hook = True

        if partition["fs"] == "btrfs":
            uses_btrfs = True

        if "lvm2" in partition["fs"]:
            uses_lvm2 = True

        if partition["mountPoint"] == "/" and "luksMapperName" in partition:
            encrypt_hook = True

        if (partition["mountPoint"] == "/boot"
                and "luksMapperName" not in partition):
            unencrypted_separate_boot = True

        if partition["mountPoint"] == "/usr":
            hooks.append("usr")

    if encrypt_hook:
        if plymouth_detected and unencrypted_separate_boot:
            hooks.append("plymouth-encrypt")
        else:
            hooks.append("encrypt")
        # The keyfile can only be embedded when /boot is encrypted too.
        if not unencrypted_separate_boot and \
           os.path.isfile(
               os.path.join(root_mount_point, "crypto_keyfile.bin")
           ):
            files.append("/crypto_keyfile.bin")

    if uses_lvm2:
        hooks.append("lvm2")

    if swap_uuid != "":
        # resume-from-hibernate needs openswap to unlock encrypted swap first.
        if encrypt_hook and openswap_hook:
            hooks.extend(["openswap"])
        hooks.extend(["resume", "filesystems"])
    else:
        hooks.extend(["filesystems"])

    if uses_btrfs:
        modules.append("crc32c-intel" if cpuinfo().is_intel else "crc32c")
    else:
        hooks.append("fsck")

    return (hooks, modules, files)
5,325,770
def _is_unique_rec_name(info_name):
    """
    helper method to see if we should use the uniqueness recommendation
    on the fact comparison
    """
    unique_suffixes = (".ipv4_addresses", ".ipv6_addresses", ".mac_address")
    unique_prefixes = ("fqdn",)

    # Loopback interface facts are identical on every host, never unique.
    if info_name.startswith("network_interfaces.lo."):
        return False
    if info_name.startswith(unique_prefixes):
        return True
    return info_name.endswith(unique_suffixes)
5,325,771
def do_volume_create(cs, args):
    """Add a new volume."""
    create_args = (args.size, args.snapshot_id,
                   args.display_name, args.display_description)
    cs.volumes.create(*create_args)
5,325,772
def initialized():
    """
    Connection finished initializing?
    """
    device_state = __context__["netmiko_device"]
    return device_state.get("initialized", False)
5,325,773
def n_round(a, b):
    """Element-wise "safe round" of ``a`` to ``intify(b)`` decimal places."""
    vectorized_round = np.vectorize(np.round)
    return vectorized_round(a, intify(b))
5,325,774
def send_single(addr, port, data):
    """ Send a single packet

    Args:
        addr: a string representation of destination address
        port: destination port
        data: data to be sent
    """
    addr_family, destination = get_addr(addr, port)
    with create_socket(addr_family) as sock:
        send_data(sock, destination, data)
5,325,775
def _json_view_params(shape, affine, vmin, vmax, cut_slices, black_bg=False,
                      opacity=1, draw_cross=True, annotate=True, title=None,
                      colorbar=True, value=True):
    """ Create a dictionary with all the brainsprite parameters.

    Returns: params
    """
    # Font/background colours depend on whether the background is black.
    if black_bg:
        font_color, background_color = '#FFFFFF', '#000000'
    else:
        font_color, background_color = '#000000', '#FFFFFF'

    # json cannot serialise numpy scalars/arrays, so coerce to native types.
    if type(vmin).__module__ == 'numpy':
        vmin = vmin.tolist()
    if type(vmax).__module__ == 'numpy':
        vmax = vmax.tolist()

    slice_counts = {'X': shape[0], 'Y': shape[1], 'Z': shape[2]}
    params = {
        'canvas': '3Dviewer',
        'sprite': 'spriteImg',
        'nbSlice': slice_counts,
        'overlay': {
            'sprite': 'overlayImg',
            'nbSlice': dict(slice_counts),
            'opacity': opacity,
        },
        'colorBackground': background_color,
        'colorFont': font_color,
        'crosshair': draw_cross,
        'affine': affine.tolist(),
        'flagCoordinates': annotate,
        'title': title,
        'flagValue': value,
        # Viewer slice indices are zero-based; cut_slices are one-based.
        'numSlice': {'X': cut_slices[0] - 1,
                     'Y': cut_slices[1] - 1,
                     'Z': cut_slices[2] - 1},
    }

    if colorbar:
        params['colorMap'] = {'img': 'colorMap', 'min': vmin, 'max': vmax}

    return params
5,325,776
def get_timestamps_from_df_data(df) -> List[datetime.datetime]:
    """Get a list of timestamp from rows of a DataFrame containing raw data.

    Each row must provide a "dteday" string ("YYYY-MM-DD") and an "hr"
    value convertible to int.
    """
    timestamps = []
    for _, row in df.iterrows():
        date_str = row["dteday"]
        stamp = datetime.datetime(int(date_str[:4]),
                                  int(date_str[5:7]),
                                  int(date_str[-2:]),
                                  int(row["hr"]))
        timestamps.append(stamp)
    return timestamps
5,325,777
def list_security_configurations(Marker=None):
    """
    Lists all the security configurations visible to this account, providing
    their creation dates and times, and their names. This call returns a
    maximum of 50 clusters per call, but returns a marker to track the paging
    of the cluster list across multiple ListSecurityConfigurations calls.

    See also: AWS API Documentation

    :example: response = client.list_security_configurations(
        Marker='string'
    )

    :type Marker: string
    :param Marker: The pagination token that indicates the set of results
        to retrieve.

    :rtype: dict
    :return: {
        'SecurityConfigurations': [
            {
                'Name': 'string',
                'CreationDateTime': datetime(2015, 1, 1)
            },
        ],
        'Marker': 'string'
    }
        SecurityConfigurations (list) -- The creation date and time, and
        name, of each security configuration.
        Marker (string) -- A pagination token for the next set of results;
        include it in the next ListSecurityConfigurations call to retrieve
        the next page, if required.

    Exceptions:
        EMR.Client.exceptions.InternalServerException
        EMR.Client.exceptions.InvalidRequestException
    """
    # Auto-generated documentation stub: the real implementation is bound
    # at runtime from the botocore service model.
    pass
5,325,778
def test_enum_symbols_validation__correct(symbol):
    """Regression test: https://github.com/fastavro/fastavro/issues/551

    Parametrized over valid enum symbol names; parsing the schema must not
    raise SchemaParseException for any of them.
    """
    # Record schema whose enum contains the (parametrized) symbol under test.
    # The variable name is historical: for this test the schema is valid.
    invalid_schema = {
        "type": "record",
        "name": "my_schema",
        "fields": [
            {
                "name": "enum_field",
                "type": {
                    "name": "my_enum",
                    "type": "enum",
                    "symbols": [symbol],
                },
            }
        ],
    }

    try:
        parse_schema(invalid_schema)
    except SchemaParseException:
        # A valid symbol must never be rejected by the schema parser.
        pytest.fail(f"valid symbol {symbol} has been incorrectly marked as invalid.")
5,325,779
def debug_user(request):
    """
    Allows superusers to log in as a regular user to troubleshoot problems.

    :param request: Django HttpRequest; ``uid`` query parameter selects the
                    profile to impersonate
    :return: HttpResponseRedirect to "/"
    """
    if not settings.DEBUG:
        messages.error(request, "Can only use when in debug mode.")
        # Bug fix: the redirect was created but never returned, so outside
        # DEBUG the view fell through and impersonation still happened.
        return redirect("/")

    target = request.GET.get("uid", "")
    profile = Profile.objects.filter(uid=target).first()

    if not profile:
        messages.error(request, "User does not exists.")
        return redirect("/")

    user = profile.user
    login(request, user, backend="django.contrib.auth.backends.ModelBackend")
    messages.success(request, "Login successful!")

    logger.info(f"""uid={request.user.profile.uid} impersonated uid={profile.uid}.""")

    return redirect("/")
5,325,780
def select(var_name, attr_name=None):
    """
    Return attribute(s) of a variable given the variable name and an
    optional field name, or list of attribute name(s)

    :param var_name: Name of the variable we're interested in.
    :param attr_name: A string representing the name of the attribute whose
        value we want to fetch. This can also be a list of strings in case
        of multiple attributes. If None, all attributes of the variable are
        returned.
    :return: A dictionary of attribute => value mappings if multiple
        attributes were requested (i.e. attr_name is a list), or a string
        value if a single attribute name was requested (i.e. attr_name is a
        string)
    """
    single = isinstance(attr_name, str)

    if attr_name is None:
        query_params = None
    elif single:
        query_params = {attr_name: attr_name}
    else:
        query_params = {field: field for field in attr_name}

    data = _get('variable/%s' % var_name, query_params)
    return data[attr_name] if single else data
5,325,781
def print_last_word(words):
    """Prints the last word after popping it off.

    The list is modified in place: its final element is removed.

    :param words: non-empty list of words
    """
    # Fix: converted the Python 2 `print word` statement to the Python 3
    # print() function; pop() with no argument already removes the last
    # element ("furthest from the top" of the list).
    word = words.pop()
    print(word)
5,325,782
def _parseWinBuildTimings(logfile):
    """Variant of _parseBuildTimings for Windows builds.

    Scans each log line for MSBuild "Time Elapsed HH:MM:SS" stamps
    (accumulated under 'Compile') and per-test "... N sec" stamps
    (accumulated under 'Test running'), then adds a 'Total'.

    :param logfile: iterable of log lines
    :return: dict of category -> elapsed seconds (float), plus 'Total'
    """
    res = {'Compile': re.compile(r'\d+>Time Elapsed (\d+):(\d+):([0-9.]+)'),
           'Test running': re.compile(r'.*?\.+.*?([0-9.]+) sec')}
    times = {key: 0.0 for key in res}
    for line in logfile:
        # Fix: replaced Python 2 dict.iteritems() with items().
        for key, regexp in res.items():
            m = regexp.match(line)
            if m:
                # Groups are (hours, minutes, seconds) or just (seconds);
                # walk them right-to-left, scaling by 60 each step.
                multiplier = 1
                for time_part in reversed(m.groups()):
                    times[key] += float(time_part) * multiplier
                    multiplier *= 60
                break
    times['Total'] = sum(times.values())
    return times
5,325,783
def _try_to_extract_issuing_url_from_well_known_metadata(well_known_address: str) -> typing.Optional[str]:
    """
    Try to extract token issuing url from well-known location

    :param well_known_address: well-known URL
    :type well_known_address: str
    :return: str or None -- token issuing URL
    """
    try:
        LOGGER.debug('Trying to extract well-known information from address %r', well_known_address)
        response = requests.get(url=well_known_address)
        data = response.json()
    # Fix: catch the whole requests exception hierarchy. Previously only
    # HTTPError was handled, so connection/timeout errors propagated
    # instead of returning None as documented.
    except requests.RequestException as http_error:
        LOGGER.debug('Failed to extract well-known information from address %r - %s', well_known_address, http_error)
        return None
    except ValueError as value_error:
        LOGGER.debug('Failed to parse well-known information from address %r - %s', well_known_address, value_error)
        return None

    token_endpoint = data.get('token_endpoint')
    if not token_endpoint:
        LOGGER.debug('well-known information does not contain token_endpoint (%s)', well_known_address)
        return None  # explicit None for consistency with the other branches

    return token_endpoint
5,325,784
def get_malid(anime: AnimeThemeAnime) -> int:
    """
    Returns the MyAnimeList external id of the anime resource,
    or None when no MyAnimeList resource is present.
    """
    return next(
        (res['external_id'] for res in anime['resources']
         if res["site"] == "MyAnimeList"),
        None,
    )
5,325,785
def wav16khz2mfcc(dir_name):
    """
    Loads all *.wav files from directory dir_name (must be 16kHz), converts
    them into MFCC features (13 coefficients) and stores them into a
    dictionary. Keys are the file names and values are 2D numpy arrays of
    MFCC features.
    """
    features = {}
    for wav_path in glob(dir_name + '/*.wav'):
        print('Processing file: ', wav_path)
        rate, signal = wavfile.read(wav_path)
        assert rate == 16000
        features[wav_path] = mfcc(signal, 400, 240, 512, 16000, 23, 13)
    return features
5,325,786
def add_metadata(infile, outfile, sample_metadata):
    """Build a doit task that adds sample-level metadata to a biom file.

    Sample-level metadata should be in a format akin to
    http://qiime.org/tutorials/tutorial.html#mapping-file-tab-delimited-txt

    :param infile: String; name of the biom file to which metadata
                   shall be added
    :param outfile: String; name of the resulting metadata-enriched biom file
    :param sample_metadata: String; name of the sample-level metadata
                            tab-delimited text file. Note: the sample names
                            here must match the sample names in the biom file.

    External dependencies
      - biom-format: http://biom-format.org/
    """
    command = ("biom add-metadata -i {} -o {} -m {}"
               .format(infile, outfile, sample_metadata))
    return {
        "name": "biom_add_metadata: " + infile,
        "actions": [command],
        "file_dep": [infile],
        "targets": [outfile],
    }
5,325,787
def test_something():
    """
    Placeholder test for a method from module package_name.module_name
    """
    outcome = True
    assert outcome
5,325,788
def multinomial(n):
    """Finds the multinomial coefficient for a given array of numbers.

    Computed as the telescoping product of binomial coefficients
    C(sum, n0) * C(sum - n0, n1) * ...

    Args:
        n (list): the integers to be used.
    """
    remaining = np.sum(n)
    factors = []
    for k in n:
        factors.append(binomial_coefficient(remaining, k))
        remaining = remaining - k
    return np.prod(factors)
5,325,789
def get_qc_data(sample_prj, p_con, s_con, fc_id=None):
    """Get qc data for a project, possibly subset by flowcell.

    :param sample_prj: project identifier
    :param p_con: object of type <ProjectSummaryConnection>
    :param s_con: object of type <SampleRunMetricsConnection>
    :param fc_id: optional flowcell identifier used to subset the samples

    :returns: dictionary of qc results, keyed by sample name
    """
    project = p_con.get_entry(sample_prj)
    application = project.get("application", None) if project else None
    samples = s_con.get_samples(fc_id=fc_id, sample_prj=sample_prj)
    qcdata = {}
    for s in samples:
        # Missing Picard metrics default to -1 / "-1.0"; decimal commas
        # (locale formatting) are normalised to dots before float().
        qcdata[s["name"]]={"sample":s.get("barcode_name", None),
                           "project":s.get("sample_prj", None),
                           "lane":s.get("lane", None),
                           "flowcell":s.get("flowcell", None),
                           "date":s.get("date", None),
                           "application":application,
                           "TOTAL_READS":int(s.get("picard_metrics", {}).get("AL_PAIR", {}).get("TOTAL_READS", -1)),
                           "PERCENT_DUPLICATION":s.get("picard_metrics", {}).get("DUP_metrics", {}).get("PERCENT_DUPLICATION", "-1.0"),
                           "MEAN_INSERT_SIZE":float(s.get("picard_metrics", {}).get("INS_metrics", {}).get("MEAN_INSERT_SIZE", "-1.0").replace(",", ".")),
                           "GENOME_SIZE":int(s.get("picard_metrics", {}).get("HS_metrics", {}).get("GENOME_SIZE", -1)),
                           "FOLD_ENRICHMENT":float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("FOLD_ENRICHMENT", "-1.0").replace(",", ".")),
                           "PCT_USABLE_BASES_ON_TARGET":s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_USABLE_BASES_ON_TARGET", "-1.0"),
                           "PCT_TARGET_BASES_10X":s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_TARGET_BASES_10X", "-1.0"),
                           "PCT_PF_READS_ALIGNED":s.get("picard_metrics", {}).get("AL_PAIR", {}).get("PCT_PF_READS_ALIGNED", "-1.0"),
                           }
        target_territory = float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("TARGET_TERRITORY", -1))
        # Convert fraction-valued metrics (still strings here) to percentages.
        pct_labels = ["PERCENT_DUPLICATION", "PCT_USABLE_BASES_ON_TARGET", "PCT_TARGET_BASES_10X", "PCT_PF_READS_ALIGNED"]
        for l in pct_labels:
            if qcdata[s["name"]][l]:
                qcdata[s["name"]][l] = float(qcdata[s["name"]][l].replace(",", ".")) * 100
        # Derived metric: fold enrichment relative to the target fraction of
        # the genome, expressed as a percentage.
        if qcdata[s["name"]]["FOLD_ENRICHMENT"] and \
           qcdata[s["name"]]["GENOME_SIZE"] and target_territory:
            qcdata[s["name"]]["PERCENT_ON_TARGET"] = float(qcdata[s["name"]]["FOLD_ENRICHMENT"]/(float(qcdata[s["name"]]["GENOME_SIZE"]) / float(target_territory))) * 100
    return qcdata
5,325,790
def sql_sanitize(sql_name):
    """
    Return a SQL name (table or column) cleaned of problematic characters.
    ex. punctuation )(][; whitespace
    Don't use with values, which can be properly escaped with parameterization.
    Ideally retaining only alphanumeric char.

    Credits: Donald Miner, Source: StackOverflow, DateAccessed: 2020-02-20
    """
    return "".join(filter(str.isalnum, sql_name))
5,325,791
def test_get_portfolio_cash_balance():
    """
    Tests get_portfolio_cash_balance method for:
    * Raising ValueError if portfolio_id not in keys
    * Correctly obtaining the value after cash transfers
    """
    start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
    exchange = ExchangeMock()
    data_handler = DataHandlerMock()
    sb = SimulatedBroker(start_dt, exchange, data_handler)

    # Raising ValueError if portfolio_id not in keys
    with pytest.raises(ValueError):
        sb.get_portfolio_cash_balance("5678")

    # Create fund transfers and portfolio
    # NOTE: the broker stores portfolio ids as strings, so 1234 here and
    # "1234" below refer to the same portfolio.
    sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
    sb.subscribe_funds_to_account(175000.0)
    sb.subscribe_funds_to_portfolio("1234", 100000.00)

    # Check correct values obtained after cash transfers
    assert sb.get_portfolio_cash_balance("1234") == 100000.0
5,325,792
def add_event_number(job_metrics, workdir):
    """
    Extract event number from file and add to job metrics if it exists

    :param job_metrics: job metrics (string).
    :param workdir: work directory (string).
    :return: updated job metrics (string).
    """
    heartbeat_path = os.path.join(workdir, 'eventLoopHeartBeat.txt')
    if not os.path.exists(heartbeat_path):
        logger.debug('file %s does not exist (skip for now)', heartbeat_path)
        return job_metrics

    last_line = find_last_line(heartbeat_path)
    if last_line:
        event_number = get_number_in_string(last_line)
        if event_number:
            job_metrics += get_job_metrics_entry("eventnumber", event_number)

    return job_metrics
5,325,793
def exists(name):
    """
    `True` if a category named `name` exists; `False` otherwise.
    """
    query = 'SELECT COUNT(*) FROM categories WHERE name = ?'
    count = db.cursor().execute(query, (name,)).fetchone()[0]
    return count != 0
5,325,794
def ESMP_MeshGetOwnedElementCount(mesh):
    """
    Preconditions: An ESMP_Mesh has been created.\n
    Postconditions: The owned elementCount for 'mesh' has been returned.\n
    Arguments:\n
        :RETURN: integer :: elementCount\n
        ESMP_Mesh :: mesh\n
    """
    # Out-parameter filled in by the underlying ESMC C call.
    lec = ct.c_int(0)
    rc = _ESMF.ESMC_MeshGetOwnedElementCount(mesh.struct.ptr, ct.byref(lec))
    # Any non-success return code from the C library is surfaced as a
    # ValueError carrying the code and the shared error message.
    if rc != constants._ESMP_SUCCESS:
        raise ValueError('ESMC_MeshGetOwnedElementCount() failed with rc = '+
                        str(rc)+'.    '+constants._errmsg)
    elementCount = lec.value
    return elementCount
5,325,795
def remove_control_chars_author(input):
    """
    Strip control characters from an author string.

    Every match of the module-level CONTROL_CHAR_RE pattern is removed.

    :param input: author string to clean (note: the parameter name shadows
                  the ``input`` builtin but is kept for compatibility)
    :return: the string with all control characters removed
    """
    return CONTROL_CHAR_RE.sub('', input)
5,325,796
def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
    """publish helm chart index to github pages

    :param name: chart name (the directory packaged by ``helm package``)
    :param paths: paths whose last-modified commit identifies this version
    :param git_repo: "org/repo" slug of the repository hosting gh-pages
    :param published_repo: public URL of the published chart repository
    :param extra_message: optional text appended to the commit message
    """
    version = last_modified_commit(*paths)
    checkout_dir = '{}-{}'.format(name, version)
    # --no-checkout avoids materialising the default branch; we only need
    # the gh-pages branch checked out below.
    subprocess.check_call([
        'git', 'clone', '--no-checkout',
        'git@github.com:{}'.format(git_repo), checkout_dir],
    )
    subprocess.check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)

    # package the latest version into a temporary directory
    # and run helm repo index with --merge to update index.yaml
    # without refreshing all of the timestamps
    with TemporaryDirectory() as td:
        subprocess.check_call([
            'helm', 'package', name,
            '--destination', td + '/',
        ])

        subprocess.check_call([
            'helm', 'repo', 'index', td,
            '--url', published_repo,
            '--merge', os.path.join(checkout_dir, 'index.yaml'),
        ])

        # equivalent to `cp td/* checkout/`
        # copies new helm chart and updated index.yaml
        for f in os.listdir(td):
            shutil.copy2(
                os.path.join(td, f),
                os.path.join(checkout_dir, f)
            )

    subprocess.check_call(['git', 'add', '.'], cwd=checkout_dir)
    if extra_message:
        extra_message = '\n\n%s' % extra_message
    else:
        extra_message = ''
    subprocess.check_call([
        'git', 'commit', '-m',
        '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
    ], cwd=checkout_dir)
    subprocess.check_call(
        ['git', 'push', 'origin', 'gh-pages'],
        cwd=checkout_dir,
    )
5,325,797
def PrintCounts(group):
    """
    Print the most common values for each key in a collection of dicts.

    This is a (currently unused) utility method for examining the patterns
    in the various JSON files, to get a feel for them. It can tell you which
    fields carry no data (because they're always set to the same value), and
    which have some meaning but also have a consistent default.
    """
    counts = {}
    for entry in group:
        for key, value in entry.items():
            # Lists/dicts are unhashable; count their repr() instead.
            # (exact-type check, matching the original behaviour)
            if type(value) in (list, dict):
                value = repr(value)
            counts.setdefault(key, collections.Counter())[value] += 1
    for key, counter in counts.items():
        print('"%s": %s' % (key, counter.most_common(3)))
5,325,798
def write_version(filename: str = os.path.join("airflow", "git_version")):
    """
    Write the Semver version + git hash to file, e.g.
    ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".

    :param str filename: Destination file to write
    """
    # Cleanup: os.path.join(*["airflow", "git_version"]) simplified to a
    # direct join (same value); "{}".format(...) replaced with an f-string.
    text = f"{git_version(version)}"
    with open(filename, 'w') as file:
        file.write(text)
5,325,799