Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def scale_loss(loss, optimizer, model=None, delay_unscale=False):
    """
    On context manager entrance, creates ``scaled_loss = (loss.float())*current loss scale``.
    ``scaled_loss`` is yielded so that the user can call ``scaled_loss.backward()``::

        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()

    On context manager exit (if ``delay_unscale=False``), the gradients are checked for infs/NaNs
    and unscaled, so that ``optimizer.step()`` can be called.

    .. note::
        If Amp is using explicit FP32 master params (which is the default for ``opt_level=O2``, and
        can also be manually enabled by supplying ``master_weights=True`` to ``amp.initialize``)
        any FP16 gradients are copied to FP32 master gradients before being unscaled.
        ``optimizer.step()`` will then apply the unscaled master gradients to the master params.

    .. warning::
        If Amp is using explicit FP32 master params, only the FP32 master gradients will be
        unscaled.  The direct ``.grad`` attributes of any FP16 model params will remain scaled
        after context manager exit.  This subtlety affects gradient clipping.  See "Gradient
        clipping" under `Advanced Amp Usage`_ for best practices.

    Args:
        loss(Tensor):  Typically a scalar Tensor.  The ``scaled_loss`` that the context manager
            yields is simply ``loss.float()*loss_scale``, so in principle ``loss`` could have more
            than one element, as long as you call ``backward()`` on ``scaled_loss`` appropriately
            within the context manager body.
        optimizer:  Must be an optimizer returned from an earlier call to ``amp.initialize``.
        model(torch.nn.Module, optional, default=None):  Currently unused, reserved to enable
            future optimizations.
        delay_unscale(bool, default=False):  Don't unscale the gradients or perform
            model->master gradient copies on context manager exit.
            `Advanced Amp Usage`_ illustrates situations where this is necessary.

    .. warning::
        If ``delay_unscale`` is ``True`` for a given backward pass, ``optimizer.step()`` cannot be
        called yet after context manager exit, and must wait for another, later backward context
        manager invocation with ``delay_unscale`` left to False.

    See `Advanced Amp Usage`_ for examples.

    .. _`Advanced Amp Usage`:
        https://nvidia.github.io/apex/advanced.html
    """
    if not _amp_state.opt_properties.enabled:
        yield loss
        return

    if optimizer.loss_scaler is None:
        raise RuntimeError("optimizer passed to scale_loss does not have a loss_scaler.")

    # This is what happens when I have to support tools from different sources under the same API...
    # TODO:  Rewrite FusedAdam to use multi-tensor apply and the same loss scaler.
    if isinstance(optimizer, FP16_Optimizer_for_fused):
        loss_scale = optimizer.cur_scale
    else:
        loss_scale = optimizer.loss_scaler.loss_scale()

    if ((not _amp_state.opt_properties.master_weights)
            and (not optimizer.loss_scaler.dynamic)
            and loss_scale == 1.0):
        yield loss.float()
        # Needing to drop the cache here as well is an ugly gotcha.
        # But for now I think it's necessary to short-circuit.
        # Probably ok to skip this if not delay_unscale
        if _amp_state.opt_properties.patch_torch_functions:
            _amp_state.handle._clear_cache()
        return

    yield (loss.float())*loss_scale

    # This isn't pretty but it unifies things.  Once I deprecate the old API entirely,
    # I will have freedom to clean this up.  Maybe instead of wrapping optimizers,
    # I can simply construct a set of attributes (e.g. master params) and assign them
    # directly to optimizer instances.
    if not delay_unscale:
        # The FP16_Optimizer for FusedAdam will take care of unscaling as part of
        # its step() method.
        if not isinstance(optimizer, FP16_Optimizer_for_fused):
            if isinstance(optimizer, FP16_Optimizer_general):
                optimizer.update_master_grads()
            else:
                optimizer.loss_scaler.clear_overflow_state()
                optimizer.loss_scaler.unscale(
                    master_params(optimizer),
                    master_params(optimizer),
                    loss_scale)
            # For future fused optimizers that enable sync-free dynamic loss scaling,
            # should_skip will always be False.
            should_skip = optimizer.loss_scaler.update_scale()
            if should_skip:
                optimizer_step = optimizer.step
                def skip_step():
                    maybe_print("Gradient overflow.  Skipping step, reducing " +
                                "loss scale to {}".format(optimizer.loss_scaler.loss_scale()))
                    optimizer.step = optimizer_step
                optimizer.step = skip_step

    # Probably ok to skip this if not delay_unscale
    if _amp_state.opt_properties.patch_torch_functions:
        _amp_state.handle._clear_cache()
10,500
def get_top_words(keywords): """ Orders the topics from most common to least common for displaying. """ keywords = itertools.chain.from_iterable(map(str.split, keywords)) top_words = list(Counter(keywords)) return top_words
10,501
def is_mac(): """ Check if mac os >>> print(is_mac()) True or False Returns ------- bool: bool True or False. """ if sys.platform == 'darwin': return True return False
10,502
def login(i): """ Input: { (sudo) - if 'yes', add sudo } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ import os s='docker login' if i.get('sudo','')=='yes': s='sudo '+s os.system(s) return {'return':0}
10,503
def main(): """Execute setup and start the application.""" loop = asyncio.get_event_loop() score = pull_from_disk('score') banned = pull_from_disk('banned') asyncio.ensure_future(run_client(loop, **kwargs)) asyncio.ensure_future(save_to_disk('score', score)) asyncio.ensure_future(save_to_disk('banned', banned)) asyncio.ensure_future(check_for_runners(loop)) logging.info("running loop forever") loop.run_forever() return
10,504
def now_playing(ctx): """ Show now playing metadata. """ client = ctx.obj['client'] client.now_playing()
10,505
def streamline_to_data(x_val, y_val, x0, y0): """ save streamline to bokeh.models.ColumnDataSource :param x_val: streamline data x :param y_val: streamline data y :param x0: initial value x of streamline :param y0: initial value y of streamline :return: """ source_initialvalue.data = dict(x0=[x0], y0=[y0]) source_streamline.data = dict(x=x_val, y=y_val)
10,506
def test_global_parser(): """ Check that the global_parser does what's expected """ global_test_str = ('global color=green dashlist=8 3 width=1 ' 'font="helvetica 10 normal roman" select=1 ' 'highlite=1 dash=0 fixed=0 edit=1 move=1 ' 'delete=1 include=1 source=1') global_parser = _DS9Parser(global_test_str) exp = {'dash': '0', 'source': '1', 'move': '1', 'font': 'helvetica 10 normal roman', 'dashlist': '8 3', 'include': True, 'highlite': '1', 'color': 'green', 'select': '1', 'fixed': '0', 'width': '1', 'edit': '1', 'delete': '1'} assert dict(global_parser.global_meta) == exp
10,507
def obj_prop(*args, **kwargs):
    """
    Build an object property wrapper.

    If no arguments (or a single ``None`` argument) are supplied, return a dummy property.
    If one argument is supplied, return :class:`AttrObjectProperty` for a property with a given name.
    Otherwise, return :class:`MethodObjectProperty` property.
    """
    if len(args) == 0:
        return empty_object_property()
    if len(args) == 1:
        if args[0] is None:  # empty property
            return empty_object_property()
        return AttrObjectProperty(args[0], **kwargs)
    elif len(args) <= 3:
        return MethodObjectProperty(*args, **kwargs)
    else:
        raise ValueError("invalid number of arguments")
10,508
def CreateDataObject(**kwargs): """ Creates a new Data Object by issuing an identifier if it is not provided. :param kwargs: :return: """ # TODO Safely create body = kwargs['body']['data_object'] doc = create(body, 'data_objects') return({"data_object_id": doc['id']}, 200)
10,509
def insertion_sort(A): """Sort list of comparable elements into nondecreasing order.""" for k in range(1, len(A)): # from 1 to n-1 cur = A[k] j = k # find correct index j for current while j > 0 and A[j-1] > cur: # element A[j-1] must be after current A[j] = A[j-1] j -= 1 A[j] = cur
10,510
def generate_sample_files_at(folder_path: PosixPath):
    """Generates sample files at a given folder path."""
    for sample_file in constants.SAMPLE_FILES:
        sample_file_path = folder_path / sample_file
        with open(sample_file_path, "w") as f:
            f.write("")
    # Make empty folder to go with the sample files
    (folder_path / "sample_folder").mkdir()
10,511
def _check_name(name: str, invars: Iterable[str]) -> str:
    """Check if count is valid"""
    if name is None:
        name = _n_name(invars)
        if name != "n":
            logger.warning(
                "Storing counts in `%s`, as `n` already present in input. "
                'Use `name="new_name"` to pick a new name.',
                name,
            )
    elif not isinstance(name, str):
        raise ValueError("`name` must be a single string.")
    return name
10,512
def test_count_animal_parametrise(spark, animal, expected_count):
    """
    You can easily test different parameters for the same test by adding parametrisation.

    Here we are generalising the test_count_animal from test_basic by adding an argument
    for animal and expected_count.

    Be careful with the syntax here: the first argument is one string:
        "animal, expected_count"
    not
        "animal", "expected_count"

    It is then passed through to the test as an argument:
        test_count_animal_parametrize(spark, animal, expected_count)
    animal and expected_count can then be used as variables in the test.

    Also ensure you use the American spelling (with a z) for @pytest.mark.parametrize

    This is only a simple example; for more detail on different ways to parametrise tests,
    see Mitch Edmunds testing tips repository:
    https://github.com/mitches-got-glitches/testing-tips
    """
    # Arrange
    df = spark.createDataFrame([
        # Test lowercase
        [1, "cat"],
        # Test first letter capitalised
        [2, "Cat"],
        # Test uppercase
        [3, "CAT"],
        # Check that non cats are not included in the count
        [4, "dog"],
    ], ["id", "animal_group"])

    # Act
    actual_count = more_functions.count_animal(df, animal)

    # Assert
    assert actual_count == expected_count
10,513
def test_depth(run_line, go_ep1_id): """ Confirms setting depth to 1 on a --recursive ls of EP1:/ finds godata but not file1.txt """ load_response_set("cli.transfer_activate_success") load_response_set("cli.ls_results") result = run_line(f"globus ls -r --recursive-depth-limit 1 {go_ep1_id}:/") assert "file1.txt" not in result.output
10,514
def apply_random_filters(batches, filterbank, max_freq, max_db, min_std=5, max_std=7):
    """
    Applies random filter responses to logarithmic-magnitude mel spectrograms.
    The filter response is a Gaussian of a standard deviation between `min_std`
    and `max_std` semitones, a mean between 150 Hz and `max_freq`, and a
    strength between -/+ `max_db` decibel. Assumes the mel spectrograms have
    been transformed with `filterbank` and cover up to `max_freq` Hz.
    """
    for spects, labels in batches:
        batchsize, length, bands = spects.shape
        bins = len(filterbank)
        # sample means and std deviations on logarithmic pitch scale
        min_pitch = 12 * np.log2(150)
        max_pitch = 12 * np.log2(max_freq)
        mean = min_pitch + (np.random.rand(batchsize) * (max_pitch - min_pitch))
        std = min_std + np.random.randn(batchsize) * (max_std - min_std)
        # convert means and std deviations to linear frequency scale
        std = 2**((mean + std) / 12) - 2**(mean / 12)
        mean = 2**(mean / 12)
        # convert means and std deviations to bins
        mean = mean * bins / max_freq
        std = std * bins / max_freq
        # sample strengths uniformly in dB
        strength = max_db * 2 * (np.random.rand(batchsize) - .5)
        # create Gaussians
        filt = (strength[:, np.newaxis] *
                np.exp(np.square((np.arange(bins) - mean[:, np.newaxis]) /
                                 std[:, np.newaxis]) * -.5))
        # transform from dB to factors
        filt = 10**(filt / 20.)
        # transform to mel scale
        filt = np.dot(filt.astype(spects.dtype), filterbank)
        # logarithmize
        filt = np.log(filt)
        # apply (it's a simple addition now, broadcasting over the second axis)
        yield spects + filt[:, np.newaxis, :], labels
10,515
def get_documents(corpus_tag): """ Returns a list of documents with a particular corpus tag """ values = db.select(""" SELECT doc_id FROM document_tag WHERE tag=%(tag)s ORDER BY doc_id """, tag=corpus_tag) return [x.doc_id for x in values]
10,516
def version_patch(monkeypatch): """Monkeypatch version to stable value to compare with static test assets.""" monkeypatch.setattr(erd, "__version__", "TEST")
10,517
def draw_with_indeces(settings):
    """
    Drawing function that displays the input smiles string with all atom indices
    """
    m = Chem.MolFromSmiles(settings['SMILESSTRING'])
    dm = Draw.PrepareMolForDrawing(m)
    d2d = Draw.MolDraw2DSVG(350, 350)
    opts = d2d.drawOptions()
    for i in range(m.GetNumAtoms()):
        opts.atomLabels[i] = m.GetAtomWithIdx(i).GetSymbol() + str(i)
    d2d.DrawMolecule(dm)
    d2d.FinishDrawing()
    return d2d.GetDrawingText()
10,518
def test_options_with_api_client(): """Checks that client can receive token, timeout values, and enable_compression """ client = lokalise.Client( "123abc", connect_timeout=5, read_timeout=3, enable_compression=True) opts = options(client) assert opts["headers"]["X-Api-Token"] == "123abc" assert "Authorization" not in opts["headers"] assert opts["headers"]["Accept-Encoding"] == "gzip,deflate,br" assert opts["timeout"] == (5, 3)
10,519
def get_main_page_info():
    """Get statistics for the main page.

    :return info: Dict of statistics
    """
    from app.extensions.celerybackend import models
    from app.extensions.logger.models import Log
    from app.modules.auth.models import User
    from app.utils import local

    task_cnt = models.Tasks.objects(time_start__gte=local.localdate()).count()
    user_cnt = User.query.count()
    new_user_cnt = User.query.filter(User.created > local.localdate()).count()
    log_cnt = Log.objects(
        created__gte=local.localdate(), module__nin=["static", "admin", "unknown"]
    ).count()
    task_success_cnt = models.Tasks.objects(
        time_start__gte=local.localdate(), state="success"
    ).count()
    task_run_cnt = models.Tasks.objects(
        time_start__gte=local.localdate(), state="run"
    ).count()
    task_fail_cnt = models.Tasks.objects(
        time_start__gte=local.localdate(), state="fail"
    ).count()

    if task_success_cnt == 0:
        task_success = 0
    else:
        task_success = int(task_success_cnt / task_cnt * 100)
    if task_run_cnt == 0:
        task_run = 0
    else:
        task_run = int(task_run_cnt / task_cnt * 100)
    if task_fail_cnt == 0:
        task_fail = 0
    else:
        task_fail = int(task_fail_cnt / task_cnt * 100)

    info = {
        "task": task_cnt,
        "user": user_cnt,
        "new_user": new_user_cnt,
        "log": log_cnt,
    }
    return info, task_success, task_run, task_fail
10,520
def get_tablenames(cur):
    """ Convenience: """
    cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tablename_list_ = cur.fetchall()
    tablename_list = [str(tablename[0]) for tablename in tablename_list_]
    return tablename_list
10,521
def reverse_geocode(userCoords): """ Returns the city, state (or equivalent administrative region), and country that the specified point is in userCoords is a tuple: (latitude, longitude) """ lat, lng = userCoords latlng = "{0},{1}".format(lat, lng) data = urllib.parse.urlencode({"latlng" : latlng, "result_type" : "locality", "key" : API_KEY}) result = make_google_api_request(API_URL + data) if result["status"] == "OK": return result["results"][0]["formatted_address"] else: return "Status: " + result["status"]
10,522
def match_red_baselines(model, model_antpos, data, data_antpos, tol=1.0, verbose=True): """ Match unique model baseline keys to unique data baseline keys based on positional redundancy. Ideally, both model and data contain only unique baselines, in which case there is a one-to-one mapping. If model contains extra redundant baselines, these are not propagated to new_model. If data contains extra redundant baselines, the lowest ant1-ant2 pair is chosen as the baseline key to insert into model. Parameters: ----------- model : type=DataContainer, model dictionary holding complex visibilities must conform to DataContainer dictionary format. model_antpos : type=dictionary, dictionary holding antennas positions for model dictionary keys are antenna integers, values are ndarrays of position vectors in meters data : type=DataContainer, data dictionary holding complex visibilities. must conform to DataContainer dictionary format. data_antpos : type=dictionary, dictionary holding antennas positions for data dictionary same format as model_antpos tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters) Output: (data) ------- new_model : type=DataContainer, dictionary holding complex visibilities from model that had matching baselines to data """ # create baseline keys for model model_keys = list(model.keys()) model_bls = np.array(list(map(lambda k: Baseline(model_antpos[k[1]] - model_antpos[k[0]], tol=tol), model_keys))) # create baseline keys for data data_keys = list(data.keys()) data_bls = np.array(list(map(lambda k: Baseline(data_antpos[k[1]] - data_antpos[k[0]], tol=tol), data_keys))) # iterate over data baselines new_model = odict() for i, bl in enumerate(model_bls): # compre bl to all model_bls comparison = np.array(list(map(lambda mbl: bl == mbl, data_bls)), np.str) # get matches matches = np.where((comparison == 'True') | (comparison == 'conjugated'))[0] # check for matches if len(matches) == 0: echo("found zero matches in data for model {}".format(model_keys[i]), verbose=verbose) continue else: if len(matches) > 1: echo("found more than 1 match in data to model {}: {}".format(model_keys[i], list(map(lambda j: data_keys[j], matches))), verbose=verbose) # assign to new_data if comparison[matches[0]] == 'True': new_model[data_keys[matches[0]]] = model[model_keys[i]] elif comparison[matches[0]] == 'conjugated': new_model[data_keys[matches[0]]] = np.conj(model[model_keys[i]]) return DataContainer(new_model)
10,523
def _choose_random_genes(individual): """ Selects two separate genes from individual. Args: individual (np.array): Genotype of individual. Returns: gene1, gene2 (tuple): Genes separated by at least another gene. """ gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten()) while gene2 - gene1 < 2: gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten()) return (gene1, gene2)
10,524
def incomplete_sample_detection(device_name): """Introspect whether a device has 'incomplete sample detection', described here: www.ni.com/documentation/en/ni-daqmx/latest/devconsid/incompletesampledetection/ The result is determined empirically by outputting a pulse on one counter and measuring it on another, and seeing whether the first sample was discarded or not. This requires a non-simulated device with at least two counters. No external signal is actually generated by the device whilst this test is performed. Credit for this method goes to Kevin Price, who provided it here: forums.ni.com/t5/Multifunction-DAQ/_/td-p/3849429 This workaround will hopefully be deprecated if and when NI provides functionality to either inspect this feature's presence directly, or to disable it regardless of its presence. """ if is_simulated(device_name): msg = "Can only detect incomplete sample detection on non-simulated devices" raise ValueError(msg) if not supports_period_measurement(device_name): msg = "Device doesn't support period measurement" raise ValueError(msg) CI_chans = get_CI_chans(device_name) if len(CI_chans) < 2: msg = "Need at least two counters to detect incomplete sample detection" raise ValueError(msg) # The counter we will produce a test signal on: out_chan = CI_chans[0] # The counter we will measure it on: meas_chan = CI_chans[1] # Set up the output task: out_task = daqmx.Task() out_task.CreateCOPulseChanTime( out_chan, "", c.DAQmx_Val_Seconds, c.DAQmx_Val_Low, 0, 1e-3, 1e-3 ) # Prevent the signal being output on the physical terminal: out_task.SetCOPulseTerm("", "") # Force CO into idle state to prevent spurious edges when the task is started: out_task.TaskControl(c.DAQmx_Val_Task_Commit) # Set up the measurement task meas_task = daqmx.Task() meas_task.CreateCIPeriodChan( meas_chan, "", 1e-3, 1.0, c.DAQmx_Val_Seconds, c.DAQmx_Val_Rising, c.DAQmx_Val_LowFreq1Ctr, 10.0, 0, "", ) meas_task.CfgImplicitTiming(c.DAQmx_Val_ContSamps, 1) # Specify that we are measuring the internal output of the other counter: meas_task.SetCIPeriodTerm("", '/' + out_chan + 'InternalOutput') try: meas_task.StartTask() out_task.StartTask() out_task.WaitUntilTaskDone(10.0) # How many samples are in the read buffer of the measurement task? samps_avail = types.uInt32() meas_task.GetReadAvailSampPerChan(samps_avail) if samps_avail.value == 0: # The device discarded the first edge return True elif samps_avail.value == 1: # The device did not discard the first edge return False else: # Unexpected result msg = "Unexpected number of samples: %d" % samps_avail.value raise ValueError(msg) finally: out_task.ClearTask() meas_task.ClearTask()
10,525
def diag_numba(A, b): """ Fill matrix A with a diagonal represented by vector b. Parameters ---------- A : array Base matrix. b : array Diagonal vector to fill with. Returns ------- array Matrix A with diagonal filled. """ for i in range(b.shape[0]): A[i, i] = b[i] return A
10,526
def get_symminfo(newsymms: dict) -> str:
    """
    Adds text about the symmetry generators used in order to add symmetry generated atoms.
    """
    line = 'Symmetry transformations used to generate equivalent atoms:\n'
    nitems = len(newsymms)
    n = 0
    for key, value in newsymms.items():
        sep = ';'
        n += 1
        # drop the separator after the last item
        if n == nitems:
            sep = ''
        line += "#{}: {}{} ".format(key, value, sep)
    if newsymms:
        return line
    else:
        return ''
10,527
def get_random_sequences( self, n=10, length=200, chroms=None, max_n=0.1, outtype="list" # noqa ): """ Return random genomic sequences. Parameters ---------- n : int , optional Number of sequences to return. length : int , optional Length of sequences to return. chroms : list , optional Return sequences only from these chromosomes. max_n : float , optional Maximum fraction of Ns. outtype : string , optional return the output as list or string. Options: "list" or "string", default: "list". Returns ------- list coordinates as lists or strings: List with [chrom, start, end] genomic coordinates. String with "chrom:start-end" genomic coordinates (can be used as input for track2fasta). """ if not chroms: chroms = self.keys() # dict of chromosome sizes after subtracting the number of Ns sizes = dict( [(chrom, len(self[chrom]) - self.gaps.get(chrom, 0)) for chrom in chroms] ) # list of (tuples with) chromosomes and their size # (if that size is long enough for random sequence selection) lengths = [ (sizes[x], x) for x in chroms if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length ] if len(lengths) == 0: raise Exception("No contigs of sufficient size were found.") # random list of chromosomes from lengths (can have duplicates) chroms = weighted_selection(lengths, n) coords = [] retries = 100 cutoff = length * max_n for chrom in chroms: for _ in range(retries): start = int(random() * (sizes[chrom] - length)) end = start + length count_n = self[chrom][start:end].seq.upper().count("N") if count_n <= cutoff: break else: raise Exception( f"Random subset ran {retries} times, " f"but could not find a sequence with less than {cutoff} N's in {chrom}.\n" "You can specify contigs using the CHROMS argument." ) # list output example ["chr1", 123, 456] coords.append([chrom, start, end]) if outtype != "list": # bed output example: "chr1:123-456" for i, region in enumerate(coords): coords[i] = [f"{region[0]}:{region[1]}-{region[2]}"] return coords
10,528
def get_sampleentropy(data): """Sample entropy, using antropy.sample_entropy, in the ML and AP directions. """ x, y = np.array(data[4]), np.array(data[5]) sample_entropy_ML = ant.sample_entropy(x) sample_entropy_AP = ant.sample_entropy(y) return sample_entropy_ML, sample_entropy_AP
10,529
def export_pdf(imgname, autotm, default_dpi, outfile, tformat,
               tleft, ttop, twidth, theight):
    """Trim the image and create a PDF of the same size."""
    if outfile == '':
        outfile = '%s.pdf' % (imgname)
    outtrim = '%s-trim.' % (outfile)
    outtrim = outtrim + tformat
    pdf = Canvas(outfile, pageCompression=1)
    dpi = default_dpi
    im = Image.open(imgname)
    w, h = im.size
    width = round(w * 72.0 / dpi, 3)
    height = round(h * 72.0 / dpi, 3)
    pdf.setPageSize((width, height))
    if ((tleft < 0) or (ttop < 0) or (twidth < 0) or (theight < 0)):
        if autotm:
            trimbox = autocrop(im, 255)
        else:
            if im.mode == 'RGB':
                trimbox = trim(im, (255, 255, 255))
            else:
                trimbox = trim(im, 255)
    else:
        trimbox = (tleft, ttop, (tleft + twidth), (ttop + theight))
    if trimbox:
        print(trimbox)
        x1, y1, x2, y2 = trimbox
        wt = round((x2 - x1) * 72.0 / dpi, 3)
        ht = round((y2 - y1) * 72.0 / dpi, 3)
        x = round(x1 * 72.0 / dpi, 3)
        y = height - round(y2 * 72.0 / dpi, 3)
        trimim = im.crop(trimbox)
        trimim.save(outtrim)
        pdf.drawImage(outtrim, x, y, width=wt, height=ht)
    else:
        # found no content
        raise ValueError('cannot trim; image was empty')
    pdf.showPage()
    pdf.save()
10,530
def test_kernels_feedback_matrix(with_tf_random_seed, kernel_setup): """Test the feedback_matrices correspond to the state transitions for each kernel.""" time_points, kernel_factory, _ = kernel_setup tf_time_points = tf.constant(time_points) tf_time_deltas = to_delta_time(tf_time_points) kernel = kernel_factory.create_kernel() state_transitions = kernel.state_transitions(tf_time_points, tf_time_deltas) state_transitions_feedback = tf.linalg.expm( kernel.feedback_matrix * tf_time_deltas[..., None, None] ) np.testing.assert_allclose(state_transitions, state_transitions_feedback)
10,531
def from_json(filename, columns=None, process_func=None): """Read data from a json file Args: filename: path to a json file columns (list, optional): list of columns to keep. All columns are kept by default process_func (function, optional): A callable object that you can pass to process you data in a specific way Returns: pandas.DataFrame: return a dataframe object """ df = pd.read_json(filename) return __process_data(df, columns, process_func)
10,532
def save_books(books, file_path):
    """
    Append all books from the list to the table.

    :param books: list - list of books (Book class instances)
    :param file_path: string - path to the table
    """
    with open(file_path, 'a', encoding='utf-8') as file:
        if os.path.getsize(file_path) == 0:
            file.write('Name\tAuthor\tStatus\tMy Rating\tDate\tLink\n')
        for book in books:
            file.write(str(book) + '\n')
10,533
def send_admin_email(db_obj, support_case): """ Send announcement for each member of 'admin' groups When the support case is being created """ msg = Message('Autopsy Web app: a support case has been created') msg.sender = app.config['MAIL_USERNAME'] email_dl = db_obj.session.query( UserRoles.role_id, User.user_email).filter( UserRoles.role_id == 1).join(User).all() case_author = User.query.filter(User.id==support_case.user_id).first() admin_dl = [mail[1] for mail in email_dl] msg.recipients = admin_dl msg.body = f""" Autopsy Web App: new support has been created! A new support case has been opened by {case_author.user_name} Date: {support_case.support_created} Subject: {support_case.support_subject} Content: {support_case.support_content} ---- This is an auto-generated message, please don't reply. """ mail.send(msg)
10,534
def render_content(tab): """ This function displays tabs based on user selection of tab """ if tab == 'tab-2': return filter_based_recommendation.TAB2_LAYOUT return choice_based_recommendation.CHOICE_BASED_RECOMMENDATION_LAYOUT
10,535
def course(name, reviews = False): """ Get a course. Parameters ---------- name: string The name of the course. reviews: bool, optional Whether to also return reviews for the course, specifically reviews for professors that taught the course and have the course listed as the one being reviewed. Defaults to False. """ params = {"name" : name, "reviews": "true" if reviews else "false"} url = BASE_URL + "course?" + urlencode(params) return requests.get(url).json()
10,536
def track_job(job, total, details = "", update_interval=1): """ Tracks map_async job. Parameters ---------- job : AsyncResult object total : total number of jobs update_interval : interval of tracking """ # if not isinstance(job, mp.pool.AsyncResult): # raise ValueError("`job` argument should be an AsyncResult object.") pb = Progressbar(total) while job._number_left > 0: rc = total-(job._number_left*job._chunksize) pb.show(rc, details = details) time.sleep(update_interval) if job._number_left == 0: pb.show(total, details = details) print("")
10,537
def task(weight=1):
    """
    Used as a convenience decorator to be able to declare tasks for a TaskSet
    inline in the class. Example::

        class ForumPage(TaskSet):
            @task(100)
            def read_thread(self):
                pass

            @task(7)
            def create_thread(self):
                pass
    """

    def decorator_func(func):
        func.locust_task_weight = weight
        return func

    """
    Check if task was used without parentheses (not called), like this::

        @task
        def my_task():
            pass
    """
    if callable(weight):
        func = weight
        weight = 1
        return decorator_func(func)
    else:
        return decorator_func
10,538
def unprotect_host(hostname):
    """ Allow a host to be acted on by the retirement queue

    Args:
    hostname - The hostname to remove from protection
    """
    reporting_conn = get_mysqlops_connections()
    cursor = reporting_conn.cursor()

    sql = ("DELETE FROM mysqlops.retirement_protection "
           "WHERE hostname = %(hostname)s")
    cursor.execute(sql, {'hostname': hostname})
    reporting_conn.commit()
    log.info(cursor._executed)
10,539
def generate_age(issue_time): """Generate a age parameter for MAC authentication draft 00.""" td = datetime.datetime.now() - issue_time age = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 return unicode_type(age)
10,540
def out_atok_dic(out_dir, entries, dic_type):
    """
    Create a dictionary file for ATOK.
    The file encoding is UTF-16, line endings are CR/LF, and fields are tab-separated.

    :param out_dir: output directory
    :param entries: entries to write out
    :param dic_type: dictionary type
    """
    file_path = os.path.join(out_dir, 'atok_dic_{}.txt'.format(dic_type.value))
    with open(file_path, 'w', encoding='utf-16') as f:
        for entry in reduce_entries_for_suggest_dic(entries):
            pos = entry.pos
            if pos == '人名':
                pos = '固有人名'
            elif pos == '固有名詞':
                pos = '固有一般'
            f.write('{}\t{}\t{}\n'.format(entry.yomi, entry.normalized_surface, pos))
10,541
def measures_hrna_basepairs(name, s, path_to_3D_data, thr_idx): """ Open a rna_only/ file, and run measures_hrna_basepairs_chain() on every chain """ setproctitle(f"RNANet statistics.py Worker {thr_idx+1} measures_hrna_basepairs({name})") l = [] chain = next(s[0].get_chains()) # do not recompute something already computed if os.path.isfile(runDir + "/results/geometry/HiRE-RNA/basepairs/basepairs_"+name+".csv"): return df = pd.read_csv(os.path.abspath(path_to_3D_data +"datapoints/" + name)) # if df['index_chain'][0] == 1: # ignore files with numbering errors : TODO : remove when we get DSSR Pro, there should not be numbering errors anymore l = measures_hrna_basepairs_chain(name, chain, df, thr_idx) df_calc = pd.DataFrame(l, columns=["type_LW", "nt1_idx", "nt1_res", "nt2_idx", "nt2_res", "Distance", "211_angle", "112_angle", "dB1", "dB2", "alpha1", "alpha2", "3211_torsion", "1123_torsion"]) df_calc.to_csv(runDir + "/results/geometry/HiRE-RNA/basepairs/"+'basepairs_' + name + '.csv', float_format="%.3f")
10,542
def _bqm_from_1sat(constraint): """create a bqm for a constraint with only one variable bqm will have exactly classical gap 2. """ configurations = constraint.configurations num_configurations = len(configurations) bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) if num_configurations == 1: val, = next(iter(configurations)) v, = constraint.variables bqm.add_variable(v, -1 if val > 0 else +1) else: bqm.add_variables_from((v, 0.0) for v in constraint.variables) return bqm.change_vartype(constraint.vartype)
10,543
def read_workdir(conffile):
    """Read the working directory from a config file in the user's larch dir.

    Compare save_workdir(conffile), which saves this value.

    Can be used to ensure that application startup begins in the last working directory.
    """
    try:
        w_file = os.path.join(user_larchdir, conffile)
        if os.path.exists(w_file):
            line = open(w_file, 'r').readlines()
            workdir = line[0][:-1]
            os.chdir(workdir)
    except:
        pass
10,544
def _ssh(server): """ SSH into a Server """ remote_user = server.remote_user private_key = server.private_key if not private_key or not remote_user: if remote_user: return {"result": "Critical. Missing Private Key", "status": 3, } elif private_key: return {"result": "Critical. Missing Remote User", "status": 3, } else: return {"result": "Critical. Missing Remote User & Private Key", "status": 3, } # SSH in & run check try: import paramiko except ImportError: return {"result": "Critical. Paramiko required.", "status": 3, } keyfile = open(os.path.join(current.request.folder, "uploads", private_key), "r") mykey = paramiko.RSAKey.from_private_key(keyfile) ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: ssh.connect(hostname = server.host_ip, username = remote_user, pkey = mykey) except paramiko.ssh_exception.AuthenticationException: import traceback tb_parts = sys.exc_info() tb_text = "".join(traceback.format_exception(tb_parts[0], tb_parts[1], tb_parts[2])) return {"result": "Critical. Authentication Error\n\n%s" % tb_text, "status": 3, } except paramiko.ssh_exception.SSHException: import traceback tb_parts = sys.exc_info() tb_text = "".join(traceback.format_exception(tb_parts[0], tb_parts[1], tb_parts[2])) return {"result": "Critical. SSH Error\n\n%s" % tb_text, "status": 3, } return ssh
10,545
def _create_group_codes_and_info( states: pd.DataFrame, assort_bys: Dict[str, List[str]], contact_models: Dict[str, Dict[str, Any]], ) -> Tuple[pd.DataFrame, Dict[str, Dict[str, Any]]]: """Create group codes and additional information. Args: states (pd.DataFrame): The states. assort_bys (Dict[str, List[str]]): The assortative variables for each contact model. contact_models (Dict[str, Dict[str, Any]]): The contact models. Returns: A tuple containing: - states (pandas.DataFrame): The states. - group_codes_info (Dict[str, Dict[str, Any]]): A dictionary where keys are names of contact models and values are dictionaries containing the name and the original codes of the assortative variables. """ group_codes_names = _create_group_codes_names(contact_models, assort_bys) group_codes_info = {} for model_name, assort_by in assort_bys.items(): is_factorized = contact_models[model_name].get("is_factorized", False) group_code_name = group_codes_names[model_name] # Create the group code column if it is not available or if it exists - meaning # we are resuming a simulation - to recover the groups. if (group_code_name not in states.columns) or ( group_code_name in states.columns and not is_factorized ): states[group_code_name], groups = factorize_assortative_variables( states, assort_by ) elif group_code_name in states.columns and is_factorized: states[group_code_name] = states[group_code_name].astype(DTYPE_GROUP_CODE) unsorted_groups = states[group_code_name].unique() groups = np.sort(unsorted_groups[unsorted_groups != -1]) else: groups = states[group_code_name].cat.categories if is_factorized: groups = groups[groups != -1] group_codes_info[model_name] = {"name": group_code_name, "groups": groups} return states, group_codes_info
10,546
def print_version(ctx, param, value): """Print suricataindex version to stdout.""" if value_check(value, ctx): return click.echo(str(__version__)) ctx.exit()
10,547
def json_reader(input_data): """A generator that converts an iterable of newline-delimited JSON objects ('input_data' could be a 'list' for testing purposes) into an iterable of Python dict objects. If the line cannot be parsed as JSON, the exception thrown by the json.loads() is yielded back, instead of the json object. The calling code can check for this exception with an isinstance() function, then continue processing the rest of the file. """ for line in input_data: try: yield json.loads(line) except Exception as e: yield e
10,548
def test_pip_std_install(tmpdir): """ Test Pip Standard Install """ python_dir = tmpdir.join("lib", "python2.7", "site-packages", "qibuild") python_dir.ensure("__init__.py", file=True) cmake_dir = tmpdir.join("share", "cmake") cmake_dir.ensure("qibuild", "qibuild-config.cmake", file=True) res = qibuild.cmake.find_installed_cmake_qibuild_dir(python_dir.strpath) assert res == cmake_dir.strpath
10,549
def quiver_plotter(X, Y, Z, plot_range=None, mes_unit='', title='', x_label=r'$x$', y_label=r'$y$', show_plot=True, dark=False): """ Generates a plot of some vector fields. Parameters ---------- X : numpy.ndarray Matrix with values for the first axis on all the rows. Y : numpy.ndarray Matrix with values for the second axis on all the columns. Z : numpy.ndarray or list of numpy.ndarray Either a matrix with 3 dimension and the last two dimensions like the dimensions of X and Y or a list of two matricies with the same size as X and Y. plot_range : list of floats, optional List with the range for the plot. The defualt is None. mes_unit : str, optional Units of measure of the vectors shown. The default is ''. title : str, optional Title of the plot. The default is ''. x_label : str, optional The name on the first axis. The default is r'$x$'. y_label : str, optional Name on the second axis. The default is r'$y$'. show_plot : bool, optional Flag for printing the figure with plt.show(). The default is True. dark : bool, optional Flag for changing the graph color to a dark theme. The default is False. Raises ------ ValueError If the size of either X, Y or Z don't match. TypeError If the Z parameter is neither a list of numpy.ndarray or a numpy.ndarray Returns ------- fig : matplotlib.figure.Figure Figure with the plot. """ if isinstance(Z, list): if len(Z) != 2: raise ValueError("The argument z should be a list of two elements.") else: q_x = Z[0] q_y = Z[1] elif isinstance(Z, np.ndarray): if len(Z.shape) != 3 or Z.shape[0] < 2: raise ValueError( "The argument z should be a numpy array of dimension 3 with at least 2 values on the first axis.") else: q_x = Z[0, :] q_y = Z[1, :] else: raise TypeError( "The argument z should be a list of numpy.ndarray or an instance of numpy.ndarray.") range_reduction = True if plot_range == None: range_reduction = False elif not isinstance(plot_range, list): raise TypeError('The argument should be a list of floats.') elif len(plot_range) != 4: raise ValueError( 'The number of elements in plot_range should be 4, here it is {}'.format(len(plot_range))) if q_x.shape != X.shape or q_x.shape != Y.shape or q_y.shape != X.shape or q_y.shape != Y.shape: raise ValueError("The shape of X, Y and the two elements in Z must coincide.") if range_reduction: x_max = plot_range[1] x_min = plot_range[0] y_max = plot_range[3] y_min = plot_range[2] idx_x_min, idx_x_max = _crop_array_idxs(X[:, 0], x_min, x_max) idx_y_min, idx_y_max = _crop_array_idxs(Y[0, :], y_min, y_max) X = X[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] Y = Y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] q_x = q_x[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] q_y = q_y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] # plotting of the function fig = plt.figure(figsize=fig_size) ax = fig.gca() Q = ax.quiver(X, Y, q_x, q_y, pivot='tail') ax.quiverkey(Q, 0.9, 0.9, 1, '1' + mes_unit, labelpos='E', coordinates='figure') if range_reduction: ax.axis(plot_range) ax.set_xlabel(x_label) ax.set_ylabel(y_label) title = ax.set_title(title) if dark: _darkizer(fig, ax, title) if show_plot: plt.show() return fig
10,550
def get_attr(item, name, default=None): """ similar to getattr and get but will test for class or dict :param item: :param name: :param default: :return: """ try: val = item[name] except (KeyError, TypeError): try: val = getattr(item, name) except AttributeError: val = default return val
10,551
def float2(val, min_repeat=6): """Increase number of decimal places of a repeating decimal. e.g. 34.111111 -> 34.1111111111111111""" repeat = 0 lc = "" for i in range(len(val)): c = val[i] if c == lc: repeat += 1 if repeat == min_repeat: return float(val[:i+1] + c * 10) else: lc = c repeat = 1 return float(val)
10,552
def load_array_meta(loader, filename, index): """ Load the meta-data data associated with an array from the specified index within a file. """ return loader(filename, index)
10,553
def viterbi_value(theta: np.ndarray, operator: str = 'hardmax') -> float:
    """
    Viterbi operator.

    :param theta: numpy.ndarray, shape = (T, S, S),
        Holds the potentials of the linear chain CRF
    :param operator: str in {'hardmax', 'softmax', 'sparsemax'},
        Smoothed max-operator
    :return: float, Viterbi value $Vit(\theta)$
    """
    return viterbi_grad(theta, operator)[0]
10,554
def pack32(n): """Convert a Python int to a packed signed long (4 bytes).""" return pack('<i', n)
10,555
def run_rollout( policy, env, horizon, use_goals=False, render=False, video_writer=None, video_skip=5, terminate_on_success=False, ): """ Runs a rollout in an environment with the current network parameters. Args: policy (RolloutPolicy instance): policy to use for rollouts. env (EnvBase instance): environment to use for rollouts. horizon (int): maximum number of steps to roll the agent out for use_goals (bool): if True, agent is goal-conditioned, so provide goal observations from env render (bool): if True, render the rollout to the screen video_writer (imageio Writer instance): if not None, use video writer object to append frames at rate given by @video_skip video_skip (int): how often to write video frame terminate_on_success (bool): if True, terminate episode early as soon as a success is encountered Returns: results (dict): dictionary containing return, success rate, etc. """ assert isinstance(policy, RolloutPolicy) assert isinstance(env, EnvBase) policy.start_episode() ob_dict = env.reset() goal_dict = None if use_goals: # retrieve goal from the environment goal_dict = env.get_goal() results = {} video_count = 0 # video frame counter total_reward = 0. success = { k: False for k in env.is_success() } # success metrics try: for step_i in range(horizon): # get action from policy ac = policy(ob=ob_dict, goal=goal_dict) # play action ob_dict, r, done, _ = env.step(ac) # render to screen if render: env.render(mode="human") # compute reward total_reward += r cur_success_metrics = env.is_success() for k in success: success[k] = success[k] or cur_success_metrics[k] # visualization if video_writer is not None: if video_count % video_skip == 0: video_img = env.render(mode="rgb_array", height=512, width=512) video_writer.append_data(video_img) video_count += 1 # break if done if done or (terminate_on_success and success["task"]): break except env.rollout_exceptions as e: print("WARNING: got rollout exception {}".format(e)) results["Return"] = total_reward results["Horizon"] = step_i + 1 results["Success_Rate"] = float(success["task"]) # log additional success metrics for k in success: if k != "task": results["{}_Success_Rate".format(k)] = float(success[k]) return results
10,556
def create_test_client( route_handlers: Union[ Union[Type[Controller], BaseRouteHandler, Router, AnyCallable], List[Union[Type[Controller], BaseRouteHandler, Router, AnyCallable]], ], after_request: Optional[AfterRequestHandler] = None, allowed_hosts: Optional[List[str]] = None, backend: str = "asyncio", backend_options: Optional[Dict[str, Any]] = None, base_url: str = "http://testserver", before_request: Optional[BeforeRequestHandler] = None, cors_config: Optional[CORSConfig] = None, dependencies: Optional[Dict[str, Provide]] = None, exception_handlers: Optional[Dict[Union[int, Type[Exception]], ExceptionHandler]] = None, guards: Optional[List[Guard]] = None, middleware: Optional[List[Union[Middleware, Type[BaseHTTPMiddleware], Type[MiddlewareProtocol]]]] = None, on_shutdown: Optional[List[LifeCycleHandler]] = None, on_startup: Optional[List[LifeCycleHandler]] = None, openapi_config: Optional[OpenAPIConfig] = None, template_config: Optional[TemplateConfig] = None, plugins: Optional[List[PluginProtocol]] = None, raise_server_exceptions: bool = True, root_path: str = "", static_files_config: Optional[Union[StaticFilesConfig, List[StaticFilesConfig]]] = None, cache_config: CacheConfig = DEFAULT_CACHE_CONFIG, ) -> TestClient: """Create a TestClient""" return TestClient( app=Starlite( after_request=after_request, allowed_hosts=allowed_hosts, before_request=before_request, cors_config=cors_config, dependencies=dependencies, exception_handlers=exception_handlers, guards=guards, middleware=middleware, on_shutdown=on_shutdown, on_startup=on_startup, openapi_config=openapi_config, template_config=template_config, plugins=plugins, route_handlers=cast(Any, route_handlers if isinstance(route_handlers, list) else [route_handlers]), static_files_config=static_files_config, cache_config=cache_config, ), backend=backend, backend_options=backend_options, base_url=base_url, raise_server_exceptions=raise_server_exceptions, root_path=root_path, )
10,557
def queue_tabnav(context): """Returns tuple of tab navigation for the queue pages. Each tuple contains three elements: (tab_code, page_url, tab_text) """ counts = context['queue_counts'] request = context['request'] listed = not context.get('unlisted') if listed: tabnav = [('nominated', 'queue_nominated', (ungettext('New Add-on ({0})', 'New Add-ons ({0})', counts['nominated']) .format(counts['nominated']))), ('pending', 'queue_pending', (ungettext('Update ({0})', 'Updates ({0})', counts['pending']) .format(counts['pending']))), ('moderated', 'queue_moderated', (ungettext('Moderated Review ({0})', 'Moderated Reviews ({0})', counts['moderated']) .format(counts['moderated'])))] if acl.action_allowed(request, amo.permissions.ADDONS_POST_REVIEW): tabnav.append( ('auto_approved', 'queue_auto_approved', (ungettext('Auto Approved Add-on ({0})', 'Auto Approved Add-ons ({0})', counts['auto_approved']) .format(counts['auto_approved']))), ) else: tabnav = [ ('all', 'unlisted_queue_all', ugettext('All Unlisted Add-ons')) ] return tabnav
10,558
def _plot_feature_correlations(ax, correlation_matrix, cmap="coolwarm", annot=True, fmt=".2f", linewidths=.05): """ Creates a heatmap plot of the feature correlations Args: :ax: the axes object to add the plot to :correlation_matrix: the feature correlations :cmap: the color map :annot: whether to annotate the heatmap :fmt: how to format the annotations :linewidths: line width in the plot Returns: The heatmap """ hm = sns.heatmap(correlation_matrix, ax=ax, cmap=cmap, annot=annot, fmt=fmt, linewidths=linewidths) return hm
10,559
def _fix_mark_history(user):
    """
    Goes through a user's complete mark history and resets all expiration dates.

    The reason for doing it this way is that the mark rules now insist on marks building on
    previous expiration dates if such exist. Instead of having the entire mark database be a
    linked list structure, it can be simplified to guarantee the integrity of the expiration
    dates by running this whenever:

    * a new Mark is saved or deleted
    * a new MarkUser entry is made
    * an existing MarkUser entry is deleted
    """
    markusers = MarkUser.objects.filter(user=user).order_by('mark__added_date')

    last_expiry_date = None
    for entry in markusers:
        # If there's a last_expiry date, it means a mark has been processed already.
        # If that expiration date is within a DURATION of this added date, build on it.
        if last_expiry_date and entry.mark.added_date - timedelta(days=DURATION) < last_expiry_date:
            entry.expiration_date = _get_with_duration_and_vacation(last_expiry_date)
        # If there is no last_expiry_date or the last expiry date is over a DURATION old
        # we add DURATION days from the added date of the mark.
        else:
            entry.expiration_date = _get_with_duration_and_vacation(entry.mark.added_date)
        entry.save()
        last_expiry_date = entry.expiration_date
10,560
def create_asset(show_name, asset_name, asset_type, hero, target_date): """Create asset entity within a show""" db = con.server.hydra db.assets.insert( { "name": asset_name, "show": show_name, "type": asset_type, "hero": hero, "target_date": target_date } ) db.shows.update( {"name": show_name}, {"$push": {"assets": {"name": asset_name}} } )
10,561
def test_par_04(floatprecision='double'): """Test setters""" assert floatprecision in ['double', 'float'] const = N.array([1.5, 2.6, 3.7], dtype=floatprecision[0]) var = R.parameter(floatprecision)('testpar', const.size) taintflag = R.taintflag('tflag') var.subscribe(taintflag) taintflag.set(False) var.set(const) check('C array', None, list(var.values()), const, taintflag) const+=1.0 vec = C.stdvector(const) var.set(vec) check('std vector', None, list(var.values()), const, taintflag)
10,562
def graphviz_visualization(activities_count, dfg, image_format="png", measure="frequency",
                           max_no_of_edges_in_diagram=100000, start_activities=None, end_activities=None,
                           soj_time=None, font_size="12", bgcolor="transparent", stat_locale: dict = None):
    """
    Do GraphViz visualization of a DFG graph

    Parameters
    -----------
    activities_count
        Count of attributes in the log (may include attributes that are not in the DFG graph)
    dfg
        DFG graph
    image_format
        GraphViz should be represented in this format
    measure
        Describes which measure is assigned to edges in directly follows graph (frequency/performance)
    max_no_of_edges_in_diagram
        Maximum number of edges in the diagram allowed for visualization
    start_activities
        Start activities of the log
    end_activities
        End activities of the log
    soj_time
        For each activity, the sojourn time in the log
    stat_locale
        Dict to locale the stat strings

    Returns
    -----------
    viz
        Digraph object
    """
    if start_activities is None:
        start_activities = {}
    if end_activities is None:
        end_activities = {}
    if stat_locale is None:
        stat_locale = {}

    filename = tempfile.NamedTemporaryFile(suffix='.gv')
    viz = Digraph("", filename=filename.name, engine='dot', graph_attr={'bgcolor': bgcolor})

    # first, remove edges in diagram that exceeds the maximum number of edges in the diagram
    dfg_key_value_list = []
    for edge in dfg:
        dfg_key_value_list.append([edge, dfg[edge]])
    # more fine grained sorting to avoid that edges that are below the threshold are
    # undeterministically removed
    dfg_key_value_list = sorted(dfg_key_value_list, key=lambda x: (x[1], x[0][0], x[0][1]), reverse=True)
    dfg_key_value_list = dfg_key_value_list[0:min(len(dfg_key_value_list), max_no_of_edges_in_diagram)]
    dfg_allowed_keys = [x[0] for x in dfg_key_value_list]
    dfg_keys = list(dfg.keys())
    for edge in dfg_keys:
        if edge not in dfg_allowed_keys:
            del dfg[edge]

    # calculate edges penwidth
    penwidth = assign_penwidth_edges(dfg)
    activities_in_dfg = set()
    activities_count_int = copy(activities_count)

    for edge in dfg:
        activities_in_dfg.add(edge[0])
        activities_in_dfg.add(edge[1])

    # assign attributes color
    activities_color = get_activities_color(activities_count_int)

    # represent nodes
    viz.attr('node', shape='box')

    if len(activities_in_dfg) == 0:
        activities_to_include = sorted(list(set(activities_count_int)))
    else:
        # take unique elements as a list not as a set (in this way, nodes are added in the same order to the graph)
        activities_to_include = sorted(list(set(activities_in_dfg)))

    activities_map = {}

    for act in activities_to_include:
        if "frequency" in measure and act in activities_count_int:
            viz.node(str(hash(act)), act + " (" + str(activities_count_int[act]) + ")", style='filled',
                     fillcolor=activities_color[act], fontsize=font_size)
            activities_map[act] = str(hash(act))
        else:
            stat_string = human_readable_stat(soj_time[act], stat_locale)
            viz.node(str(hash(act)), act + f" ({stat_string})", fontsize=font_size)
            activities_map[act] = str(hash(act))

    # make edges addition always in the same order
    dfg_edges = sorted(list(dfg.keys()))

    # represent edges
    for edge in dfg_edges:
        if "frequency" in measure:
            label = str(dfg[edge])
        else:
            label = human_readable_stat(dfg[edge], stat_locale)
        viz.edge(str(hash(edge[0])), str(hash(edge[1])), label=label, penwidth=str(penwidth[edge]),
                 fontsize=font_size)

    start_activities_to_include = [act for act in start_activities if act in activities_map]
    end_activities_to_include = [act for act in end_activities if act in activities_map]

    if start_activities_to_include:
        viz.node("@@startnode", "<&#9679;>", shape='circle', fontsize="34")
        for act in start_activities_to_include:
            label = str(start_activities[act]) if isinstance(start_activities, dict) else ""
            viz.edge("@@startnode", activities_map[act], label=label, fontsize=font_size)

    if end_activities_to_include:
        # <&#9632;>
        viz.node("@@endnode", "<&#9632;>", shape='doublecircle', fontsize="32")
        for act in end_activities_to_include:
            label = str(end_activities[act]) if isinstance(end_activities, dict) else ""
            viz.edge(activities_map[act], "@@endnode", label=label, fontsize=font_size)

    viz.attr(overlap='false')

    viz.format = image_format

    return viz
10,563
def sig_io_func(p, ca, sv):
    # The method input gives control over how the Nafion conductivity is
    # calculated. Options are 'lam' for laminar in which an interpolation is
    # done using data from [1], 'bulk' for treating the thin Nafion shells
    # as a bulk-like material using NR results from [5], and 'mix' which uses a
    # weighted parallel mixture of 'lam' and 'bulk' based on how much Pt vs C
    # exists at current conditions. This is because it is speculated that Pt
    # may have lamellae although C may not. 'sun' was also added to the
    # agglomerate model options which takes constant values used in [2].

    # Inputs: Temperature [K], Nafion shell thickness [m], rel. humidity [%],
    # Pt coverage [%], p['eps/tau2_n'] [-] and p['p_eff_SAnaf'] [-],
    # and calculation method [-]

    """ Lamellae Method """
    # Data below is taken from "Proton Transport in Supported Nafion Nanothin
    # Films by Electrochemical Impedence Spectroscopy" by Paul, MacCreery, and
    # Karan in their Supporting Information Document [1]. The data was given in
    # mS/cm and converted to S/m for the model calling this function.

    # indices: temperature [C], Nafion shell thickness [nm], RH [%]
    sig_data = np.zeros([5, 5, 5])
    temp_vals = np.array([25, 30, 40, 50, 60])
    thick_vals = np.array([4, 10, 55, 160, 300])
    RH_vals = np.array([20, 40, 60, 80, 95])

    # v_w = np.zeros([p['Ny'], p['Nr']])
    # for i in range(p['Ny']):
    #     ih_n = ca.naf_b[i].species_index('H(Naf)')
    #     ih2o_n = ca.naf_b[i].species_index('H2O(Naf)')
    #     for j in range(p['Nr']):
    #         ca.naf_b[i].Y = sv[ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r']]
    #         v_k = ca.naf_b[i].X*ca.naf_b[i].partial_molar_volumes
    #         v_w[i,j] = v_k[ih2o_n] / sum(v_k)
    # v_w_a = np.sum(p['Vf_shl']*v_w, axis=1)
    # lamb_n = np.clip((v_w_a / (1 - v_w_a) *983/1980 *1100/18.02), 0., 22.)

    rho_naf_w = np.zeros([p['Ny'], p['Nr']])
    for i in range(p['Ny']):
        ih2o_n = ca.naf_b[i].species_index('H2O(Naf)')
        for j in range(p['Nr']):
            ind = ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r']
            rho_naf_w[i, j] = sv[ind][ih2o_n]
    rho_naf_av = np.sum(p['Vf_shl']*rho_naf_w, axis=1)

    RH, RH_C = np.zeros(p['Ny']), np.zeros(p['Ny'])
    for i in range(p['Ny']):
        av = rho_naf_av[i]
        if av > 0:
            RH[i] = RH_eq_func(av, p, i)*100
            RH_C[i] = RH_eq_func(av/2, p, i)*100
        else:
            RH[i] = min(RH_vals)
            RH_C[i] = min(RH_vals)

    "Data for 25C as thickness[nm] for rows and RH[%] for columns"
    sig_data[0, :, :] = np.array([[0.0002, 0.0206, 0.4138, 4.9101, 21.888],    # t 4nm
                                  [0.0002, 0.0199, 0.4073, 5.1758, 23.9213],   # t 10nm
                                  [0.0002, 0.0269, 0.5448, 5.3493, 22.753],    # t 55nm
                                  [0.3362, 3.2505, 8.3065, 27.0725, 54.0428],  # t 160nm
                                  [1.5591, 8.8389, 19.6728, None, None]])      # t 300nm

    "Data for 30C as thickness[nm] for rows and RH[%] for columns"
    sig_data[1, :, :] = np.array([[0.0001, 0.012, 0.278, 3.432, 21.481],    # t 4nm
                                  [0.0003, 0.018, 0.339, 3.895, 22.062],    # t 10nm
                                  [0.0004, 0.028, 0.550, 4.296, 20.185],    # t 55nm
                                  [0.0016, 0.081, 1.120, 9.244, 34.810],    # t 160nm
                                  [0.0071, 0.359, 2.797, 10.978, 43.913]])  # t 300nm

    "Data for 40C as thickness[nm] for rows and RH[%] for columns"
    sig_data[2, :, :] = np.array([[0.0003, 0.029, 0.585, 6.164, 30.321],    # t 4nm
                                  [0.0009, 0.034, 0.625, 5.374, 48.799],    # t 10nm
                                  [0.0011, 0.065, 0.931, 6.909, 40.439],    # t 55nm
                                  [0.0032, 0.152, 1.770, 14.162, 68.326],   # t 160nm
                                  [0.0140, 0.605, 4.939, 17.083, 68.334]])  # t 300nm

    "Data for 50C as thickness[nm] for rows and RH[%] for columns"
    sig_data[3, :, :] = np.array([[0.001, 0.062, 1.087, 8.335, 37.686],     # t 4nm
                                  [0.002, 0.077, 1.031, 8.127, 57.339],     # t 10nm
                                  [0.002, 0.121, 1.603, 9.149, 48.934],     # t 55nm
                                  [0.007, 0.247, 2.704, 19.221, 72.006],    # t 160nm
                                  [0.031, 1.076, 7.185, 20.981, 83.923]])   # t 300nm

    "Data for 60C as thickness[nm] for rows and RH[%] for columns"
    sig_data[4, :, :] = np.array([[0.003, 0.14, 1.51, 11.16, 55.18],    # t 4nm
                                  [0.003, 0.17, 1.72, 13.67, 62.39],    # t 10nm
                                  [0.007, 0.24, 2.29, 16.60, 63.20],    # t 55nm
                                  [0.015, 0.45, 4.31, 26.63, 93.33],    # t 160nm
                                  [0.009, 0.44, 3.43, 26.73, 100.60]])  # t 300nm

    "Create interpolation function for relevant ranges"
    from scipy.interpolate import RegularGridInterpolator
    sig_io_int = RegularGridInterpolator((temp_vals, thick_vals, RH_vals), sig_data)

    "Call interpolation function for model specified parameters"
    # Multiplication by 0.1 is unit conversion from mS/cm to S/m. Runner file
    # stores T and t_naf in [K] and [m] so are also converted inside the
    # interpolation function to the same units as original data [C] and [nm].
    RH = np.clip(RH, min(RH_vals), max(RH_vals))
    RH_C = np.clip(RH_C, min(RH_vals), max(RH_vals))

    pts = np.zeros([p['Ny'], 3])
    for i in range(p['Ny']):
        pts[i, :] = [p['T']-273, p['t_naf'][i]*1e9, RH[i]]
    sig_io_lam = sig_io_int(pts) *0.1

    """ Bulk Method """
    # This method assumes that the thin shell of Nafion is treated the same as
    # the bulk material. Lambda is calculated using an empirical relationship.
    # Then the sig_io formula from [5] for a bulk membrane is used and scaled
    # by the scaling factor, also from [5].

    # The loop below assumes RH is not RH_eq and instead is the actual local
    # gas-phase RH.
    if p['sig_method'] == 'lit':
        for i in range(p['Ny']):
            ih2o_g = ca.gas.species_index('H2O')
            rho_gas_k = sv[ca.ptr['rho_gas_k'] +i*p['nxt_y']]
            ca.gas.TDY = p['T'], sum(rho_gas_k), rho_gas_k
            RH[i] = ca.gas.X[ih2o_g]*ca.gas.P / 19946 *100

    lamb_n = 0.3 + 10.8*(RH/100) - 16*(RH/100)**2 + 14.1*(RH/100)**3
    sig_io_lit = (0.5139*lamb_n - 0.326)*np.exp(1268*(1/303 - 1/p['T']))
    sig_io_bulk = sig_io_lit *0.672

    """ Mix Method """
    # Using a parallel resistor network to weight the conductivity through
    # lamellae and that through bulk-like material is performed with respect to
    # the amount of Pt and C areas respectively.
    sig_io_mix = 1 / (p['p_Pt']/100 /sig_io_lam +(1-p['p_Pt']/100) /sig_io_bulk)

    " Set conductivity depending on method "
    # Based on the method, return the appropriate conductivity.
    if p['sig_method'] == 'lam':
        sig_io = sig_io_lam
    elif p['sig_method'] == 'bulk':
        sig_io = sig_io_bulk
    elif p['sig_method'] == 'mix':
        sig_io = sig_io_mix
    elif p['sig_method'] == 'lit':
        sig_io = sig_io_lit

    # Output returns ionic conductivity [S/m]
    return sig_io
10,564
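The lamellae branch of sig_io_func above is, at its core, a 3-D table lookup: measured conductivities indexed by temperature, film thickness, and RH are wrapped in scipy's RegularGridInterpolator and queried at the model's operating point, then converted from mS/cm to S/m. A minimal, self-contained sketch of that pattern follows; the 2x2x2 grid values here are made up for illustration and are not the measured data above.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

# Hypothetical stand-in data: conductivity [mS/cm] on a coarse (T, t, RH) grid.
temp_vals = np.array([25.0, 60.0])       # temperature [C]
thick_vals = np.array([4.0, 300.0])      # Nafion film thickness [nm]
RH_vals = np.array([20.0, 95.0])         # relative humidity [%]
sig_data = np.array([[[0.1, 20.0], [1.0, 50.0]],
                     [[0.2, 55.0], [2.0, 100.0]]])

interp = RegularGridInterpolator((temp_vals, thick_vals, RH_vals), sig_data)

# Query one operating point with the same unit handling as the model:
# T stored in [K] -> [C], t_naf stored in [m] -> [nm], result mS/cm -> S/m.
T_K, t_naf_m, RH = 333.0, 100e-9, 80.0
pts = np.array([[T_K - 273.0, t_naf_m * 1e9, np.clip(RH, RH_vals.min(), RH_vals.max())]])
sig_io = interp(pts)[0] * 0.1   # ionic conductivity [S/m]
print(sig_io)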
def srwl_opt_setup_cyl_fiber(_foc_plane, _delta_ext, _delta_core, _atten_len_ext, _atten_len_core, _diam_ext, _diam_core, _xc, _yc): """ Setup Transmission type Optical Element which simulates Cylindrical Fiber :param _foc_plane: plane of focusing: 1- horizontal (i.e. fiber is parallel to vertical axis), 2- vertical (i.e. fiber is parallel to horizontal axis) :param _delta_ext: refractive index decrement of external layer :param _delta_core: refractive index decrement of core :param _atten_len_ext: attenuation length [m] of external layer :param _atten_len_core: attenuation length [m] of core :param _diam_ext: diameter [m] of external layer :param _diam_core: diameter [m] of core :param _xc: horizontal coordinate of center [m] :param _yc: vertical coordinate of center [m] :return: transmission (SRWLOptT) type optical element which simulates Cylindrical Fiber """ def ray_path_in_cyl(_dx, _diam): r = 0.5*_diam pathInCyl = 0 if((_dx > -r) and (_dx < r)): pathInCyl = 2*sqrt(r*r - _dx*_dx) return pathInCyl ne = 1 nx = 101 ny = 1001 rx = 10e-03 ry = _diam_ext*1.2 if(_foc_plane == 1): #focusing plane is horizontal nx = 1001 ny = 101 rx = _diam_ext*1.2 ry = 10e-03 opT = SRWLOptT(nx, ny, rx, ry, None, 1, 1e+23, 1e+23, _xc, _yc) hx = rx/(nx - 1) hy = ry/(ny - 1) ofst = 0 pathInExt = 0 pathInCore = 0 if(_foc_plane == 2): #focusing plane is vertical y = -0.5*ry #cylinder is always centered on the grid, however grid can be shifted for iy in range(ny): pathInExt = 0; pathInCore = 0 if(_diam_core > 0): pathInCore = ray_path_in_cyl(y, _diam_core) pathInExt = ray_path_in_cyl(y, _diam_ext) - pathInCore argAtten = -0.5*pathInExt/_atten_len_ext if(_atten_len_core > 0): argAtten -= 0.5*pathInCore/_atten_len_core ampTr = exp(argAtten) #amplitude transmission optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference for ix in range(nx): opT.arTr[ofst] = ampTr #amplitude transmission opT.arTr[ofst + 1] = optPathDif #optical path difference ofst += 2 y += hy else: #focusing plane is horizontal perY = 2*nx x = -0.5*rx #cylinder is always centered on the grid, however grid can be shifted for ix in range(nx): pathInExt = 0; pathInCore = 0 if(_diam_core > 0): pathInCore = ray_path_in_cyl(x, _diam_core) pathInExt = ray_path_in_cyl(x, _diam_ext) - pathInCore argAtten = -0.5*pathInExt/_atten_len_ext if(_atten_len_core > 0): argAtten -= 0.5*pathInCore/_atten_len_core ampTr = exp(argAtten) #amplitude transmission optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference ix2 = ix*2 for iy in range(ny): ofst = iy*perY + ix2 opT.arTr[ofst] = ampTr #amplitude transmission opT.arTr[ofst + 1] = optPathDif #optical path difference x += hx return opT
10,565
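The inner helper ray_path_in_cyl above is just the chord length of a ray crossing a circle of diameter _diam at transverse offset _dx; the amplitude transmission and optical path difference then follow from exponential attenuation and the refractive-index decrements. The short stand-alone sketch below reproduces those two formulas with hypothetical fiber dimensions; it does not use the SRW API.

from math import sqrt, exp

def chord_length(dx, diam):
    # Path length of a ray through a circular cross-section at offset dx.
    r = 0.5 * diam
    return 2.0 * sqrt(r * r - dx * dx) if -r < dx < r else 0.0

# Hypothetical fiber: 100 um external diameter with a 20 um core.
diam_ext, diam_core = 100e-6, 20e-6
delta_ext, delta_core = 1e-6, 2e-6            # refractive index decrements
atten_len_ext, atten_len_core = 5e-3, 1e-3    # attenuation lengths [m]

dx = 5e-6                                     # ray offset from the fiber axis [m]
path_core = chord_length(dx, diam_core)
path_ext = chord_length(dx, diam_ext) - path_core

amp_tr = exp(-0.5 * path_ext / atten_len_ext - 0.5 * path_core / atten_len_core)
opt_path_dif = -delta_ext * path_ext - delta_core * path_core
print(amp_tr, opt_path_dif)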
def __create_menu_elements() -> Enum: """Create Menu Elements. :return: Menu elements as an enum in the format KEY_WORD -> Values(char, KeyWord) """ menu_keys = ["MAIN_MENU", "PROFILE", "CLEAN_TIME", "READINGS", "PRAYERS", "DAILY_REFLECTION", "JUST_FOR_TODAY", "LORDS_PRAYER", "SERENITY_PRAYER", "ST_JOSEPHS_PRAYER", "TENDER_AND_COMPASSIONATE_GOD", "THIRD_STEP_PRAYER", "SEVENTH_STEP_PRAYER", "ELEVENTH_STEP_PRAYER"] menu_values_chr = [chr(ch) for ch in range(len(menu_keys))] menu_values_str = ["MainMenu", "Profile", "CleanTime", "Readings", "Prayers", "DailyReflection", "JustForToday", "LordsPrayer", "SerenityPrayer", "StJosephsPrayer", "TenderAndCompassionateGod", "ThirdStepPrayer", "SeventhStepPrayer", "EleventhStepPrayer"] return Enum('MenuElements', {k: MenuElementValues(data=v1, name=v2) for k, v1, v2 in zip(menu_keys, menu_values_chr, menu_values_str)})
10,566
def solve(coordinates): """ Algorithm solution function: finds the shortest distance between two points :param coordinates: the coordinates :return: the shortest distance between the two points """ n = len(coordinates) x_coordinates = [coordinate[0] for coordinate in coordinates] y_coordinates = [coordinate[1] for coordinate in coordinates] middle_point = (sum_of_list(x_coordinates) / n, sum_of_list(y_coordinates) / n) # print(middle_point) # test distances = [distance(middle_point, point) for point in coordinates] # print(distances) # test distance_difference = list() for i in range(n - 1): coordinate_info = { 'indices': (i, i + 1), 'difference': math.fabs(distances[i] - distances[i + 1]) } distance_difference.append(coordinate_info) # print(distance_difference) # test indices = get_indices(distance_difference) return distance(coordinates[indices[0]], coordinates[indices[1]])
10,567
def get_cache_key(account, container=None, obj=None): """ Get the keys for both memcache and env['swift.infocache'] (cache_key) where info about accounts, containers, and objects is cached :param account: The name of the account :param container: The name of the container (or None if account) :param obj: The name of the object (or None if account or container) :returns: a string cache_key """ if obj: if not (account and container): raise ValueError('Object cache key requires account and container') cache_key = 'object/%s/%s/%s' % (account, container, obj) elif container: if not account: raise ValueError('Container cache key requires account') cache_key = 'container/%s/%s' % (account, container) else: cache_key = 'account/%s' % account # Use a unique environment cache key per account and one container. # This allows caching both account and container and ensures that when we # copy this env to form a new request, it won't accidentally reuse the # old container or account info return cache_key
10,568
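A few illustrative calls (not part of the Swift test suite) show the key shapes produced by get_cache_key above and the guard on the object case; the account and container names are hypothetical.

assert get_cache_key('AUTH_test') == 'account/AUTH_test'
assert get_cache_key('AUTH_test', 'photos') == 'container/AUTH_test/photos'
assert get_cache_key('AUTH_test', 'photos', 'cat.jpg') == 'object/AUTH_test/photos/cat.jpg'

try:
    get_cache_key('AUTH_test', obj='cat.jpg')   # container missing
except ValueError as err:
    print(err)   # 'Object cache key requires account and container'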
def manipComponentPivot(*args, **kwargs): """ Dynamic library stub function Derived from mel command `maya.cmds.manipComponentPivot` """ pass
10,569
def dir_plus_file(fname): """Splits pathnames into the dirname plus the filename.""" return os.path.split(fname)
10,570
def test_flatten_list() -> None: """Flatten a list.""" nested_list: List[List[int]] = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] assert [num for elem in nested_list for num in elem] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
10,571
def arctan(x): """Returns arctan(x)""" if type(x) in (float,_numpy._numpy.float64): x = _numpy._numpy.array([x]) a = abs(x) r = arctan_1px( a - 1. ) f = arctan_series( a ) eps = _numpy._numpy.finfo(1.).eps g = arctan_series( 1. / maximum( 0.125, a ) ) g = 0.5 * _numpy._numpy.pi - g j = ( a < 0.5 ) r[j] = f[j] j = ( a > 2. ) r[j] = g[j] j = ( x<0 ) r[j] = -r[j] if r.size==1: return r[0] return r
10,572
def uniform_dist(low, high): """Return a random variable uniformly distributed between `low` and `high`. """ return sp_uniform(low, high - low)
10,573
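The wrapper above exists because scipy.stats.uniform is parameterised as (loc, scale), i.e. the interval [loc, loc + scale] rather than [low, high]. A quick check of the equivalence, assuming sp_uniform is scipy.stats.uniform:

from scipy.stats import uniform as sp_uniform

low, high = 2.0, 5.0
rv = sp_uniform(low, high - low)    # uniform on [2, 5]

assert rv.ppf(0.0) == 2.0 and rv.ppf(1.0) == 5.0
print(rv.mean())                    # 3.5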
def flatten(dictionary, parent_key=False, separator='_'): """ Turn a nested dictionary into a flattened dictionary :param dictionary: The dictionary to flatten :param parent_key: The string to prepend to dictionary's keys :param separator: The string used to separate flattened keys :return: A flattened dictionary """ crumbs = False # debug printing flag items = [] for key, value in list(dictionary.items()): if crumbs: print(('checking:',key)) new_key = (re.sub('[^A-Za-z0-9]+', '', str(parent_key)) + separator + re.sub('[^A-Za-z0-9]+', '', key) if parent_key else key).lower() if isinstance(value, MutableMapping): if crumbs: print((new_key,': dict found')) if not list(value.items()): if crumbs: print(('Adding key-value pair:',new_key,None)) items.append((new_key,None)) else: items.extend(list(flatten(value, new_key, separator).items())) elif isinstance(value, list): if crumbs: print((new_key,': list found')) if len(value): for k, v in enumerate(value): items.extend(list(flatten({str(k): v}, new_key).items())) else: if crumbs: print(('Adding key-value pair:',new_key,None)) items.append((new_key,None)) else: if crumbs: print(('Adding key-value pair:',new_key,value)) items.append((new_key, value)) return dict(items)
10,574
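A small usage sketch of the flatten helper above (assuming re and collections.abc.MutableMapping are imported and the debug flag is left off). Note that the re.sub calls strip any separator already present in a parent key before the next level is appended, so 'b_d' becomes 'bd' in the deepest key:

nested = {'a': 1, 'b': {'c': 2, 'd': {'x': 3}}, 'e': {}}
print(flatten(nested))
# -> {'a': 1, 'b_c': 2, 'bd_x': 3, 'e': None}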
def format_sec_to_hms(sec): """Format seconds to hours, minutes, seconds. Args: sec: float or int Number of seconds in a period of time Returns: str Period of time represented as a string on the form ``0h 00m 00s``. """ rem_int, s_int = divmod(int(sec), 60) h_int, m_int = divmod(rem_int, 60) return "{}h {:02d}m {:02d}s".format(h_int, m_int, s_int)
10,575
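The two divmod calls above peel seconds into hours, minutes, and seconds; for example:

print(format_sec_to_hms(3661))    # '1h 01m 01s'
print(format_sec_to_hms(59.9))    # '0h 00m 59s' (int() truncates fractional seconds)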
def one_hot_encode(data): """turns data into onehot encoding Args: data (np.array): (n_samples,) Returns: np.array: shape (n_samples, n_classes) """ n_classes = np.unique(data).shape[0] onehot = np.zeros((data.shape[0], n_classes)) for i, val in enumerate(data.astype(int)): onehot[i, val] = 1. return onehot
10,576
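Note that one_hot_encode above infers the class count from np.unique, so it assumes labels 0..n_classes-1 with every class present at least once. For example:

import numpy as np

labels = np.array([0, 2, 1, 2])
print(one_hot_encode(labels))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]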
def null_write_block(fo, block_bytes): """Write block in "null" codec.""" write_long(fo, len(block_bytes)) fo.write(block_bytes)
10,577
def is_number(input_str): """Check if input_str is a string number Args: input_str (str): input string Returns: bool: True if input_str can be parse to a number (float) """ try: float(input_str) return True except ValueError: return False
10,578
def test_swapping(exopy_qtbot, task_workbench, dialog_sleep): """Test moving a view between containers. """ task = RootTask() view = RootTaskView(task=task, core=task_workbench.get_plugin('enaml.workbench.core')) subtask = ComplexTask(name='Test') subview = view.view_for(subtask) task.add_child_task(0, subtask) cont = Container() show_widget(exopy_qtbot, cont) view.set_parent(cont) view.refresh() def assert_children(): assert cont.children == [view] exopy_qtbot.wait_until(assert_children) exopy_qtbot.wait(dialog_sleep) view.set_parent(None) subview.set_parent(cont) subview.refresh() def assert_children(): assert cont.children == [subview] exopy_qtbot.wait_until(assert_children) exopy_qtbot.wait(dialog_sleep) subview.set_parent(None) view.set_parent(cont) view.refresh() def assert_children(): assert cont.children == [view] exopy_qtbot.wait_until(assert_children) assert subview.visible exopy_qtbot.wait(dialog_sleep)
10,579
def run_command( info, host, port, reload, debugger, eager_loading, with_threads, extra_files ): """Run a local development server. This server is for development purposes only. It does not provide the stability, security, or performance of production WSGI servers. The reloader and debugger are enabled by default if FLASK_ENV=development or FLASK_DEBUG=1. """ debug = get_debug_flag() if reload is None: reload = debug if debugger is None: debugger = debug show_server_banner(get_env(), debug, info.app_import_path, eager_loading) app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) from werkzeug.serving import run_simple webbrowser.open_new(f'http://{host}:{port}/') run_simple( host, port, app, use_reloader=reload, use_debugger=debugger, threaded=with_threads, extra_files=extra_files, )
10,580
def impute_between(coordinate_a, coordinate_b, freq): """ Args: coordinate_a: coordinate_b: freq: Returns: """ metrics = discrete_velocity(coordinate_a, coordinate_b) b, d, sec = metrics['binning'], metrics['displacement'], metrics['time_delta'] if b != 'stationary' or d > 75 or sec > 60**2*12: return None a_lat, a_lon, a_ts = coordinate_a b_lat, b_lon, b_ts = coordinate_b if not (isinstance(a_ts, dt.datetime) and isinstance(b_ts, dt.datetime)): raise TypeError('third element of each coordinate tuple must be dt') fill_range = list(pd.date_range(a_ts, b_ts, freq=freq)) # ensure the returned dataframe range is exclusive if fill_range[0] == a_ts: fill_range.remove(fill_range[0]) if len(fill_range) == 0: return None if fill_range[-1] == b_ts: fill_range.remove(fill_range[-1]) fill_lat = np.linspace(a_lat, b_lat, len(fill_range)) fill_lon = np.linspace(a_lon, b_lon, len(fill_range)) t = dict(lat=fill_lat, lon=fill_lon, ts=fill_range) return pd.DataFrame(t)
10,581
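Setting aside the velocity gating (discrete_velocity and its thresholds are defined elsewhere), the imputation step in impute_between above is plain linear interpolation of latitude and longitude over a date range with the endpoints dropped. A reduced sketch of just that step, with hypothetical coordinates and a 1-minute frequency:

import datetime as dt
import numpy as np
import pandas as pd

a_lat, a_lon, a_ts = 40.0000, -105.0000, dt.datetime(2020, 1, 1, 12, 0, 0)
b_lat, b_lon, b_ts = 40.0004, -105.0004, dt.datetime(2020, 1, 1, 12, 5, 0)

fill_range = list(pd.date_range(a_ts, b_ts, freq='1min'))[1:-1]   # exclusive of both ends
fill_lat = np.linspace(a_lat, b_lat, len(fill_range))
fill_lon = np.linspace(a_lon, b_lon, len(fill_range))
print(pd.DataFrame(dict(lat=fill_lat, lon=fill_lon, ts=fill_range)))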
def seq(fr,to,by): """An analogous function to 'seq' in R Parameters: 1. fr: from 2. to: to 3. by: by (interval) """ if fr<to: return range(fr,to+abs(by),abs(by)) elif fr>to: if by>0: aseq = range(fr,to-by,-1*by) else: aseq = range(fr,to+by,by) else: aseq = [fr] if aseq[-1]>to: return aseq[:-1] else: return aseq
10,582
def _get_config_from_context(ctx): """ :param ctx: :return: :rtype: semi.config.configuration.Configuration """ return ctx.obj["config"]
10,583
def _invert_options(matrix=None, sparse=None): """Returns |invert_options| (with default values) for a given |NumPy| matrix. See :func:`sparse_options` for documentation of all possible options for sparse matrices. Parameters ---------- matrix The matrix for which to return the options. sparse Instead of providing a matrix via the `matrix` argument, `sparse` can be set to `True` or `False` to request the invert options for sparse or dense matrices. Returns ------- A tuple of all possible |invert_options|. """ global _dense_options, _dense_options_sid, _sparse_options, _sparse_options_sid assert (matrix is None) != (sparse is None) sparse = sparse if sparse is not None else issparse(matrix) if sparse: if not _sparse_options or _sparse_options_sid != defaults_sid(): _sparse_options = sparse_options() _sparse_options_sid = defaults_sid() return _sparse_options else: return _sparse_options else: if not _dense_options or _dense_options_sid != defaults_sid(): _dense_options = dense_options() _dense_options_sid = defaults_sid() return _dense_options else: return _dense_options
10,584
def directory_is_empty(path: AnyStr) -> bool: """ :param path: a directory path :return: True if directory is empty, False otherwise """ return not any(os.scandir(path))
10,585
def get_Carrot_scramble(n=70): """ Gets a Carrot-notation scramble of length `n` for a Megaminx. Defaults to csTimer's default length of 70. """ return _UTIL_SCRAMBLER.call("util_scramble.getMegaminxCarrotScramble", n).replace('\n','').replace('  ',' ').replace('  ',' ')
10,586
def convex_env_train(Xs, Ys): """ Identify the convex envelope on the set of models from the train set. """ # Sort the list in either ascending or descending order of the # items values in Xs key_X_pairs = sorted(Xs.items(), key=lambda x: x[1], reverse=False) # this is a list of (key, val) pairs # Start the Pareto frontier with the first key value in the sorted list p_front = [key_X_pairs[0][0]] # Loop through the sorted list count = 0 for (key, X) in key_X_pairs: if Ys[key] <= Ys[p_front[-1]]: # Look for lower values of Y if count > 0: p_front.append(key) count = count + 1 return remove_interior(p_front, Xs, Ys)
10,587
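The selection in convex_env_train above is a lower-left sweep: models are sorted by X and kept only while Y keeps decreasing, after which remove_interior (defined elsewhere) prunes points lying above the convex envelope. The self-contained sketch below shows the same sweep on hypothetical (size, error) pairs, without the interior-removal step:

# Hypothetical models: X = model size, Y = validation error.
Xs = {'m1': 1.0, 'm2': 2.0, 'm3': 3.0, 'm4': 4.0}
Ys = {'m1': 0.30, 'm2': 0.25, 'm3': 0.28, 'm4': 0.20}

p_front = []
for key, _ in sorted(Xs.items(), key=lambda kv: kv[1]):
    if not p_front or Ys[key] <= Ys[p_front[-1]]:
        p_front.append(key)

print(p_front)   # ['m1', 'm2', 'm4']; 'm3' is dominated and dropped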
def main(): """ Main function Main function that distributes the files that will be parsed and writes the result into database """ # Get list of names of the files that will be parsed file_list=[] # Open the file and read file names f = open( config.files["HYDRAFILES"], "r" ) for x in f: file_list.append( x.strip() ) # Create Spark session appName = "BeautifulSoup" master = config.spark["MASTERIP"] spark = SparkSession.builder \ .appName( appName ) \ .config( "spark.cassandra.connection.host", config.cassandra["HOSTS"] ) \ .config( 'spark.cassandra.connection.port', config.cassandra["PORT"] ) \ .config( 'spark.cassandra.output.consistency.level', 'ONE' ) \ .config( 'spark.kryoserializer.buffer.max', '2047m' ) \ .config( 'spark.driver.port', config.cassandra["DRIVERPORT"] ) \ .config( 'spark.network.timeout', '10000000' ) \ .master( master ) \ .getOrCreate() # Number of partitions n_parts = 100 # Distribute files among worker nodes using Map-Reduce and put results into rdd rdd_file = spark.sparkContext.parallelize( file_list, n_parts ) rdd = rdd_file.mapPartitions( parse_files ) # Create a schema for the dataframe schema = StructType([ StructField( 'marketplace', StringType(), True ), StructField( 'product_name', StringType(), True ), StructField( 'price', FloatType(), True ), StructField( 'category', StringType(), True ), StructField( 'vendor', StringType(), True ), StructField( 'description', StringType(), True ), StructField( 'ad_date', DateType(), True ), StructField( 'ship_to', StringType(), True ), StructField( 'ship_from', StringType(), True ), StructField( 'image_url', StringType(), True ) ]) # Create data frame from rdd using the schema above df = spark.createDataFrame( rdd,schema ) # Print example of data and statistics df.show() print( "-------------------Total rows:" + str( df.count() ) ) # Write result to Cassandra df.write\ .format( "org.apache.spark.sql.cassandra" ) \ .mode( "append" ) \ .options( table = config.cassandra["MARKETPLACE"], keyspace = config.cassandra["KEYSPACE"] ) \ .save()
10,588
def handle_control_command(sniffer, arg, typ, payload): """Handle command from control channel""" if arg == CTRL_ARG_DEVICE: if payload == b' ': scan_for_devices(sniffer) else: follow_device(sniffer, payload) elif arg == CTRL_ARG_KEY: set_passkey_or_OOB(sniffer, payload) elif arg == CTRL_ARG_ADVHOP: set_advhop(sniffer, payload)
10,589
def randthresh(Y,K,p=np.inf,stop=False,verbose=False,varwind=False,knownull=True): """ Wrapper for random threshold functions (without connexity constraints) In: Y (n,) Observations K <int> Some positive integer (lower bound on the number of null hypotheses) p <float> lp norm stop <bool> Stop when minimum is attained (save computation time) verbose <bool> 'Chatty' mode varwind <bool> Varying window variant (vs. fixed window, with width K) knownull <bool> Known null distribution (observations assumed Exp(1) under H0) versus unknown (observations assumed Gaussian under H0) Out: A dictionary D containing the following fields: "C" (n-K) Lp norm of partial sums fluctuation about their conditional expectation "thresh" <float> Detection threshold "detect" (k,) Index of detected activations "v" <float> Estimated null variance (if knownull is False) Note: Random thresholding is performed only if null hypothesis of no activations is rejected at level 5% """ D = {} # Test presence of activity if knownull: X = Y else: v = np.square(Y).mean() X = np.clip(-np.log(1 - ST.chi2.cdf(Y**2, 1, 0, scale=v)), 0, 1 / tol) D["v"] = v T = test_stat(X,p=np.inf) if T <= 0.65: print "No activity detected at 5% level" D["detect"] = np.array([]) D["thresh"] = np.inf else: # Find optimal threshold if varwind: if knownull: C = randthresh_varwind_knownull(Y,K,p,stop,verbose) else: C, V = randthresh_varwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose) else: if knownull: C = randthresh_fixwind_knownull(Y,K,p,stop,verbose) else: C, V = randthresh_fixwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose) n = len(X) if stop: I = np.where(C > 0)[0] if len(I) > 0: ncoeffs = I[-1] else: ncoeffs = n - K else: I = np.where((C[2:] > C[1:-1]) * (C[1:-1] < C[:-2]))[0] if len(I) > 0: ncoeffs = I[np.argmin(C[1:-1][I])] + 1 else: ncoeffs = n - K thresh = np.sort(np.abs(Y))[-ncoeffs] # Detected activations detect = np.where(np.abs(Y) > thresh)[0] D["C"] = C[2:] D["thresh"] = thresh D["detect"] = detect if not knownull: D["v"] = V[2:] return D
10,590
def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ? """get functions defined inside of func (e.g. inner functions in a closure) NOTE: results may differ if the function has been executed or not. If len(nestedcode(func)) > len(referrednested(func)), try calling func(). If possible, python builds code objects, but delays building functions until func() is called. """ if PY3: att1 = '__code__' att0 = '__func__' else: att1 = 'func_code' # functions att0 = 'im_func' # methods import gc funcs = set() # get the code objects, and try to track down by referrence for co in nestedcode(func, recurse): # look for function objects that refer to the code object for obj in gc.get_referrers(co): # get methods _ = getattr(obj, att0, None) # ismethod if getattr(_, att1, None) is co: funcs.add(obj) # get functions elif getattr(obj, att1, None) is co: funcs.add(obj) # get frame objects elif getattr(obj, 'f_code', None) is co: funcs.add(obj) # get code objects elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj) # frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars # funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames # frameobjs are not found, however funcobjs are... # (see: test_mixins.quad ... and test_mixins.wtf) # after execution, code objects get compiled, and then may be found by gc return list(funcs)
10,591
def format_organizations_output(response: Dict[str, Any], page_number: int, limit: int) -> Tuple[list, int]: """ Formatting list organizations command outputs. Args: response (Dict[str,Any): The response from the API call. limit (int): Maximum number of results to return. page_number(int): The Page number to retrieve. Returns: Tuple[list,int]: Formatted command output and total results. """ formatted_organizations = [] relevant_output_entities, total_page_number = format_list_commands_output(response, ['response', 'result', 'domains', 'domain'], page_number, limit) for organization in relevant_output_entities: formatted_organization = {} for key, value in organization.items(): if key.startswith('@'): formatted_organization[key[1:]] = value else: formatted_organization[key] = value formatted_organizations.append(formatted_organization) return formatted_organizations, total_page_number
10,592
def which_db_version(cursor): """ Return version of DB schema as string. Return '5', if iOS 5. Return '6', if iOS 6 or iOS 7. """ query = "select count(*) from sqlite_master where name = 'handle'" cursor.execute(query) count = cursor.fetchone()[0] if count == 1: db_version = '6' else: db_version = '5' return db_version
10,593
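Since which_db_version above only counts whether a 'handle' table exists in sqlite_master, it can be exercised against an in-memory database (illustrative only, not a real iOS backup):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
print(which_db_version(cur))   # '5' -- no 'handle' table yet

cur.execute('CREATE TABLE handle (id INTEGER PRIMARY KEY)')
print(which_db_version(cur))   # '6'
conn.close()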
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the LiteJet switch platform.""" litejet_ = hass.data['litejet_system'] devices = [] for i in litejet_.button_switches(): name = litejet_.get_switch_name(i) if not litejet.is_ignored(hass, name): devices.append(LiteJetSwitch(hass, litejet_, i, name)) add_entities(devices, True)
10,594
async def get_number_of_images_from_category(category : str): """ TODO docstring Get number of images in category """ categories_query = CATEGORIES_DB.search(where('category') == category) if not categories_query: return {"number_of_images": 0} return {"number_of_images": len(categories_query)}
10,595
def gaussian_total_correlation(cov): """Computes the total correlation of a Gaussian with covariance matrix cov. We use that the total correlation is the KL divergence between the Gaussian and the product of its marginals. By design, the means of these two Gaussians are zero and the covariance matrix of the second Gaussian is equal to the covariance matrix of the first Gaussian with off-diagonal entries set to zero. Args: cov: Numpy array with covariance matrix. Returns: Scalar with total correlation. """ return 0.5 * (np.sum(np.log(np.diag(cov))) - np.linalg.slogdet(cov)[1])
10,596
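As a sanity check of the formula above: a diagonal covariance gives zero total correlation, and a correlated 2-D Gaussian gives -0.5*log(1 - rho^2). With a hypothetical covariance matrix:

import numpy as np

print(gaussian_total_correlation(np.eye(3)))    # 0.0

rho = 0.8
cov = np.array([[1.0, rho], [rho, 1.0]])
print(gaussian_total_correlation(cov))          # ~0.511
print(-0.5 * np.log(1 - rho**2))                # same value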
def S3list(s3bucket, fdate, instrm, network='OKLMA'): """ get list of files in a s3 bucket for a specific fdate and instrument (prefix) fdate: e.g. '2017-05-17' instrm: e.g. 'GLM' """ prefix = {'GLM': 'fieldcampaign/goesrplt/GLM/data/L2/' + fdate + '/OR_GLM-L2-LCFA_G16', 'LIS': 'fieldcampaign/goesrplt/ISS_LIS/data/' + fdate + '/ISS_LIS_SC_V1.0_', # 'FEGS': 'fieldcampaign/goesrplt/FEGS/data/goesr_plt_FEGS_' + fdate.replace('-', '') + '_Flash', 'CRS': 'fieldcampaign/goesrplt/CRS/data/GOESR_CRS_L1B_' + fdate.replace('-', ''), 'NAV': 'fieldcampaign/goesrplt/NAV_ER2/data/goesrplt_naver2_IWG1_' + fdate.replace('-', ''), 'LMA': 'fieldcampaign/goesrplt/LMA/' + network + '/data/' + fdate + '/goesr_plt_' + network + '_' + fdate.replace( '-', '')} print("S3list searching for ", prefix[instrm]) s3 = boto3.resource('s3') bucket = s3.Bucket(s3bucket) keys = [] for obj in bucket.objects.filter(Prefix=prefix[instrm]): keys.append(obj.key) return keys
10,597
def formatted_karma(user, activity): """ Performs a karma check for the user and returns a String that's already formatted exactly like the usual response of the bot. :param user: The user the karma check will be performed for. :return: A conveniently formatted karma check response. """ response = good_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0]) if activity[3] > activity[0]/3: response = bad_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[4], activity[0], activity[3]) elif activity[1] < 2 and activity[2] < 5: response = new_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0]) return response
10,598
def setsysvolacl(samdb, netlogon, sysvol, uid, gid, domainsid, dnsdomain, domaindn, lp, use_ntvfs): """Set the ACL for the sysvol share and the subfolders :param samdb: An LDB object on the SAM db :param netlogon: Physical path for the netlogon folder :param sysvol: Physical path for the sysvol folder :param uid: The UID of the "Administrator" user :param gid: The GID of the "Domain administrators" group :param domainsid: The SID of the domain :param dnsdomain: The DNS name of the domain :param domaindn: The DN of the domain (i.e. DC=...) """ s4_passdb = None if not use_ntvfs: # This will ensure that the smbd code we are running when setting ACLs # is initialised with the smb.conf s3conf = s3param.get_context() s3conf.load(lp.configfile) # ensure we are using the right samba_dsdb passdb backend, no matter what s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url) passdb.reload_static_pdb() # ensure that we init the samba_dsdb backend, so the domain sid is # marked in secrets.tdb s4_passdb = passdb.PDB(s3conf.get("passdb backend")) # now ensure everything matches correctly, to avoid weird issues if passdb.get_global_sam_sid() != domainsid: raise ProvisioningError('SID as seen by smbd [%s] does not match SID as seen by the provision script [%s]!' % (passdb.get_global_sam_sid(), domainsid)) domain_info = s4_passdb.domain_info() if domain_info["dom_sid"] != domainsid: raise ProvisioningError('SID as seen by pdb_samba_dsdb [%s] does not match SID as seen by the provision script [%s]!' % (domain_info["dom_sid"], domainsid)) if domain_info["dns_domain"].upper() != dnsdomain.upper(): raise ProvisioningError('Realm as seen by pdb_samba_dsdb [%s] does not match Realm as seen by the provision script [%s]!' % (domain_info["dns_domain"].upper(), dnsdomain.upper())) try: if use_ntvfs: os.chown(sysvol, -1, gid) except OSError: canchown = False else: canchown = True # Set the SYSVOL_ACL on the sysvol folder and subfolder (first level) setntacl(lp,sysvol, SYSVOL_ACL, str(domainsid), use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=s4_passdb, service=SYSVOL_SERVICE) for root, dirs, files in os.walk(sysvol, topdown=False): for name in files: if use_ntvfs and canchown: os.chown(os.path.join(root, name), -1, gid) setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid), use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=s4_passdb, service=SYSVOL_SERVICE) for name in dirs: if use_ntvfs and canchown: os.chown(os.path.join(root, name), -1, gid) setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid), use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=s4_passdb, service=SYSVOL_SERVICE) # Set acls on Policy folder and policies folders set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp, use_ntvfs, passdb=s4_passdb)
10,599