content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def test_exact_enumeration(): """ Testing the Exact Enumeration class. """ solver = ExactEnumeration() W = np.random.uniform(0, 1, (100, 7)) solver.solve_game(W, q=2.5) Phi = solver.get_solution() Phi_tilde = solver.get_average_shapley() entropy = solver.get_shapley_entropy() assert Phi.shape == W.shape assert Phi_tilde.shape == (W.shape[1],) assert -math.log(1.0 / W.shape[1]) - entropy > -0.001 solver = ExactEnumeration() W = np.random.uniform(0, 1, (100, 6)) solver.solve_game(W, q=2) Phi = solver.get_solution() Phi_tilde = solver.get_average_shapley() entropy = solver.get_shapley_entropy() assert Phi.shape == W.shape assert Phi_tilde.shape == (W.shape[1],) assert -math.log(1.0 / W.shape[1]) - entropy > -0.001 solver = ExactEnumeration() W = np.random.uniform(0, 1, (10, 5)) solver.solve_game(W, q=3) Phi = solver.get_solution() Phi_tilde = solver.get_average_shapley() entropy = solver.get_shapley_entropy() assert Phi.shape == W.shape assert Phi_tilde.shape == (W.shape[1],) assert -math.log(1.0 / W.shape[1]) - entropy > -0.001
11,400
def soup_from_name(username): """ Grabs bs4 object from html page """ # html_source = urlopen('https://www.instagram.com/'+ str(username) + '/') url = 'https://www.instagram.com/'+ str(username) + '/' headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0)" \ "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"} html_source = requests.get(url, headers=headers).text return BeautifulSoup(html_source, 'html.parser') #react-root > section > main > div > div.Nd_Rl._2z6nI > article > div._4Kbb_ > div > h2 # print(soup.body.span.section.main.div.div.article.div.div.h2)
11,401
def test_parse_single_mark_en() -> None: """Testing a single bookmark in English.""" raw_string = """Book 2 (Spanish Edition) (Author 2) - Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33 """ clipping = Clipping.parse_single_highlight(raw_string) assert clipping is not None assert clipping.title == "Book 2 (Spanish Edition)" assert clipping.author == "Author 2" assert ( clipping.metadata == "- Your Bookmark on Location 1012 | Added on Saturday, February 9, 2013 10:40:33" ) assert clipping.content == "" clipping.parse_metadata("en") assert clipping.position_start == 1012 assert clipping.type == "MARK"
11,402
def iface_down(ifname): """Flush all ip addresses belonging to the interface. """ if ifname is None: return ipr = IPRoute() try: ipr.flush_addr(label=ifname) x = ipr.link_lookup(ifname=ifname)[0] ipr.link('set', index=x, state='down') except Exception: pass ipr.close()
11,403
def SectionMenu(rating_key, title=None, base_title=None, section_title=None, ignore_options=True, section_items_key="all"): """ displays the contents of a section :param section_items_key: :param rating_key: :param title: :param base_title: :param section_title: :param ignore_options: :return: """ items = get_all_items(key=section_items_key, value=rating_key, base="library/sections") kind, deeper = get_items_info(items) title = unicode(title) section_title = title title = base_title + " > " + title oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True) if ignore_options: add_ignore_options(oc, "sections", title=section_title, rating_key=rating_key, callback_menu=IgnoreMenu) return dig_tree(oc, items, MetadataMenu, pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": "section", "previous_rating_key": rating_key})
11,404
def round_floats_for_json(obj, ndigits=2, key_ndigits=None): """ Tries to round all floats in obj in order to reduce json size. ndigits is the default number of digits to round to, key_ndigits allows you to override this for specific dictionary keys, though there is no concept of nested keys. It converts numpy arrays and iterables to lists, so it should only be used when serializing to json """ if key_ndigits is None: key_ndigits = {} if isinstance(obj, np.floating): obj = float(obj) elif isinstance(obj, np.ndarray): obj = obj.tolist() if isinstance(obj, float): obj = round(obj, ndigits) elif isinstance(obj, dict): new_obj = {} for k, v in obj.items(): this_ndigits = key_ndigits.get(k, ndigits) new_obj[k] = round_floats_for_json(v, this_ndigits, key_ndigits) return new_obj elif isinstance(obj, str): return obj else: try: return [round_floats_for_json(x, ndigits, key_ndigits) for x in obj] except TypeError: pass return obj
11,405
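For illustration, a minimal usage sketch of the round_floats_for_json helper above; it assumes that helper and NumPy are importable in the same module, and the payload keys and values are made up.

import numpy as np

# Hypothetical payload mixing Python floats, NumPy scalars and arrays.
payload = {
    "score": np.float64(0.123456),
    "latitude": 51.4778123,
    "weights": np.array([0.333333, 0.666667]),
}

# Round everything to 2 digits by default, but keep 5 digits for "latitude".
rounded = round_floats_for_json(payload, ndigits=2, key_ndigits={"latitude": 5})
print(rounded)  # {'score': 0.12, 'latitude': 51.47781, 'weights': [0.33, 0.67]}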
def visualize(**images): """Plot images in one row.""" n = len(images) plt.figure(figsize=(16, 5)) for i, (name, image) in enumerate(images.items()): plt.subplot(1, n, i + 1) plt.xticks([]) plt.yticks([]) plt.title(' '.join(name.split('_')).title()) plt.imshow(image) plt.show()
11,406
def read_shared(function_name, verb, request, local_variables=None): """all the shared code for each of these read functions""" command = function_name.split('_')[1] # assumes fn name is query_<command> command_args, verb_args = create_filters(function_name, command, request, local_variables) verb = cleanup_verb(verb) columns = local_variables.get('columns', None) format = local_variables.get('format', None) ret, svc_inst = run_command_verb( command, verb, command_args, verb_args, columns, format) return ret
11,407
def http_put_request( portia_config: dict, endpoint: str, payload: dict, params: dict=None, optional_headers: dict=None ) -> object: """Makes an HTTP PUT request. Arguments: portia_config {dict} -- Portia's configuration arguments endpoint {str} -- endpoint to make the request to payload {dict} -- payload to send to the service Keyword Arguments: params {dict} -- params to send to the service (default: {None}) optional_headers {dict} -- dictionary with other headers (default: {None}) Returns: object -- response object """ headers = { 'Authorization': 'Bearer {0}' \ .format(portia_config.get('authorization')) } if optional_headers is not None: headers = {**headers, **optional_headers} start = time.time() response = requests.put( '{0}{1}'.format(portia_config.get('baseurl'), endpoint), headers=headers, params=params, json=payload ) end = time.time() if portia_config.get('debug') == True: print( '[portia-debug]: status: {0} | {1:.4f} sec. | {2}' \ .format(response.status_code, end - start, response.url) ) return response
11,408
def construct_SN_default_rows(timestamps, ants, nif, gain=1.0): """ Construct list of ants dicts for each timestamp with REAL, IMAG, WEIGHT = gains """ default_nif = [gain] * nif rows = [] for ts in timestamps: rows += [{'TIME': [ts], 'TIME INTERVAL': [0.1], 'ANTENNA NO.': [antn], 'REAL1': default_nif, 'REAL2': default_nif, 'IMAG1': default_nif, 'IMAG2': default_nif, 'WEIGHT 1': default_nif, 'WEIGHT 2': default_nif} for antn in ants] return rows
11,409
def test_is_matching(load_email): """it should be able to match a deposit email""" html = load_email(EMAIL_PATH) assert deposit_email.is_matching(html)
11,410
async def test_disable(aresponses): """Test disabling AdGuard Home query log.""" async def response_handler(request): data = await request.json() assert data == {"enabled": False, "interval": 1} return aresponses.Response(status=200) aresponses.add( "example.com:3000", "/control/querylog_info", "GET", aresponses.Response( status=200, headers={"Content-Type": "application/json"}, text='{"interval": 1}', ), ) aresponses.add( "example.com:3000", "/control/querylog_config", "POST", response_handler ) aresponses.add( "example.com:3000", "/control/querylog_info", "GET", aresponses.Response( status=200, headers={"Content-Type": "application/json"}, text='{"interval": 1}', ), ) aresponses.add( "example.com:3000", "/control/querylog_config", "POST", aresponses.Response(status=500), ) async with aiohttp.ClientSession() as session: adguard = AdGuardHome("example.com", session=session) await adguard.querylog.disable() with pytest.raises(AdGuardHomeError): await adguard.querylog.disable()
11,411
def load_checkpoint(path: str, device: torch.device = None, logger: logging.Logger = None) -> MoleculeModel: """ Loads a model checkpoint. :param path: Path where checkpoint is saved. :param device: Device where the model will be moved. :param logger: A logger for recording output. :return: The loaded :class:`~chemprop.models.model.MoleculeModel`. """ if logger is not None: debug, info = logger.debug, logger.info else: debug = info = print # Load model and args state = torch.load(path, map_location=lambda storage, loc: storage) args = TrainArgs() args.from_dict(vars(state['args']), skip_unsettable=True) loaded_state_dict = state['state_dict'] if device is not None: args.device = device if args.quantileregression != 'None': model = PB_MoleculeModel(args) else: model = MoleculeModel(args) model_state_dict = model.state_dict() # Skip missing parameters and parameters of mismatched size pretrained_state_dict = {} for param_name in loaded_state_dict.keys(): if param_name not in model_state_dict: info(f'Warning: Pretrained parameter "{param_name}" cannot be found in model parameters.') elif model_state_dict[param_name].shape != loaded_state_dict[param_name].shape: info(f'Warning: Pretrained parameter "{param_name}" ' f'of shape {loaded_state_dict[param_name].shape} does not match corresponding ' f'model parameter of shape {model_state_dict[param_name].shape}.') else: debug(f'Loading pretrained parameter "{param_name}".') pretrained_state_dict[param_name] = loaded_state_dict[param_name] # Load pretrained weights model_state_dict.update(pretrained_state_dict) model.load_state_dict(model_state_dict) if args.cuda: debug('Moving model to cuda') model = model.to(args.device) return model
11,412
def kron_diag(*lts): """Compute diagonal of a KroneckerProductLazyTensor from the diagonals of the constituent tensors""" lead_diag = lts[0].diag() if len(lts) == 1: # base case: return lead_diag trail_diag = kron_diag(*lts[1:]) diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1) return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1)
11,413
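The identity kron_diag relies on, that the diagonal of a Kronecker product equals the Kronecker product of the diagonals, can be checked in plain NumPy; this sketch sidesteps the lazy-tensor API used above and only verifies the underlying math.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3))
B = rng.standard_normal((4, 4))

lhs = np.diag(np.kron(A, B))           # diagonal of the full Kronecker product
rhs = np.kron(np.diag(A), np.diag(B))  # Kronecker product of the two diagonals
assert np.allclose(lhs, rhs)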
def makeBundleObj(config_fname, getPackage, getPackageLength): """Given a description of a thandy bundle in config_fname, return a new unsigned bundle object. getPackage must be a function returning a package object for every package the bundle requires when given the package's name as input. getPackageLength must be a function returning the length of the package file. """ packages = [] def ShortGloss(lang, val): packages[-1]['gloss'][lang] = val def LongGloss(lang, val): packages[-1]['longgloss'][lang] = val def Package(name, order, version=None, path=None, optional=False): packages.append({'name' : name, 'version' : version, 'path' : path, 'order' : order, 'optional' : optional, 'gloss' : {}, 'longgloss' : {} }) preload = { 'ShortGloss' : ShortGloss, 'LongGloss' : LongGloss, 'Package' : Package } r = readConfigFile(config_fname, ['name', 'os', 'version', 'location', ], ['arch'], preload) result = { '_type' : "Bundle", 'at' : formatTime(time.time()), 'name' : r['name'], 'os' : r['os'], 'version' : r['version'], 'location' : r['location'], 'packages' : packages } if r.has_key('arch'): result['arch'] = r['arch'] for p in packages: try: pkginfo = getPackage(p['name']) except KeyError: raise thandy.FormatException("No such package as %s"%p['name']) p['hash'] = formatHash(getDigest(pkginfo)) p['length'] = getPackageLength(p['name']) if p['path'] == None: p['path'] = pkginfo['location'] if p['version'] == None: p['version'] = pkginfo['version'] BUNDLE_SCHEMA.checkMatch(result) return result
11,414
async def test_availability_without_topic(hass, mqtt_mock): """Test availability without defined availability topic.""" await help_test_availability_without_topic( hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG )
11,415
def find_English_term(term: list) -> tuple: """ Find the English and numbers from a term list and remove the English and numbers from the term :param term: the term list :return term: the term removed the English and numbers :return Eng_terms: the removed English """ temp_terms = [] Eng_terms = [] for i in range(len(term)): string = term[i] result = re.findall(r'[a-zA-Z0-9]+', string) for j in result: temp_terms.append(j) term[i] = re.sub(pattern=j, repl='', string=term[i]) temp_terms = set(temp_terms) for k in temp_terms: Eng_terms.append(k) return term, Eng_terms
11,416
def run(argv=None): """Build and run the pipeline.""" parser = argparse.ArgumentParser() parser.add_argument( '--output', required=True, help=('Output file to write to')) parser.add_argument( '--input', required=True, help=('Input PubSub topic of the form ' '"projects/<PROJECT>/topics/<TOPIC>".')) known_args, pipeline_args = parser.parse_known_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = True pipeline_options.view_as(StandardOptions).streaming = True p = beam.Pipeline(options=pipeline_options) results = (p | 'Read messages' >> beam.io.ReadStringsFromPubSub(topic=known_args.input) | 'Assign keys' >> beam.Map(lambda a:(a.split(',')[0],a)) | 'Add timestamps' >> beam.ParDo(DuplicateWithLagDoFn()).with_outputs('lag_output', main='main_output')) windowed_main = results.main_output | 'Window main output' >> beam.WindowInto(window.FixedWindows(WINDOW_SECONDS)) windowed_lag = results.lag_output | 'Window lag output' >> beam.WindowInto(window.FixedWindows(WINDOW_SECONDS)) merged = (windowed_main, windowed_lag) | 'Join Pcollections' >> beam.CoGroupByKey() merged | 'Compare' >> beam.ParDo(CompareDoFn()) result = p.run() result.wait_until_finish()
11,417
def listplaylists(context): """ *musicpd.org, stored playlists section:* ``listplaylists`` Prints a list of the playlist directory. After each playlist name the server sends its last modification time as attribute ``Last-Modified`` in ISO 8601 format. To avoid problems due to clock differences between clients and the server, clients should not compare this value with their local clock. Output format:: playlist: a Last-Modified: 2010-02-06T02:10:25Z playlist: b Last-Modified: 2010-02-06T02:11:08Z """ result = [] for playlist in context.backend.stored_playlists.playlists.get(): result.append((u'playlist', playlist.name)) last_modified = (playlist.last_modified or dt.datetime.now()).isoformat() # Remove microseconds last_modified = last_modified.split('.')[0] # Add time zone information # TODO Convert to UTC before adding Z last_modified = last_modified + 'Z' result.append((u'Last-Modified', last_modified)) return result
11,418
def add_target_variable(df: pd.DataFrame) -> pd.DataFrame: """Add column with the target variable to the given dataframe.""" return df.assign(y=df.rent + df.admin_fee)
11,419
def extension_list(filepath): """Show list of extensions.""" path = pathlib.Path.cwd()/pathlib.Path(filepath) file_extensions = [FileExtension(p.relative_to(p.cwd())) for p in path.iterdir() if p.is_dir() and not p.name.startswith(('.', '__'))] py_extensions = [PyExtension(p.relative_to(p.cwd())) for p in path.glob('*.py') if not p.name.startswith(('.', '__'))] extensions = sorted(file_extensions + py_extensions, key=lambda x: str(x)) for extension in extensions: if extension.has_setup(): if (version := extension.get_version()) is not None: text = f'{extension} {version}' else: text = str(extension) click.echo(click.style(text, fg='yellow', bold=True)) else: click.echo(click.style(str(extension), fg='blue'))
11,420
def get_address_from_public_key(public_key): """ Get bytes from public key object and call method that expect bytes :param public_key: Public key object :param public_key: ec.EllipticCurvePublicKey :return: address in bytes :rtype: bytes """ public_key_bytes = get_public_key_bytes_compressed(public_key) return get_address_from_public_key_bytes(public_key_bytes)
11,421
def test_that_we_dont_revert_finalized_cp(db): """ This tests that the chain does not revert a finalized checkpoint. """ test_string = 'B J0 J1 J2 B B B S0 B V0 V1 V2 B V0 V1 V2 B S1 R0 B B B B B B B B V0 V1 V2 B1 H1' test = TestLangHybrid(5, 100, 0.02, 0.002) test.parse(test_string)
11,422
def generate_validation() -> 'Pickle File': """Generates the validation set of Qualtrics Codes from surveys library""" url = 'https://{}.qualtrics.com/API/v3/surveys/'.format(qualtrics_settings['Data Center']) Qualtrics_Codes = {} """Sets the Years""" years = [time.localtime().tm_year-1, time.localtime().tm_year] strptime = '%Y-%m-%d %H:%M:%S' while url is not None: """Pulls Surveys from the Past Number of Designated Years""" pull = {i['name']: i['id'] for i in tqdm(json.loads(r.get(url, headers=headers) .content.decode('utf-8'))['result']['elements'], desc="loading validation code set") if datetime.strptime(replacer(i['lastModified'], ' ', 'T', 'Z'), strptime).year in years} """Updates Dictionary with Relevant Codes""" Qualtrics_Codes = {**Qualtrics_Codes, **pull} """Gets Next Offset - API Update in the Works to be Able to Pull All""" url = json.loads(r.get(url, headers=headers).content.decode('utf-8'))['result']['nextPage'] save_settings('wb', Qualtrics_Codes, 'Qualtrics_Codes') Qualtrics_Codes = reload_codes()
11,423
def setup(bot: ModmailBot) -> None: """Add the paginator cleaner to the bot.""" bot.add_cog(PaginatorManager(bot))
11,424
def main() -> None: """Times the given command. TODO: consider ways of making this fancier. - Print memory usage? - Recurrent? """ ...
11,425
def read_ORIGEN_gamma_spectrum(output_filename, cooling_time_string): """ Function for reading a gamma spectrum from an ORIGEN output file. """ #Too long text may cause problems, so check for it. if len(cooling_time_string) >= 10: print("The cooling time could not be found in the input, the header text \"" + cooling_time_string + "\" is too long.") return 0,0 found_spectrum = False bin_count = [0] bin_edges = [0] f = open(output_filename, 'r') ORIGEN = f.read() if len(ORIGEN) < 1: #Did not read anything, or read an empty file. Return empty arrays print("Failed to open ORIGEN output file " + output_filename) return bin_edges, bin_count #get the gamma spectra form the output #The header we are looking for starts with this string, and ends with a total row, the data we want is in between. spectrumpattern = re.compile("gamma spectra, photons\/sec\/basis(.*?)\s*totals", re.DOTALL) if re.search(spectrumpattern, ORIGEN): spectrum_list = re.findall(spectrumpattern, ORIGEN) else: #Failed to find any gamma spectrum, return empty arrays print("Failed to find a gamma spectrum in ORIGEN output file " + output_filename) return bin_edges, bin_count for spectrum in spectrum_list: spectrum_textlines = spectrum.splitlines() #Get the spectrum table header, search for cooling_time_string in the header headers = spectrum_textlines[3] #after removing the 23 first characters, each column header should start with a space, followed #by possibly more spaces for right-alignmnet, and then the cooling time string. #Each such header is 10 characters long. header_columns = headers[23:] #Column headers are padded with spaces at the beginning to be 10 characters wide. header_string = cooling_time_string.strip() while len(header_string ) < 10: header_string = ' ' + header_string if header_columns.find(header_string) != -1: column = math.ceil(header_columns.find(header_string)/10) found_spectrum = True #allocate memory bin_count = [0] * (len(spectrum_textlines)-4) bin_edges = [0] * (len(spectrum_textlines)-3) #Table should start at row 4. for i in range(4,len(spectrum_textlines)): #read the gamma spectrum line = spectrum_textlines[i].strip() split_line = line.split(" ") #The split lines should have the following format: # <line number> <low bin edge> <hyphen> <high bin edge> #<first cooling time bin count> <second cooling time bin count> <third...> bin_count[i-4] = float(split_line[column + 3]) bin_edges[i-4] = float(split_line[1]) #Final upper bin edge. bin_edges[len(spectrum_textlines)-4] = float(split_line[3]) if found_spectrum == False: #Did not find the requested spectra in the file, return empty arrays. print("Unable to find a gamma spectrum with cooling time " + cooling_time_string + " in ORIGEN output file " + output_filename) bin_count = [0] bin_edges = [0] return bin_edges, bin_count else: #Found the requested gamma spectrum, return it. #If several are found, this will return the last one, which is typically the one of interest. return bin_edges, bin_count
11,426
def process_cases(list_): """Process cases and determine whether group flag or empty line.""" # Get information is_empty = (len(list_) == 0) if not is_empty: is_group = list_[0].isupper() is_comment = list_[0][0] == '#' else: is_group = False is_comment = False # Finishing return is_empty, is_group, is_comment
11,427
def updateNestedDicts(d1, d2): """Updates two dictionaries, assuming they have the same entries""" finalDict = createDictionary() for key in d1: #print(key) newDict = updateDicts(d1[key], d2[key]) finalDict[key] = newDict return finalDict
11,428
def process_data(window): """ clean and save data ... previous executed manually with submit button """ # try: clean_code, char_cnt, code_stats = clean_data(window) save_data(clean_code, code_stats, window) display_charts(char_cnt, window) display_stats(code_stats, window) window["-TAB RAW-"].select()
11,429
def page_not_found(e): """error handler for page not found""" flash(e.description, 'danger') return render_template('main/404.html'), 404
11,430
def get_cifar10_raw_data(): """ Gets raw CIFAR10 data from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz. Returns: X_train: CIFAR10 train data in numpy array with shape (50000, 32, 32, 3). Y_train: CIFAR10 train labels in numpy array with shape (50000, ). X_test: CIFAR10 test data in numpy array with shape (10000, 32, 32, 3). Y_test: CIFAR10 test labels in numpy array with shape (10000, ). """ X_train, Y_train, X_test, Y_test = load_cifar10(CIFAR10_FOLDER) return X_train, Y_train, X_test, Y_test
11,431
def random_first_file(rootpath: Union[str, Path]) -> Path: """Returns a random file from a directory tree, walking depth-first. Args: rootpath (Union[str, Path]): root path to search Returns: Path: a path to the chosen file """ iterator = os.walk(rootpath) e = next(iterator) while not e[2]: # skip directories that contain no files e = next(iterator) return Path(e[0]) / e[2][random.randint(0, len(e[2]) - 1)]
11,432
def MovieMaker(images, dpath, site, scheck, coords, bandlist, datelist, bands): """ Function to build the movie """ failed = 0 while failed <2: spath = dpath + "UoL/FIREFLIES/VideoExports/%s" % coords["name"] # for bands in bandcombo: print("\n starting %s at:" % bands, pd.Timestamp.now()) # ========== Create a single dataarray for the raster images =========== sets = OrderedDict() if type(bands) == str: imstack = images[bands] sets[bands] = xr.concat(imstack, dim="time") fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bands) elif type(bands) == list: bndnm = "multi_" + "_".join(bands) for bnd in bands: imstack = images[bnd] sets[bnd] = xr.concat(imstack, dim="time") fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bndnm) else: ipdb.set_trace() # ========== Loop over each frame of the video ========== nx = [] def frame_maker(index): # ========== Pull the infomation from the pandas part of the loop ========== indx = int(index) info = datelist.iloc[int(indx)] #rowinfo[1] # # ========== Check the dates i'm exporting ========== # nx.append(frame.time.values) # ========== create and internal subplot ========== def _subplotmaker(ax, bnds, spt): # ========== Get the data for the frame ========== frame = sets[bnds].isel(time=int(indx)) # ========== Set the colors ========== # if bnds == "NRG": color = "blue" # else: # color = "purple" # ========== Grab the data ========== frame.plot.imshow(ax=ax, rgb="band")# , transform=ccrs.PlateCarree()) ## =========== Setup the annimation =========== ax.set_title(spt) ax.scatter(coords.lon, coords.lat, 5, c=color, marker='+')#, transform=ccrs.PlateCarree()) # ========== Set up the box ========== blonO = np.min([coords["lonb_COP_min"], coords["lonb_MOD_min"]]) blatO = np.min([coords["latb_COP_min"], coords["latb_MOD_min"]]) blonM = np.max([coords["lonb_COP_max"], coords["lonb_MOD_max"]]) blatM = np.max([coords["latb_COP_max"], coords["latb_MOD_max"]]) rect = mpl.patches.Rectangle( (blonO,blatO), blonM-blonO, blatM-blatO,linewidth=2,edgecolor=color,facecolor='none') ax.add_patch(rect) # +++++ change the number od ticks ax.xaxis.set_major_locator(plt.MaxNLocator(5)) # ========== Build the plots ========== if type(bands) == str: # Set up the figure fig, axs = plt.subplots(1, figsize=(11,10)) # create the title spt = "%s %s %s frame %d" % (bands, info.satellite, info.date.split(" ")[0], datelist.iloc[indx]["index"]) # make the figure _subplotmaker(axs, bands, spt) plt.axis('scaled') else: # Set up the figure fig, axs = plt.subplots(1,len(bands), sharey=True, figsize=(20,8),) # +++++ Loop over the figure combo +++++ for ax, bnds, in zip(axs, bands): # make the figure _subplotmaker(ax, bnds, bnds) ax.set_aspect('equal') # Get rid of the excess lats for ax in axs.flat: ax.label_outer() # ========== Change parms for the entire plot ========= fig.suptitle("%s %s - Frame%03d" % ( info.satellite, pd.Timestamp(info.date).strftime('%d-%m-%Y'), datelist.iloc[indx]["index"])) # ipdb.set_trace() plt.axis('scaled') # +++++ Make the images bigger by eleminating space +++++ fig.subplots_adjust(left=0.1, right=0.9, top=1, bottom=0, wspace=0, hspace=0) #top = 1, bottom = 1, right = 1, left = 1, plt.tight_layout() plt.margins(0,0) return mplfig_to_npimage(fig) # ========== Eposrt the videos ========== mov = mpe.VideoClip(frame_maker, duration=int(datelist.shape[0])) # plays the clip (and its mask and sound) twice faster # newclip = clip.fl_time(lambda: 2*t, apply_to=['mask','audio']) # fnout = "%s/LANDSAT_5_7_8_%s_complete.txt" % (spath, coords["name"]) 
print("Starting Write of the data at:", pd.Timestamp.now()) try: mov.write_videofile(fnout, fps=1) return except Exception as ex: warn.warn(str(ex)) print("Movie making failed. This will need to be redone") failed +=1 raise ValueError
11,433
def test_list_blocks_233(): """ Test case 233: (part 1) Here are some examples showing how far content must be indented to be put under the list item: Note: The tokens are correct. The blank line on line 2 forces the paragraph closed. As it is still allowed inside of the list, only affecting the looseness of the list , the list remains open. With the paragraph closed, the `two` on line 3 is not paragraph continuation, and due to the indent, it is not part of the list. At that point, the list is closed and the new paragraph is started. """ # Arrange source_markdown = """- one two""" expected_tokens = [ "[ulist(1,1):-::2:]", "[para(1,3):]", "[text(1,3):one:]", "[end-para:::True]", "[BLANK(2,1):]", "[end-ulist:::True]", "[para(3,2): ]", "[text(3,2):two:]", "[end-para:::True]", ] expected_gfm = """<ul> <li>one</li> </ul> <p>two</p>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
11,434
def transform(args, argv): """ Usage: {0} transform [--regulate] <sourceconfig> FILE ... {0} transform [--regulate] <sourceconfig> --directory=DIR {0} transform [--regulate] --ids <raw_data_ids>... Options: -r, --regulate Run the Regulator on the transformed graph -d, --directory=DIR Transform all JSON files in DIR -i, --ids Provide RawDatum IDs to transform Transform all given JSON files. Results will be printed to stdout. """ from ipdb import launch_ipdb_on_exception def run_transformer(config, id, datum): transformer = config.get_transformer() with launch_ipdb_on_exception(): graph = transformer.transform(datum) if args.get('--regulate'): Regulator(source_config=config).regulate(graph) print('Parsed raw data "{}" into'.format(id)) pprint(graph.to_jsonld(in_edges=False)) print('\n') ids = args['<raw_data_ids>'] if ids: qs = RawDatum.objects.filter(id__in=ids) for raw in qs.iterator(): run_transformer(raw.suid.source_config, raw.id, raw.datum) return if args['FILE']: files = args['FILE'] else: files = [os.path.join(args['--directory'], x) for x in os.listdir(args['--directory']) if not x.startswith('.')] config = SourceConfig.objects.get(label=args['<sourceconfig>']) for name in files: with open(name) as fobj: data = fobj.read() run_transformer(config, name, data)
11,435
def TypeUrlToMessage(type_url): """Returns a message instance corresponding to a given type URL.""" if not type_url.startswith(TYPE_URL_PREFIX): raise ValueError("Type URL has to start with a prefix %s: %s" % (TYPE_URL_PREFIX, type_url)) full_name = type_url[len(TYPE_URL_PREFIX):] try: return symbol_database.Default().GetSymbol(full_name)() except KeyError as e: raise ProtobufTypeNotFound(e.message)
11,436
def encode_sentence(tokenized_sentence, max_word_len): """ Encode sentence as one-hot tensor of shape [None, MAX_WORD_LENGTH, CHARSET_SIZE]. """ encoded_sentence = [] sentence_len = len(tokenized_sentence) for word in tokenized_sentence: # Encode every word as matrix of shape [MAX_WORD_LENGTH, # CHARSET_SIZE] where each valid character gets encoded as one-hot # row vector of word matrix. encoded_word = np.zeros([max_word_len, len(CHARSET)]) for char, encoded_char in zip(word, encoded_word): if char in CHARSET: encoded_char[ENCODER[char]] = 1.0 encoded_sentence.append(encoded_word) return np.array(encoded_sentence), sentence_len
11,437
def main(unused_argv): """Training entry point.""" global total_feature_columns, label_feature_columns dense_feature_columns, category_feature_columns, label_feature_columns = create_feature_columns() total_feature_columns = dense_feature_columns + category_feature_columns params = { "dense_feature_columns": dense_feature_columns, "category_feature_columns": category_feature_columns, "hidden_units": FLAGS.hidden_units.split(','), "dropout_rate": FLAGS.dropout_rate, "batch_norm": FLAGS.batch_norm, "learning_rate": FLAGS.learning_rate, "num_experts": FLAGS.num_experts, "num_tasks": FLAGS.num_tasks, "expert_hidden_units": FLAGS.expert_hidden_units, "task_names": FLAGS.task_names.split(','), } print(params) # The number of tasks must equal the length of the task name list assert params["num_tasks"] == len(params["task_names"]), "num_tasks must equal length of task_names" estimator = tf.estimator.Estimator( model_fn=mmoe_model_fn, params=params, config=tf.estimator.RunConfig(model_dir=FLAGS.model_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps) ) train_spec = tf.estimator.TrainSpec( input_fn=lambda: train_input_fn(filepath=FLAGS.train_data, example_parser=example_parser, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs, shuffle_buffer_size=FLAGS.shuffle_buffer_size), max_steps=FLAGS.train_steps ) feature_spec = tf.feature_column.make_parse_example_spec(total_feature_columns) serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec) exporters = [ tf.estimator.BestExporter( name="best_exporter", serving_input_receiver_fn=serving_input_receiver_fn, exports_to_keep=5) ] eval_spec = tf.estimator.EvalSpec( input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size), throttle_secs=600, steps=None, exporters=exporters ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Evaluate Metrics. metrics = estimator.evaluate(input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)) for key in sorted(metrics): print('%s: %s' % (key, metrics[key])) results = estimator.predict(input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)) predicts_df = pd.DataFrame.from_dict(results) for task_name in params["task_names"]: predicts_df[f"{task_name}_probabilities"] = predicts_df[f"{task_name}_probabilities"].apply(lambda x: x[0]) test_df = pd.read_csv("../../dataset/wechat_algo_data1/dataframe/test.csv") for task_name in params["task_names"]: predicts_df[task_name] = test_df[task_name] predicts_df.to_csv("predictions.csv") print("after evaluate")
11,438
def footnote_ref(key, index): """Renders a footnote :returns: list of `urwid Text markup <http://urwid.org/manual/displayattributes.html#text-markup>`_ tuples. """ return render_no_change(key)
11,439
def calendar(rating): """ Generate the calendar for a given rating. """ cal = rating["cal"] garages = set() dates = set() services = {} for record in cal: if not isinstance(record, CalendarDate): continue garages.add(record.garage) dates.add(record.date) key = (record.date, record.garage) services[key] = record.service_key garages = sorted(garages) yield ["date", *garages] for date in sorted(dates): date_str = date.strftime("%Y-%m-%d") garage_values = (services.get((date, garage), "") for garage in garages) yield [date_str, *garage_values]
11,440
def mconcat(*args): """ Apply monoidal concat operation in arguments. This function infers the monoid from value, hence it requires at least one argument to operate. """ values = args[0] if len(args) == 1 else args instance = semigroup[type(values[0])] return instance(*values)
11,441
def parallelMeasurements(filename='CCD204_05325-03-02_Hopkinson_EPER_data_200kHz_one-output-mode_1.6e10-50MeV.txt', datafolder='/Users/sammy/EUCLID/CTItesting/data/', gain1=1.17, limit=105, returnScale=False): """ :param filename: :param datafolder: :param gain1: :param limit: :return: """ tmp = np.loadtxt(datafolder + filename, usecols=(0, 5)) #5 = 152.55K ind = tmp[:, 0] values = tmp[:, 1] values *= gain1 if returnScale: return ind, values else: values = values[ind > -5.] values = np.abs(values[:limit]) return values
11,442
def _parse_env(name, default=None, dtype=None): """Parse input variable from `os.environ`. Parameters ---------- name : str Name of the variable to parse from env. default : any, optional Set default value of variable. If None (default), parameter is considered required and so must be defined in environment. Otherwise, RuntimeError will be raised. dtype : type or None, optional Expected dtype of the variable. If None (default), variable will be parsed as a string. Other accepted values are: float, int, bool, str. """ try: val = os.environ[name] except KeyError: if default is not None: # Let's use the default value if var not in env return default raise RuntimeError("variable {:} not specified".format(name)) # Parse var from env using the specified dtype if dtype is None or dtype == str: return str(val) if dtype == int or dtype == float or dtype == bool: return dtype(val) else: raise TypeError( "accepted dtypes are int, float, bool, str (or None)")
11,443
def get_all_urls(the_json: str) -> list: """ Extract all URLs and title from Bookmark files Args: the_json (str): All Bookmarks read from file Returns: list(tuble): List of tublle with Bookmarks url and title """ def extract_data(data: dict): if isinstance(data, dict) and data.get('type') == 'url': urls.append({'name': data.get('name'), 'url': data.get('url')}) if isinstance(data, dict) and data.get('type') == 'folder': the_children = data.get('children') get_container(the_children) def get_container(o: Union[list, dict]): if isinstance(o, list): for i in o: extract_data(i) if isinstance(o, dict): for k, i in o.items(): extract_data(i) urls = list() get_container(the_json) s_list_dict = sorted(urls, key=lambda k: k['name'], reverse=False) ret_list = [(l.get('name'), l.get('url')) for l in s_list_dict] return ret_list
11,444
def draw_camera_wireframe(ax: plt.Axes, wireframe: Wireframe) -> None: """Draws a camera wireframe onto the axes.""" ax.add_collection( mc.LineCollection( segments=[[l.start, l.end] for l in wireframe.lines], colors=[l.color for l in wireframe.lines], linewidths=[l.width for l in wireframe.lines], antialiased=True)) dots_sizes = [d.size for d in wireframe.dots] ax.add_collection( mc.EllipseCollection( widths=dots_sizes, heights=dots_sizes, angles=0, units='xy', offsets=[d.location for d in wireframe.dots], facecolors=[d.color for d in wireframe.dots], edgecolors=[d.actual_border_color for d in wireframe.dots], linewidth=OCCLUDED_BORDER_WIDTH, transOffset=ax.transData, antialiased=True))
11,445
def _goertzel( block_size: int, sample_rate: float, freq: float ) -> Callable[[Iterable[float]], float]: """ Goertzel algorithm info: https://www.ti.com/lit/an/spra066/spra066.pdf """ k = round(block_size * (freq / sample_rate)) omega = (2 * pi * k) / block_size cos_omega = 2 * cos(omega) def _filter(samples: Iterable[float]) -> float: s_0 = 0 s_1 = 0 s_2 = 0 for x_n in samples: s_0 = x_n + cos_omega * s_1 - s_2 s_2 = s_1 s_1 = s_0 return s_0 - exp(-1.0 * omega) * s_1 return _filter
11,446
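A hypothetical usage sketch of the _goertzel factory above, assuming it is defined in the same module; the sample rate, block size and target frequency below are arbitrary choices for illustration.

from math import pi, sin

SAMPLE_RATE = 8000.0
BLOCK_SIZE = 205      # a common block length for DTMF-style detection
TARGET_FREQ = 697.0   # Hz

# Synthesize one block of a pure tone at the target frequency.
samples = [sin(2 * pi * TARGET_FREQ * n / SAMPLE_RATE) for n in range(BLOCK_SIZE)]

detect = _goertzel(BLOCK_SIZE, SAMPLE_RATE, TARGET_FREQ)
print(detect(samples))  # filter response for this block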
def SetFrames(windowID): """ Queries start and end frames and sets them for the window given by windowID """ start = cmds.playbackOptions(minTime=True, query=True) end = cmds.playbackOptions(maxTime=True, query=True) # Query start and end frames. cmds.intField(OBJECT_NAMES[windowID][0] + "_FrameStartField", edit=True, value=start) cmds.intField(OBJECT_NAMES[windowID][0] + "_FrameEndField", edit=True, value=end) UpdateFrameRange(windowID)
11,447
def rotate_xyz(x,y,z,angles=None,inverse=False): """ Rotate a set of vectors pointing in the direction x,y,z angles is a list of longitude and latitude angles to rotate by. First the longitude rotation is applied (about z axis), then the latitude angle (about y axis). """ if angles==None: return x,y,z xyz = np.array([x,y,z]) for dphi,dlon,dlat in angles: dphi*=c dlon*=c dlat*=c m0 = np.array([[1,0,0], [0, np.cos(dphi),np.sin(dphi)], [0, -np.sin(dphi), np.cos(dphi)]]) m1 = np.array([[np.cos(dlon),-np.sin(dlon),0], [np.sin(dlon), np.cos(dlon),0], [0,0,1]]) m2 = np.array([[np.cos(dlat),0,-np.sin(dlat)], [0,1,0], [np.sin(dlat), 0, np.cos(dlat)]]) m = np.dot(np.dot(m1,m2),m0) if inverse: m = np.linalg.inv(m) xyz2 = np.dot(m,xyz) return xyz2
11,448
def indented_open(Filename, Indentation = 3): """Opens a file but indents all the lines in it. In fact, a temporary file is created with all lines of the original file indented. The filehandle returned points to the temporary file.""" IndentString = " " * Indentation try: fh = open(Filename, "rb") except: print "%s:error: indented opening of file '%s' " % (this_name, Filename) sys.exit(-1) new_content = "" for line in fh.readlines(): new_content += IndentString + line fh.close() tmp_filename = Filename + ".tmp" if tmp_filename not in temporary_files: temporary_files.append(copy(tmp_filename)) fh = open(tmp_filename, "wb") fh.write(new_content) fh.close() fh = open(tmp_filename) return fh
11,449
def gan_loss( gan_model: tfgan.GANModel, generator_loss_fn=tfgan.losses.modified_generator_loss, discriminator_loss_fn=tfgan.losses.modified_discriminator_loss, gradient_penalty_weight=None, gradient_penalty_epsilon=1e-10, gradient_penalty_target=1.0, feature_matching=False, add_summaries=False): """ Create A GAN loss set, with support for feature matching. Args: bigan_model: the model feature_matching: Whether to add a feature matching loss to the encoder and generator. """ gan_loss = tfgan.gan_loss( gan_model, generator_loss_fn=generator_loss_fn, discriminator_loss_fn=discriminator_loss_fn, gradient_penalty_weight=gradient_penalty_weight, gradient_penalty_target=1.0, add_summaries=add_summaries) if feature_matching: fm_loss = feature_matching_loss(scope=gan_model.discriminator_scope.name) if add_summaries: tf.summary.scalar("feature_matching_loss", fm_loss) # or combine the original adversarial loss with FM gen_loss = gan_loss.generator_loss + fm_loss disc_loss = gan_loss.discriminator_loss gan_loss = tfgan.GANLoss(gen_loss, disc_loss) return gan_loss
11,450
def precip_workflow(data, valid, xtile, ytile, tile_bounds): """Drive the precipitation workflow""" load_stage4(data, valid, xtile, ytile) # We have MRMS a2m RASTER files prior to 1 Jan 2015, but these files used # a very poor choice of data interval of 0.1mm, which is not large enough # to capture low intensity events. Files after 1 Jan 2015 used a better # 0.02mm resolution if valid.year < 2015: load_precip_legacy(data, valid, tile_bounds) else: load_precip(data, valid, tile_bounds) qc_precip(data, valid, xtile, ytile) write_grid(np.sum(data["precip"], 2), valid, xtile, ytile)
11,451
def list_subjects(): """ List all subjects """ check_admin() subjects = Subject.query.all() return render_template('admin/subjects/subjects.html', subjects=subjects, title="Subjects")
11,452
async def async_setup(opp, config): """Set up the Tibber component.""" opp.data[DATA_OPP_CONFIG] = config if DOMAIN not in config: return True opp.async_create_task( opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config[DOMAIN], ) ) return True
11,453
def beautifyValue(v): """ Converts an object to a better version for printing, in particular: - if the object converts to float, then its float value is used - if the object can be rounded to int, then the int value is preferred Parameters ---------- v : object the object to try to beautify Returns ------- object or float or int the beautified value """ try: v = float(v) if v.is_integer(): return int(v) return v except: if type(v) == np.str_: v = v.replace('\n', '').replace(' ', '_') return v
11,454
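A few hypothetical inputs for the beautifyValue helper above, showing the intended conversions (assumes the helper and NumPy are importable).

import numpy as np

print(beautifyValue("3.0"))                   # 3, float string collapsed to int
print(beautifyValue(2.5))                     # 2.5, non-integral float kept as-is
print(beautifyValue(np.str_("hello world")))  # hello_world, whitespace replaced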
def english_to_french(english_text): """ A function written using ibm api to translate from english to french""" translation = LT.translate(text=english_text, model_id='en-fr').get_result() french_text = translation['translations'][0]['translation'] return french_text
11,455
def get_responsibilities(): """Returns a list of the rooms in the approvers responsibility.""" email = get_jwt_identity() # Checks if the reader is an approver approver = Approver.query.filter_by(email=email).first() if not approver: return bad_request("This user does not have the approver role!") room_list = get_responsibilites_helper(approver) return ok({"responsibilities": room_list})
11,456
def test_country_code_field(result, country_code_list): """Check that a value of 'countryCode' is present in the list of country codes.""" assert result.json()['countryCode'] in country_code_list, \ "The value of field 'countryCode' not present in the list of country codes."
11,457
def get_test_loader(dataset): """ Get test dataloader of source domain or target domain :return: dataloader """ if dataset == 'MNIST': transform = transforms.Compose([ transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)), transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std) ]) data = datasets.MNIST(root= params.mnist_path, train= False, transform= transform, download= True) dataloader = DataLoader(dataset= data, batch_size= 1, shuffle= False) elif dataset == 'MNIST_M': transform = transforms.Compose([ # transforms.RandomCrop((28)), transforms.CenterCrop((28)), transforms.ToTensor(), transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std) ]) data = datasets.ImageFolder(root=params.mnistm_path + '/test', transform= transform) dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False) elif dataset == 'MNIST_M_5': transform = transforms.Compose([ # transforms.RandomCrop((28)), transforms.CenterCrop((28)), transforms.ToTensor(), transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std) ]) data = datasets.ImageFolder(root=params.mnistm_5_path + '/test', transform= transform) dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False) elif dataset == 'SVHN': transform = transforms.Compose([ transforms.CenterCrop((28)), transforms.ToTensor(), transforms.Normalize(mean= params.dataset_mean, std = params.dataset_std) ]) data = datasets.SVHN(root= params.svhn_path, split= 'test', transform = transform, download= True) dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False) #elif dataset == 'SynDig': # transform = transforms.Compose([ # transforms.CenterCrop((28)), # transforms.ToTensor(), # transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std) # ]) # # data = SynDig.SynDig(root= params.syndig_path, split= 'test', transform= transform, download= False) # # dataloader = DataLoader(dataset= data, batch_size= 1, shuffle= False) elif dataset == 'dslr': transform = transforms.Compose([ transforms.RandomCrop((224)), transforms.ToTensor(), transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std) ]) data = datasets.ImageFolder(params.dslr_path + '/test', transform=transform) dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True) elif dataset == "art": transform = transforms.Compose([ ResizeImage(256), transforms.RandomCrop((224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]) ]) data = datasets.ImageFolder(params.art_path + '/test', transform=transform) dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True) elif dataset == "clipart": transform = transforms.Compose([ ResizeImage(256), transforms.RandomCrop((224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) data = datasets.ImageFolder(params.clipart_path + '/test', transform=transform) dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True) else: raise Exception('There is no dataset named {}'.format(str(dataset))) return dataloader
11,458
def get_wem_facility_intervals(from_date: Optional[datetime] = None) -> WEMFacilityIntervalSet: """Obtains WEM facility intervals from NEM web. Will default to most recent date @TODO not yet smart enough to know if it should check current or archive """ content = wem_downloader(_AEMO_WEM_SCADA_URL, from_date) _models = parse_wem_facility_intervals(content) wem_set = WEMFacilityIntervalSet( crawled_at=datetime.now(), live=False, source_url=_AEMO_WEM_SCADA_URL, intervals=_models ) return wem_set
11,459
def push( message, user: str = None, api_token: str = None, device: str = None, title: str = None, url: str = None, url_title: str = None, priority: str = None, timestamp: str = None, sound: str = None, ) -> typing.Union[http.client.HTTPResponse, typing.BinaryIO]: """Pushes the notification. API Reference: https://pushover.net/api Args: message: Your message user: The user/group key (not e-mail address) of your user (or you), viewable when logged into our dashboard (often referred to as USER_KEY in our documentation and code examples) api_token: Your application's API token device: Your user's device name to send the message directly to that device, rather than all of the user's devices title: Your message's title, otherwise your app's name is used url: A supplementary URL to show with your message url_title: A title for your supplementary URL, otherwise just the URL is shown priority: Send as:1 to always send as a quiet notification, 1 to display as high--priority and bypass the user's quiet hours, or 2 to also require confirmation from the user timestamp: A Unix timestamp of your message's date and time to display to the user, rather than the time your message is received by our API sound: The name of one of the sounds supported by device clients to override the user's default sound choice Returns: HTTP response from API call """ if user is None or api_token is None: user, api_token = get_credentials() api_url = "https://api.pushover.net/1/messages.json" if title is None: if getattr(__main__, "__file__", None): title = os.path.basename(__main__.__file__) else: title = "n8scripts" payload_dict = { "token": api_token, "user": user, "message": message, "device": device, "title": title, "url": url, "url_title": url_title, "priority": priority, "timestamp": timestamp, "sound": sound, } payload = urllib.parse.urlencode({k: v for k, v in payload_dict.items() if v}) with urllib.request.urlopen(api_url, data=payload.encode()) as resp: return resp
11,460
def _get_static_predicate(pred): """Helper function for statically evaluating predicates in `cond`.""" if pred in {0, 1}: # Accept 1/0 as valid boolean values pred_value = bool(pred) elif isinstance(pred, bool): pred_value = pred elif isinstance(pred, tf.Tensor): pred_value = tf.get_static_value(pred) # TODO(jamieas): remove the dependency on `pywrap_tensorflow`. # pylint: disable=protected-access if pred_value is None: pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph, pred._as_tf_output()) # pylint: enable=protected-access else: raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. " "Found instead: %s" % pred) return pred_value
11,461
async def test_webhook_platform_init(hass, webhook_platform): """Test initialization of the webhooks platform.""" assert hass.services.has_service(DOMAIN, SERVICE_SEND_MESSAGE) is True
11,462
def add_init_or_construct(template, variable_slot, new_data, scope, add_location=-1): """Add init or construct statement.""" if isinstance(new_data, list): template[variable_slot][scope].extend(new_data) return template if add_location < 0: template[variable_slot][scope].append(new_data) else: template[variable_slot][scope].insert(add_location, new_data) return template
11,463
def im2col_indices(x, field_height, field_width, padding=1, stride=1): """ An implementation of im2col based on some fancy indexing """ # Zero-pad the input p = padding x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant') k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride) cols = x_padded[:, k, i, j] C = x.shape[1] cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1) return cols
11,464
def guesses_left(): """ Displays remaining number of guesses """ print "Number of remaining guesses is", GUESSES
11,465
def get_subdirs(dir): """Get the sub-directories of a given directory.""" return [os.path.join(dir,entry) for entry in os.listdir(dir) \ if os.path.isdir(os.path.join(dir,entry))]
11,466
def nms_1d(src, win_size, file_duration): """1D Non maximum suppression src: vector of length N """ pos = [] src_cnt = 0 max_ind = 0 ii = 0 ee = 0 width = src.shape[0]-1 while ii <= width: if max_ind < (ii - win_size): max_ind = ii - win_size ee = np.minimum(ii + win_size, width) while max_ind <= ee: src_cnt += 1 if src[int(max_ind)] > src[int(ii)]: break max_ind += 1 if max_ind > ee: pos.append(ii) max_ind = ii+1 ii += win_size ii += 1 pos = np.asarray(pos).astype(np.int) val = src[pos] # remove peaks near the end inds = (pos + win_size) < src.shape[0] pos = pos[inds] val = val[inds] # set output to between 0 and 1, then put it in the correct time range pos = pos / float(src.shape[0]) pos = pos*file_duration return pos, val
11,467
def tensor_text_to_canvas(image, text=None, col=8, scale=False): """ :param image: Tensor / numpy in shape of (N, C, H, W) :param text: [str, ] * N :param col: :return: uint8 numpy of (H, W, C), in scale [0, 255] """ if scale: image = image / 2 + 0.5 if torch.is_tensor(image): image = image.cpu().detach().numpy() image = write_text_on_image(image, text) # numpy (N, C, H, W) in scale [0, 1] image = vutils.make_grid(torch.from_numpy(image), nrow=col) # (C, H, W) image = image.numpy().transpose([1, 2, 0]) image = np.clip(255 * image, 0, 255).astype(np.uint8) return image
11,468
def camera_loop(app): """ Check if camera is in use or not and free resources :param app: tornado application instance """ if app.camera: if app.config.CAMERA["ffmpeg"]: diff = datetime.datetime.now() - handlers.VideoHandler.last_packet else: diff = datetime.datetime.now() - handlers.ImageHandler.last_packet if diff > datetime.timedelta(seconds=15): try: app.camera.stop() app.camera = None except Exception as exc: logging.error("cannot stop camera %s", exc) tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(seconds=15), partial(camera_loop, app))
11,469
def is_sequence(input): """Return a bool indicating whether input is a sequence. Parameters ---------- input The input object. Returns ------- bool ``True`` if input is a sequence otherwise ``False``. """ return (isinstance(input, six.collections_abc.Sequence) and not isinstance(input, six.string_types))
11,470
def extract_filename(path): """Parse out the file name from a file path Parameters ---------- path : string input path to parse filename from Returns ------- file_name : string file name (last part of path), empty string if none found """ # get last group of a path if path: file_name = os.path.basename(path) file_name = re.match(".*?\s*(\S+\.[^ \s,]+)\s*", file_name) if file_name: return file_name.group(1) return ''
11,471
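Two hypothetical calls to the extract_filename helper above, assuming it is in scope; the paths are made up.

print(extract_filename("/data/runs/2021/report_final.csv"))  # report_final.csv
print(extract_filename(""))                                  # empty string for an empty path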
def auto_prefetch_relationship(name, prepare_related_queryset=noop, to_attr=None): """ Given the name of a relationship, return a prepare function which introspects the relationship to discover its type and generates the correct set of `select_related` and `include_fields` calls to apply to efficiently load it. A queryset function may also be passed, which will be applied to the related queryset. This is by far the most complicated part of the entire library. The reason it's so complicated is because Django's related object descriptors are inconsistent: each type has a slightly different way of accessing its related queryset, the name of the field on the other side of the relationship, etc. """ def prepare(queryset): related_descriptor = getattr(queryset.model, name) if type(related_descriptor) in ( ForwardOneToOneDescriptor, ForwardManyToOneDescriptor, ): return prefetch_forward_relationship( name, related_descriptor.field.related_model.objects.all(), prepare_related_queryset, to_attr, )(queryset) if type(related_descriptor) is ReverseOneToOneDescriptor: return prefetch_reverse_relationship( name, related_descriptor.related.field.name, related_descriptor.related.field.model.objects.all(), prepare_related_queryset, to_attr, )(queryset) if type(related_descriptor) is ReverseManyToOneDescriptor: return prefetch_reverse_relationship( name, related_descriptor.rel.field.name, related_descriptor.rel.field.model.objects.all(), prepare_related_queryset, to_attr, )(queryset) if type(related_descriptor) is ManyToManyDescriptor: field = related_descriptor.rel.field if related_descriptor.reverse: related_queryset = field.model.objects.all() else: related_queryset = field.target_field.model.objects.all() return prefetch_many_to_many_relationship( name, related_queryset, prepare_related_queryset, to_attr, )(queryset) return prepare
11,472
def no_conjugate_member(magic_flag): """should not raise E1101 on something.conjugate""" if magic_flag: something = 1.0 else: something = 1.0j if isinstance(something, float): return something return something.conjugate()
11,473
def sub_ntt(f_ntt, g_ntt): """Substraction of two polynomials (NTT representation).""" return sub_zq(f_ntt, g_ntt)
11,474
def is_pareto_efficient(costs): """ Find the pareto-efficient points given an array of costs. Parameters ---------- costs : np.ndarray Array of shape (n_points, n_costs). Returns ------- is_efficient_mask : np.ndarray (dtype:bool) Array of which elements in costs are pareto-efficient. """ is_efficient = np.arange(costs.shape[0]) n_points = costs.shape[0] next_point_index = 0 # Next index in the is_efficient array to search for while next_point_index<len(costs): nondominated_point_mask = np.any(costs<costs[next_point_index], axis=1) nondominated_point_mask[next_point_index] = True is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points costs = costs[nondominated_point_mask] next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1 is_efficient_mask = np.zeros(n_points, dtype=bool) is_efficient_mask[is_efficient] = True return is_efficient_mask
11,475
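A small worked example for is_pareto_efficient above, using a fabricated cost matrix (assumes the function and NumPy are in scope). Lower is better in both columns, so the third point is dominated by the second.

import numpy as np

costs = np.array([
    [1.0, 4.0],   # efficient
    [2.0, 2.0],   # efficient
    [3.0, 3.0],   # dominated by [2.0, 2.0]
    [4.0, 1.0],   # efficient
])
print(is_pareto_efficient(costs))  # [ True  True False  True]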
def merge_dict(base, delta, merge_lists=False, skip_empty=False, no_dupes=True, new_only=False): """ Recursively merges two dictionaries including dictionaries within dictionaries. Args: base: Target for merge delta: Dictionary to merge into base merge_lists: if a list is found merge contents instead of replacing skip_empty: if an item in delta is empty, dont overwrite base no_dupes: when merging lists deduplicate entries new_only: only merge keys not yet in base """ for k, d in delta.items(): b = base.get(k) if isinstance(d, dict) and isinstance(b, dict): merge_dict(b, d, merge_lists, skip_empty, no_dupes, new_only) else: if new_only and k in base: continue if skip_empty and not d: # dont replace if new entry is empty pass elif all((isinstance(b, list), isinstance(d, list), merge_lists)): if no_dupes: base[k] += [item for item in d if item not in base[k]] else: base[k] += d else: base[k] = d return base
11,476
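An illustrative call to merge_dict above with fabricated config dictionaries, showing the nested merge and the de-duplicated list handling; it assumes the function is in scope.

base = {"name": "svc", "opts": {"retries": 1, "tags": ["a"]}}
delta = {"opts": {"retries": 3, "tags": ["a", "b"]}, "debug": True}

merge_dict(base, delta, merge_lists=True, no_dupes=True)
print(base)  # {'name': 'svc', 'opts': {'retries': 3, 'tags': ['a', 'b']}, 'debug': True}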
def database_names(): """ Display database names """ click.echo(neohelper.get_database_names())
11,477
def test_nullable( dtype: pandas_engine.DataType, data: st.DataObject, ): """Test nullable checks on koalas dataframes.""" checks = None if dtypes.is_datetime(type(dtype)) and MIN_TIMESTAMP is not None: checks = [pa.Check.gt(MIN_TIMESTAMP)] nullable_schema = pa.DataFrameSchema( {"field": pa.Column(dtype, checks=checks, nullable=True)} ) nonnullable_schema = pa.DataFrameSchema( {"field": pa.Column(dtype, checks=checks, nullable=False)} ) null_sample = data.draw(nullable_schema.strategy(size=5)) nonnull_sample = data.draw(nonnullable_schema.strategy(size=5)) # for some reason values less than MIN_TIMESTAMP are still sampled. if dtype is pandas_engine.DateTime or isinstance( dtype, pandas_engine.DateTime ): if MIN_TIMESTAMP is not None and (null_sample < MIN_TIMESTAMP).any( axis=None ): with pytest.raises( OverflowError, match="mktime argument out of range" ): ks.DataFrame(null_sample) return if MIN_TIMESTAMP is not None and (nonnull_sample < MIN_TIMESTAMP).any( axis=None ): with pytest.raises( OverflowError, match="mktime argument out of range" ): ks.DataFrame(nonnull_sample) return else: ks_null_sample = ks.DataFrame(null_sample) ks_nonnull_sample = ks.DataFrame(nonnull_sample) n_nulls = ks_null_sample.isna().sum().item() assert ks_nonnull_sample.notna().all().item() assert n_nulls >= 0 if n_nulls > 0: with pytest.raises(pa.errors.SchemaError): nonnullable_schema(ks_null_sample)
11,478
def redirect_or_error(opt, key, override=''):
    """
    Tests if a redirect URL is available and redirects, or raises a
    MissingRequiredSetting exception.
    """
    r = (override or opt)
    if r:
        return redirect(r)
    raise MissingRequiredSetting('%s.%s' % (
        options.KEY_DATA_DICT, key))
11,479
def extract(struc, calc):
    """
    Extract & write electrostatic potential and densities.

    Args:
        struc: an internal molecular structure object
        calc: internal calculation object
    """
    # Extract the ESP
    esp = calc.get_electrostatic_potential()
    # Convert units
    esp_hartree = esp / Hartree
    write('esp.cube', struc, data=esp_hartree)

    # Pseudo-density, does not seem to be used in the workflow
    # rho_pseudo = calc.get_pseudo_density()
    # rho_pseudo_per_bohr_cube = rho_pseudo * Bohr**3
    # write('rho_pseudo.cube', struc, data=rho_pseudo_per_bohr_cube)

    # Density
    rho = calc.get_all_electron_density()
    rho_per_bohr_cube = rho * Bohr**3
    write('rho.cube', struc, data=rho_per_bohr_cube)
11,480
def stdev(df):
    """Calculate the standard deviation of the difference between the 'rate' and 'w1_rate' columns."""
    return np.std(df['rate'] - df['w1_rate'])
11,481
def add_to_cart(listing_id):
    """Adds listing to cart with specified quantity"""
    listing = Listing.query.filter_by(id=listing_id, available=True).first()
    if not listing:
        abort(404)
    if not request.json:
        abort(400)
    if ('quantity' not in request.json or
            type(request.json['quantity']) is not int):
        abort(400)

    cart_item = CartItem.query.filter_by(
        merchant_id=current_user.id,
        listing_id=listing_id
    ).first()
    new_quantity = request.json['quantity']
    is_currently_incart = cart_item is not None

    if new_quantity == 0 and is_currently_incart:
        db.session.delete(cart_item)
    elif new_quantity != 0 and is_currently_incart:
        cart_item.quantity = new_quantity
    elif new_quantity != 0 and not is_currently_incart:
        db.session.add(
            CartItem(
                merchant_id=current_user.id,
                listing_id=listing_id,
                quantity=new_quantity
            )
        )
    db.session.commit()
    name = Listing.query.filter_by(id=listing_id).first().name
    return jsonify({'quantity': new_quantity, 'name': name})
11,482
def get_zip_code_prefixes(df_geolocation: pd.DataFrame) -> pd.DataFrame:
    """ Gets the first one to four digits of the zip codes as prefix columns. """
    df = df_geolocation.copy()
    df['geolocation_zip_code_prefix_1_digits'] = df['geolocation_zip_code_prefix'].str[0:1]
    df['geolocation_zip_code_prefix_2_digits'] = df['geolocation_zip_code_prefix'].str[0:2]
    df['geolocation_zip_code_prefix_3_digits'] = df['geolocation_zip_code_prefix'].str[0:3]
    df['geolocation_zip_code_prefix_4_digits'] = df['geolocation_zip_code_prefix'].str[0:4]
    return df
11,483
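# Illustrative usage sketch for get_zip_code_prefixes above (not part of the original
# entry); the zip codes are made up and pandas is assumed to be imported as pd.
import pandas as pd

df_geo = pd.DataFrame({"geolocation_zip_code_prefix": ["01409", "22041"]})
out = get_zip_code_prefixes(df_geo)
print(out["geolocation_zip_code_prefix_3_digits"].tolist())  # ['014', '220']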
def elina_scalar_infty(scalar):
    """
    Return -1 if an ElinaScalar is -infinity, 0 if it is finite and 1 if it is +infinity.

    Parameters
    ----------
    scalar : ElinaScalarPtr
        Pointer to the ElinaScalar that needs to be tested for infinity.

    Returns
    -------
    result : c_int
        Integer stating the result of the testing.
    """
    result = None
    try:
        elina_scalar_infty_c = elina_auxiliary_api.elina_scalar_infty
        elina_scalar_infty_c.restype = c_int
        elina_scalar_infty_c.argtypes = [ElinaScalarPtr]
        result = elina_scalar_infty_c(scalar)
    except:
        print('Problem with loading/calling "elina_scalar_infty" from "libelinaux.so"')
        print('Make sure you are passing ElinaScalarPtr to the function')

    return result
11,484
def _package_dmg(paths, dist, config):
    """Packages a Chrome application bundle into a DMG.

    Args:
        paths: A |model.Paths| object.
        dist: The |model.Distribution| for which the product was customized.
        config: The |config.CodeSignConfig| object.

    Returns:
        A path to the produced DMG file.
    """
    packaging_dir = paths.packaging_dir(config)

    if dist.channel_customize:
        dsstore_file = 'chrome_{}_dmg_dsstore'.format(dist.channel)
        icon_file = 'chrome_{}_dmg_icon.icns'.format(dist.channel)
    else:
        dsstore_file = 'chrome_dmg_dsstore'
        icon_file = 'chrome_dmg_icon.icns'

    dmg_path = os.path.join(paths.output,
                            '{}.dmg'.format(config.packaging_basename))
    app_path = os.path.join(paths.work, config.app_dir)

    # A locally-created empty directory is more trustworthy than /var/empty.
    empty_dir = os.path.join(paths.work, 'empty')
    commands.make_dir(empty_dir)

    # Make the disk image. Don't include any customized name fragments in
    # --volname because the .DS_Store expects the volume name to be constant.
    # Don't put a name on the /Applications symbolic link because the same disk
    # image is used for all languages.
    # yapf: disable
    pkg_dmg = [
        os.path.join(packaging_dir, 'pkg-dmg'),
        '--verbosity', '0',
        '--tempdir', paths.work,
        '--source', empty_dir,
        '--target', dmg_path,
        '--format', 'UDBZ',
        '--volname', config.app_product,
        '--copy', '{}:/'.format(app_path),
        '--symlink', '/Applications:/ ',
    ]
    # yapf: enable

    if dist.inflation_kilobytes:
        pkg_dmg += [
            '--copy',
            '{}/inflation.bin:/.background/inflation.bin'.format(packaging_dir)
        ]

    if config.is_chrome_branded():
        # yapf: disable
        pkg_dmg += [
            '--icon', os.path.join(packaging_dir, icon_file),
            '--copy', '{}/keystone_install.sh:/.keystone_install'.format(packaging_dir),
            '--mkdir', '.background',
            '--copy', '{}/chrome_dmg_background.png:/.background/background.png'.format(
                packaging_dir),
            '--copy', '{}/{}:/.DS_Store'.format(packaging_dir, dsstore_file),
        ]
        # yapf: enable

    commands.run_command(pkg_dmg)
    return dmg_path
11,485
def get_target_grid(return_type, **kwargs):
    """
    Function: get polar or cartesian coordinates of targets
    Inputs:
    - return_type: str. "cart" for cartesian coordinates; "polar" for polar coordinates.
    - kwargs: additional params.
        - rel_points: dictionary. relative length for target positions and heel positions
    Outputs:
    - if returning cartesian coordinates: numpy array. x and y coordinates of targets in cartesian coordinates.
    - if returning polar coordinates: dictionary {type('c', 'l', 'h'): numpy array}. polar coordinates of target centers ('c')/lower bounds ('l')/upper bounds ('h')
    """
    ### unravel params.
    if('rel_points' in kwargs.keys()):
        rel_points = kwargs['rel_points']

    ### calculate ideal grid
    #### before standardization
    ##### distance: normal
    dT0T2 = dT0T5 = dT2T4 = dT4T5 = 1
    dT0T4 = dT2T3 = (dT0T5 ** 2 + dT4T5 ** 2 - 2*dT4T5*dT0T5*math.cos(math.radians(100)))**0.5
    dT2T5 = dT3T7 = (dT0T5 ** 2 + dT4T5 ** 2 - 2*dT0T2*dT0T5*math.cos(math.radians(80)))**0.5
    dT0T3 = dT0T7 = ((dT2T5/2) ** 2 + (dT2T3*1.5) ** 2) ** 0.5

    ##### angles: normal
    aT0T2 = math.radians(80)/2
    aT0T5 = - math.radians(80)/2
    aT0T3 = math.acos((dT0T3 ** 2 + dT0T7 ** 2 - dT3T7 ** 2)/(2*dT0T3*dT0T7))/2
    aT0T7 = - aT0T3
    aT0T4 = 0

    ##### target coordinates
    T0 = np.array((0,0))
    T2 = np.array((aT0T2, dT0T2))
    T3 = np.array((aT0T3, dT0T3))
    T4 = np.array((aT0T4, dT0T4))
    T5 = np.array((aT0T5, dT0T2))
    T7 = np.array((aT0T7, dT0T7))

    target_grid_polar = np.stack((T0, T2, T3, T4, T5, T7), axis = 0)
    target_grid_cart = np.zeros((6,2))
    for i in range(6):
        target_grid_cart[i,:] = polar_to_cartesian(target_grid_polar[i,1], target_grid_polar[i,0])

    ##### heel coordinates
    alpha = 0.2354
    a = 0.2957
    b = 0.5
    r_heels_cart = np.zeros((6,2))
    r_heels_polar = np.zeros((6,2))
    for n in range(1,7):
        phi_n = -(alpha + (n-1)*(np.pi - 2*alpha)/5)
        x = a*np.cos(phi_n)
        y = b*np.sin(phi_n)
        r, theta = cartesian_to_polar(-y, x)
        r_heels_cart[n-1, :] = [-y,x]
        r_heels_polar[n-1, :] = [theta, r]

    ##### intersect
    c = my_help.line_intersection((r_heels_cart[2,:], target_grid_cart[2,:]),
                                  (r_heels_cart[3,:], target_grid_cart[5,:]))

    #### after standardization
    dTiC = np.zeros((6,1))
    for i in range(1,6):
        dTiC[i] = np.linalg.norm(target_grid_cart[i,:] - c)
    dTiC = dTiC/dTiC[3]

    aTiCT4 = np.zeros((6,1))
    for i in range(1,6):
        aTiCT4[i] = my_int.inner_angle(target_grid_cart[i,:] - c, target_grid_cart[3,:] - c, True)
        if(i in [4,5]):
            aTiCT4[i] = - aTiCT4[i]

    ### calculate output values
    if(return_type == 'cart'):
        grid_cart = np.zeros((6,2))
        for i in range(1,6):
            grid_cart[i,0], grid_cart[i,1] = polar_to_cartesian(dTiC[i][0], aTiCT4[i][0])
        return grid_cart

    elif(return_type == 'polar'):
        target_grid_polar = {}
        for t in ['c', 'l', 'h']:
            T0 = np.array((aTiCT4[0], -rel_points[f'T0{t}']))
            T2 = np.array((aTiCT4[1], rel_points[f'T2{t}']))
            T3 = np.array((aTiCT4[2], rel_points[f'T3{t}']))
            T4 = np.array((aTiCT4[3], rel_points[f'T4{t}']))
            T5 = np.array((aTiCT4[4], rel_points[f'T5{t}']))
            T3_ = np.array((aTiCT4[5], rel_points[f'T7{t}']))
            C0 = np.array((aTiCT4[0], rel_points['center']))
            target_grid_polar[t] = np.stack((T0, T2, T3, T4, T5, T3_, C0), axis = 0)
        return target_grid_polar
11,486
def test_oracle_cdc_offset_chain(sdc_builder, sdc_executor, database, buffer_locally):
    """
    Test to check that offset between pipeline re-starts is tracked properly,
    especially focusing on JSON-based offsets.
    """
    source_table = None
    target_table = None
    pipeline = None

    try:
        database_connection = database.engine.connect()

        source_table_name = get_random_string(string.ascii_uppercase, 16)
        logger.info('Creating source table %s in %s database ...', source_table_name, database.type)
        source_table = sqlalchemy.Table(source_table_name, sqlalchemy.MetaData(),
                                        sqlalchemy.Column('IDENTIFIER', sqlalchemy.Integer, primary_key=True),
                                        sqlalchemy.Column('NAME', sqlalchemy.String(32)),
                                        sqlalchemy.Column('SURNAME', sqlalchemy.String(64)),
                                        sqlalchemy.Column('COUNTRY', sqlalchemy.String(2)),
                                        sqlalchemy.Column('CITY', sqlalchemy.String(3)))
        source_table.create(database.engine)

        target_table_name = get_random_string(string.ascii_uppercase, 16)
        logger.info('Creating target table %s in %s database ...', target_table_name, database.type)
        target_table = sqlalchemy.Table(target_table_name, sqlalchemy.MetaData(),
                                        sqlalchemy.Column('IDENTIFIER', sqlalchemy.Integer, primary_key=True),
                                        sqlalchemy.Column('NAME', sqlalchemy.String(32)),
                                        sqlalchemy.Column('SURNAME', sqlalchemy.String(64)),
                                        sqlalchemy.Column('COUNTRY', sqlalchemy.String(2)),
                                        sqlalchemy.Column('CITY', sqlalchemy.String(3)))
        target_table.create(database.engine)

        database_last_scn = _get_last_scn(database_connection)

        pipeline_builder = sdc_builder.get_pipeline_builder()
        oracle_cdc_client = pipeline_builder.add_stage('Oracle CDC Client')
        oracle_cdc_client.set_attributes(dictionary_source='DICT_FROM_ONLINE_CATALOG',
                                         tables=[{'schema': database.username.upper(),
                                                  'table': target_table_name,
                                                  'excludePattern': ''}],
                                         buffer_changes_locally=buffer_locally,
                                         logminer_session_window='${5 * MINUTES}',
                                         maximum_transaction_length='${0 * SECONDS}',
                                         db_time_zone='UTC',
                                         max_batch_size_in_records=1,
                                         initial_change='SCN',
                                         start_scn=database_last_scn,
                                         send_redo_query_in_headers=True)
        wiretap = pipeline_builder.add_wiretap()
        oracle_cdc_client >> wiretap.destination
        pipeline = pipeline_builder.build('Oracle CDC Origin Offset Testing Pipeline').configure_for_environment(database)
        sdc_executor.add_pipeline(pipeline)

        number_of_runs = 10
        number_of_rows = 100
        global_identifier = 0

        for runs in range(0, number_of_runs):

            database_transaction = database_connection.begin()
            for identifier in range(global_identifier, global_identifier + number_of_rows):
                table_identifier = identifier
                table_name = "'" + str(uuid.uuid4())[:32] + "'"
                table_surname = "'" + str(uuid.uuid4())[:64] + "'"
                table_country = "'" + str(uuid.uuid4())[:2] + "'"
                table_city = "'" + str(uuid.uuid4())[:3] + "'"
                sentence = f'insert into {source_table} values ({table_identifier}, {table_name}, {table_surname}, {table_country}, {table_city})'
                sql = text(sentence)
                database_connection.execute(sql)
                global_identifier = identifier + 1
            database_transaction.commit()

            sentence = f'select count(*) from {source_table}'
            sql = text(sentence)
            q = database_connection.execute(sql).fetchone()
            logger.info(f'(1) Total in source table: {q}')

            database_transaction = database_connection.begin()
            sentence = f'insert into {target_table_name} select * from {source_table_name}'
            sql = text(sentence)
            database_connection.execute(sql)
            database_transaction.commit()

            sentence = f'select count(*) from {target_table}'
            sql = text(sentence)
            q = database_connection.execute(sql).fetchone()
            logger.info(f'(2) Total in target table: {q}')

            database_transaction = database_connection.begin()
            sentence = f'update {target_table_name} set CITY = COUNTRY'
            sql = text(sentence)
            database_connection.execute(sql)
            database_transaction.commit()

            sentence = f'select count(*) from {target_table}'
            sql = text(sentence)
            q = database_connection.execute(sql).fetchone()
            logger.info(f'(3) Total in target table: {q}')

            database_transaction = database_connection.begin()
            sentence = f'delete from {target_table_name}'
            sql = text(sentence)
            database_connection.execute(sql)
            database_transaction.commit()

            sentence = f'select count(*) from {target_table}'
            sql = text(sentence)
            q = database_connection.execute(sql).fetchone()
            logger.info(f'(4) Total in target table: {q}')

            database_transaction = database_connection.begin()
            sentence = f'delete from {source_table_name}'
            sql = text(sentence)
            database_connection.execute(sql)
            database_transaction.commit()

            sentence = f'select count(*) from {source_table}'
            sql = text(sentence)
            q = database_connection.execute(sql).fetchone()
            logger.info(f'(5) Total in source table: {q}')

            pipeline_command = sdc_executor.start_pipeline(pipeline)
            pipeline_command.wait_for_pipeline_output_records_count(3 * number_of_rows)

            q_insert = sum(1 for record in wiretap.output_records
                           if record.header.values["oracle.cdc.operation"] == 'INSERT')
            q_update = sum(1 for record in wiretap.output_records
                           if record.header.values["oracle.cdc.operation"] == 'UPDATE')
            q_delete = sum(1 for record in wiretap.output_records
                           if record.header.values["oracle.cdc.operation"] == 'DELETE')

            logger.info(f'Total INSERT\'s {q_insert}')
            logger.info(f'Total UPDATE\'s {q_update}')
            logger.info(f'Total DELETE\'s {q_delete}')

            for record in wiretap.output_records:
                logger.info(f'Run: {runs} :: '
                            f'{record.header.values["oracle.cdc.sequence.internal"]} - '
                            f'{record.header.values["oracle.cdc.operation"]} - '
                            f'{record.header.values["sdc.operation.type"]} - '
                            f'{record}')

            assert q_insert == number_of_rows
            assert q_update == number_of_rows
            assert q_delete == number_of_rows

            sdc_executor.stop_pipeline(pipeline=pipeline, force=False)
            wiretap.reset()
            logger.info('Pipeline stopped')

    finally:
        if source_table is not None:
            source_table.drop(database.engine)
        if target_table is not None:
            target_table.drop(database.engine)
11,487
def s2_filename_to_md(filename):
    """
    This function converts the S2 filename into a small dict of metadata

    :param filename:
    :return: dict
    """
    basename = system.basename(filename)
    metadata = dict()
    splits = basename.split("_")
    if len(splits) < 4:
        raise Exception("{} might not be an S2 product".format(filename))
    metadata["tile"] = splits[3]
    datestr = splits[1]
    metadata["date"] = datetime.datetime.strptime(datestr[:-1], '%Y%m%d-%H%M%S-%f')
    return metadata
11,488
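# Illustrative usage sketch for s2_filename_to_md above (not part of the original
# entry). The filename below assumes a Theia-style Sentinel-2 product name, which
# matches the split positions the function relies on; the snippet's own imports
# (system, datetime) are assumed to be available.
md = s2_filename_to_md("SENTINEL2A_20160914-105228-885_L2A_T31TCJ_D_V1-1")
print(md["tile"])  # 'T31TCJ'
print(md["date"])  # datetime parsed from '20160914-105228-88'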
def remove_freshman_sig(packet_id, freshman):
    """
    Removes the given freshman's signature from the given packet.
    :param packet_id: The ID of the packet
    :param freshman: The freshman's RIT username
    """
    remove_sig(packet_id, freshman, False)
11,489
def parse_command_line_args():
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--service_account_json',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help='Path to service account JSON file.')
    parser.add_argument(
        '--project_id',
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        help='GCP project name')
    parser.add_argument(
        '--cloud_region',
        default='us-central1',
        help='GCP region')
    parser.add_argument(
        '--dataset_id',
        default=None,
        help='Name of dataset')
    parser.add_argument(
        '--hl7v2_store_id',
        default=None,
        help='Name of HL7v2 store')
    parser.add_argument(
        '--hl7v2_message_file',
        default=None,
        help='A file containing a base64-encoded HL7v2 message')
    parser.add_argument(
        '--hl7v2_message_id',
        default=None,
        help='The identifier for the message returned by the server')
    parser.add_argument(
        '--label_key',
        default=None,
        help='Arbitrary label key to apply to the message')
    parser.add_argument(
        '--label_value',
        default=None,
        help='Arbitrary label value to apply to the message')

    command = parser.add_subparsers(dest='command')

    command.add_parser('create-hl7v2-message', help=create_hl7v2_message.__doc__)
    command.add_parser('delete-hl7v2-message', help=delete_hl7v2_message.__doc__)
    command.add_parser('get-hl7v2-message', help=get_hl7v2_message.__doc__)
    command.add_parser('ingest-hl7v2-message', help=ingest_hl7v2_message.__doc__)
    command.add_parser('list-hl7v2-messages', help=list_hl7v2_messages.__doc__)
    command.add_parser('patch-hl7v2-message', help=patch_hl7v2_message.__doc__)

    return parser.parse_args()
11,490
def realtime_performance_sector(raw: bool, export: str):
    """Display Real-Time Performance sector. [Source: AlphaVantage]

    Parameters
    ----------
    raw : bool
        Output only raw data
    export : str
        Export dataframe data to csv,json,xlsx file
    """
    df_sectors = alphavantage_model.get_sector_data()

    # pylint: disable=invalid-sequence-index
    df_rtp = df_sectors["Rank A: Real-Time Performance"]

    if raw:
        if gtff.USE_TABULATE_DF:
            print(
                tabulate(
                    df_rtp.to_frame(),
                    showindex=True,
                    headers=["Sector", "Real-Time Performance"],
                    floatfmt=".5f",
                    tablefmt="fancy_grid",
                )
            )
        else:
            console.print(df_rtp.to_string())
    else:
        df_rtp.plot(kind="bar")
        plt.title("Real Time Performance (%) per Sector")
        plt.tight_layout()
        plt.grid()

    console.print("")

    export_data(
        export,
        os.path.dirname(os.path.abspath(__file__)),
        "rtps",
        df_sectors,
    )

    if not raw:
        if gtff.USE_ION:
            plt.ion()
        plt.show()
11,491
def get_rest_api(id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRestApiResult:
    """
    Resource Type definition for AWS::ApiGateway::RestApi
    """
    __args__ = dict()
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:apigateway:getRestApi', __args__, opts=opts, typ=GetRestApiResult).value

    return AwaitableGetRestApiResult(
        api_key_source_type=__ret__.api_key_source_type,
        binary_media_types=__ret__.binary_media_types,
        body=__ret__.body,
        body_s3_location=__ret__.body_s3_location,
        clone_from=__ret__.clone_from,
        description=__ret__.description,
        disable_execute_api_endpoint=__ret__.disable_execute_api_endpoint,
        endpoint_configuration=__ret__.endpoint_configuration,
        fail_on_warnings=__ret__.fail_on_warnings,
        id=__ret__.id,
        minimum_compression_size=__ret__.minimum_compression_size,
        mode=__ret__.mode,
        name=__ret__.name,
        parameters=__ret__.parameters,
        policy=__ret__.policy,
        root_resource_id=__ret__.root_resource_id,
        tags=__ret__.tags)
11,492
def test_subfunc():
    """Test subfunction 'my_subfunc' """
    a = 'a'
    b = 'b'
    res = my_subfunc(a, b)
    assert res == 'a and b'
11,493
def _uframe_post_instrument_driver_set(reference_designator, command, data):
    """
    Execute set parameters for instrument driver using command and data; return uframe response. (POST)
    """
    debug = False
    try:
        uframe_url, timeout, timeout_read = get_c2_uframe_info()
        if 'CAMDS' in reference_designator:
            timeout = 10
            timeout_read = 200
        url = "/".join([uframe_url, reference_designator, command])
        if debug: print '\n debug -- (_uframe_post_instrument_driver_set) url: ', url
        response = requests.post(url, data=data, timeout=(timeout, timeout_read), headers=_post_headers())
        return response
    except ConnectionError:
        message = 'ConnectionError for instrument driver set command.'
        raise Exception(message)
    except Timeout:
        message = 'Timeout for instrument driver set command.'
        raise Exception(message)
    except Exception:
        raise
11,494
def do_infra_show(cc, args):
    """Show infrastructure network attributes."""
    iinfras = cc.iinfra.list()
    if not iinfras:
        print("Infrastructure network not configured")
        return
    iinfra = iinfras[0]
    _print_iinfra_show(iinfra)
11,495
def regroup_if_changed(group, op_list, name=None):
    """Creates a new group for op_list if it has changed.

    Args:
        group: The current group. It is returned if op_list is unchanged.
        op_list: The list of operations to check.
        name: The name to use if a new group is created.

    Returns:
        Either group or a new group (or if op_list is empty then no_op).
    """
    has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
    if (group is None or len(group.control_inputs) != len(op_list) or
            (has_deltas and op_list.has_changed())):
        if has_deltas:
            op_list.mark()
        if op_list:
            return tf.group(*op_list, name=name)
        else:
            return tf.no_op(name=name)
    else:
        return group
11,496
def spike_train_order_profile(*args, **kwargs):
    """ Computes the spike train order profile :math:`E(t)` of the given
    spike trains. Returns the profile as a DiscreteFunction object.

    Valid call structures::

        spike_train_order_profile(st1, st2)       # returns the bi-variate profile
        spike_train_order_profile(st1, st2, st3)  # multi-variate profile of 3
                                                  # spike trains

        spike_trains = [st1, st2, st3, st4]       # list of spike trains
        spike_train_order_profile(spike_trains)   # profile of the list of spike trains

        spike_train_order_profile(spike_trains, indices=[0, 1])  # use only the spike trains
                                                                 # given by the indices

    Additional arguments:
    :param max_tau: Upper bound for coincidence window, `default=None`.
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)

    :returns: The spike train order profile :math:`E(t)`
    :rtype: :class:`.DiscreteFunction`
    """
    if len(args) == 1:
        return spike_train_order_profile_multi(args[0], **kwargs)
    elif len(args) == 2:
        return spike_train_order_profile_bi(args[0], args[1], **kwargs)
    else:
        return spike_train_order_profile_multi(args, **kwargs)
11,497
def home():
    """Home page."""
    form = LoginForm(request.form)
    with open("POSCAR", "r") as samplefile:
        sample_input = samplefile.read()
    inputs = InputForm()
    current_app.logger.info("Hello from the home page!")
    # Handle logging in
    if request.method == "POST":
        if form.validate_on_submit():
            login_user(form.user)
            flash("You are logged in.", "success")
            redirect_url = request.args.get("next") or url_for("user.members")
            return redirect(redirect_url)
        else:
            flash_errors(form)
    return render_template("public/home.html", form=form, inputs=inputs)
11,498
def isUniqueSeq(objlist):
    """Check that list contains items only once"""
    return len(set(objlist)) == len(objlist)
11,499
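# Quick illustrative check for isUniqueSeq above (not part of the original entry);
# note the set() conversion requires the items to be hashable.
print(isUniqueSeq([1, 2, 3]))        # True
print(isUniqueSeq(["a", "b", "a"]))  # False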