Dataset columns: content (string, length 22–815k), id (int64, 0–4.91M)
def get_name(tree, from_='name'):
    """
    Get the name (token) of the AST node.

    :param tree ast:
    :rtype: str|None
    """
    # return tree['name']['name']
    if 'name' in tree and isinstance(tree['name'], str):
        return tree['name']
    if 'parts' in tree:
        return djoin(tree['parts'])
    if from_ in tree:
        return get_name(tree[from_])
    return None
15,100
def pcaTable(labels, vec_mean, vec_std, val_mean, val_std):
    """Make table with PCA formation mean and std"""
    header = "\\begin{center}\n\\begin{tabular}{| l |" + " c |"*6 + "}\\cline{2-7}\n"
    header += "\\multicolumn{1}{c|}{} & \\multicolumn{2}{c|}{PC1} & \\multicolumn{2}{c|}{PC2} & \\multicolumn{2}{c|}{PC3} \\\\\\cline{2-7}"
    header += "\\multicolumn{1}{c|}{} & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ \\\\\\hline\n"
    tt = n.zeros((vec_mean.shape[0], 6))
    tt[:, ::2] = vec_mean
    tt[:, 1::2] = vec_std
    tt_ = n.zeros(6)
    tt_[::2] = val_mean
    tt_[1::2] = val_std
    tab_data = n.vstack((tt, tt_))
    footer = "\\hline\\end{tabular}\n\\end{center}"
    table = header + makeTables(labels, tab_data, True) + footer
    return table
15,101
def render_reference_page(conn: Connection, reference: str) -> str:
    """Create HTML section that lists all notes that cite the reference."""
    sql = """
        SELECT note, Bibliography.html, Notes.html FROM Citations
            JOIN Notes ON Citations.note = Notes.id
            JOIN Bibliography ON Bibliography.key = Citations.reference
        WHERE reference = ?
        ORDER BY note
    """
    notes = []
    text = ""
    for note, _text, html in conn.execute(sql, (reference,)):
        assert not text or text == _text
        text = _text
        notes.append(Note(note, get_section_title(html)))
    section = Elem("section",
                   Elem("h1", '@' + reference[4:]),
                   Elem("p", text),
                   note_list(notes),
                   id=reference,
                   title=reference,
                   **{"class": "level1"})
    return render(section)
15,102
def update_addresses():
    """ Update all AutoAddresses """
    global _all_auto_addresses
    for address in _all_auto_addresses:
        address.update_address()
15,103
def normalize_bridge_id(bridge_id: str):
    """Normalize a bridge identifier."""
    bridge_id = bridge_id.lower()

    # zeroconf: properties['id'], field contains a colon after every 2 chars
    # (format "aa:bb:cc:dd:ee:ff"); check bridge_id itself, not a literal
    if len(bridge_id) == 17 and bridge_id.count(":") == 5:
        return bridge_id.replace(':', '')

    # nupnp: contains 4 extra characters in the middle: "fffe"
    if len(bridge_id) == 16 and bridge_id[6:10] == "fffe":
        return bridge_id[0:6] + bridge_id[-6:]

    # SSDP/UPNP and Hue Bridge API contain the right ID.
    if len(bridge_id) == 12:
        return bridge_id

    logging.getLogger(__name__).warning(
        "Received unexpected bridge id: %s", bridge_id)
    return bridge_id
15,104
def describe_cached_iscsi_volumes(VolumeARNs=None): """ Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types. The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN). See also: AWS API Documentation Exceptions Examples Returns a description of the gateway cached iSCSI volumes specified in the request. Expected Output: :example: response = client.describe_cached_iscsi_volumes( VolumeARNs=[ 'string', ] ) :type VolumeARNs: list :param VolumeARNs: [REQUIRED]\nAn array of strings where each string represents the Amazon Resource Name (ARN) of a cached volume. All of the specified cached volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'CachediSCSIVolumes': [ { 'VolumeARN': 'string', 'VolumeId': 'string', 'VolumeType': 'string', 'VolumeStatus': 'string', 'VolumeAttachmentStatus': 'string', 'VolumeSizeInBytes': 123, 'VolumeProgress': 123.0, 'SourceSnapshotId': 'string', 'VolumeiSCSIAttributes': { 'TargetARN': 'string', 'NetworkInterfaceId': 'string', 'NetworkInterfacePort': 123, 'LunNumber': 123, 'ChapEnabled': True|False }, 'CreatedDate': datetime(2015, 1, 1), 'VolumeUsedInBytes': 123, 'KMSKey': 'string', 'TargetName': 'string' }, ] } Response Structure (dict) --A JSON object containing the following fields: CachediSCSIVolumes (list) --An array of objects where each object contains metadata about one cached volume. (dict) --Describes an iSCSI cached volume. VolumeARN (string) --The Amazon Resource Name (ARN) of the storage volume. VolumeId (string) --The unique identifier of the volume, e.g. vol-AE4B946D. VolumeType (string) --One of the VolumeType enumeration values that describes the type of the volume. VolumeStatus (string) --One of the VolumeStatus values that indicates the state of the storage volume. VolumeAttachmentStatus (string) --A value that indicates whether a storage volume is attached to or detached from a gateway. For more information, see Moving Your Volumes to a Different Gateway . VolumeSizeInBytes (integer) --The size, in bytes, of the volume capacity. VolumeProgress (float) --Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the cached volume is not restoring or bootstrapping. SourceSnapshotId (string) --If the cached volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-78e22663. Otherwise, this field is not included. VolumeiSCSIAttributes (dict) --An VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume. TargetARN (string) --The Amazon Resource Name (ARN) of the volume target. NetworkInterfaceId (string) --The network interface identifier. NetworkInterfacePort (integer) --The port used to communicate with iSCSI targets. LunNumber (integer) --The logical disk number. ChapEnabled (boolean) --Indicates whether mutual CHAP is enabled for the iSCSI target. CreatedDate (datetime) --The date the volume was created. Volumes created prior to March 28, 2017 don\xe2\x80\x99t have this time stamp. VolumeUsedInBytes (integer) --The size of the data stored on the volume in bytes. 
This value is calculated based on the number of blocks that are touched, instead of the actual amount of data written. This value can be useful for sequential write patterns but less accurate for random write patterns. VolumeUsedInBytes is different from the compressed size of the volume, which is the value that is used to calculate your bill. Note This value is not available for volumes created prior to May 13, 2015, until you store data on the volume. KMSKey (string) --The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional. TargetName (string) --The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume . The target name must be unique across all volumes on a gateway. If you don\'t specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name. Exceptions StorageGateway.Client.exceptions.InvalidGatewayRequestException StorageGateway.Client.exceptions.InternalServerError Examples Returns a description of the gateway cached iSCSI volumes specified in the request. response = client.describe_cached_iscsi_volumes( VolumeARNs=[ 'arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB', ], ) print(response) Expected Output: { 'CachediSCSIVolumes': [ { 'VolumeARN': 'arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB', 'VolumeId': 'vol-1122AABB', 'VolumeSizeInBytes': 1099511627776, 'VolumeStatus': 'AVAILABLE', 'VolumeType': 'CACHED iSCSI', 'VolumeiSCSIAttributes': { 'ChapEnabled': True, 'LunNumber': 1, 'NetworkInterfaceId': '10.243.43.207', 'NetworkInterfacePort': 3260, 'TargetARN': 'arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume', }, }, ], 'ResponseMetadata': { '...': '...', }, } :return: { 'CachediSCSIVolumes': [ { 'VolumeARN': 'string', 'VolumeId': 'string', 'VolumeType': 'string', 'VolumeStatus': 'string', 'VolumeAttachmentStatus': 'string', 'VolumeSizeInBytes': 123, 'VolumeProgress': 123.0, 'SourceSnapshotId': 'string', 'VolumeiSCSIAttributes': { 'TargetARN': 'string', 'NetworkInterfaceId': 'string', 'NetworkInterfacePort': 123, 'LunNumber': 123, 'ChapEnabled': True|False }, 'CreatedDate': datetime(2015, 1, 1), 'VolumeUsedInBytes': 123, 'KMSKey': 'string', 'TargetName': 'string' }, ] } :returns: StorageGateway.Client.exceptions.InvalidGatewayRequestException StorageGateway.Client.exceptions.InternalServerError """ pass
15,105
def pyc_file_from_path(path):
    """Given a python source path, locate the .pyc.

    See
    http://www.python.org/dev/peps/pep-3147/#detecting-pep-3147-availability
    http://www.python.org/dev/peps/pep-3147/#file-extension-checks
    """
    import imp
    has3147 = hasattr(imp, 'get_tag')
    if has3147:
        return imp.cache_from_source(path)
    else:
        return path + "c"
15,106
def get_bytes_per_data_block(header):
    """Calculates the number of bytes in each 128-sample datablock."""
    N = 128  # n of amplifier samples
    # Each data block contains N amplifier samples.
    bytes_per_block = N * 4  # timestamp data
    bytes_per_block += N * 2 * header['num_amplifier_channels']

    # DC amplifier voltage (absent if flag was off)
    # bytes_per_block += N * 2 * header['dc_amplifier_data_saved']
    if header['dc_amplifier_data_saved'] > 0:
        bytes_per_block += N * 2 * header['num_amplifier_channels']

    # Stimulation data, one per enabled amplifier channels
    bytes_per_block += N * 2 * header['num_amplifier_channels']

    # Board analog inputs are sampled at same rate as amplifiers
    bytes_per_block += N * 2 * header['num_board_adc_channels']

    # Board analog outputs are sampled at same rate as amplifiers
    bytes_per_block += N * 2 * header['num_board_dac_channels']

    # Board digital inputs are sampled at same rate as amplifiers
    if header['num_board_dig_in_channels'] > 0:
        bytes_per_block += N * 2

    # Board digital outputs are sampled at same rate as amplifiers
    if header['num_board_dig_out_channels'] > 0:
        bytes_per_block += N * 2

    return bytes_per_block
15,107
def recovered():
    """
    Real Name: b'Recovered'
    Original Eqn: b'INTEG ( RR, 0)'
    Units: b'Person'
    Limits: (None, None)
    Type: component

    b''
    """
    return integ_recovered()
15,108
def Mux(sel, val1, val0):
    """Choose between two values.

    Parameters
    ----------
    sel : Value, in
        Selector.
    val1 : Value, in
    val0 : Value, in
        Input values.

    Returns
    -------
    Value, out
        Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
    """
    sel = Value.cast(sel)
    if len(sel) != 1:
        sel = sel.bool()
    return Operator("m", [sel, val1, val0])
15,109
async def test_websocket_update_prefs(
    opp, opp_ws_client, mock_camera, setup_camera_prefs
):
    """Test updating preference."""
    await async_setup_component(opp, "camera", {})

    assert setup_camera_prefs[PREF_PRELOAD_STREAM]

    client = await opp_ws_client(opp)
    await client.send_json(
        {
            "id": 8,
            "type": "camera/update_prefs",
            "entity_id": "camera.demo_camera",
            "preload_stream": False,
        }
    )
    response = await client.receive_json()

    assert response["success"]
    assert not setup_camera_prefs[PREF_PRELOAD_STREAM]
    assert (
        response["result"][PREF_PRELOAD_STREAM]
        == setup_camera_prefs[PREF_PRELOAD_STREAM]
    )
15,110
def get_identity_list(user, provider=None):
    """
    Given the (request) user,
    return all identities on all active providers
    """
    identity_list = CoreIdentity.shared_with_user(user)
    if provider:
        identity_list = identity_list.filter(provider=provider)
    return identity_list
15,111
def test_elements():
    """Test simple dc."""
    elements = [
        ('contributors', 'contributor'),
        ('coverage', 'coverage'),
        ('creators', 'creator'),
        ('dates', 'date'),
        ('descriptions', 'description'),
        ('formats', 'format'),
        ('identifiers', 'identifier'),
        ('languages', 'language'),
        ('publishers', 'publisher'),
        ('relations', 'relation'),
        ('rights', 'rights'),
        ('sources', 'source'),
        ('subjects', 'subject'),
        ('titles', 'title'),
        ('types', 'type'),
    ]

    # Test each element individually
    for plural, singular in elements:
        # Test multiple values
        tree = simpledc.dump_etree({plural: ['value 1', 'value 2']})
        elems = tree.xpath(
            '/oai_dc:dc/dc:{0}'.format(singular), namespaces=simpledc.ns)
        assert len(elems) == 2, singular
        assert elems[0].text == 'value 1'
        assert elems[1].text == 'value 2'

        # Test empty values
        tree = simpledc.dump_etree({plural: []})
        elem = tree.xpath(
            '//dc:{0}'.format(singular), namespaces=simpledc.ns)
        assert len(elem) == 0, singular

    # Test all elements together
    data = {}
    for plural, singular in elements:
        data[plural] = ['test 1', 'test 2']

    tree = simpledc.dump_etree(data)
    for plural, singular in elements:
        elems = tree.xpath(
            '/oai_dc:dc/dc:{0}'.format(singular), namespaces=simpledc.ns)
        assert len(elems) == 2, singular
        assert elems[0].text == 'test 1'
        assert elems[1].text == 'test 2'

    # Test tostring
    xml = simpledc.tostring(data)
    for plural, singular in elements:
        assert '<dc:{0}>'.format(singular) in xml
15,112
def read_links(database_path, search_string='', output_max=20, blob='links'):
    """Print similar stories as a table."""
    database = sqlite3.connect(database_path)
    cursor = database.cursor()
    sql_list = get_blob(search_string, blob, cursor)
    for n, sql_tuple in enumerate(sql_list, 1):
        print('# ----------------------------------------------------------------------------')
        print('# ', n, '/', len(sql_list), sql_tuple[0])
        print('# ----------------------------------------------------------------------------')
        cloud = pickle.loads(sql_tuple[1])
        cloud = dict_sort(cloud)
        n = 0
        for filename, value in cloud.items():
            book, author = get_bookdata(filename, cursor)
            # Output as a percentage:
            value = round(value * 100, 5)
            if value > 0.1 and n < output_max:
                n = n + 1
                print('{0:2} {1:10} | {3:30} | {2}'.format(n, round(value, 3), book, author))
            else:
                break
15,113
def get_user_pool_domain(prefix: str, region: str) -> str:
    """Return a user pool domain name based on the prefix received and region.

    Args:
        prefix: The domain prefix for the domain.
        region: The region in which the pool resides.

    """
    return "%s.auth.%s.amazoncognito.com" % (prefix, region)
15,114
def database_fixture():
    """Delete all entries on users database before & after the test."""
    mongo_users.delete_many({})
    yield mongo_users
    mongo_users.delete_many({})
15,115
def test_get_ticket_context_additional_fields():
    """Unit test
    Given
    - additional keys of a ticket alongside regular keys.
    When
    - getting a ticket context
    Then
    - validate that all the details of the ticket were updated, and all the updated keys
      are shown in the context with no duplicates.
    """
    assert EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS == get_ticket_context(
        RESPONSE_TICKET, ['Summary', 'sys_created_by'])
15,116
def corr_activity(ppath, recordings, states, nskip=10, pzscore=True, bands=[]): """ correlate DF/F during states with delta power, theta power, sigma power and EMG amplitude :param ppath: base filder :param recordings: list of recordings :param states: list of len 1 to 3, states to correlate EEG power with; if you want to correlate power during NREM and REM, then set states = [3,1] :param nskip: number of seconds in the beginning to be skipped :param pzscore, if Tue z-score activity, i.e. DF/F - mean(DF/F) / std(DF/F) :return: n/a """ # Fixed Parameters sf_spectrum = 5 if len(bands) == 0: eeg_bands = [[0.5, 4], [6, 10], [10, 15], [100, 150]] else: eeg_bands = bands # EMG band emg_bands = [[10, 100]] bands = eeg_bands + emg_bands bands = {k:bands[k] for k in range(len(bands))} nbands = len(bands) mice = dict() for rec in recordings: idf = re.split('_', rec)[0] if not idf in mice: mice[idf] = 1 mice = list(mice.keys()) # dict, Band -> Mouse ID -> values Pow = {m:{} for m in mice} DFF = {m:[] for m in mice} for m in mice: d = {b:[] for b in range(nbands)} Pow[m] = d for rec in recordings: idf = re.split('_', rec)[0] sr = get_snr(ppath, rec) # number of time bins for each time bin in spectrogram nbin = int(np.round(sr) * 2.5) sdt = nbin * (1 / sr) nskip = int(nskip / sdt) M = sleepy.load_stateidx(ppath, rec)[0][nskip:] ddir = os.path.join(ppath, rec) if os.path.isfile(os.path.join(ddir, 'dffd.mat')): dff_rec = so.loadmat(os.path.join(ddir, 'dffd.mat'), squeeze_me=True)['dffd'] else: dff_rec = so.loadmat(os.path.join(ddir, 'DFF.mat'), squeeze_me=True)['dffd'] print('%s - saving dffd.mat' % rec) so.savemat(os.path.join(ddir, 'dffd.mat'), {'dffd': dff_rec}) #dff_rec = so.loadmat(os.path.join(ppath, rec, 'DFF.mat'), squeeze_me=True)['dffd'][nskip:]*100.0 if pzscore: dff_rec = (dff_rec - dff_rec.mean()) / dff_rec.std() # collect all brain state indices idx = [] for s in states: idx.append(np.where(M==s)[0]) bs_idx = reduce(lambda x,y:np.concatenate((x,y)), idx) # load spectrogram and normalize P = so.loadmat(os.path.join(ppath, rec, 'sp_%s.mat' % rec), squeeze_me=True) SP = P['SP'] freq = P['freq'] df = freq[1] - freq[0] sp_mean = SP.mean(axis=1) SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T) # load EMG MP = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)['mSP'] # calculate EEG bands for b in range(nbands-1): ifreq = np.where((freq >= bands[b][0]) & (freq <= bands[b][1]))[0] tmp = SP[ifreq,:].sum(axis=0)*df tmp = sleepy.smooth_data(tmp, sf_spectrum) Pow[idf][b] = np.concatenate((Pow[idf][b], tmp[bs_idx])) # add EMG band b = nbands-1 ifreq = np.where((freq >= bands[b][0]) & (freq <= bands[b][1]))[0] tmp = MP[ifreq, :].sum(axis=0) * df tmp = sleepy.smooth_data(tmp, sf_spectrum) Pow[idf][b] = np.concatenate((Pow[idf][b], tmp[bs_idx])) DFF[idf] = np.concatenate((DFF[idf], dff_rec[bs_idx])) # collapse all Power values and dff values PowAll = {b:[] for b in bands} DFFAll = [] for b in bands: for m in mice: PowAll[b] = np.concatenate((PowAll[b], Pow[m][b])) for m in mice: DFFAll = np.concatenate((DFFAll, DFF[m])) r_values = {} for b in bands: p = linregress(PowAll[b], DFFAll) r_values[b] = p plt.ion() plt.figure(figsize=(12,6)) nx = 1.0/nbands dx = 0.2 * nx i=0 for b in bands: ax = plt.axes([nx * i + dx, 0.15, nx - dx - dx / 2.0, 0.3]) j=0 for m in mice: ax.plot(Pow[m][b], DFF[m], '.', color=[j*nx,j*nx,j*nx]) j+=1 i+=1 if b>0: #ax.set_yticklabels([]) pass if b<nbands-1: plt.xlabel('EEG Power') else: plt.xlabel('EMG Power') plt.title('%.2f<f<%.2f, r2=%.2f' % 
(bands[b][0], bands[b][1], r_values[b][2]), fontsize=10) if b==0: if pzscore: plt.ylabel('DF/F (z-scored)') else: plt.ylabel('DF/F') sleepy.box_off(ax) x = np.linspace(PowAll[b].min(), PowAll[b].max(), 100) ax.plot(x, x*r_values[b][0]+r_values[b][1], color='blue') plt.draw() return r_values
15,117
def add_dep_info(tgt_tokens, lang, spacy_nlp, include_detail_tag=True):
    """
    :param tgt_tokens: a list of CoNLLUP_Token_Template() Objects from CoNLL_Annotations.py file
    :param spacy_nlp: Spacy language model of the target sentence to get the proper Dependency Tree
    :return:
    """
    doc = spacy_nlp.tokenizer.tokens_from_list([t.word for t in tgt_tokens])
    spacy_nlp.tagger(doc)
    spacy_nlp.parser(doc)
    for ix, token in enumerate(doc):
        tgt_tokens[ix].lemma = token.lemma_ or "_"
        tgt_tokens[ix].head = token.head.i + 1
        if lang in ["ES", "FR"]:
            detail_tag = token.tag_.split("__")  # [VERB , Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin]
            tgt_tokens[ix].pos_tag = detail_tag[0] or "_"
            if include_detail_tag:
                tgt_tokens[ix].detail_tag = detail_tag[-1] or "_"
        else:
            tgt_tokens[ix].pos_tag = token.tag_ or "_"
        tgt_tokens[ix].pos_universal = token.pos_ or "_"  # Is SpaCy already Universal?
        tgt_tokens[ix].dep_tag = token.dep_ or "_"
        tgt_tokens[ix].ancestors = [(t.i, t.text) for t in token.ancestors]
        tgt_tokens[ix].children = [(t.i, t.text) for t in token.children]
        # print(token.i, token.text, token.pos_, token.dep_, token.head.text, token.head.i, token.tag_)
    assert len(doc) == len(tgt_tokens), f"LEN Mismatch! Spacy has {len(doc)} tokens and CoNLL has {len(tgt_tokens)} tokens"
    return tgt_tokens
15,118
async def apod(request: Request) -> dict:
    """Get the astronomy picture of the day."""
    http_client = request.app.state.http_client

    async with http_client.session.get(
        f"https://api.nasa.gov/planetary/apod?api_key={NASA_API}"
    ) as resp:
        data = await resp.json()

    return {
        "title": data["title"],
        "explanation": data["explanation"],
        "img": data["hdurl"],
    }
15,119
def main_view(request, url, preview=False):
    """
    @param request: HTTP request
    @param url: string
    @param preview: boolean
    """
    url_result = parse_url(url)
    current_site = get_site()
    # sets tuple (template_name, posts_on_page)
    current_template = get_template()
    language = get_language(url_result)
    if not url_result['page']:
        page = get_index_page(language)
    else:
        page = get_page(url_result['page'], language, preview)
    menuitems = get_main_menuitems(url_result['page'], page, preview)
    meta_data = get_metadata(page)
    page_num = url_result['page_num'] or 1
    if url_result['post']:
        posts = get_post(page, url_result['post'], preview)
        template_page = 'post.html'
        form = handle_comment_form(request, posts)
    else:
        posts = get_paginated_posts(page, page_num, page.items_per_menu)
        template_page = 'page.html'
    site_content = {'site': current_site,
                    'languages': get_languages(),
                    'current_language': language,
                    'menuitems': menuitems,
                    'page': page,
                    'scripts': get_scripts(),
                    'metadata': meta_data,
                    'posts': posts,
                    }
    if has_other_menu():
        site_content['other_menuitems'] = get_other_menuitems()
    try:
        site_content['form'] = form
    except NameError:
        pass
    template = '{}/{}'.format(current_template[0], template_page)
    return render_to_response(
        template,
        {'site_content': site_content},
        RequestContext(request)
    )
15,120
def parse_configs(code_config, field_config, time_config):
    """
    Wrapper to validate and parse each of the config files. Returns a
    dictionary with config types as keys and parsed config files as values.
    """
    # performing basic validation of config paths, obtaining dictionary of
    # config types and corresponding raw dataframes
    raw_dfs = validate_config_dfs(code_config, field_config, time_config)

    # performing additional config-specific validation and parsing
    config_dict = {}
    for config_type, df in raw_dfs.items():
        if config_type in validation_functions:
            validation_functions[config_type](df)
        if config_type in parse_functions:
            config_dict[config_type] = parse_functions[config_type](df)
        else:
            config_dict[config_type] = df

    # concatenating code and field configs
    if CODE_CONFIG in config_dict:
        if FIELD_CONFIG in config_dict:
            config_dict[FIELD_CONFIG] = pd.concat(
                [config_dict[CODE_CONFIG], config_dict[FIELD_CONFIG]], sort=True)
        else:
            config_dict[FIELD_CONFIG] = config_dict[CODE_CONFIG]
        config_dict.pop(CODE_CONFIG)

    return config_dict
15,121
def butter_highpass_filter_eda(data):
    """ High pass filter for 1d EDA data. """
    b, a = eda_hpf()
    y = lfilter(b, a, data)
    return y
15,122
def normalize_null_vals(reported_val):
    """
    Takes a reported value and returns a normalized NaN if it is null, nan,
    empty, etc. Else returns the reported value.
    """
    if is_empty_value(reported_val):
        return np.NaN
    else:
        return reported_val
15,123
def in_notebook() -> bool:
    """Evaluate whether the module is currently running in a jupyter notebook."""
    return "ipykernel" in sys.modules
15,124
def status(
    cycle_id: str = A_(..., help='data load cycle id'),
    bad_records_file: pathlib.Path = O_(
        None,
        '--bad_records_file',
        help='file to use for storing rows that failed to load',
        metavar='FILE.csv',
        dir_okay=False,
        resolve_path=True
    ),
    **frontend_kw
):
    """
    Get the status of a data load.
    """
    cfg = TSConfig.from_cli_args(**frontend_kw, interactive=True)

    with ThoughtSpot(cfg) as ts:
        r = ts.api.ts_dataservice.load_status(cycle_id)
        data = r.json()

        console.print(
            f'\nCycle ID: {data["cycle_id"]} ({data["status"]["code"]})'
            f'\nStage: {data["internal_stage"]}'
            f'\nRows written: {data["rows_written"]}'
            f'\nIgnored rows: {data["ignored_row_count"]}'
        )

        if data['status']['code'] == 'LOAD_FAILED':
            console.print(f'\nFailure reason:\n  [red]{data["status"]["message"]}[/]')

        if bad_records_file is not None and int(data['ignored_row_count']) > 0:
            r = ts.api.ts_dataservice.load_params(cycle_id)
            load_params = r.json()

            r = ts.api.ts_dataservice.bad_records(cycle_id)
            console.print(f'[red]\n\nBad records found...\n  writing to {bad_records_file}')
            _bad_records_to_file(bad_records_file, data=r.text, params=load_params)
15,125
def test_credential_property(mocker):
    """
    GIVEN an attribute to make as a property
    WHEN credential_property is called
    THEN assert a property is returned
    """
    val_mock = mocker.patch.object(messages._utils, 'validate_input')
    param = credential_property('param')
    assert isinstance(param, property)
15,126
def _clean_dls(limit, path=None):
    """Check that number of images saved so far is no more than `limit`.

    Args:
        limit: maximum number of downloads allowed in download directory.
        path: path to saved images. Default to current working directory.

    Raises:
        ValueError: if `limit` is not a positive integer.
    """
    if not isinstance(limit, int) or limit < 0:
        raise ValueError('`limit` must be a positive integer.')

    if path is None:
        path = os.getcwd()

    # collect jpg file paths
    jpegs = []
    for f in os.listdir(path):
        if os.path.isfile(os.path.join(path, f)):
            if f.lower().endswith('.jpg'):
                jpegs.append(os.path.join(path, f))

    # check if limit exceeded
    if len(jpegs) > limit:
        logger.info('%s jpeg files in %s', len(jpegs), path)
        logger.info('Cleaning...')

        # sort by modification time, oldest to newest
        jpegs.sort(key=os.path.getmtime)
        while len(jpegs) > limit:
            f = jpegs.pop(0)
            os.remove(f)
            logger.info('%s removed.', f)
15,127
def test_insert(type):
    """
    >>> test_insert(int_)
    [0, 1, 2, 3, 4, 5]
    """
    tlist = nb.typedlist(type, [1, 3])
    tlist.insert(0, 0)
    tlist.insert(2, 2)
    tlist.insert(4, 4)
    tlist.insert(8, 5)
    return tlist
15,128
def get_next_month_range(unbounded=False):
    """Get the start and end time of the next month.

    :param unbounded: open interval
    """
    return get_month_range(months=1, unbounded=unbounded)
15,129
def bedAnnotate_ceas(workflow, conf):
    """
    Calls bedAnnotate to get the genome distribution of the summits
    """
    import os
    summits = conf.prefix + "_sort_summits.bed" if conf.get("macs2", "type") in ["both", "narrow"] else conf.prefix + "_b_sort_peaks.broadPeak"
    ceas = attach_back(workflow, ShellCommand(
        """{tool} -g {param[geneTable]} -b {input} -e {output[exon]} -t {output[gene]}> {output[meta]}
        meta_info.sh {output[gene]} {output[exon]} 2000 {param[chrominfo]} """,
        tool="bedAnnotate.py",
        input=summits,
        output={"meta": conf.prefix + ".meta",
                "gene": os.path.join(conf.target_dir, "gene.bed"),
                "exon": os.path.join(conf.target_dir, "exon.bed"),
                "promoter": os.path.join(conf.target_dir, "gene.bed_promoter"),
                "exon": os.path.join(conf.target_dir, "gene.bed_exon")},
        param={"geneTable": conf.get_path(conf.get("basics", "species"), "geneTable"),
               "chrominfo": conf.get_path(conf.get("basics", "species"), "chrom_len")},
        name="bedAnnotate (ceas)"))
    try:
        has_velcro = conf.get(conf.get("basics", "species"), "velcro")
        has_dhs = conf.get(conf.get("basics", "species"), "dhs")
    except:
        has_velcro = ""
        has_dhs = ""
    ceas.allow_fail = True
    ceas.allow_dangling = True
    if has_dhs:
        DHS(workflow, conf)
    if has_velcro:
        velcro(workflow, conf)
    stat_bedAnnotate(workflow, conf, has_dhs, has_velcro)
15,130
def probit_regression( dataset_fn, name='probit_regression', ): """Bayesian probit regression with a Gaussian prior. Args: dataset_fn: A function to create a classification data set. The dataset must have binary labels. name: Name to prepend to ops created in this function, as well as to the `code_name` in the returned `TargetDensity`. Returns: target: `TargetDensity`. """ with tf.name_scope(name) as name: dataset = dataset_fn() num_train_points = dataset.train_features.shape[0] num_test_points = dataset.test_features.shape[0] have_test = num_test_points > 0 # Add bias. train_features = tf.concat( [dataset.train_features, tf.ones([num_train_points, 1])], axis=-1) train_labels = tf.convert_to_tensor(dataset.train_labels) test_features = tf.concat( [dataset.test_features, tf.ones([num_test_points, 1])], axis=-1) test_labels = tf.convert_to_tensor(dataset.test_labels) num_features = int(train_features.shape[1]) root = tfd.JointDistributionCoroutine.Root zero = tf.zeros(num_features) one = tf.ones(num_features) def model_fn(features): weights = yield root(tfd.Independent(tfd.Normal(zero, one), 1)) probits = tf.einsum('nd,...d->...n', features, weights) yield tfd.Independent(tfd.ProbitBernoulli(probits=probits), 1) train_joint_dist = tfd.JointDistributionCoroutine( functools.partial(model_fn, features=train_features)) test_joint_dist = tfd.JointDistributionCoroutine( functools.partial(model_fn, features=test_features)) dist = joint_distribution_posterior.JointDistributionPosterior( train_joint_dist, (None, train_labels)) expectations = { 'params': target_spec.expectation( fn=lambda params: params[0], human_name='Parameters', ) } if have_test: expectations['test_nll'] = target_spec.expectation( fn=lambda params: ( # pylint: disable=g-long-lambda -test_joint_dist.sample_distributions(value=params) [0][-1].log_prob(test_labels)), human_name='Test NLL', ) expectations['per_example_test_nll'] = target_spec.expectation( fn=lambda params: ( # pylint: disable=g-long-lambda -test_joint_dist.sample_distributions(value=params) [0][-1].distribution.log_prob(test_labels)), human_name='Per-example Test NLL', ) return target_spec.TargetDensity.from_distribution( distribution=dist, constraining_bijectors=(tfb.Identity(),), expectations=expectations, code_name='{}_{}'.format(dataset.code_name, name), human_name='{} Probit Regression'.format(dataset.human_name), )
15,131
def test_unet_summary_generation() -> None:
    """Checks unet summary generation works either in CPU or GPU"""
    model = UNet3D(input_image_channels=1,
                   initial_feature_channels=2,
                   num_classes=2,
                   kernel_size=1,
                   num_downsampling_paths=2)
    if machine_has_gpu:
        model.cuda()
    summary = ModelSummary(model=model).generate_summary(input_sizes=[(1, 4, 4, 4)])
    assert summary is not None
15,132
def block_inception_c(inputs, scope=None, reuse=None):
    """Builds Inception-C block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = tf.concat(axis=3, values=[
                    slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
                    slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
                branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
                branch_2 = tf.concat(axis=3, values=[
                    slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
                    slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
15,133
def from_json(js):
    """ Helper to parse json values from server into python types """
    if js is None or js is True or js is False or isinstance(js, six.text_type):
        # JsNull, JsBoolean, JsString
        return js
    if not isinstance(js, dict) or 'type' not in js or 'data' not in js:
        raise ValueError('Expected a dict, got {!r}'.format(js))
    t = js['type']
    data = js['data']
    if t in ('byte', 'short', 'int', 'long'):
        return int(data)
    if t in ('float', 'double'):
        return float(data)
    if t == 'timestamp':
        # server returns timestamp in milliseconds, which is not the python convention
        return float(data) / 1E3
    if t == 'date':
        # server returns timestamp in milliseconds
        return datetime.date.fromtimestamp(float(data) / 1E3)
    if t == 'byte_array':
        return bytearray([int(x) for x in data])
    if t in ('wrapped_array', 'seq', 'array'):
        return [from_json(x) for x in data]
    if t == 'map':
        d = {}
        for entry in data:
            if 'key' not in entry or 'val' not in entry:
                raise ValueError('Invalid map entry: {!r}'.format(entry))
            d[from_json(entry['key'])] = from_json(entry['val'])
        return d
    raise ValueError('Failed to parse value: {!r}'.format(js))
15,134
def exposexml(func):
    """
    Convenience decorator function to expose XML
    """
    def wrapper(self, data, expires, contentType="application/xml"):
        data = func(self, data)
        _setCherryPyHeaders(data, contentType, expires)
        return self.templatepage('XML', data=data,
                                 config=self.config,
                                 path=request.path_info)
    wrapper.__doc__ = func.__doc__
    wrapper.__name__ = func.__name__
    wrapper.exposed = True
    return wrapper
15,135
def rect(left: float, top: float, right: float, bottom: float, image: Image = None):
    """
    Draws a rectangle outline with upper left corner at (left, top) and
    lower right corner at (right, bottom). The rectangle is not filled.

    :param left: x coordinate value of the upper left corner
    :param top: y coordinate value of the upper left corner
    :param right: x coordinate value of the lower right corner
    :param bottom: y coordinate value of the lower right corner
    :param image: the target image which will be painted on. None means it is the target image
        (see set_target() and get_target()).
    """
    image = _get_target_image(image)
    image.rect(left, top, right, bottom)
15,136
def err(*args):
    """ Outputs its parameters to the user's stderr. """
    for value in args:
        sys.stderr.write(value)
        sys.stderr.write(os.linesep)
15,137
def _SetSource(build_config, messages, is_specified_source, no_source, source, gcs_source_staging_dir, ignore_file, hide_logs=False): """Set the source for the build config.""" default_gcs_source = False default_bucket_name = None if gcs_source_staging_dir is None: default_gcs_source = True default_bucket_name = staging_bucket_util.GetDefaultStagingBucket() gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name) gcs_client = storage_api.StorageClient() # --no-source overrides the default --source. if not is_specified_source and no_source: source = None gcs_source_staging = None if source: suffix = '.tgz' if source.startswith('gs://') or os.path.isfile(source): _, suffix = os.path.splitext(source) # Next, stage the source to Cloud Storage. staged_object = '{stamp}-{uuid}{suffix}'.format( stamp=times.GetTimeStampFromDateTime(times.Now()), uuid=uuid.uuid4().hex, suffix=suffix, ) gcs_source_staging_dir = resources.REGISTRY.Parse( gcs_source_staging_dir, collection='storage.objects') try: gcs_client.CreateBucketIfNotExists( gcs_source_staging_dir.bucket, check_ownership=default_gcs_source) except api_exceptions.HttpForbiddenError: raise BucketForbiddenError( 'The user is forbidden from accessing the bucket [{}]. Please check ' 'your organization\'s policy or if the user has the "serviceusage.services.use" permission' .format(gcs_source_staging_dir.bucket)) except storage_api.BucketInWrongProjectError: # If we're using the default bucket but it already exists in a different # project, then it could belong to a malicious attacker (b/33046325). raise c_exceptions.RequiredArgumentException( 'gcs-source-staging-dir', 'A bucket with name {} already exists and is owned by ' 'another project. Specify a bucket using ' '--gcs-source-staging-dir.'.format(default_bucket_name)) if gcs_source_staging_dir.object: staged_object = gcs_source_staging_dir.object + '/' + staged_object gcs_source_staging = resources.REGISTRY.Create( collection='storage.objects', bucket=gcs_source_staging_dir.bucket, object=staged_object) if source.startswith('gs://'): gcs_source = resources.REGISTRY.Parse( source, collection='storage.objects') staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging) build_config.source = messages.Source( storageSource=messages.StorageSource( bucket=staged_source_obj.bucket, object=staged_source_obj.name, generation=staged_source_obj.generation, )) else: if not os.path.exists(source): raise c_exceptions.BadFileException( 'could not find source [{src}]'.format(src=source)) if os.path.isdir(source): source_snapshot = snapshot.Snapshot(source, ignore_file=ignore_file) size_str = resource_transform.TransformSize( source_snapshot.uncompressed_size) if not hide_logs: log.status.Print( 'Creating temporary tarball archive of {num_files} file(s)' ' totalling {size} before compression.'.format( num_files=len(source_snapshot.files), size=size_str)) staged_source_obj = source_snapshot.CopyTarballToGCS( gcs_client, gcs_source_staging, ignore_file=ignore_file, hide_logs=hide_logs) build_config.source = messages.Source( storageSource=messages.StorageSource( bucket=staged_source_obj.bucket, object=staged_source_obj.name, generation=staged_source_obj.generation, )) elif os.path.isfile(source): unused_root, ext = os.path.splitext(source) if ext not in _ALLOWED_SOURCE_EXT: raise c_exceptions.BadFileException('Local file [{src}] is none of ' + ', '.join(_ALLOWED_SOURCE_EXT)) if not hide_logs: log.status.Print('Uploading local file [{src}] to ' '[gs://{bucket}/{object}].'.format( src=source, 
bucket=gcs_source_staging.bucket, object=gcs_source_staging.object, )) staged_source_obj = gcs_client.CopyFileToGCS(source, gcs_source_staging) build_config.source = messages.Source( storageSource=messages.StorageSource( bucket=staged_source_obj.bucket, object=staged_source_obj.name, generation=staged_source_obj.generation, )) else: # No source if not no_source: raise c_exceptions.InvalidArgumentException( '--no-source', 'To omit source, use the --no-source flag.') return build_config
15,138
def transform_categorical_by_percentage(TRAIN, TEST=None, handle_unknown="error", verbose=0): """ Transform categorical features to numerical. The categories are encoded by their relative frequency (in the TRAIN dataset). To be consistent with scikit-learn transformers having categories in transform that are not present during training will raise an error by default. ----- Arguments: TRAIN: DataFrame. TEST: DataFrame, optional (default=None). handle_unknown: str, "error", "ignore" or "NaN", optional (default="error"). Whether to raise an error, ignore or replace by NA if an unknown category is present during transform. verbose: integer, optional (default=0). Controls the verbosity of the process. ----- Returns: TRAIN: DataFrame. TEST: DataFrame. This second DataFrame is returned if two DataFrames were provided. """ categorical = TRAIN.select_dtypes(include=["object"]).columns if TEST is not None: if len(categorical) > 0: for col in categorical: cat_counts = TRAIN[col].value_counts(normalize=True, dropna=False) dict_cat_counts = dict(zip(cat_counts.index, cat_counts)) not_in_train = list(set(TEST[col].unique()) - set(cat_counts.index)) if len(not_in_train) > 0: if handle_unknown == "error": raise ValueError("TEST contains new labels: {0} " "in variable {1}." .format(not_in_train, col)) if handle_unknown == "ignore": print("\n-----\n") print("Variable: {0}".format(col)) print("Unknown category(ies) {0} present during " "transform has(ve) been ignored." .format(not_in_train)) if handle_unknown == "NaN": print("\n-----\n") print("Variable: {0}".format(col)) print("Unknown category(ies) {0} present during " "transform has(ve) been replaced by NA." .format(not_in_train)) for item in not_in_train: dict_cat_counts[item] = np.nan TRAIN[col] = TRAIN[col].replace(dict_cat_counts) TEST[col] = TEST[col].replace(dict_cat_counts) if verbose > 0: print("\n-----\n") print("Feature: {0}".format(col)) if verbose > 1: print(cat_counts) return (TRAIN, TEST) else: for col in categorical: cat_counts = TRAIN[col].value_counts(normalize=True, dropna=False) dict_cat_counts = dict(zip(cat_counts.index, cat_counts)) TRAIN[col] = TRAIN[col].replace(dict_cat_counts) if verbose > 0: print("\n-----\n") print("Feature: {0}".format(col)) if verbose > 1: print(cat_counts) return TRAIN
15,139
def buildDescription(flinfoDescription='', flickrreview=False, reviewer='',
                     override='', addCategory='', removeCategories=False):
    """Build the final description for the image.

    The description is based on the info from flickrinfo and improved.
    """
    description = '== {{int:filedesc}} ==\n{}'.format(flinfoDescription)
    if removeCategories:
        description = textlib.removeCategoryLinks(description,
                                                  pywikibot.Site(
                                                      'commons', 'commons'))
    if override:
        description = description.replace('{{cc-by-sa-2.0}}\n', '')
        description = description.replace('{{cc-by-2.0}}\n', '')
        description = description.replace('{{flickrreview}}\n', '')
        description = description.replace(
            '{{copyvio|Flickr, licensed as "All Rights Reserved" which is not '
            'a free license --~~~~}}\n',
            '')
        description = description.replace('=={{int:license}}==',
                                           '=={{int:license}}==\n' + override)
    elif flickrreview:
        if reviewer:
            description = description.replace(
                '{{flickrreview}}',
                '{{flickrreview|%s|'
                '{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-'
                '{{subst:CURRENTDAY2}}}}' % reviewer)
    if addCategory:
        description = description.replace('{{subst:unc}}\n', '')
        description = description + '\n[[Category:' + addCategory + ']]\n'
    description = description.replace('\r\n', '\n')
    return description
15,140
def allowed_file(filename, extensions):
    """
    Check file is image
    :param filename: string
    :param extensions: list
    :return bool:
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in extensions
15,141
def read(infile):
    """Read result from disk."""
    _, ext = os.path.splitext(infile)
    ext = ext.strip('.')
    return read_funcs[ext](infile)
15,142
def _url_in_cache(url):
    """
    Determine if a URL's response exists in the cache.

    Parameters
    ----------
    url : string
        the url to look for in the cache

    Returns
    -------
    filepath : string
        path to cached response for url if it exists in the cache, otherwise None
    """
    # hash the url to generate the cache filename
    filename = hashlib.md5(url.encode("utf-8")).hexdigest()
    filepath = os.path.join(settings.cache_folder, os.extsep.join([filename, "json"]))

    # if this file exists in the cache, return its full path
    if os.path.isfile(filepath):
        return filepath
15,143
def get_number_of_recovery_codes(userid):
    """
    Get and return the number of remaining recovery codes for `userid`.

    Parameters:
        userid: The userid for which to check the count of recovery codes.

    Returns:
        An integer representing the number of remaining recovery codes.
    """
    return d.engine.scalar("""
        SELECT COUNT(*)
        FROM twofa_recovery_codes
        WHERE userid = %(userid)s
    """, userid=userid)
15,144
def creation_time(path_to_file):
    """The file creation time.

    Try to get the date that a file was created, falling back to when it was
    last modified if that isn't possible.
    See http://stackoverflow.com/a/39501288/1709587 for explanation.
    """
    if platform.system() == 'Windows':
        return os.path.getctime(path_to_file)
    else:
        stat = os.stat(path_to_file)
        try:
            return stat.st_birthtime
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            return stat.st_mtime
15,145
async def mailbox_search(search_term: str, Authorize: AuthJWT = Depends(), Token = Depends(auth_schema)):
    """Search email with a search term"""
    Authorize.jwt_required()
    try:
        return JSONResponse(dumps({"success": True, "email": database.search(search_term)}))
    except Exception as err:
        return JSONResponse({"success": False, "error": str(err)})
15,146
def otel_service(docker_ip, docker_services):
    """Ensure that port is listening."""
    # `port_for` takes a container port and returns the corresponding host port
    port = docker_services.port_for("otel-collector", 4317)
    docker_services.wait_until_responsive(
        timeout=30.0, pause=5, check=lambda: is_portListening(docker_ip, port)
    )
    return True
15,147
def broadcast(vec: T.Tensor, matrix: T.Tensor) -> T.Tensor:
    """
    Broadcasts vec into the shape of matrix following numpy rules:

    vec ~ (N, 1) broadcasts to matrix ~ (N, M)
    vec ~ (1, N) and (N,) broadcast to matrix ~ (M, N)

    Args:
        vec: A vector (either flat, row, or column).
        matrix: A matrix (i.e., a 2D tensor).

    Returns:
        tensor: A tensor of the same size as matrix containing the elements
                of the vector.

    Raises:
        BroadcastError
    """
    try:
        return numpy.broadcast_to(vec, shape(matrix))
    except ValueError:
        raise BroadcastError(
            'cannot broadcast vector of dimension {} onto matrix of dimension {}'.format(
                shape(vec), shape(matrix)))
15,148
def neighbour_list_n_out(nlist_i: NeighbourList, nlist_j: NeighbourList) -> np.ndarray:
    """
    Compute n^out between two NeighbourList objects.

    Args:
        nlist_i (NeighbourList): A NeighbourList object for neighbour lists at time 0.
        nlist_j (NeighbourList): A NeighbourList object for neighbour lists at time t.

    Returns:
        (np.ndarray(float)): A 1D array of normalised correlation terms.

    Raises:
        ValueError: If the two NeighbourList objects have different lengths of
            neighbour list vectors.

    Note:
        For each neighbour list vector, computes (l_i.l_i) - (l_i.l_j).
        See Rabani et al. J. Chem. Phys. 1997 doi:https://doi.org/10.1063/1.474927
        Eqn. 8 for details.
    """
    if nlist_i.vectors.shape != nlist_j.vectors.shape:
        raise ValueError(f'NeighbourList vector shapes are not equal: '
                         f'{nlist_i.vectors.shape} != {nlist_j.vectors.shape}')
    return (np.einsum('ij,ij->i', nlist_i.vectors, nlist_i.vectors) -
            np.einsum('ij,ij->i', nlist_i.vectors, nlist_j.vectors))
15,149
def split_if_then(source_file: str) -> dict: """Split a script file into component pieces""" logging.debug("Splitting '{}' into IF/THEN blocks".format(source_file)) with open(source_file) as f: source_text = f.read() logging.debug("Read {} bytes".format(len(source_text))) r = re.compile(_if_then_regex, flags=re.MULTILINE) r_or = re.compile(r"OR\((\d+)\)") r_resp = re.compile(r"RESPONSE #(\d+)") # Replace all double quotes outside comments with single quotes. source_text = replace_double_quotes_with_single_outside_comment( source_text) count = 0 triggers = [] actions = [] for m in r.finditer(source_text): count = count + 1 or_count = 0 # Break if conditions into separate lines. for line in m.group("IF").split('\n'): line = line.strip() or_check = r_or.match(line) if 0 == len(line): pass # elif 0 == or_count and "ActionListEmpty()" == line: # output["ActionListEmpty"] = True elif or_check: or_count = int(or_check.group(1)) triggers.append([]) elif or_count > 0: triggers[-1].append(line) or_count = or_count - 1 else: triggers.append(line) # Break then conditions into separate lines. action_list = [] response_value = None for line in m.group("THEN").split('\n'): line = line.strip() response_check = r_resp.match(line) if 0 == len(line): pass elif response_check: if response_value: actions.append({response_value: action_list}) response_value = response_check.group(1) action_list = [] else: action_list.append(line) if response_value: actions.append({response_value: action_list}) if count > 1: raise RuntimeError("IF/THEN Parse found multiple matches in '{}'" .format(source_file)) # triggers = promote_trigger(triggers, "^HaveSpell") # triggers = promote_trigger(triggers, "^ActionListEmpty") result = {"IF": triggers, "THEN": actions} name = get_name(actions) if name: result["name"] = name return result
15,150
def add(ctx, project):
    """
    This command adds a project to primer
    """
    confdir = ctx.obj['confdir']
    project_path = pathlib.Path(project)
    project_path = project_path.expanduser()
    base = project_path.parent
    name = project_path.name
    salt_project = Project(confdir, base, name)
    try:
        salt_project.save()
        click.echo(click.style("Applied primer on {0}!\nYou can start adding repositories to {0} now.".format(name), fg='green'))
    except exceptions.ProjectFolderExistsError:
        click.echo(click.style("Project folder {} already exists, doing nothing!".format(project), fg='red'), err=True)
        sys.exit(1)
    except exceptions.ProjectExistsError:
        click.echo(click.style("Project {} already defined!".format(name), fg='red'), err=True)
        sys.exit(1)
15,151
def save_as_geotiff(grid_x: np.ndarray, grid_y: np.ndarray,
                    dx: float, dy: float,
                    rgb: np.ndarray, epsg: int, outfile: str):
    """
    Save output image as geotiff using GDAL.

    Parameters
    ----------
    grid_x : np.ndarray
        Grid x-coordinates.
    grid_y : np.ndarray
        Grid y-coordinates.
    dx, dy : float
        Grid resolution in x and y.
    rgb : np.ndarray
        Image data.
    epsg : int
        EPSG code for georeferencing.
    outfile : str
        Output file name.

    Returns
    -------
    None
        Will write to file instead.
    """
    # set geotransform
    nx = rgb.shape[0]
    ny = rgb.shape[1]
    geotransform = [grid_x.min(), dx, 0, grid_y.min(), 0, dy]

    # create the 3-band raster file
    dst_ds = gdal.GetDriverByName('GTiff').Create(
        outfile, ny, nx, 3, gdal.GDT_Byte)

    dst_ds.SetGeoTransform(geotransform)    # specify coords
    srs = osr.SpatialReference()            # establish encoding
    # EPSG:28356 - GDA94 / MGA zone 56 - Projected
    srs.ImportFromEPSG(int(epsg))
    dst_ds.SetProjection(srs.ExportToWkt())  # export coords to file

    # write rgb bands to the raster
    dst_ds.GetRasterBand(1).WriteArray(rgb[:, :, 0])
    dst_ds.GetRasterBand(2).WriteArray(rgb[:, :, 1])
    dst_ds.GetRasterBand(3).WriteArray(rgb[:, :, 2])

    # write to disk
    dst_ds.FlushCache()
    dst_ds = None
15,152
def get_sql_injection_match_set(SqlInjectionMatchSetId=None): """ Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId . See also: AWS API Documentation Examples The following example returns the details of a SQL injection match set with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5. Expected Output: :example: response = client.get_sql_injection_match_set( SqlInjectionMatchSetId='string' ) :type SqlInjectionMatchSetId: string :param SqlInjectionMatchSetId: [REQUIRED] The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets . :rtype: dict :return: { 'SqlInjectionMatchSet': { 'SqlInjectionMatchSetId': 'string', 'Name': 'string', 'SqlInjectionMatchTuples': [ { 'FieldToMatch': { 'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY', 'Data': 'string' }, 'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE' }, ] } } :returns: HEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data . METHOD : The HTTP method, which indicated the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT . QUERY_STRING : A query string, which is the part of a URL that appears after a ? character, if any. URI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg . BODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet . """ pass
15,153
def id_label_to_project(id_label):
    """
    Given a project's id_label, return the project.
    """
    match = re.match(r"direct-sharing-(?P<id>\d+)", id_label)
    if match:
        project = DataRequestProject.objects.get(id=int(match.group("id")))
        return project
15,154
def test_querystring_args_clash():
    """
    Cannot use any reserved or already used names for querystring args.
    """
    # body_arg clash
    with pytest.raises(ArgNameConflict):
        APIEndpoint(
            'GET',
            '/users/{user_id}/feedback/',
            querystring_args=[Arg('body')],
        )

    # reserved name clash
    with pytest.raises(ArgNameConflict):
        APIEndpoint(
            'GET',
            '/users/{user_id}/feedback/',
            querystring_args=[Arg('_request_kwargs')],
        )

    # if same arg name appears in url and querystring it's assumed you
    # want to use the same value for both
    qs_arg = Arg('user_id')
    endpoint = APIEndpoint(
        'GET',
        '/users/{user_id}/feedback/',
        querystring_args=[qs_arg],
    )
    assert endpoint.url_args == {'user_id'}
    assert endpoint.querystring_args == {qs_arg}
15,155
def payoff_blotto_sign(x, y):
    """
    Returns:
        (0, 0, 1) -- x wins, y loses;
        (0, 1, 0) -- draw;
        (1, 0, 0) -- x loses, y wins.
    """
    wins, losses = 0, 0
    for x_i, y_i in zip(x, y):
        if x_i > y_i:
            wins += 1
        elif x_i < y_i:
            losses += 1

    if wins > losses:
        return (0, 0, 1)
    elif wins < losses:
        return (1, 0, 0)

    return (0, 1, 0)
15,156
def import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=None, overwrite=False, verbose=0): """Downloads, cleans, and imports nyc tlc taxi record files for the specified taxi type into a sqlite database. Parameters ---------- url_path : str or None Path to text file containing nyc tlc taxi record file urls to download from. Set to None to skip download. dl_dir : str Path of directory to download files to or load files from. db_path : str Path to sqlite database. taxi_type : str Taxi type to create regex for ('fhv', 'green', 'yellow', or 'all'). nrows : int or None Number of rows to read. Set to None to read all rows. usecols : list List of column names to include. Specify columns names as strings. Column names can be entered based on names found in original tables for the year specified or names found in the trips table. Set to None to read all columns. overwrite : bool Defines whether or not to overwrite existing database tables. verbose : int Defines verbosity for output statements. Returns ------- import_num : int Number of files imported into database. Notes ----- """ # download taxi record files if url_path: dl_num = dl_urls(url_path, dl_dir, taxi_type, verbose=verbose) else: dl_num = 0 # get taxi record files files = get_regex_files(dl_dir, taxi_regex_patterns(taxi_type), verbose=verbose) # create trips table (if needed) create_sql = """ CREATE TABLE IF NOT EXISTS trips ( trip_id INTEGER PRIMARY KEY, taxi_type INTEGER, vendor_id INTEGER, pickup_datetime TEXT, dropoff_datetime TEXT, passenger_count INTEGER, trip_distance REAL, pickup_longitude REAL, pickup_latitude REAL, pickup_location_id INTEGER, dropoff_longitude REAL, dropoff_latitude REAL, dropoff_location_id INTEGER, trip_duration REAL, trip_pace REAL, trip_straightline REAL, trip_windingfactor REAL ); """ indexes = ['CREATE INDEX IF NOT EXISTS trips_pickup_datetime ON trips ' '(pickup_datetime);'] create_table(db_path, 'trips', create_sql, indexes=indexes, overwrite=overwrite, verbose=verbose) # load, clean, and import taxi files into table import_num = 0 for file in files: if verbose >= 1: output('Started importing ' + file + '.') if taxi_type == 'fhv': df = pd.DataFrame({'taxi_type': []}) elif taxi_type == 'green': df = pd.DataFrame({'taxi_type': []}) elif taxi_type == 'yellow': df, year, month = load_yellow(dl_dir + file, nrows=nrows, usecols=usecols, verbose=verbose) df = clean_yellow(df, year, month, verbose=verbose) import_num += 1 else: output('Unknown taxi_type.', fn_str='import_trips') df = pd.DataFrame({'taxi_type': []}) df_to_table(db_path, df, table='trips', overwrite=False, verbose=verbose) if verbose >= 1: output('Imported ' + file + '.') output('Finished importing ' + str(import_num) + ' files.') return dl_num, import_num
15,157
def _delete_br_cfg_file(bridge):
    """ Deletes the config file for the specified bridge.

    Args:
        bridge (str) bridge name
    """
    opsys = platform.dist()[0]
    LOG.debug('OS: ' + opsys)
    if opsys not in ('Ubuntu', 'redhat'):
        LOG.error('Unsupported Operating System')
        sys.exit('Unsupported Operating System')

    if opsys == 'Ubuntu':
        if os.path.exists('/etc/network/interfaces.d/' + bridge):
            LOG.info('Deleting bridge config file {}'.format(bridge))
            os.system('sudo rm /etc/network/interfaces.d/{}'.format(bridge))
        return

    LOG.error('Support for Red Hat not yet implemented')
    sys.exit('Support for Red Hat not yet implemented')
15,158
def main(): """ Load point clouds and visualize """ pcds_down = load_point_clouds(pcd_folder, pointcloud_ext, voxel_size) if showPlots: o3d.visualization.draw_geometries(pcds_down, zoom=0.7, front=[0.4257, -0.2125, -0.8795], lookat=[2.6172, 2.0475, 1.532], up=[-0.0694, -0.9768, 0.2024]) """ Full registration """ print("Full registration ...") if verbose: with o3d.utility.VerbosityContextManager( o3d.utility.VerbosityLevel.Debug) as cm: pose_graph = full_registration(pcds_down, max_correspondence_distance_coarse, max_correspondence_distance_fine, pair_registration_method) else: pose_graph = full_registration(pcds_down, max_correspondence_distance_coarse, max_correspondence_distance_fine, pair_registration_method) """ Open3D uses the function global_optimization to perform pose graph optimization. """ """ Pose graph """ print("Optimizing PoseGraph ...") option = o3d.pipelines.registration.GlobalOptimizationOption( max_correspondence_distance=max_correspondence_distance_fine, edge_prune_threshold=0.25, reference_node=0) if verbose: with o3d.utility.VerbosityContextManager( o3d.utility.VerbosityLevel.Debug) as cm: o3d.pipelines.registration.global_optimization( pose_graph, o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(), o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(), option) else: o3d.pipelines.registration.global_optimization( pose_graph, o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(), o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(), option) """ Visualize optimization The transformed point clouds are listed and visualized using draw_geometries. """ print("Transform points and display") for point_id in range(len(pcds_down)): print(pose_graph.nodes[point_id].pose) pcds_down[point_id].transform(pose_graph.nodes[point_id].pose) if showPlots: o3d.visualization.draw_geometries(pcds_down, zoom=0.7, front=[0.4257, -0.2125, -0.8795], lookat=[2.6172, 2.0475, 1.532], up=[-0.0694, -0.9768, 0.2024]) """ Make a combined point cloud. Visualize the multiway total point clouds """ pcds = load_point_clouds(pcd_folder, pointcloud_ext, voxel_size) pcd_combined = o3d.geometry.PointCloud() for point_id in range(len(pcds)): pcds[point_id].transform(pose_graph.nodes[point_id].pose) pcd_combined += pcds[point_id] pcd_combined_down = pcd_combined.voxel_down_sample(voxel_size=voxel_size) o3d.io.write_point_cloud("multiway_registration.pcd", pcd_combined_down) if showPlots: o3d.visualization.draw_geometries([pcd_combined_down], zoom=0.7, front=[0.4257, -0.2125, -0.8795], lookat=[2.6172, 2.0475, 1.532], up=[-0.0694, -0.9768, 0.2024]) """ Save MAT file of the transformations """ allTransformations = np.empty([4,4, len(pcds_down)], np.float64) for point_id in range(len(pcds_down)): allTransformations[:,:, point_id] = pose_graph.nodes[point_id].pose # Save to MATLAB MAT file sio.savemat('transMat.mat', {'tforms': allTransformations})
15,159
def householder(h_v: Vector) -> Matrix: """Get Householder transformation Matrix""" return Matrix.identity(h_v.size()).subtract(2 * h_v * h_v.transpose() / (h_v * h_v))
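As a sketch of the same construction H = I - 2*v*v^T/(v^T*v) without the custom Matrix/Vector classes used above, here is a NumPy version; the array-based interface is an assumption for the example only.

import numpy as np

def householder_np(v):
    """Householder reflection H = I - 2 v v^T / (v^T v) for a 1-D array v (NumPy sketch)."""
    v = np.asarray(v, dtype=float)
    return np.eye(v.size) - 2.0 * np.outer(v, v) / v.dot(v)

H = householder_np([1.0, 2.0, 2.0])
print(H @ np.array([1.0, 2.0, 2.0]))  # reflects v onto -v: approximately [-1., -2., -2.]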
15,160
def save_crater_records(xcoord_data, ycoord_data, radius,
                        xcoord_canvas, ycoord_canvas, scaler):
    """Opens a file, or creates it if it doesn't already exist, and appends
    the x and y coords, and radius found from selection.

    Parameters
    ----------
    xcoord_data : string/float
        The x-coordinate of crater center, in pixels.
    ycoord_data : string/float
        The y-coordinate of crater center, in pixels.
    radius : string/float
        The radius of the crater, in pixels.
    xcoord_canvas : string/float
        The x-coordinate of crater center on matplotlib canvas.
    ycoord_canvas : string/float
        The y-coordinate of crater center on matplotlib canvas.
    scaler : string/float
        The scaling factor to reduce size so it can fit into the matplotlib window.
    """
    with open(path + 'outputs/' + body + '/' + filename, "a+") as f:
        f.write(str(xcoord_data) + "\t")
        f.write(str(ycoord_data) + "\t")
        f.write(str(radius) + "\t")
        f.write(str(xcoord_canvas) + "\t")
        f.write(str(ycoord_canvas) + "\t")
        f.write(str(scaler))
        f.write("\n")
15,161
def _suppression_loop_body(boxes, iou_threshold, output_size, idx): """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing induction variable. Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable. """ num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE batch_size = tf.shape(boxes)[0] # Iterates over tiles that can possibly suppress the current tile. box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0], [batch_size, _NMS_TILE_SIZE, 4]) _, box_slice, _, _ = tf.while_loop( lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, _cross_suppression, [boxes, box_slice, iou_threshold, tf.constant(0)]) # Iterates over the current tile to compute self-suppression. iou = box_utils.bbox_overlap(box_slice, box_slice) mask = tf.expand_dims( tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape( tf.range(_NMS_TILE_SIZE), [-1, 1]), 0) iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) suppressed_iou, _, _, _ = tf.while_loop( lambda _iou, loop_condition, _iou_sum, _: loop_condition, _self_suppression, [iou, tf.constant(True), tf.reduce_sum(iou, [1, 2]), iou_threshold]) suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0 box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) # Uses box_slice to update the input boxes. mask = tf.reshape( tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) boxes = tf.tile(tf.expand_dims( box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape( boxes, [batch_size, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask) boxes = tf.reshape(boxes, [batch_size, -1, 4]) # Updates output_size. output_size += tf.reduce_sum( tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1]) return boxes, iou_threshold, output_size, idx + 1
15,162
def calculate_invalidation_digest(requirements: Iterable[str]) -> str: """Returns an invalidation digest for the given requirements.""" m = hashlib.sha256() inputs = { # `FrozenOrderedSet` deduplicates while keeping ordering, which speeds up the sorting if # the input was already sorted. "requirements": sorted(FrozenOrderedSet(requirements)), } m.update(json.dumps(inputs).encode("utf-8")) return m.hexdigest()
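A quick illustration of how the digest ignores ordering and duplicates; the requirement strings below are invented for the example.

d1 = calculate_invalidation_digest(["requests==2.28.1", "attrs", "requests==2.28.1"])
d2 = calculate_invalidation_digest(["attrs", "requests==2.28.1"])
assert d1 == d2  # requirements are deduplicated and sorted before hashing, so the digests match
print(d1)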
15,163
def to_base_str(n, base): """Converts a number n into base `base`.""" convert_string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" if n < base: return convert_string[n] else: return to_base_str(n // base, base) + convert_string[n % base]
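A few example conversions (values chosen arbitrarily):

print(to_base_str(255, 16))  # 'FF'
print(to_base_str(7, 2))     # '111'
print(to_base_str(35, 36))   # 'Z'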
15,164
def number_of_photons(i, n=6):
    """Check if number of photons in a sample is higher than n (default value is 6)"""
    bitstring = tuple(i)
    return sum(bitstring) > n
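Example calls with made-up photon-count samples:

print(number_of_photons((1, 0, 2, 3)))    # False: 6 photons is not strictly greater than 6
print(number_of_photons((4, 3, 1)))       # True: 8 photons
print(number_of_photons([1, 1, 1], n=2))  # True: threshold lowered to 2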
15,165
def close_browser(driver): """ Close Selenium WebDriver Args: driver (Selenium WebDriver) """ logger.info("Close browser") take_screenshot(driver) driver.close()
15,166
def _build_conflicts_from_states( trackers: List[TrackerWithCachedStates], domain: Domain, max_history: Optional[int], conflicting_state_action_mapping: Dict[int, Optional[List[Text]]], tokenizer: Optional[Tokenizer] = None, ) -> List["StoryConflict"]: """Builds a list of `StoryConflict` objects for each given conflict. Args: trackers: Trackers that contain the states. domain: The domain object. max_history: Number of turns to take into account for the state descriptions. conflicting_state_action_mapping: A dictionary mapping state-hashes to a list of actions that follow from each state. tokenizer: A tokenizer to tokenize the user messages. Returns: A list of `StoryConflict` objects that describe inconsistencies in the story structure. These objects also contain the history that leads up to the conflict. """ # Iterate once more over all states and note the (unhashed) state, # for which a conflict occurs conflicts = {} for element in _sliced_states_iterator(trackers, domain, max_history, tokenizer): hashed_state = element.sliced_states_hash if hashed_state in conflicting_state_action_mapping: if hashed_state not in conflicts: conflicts[hashed_state] = StoryConflict(element.sliced_states) conflicts[hashed_state].add_conflicting_action( action=str(element.event), story_name=element.tracker.sender_id ) # Return list of conflicts that arise from unpredictable actions # (actions that start the conversation) return [ conflict for (hashed_state, conflict) in conflicts.items() if conflict.conflict_has_prior_events ]
15,167
def recombine_dna_ocx1(dna1: DNA, dna2: DNA, i1: int, i2: int) -> None: """Ordered crossover.""" copy1 = dna1.copy() replace_dna_ocx1(dna1, dna2, i1, i2) replace_dna_ocx1(dna2, copy1, i1, i2)
15,168
def inception_d(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    """
    Parameters
    ----------
    input_layer : Lasagne layer
        Input layer to the inception block.
    nfilt : sequence of sequences of int
        Filter counts per branch: nfilt[0] holds the two counts for the
        first branch, nfilt[1] the four counts for the second branch.

    Returns
    -------
    ConcatLayer
        Concatenation of the two convolutional branches and the pooling branch.
    """
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
15,169
def generate_response(response, output):
    """
    Build a result dictionary from a command response.

    :param response: response object or dict returned by the executed command
    :param output: captured output of the command
    :return: dictionary with 'status', 'command' and 'output' keys
    """
    status, command = None, None
    if isinstance(response, dict):
        status = response.get('ok', None)
        command = response.get('command', None)
    else:
        status = getattr(response, 'ok', None)
        command = getattr(response, 'command', None)

    return {
        'status': 'successful' if status else 'failed',
        'command': command,
        'output': output
    }
15,170
def check_add_predecessor(data_predecessor_str_set, xml_data_list, xml_chain_list, output_xml): """ Check if each string in data_predecessor_str_set is corresponding to an actual Data object, create new [Data, predecessor] objects lists for object's type : Data. Send lists to add_predecessor() to write them within xml and then returns update_list from it. Parameters: data_predecessor_str_set ([str]) : Lists of string from jarvis cell xml_data_list ([Data]) : Data list from xml parsing xml_chain_list ([View]) : View list from xml parsing output_xml (GenerateXML object) : XML's file object Returns: update ([0/1]) : 1 if update, else 0 """ data_predecessor_list = [] allocated_item_list = [] # Filter input string data_predecessor_str_list = shared_orchestrator.cut_string_list(data_predecessor_str_set) # Create data names list already in xml xml_data_name_list = get_objects_names(xml_data_list) is_elem_found = False for elem in data_predecessor_str_list: is_elem_found = True if elem[0] not in xml_data_name_list: is_elem_found = False if elem[1] not in xml_data_name_list: print(f"{elem[0]} and {elem[1]} do not exist") else: print(f"{elem[0]} does not exist") if elem[0] in xml_data_name_list: if elem[1] not in xml_data_name_list: is_elem_found = False print(f"{elem[1]} does not exist") if is_elem_found: for d, p in data_predecessor_str_list: predecessor = None selected_data = None existing_predecessor_id_list = [] for data in xml_data_list: if d == data.name: selected_data = data for existing_predecessor in data.predecessor_list: existing_predecessor_id_list.append(existing_predecessor.id) for da in xml_data_list: if p == da.name and da.id not in existing_predecessor_id_list: predecessor = da if predecessor is not None and selected_data is not None: data_predecessor_list.append([selected_data, predecessor]) allocation_chain_1 = shared_orchestrator.check_add_allocated_item(d, xml_data_list, xml_chain_list) if allocation_chain_1: allocated_item_list.append(allocation_chain_1) allocation_chain_2 = shared_orchestrator.check_add_allocated_item(p, xml_data_list, xml_chain_list) if allocation_chain_2: allocated_item_list.append(allocation_chain_2) update = add_predecessor(data_predecessor_list, xml_data_list, output_xml) shared_orchestrator.add_allocation({5: allocated_item_list}, output_xml) return update
15,171
def try_wrapper(func, *args, ret_=None, msg_="", verbose_=True, **kwargs):
    """Wrap ``func(*args, **kwargs)`` with ``try-`` and ``except`` blocks.

    Args:
        func (callable)  : function to call.
        args (tuple)     : ``*args`` for ``func``.
        kwargs (dict)    : ``**kwargs`` for ``func``.
        ret_ (any)       : default return value (returned if ``func`` raises).
        msg_ (str)       : message to print.
        verbose_ (bool)  : Whether to print the message or not. (default= ``True``)

    Examples:
        >>> from gummy.utils import try_wrapper
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 2, msg_="divide")
        * Succeeded to divide
        >>> ret
        0.5
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 0, msg_="divide")
        * Failed to divide (ZeroDivisionError: division by zero)
        >>> ret is None
        True
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 0, ret_=1, msg_="divide")
        * Failed to divide (ZeroDivisionError: division by zero)
        >>> ret is None
        False
        >>> ret
        1
    """
    try:
        ret_ = func(*args, **kwargs)
        prefix = toGREEN("Succeeded to ")
        suffix = ""
    except Exception as e:
        prefix = toRED("Failed to ")
        suffix = f" ({toRED(e.__class__.__name__)}: {toACCENT(e)})"
    if verbose_:
        print("* " + prefix + msg_ + suffix)
    return ret_
15,172
def main(): """Execute Nornir Script.""" print_result(west_region.run(task=example_global_lock)) print_result(west_region.run(task=example_edit_config)) print_result(west_region.run(task=example_unlock))
15,173
def hnet_bsd(args, x, train_phase): """High frequency convolutions are unstable, so get rid of them""" # Sure layers weight & bias order = 1 nf = int(args.n_filters) nf2 = int((args.filter_gain)*nf) nf3 = int((args.filter_gain**2)*nf) nf4 = int((args.filter_gain**3)*nf) bs = args.batch_size fs = args.filter_size nch = args.n_channels nr = args.n_rings tp = train_phase std = args.std_mult x = tf.reshape(x, shape=[bs,args.height,args.width,1,1,3]) fm = {} # Convolutional Layers with tf.name_scope('stage1') as scope: cv1 = hl.conv2d(x, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_1') cv1 = hl.non_linearity(cv1, name='1_1') cv2 = hl.conv2d(cv1, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_2') cv2 = hl.batch_norm(cv2, tp, name='bn1') mags = to_4d(hl.stack_magnitudes(cv2)) fm[1] = linear(mags, 1, 1, name='sw1') with tf.name_scope('stage2') as scope: cv3 = hl.mean_pooling(cv2, ksize=(1,2,2,1), strides=(1,2,2,1)) cv3 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_1') cv3 = hl.non_linearity(cv3, name='2_1') cv4 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_2') cv4 = hl.batch_norm(cv4, train_phase, name='bn2') mags = to_4d(hl.stack_magnitudes(cv4)) fm[2] = linear(mags, 1, 1, name='sw2') with tf.name_scope('stage3') as scope: cv5 = hl.mean_pooling(cv4, ksize=(1,2,2,1), strides=(1,2,2,1)) cv5 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_1') cv5 = hl.non_linearity(cv5, name='3_1') cv6 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_2') cv6 = hl.batch_norm(cv6, train_phase, name='bn3') mags = to_4d(hl.stack_magnitudes(cv6)) fm[3] = linear(mags, 1, 1, name='sw3') with tf.name_scope('stage4') as scope: cv7 = hl.mean_pooling(cv6, ksize=(1,2,2,1), strides=(1,2,2,1)) cv7 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_1') cv7 = hl.non_linearity(cv7, name='4_1') cv8 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_2') cv8 = hl.batch_norm(cv8, train_phase, name='bn4') mags = to_4d(hl.stack_magnitudes(cv8)) fm[4] = linear(mags, 1, 1, name='sw4') with tf.name_scope('stage5') as scope: cv9 = hl.mean_pooling(cv8, ksize=(1,2,2,1), strides=(1,2,2,1)) cv9 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_1') cv9 = hl.non_linearity(cv9, name='5_1') cv10 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_2') cv10 = hl.batch_norm(cv10, train_phase, name='bn5') mags = to_4d(hl.stack_magnitudes(cv10)) fm[5] = linear(mags, 1, 1, name='sw5') fms = {} side_preds = [] xsh = tf.shape(x) with tf.name_scope('fusion') as scope: for key in fm.keys(): fms[key] = tf.image.resize_images(fm[key], tf.stack([xsh[1], xsh[2]])) side_preds.append(fms[key]) side_preds = tf.concat(axis=3, values=side_preds) fms['fuse'] = linear(side_preds, 1, 1, bias_init=0.01, name='side_preds') return fms
15,174
def hdrValFilesToTrainingData(input_filebase: str, target_varname: str): """Extracts useful info from input_filebase.hdr and input_filebase.val Args: input_filebase -- points to two files target_varname -- this will be the y, and the rest will be the X Returns: Xy: 2d array [#vars][#samples] -- transpose of the data from .val file X: 2d array [#full_input_vars][#samples] -- Xy, except y y: 1d array [#samples] -- the vector in Xy corr. to target_varname all_varnames: List[str] -- essentially what .hdr file holds input_varnames: List[str] -- all_varnames, minus target_varname """ # retrieve varnames all_varnames = asciiRowToStrings(input_filebase + ".hdr") # split apart input and output labels x_rows, y_rows, input_varnames = [], [], [] for (row, varname) in enumerate(all_varnames): if varname == target_varname: y_rows.append(row) else: x_rows.append(row) input_varnames.append(varname) assert len(y_rows) == 1, "expected to find one and only one '%s', not: %s" % ( target_varname, all_varnames, ) # split apart input and output data Xy_tr = asciiTo2dArray(input_filebase + ".val") Xy = numpy.transpose(Xy_tr) X = numpy.take(Xy, x_rows, 0) y = numpy.take(Xy, y_rows, 0)[0] assert X.shape[0] + 1 == Xy.shape[0] == len(input_varnames) + 1 == len(all_varnames) assert X.shape[1] == Xy.shape[1] == len(y) return Xy, X, y, all_varnames, input_varnames
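A usage sketch, assuming a hypothetical pair of files points.hdr / points.val in the whitespace-separated layout the docstring describes (variable names in the header, one sample per row in the values file); the exact file parsing depends on the asciiRowToStrings / asciiTo2dArray helpers not shown here.

# points.hdr:  x1 x2 y
# points.val:  one sample per row, e.g. "0.10 2.00 0.53"
Xy, X, y, all_varnames, input_varnames = hdrValFilesToTrainingData(
    "points", target_varname="y")
print(all_varnames)    # ['x1', 'x2', 'y']
print(input_varnames)  # ['x1', 'x2']
print(X.shape, y.shape)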
15,175
from contextlib import contextmanager


@contextmanager
def save_screenshot_on_exception(driver: WebDriver):
    """
    Context manager. Upon a ``WebDriverException`` exception, it saves a
    screenshot and re-raises the exception.

    :param driver: ``WebDriver`` instance
    """
    try:
        yield
    except WebDriverException as exc:
        save_screenshot(driver, str(exc))
        raise
15,176
def create_column(number_rows: int, column_type: ColumnType) -> pd.Series: """Creates a column with either duplicated values or not, and either of string or int type. :param number_rows: the number of rows in the data-frame. :param column_type: the type of the column. :returns: the data-frame. """ if column_type == ColumnType.UNIQUE_STRING: return pd.Series(range(number_rows)).astype(str) elif column_type == ColumnType.UNIQUE_INT: return pd.Series(range(number_rows)) elif column_type == ColumnType.WITH_DUPLICATES_STRING: return pd.Series(["a"] * number_rows) elif column_type == ColumnType.WITH_DUPLICATES_INT: return pd.Series([2] * number_rows) else: raise ValueError(f"Unknown column-type: {column_type}")
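Assuming ColumnType is the four-member enum referenced above, usage looks like:

unique_strings = create_column(5, ColumnType.UNIQUE_STRING)
print(unique_strings.tolist())  # ['0', '1', '2', '3', '4']

duplicate_ints = create_column(3, ColumnType.WITH_DUPLICATES_INT)
print(duplicate_ints.is_unique)  # False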
15,177
def abspath(url):
    """
    Get a full path to a file or file URL

    See os.path.abspath
    """
    if url.startswith('file://'):
        url = url[len('file://'):]
    return os.path.abspath(url)
15,178
def test_password_modify(client): """ Test modifying password with simple modify operation and password policy. """ cli = LDAPClient(client.url) user_dn = "cn=jeff,ou=nerdherd,dc=bonsai,dc=test" cli.set_password_policy(True) cli.set_credentials("SIMPLE", user_dn, "p@ssword") conn, _ = cli.connect() entry = conn.search(user_dn, 0)[0] try: entry["userPassword"] = "newpassword" entry.modify() except Exception as exc: assert isinstance(exc, bonsai.errors.PasswordModNotAllowed) user_dn = "cn=skip,ou=nerdherd,dc=bonsai,dc=test" cli.set_credentials("SIMPLE", user_dn, "p@ssword") conn, _ = cli.connect() entry = conn.search(user_dn, 0)[0] try: entry["userPassword"] = "short" entry.modify() except Exception as exc: assert isinstance(exc, bonsai.errors.PasswordTooShort) try: entry["userPassword"] = "p@ssword" entry.modify() except Exception as exc: assert isinstance(exc, bonsai.errors.PasswordInHistory)
15,179
def PumpEvents(timeout=-1, hevt=None, cb=None): """This following code waits for 'timeout' seconds in the way required for COM, internally doing the correct things depending on the COM appartment of the current thread. It is possible to terminate the message loop by pressing CTRL+C, which will raise a KeyboardInterrupt. """ # XXX Should there be a way to pass additional event handles which # can terminate this function? # XXX XXX XXX # # It may be that I misunderstood the CoWaitForMultipleHandles # function. Is a message loop required in a STA? Seems so... # # MSDN says: # # If the caller resides in a single-thread apartment, # CoWaitForMultipleHandles enters the COM modal loop, and the # thread's message loop will continue to dispatch messages using # the thread's message filter. If no message filter is registered # for the thread, the default COM message processing is used. # # If the calling thread resides in a multithread apartment (MTA), # CoWaitForMultipleHandles calls the Win32 function # MsgWaitForMultipleObjects. # Timeout expected as float in seconds - *1000 to miliseconds # timeout = -1 -> INFINITE 0xFFFFFFFF; # It can also be a callable which should return an amount in seconds if hevt is None: hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None) handles = _handles_type(hevt) RPC_S_CALLPENDING = -2147417835 # @ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint) def HandlerRoutine(dwCtrlType): if dwCtrlType == 0: # CTRL+C ctypes.windll.kernel32.SetEvent(hevt) return 1 return 0 HandlerRoutine = ( ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)(HandlerRoutine) ) ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1) while True: try: tmout = timeout() # check if it's a callable except TypeError: tmout = timeout # it seems to be a number if tmout > 0: tmout *= 1000 tmout = int(tmout) try: res = ctypes.oledll.ole32.CoWaitForMultipleHandles( 0, # COWAIT_FLAGS int(tmout), # dwtimeout len(handles), # number of handles in handles handles, # handles array # pointer to indicate which handle was signaled ctypes.byref(ctypes.c_ulong()) ) except WindowsError as details: if details.args[0] == RPC_S_CALLPENDING: # timeout expired if cb is not None: cb() continue else: ctypes.windll.kernel32.CloseHandle(hevt) ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) raise # something else happened else: ctypes.windll.kernel32.CloseHandle(hevt) ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) raise KeyboardInterrupt # finally: # if False: # ctypes.windll.kernel32.CloseHandle(hevt) # ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) # break
15,180
def check_mix_up(method): """Wrapper method to check the parameters of mix up.""" @wraps(method) def new_method(self, *args, **kwargs): [batch_size, alpha, is_single], _ = parse_user_args(method, *args, **kwargs) check_value(batch_size, (1, FLOAT_MAX_INTEGER)) check_positive(alpha, "alpha") type_check(is_single, (bool,), "is_single") return method(self, *args, **kwargs) return new_method
15,181
def generate_motif_distances(cluster_regions, region_sizes, motifs, motif_location, species):
    """
    Generates all motif distances for a list of motifs

    returns list[motif_distances]

    motif_location - str location that motifs are stored
    species - str species (for finding stored motifs)
    motifs - list of motifs to analyze
    cluster_regions - dict from parse clusters
    """
    motif_distance_list = []
    # given a specific motif in a motif file, generate distances from that motif
    for motif in motifs:
        mf = "motif_" + motif + ".BED"
        mfgz = "motif_" + motif + ".BED.gz"

        motif_tool = None
        if os.path.exists(os.path.join(motif_location, species, mf)):
            motif_tool = pybedtools.BedTool(os.path.join(motif_location, species, mf))
        elif os.path.exists(os.path.join(motif_location, species, mfgz)):
            motif_tool = pybedtools.BedTool(os.path.join(motif_location, species, mfgz))
        else:
            print("MOTIF BED FILE for motif: %s is not available, please build it" % mf)

        if motif_tool is not None:
            motif_distance_list.append(calculate_motif_distance(cluster_regions, region_sizes, motif_tool))

    return motif_distance_list
15,182
def test_list_users_rabbitmq3(): """ Test if it return a list of users based off of rabbitmqctl user_list. """ mock_run = MagicMock( return_value={ "retcode": 0, "stdout": "guest\t[administrator user]\r\nother\t[a b]\r\n", "stderr": "", } ) with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}): assert rabbitmq.list_users() == { "guest": ["administrator", "user"], "other": ["a", "b"], }
15,183
def sig_to_vrs(sig): """ Split a signature into r, s, v components """ r = sig[:32] s = sig[32:64] v = int(encode_hex(sig[64:66]), 16) # Ethereum magic number if v in (0, 1): v += 27 return [r, s, v]
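A sketch of splitting a placeholder 65-byte signature; encode_hex is assumed to be the usual Ethereum helper that hex-encodes bytes.

# Placeholder signature: r (32 bytes) + s (32 bytes) + v (1 byte).
sig = bytes(range(32)) + bytes(range(32, 64)) + b'\x01'
r, s, v = sig_to_vrs(sig)
print(len(r), len(s), v)  # 32 32 28 -- a v of 0/1 is shifted by the magic number 27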
15,184
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
  each element is a value and the corresponding index within the tensor is the
  key.

  Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `mapping` cannot have duplicates, otherwise when executing the
  table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  mapping_string = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      mapping_string, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    mapping: A 1-D string `Tensor` that specifies the strings to map from
      indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an `int64` index `Tensor` to its associated
    string values.

  Raises:
    ValueError: when `mapping` is not set.
  """

  if mapping is None:
    raise ValueError("mapping must be specified.")

  return lookup_ops.index_to_string_table_from_tensor(
      vocabulary_list=mapping, default_value=default_value, name=name)
15,185
def test_profile_reader_no_aws_config(monkeypatch, tmp_path, capsys): """Test profile reader without aws config file.""" fake_get_path_called = 0 def fake_get_path(): nonlocal fake_get_path_called fake_get_path_called += 1 return tmp_path monkeypatch.setattr(awsswitch, "get_path", fake_get_path) awsswitch.app() assert fake_get_path_called == 1 captured = capsys.readouterr() output = captured.out.split("\n") assert output[0] == "AWS profile switcher" err = captured.err.split("\n") assert err[0] == "AWS config path does not exist."
15,186
def get_exclusion_type(exclusion): """ Utility function to get an exclusion's type object by finding the exclusion type that has the given exclusion's code. :param exclusion: The exclusion to find the type for. :return: The exclusion type if found, None otherwise. """ for exclusion_type in EXCLUSION_TYPES: if exclusion_type.code == exclusion.code: return exclusion_type return None
15,187
def generatePlans(update):
    """
        For an update object provided this function references the
        updateModuleList which lets all exc modules determine if they need
        to add functions to change the state of the system when new
        chutes are added to the OS.

        Returns:
            True on error, meaning we should stop with this update plan
    """
    out.header('Generating %r\n' % (update))

    # Iterate through the list provided for this update type
    for mod in update.updateModuleList:
        if(mod.generatePlans(update)):
            return True
15,188
def get_str_by_path(payload: Dict, path: str) -> str: """Return the string value from the dict for the path using dpath library.""" if payload is None: return None try: raw = dpath_util.get(payload, path) return str(raw) if raw is not None else raw except (IndexError, KeyError, TypeError): return None
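For example, with dpath's default '/' separator (the payload below is illustrative only):

payload = {"filing": {"header": {"name": "annualReport", "year": 2021}}}
print(get_str_by_path(payload, "filing/header/name"))  # 'annualReport'
print(get_str_by_path(payload, "filing/header/year"))  # '2021' (coerced to str)
print(get_str_by_path(payload, "filing/missing"))      # None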
15,189
def get_token_from_code(request): """ Get authorization code the provider sent back to you Find out what URL to hit to get tokens that allow you to ask for things on behalf of a user. Prepare and send a request to get tokens. Parse the tokens using the OAuth 2 client """ code = request.args.get("code") redirect_uri = request.args.get("redirect_uri") provider_cfg = requests.get(DISCOVERY_URL).json() token_endpoint = provider_cfg["token_endpoint"] token_url, headers, body = client.prepare_token_request( token_endpoint, authorization_response=request.url, redirect_url=redirect_uri, code=code, include_client_id=False, ) token_response = requests.post( token_url, headers=headers, data=body, auth=(CLIENT_ID, SECRET), ) token_response = token_response.json() client.parse_request_body_response(json.dumps(token_response)) return token_response
15,190
def DeWeStartCAN(nBoardNo, nChannelNo): """Dewe start CAN""" if f_dewe_start_can is not None: return f_dewe_start_can(c_int(nBoardNo), c_int(nChannelNo)) else: return -1
15,191
def register_onnx_magics(ip=None): # pragma: no cover """ Register magics function, can be called from a notebook. @param ip from ``get_ipython()`` """ if ip is None: from IPython import get_ipython ip = get_ipython() ip.register_magics(OnnxNotebook)
15,192
def mw_wo_sw(mol, ndigits=2): """Molecular weight without salt and water :param ndigits: number of digits """ cp = clone(mol) # Avoid modification of original object remover.remove_water(cp) remover.remove_salt(cp) return round(sum(a.mw() for _, a in cp.atoms_iter()), ndigits)
15,193
def get_contact_list_info(contact_list): """ Get contact list info out of contact list In rgsummary, this looks like: <ContactLists> <ContactList> <ContactType>Administrative Contact</ContactType> <Contacts> <Contact> <Name>Matyas Selmeci</Name> ... </Contact> </Contacts> </ContactList> ... </ContactLists> and the arg `contact_list` is the contents of a single <ContactList> If vosummary, this looks like: <ContactTypes> <ContactType> <Type>Miscellaneous Contact</Type> <Contacts> <Contact> <Name>...</Name> ... </Contact> ... </Contacts> </ContactType> ... </ContactTypes> and the arg `contact_list` is the contents of <ContactTypes> Returns: a list of dicts that each look like: { 'ContactType': 'Administrative Contact', 'Name': 'Matyas Selmeci', 'Email': '...', ... } """ contact_list_info = [] for contact in contact_list: if contact.tag == 'ContactType' or contact.tag == 'Type': contact_list_type = contact.text.lower() if contact.tag == 'Contacts': for con in contact: contact_info = { 'ContactType' : contact_list_type } for contact_contents in con: contact_info[contact_contents.tag] = contact_contents.text contact_list_info.append(contact_info) return contact_list_info
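A small sketch of feeding one parsed <ContactList> element through the function; the XML fragment is trimmed and made up for the example.

import xml.etree.ElementTree as ET

fragment = (
    "<ContactList>"
    "<ContactType>Administrative Contact</ContactType>"
    "<Contacts>"
    "<Contact><Name>Matyas Selmeci</Name><Email>example@example.org</Email></Contact>"
    "</Contacts>"
    "</ContactList>"
)
contact_list = ET.fromstring(fragment)
print(get_contact_list_info(contact_list))
# [{'ContactType': 'administrative contact', 'Name': 'Matyas Selmeci',
#   'Email': 'example@example.org'}]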
15,194
def enableLegacyLDAP(host, args, session):
    """
         Called by the ldap function. Configures LDAP on legacy systems.

         @param host: string, the hostname or IP address of the bmc
         @param args: contains additional arguments used by the ldap subcommand
         @param session: the active session to use
         @param args.json: boolean, if this flag is set to true, the output
                will be provided in json format for programmatic consumption
    """
    url = 'https://' + host + '/xyz/openbmc_project/user/ldap/action/CreateConfig'

    scope = {
        'sub': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.sub',
        'one': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.one',
        'base': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.base'
    }

    serverType = {
        'ActiveDirectory': 'xyz.openbmc_project.User.Ldap.Create.Type.ActiveDirectory',
        'OpenLDAP': 'xyz.openbmc_project.User.Ldap.Create.Type.OpenLdap'
    }

    data = {"data": [args.uri, args.bindDN, args.baseDN, args.bindPassword,
                     scope[args.scope], serverType[args.serverType]]}

    try:
        res = session.post(url, headers=jsonHeader, json=data, verify=False,
                           timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    except(requests.exceptions.ConnectionError) as err:
        return connectionErrHandler(args.json, "ConnectionError", err)

    return res.text
15,195
def retrieve_panelist_appearance_counts(panelist_id: int,
                                        database_connection: mysql.connector.connect
                                       ) -> Dict:
    """Retrieve yearly appearance count for the requested panelist ID"""
    cursor = database_connection.cursor()
    query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE pm.panelistid = %s AND s.bestof = 0 "
             "AND s.repeatshowid IS NULL "
             "GROUP BY p.panelist, YEAR(s.showdate) "
             "ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
    cursor.execute(query, (panelist_id, ))
    result = cursor.fetchall()
    cursor.close()

    if not result:
        return None

    appearances = OrderedDict()
    total_appearances = 0
    for row in result:
        appearances[row[0]] = row[1]
        total_appearances += row[1]

    appearances["total"] = total_appearances
    return appearances
15,196
def set_blueprint(blueprint: Blueprint): """ Plugins factory method to set a blueprint. :param blueprint: """ global bp bp = blueprint from . import routes
15,197
def chart(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2008, 1, 1),
        end=datetime.datetime(2009, 12, 31),  # data stops at 2013/1/1
        normalize=True,
        ):
    """Display a graph of the price history for the list of ticker symbols provided

    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
      normalize (bool): Whether to normalize prices to 1 at the start of the time series.
    """
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = [s.upper() for s in symbols]
    timeofday = datetime.timedelta(hours=16)
    timestamps = du.getNYSEdays(start, end, timeofday)

    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))

    na_price = d_data['close'].values
    if normalize:
        na_price /= na_price[0, :]
    plt.clf()
    plt.plot(timestamps, na_price)
    plt.legend(symbols)
    plt.ylabel('Adjusted Close')
    plt.xlabel('Date')
    plt.grid(True)  # enable the grid before saving so it appears in the PDF
    plt.savefig('chart.pdf', format='pdf')
    plt.show()
    return na_price
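A hedged usage sketch; it assumes the QSTK-style helpers imported by the surrounding module (du, da, util) and their bundled historical price data are available.

import datetime

prices = chart(
    symbols=("AAPL", "GOOG", "$SPX"),
    start=datetime.datetime(2008, 6, 1),
    end=datetime.datetime(2009, 6, 1),
    normalize=True,
)
print(prices.shape)  # (number of trading days, number of symbols)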
15,198
def create_table(peak_id, chrom, pstart, pend, p_center, min_dist_hit, attrib_keys, min_pos, genom_loc, ovl_pf, ovl_fp, i): """Saves info of the hit in a tabular form to be written in the output table. """ if attrib_keys != ["None"]: # extract min_dist_content [dist, [feat, fstart, fend, strand, attrib_val]] = min_dist_hit # attrib_val.strip("\r").strip("\t").strip("\n") dist = max(dist) if isinstance(dist, list) else dist dist = '%d' % round(dist, 1) best_res = "\t".join(np.hstack([peak_id, chrom, pstart, p_center, pend, feat, fstart, fend, strand, min_pos, dist, genom_loc, str(ovl_pf), str(ovl_fp), attrib_val, str(i)])) return best_res + "\n" elif attrib_keys == ["None"]: [dist, [feat, fstart, fend, strand]] = min_dist_hit dist = max(dist) if isinstance(dist, list) else dist dist = '%d' % round(dist, 1) best_res = "\t".join([peak_id, chrom, pstart, p_center, pend, feat, fstart, fend, strand, min_pos, dist, genom_loc, str(ovl_pf), str(ovl_fp), str(i)]) return best_res + "\n"
15,199