content
stringlengths
22
815k
id
int64
0
4.91M
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='post', value=0.):
    """Pad each sequence to the same length.

    Pads to the length of the longest sequence, or to ``maxlen`` if given.
    Sequences longer than ``maxlen`` are truncated from the beginning
    ('pre') or the end ('post', default); shorter ones are padded with
    ``value`` before ('pre') or after ('post', default) the data.

    Arguments:
        sequences: list of lists where each element is a sequence.
        maxlen: int, maximum length.
        dtype: type to cast the resulting array to.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger
            than maxlen either in the beginning or in the end of the sequence.
        value: float, value to pad the sequences to the desired value.

    Returns:
        x: `numpy array` with dimensions (number_of_sequences, maxlen)

    Raises:
        ValueError: if ``padding`` or ``truncating`` is neither 'pre' nor 'post'.

    Credits: From Keras `pad_sequences` function.
    """
    lengths = [len(s) for s in sequences]
    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)

    x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            # Bug fix: the original interpolated `padding` here, so the error
            # reported the wrong (valid) argument instead of the bad one.
            raise ValueError("Truncating type '%s' not understood" % truncating)

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError("Padding type '%s' not understood" % padding)
    return x
35,700
def get_regions(service_name, region_cls=None, connection_cls=None):
    """Return a list of ``RegionInfo`` objects for the given service.

    Region data comes from the ``endpoints.json`` file (plus optional user
    overrides) via ``load_regions``.

    :param service_name: The name of the service to construct the
        ``RegionInfo`` objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
    :type service_name: string

    :param region_cls: (Optional) The class to use when constructing. By
        default, this is ``RegionInfo``.
    :type region_cls: class

    :param connection_cls: (Optional) The connection class for the
        ``RegionInfo`` object. Providing this allows the ``connect`` method
        on the ``RegionInfo`` to work. Default is ``None`` (no connection).
    :type connection_cls: class

    :returns: A list of configured ``RegionInfo`` objects
    :rtype: list
    """
    endpoints = load_regions()

    if service_name not in endpoints:
        raise BotoClientError(
            "Service '%s' not found in endpoints." % service_name
        )

    cls = RegionInfo if region_cls is None else region_cls
    # Build one region object per (name, endpoint) pair for this service.
    return [
        cls(name=region_name, endpoint=endpoint, connection_cls=connection_cls)
        for region_name, endpoint in endpoints.get(service_name, {}).items()
    ]
35,701
def sol_dec(day_of_year):
    """Calculate solar declination from the day of the year.

    Based on FAO equation 24 in Allen et al (1998).

    :param day_of_year: Day of year integer between 1 and 365 or 366).
    :return: solar declination [radians]
    :rtype: float
    """
    _check_doy(day_of_year)
    # Day-angle argument of FAO eq. 24.
    day_angle = (2.0 * math.pi / 365.0) * day_of_year - 1.39
    return 0.409 * math.sin(day_angle)
35,702
def print_mro(cls):
    """Print the Method Resolution Order of a class as a comma-separated list."""
    names = [klass.__name__ for klass in cls.__mro__]
    print(', '.join(names))
35,703
def configure_logging(conf):
    """Initialize and configure logging.

    Sets the root logger level from ``conf.loglevel`` (a case-insensitive
    level name such as "debug" or "INFO") and attaches stream handlers for
    stderr and/or stdout according to ``conf.logtostderr`` /
    ``conf.logtostdout``.
    """
    root_logger = logging.getLogger()
    # getattr maps the configured name onto the logging module's constant;
    # an unknown level name raises AttributeError.
    root_logger.setLevel(getattr(logging, conf.loglevel.upper()))
    if conf.logtostderr:
        add_stream_handler(root_logger, sys.stderr)
    if conf.logtostdout:
        add_stream_handler(root_logger, sys.stdout)
35,704
def makeTestSuiteV201111():
    """Set up test suite using v201111.

    Returns:
      TestSuite test suite using v201111.
    """
    suite = unittest.TestSuite()
    # Collect every test method of the v201111 network-service test case.
    suite.addTests(unittest.makeSuite(NetworkServiceTestV201111))
    return suite
35,705
def build_series(df):
    """
    Return a series tuple where:
    the first element is a list of dates,
    the second element is the series of the daily-type variables,
    the third element is the series of the current-type variables,
    the fourth element is the series of the cum-type variables.
    :param df: pd.DataFrame
    :return: tuple
    """
    dates = df[DATE_KEY].apply(lambda x: x.strftime(CHART_DATE_FMT)).tolist()
    # NOTE(review): in every sort below, x is one of the {"id","name","data"}
    # dicts built here, so x[DATE_KEY] raises KeyError unless DATE_KEY equals
    # one of those keys (probably intended to be "data") — confirm DATE_KEY.
    series_daily = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in DAILY_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    series_cum = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in CUM_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    series_current = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in NON_CUM_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    series = (dates, series_daily, series_current, series_cum)
    return series
35,706
def test_ap_recipe_does_not_exist():
    """Test raise error if recipe does not exist."""
    # argparse exits with code 2 on invalid arguments; pytest.raises
    # captures the SystemExit so the exit code can be asserted.
    with pytest.raises(SystemExit) as exit:
        cli.ap.parse_args('does_not_exit.cfg'.split())
    assert exit.type == SystemExit
    assert exit.value.code == 2
35,707
def lnZ(df_mcmc): """ Compute log Z(1) from PTMCMC traces stored in DataFrame. Parameters ---------- df_mcmc : pandas DataFrame, as outputted from run_ptmcmc. DataFrame containing output of a parallel tempering MCMC run. Only need to contain columns pertinent to computing ln Z, which are 'beta_int', 'lnlike', and 'beta'. Returns ------- output : float ln Z as computed by thermodynamic integration. This is equivalent to what is obtained by calling `sampler.thermodynamic_integration_log_evidence(fburnin=0)` where `sampler` is an emcee.PTSampler instance. Notes ----- .. This is useful when the DataFrame from a PTSampler is too large to store in RAM. """ # Average the log likelihood over the samples log_mean = np.zeros(len(df_mcmc['beta_ind'].unique())) for i, b in enumerate(df_mcmc['beta_ind'].unique()): log_mean[i] = df_mcmc['lnlike'][df_mcmc['beta_ind']==b].mean() # Set of betas (temperatures) betas = np.concatenate((np.array(df_mcmc['beta'].unique()), (0,))) # Approximate quadrature return np.dot(log_mean, -np.diff(betas))
35,708
def p2h(p, T=293., P0=1000., m=28.966, unit_p='mbar'):
    """
    Returns an elevation from barometric pressure

    Parameters
    ----------
    p: {float, array}
        barometric pressure in mbar or torr specified with unit_p
    T: float, optional
        Temperature in K
    P0: float, optional
        Pressure at reference altitude in hPa (default = 1000.)
    m: float, optional
        average mass of gas molecules in u (default = 28.966)
    unit_p: {[mbar], torr}, optional

    Returns
    -------
    h: {float, array}
        elevation relative to the P0 reference level (same shape as p).

    Source
    ------
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    if unit_p == 'torr':
        p = unit_conversion.torr2mbar(p)
    k = const.physical_constants['Boltzmann constant'][0]
    g = const.physical_constants['standard acceleration of gravity'][0]
    # Convert molecular mass from u (g/mol) to kg per molecule.
    m *= 1 / const.physical_constants['Avogadro constant'][0] / 1000.
    # Inverted isothermal barometric formula: h = ln(P0/p) * kT / (m g).
    h = (np.log(P0) - np.log(p)) * ((k * T) / (m * g))
    return h
35,709
def upon_teardown(f: Callable):
    """
    Use this decorator to mark your ploogin function as a handler to call
    upon teardown.

    :param f: the function to register for the TEARDOWN event.
    :return: a ``PlooginEventHandler`` wrapping ``f``.
    """
    return PlooginEventHandler(event=PlooginEvents.TEARDOWN, f=f)
35,710
def assertRegConditionalsForPpaRefModel(registration_requests, conditional_registration_data):
  """Merge conditional registration data into registration requests for the PPA model.

  PpaCreationModel requires the input registrationRequests to have
  'installationParam', but that parameter is removed for devices whose
  conditionals are pre-loaded. This helper copies 'installationParam'
  (and, when missing, 'cbsdCategory') into each request from the
  conditional entry matching on fccId + cbsdSerialNumber.

  Args:
    registration_requests: A list of individual CBSD registration requests
      (each of which is itself a dictionary); mutated in place.
    conditional_registration_data: A list of individual CBSD registration
      data that need to be preloaded into SAS (each of which is a
      dictionary). The fccId and cbsdSerialNumber fields are required,
      other fields are optional but required for ppa reference model.

  Raises:
    Exception: If no matching conditional entry supplies an
      installationParam for a request that lacks one.
  """
  for device in registration_requests:
    if 'installationParam' not in device:
      install_param_assigned = False
      for conditional_params in conditional_registration_data:
        # Check if FCC_ID+Serial_Number present in registrationRequest
        # and conditional_params match and add the 'installationParam'.
        if (conditional_params['fccId'] == device['fccId'] and
            conditional_params['cbsdSerialNumber'] == device['cbsdSerialNumber']):
          device.update({'installationParam': conditional_params['installationParam']})
          install_param_assigned = True
          # If the cbsdCategory is not present in the registration request,
          # take it from conditional_params. Bug fix: guard the lookup so a
          # conditional entry without 'cbsdCategory' no longer raises KeyError.
          if 'cbsdCategory' not in device and 'cbsdCategory' in conditional_params:
            device['cbsdCategory'] = conditional_params['cbsdCategory']
          break
      # Raise an exception if InstallationParam is not found in the conditionals.
      if not install_param_assigned:
        raise Exception("InstallationParam not found in conditionals for device "
                        "%s:%s" % (device['fccId'], device['cbsdSerialNumber']))
35,711
def get_train_test_indices_drone(df, frac, seed=None):
    """Split the indices of a binary, balanced-label DataFrame into two
    balanced subsets.

    Args:
        df (pd.DataFrame): {0,1}-labeled data (column 'label').
        frac (float): fraction of indices that go into the first subset.
        seed (int): random seed used as random state in np.random and as
            argument for the pandas sampling.

    Returns:
        tuple: (train_indices, test_indices) — lists of DataFrame row
        indices, each balanced between the two labels.
    """
    # Half of the train split comes from each label class.
    per_class = int(len(df) * frac / 2)
    positives = df[df['label'] == 1]
    negatives = df[df['label'] == 0]

    np.random.seed(seed)
    pos_train = positives.sample(n=per_class, random_state=seed)
    pos_test = positives.drop(pos_train.index)
    neg_train = negatives.sample(n=per_class, random_state=seed)
    neg_test = negatives.drop(neg_train.index)

    train_indices = list(neg_train.index) + list(pos_train.index)
    test_indices = list(neg_test.index) + list(pos_test.index)
    return train_indices, test_indices
35,712
def get_subs(choice, chatid, obj):
    """Return subtitle download links.

    Scrapes the yts-subs.com page for the chosen movie and collects up to
    56 (language, title) labels and matching yifysubtitles.org .zip links.

    :param choice: index (string/int) of the movie chosen by the user.
    :param chatid: chat identifier used by ``obj.get_url`` to resolve the page.
    :param obj: helper object providing ``get_url(chatid, choice)``.
    :return: (href, message) — parallel lists of download URLs and labels.
    :raises Exception: with message "Invalid url" when the HTTP request fails.
    """
    url = "https://yts-subs.com" + obj.get_url(chatid, int(choice))
    try:
        reponse = requests.get(url, headers=headers)
    except Exception as e:
        print(e)
        raise Exception("Invalid url")
    soup = BeautifulSoup(reponse.content, 'html5lib')
    table = soup.find('tbody')
    results = table.findAll('tr')
    href = []
    message = []
    for i, result in enumerate(results):
        link = result.find('a')['href']
        # Site links use /subtitles/..., the download host expects /subtitle/...
        link = link.replace('subtitles', 'subtitle')
        language = result.findAll('td', {'class': 'flag-cell'})[0].text.strip()
        title = result.find('a').text.strip()
        # Drop the leading "subtitle " prefix and any [release-tag] brackets.
        title = re.findall("subtitle (.*)", title)[0]
        title = re.sub(r'(\[.*\])', '', title)
        title = f"{language}: {title}"
        link = f"https://yifysubtitles.org{link}.zip"
        href.append(link)
        message.append(title)
        # Cap the result list at 56 entries.
        if(i == 55):
            break
    return href, message
35,713
def get_shape(rhoa_range):
    """
    Find anomaly `shape` from apparent resistivity values framed to
    the best points.

    :param rhoa_range: The apparent resistivity from selected anomaly bounds
                       :attr:`~core.erp.ERP.anom_boundaries`
    :type rhoa_range: array_like or list

    :returns:
        - V
        - W
        - K
        - C
        - M
        - U

    :Example:

        >>> from watex.core.erp import get_shape
        >>> x = [60, 70, 65, 40, 30, 31, 34, 40, 38, 50, 61, 90]
        >>> shape = get_shape (rhoa_range= np.array(x))
        ...U
    """
    shape ='V'
    # argrelextrema returns a 1-tuple of index arrays; unpack when possible,
    # otherwise keep the raw result (fallback for unexpected return shapes).
    try:
        minlocals_ix, = argrelextrema(rhoa_range, np.less)
    except :
        minlocals_ix = argrelextrema(rhoa_range, np.less)
    try :
        maxlocals_ix, = argrelextrema(rhoa_range, np.greater)
    except :
        maxlocals_ix = argrelextrema(rhoa_range, np.greater)
    value_of_median = np.median(rhoa_range)
    coef_UH = 1.2
    # First and last resistivity values frame the anomaly.
    c_=[rhoa_range[0] , rhoa_range[-1] ]
    # No local minima: curve is monotonic-ish -> 'U' or 'C'.
    # NOTE(review): `(max(c_) and min(c_)) > value_of_median` compares only
    # min(c_) with the median (Python `and` returns an operand) — confirm
    # whether both endpoints were meant to be compared.
    if len(minlocals_ix)==0 :
        if len(maxlocals_ix)==0 and\
            (max(c_) and min(c_)) > value_of_median :
            return 'U'
        return 'C'
    # One local minimum: candidate 'C' or 'M' shapes.
    if len(minlocals_ix) ==1 :
        if max(c_) > np.median(rhoa_range) and min(c_) < value_of_median/2:
            return 'C'
        elif rhoa_range[minlocals_ix] > value_of_median or \
            rhoa_range[minlocals_ix] > max(c_):
            return 'M'
    # Several local minima: 'W' family, refined to 'H'/'U'/'K'.
    if len(minlocals_ix)>1 :
        # NOTE(review): `(max(c_) or min(c_)) > value_of_median` only tests
        # max(c_) (short-circuit `or`) — confirm intent.
        if (max(c_) or min(c_))> value_of_median :
            shape ='W'
            if max(c_) > value_of_median and\
                min(c_) > value_of_median:
                if rhoa_range[maxlocals_ix].mean()> value_of_median :
                    # NOTE(review): this truthiness test is True whenever the
                    # mean of the minima is nonzero — looks like a comparison
                    # against the maxima mean was intended; confirm.
                    if coef_UH * rhoa_range[minlocals_ix].mean():
                        shape ='H'
                        coef_UH = 1.
                        if rhoa_range[minlocals_ix].mean() <= coef_UH * \
                            rhoa_range[maxlocals_ix].mean():
                            shape = 'U'
                    else :
                        shape ='K'
        elif (rhoa_range[0] and rhoa_range[-1]) < np.median(rhoa_range):
            shape = 'M'
        return shape
    return shape
35,714
def elslib_D2(*args):
    """
    * For elementary surfaces from the gp package (cones, cylinders,
    spheres and tori), computes:
    - the point P of parameters (U, V), and
    - the first derivative vectors Vu and Vv at this point in the u and v
      parametric directions respectively, and
    - the second derivative vectors Vuu, Vvv and Vuv at this point.

    Overloads (all return void; P, Vu, Vv, Vuu, Vvv, Vuv are output arguments):

        elslib_D2(U: float, V: float, C: gp_Cone,     P: gp_Pnt, Vu: gp_Vec, Vv: gp_Vec, Vuu: gp_Vec, Vvv: gp_Vec, Vuv: gp_Vec)
        elslib_D2(U: float, V: float, C: gp_Cylinder, P: gp_Pnt, Vu: gp_Vec, Vv: gp_Vec, Vuu: gp_Vec, Vvv: gp_Vec, Vuv: gp_Vec)
        elslib_D2(U: float, V: float, S: gp_Sphere,   P: gp_Pnt, Vu: gp_Vec, Vv: gp_Vec, Vuu: gp_Vec, Vvv: gp_Vec, Vuv: gp_Vec)
        elslib_D2(U: float, V: float, T: gp_Torus,    P: gp_Pnt, Vu: gp_Vec, Vv: gp_Vec, Vuu: gp_Vec, Vvv: gp_Vec, Vuv: gp_Vec)
    """
    # Thin SWIG wrapper: dispatch straight to the native implementation.
    return _ElSLib.elslib_D2(*args)
35,715
def readiness():
    """Handle GET requests that are sent to /api/v1/readiness REST API endpoint.

    Always responds with an empty JSON object and HTTP 200, signalling that
    the service is ready to accept traffic.
    """
    return flask.jsonify({}), 200
35,716
def camino_minimo(origen,dest,grafo,aeropuertos_por_ciudad,pesado=True):
    """Return the minimum-cost path between two cities of the graph.

    Tries every airport pair (one airport from each city) and keeps the
    cheapest route found.

    Args:
        origen: origin city name (key of ``aeropuertos_por_ciudad``).
        dest: destination city name.
        grafo: graph whose vertices are airports.
        aeropuertos_por_ciudad: mapping city -> list of its airports.
        pesado: if True use Dijkstra (weighted graph); otherwise BFS.

    Returns:
        (costo, camino): cost of the best route and its list of vertices.
    """
    camino=[]
    costo=float("inf")
    for aeropuerto_i in aeropuertos_por_ciudad[origen]:
        for aeropuerto_j in aeropuertos_por_ciudad[dest]:
            if pesado:
                distancia, predecesores= utils.dijkstra(grafo,aeropuerto_i,aeropuerto_j)
            else:
                predecesores, distancia= utils.bfs(grafo,aeropuerto_i,aeropuerto_j)
            # Keep this airport pair only if it beats the best cost so far.
            if distancia[aeropuerto_j]< costo:
                costo=distancia[aeropuerto_j]
                camino.clear()
                utils.armar_camino(distancia,predecesores,camino,aeropuerto_i,aeropuerto_j)
            # Free the per-pair lookup tables before the next iteration.
            distancia.clear()
            predecesores.clear()
    return costo,camino
35,717
def test_dict_lookup(ami_file_dict):
    """AMI lookup using json url.

    ``ami_file_dict`` is a mock whose return value stands in for the
    region -> {name: ami-id} mapping loaded from the JSON source.
    """
    sample_dict = {
        'us-east-1': {
            'base_fedora': 'ami-xxxx',
        },
        'us-west-2': {
            'tomcat8': 'ami-yyyy',
        }
    }
    ami_file_dict.return_value = sample_dict
    assert ami_lookup(region='us-east-1', name='base_fedora') == 'ami-xxxx'
    assert ami_lookup(region='us-west-2', name='tomcat8') == 'ami-yyyy'
35,718
def extract_name_from_uri_or_curie(item, schema=None):
    """Extract name from uri or curie

    :arg str item: an URI or curie
    :arg dict schema: a JSON-LD representation of schema
    """
    # With a schema available, prefer the rdfs:label recorded for the item.
    if schema:
        labels = [record["rdfs:label"] for record in schema["@graph"]
                  if record['@id'] == item]
        if labels:
            return labels[0]
        # Not found in the schema: fall back to the syntactic extraction.
        return extract_name_from_uri_or_curie(item)
    # Curie such as "prefix:name" -> the part after ":".
    if 'http' not in item and len(item.split(":")) == 2:
        return item.split(":")[-1]
    # URI -> the last path segment after the authority.
    if len(item.split("//")[-1].split('/')) > 1:
        return item.split("//")[-1].split('/')[-1]
    # Otherwise, raise ValueError.
    raise ValueError('{} should be converted to either URI or curie'.format(item))
35,719
def test_expand_to_p1(mtz_by_spacegroup):
    """Test DataSet.expand_to_p1() for common spacegroups.

    The expected P1 data lives next to each fixture MTZ with a `_p1`
    suffix; both sides are sorted so the index comparison is order-free.
    """
    x = rs.read_mtz(mtz_by_spacegroup)
    expected = rs.read_mtz(mtz_by_spacegroup[:-4] + '_p1.mtz')
    expected.sort_index(inplace=True)
    result = x.expand_to_p1()
    result.sort_index(inplace=True)
    # Compare as complex structure factors so amplitude and phase are
    # checked together (phases alone wrap at 360 degrees).
    expected_sf = expected.to_structurefactor("FMODEL", "PHIFMODEL")
    result_sf = result.to_structurefactor("FMODEL", "PHIFMODEL")
    assert_index_equal(result.index, expected.index)
    assert np.allclose(result_sf.to_numpy(), expected_sf.to_numpy(), rtol=1e-4)
35,720
def get_gcc_timeseries(site, roilist_id, nday=3): """ Read in CSV version of summary timeseries and return GCCTimeSeries object. """ # set cannonical dir for ROI Lists roidir = os.path.join(config.archive_dir, site, "ROI") # set cannonical filename gcc_tsfile = site + "_" + roilist_id + "_{0}day.csv".format(nday) gcc_tspath = os.path.join(roidir, gcc_tsfile) # create empty GCCTimeSeries object gccts = GCCTimeSeries(site=site, ROIListID=roilist_id) # read in from CSV file gccts.readCSV(gcc_tspath) return gccts
35,721
def download_graph(coordinates, distances):
    """Build the OSM street graph covering all requested coordinates.

    Downloads a walking network around each coordinate (radius =
    max(distances) + 100 m) and merges the pieces into a single graph.

    Args:
        coordinates: iterable of (lat, lon) points to cover.
        distances: iterable of radii; only the maximum is used.

    Returns:
        The combined networkx street graph, or None if ``coordinates``
        is empty.
    """
    max_distance = max(distances)
    G = None  # sentinel: nothing downloaded yet (was `False` originally)
    print('Fetching street network')
    for coordinate in tqdm(coordinates, desc='Downloading'):
        part = ox.graph_from_point(coordinate, distance=max_distance + 100,
                                   network_type='walk')
        if G is None:
            # Initialize the graph from the first point.
            G = part
        else:
            # Merge with the existing graph. nx.compose returns a new graph,
            # so the defensive deepcopy the original made is unnecessary.
            G = nx.compose(G, part)
    return G
35,722
def cmd_execute_io(*a):
    """Run each command and print its stdout as escaped shellcode bytes.

    Each argument is executed through the shell; its standard output is
    read as a hex string (two characters per byte) and printed as a
    ``buffer += b"\\x.."`` line ready to paste into an exploit script.

    NOTE: commands run through the shell by design (this is an operator
    tool) — only pass trusted input.
    """
    for cmd in a:
        # subprocess.run replaces the original "> tmp.out" redirect plus
        # `rm -f tmp.out`, avoiding the shared temp-file race and the
        # non-portable rm call.
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        out = proc.stdout
        encoded = "".join("\\x" + out[i:i + 2] for i in range(0, len(out), 2))
        print('buffer += b"' + encoded + '"')
35,723
def external_forces(cod_obj):
    """Sample the drift and diffusion fields at the object's current position.

    Reads the latest (x, y, z) position from ``cod_obj.pos_list`` and
    evaluates the pre-built ``forcep`` interpolators there.

    Returns:
        ([divx, divy, divz], [divxx, divxy, divxz, divyy, divyz, divzz]):
        drift vector components and the six independent structure-tensor
        (diffusion) components.
    """
    # Most recent position of the object.
    x_pos,y_pos,z_pos =cod_obj.pos_list[-1]
    """ Drift vector components Drift signal//all directions """
    divx = forcep['divxp']([x_pos,y_pos,z_pos])[0]
    divy = forcep['divyp']([x_pos,y_pos,z_pos])[0]
    divz = forcep['divzp']([x_pos,y_pos,z_pos])[0]
    """ Structure tensor components Diffusion metric // Diffusive tensor components """
    divxx = forcep['stpxx']([x_pos,y_pos,z_pos])[0]
    divxy = forcep['stpxy']([x_pos,y_pos,z_pos])[0]
    divxz = forcep['stpxz']([x_pos,y_pos,z_pos])[0]
    divyy = forcep['stpyy']([x_pos,y_pos,z_pos])[0]
    divyz = forcep['stpyz']([x_pos,y_pos,z_pos])[0]
    divzz = forcep['stpzz']([x_pos,y_pos,z_pos])[0]
    return [divx, divy, divz], [divxx, divxy, divxz, divyy, divyz, divzz]
35,724
def outlier_filter(df, thrsh, rng, lim):
    """Mask step-change outliers in ``df.WaterLevel`` and interpolate over them.

    For each step size n in 0..-(rng-1) the absolute difference of
    'WaterLevel' is computed; values whose change exceeds ``thrsh`` are
    replaced by NaN, and a linearly interpolated series (up to ``lim``
    consecutive NaNs) is written to 'WL_int'. Mutates ``df`` in place
    (columns 'Diff', 'WL_nan', 'WL_int'); returns None.

    Args:
        df (pd.DataFrame): must contain a 'WaterLevel' column.
        thrsh (float): threshold change flagged as an outlier.
        rng (int): number of step sizes to scan.
        lim (int): max number of consecutive NaNs to interpolate.
    """
    for n in np.arange(rng) * -1:
        df['Diff'] = df.WaterLevel.diff(periods=n)
        # Bug fix: the original did `df['WL_nan'] = ...mask(..., inplace=True)`,
        # which assigns None (inplace mask returns nothing) and mutates a
        # derived Series instead of the frame. Assign the masked result back
        # explicitly. (np.nan instead of np.NaN: the alias was removed in
        # NumPy 2.0.)
        df['WaterLevel'] = df['WaterLevel'].mask(df.Diff.abs() > thrsh, np.nan)
        df['WL_nan'] = df['WaterLevel']
        df['WL_int'] = df['WaterLevel'].interpolate(method='linear', limit=lim)
35,725
def charis_font_spec_css():
    """Font spec for using CharisSIL with Pisa (xhtml2pdf).

    Returns a CSS @font-face block covering the regular, italic, bold and
    bold-italic faces; the doubled braces survive ``str.format`` so only
    the fonts directory path is substituted.
    """
    return """ @font-face {{ font-family: 'charissil'; src: url('{0}/CharisSIL-R.ttf'); }} @font-face {{ font-family: 'charissil'; font-style: italic; src: url('{0}/CharisSIL-I.ttf'); }} @font-face {{ font-family: 'charissil'; font-weight: bold; src: url('{0}/CharisSIL-B.ttf'); }} @font-face {{ font-family: 'charissil'; font-weight: bold; font-style: italic; src: url('{0}/CharisSIL-BI.ttf'); }} """.format(static_path('fonts'))
35,726
def set_symbols(pcontracts, dt_start="1980-1-1", dt_end="2100-1-1", n=None, spec_date={}):  # 'symbol':[,]
    """Create the global simulator for the given contracts and time range.

    Args:
        pcontracts (list): list of pcontracts(string)
        dt_start (datetime/str): start time of all pcontracts
        dt_end (datetime/str): end time of all pcontracts
        n (int): last n bars
        spec_date (dict): time range for specific pcontracts.
            NOTE(review): mutable default argument — safe only if neither
            this function nor ExecuteUnit mutates it; confirm downstream.

    Returns:
        ExecuteUnit: the newly created simulator (also stored in the
        module-global ``_simulator``).
    """
    global _simulator
    _simulator = ExecuteUnit(pcontracts, dt_start, dt_end, n, spec_date)
    return _simulator
35,727
def estimate_vol_gBM(data1, data2, time_incr=0.1):
    """
    Estimate vol and correlation of two geometric Brownian motion samples
    with time samples on a grid with mesh size time_incr using
    estimate_vol_2d_rv_incr; the drift and mean-reversion parameters are
    set to 0.

    Args:
        data1: data array for X1
        data2: data array for X2
        time_incr: time increment of the sampling grid

    Returns:
        ([0, 0, sigma_1], [0, 0, sigma_2], rho) — a format that can be
        used directly in a LOBLinear model object.
    """
    # log=True: estimation is performed on the log of both data series.
    sigma_bid, sigma_ask, rho = estimate_vol_2d_rv_incr(data1, data2, time_incr, log=True)
    return [float(0), float(0), sigma_bid], [float(0), float(0), sigma_ask], rho
35,728
def check_quarantine(av_quarentine_file):
    """Check if the quarantine is over.

    The file holds "<days>:<YYYY-MM-DD>"; the quarantine lasts <days> days
    from the stored date. On any error while reading or parsing the file,
    the error is logged and the process exits with code 1.

    Returns:
        bool: True while today is within the quarantine window, else False.
    """
    try:
        with open(av_quarentine_file, 'r', encoding="utf-8") as ff_av:
            quar_str, av_run_str = ff_av.readline().split(':')
        started = DT.date.fromisoformat(av_run_str.strip())
        ends = started + DT.timedelta(days=int(quar_str))
        # Still quarantined while today has not passed the end date.
        return DT.date.today() <= ends
    except Exception as ee:
        logging.error(f"\nError {ee} while reading quarantine file {av_quarentine_file}.")
        sys.exit(1)
35,729
def step_pattern():
    """
    Based on the buffer length determined through fuzzing (previous step),
    we will create and send a unique pattern which will help us finding
    the offset.

    Side effects: updates the globals current_step, pattern_length,
    buf_length and buffer; writes and removes the temporary file
    'pattern.txt'; generates and (for remote targets) runs the exploit.
    """
    global current_step
    current_step = 1
    show_step_banner('[1] Finding offset')
    # Get length from fuzzing
    show_prompt_text('Enter the length at which the application/service crashed:')
    user_input = get_input(number_valid)
    # The start/end command bytes occupy part of the crash length.
    global pattern_length
    pattern_length = int(user_input) - len(start_command) - len(end_command)
    global buf_length
    buf_length = int(user_input)
    # Call Metasploit framework
    tmp_file = 'pattern.txt'
    command = 'msf-pattern_create -l {} > {}'.format(pattern_length, tmp_file)
    thread = call_command(command)
    while thread.running():
        animation('Creating pattern')
    # Proceed if pattern creation was successful
    if thread.result() == 0:
        print()
        # Buffer ----------------------------------
        with open(tmp_file, 'r') as f:
            pattern = f.read().splitlines()[0].encode()
        global buffer
        buffer = buffer_list.get_buffer_by_name('pattern').get_buffer(pattern)
        # -----------------------------------------
        os.unlink(tmp_file)
        print('The exploit file will be generated. The following settings will be used:\n')
        if bo_type == 'local':
            settings = [desc_pattern(), desc_start_command(), desc_end_command()]
            show_settings(settings)
            if proceed_ok():
                dump_local_exploit()
                print(' Load file into vulnerable application and check which pattern is shown in EIP on crash.')
        elif bo_type == 'remote':
            settings = [desc_target(), desc_port(), desc_pattern(), desc_start_command(), desc_end_command()]
            show_settings(settings)
            if proceed_ok():
                dump_remote_exploit()
                run_remote_exploit()
        # Proceed
        step_offsets()
35,730
def get_coin_total(credentials_file: str, coin: str) -> float: """ Get the current total amount of your coin Args: credentials_file: A JSON file containing Coinbase Pro credentials coin: The coin requested Returns: coin_total: The total amount of the coin you hold in your account """ # Instantiate Coinbase API and query the price coin_total = 0 coinbase_creds = get_cbpro_creds_from_file(credentials_file) coinbase_auth = CoinbaseProAuth(coinbase_creds[0], coinbase_creds[1], coinbase_creds[2]) api_query = "accounts" result = requests.get(API_URL + api_query, auth=coinbase_auth).json() for account in result: if account['currency'] == coin: coin_total = float(account['balance']) return coin_total
35,731
def datetime_to_serial(dt):
    """Convert a naive datetime to an Excel serial date number.

    Excel serials count days since 1899-12-30, with the time of day as the
    fractional part.

    Raises:
        ValueError: if *dt* is timezone-aware.
    """
    if dt.tzinfo:
        raise ValueError("Doesn't support datetimes with timezones")
    epoch = datetime(1899, 12, 30)
    delta = dt - epoch
    # Seconds and microseconds become the fractional day.
    day_fraction = (delta.seconds + delta.microseconds / 1E6) / 86400.0
    return delta.days + day_fraction
35,732
def shellsort(input_list):
    """Sort the given list in place using the shellsort technique.

    :param input_list: list of mutually comparable items (mutated in place).
    """
    # Bug fixes vs. the original:
    #  - `len(input_list) / 2` yields a float in Python 3 and breaks range();
    #    use integer division.
    #  - the inner shift wrote to input_list[i] instead of input_list[j],
    #    corrupting the list instead of sorting it.
    gap = len(input_list) // 2
    while gap > 0:
        for i in range(gap, len(input_list)):
            tmp = input_list[i]
            j = i
            # Shift earlier gap-sorted elements up until the slot for tmp.
            while j >= gap and input_list[j - gap] > tmp:
                input_list[j] = input_list[j - gap]
                j = j - gap
            input_list[j] = tmp
        gap = gap // 2
35,733
def compute_transitive_closure(graph):
    """Compute the transitive closure of a directed graph using
    Warshall's algorithm.

    :arg graph: A :class:`collections.abc.Mapping` representing a directed
        graph. The dictionary contains one key representing each node in
        the graph, and this key maps to a
        :class:`collections.abc.MutableSet` of nodes that are connected to
        the node by outgoing edges. This graph may contain cycles. This
        object must be picklable. Every graph node must be included as a
        key in the graph.

    :returns: The transitive closure of the graph, represented using the
        same data type.

    .. versionadded:: 2020.2
    """
    # Warshall's algorithm, run on an independent copy so the input
    # mapping is left untouched (assumes all nodes appear as keys).
    from copy import deepcopy

    closure = deepcopy(graph)
    nodes = list(graph.keys())
    for via in nodes:
        reachable_from_via = closure[via]
        for src in nodes:
            if via not in closure[src]:
                continue
            for dst in nodes:
                if dst in reachable_from_via:
                    closure[src].add(dst)
    return closure
35,734
def A_intermediate(f1, f2, f3, v1, v2, v3, d1, d3):
    """Solves system of equations for intermediate amplitude matching.

    Finds the quartic polynomial a0 + a1*f + ... + a4*f**4 whose values at
    f1, f2, f3 are v1, v2, v3 and whose derivative at f1 and f3 is d1, d3.

    Returns:
        numpy.ndarray: the five polynomial coefficients [a0, ..., a4].
    """
    # Value-matching rows: [1, f, f^2, f^3, f^4] at f1, f2, f3.
    value_rows = [[f ** k for k in range(5)] for f in (f1, f2, f3)]
    # Derivative-matching rows at f1 and f3.
    slope_rows = [[0.0, 1.0, 2 * f, 3 * f ** 2, 4 * f ** 3] for f in (f1, f3)]
    coeff = np.array(value_rows + slope_rows, dtype="float")
    rhs = np.array([v1, v2, v3, d1, d3], dtype="float")
    return np.linalg.solve(coeff, rhs)
35,735
def server(
    ctx, root_dir, path, host, port, workers, encrypt_password, username, password
):
    """Start the HTTP server.

    Looks up the most recent API from the CLI context; if none exists the
    command is a no-op. A username without a password is rejected.
    """
    api = _recent_api(ctx)
    if not api:
        return
    # Fall back to the context's stored encryption password when none given.
    encrypt_password = encrypt_password or _encrypt_password(ctx)
    if username:
        assert password, "Must set password"
    start_server(
        api,
        root_dir=root_dir,
        path=path,
        host=host,
        port=port,
        workers=workers,
        encrypt_password=encrypt_password,
        username=username,
        password=password,
    )
35,736
def _evaluate_criterion(criterion, params, criterion_kwargs): """Evaluate the criterion function for the first time. The comparison_plot_data output is needed to initialize the database. The criterion value is stored in the general options for the tao pounders algorithm. Args: criterion (callable): Python function that takes a pandas DataFrame with parameters as the first argument and returns a value or array to be minimized and data for the comparison plot. params (pd.DataFrame): See :ref:`params`. criterion_kwargs (dict): Additional keyword arguments for criterion. Returns: fitness_eval (float): The scalar criterion value. comparison_plot_data (np.array or pd.DataFrame): Data for the comparison_plot. """ criterion_out, comparison_plot_data = criterion(params, **criterion_kwargs) if np.any(np.isnan(criterion_out)): raise ValueError( "The criterion function evaluated at the start parameters returns NaNs." ) elif np.isscalar(criterion_out): fitness_eval = criterion_out else: fitness_eval = np.mean(np.square(criterion_out)) return fitness_eval, comparison_plot_data
35,737
def group_by(collection, callback=None):
    """Creates an object composed of keys generated from the results of
    running each element of a `collection` through the callback.

    Args:
        collection (list|dict): Collection to iterate over.
        callback (mixed, optional): Callback applied per iteration.

    Returns:
        dict: Results of grouping by `callback`.

    Example:

        >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'a')
        >>> assert results == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]}
        >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], {'a': 1})
        >>> assert results == {False: [{'a': 3, 'b': 4}],\
                               True: [{'a': 1, 'b': 2}]}

    .. versionadded:: 1.0.0
    """
    keyfunc = pyd.iteratee(callback)
    grouped = {}
    for item in collection:
        # setdefault creates the bucket on first sight of a key.
        grouped.setdefault(keyfunc(item), []).append(item)
    return grouped
35,738
def lemmatize_verbs(words):
    """Lemmatize the verbs in a tokenized word list."""
    wnl = WordNetLemmatizer()
    # pos='v' forces verb lemmatization for every token.
    return [wnl.lemmatize(word, pos='v') for word in words]
35,739
def tempf():
    """Yield a temporary file (as a pathlib.Path) holding the 5 bytes b"tempo".

    Generator/fixture style: the file is created when the generator is first
    advanced and removed when it resumes (teardown).
    """
    # Bug fix: mkstemp returns an OPEN file descriptor which the original
    # discarded, leaking one descriptor per call. Close it explicitly.
    fd, name = tempfile.mkstemp()
    os.close(fd)
    path = Path(name)
    # make a temp file with 5 bytes
    path.write_bytes(b"tempo")
    yield path
    if path.exists():
        path.unlink()
35,740
def compare_streams(
        execution: riberry.model.job.JobExecution,
        expected: Dict[str, Dict[str, int]]
):
    """ Compares the given streams with the execution's actual streams.

    Raises (via ``_compare``) when the expected and actual stream/step
    counts differ.

    Example of `expected` parameter:
        {
            "STREAM_NAME_1": {      # stream name
                "STEP_NAME_1": 1,   # key: step name, value: step count,
                "STEP_NAME_2": 2    # key: step name, value: step count,
            },
            "STREAM_NAME_2": {      # stream name
                "STEP_NAME_1": 1    # key: step name, value: step count,
            }
        }
    """
    actual = get_actual_stream_step_counts(execution)
    _compare(expected, actual, 'Streams do not match')
35,741
def remove_install_type(type_func):
    """Remove the registered type_func for installing.

    ``INSTALL_TYPES`` may behave like a mapping (keyed by type_func) or a
    sequence; both removal styles are attempted and a missing entry is
    ignored either way.
    """
    # Narrowed from bare `except:`: only swallow "not registered" or
    # "wrong container kind" errors instead of hiding every failure
    # (including KeyboardInterrupt/SystemExit, which bare except caught).
    try:
        INSTALL_TYPES.pop(type_func)
    except (KeyError, TypeError, IndexError):
        pass
    try:
        INSTALL_TYPES.remove(type_func)
    except (ValueError, AttributeError):
        pass
35,742
def loadfromensembl(homology, kingdom='fungi', sequence='cdna', additional='type=orthologues',
                    saveonfiles=False, normalized=False, setnans=False, number=0, by="entropy",
                    using="normal", getCAI=None):
    """
    Load from ensembl the data required in parameters (look at
    PyCUB.get_data for more information) and return a fully populated
    homology object.  (Python 2 module: uses print statements.)

    Args:
        homology: str the homology code
        additional: str additional information on the retrieved sequence
        kingdom: str flags the relevant kingdom of your current session
            [fungi, plants, bacteria, animals]
        sequence: str flags the type of sequence you consider the full
            genome is (coding or non coding or full) [cds, all, cda]
        by: str flags what type of computation should be done
            [entropy, frequency, entropylocation]
        normalized: bool to true if the entropy should be normalized by length
        saveonfiles: bool to true if the retrieved data should be saved on a file
        setnans: bool to true if nans should be set to NaN instead of an avg value
        number: int retry counter (internal; incremented on connection errors)
        using: the method to compute the partition function if using
            entropy location
        getCAI: whether or not to compute CAI !! need to have called the
            corresponding function on PyCUB beforehand !!

    Returns:
        a populated PyCUB.homology object
        [names, taxons, full, lenmat, homocode, nans, KaKs_Scores,
        similarity_scores, proteinids, GCcount, geneids, refs, ecai,
        refgene, refprot, tot_volume, mean_hydrophobicity, glucose_cost,
        synthesis_steps, isoelectricpoint, cai, conservation, uncounted]
        OR None if the homology is empty

    Raises:
        IOError: "tried 50 times but still not able to connect"
    """
    server = "http://rest.ensemblgenomes.org"
    print 'homology: ' + homology + ' : ' + str(number)
    ext = "/homology/id/" + homology + '?'
    if sequence is not None:
        # dna cdna cds ncrna Protein EMBL GENBANK MySQL TSV GTF GFF3
        ext += 'sequence=' + sequence
    if kingdom is not None:
        ext += ';compara=' + kingdom
    if additional is not None:
        ext += ';' + additional
    try:
        r = requests.get(server + ext, headers={"Content-Type": "application/json"})
    except ConnectionError:
        print "problem at " + homology
        if number > 50:
            raise IOError("tried 50 times but still not able to connect")
        # Retry recursively, bumping the attempt counter.
        return loadfromensembl(homology, kingdom=kingdom, sequence=sequence, additional=additional,
                               saveonfiles=saveonfiles, normalized=normalized, setnans=setnans,
                               number=number + 1, by=by, using=using)
    if not r.ok:
        r.raise_for_status()
    data = r.json()['data']
    if not data:
        return None
    data = data[0]['homologies']
    if not data:
        return None
    if saveonfiles:
        with open('utils/data/' + homology + '.json', "wb") as code:
            code.write(json.dump(data))
    # Unpack every per-species metric computed from the raw homology records.
    species, GCcount, lenmat, H, nans, similarities, KaKs_Scores, taxons, proteinids,\
        geneid, ref, ecai, cai, refgene, refprot, vol, cost, hydrophob, synthcost, isoepoint, conservation, others = process(
            data, normalized=normalized, setnans=setnans, by=by, getCAI=getCAI)
    if by == 'entropyLocation':
        H = getloc(H, np.array(lenmat), using=using)
    # Here we add two things into names, but only as a temporary saving
    # measure removed by the application of preprocessing in homoset.
    homo = h.homology(names=[species, taxons], full=H, lenmat=lenmat, homocode=homology,
                      nans=nans, KaKs_Scores=KaKs_Scores, similarity_scores=similarities,
                      proteinids=proteinids, GCcount=GCcount, geneids=geneid, ref=ref, ecai=ecai, cai=cai,
                      refgene=refgene, refprot=refprot, tot_volume=vol, mean_hydrophobicity=hydrophob,
                      glucose_cost=cost, synthesis_steps=synthcost, isoelectricpoint=isoepoint,
                      conservation=conservation, othercods=others)
    # A first ordering of the data, useful afterward in the preprocessing.
    homo.order(withtaxons=True)
    return homo
35,743
def build_json_schema_docs():
    """Build markdown documentation from the JSON Schema YAML files.

    For each schema listed below, emits its title, description, first example
    (as a fenced JSON admonition) and a one-row table per property, then
    writes the concatenated markdown to MARKDOWN_SCHEMA.
    """
    header = "# JSON Schema for ISCC Metadata\n\n"
    schemata = [
        "iscc-jsonld.yaml",
        "iscc-minimal.yaml",
        "iscc-basic.yaml",
        "iscc-embeddable.yaml",
        "iscc-extended.yaml",
        "iscc-technical.yaml",
        "iscc-nft.yaml",
        "iscc-crypto.yaml",
        "iscc-declaration.yaml",
    ]
    content = header
    for schema in schemata:
        path = SCHEMAS / schema
        with open(path, "rt", encoding="utf-8") as infile:
            data = yaml.safe_load(infile)
        content += f"## {data['title']}\n"
        content += f"{data['description']}\n"
        if data.get("examples"):
            pretty = json.dumps(data.get("examples")[0], indent=2)
            content += f"""

!!! example

    ```json
{indent(pretty, prefix="    ")}
    ```

"""
        for prop, attrs in data["properties"].items():
            # BUGFIX: .get("type") may return None for untyped properties,
            # which made the "+=" below raise TypeError; default to "".
            type_ = attrs.get("type", "")
            if attrs.get("format"):
                type_ += "-" + attrs.get("format")
            title = f"**{prop}**\n"
            if attrs.get("x-iscc-context"):
                title += f"<{attrs.get('x-iscc-context')}>\n"
            # BUGFIX: same None-concatenation hazard for description.
            description = attrs.get("description", "")
            if attrs.get("example"):
                description += f"<br><br>**Example**: `{attrs['example']}`"
            default = attrs.get("default", "none")
            content += f"### {title}\n"
            content += f"| Name | Type | Default | Definition |\n"
            content += f"| ---- | ---- | --------|--------------------------------|\n"
            content += f"| {prop} | `{type_}` | {default} | {description} |\n\n"
    with open(MARKDOWN_SCHEMA, "wt", encoding="utf-8", newline="\n") as outf:
        outf.write(content)
35,744
def run_executable(string_to_execute, project_info, verbosity, exit_on_warning, launcher_arguments=None, write_di=True):
    """Execute a script or binary, dispatching on its filename suffix.

    The file extension (e.g. ``py``, ``ncl``) selects the matching
    ``<suffix>_launcher`` class from the launchers module, which then runs
    the file with the given project settings.

    @param string_to_execute Path to the script/binary to execute
    @param project_info Current namelist in dictionary format
    @param verbosity The requested verbosity level
    @param exit_on_warning Boolean defining whether the wrapper should crash on warnings
    @param launcher_arguments Optional extra arguments handed to the launcher
    @param write_di If True, write the data interface file before launching
    """
    if write_di:
        write_data_interface(string_to_execute, project_info)

    # Derive the launcher class name from the file extension (without the dot).
    extension = os.path.splitext(string_to_execute)[1][1:]
    launcher = vars(launchers)[extension + '_launcher']()
    if launcher_arguments is not None:
        launcher.arguments = launcher_arguments
    launcher.execute(string_to_execute, project_info, verbosity, exit_on_warning)
35,745
def dereference(reference_buffer, groups):
    """Resolve a backreference against a list of groups.

    The digits accumulated in *reference_buffer* form a 1-based group
    number; the matching group is returned with a trailing space. Numbers
    larger than the group count wrap around (modulo), so a reference never
    raises IndexError.

    :param reference_buffer: list of digit characters, e.g. ``['1', '2']``
    :param groups: list of group strings to index into
    :return: the referenced group plus a trailing space, or '' when the
        buffer is empty
    """
    # Idiom fix: truthiness guard instead of len(...) > 0.
    if not reference_buffer:
        return ''
    ref_number = int(''.join(reference_buffer)) - 1
    return groups[ref_number % len(groups)] + ' '
35,746
def get_main_corpora_info():
    """Create a list with the main corpora info saved in CORPORA_SOURCES

    :return: List of dictionaries with the corpora info to be shown
    :rtype: list
    """
    table = []
    # BUGFIX: enumerate() replaces CORPORA_SOURCES.index(corpus_info), which
    # performed an O(n) scan per row and returned the wrong id whenever two
    # entries compared equal.
    for corpus_id, corpus_info in enumerate(CORPORA_SOURCES, start=1):
        props = corpus_info["properties"]
        corpus_name = pretty_string(
            f"{corpus_info['name']} ({props['slug']})", 2
        )
        table.append({
            "id": corpus_id,
            "name": corpus_name,
            "lang": props["language"],
            "size": props["size"],
            "docs": props["doc_quantity"],
            "words": props["word_quantity"],
            "granularity": pretty_string('\n'.join(props["granularity"]), 1),
            "license": pretty_string(props["license"], 1),
        })
    return table
35,747
def _add_unreachable_server(ip=None):
    """Record *ip* in the plugin's unreachable_servers list, skipping duplicates."""
    if not ip:
        collectd.error("%s _add_unreachable_server called with no IP" % PLUGIN)
        return
    if ip in obj.unreachable_servers:
        collectd.debug("%s ip '%s' already in unreachable_servers list" %
                       (PLUGIN, ip))
        return
    # Log before and after the append so the list growth is traceable.
    collectd.debug("%s adding '%s' to unreachable servers list: %s" %
                   (PLUGIN, ip, obj.unreachable_servers))
    obj.unreachable_servers.append(ip)
    collectd.info("%s added '%s' to unreachable servers list: %s" %
                  (PLUGIN, ip, obj.unreachable_servers))
35,748
def start_compare_analysis(api_token, project_id, kind, url, username, password, target_branch, target_revision):
    """
    Start a compare analysis for a project via the GraphQL API.

    NOTE(review): `username` and `password` are accepted but never sent in
    the mutation below -- confirm whether they should be included.

    :param api_token: the access token to the GraphQL API
    :param project_id: identifier of the project to use as source
    :param kind: kind of the target repository (Github, Gitlab, Git)
    :param url: URL of the target repository
    :param username: username of the target repository (currently unused)
    :param password: password of the target repository (currently unused)
    :param target_branch: optional branch of the target repository to compare
    :param target_revision: optional revision of the target repository to compare
    :return: the new compare-analysis identifier or None on exception or
        non-existent project
    """
    try:
        # Build the mutation argument list; optional arguments are only
        # appended when provided.
        args = []
        args.append("projectId: " + str(project_id))
        args.append("targetKind: " + kind)
        args.append("targetUrl: \"" + url + "\"")
        if target_revision:
            args.append("targetRevision: \"" + target_revision + "\"")
        if target_branch:
            args.append("targetBranch: \"" + target_branch + "\"")
        args_string = ",".join(args)
        query = """
        mutation { createCompareAnalysis(""" + args_string + """){id}}
        """
        response_json = do_graphql_query(api_token, {"query": query})
        return response_json["createCompareAnalysis"]["id"]
    except KeyError:
        # Missing key means the API returned an error payload instead of
        # the expected mutation result.
        log.error("Error while starting new analysis")
        return None
35,749
def _install(x: str, update: bool, quiet: bool = False):
    """Install an Earth Engine JavaScript module.

    The specified module will be installed in the ee_extra module path.

    Args:
        x: Module identifier ("user:repo/path") or http(s) URL of the module.
        update: If True, re-download the module even if already installed.
        quiet: If True, suppress progress messages.
    """
    if _check_if_module_exists(x) and not update:
        if not quiet:
            print(f"The module '{x}' is already installed!")
        return

    if not quiet:
        print(f"Downloading '{x}'...")

    ee_sources = _get_ee_sources_path()

    # External URLs live under EXTERNAL/<stem>; Earth Engine "user:repo/..."
    # identifiers mirror their repository layout inside ee_sources.
    if x.startswith("http"):
        module_folder = pathlib.Path(ee_sources).joinpath(
            "EXTERNAL/" + pathlib.Path(x).stem
        )
    else:
        module_folder = pathlib.Path(ee_sources).joinpath(
            "/".join(x.replace(":", "/").split("/")[:-1])
        )

    if not module_folder.exists():
        module_folder.mkdir(parents=True, exist_ok=True)

    # Download the module source and write it to the local module path.
    with urllib.request.urlopen(_convert_path_to_ee_sources(x)) as url:
        r = url.read()

    # BUGFIX: the file handle was previously left open
    # (open(...).write(r)); the with-statement guarantees it is closed.
    with open(_convert_path_to_ee_extra(x), "wb") as outfile:
        outfile.write(r)

    if not quiet:
        print(f"The module '{x}' was successfully installed!")
35,750
def ClosedNamedTemporaryFile(data: str, mode: str = "w") -> "Iterator[str]":
    """
    Yield the name of a *closed* temporary file containing *data*.

    The file is written and closed before its name is yielded, so it can be
    read by subprocesses on Windows (where an open NamedTemporaryFile cannot
    be reopened). The file is always deleted on exit.
    Source: https://stackoverflow.com/a/46501017

    BUGFIX: return annotation corrected from ``str`` to ``Iterator[str]``;
    this is a generator (presumably wrapped by ``@contextmanager`` at the
    definition site) and never returns a plain string.
    """
    file = tempfile.NamedTemporaryFile(delete=False, mode=mode)
    try:
        with file:
            file.write(data)
        yield file.name
    finally:
        os.unlink(file.name)
35,751
def copy_package_files(from_directory, to_directory, hard_links=True):
    """
    Copy package files to a temporary directory, using hard links when possible.

    :param from_directory: The pathname of a directory tree suitable for
                           packaging with ``dpkg-deb --build``.
    :param to_directory: The pathname of a temporary build directory.
    :param hard_links: Use hard links to speed up copying when possible.

    This function copies a directory tree suitable for packaging with
    ``dpkg-deb --build`` to a temporary build directory so that individual
    files can be replaced without changing the original directory tree. If
    the build directory is on the same file system as the source directory,
    hard links are used to speed up the copy. This function is used by
    :func:`build_package()`.
    """
    logger.info("Copying files (%s) to temporary directory (%s) ..",
                format_path(from_directory), format_path(to_directory))
    # cp -a: archive mode (recursive, preserves mode/times/links).
    command = ['cp', '-a']
    makedirs(to_directory)
    if hard_links and ALLOW_HARD_LINKS:
        # Check whether we can use hard links to speed up the copy. In the past
        # this used the following simple and obvious check:
        #
        #   os.stat(source_directory).st_dev == os.stat(build_directory).st_dev
        #
        # However this expression holds true inside schroot, yet `cp -al' fails
        # when trying to create the hard links! This is why the following code now
        # tries to create an actual hard link to verify that `cp -al' can be used.
        test_file_from = None
        test_file_to = None
        try:
            # Find a unique filename that we can create and destroy without
            # touching any of the caller's files.
            while True:
                test_name = 'deb-pkg-tools-hard-link-test-%d' % random.randint(1, 1000)
                test_file_from = os.path.join(from_directory, test_name)
                test_file_to = os.path.join(to_directory, test_name)
                if not os.path.isfile(test_file_from):
                    break
            # Create the test file.
            with open(test_file_from, 'w') as handle:
                handle.write('test')
            os.link(test_file_from, test_file_to)
            # The probe link succeeded, so `cp -l' (hard links) is safe here.
            logger.debug("Speeding up file copy using hard links ..")
            command.append('-l')
        except (IOError, OSError):
            # Hard links unsupported across these paths; fall back to a
            # regular copy silently.
            pass
        finally:
            # Always clean up the probe files, whether or not linking worked.
            for test_file in [test_file_from, test_file_to]:
                if test_file and os.path.isfile(test_file):
                    os.unlink(test_file)
    # I know this looks really funky, but this is a valid use of shell escaping
    # and globbing (obviously I tested it ;-).
    command.append('%s/*' % pipes.quote(from_directory))
    command.append(pipes.quote(to_directory))
    execute(' '.join(command), logger=logger)
35,752
def parse():
    """Parse the command line input arguments.

    Reads an input file (nml, json or yaml), optionally applies variable
    updates or a namelist patch, and writes the result in the requested
    output format.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='f90nml {0}'.format(f90nml.__version__))
    parser.add_argument('--group', '-g', action='store',
                        help="specify namelist group to modify. "
                             "When absent, the first group is used")
    parser.add_argument('--variable', '-v', action='append',
                        help="specify the namelist variable to add or modify, "
                             "followed by the new value. Expressions are of the "
                             "form `VARIABLE=VALUE`")
    parser.add_argument('--patch', '-p', action='store_true',
                        help="modify the existing namelist as a patch")
    parser.add_argument('--format', '-f', action='store',
                        help="specify the output format (json, yaml, or nml)")
    parser.add_argument('input', nargs='?')
    parser.add_argument('output', nargs='?')

    # No arguments at all: show usage rather than silently reading stdin.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    input_fname = args.input
    output_fname = args.output

    # Get input format from the file extension.
    # BUGFIX: '.yml' is now recognised on input, consistent with the output
    # format detection below.
    if input_fname:
        _, input_ext = os.path.splitext(input_fname)
        if input_ext == '.json':
            input_fmt = 'json'
        elif input_ext in ('.yaml', '.yml'):
            input_fmt = 'yaml'
        else:
            input_fmt = 'nml'
    else:
        input_fmt = 'nml'

    # Output format flag validation
    valid_formats = ('json', 'yaml', 'nml')
    if args.format and args.format not in valid_formats:
        print('f90nml: error: format must be one of the following: {0}'
              ''.format(valid_formats), file=sys.stderr)
        sys.exit(-1)

    # Get output format, either from the explicit flag or the file extension.
    if not args.format:
        if output_fname:
            _, output_ext = os.path.splitext(output_fname)
            if output_ext == '.json':
                output_fmt = 'json'
            elif output_ext in ('.yaml', '.yml'):
                output_fmt = 'yaml'
            else:
                output_fmt = 'nml'
        else:
            output_fmt = 'nml'
    else:
        output_fmt = args.format

    # Confirm that YAML module is available
    if (input_fmt == 'yaml' or output_fmt == 'yaml') and not has_yaml:
        print('f90nml: error: YAML module could not be found.',
              file=sys.stderr)
        print('  To enable YAML support, install PyYAML or use the '
              'f90nml[yaml] package.', file=sys.stderr)
        sys.exit(-1)

    # Do not patch non-namelist output
    if any(fmt != 'nml' for fmt in (input_fmt, output_fmt)) and args.patch:
        print('f90nml: error: Only namelist files can be patched.',
              file=sys.stderr)
        sys.exit(-1)

    # Read the input file
    if input_fname:
        if input_fmt in ('json', 'yaml'):
            if input_fmt == 'json':
                with open(input_fname) as input_file:
                    input_data = json.load(input_file)
            # BUGFIX: dispatch on the detected format rather than re-testing
            # the raw extension, so '.yml' files are parsed as YAML too.
            elif input_fmt == 'yaml':
                with open(input_fname) as input_file:
                    input_data = yaml.safe_load(input_file)
        else:
            input_data = f90nml.read(input_fname)
    else:
        input_data = {}

    input_data = f90nml.Namelist(input_data)

    # Construct the update namelist
    update_nml = {}
    if args.variable:
        if not args.group:
            # Use the first available group
            grp = list(input_data.keys())[0]
            warnings.warn(
                'f90nml: warning: Assuming variables are in group \'{g}\'.'
                ''.format(g=grp)
            )
        else:
            grp = args.group

        update_nml_str = '&{0} {1} /\n'.format(grp, ', '.join(args.variable))
        update_io = StringIO(update_nml_str)
        update_nml = f90nml.read(update_io)
        update_io.close()

    # Target output
    output_file = open(output_fname, 'w') if output_fname else sys.stdout

    if args.patch:
        # We have to read the file twice for a patch. The main reason is
        # to identify the default group, in case this is not provided.
        # It could be avoided if a group is provided, but logically that could
        # a mess that I do not want to sort out right now.
        f90nml.patch(input_fname, update_nml, output_file)
    else:
        # Update the input namelist directly
        if update_nml:
            try:
                input_data[grp].update(update_nml[grp])
            except KeyError:
                input_data[grp] = update_nml[grp]

    # Write to output
    if not args.patch:
        if output_fmt in ('json', 'yaml'):
            if output_fmt == 'json':
                input_data = input_data.todict(complex_tuple=True)
                json.dump(input_data, output_file,
                          indent=4, separators=(',', ': '))
                output_file.write('\n')
            elif output_fmt == 'yaml':
                input_data = input_data.todict(complex_tuple=True)
                yaml.dump(input_data, output_file,
                          default_flow_style=False)
        else:
            # Default to namelist output
            f90nml.write(input_data, output_file)

    # Cleanup
    if output_file != sys.stdout:
        output_file.close()
35,753
def get_matrix_in_format(original_matrix, matrix_format):
    """Return *original_matrix* converted to the requested sparse format.

    Dense numpy arrays are wrapped using the registered constructor for
    ``matrix_format``; sparse inputs already in that format are returned
    untouched, and any other sparse input is converted via ``asformat``.

    Parameters
    ----------
    original_matrix : np.matrix or scipy matrix or np.array of np.arrays
        matrix to convert
    matrix_format : string
        target sparse format name

    Returns
    -------
    matrix : scipy matrix
        matrix in the given format
    """
    if isinstance(original_matrix, np.ndarray):
        build_sparse = SPARSE_FORMAT_TO_CONSTRUCTOR[matrix_format]
        return build_sparse(original_matrix)

    if original_matrix.getformat() == matrix_format:
        return original_matrix

    return original_matrix.asformat(matrix_format)
35,754
def generate_batches(data, n_epochs=5, batch_size=64, shuffle=True,
                     random_state=75894, batches_per_group=-1, verbose=1):
    """Generate a series of batches.

    Args:
        data: Dataset; assumed to expose dict-valued `inputs`/`outputs`
            whose arrays share the same leading dimension -- TODO confirm
        n_epochs: int, number of epochs to generate
        batch_size: int, size of each batch. The last batch may be truncated
        shuffle: bool, if True, shuffle the data before each epoch
        random_state: int, seed for random number generator
        batches_per_group: int, if positive number of batches before setting
            epoch_done to True
        verbose: int, higher values will print more

    Yields:
        BatchData instance
    """
    input_keys = list(data.inputs.keys())
    # No inputs at all -> nothing to yield.
    if not input_keys:
        return
    # Dataset size is taken from the first input array's leading dimension.
    size = data.inputs[input_keys[0]].shape[0]
    epoch_done = False

    # Round up so a final short batch is still emitted.
    batches_per_epoch = size // batch_size
    if size % batch_size > 0:
        batches_per_epoch += 1

    if shuffle:
        random = np.random.RandomState(seed=random_state)
    else:
        random = None

    batch_counter = 0
    for epoch in range(n_epochs):
        if verbose > 0:
            print('On epoch {} of {}'.format(epoch, n_epochs))
        # Fresh permutation each epoch; the RandomState persists across
        # epochs so successive shuffles differ deterministically.
        if shuffle:
            perm = random.permutation(np.arange(size))
        else:
            perm = np.arange(size)
        for b in range(batches_per_epoch):
            batch_counter += 1
            if verbose > 1:
                # Print only when the integer percentage has advanced.
                if (int(b / batches_per_epoch * 100) >
                        int((b - 1) / batches_per_epoch * 100)):
                    print('{}% done'.format(int(b / batches_per_epoch * 100)),
                          end='\r')
            indices = perm[b * batch_size: (b + 1) * batch_size]
            batch_inputs = split_data_dict(data.inputs, indices)
            batch_outputs = split_data_dict(data.outputs, indices)
            # epoch_done is set on: the true end of an epoch (grouping
            # disabled), every batches_per_group-th batch (grouping enabled),
            # or unconditionally on the very last batch of the last epoch.
            if ((batches_per_group <= 0 and b == batches_per_epoch - 1) or
                    (batches_per_group > 0 and
                     batch_counter % batches_per_group == 0)):
                epoch_done = True
            elif b == batches_per_epoch - 1 and epoch == n_epochs - 1:
                epoch_done = True
            batch = BatchData(
                batch_inputs,
                outputs=batch_outputs,
                index=indices,
                epoch=epoch,
                batch=b,
                epoch_done=epoch_done,
                metadata={'description': 'Generated batch'}
            )
            yield batch
            # Reset after yielding so only one batch per group carries the flag.
            epoch_done = False
35,755
def get_haystack_response(res, debug=False, chatbot='QA'):
    """
    Function that filters null answers from the haystack response.

    NOTE: The necessity of this suggests that Deepset's no_ans_boost default of 0 may
    not be functioning for FARMReader.

    :param res: raw haystack response; assumed to follow the schema
        ``res['results'][0]['answers']`` -> list of answer dicts with
        'answer', 'score', 'probability' and 'meta' keys -- TODO confirm
    :param debug: if True, surface extraction errors verbatim in the reply
        instead of the generic fallback text
    :param chatbot: Type of chatbot to get response for (used in the debug message)
    :return: tuple ``(response_text, probability, url)``
    """
    answer = None
    try:
        answers = res['results'][0]['answers']
        # Flatten the answer dicts, drop any row containing nulls, then
        # keep the highest-scoring surviving answer.
        df = pd.json_normalize(answers)
        df.dropna(inplace=True)
        df.reset_index(inplace=True)
        answer = df.iloc[df.score.idxmax()]
        response = answer['answer']
        probability = answer['probability']
        # TODO remove once haystack defect logged
        # response = res['results'][0]['answers'][0]['answer']
        # probability = res['results'][0]['answers'][0]['probability']
    except Exception as e:
        # Any schema mismatch or empty answer set lands here.
        if debug:
            response = "So sorry, but there was an error extracting the response from the {} chatbot: '{}'. "\
                .format(chatbot, e)
        else:
            response = NO_ANSWER_RESPONSE
        probability = 1
    try:
        # NOTE(review): after json_normalize nested keys usually become
        # flat 'meta.link' columns, so this nested lookup may always fall
        # through to '' -- confirm against the actual row structure.
        url = answer['meta']['link']
    except Exception:
        url = ''
    return response, probability, url
35,756
def _convert_name(name, recurse=True, subs=None):
    """
    From an absolute path returns the variable name and its owner component
    in a dict. Names are also formatted.

    Parameters
    ----------
    name : str
        Connection absolute path and name
    recurse : bool
        If False, treat the top level of each name as the source/target
        component.
    subs : tuple or None
        Character pairs with old and substitute characters

    Returns
    -------
    dict(str, str)
    """
    sep = '.'

    def _to_entry(raw):
        normalized = raw.replace('@', sep)
        parts = normalized.split(sep)
        if recurse:
            if len(parts) <= 1:
                msg = ('The name "{}" cannot be processed. The separator character is "{}", '
                       'which does not occur in the name.')
                raise ValueError(msg.format(normalized, sep))
            # -1 is the variable name; -2 is the owning component.
            comp = parts[-2]
            path = normalized.rsplit(sep, 1)[0]
        else:
            comp = parts[0]
            path = comp
        var = _replace_chars(parts[-1], substitutes=subs)
        return {'comp': comp, 'var': var,
                'abs_name': _format_name(normalized),
                'path': _format_name(path)}

    # A source with multiple targets arrives as a list of names.
    if isinstance(name, list):
        return [_to_entry(n) for n in name]
    return _to_entry(name)
35,757
async def test_raises_if_hardware_module_has_gone_missing(
    subject: ThermocyclerMovementFlagger,
    state_store: StateStore,
    hardware_api: HardwareAPI,
    decoy: Decoy,
) -> None:
    """It should raise if the hardware module can't be found by its serial no."""
    # Arrange: state says the labware sits on a Thermocycler V1 module with
    # a known serial number...
    decoy.when(state_store.labware.get_location(labware_id="labware-id")).then_return(
        ModuleLocation(moduleId="module-id")
    )
    decoy.when(state_store.modules.get_model(module_id="module-id")).then_return(
        PEModuleModel.THERMOCYCLER_MODULE_V1,
    )
    decoy.when(
        state_store.modules.get_serial_number(module_id="module-id")
    ).then_return("module-serial")
    # ...but the hardware layer reports no attached thermocyclers at all
    # (empty match list), i.e. the module has gone missing.
    decoy.when(
        await hardware_api.find_modules(
            by_model=OpentronsThermocyclerModuleModel.THERMOCYCLER_V1,
            resolved_type=OpentronsModuleType.THERMOCYCLER,
        )
    ).then_return(([], None))

    # Act/Assert: since the lid state cannot be verified, movement must be
    # refused with ThermocyclerNotOpenError.
    with pytest.raises(ThermocyclerNotOpenError):
        await subject.raise_if_labware_in_non_open_thermocycler(
            labware_id="labware-id",
        )
35,758
def test_dual_ne_dates(df, right):
    """
    Test output for multiple conditions. `!=`

    Builds the expected frame with a brute-force cross join (assign a
    constant key `t`, merge on it, then filter with query) and compares it
    against `conditional_join` with two "!=" conditions.
    """
    filters = ["A", "Integers", "E", "Dates"]
    # Expected: cross join + row filter, restricted to the compared columns.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query("A != Integers and E != Dates")
        .reset_index(drop=True)
    )
    expected = expected.filter(filters)
    # Actual: the conditional_join implementation under test, with
    # sort_by_appearance so row order matches the cross-join construction.
    actual = df.conditional_join(
        right,
        ("A", "Integers", "!="),
        ("E", "Dates", "!="),
        how="inner",
        sort_by_appearance=True,
    )
    actual = actual.filter(filters)
    assert_frame_equal(expected, actual)
35,759
def memtrace(**kwargs):
    """
    Turn on memory tracing within a certain context.

    Generator with a single yield -- presumably decorated as a context
    manager at the definition site; tracing starts on entry and the
    post-processing report is emitted on exit. TODO confirm decorator.

    Parameters
    ----------
    kwargs : dict
        Named options to pass to setup.
    """
    options = _Options(**kwargs)
    # Fill in defaults for any option the caller did not supply.
    if options.outfile is None:
        options.outfile = 'mem_trace.raw'
    if options.min_mem is None:
        options.min_mem = 1.0
    if options.stream is None:
        options.stream = sys.stdout
    _setup(options)
    start()
    try:
        yield
    finally:
        # Always stop tracing and post-process the raw trace file, even if
        # the managed block raised.
        stop()
        _file_line2qualname(options.outfile)
        if options.tree:
            postprocess_memtrace_tree(fname=options.outfile,
                                      min_mem=options.min_mem,
                                      show_colors=options.show_colors,
                                      stream=options.stream)
        else:
            postprocess_memtrace_flat(fname=options.outfile,
                                      min_mem=options.min_mem,
                                      show_colors=options.show_colors,
                                      stream=options.stream)
35,760
def populate_sample_data():
    """
    Populates the database with sample data

    Creates one student ("test") and one superuser ("admin") account inside
    a single database context.
    """
    with db:
        accounts = [
            ("test", "test", "ST", "s1@junk.ss", "TEST", "CSB1000"),
            ("admin", "admin", "SU", "admin@admin.ss", "ADMIN", "CSB1001"),
        ]
        for login, secret, role, email, first, inst in accounts:
            user = User(login_id=login,
                        password=pbkdf2_sha256.hash(secret),
                        role=role,
                        email=email,
                        first_name=first,
                        last_name="USER",
                        inst_id=inst)
            user.save()
35,761
def _orthogonalize(constraints, X): """ Orthogonalize spline terms with respect to non spline terms. Parameters ---------- constraints: numpy array constraint matrix, non spline terms X: numpy array spline terms Returns ------- constrained_X: numpy array orthogonalized spline terms """ Q, _ = np.linalg.qr(constraints) # compute Q Projection_Matrix = np.matmul(Q,Q.T) constrained_X = X - np.matmul(Projection_Matrix,X) return constrained_X
35,762
def get_shell_output(cmd, verbose=None):
    """Run a shell command and return its returncode, stdout and stderr.

    Currently (pyrpipe v 0.0.4) this function is called in
    getReturnStatus(), getProgramVersion(), find_files().

    Parameters
    ----------
    cmd: list
        command to run
    verbose: bool
        to print messages; falls back to the module-level _verbose flag
        when falsy

    :return: (returncode, stdout and stderr); (-1, "", error text) on failure
    :rtype: tuple: (int, str, str)
    """
    if not verbose:
        verbose = _verbose
    # not logging these commands
    cmd = parse_cmd(cmd)
    log_message = cmd
    starttime_str = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(time.time()))
    if verbose:
        pu.print_notification("Start:" + starttime_str)
        pu.print_blue("$ " + log_message)
    try:
        # stderr is folded into stdout, so the stderr pipe is always None.
        result = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT, shell=True)
        stdout, stderr = result.communicate()
        stdout = stdout.decode("utf-8") if stdout else ''
        stderr = stderr.decode("utf-8") if stderr else ''
        return (result.returncode, stdout, stderr)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return (-1, "", "Command failed to execute")
35,763
def property_removed_from_property_group_listener(sender, instance, **kwargs):
    """
    This is called before a GroupsPropertiesRelation is deleted, in other
    words when a Property is removed from a PropertyGroup.

    Deletes all ProductPropertyValue which are assigned to the property and
    the property group from which the property is about to be removed.
    """
    # BUGFIX: renamed the local from `property` to `prop`; it shadowed the
    # `property` builtin.
    prop = instance.property
    for product in instance.group.products.all():
        ProductPropertyValue.objects.filter(product=product, property=prop).delete()
35,764
def tril(input, diagonal=0, name=None):
    """
    This op returns the lower triangular part of a matrix (2-D tensor) or batch
    of matrices :attr:`input`, the other elements of the result tensor are set
    to 0. The lower triangular part of the matrix is defined as the elements
    on and below the diagonal.

    Args:
        input (Variable): The input variable which is a Tensor.
            Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
        diagonal (int, optional): The diagonal to consider, default value is 0.
            If :attr:`diagonal` = 0, all elements on and below the main diagonal are
            retained. A positive value includes just as many diagonals above the main
            diagonal, and similarly a negative value excludes just as many diagonals below
            the main diagonal. The main diagonal are the set of indices
            :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
            :math:`d_{1}, d_{2}` are the dimensions of the matrix.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor, results of lower triangular operation by the specified
        diagonal of input tensor, it's data type is the same as input's Tensor.

    Raises:
        TypeError: diagonal is not a int type.
        ValueError: dimension of :attr:`input` is less than 2.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            # array([[ 1,  2,  3,  4],
            #        [ 5,  6,  7,  8],
            #        [ 9, 10, 11, 12]])
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            # example 1, default diagonal
            tril = fluid.layers.tril(x)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1,  0,  0,  0],
            #        [ 5,  6,  0,  0],
            #        [ 9, 10, 11,  0]])

        .. code-block:: python

            # example 2, positive diagonal value
            import paddle.fluid as fluid
            import numpy as np
            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            tril = fluid.layers.tril(x, diagonal=2)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1,  2,  3,  0],
            #        [ 5,  6,  7,  8],
            #        [ 9, 10, 11, 12]])

        .. code-block:: python

            # example 3, negative diagonal value
            import paddle.fluid as fluid
            import numpy as np
            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            tril = fluid.layers.tril(x, diagonal=-1)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 0,  0,  0,  0],
            #        [ 5,  0,  0,  0],
            #        [ 9, 10,  0,  0]])

    """
    # locals() forwards input/diagonal/name to the shared tril/triu op
    # builder -- do not rename these parameters or add locals above this line.
    return _tril_triu_op(LayerHelper('tril', **locals()))
35,765
def csv_to_objects(csvfile_path):
    """
    Read a CSV file and convert it to a Python dictionary.

    Rows with type 'episode' go to the shows list, rows with type 'movie'
    to the movies list; any other type aborts the program.

    Parameters
    ----------
    csvfile_path : string
        The absolute or relative path to a valid CSV file.

    Returns
    -------
    dict
        A dict containing a list of show/episode entries and a list of
        movie entries (both empty if the file could not be read).
    """
    import sys  # local import: module-level imports are outside this block

    logger.info("Attempting to read from {file}".format(file=csvfile_path))
    # BUGFIX: initialise before the try block; previously a read error left
    # shows/movies unbound and the final return raised NameError.
    shows = []
    movies = []
    try:
        with open(csvfile_path) as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',')
            headers = next(csvreader)
            for row in csvreader:
                item = {}
                for header_num, header in enumerate(headers):
                    item[header] = row[header_num]
                logger.debug(item)
                if item['type'] == "episode":
                    shows.append(item)
                elif item['type'] == "movie":
                    movies.append(item)
                else:
                    # BUGFIX: message typo ("Unknow") and use sys.exit();
                    # the bare exit() helper is only guaranteed interactively.
                    logger.error("Unknown item type {type}".format(type=item['type']))
                    sys.exit(1)
            logger.info("{shows} shows and {movies} movies read from CSV".format(shows=len(shows), movies=len(movies)))
    except (EnvironmentError, EOFError):
        # BUGFIX: the message had no {filename} placeholder, so the passed
        # filename was silently dropped.
        logger.error("Error whilst loading CSV file {filename}".format(filename=csvfile_path))
    return {'shows': shows, 'movies': movies}
35,766
def rename_and_merge_columns_on_dict(data_encoded, rename_encoded_columns_dict, **kwargs):
    """
    Rename columns of a numerical frame; columns that collide after
    renaming are merged by adding them up.

    Parameters
    ----------
    data_encoded: pandas.DataFrame with numerical columns
    rename_encoded_columns_dict: dict of columns to rename in data_encoded
    **kwargs
        inplace: bool, default=False
            decides if data_encoded is edited inplace or if a copy is returned

    Returns
    -------
    pandas.DataFrame with columns renamed according to
    rename_encoded_columns_dict, columns that share the same name after
    renaming merged by summation, and columns sorted by name.
    Returns None when inplace=True.

    Example
    -------
    data_encoded:
       x  y  z
    0  0  1  1
    1  0  1  0

    rename_encoded_columns_dict: {'y': 'x'}

    return:
       x  z
    0  1  1
    1  1  0
    """
    inplace = kwargs.get('inplace', False)
    data_copy = data_encoded if inplace else data_encoded.copy()

    data_copy.rename(columns=rename_encoded_columns_dict, inplace=True)

    for col in data_copy.columns:
        df_col = data_copy[col]
        # Selecting a duplicated column label yields a DataFrame (a Series
        # otherwise); merge such duplicates by summing them.
        if isinstance(df_col, pd.DataFrame):
            data_copy.drop(columns=col, inplace=True)
            # BUGFIX: summing along axis=1 is index-aligned; the previous
            # zero-Series accumulator was built on a fresh RangeIndex and
            # produced NaNs for frames with a non-default index.
            data_copy[col] = df_col.sum(axis=1)

    if inplace:
        # BUGFIX: the old code rebound the local name to a reindexed copy,
        # so inplace callers never saw sorted columns; sort in place instead.
        data_copy.sort_index(axis=1, inplace=True)
        return None
    return data_copy.reindex(sorted(data_copy.columns), axis=1)
35,767
def _get_public_props(obj) -> List[str]: """Return the list of public props from an object.""" return [prop for prop in dir(obj) if not prop.startswith('_')]
35,768
def get_vd_html(
    voronoi_diagram: FortunesAlgorithm,
    limit_sites: List[SiteToUse],
    xlim: Limit,
    ylim: Limit,
) -> str:
    """Build and return the HTML rendering of the voronoi diagram figure.

    BUGFIX: the annotation and docstring previously claimed this returned
    None, but the function returns the value produced by get_html()
    (assumed to be an HTML string -- confirm get_html's return type).
    """
    figure = get_vd_figure(
        voronoi_diagram, limit_sites, xlim, ylim, voronoi_diagram.SITE_CLASS
    )
    html = get_html(figure)
    return html
35,769
def handle_rssadditem(bot, ievent): """ arguments: <feedname> <token> - add an item (token) to a feeds tokens to be displayed, see rss-scan for a list of available tokens. """ try: (name, item) = ievent.args except ValueError: ievent.missing('<feedname> <token>') ; return if ievent.options and ievent.options.channel: target = ievent.options.channel else: target = ievent.channel if name == "all": todo = watcher.list() else: todo = [name, ] errors = {} done = [] for name in todo: try: feed = watcher.byname(name) if not feed: ievent.reply("we don't have a %s feed" % name) ; return try: feed.itemslists.data[jsonstring([name, bot.type, target])].append(item) except KeyError: feed.itemslists.data[jsonstring([name, bot.type, target])] = ['title', 'link', "author"] feed.itemslists.save() except Exception, ex: errors[name] = str(ex) ; continue done.append(name) if errors: event.reply("errors occured: ", errors) ievent.reply('%s added to: ' % item, done)
35,770
def test_class_default_constructor(capfd):
    """Test that HelloWorld constructs and print_name emits the greeting."""
    result = "Hello World!"
    hello_world = HelloWorld()
    # BUGFIX: `type(x) is not None` is always true (type() never returns
    # None); assert the actual type instead.
    assert isinstance(hello_world, HelloWorld)
    hello_world.print_name()
    out, err = capfd.readouterr()
    assert result in out
35,771
def test_chip_properties_NO_ROM_PORTS():
    """Ensure the chip has been set with the correct number of ROM ports."""
    # `chip` is a module-level instance shared by this test module; 16 is
    # the expected ROM port count -- presumably matching the emulated
    # processor's specification; confirm against the chip definition.
    assert chip.NO_ROM_PORTS == 16
35,772
def drawimage(xmin, xmax, ymin, ymax, width, height, data, model=0):
    """
    Draw an image into a given rectangular area.

    **Parameters:**

    `xmin`, `ymin` :
        First corner point of the rectangle
    `xmax`, `ymax` :
        Second corner point of the rectangle
    `width`, `height` :
        The width and the height of the image
    `data` :
        An array of color values dimensioned `width` by `height`
    `model` :
        Color model (default=0)

    The available color models are:

    +-----------------------+---+-----------+
    |MODEL_RGB              |  0|   AABBGGRR|
    +-----------------------+---+-----------+
    |MODEL_HSV              |  1|   AAVVSSHH|
    +-----------------------+---+-----------+

    The points (`xmin`, `ymin`) and (`xmax`, `ymax`) are world coordinates
    defining diagonally opposite corner points of a rectangle. This
    rectangle is divided into `width` by `height` cells. The two-dimensional
    array `data` specifies colors for each cell.
    """
    # Marshal the Python color values into a C int array of width*height
    # entries; _data must stay alive for the duration of the C call.
    _data = intarray(width * height, data)
    # Scalars are wrapped in ctypes types to match the gr_drawimage C
    # signature (doubles for world coordinates, ints for sizes/model).
    __gr.gr_drawimage(c_double(xmin), c_double(xmax), c_double(ymin), c_double(ymax),
                      c_int(width), c_int(height), _data.data, c_int(model))
35,773
def Test(directory):
    """Main testing function: compare Canary relation/component results
    against the Gold Standard.

    Scans *directory* for paired .txt/.ann files, runs the component and
    relation comparisons on each, and appends the results to canaryTest.csv.

    Note: the merged text above replaces two stacked string literals, of
    which only the first was actually a docstring.
    """
    # Stores what type of files we are looking for in the directory.
    # BUGFIX: "*txt" was missing the dot (it also matched e.g. "foo.xtxt");
    # now consistent with "*.ann".
    types = ("*.txt", "*.ann")
    # Stores the files that match those types (same filename has both .txt & .ann)
    files = []
    for extension in types:
        files.extend(glob(join(directory, extension)))
    # NOTE(review): each document appears once per extension, so the loop
    # below processes every filename twice -- confirm whether deduplication
    # is intended.
    for file in files:
        # Printing filename for testing (Canary Relations breaks on something)
        print("Incase Break: " + file)
        # Strip the directory prefix, then the extension (.txt / .ann).
        filename = file.split(directory)[1].split(".")[0]
        # Comparing Components results (Canary vs "Gold Standard")
        componentsAnalysis = BratAnalysis(filename + ".txt", filename + ".ann")
        # Comparing Relations results (Canary vs "Gold Standard")
        relationsAnalysis = BratRelationAnalysis(filename + ".txt", filename + ".ann")
        # Exporting results to .csv file
        data = []
        data.append([filename, "Canary", str(componentsAnalysis[0][0]),
                     str(componentsAnalysis[0][1]), str(componentsAnalysis[0][2]),
                     str(relationsAnalysis[0][0])])
        data.append([filename, "Manual", str(componentsAnalysis[1][0]),
                     str(componentsAnalysis[1][1]), str(componentsAnalysis[1][2]),
                     str(relationsAnalysis[0][1])])
        # TODO: Add another line for f1-score for components/relations
        exportCSV(data)
        print("File: " + filename + " exported to canaryTest.csv")
35,774
def sample_normal_mean_jeffreys(s1, ndata, prec):
    """Draw one sample of a normal distribution's mean.

    Under a Jeffreys prior the posterior mean is ``s1 / ndata`` (sum of the
    data over its count) and the posterior standard deviation is
    ``1 / sqrt(prec * ndata)``.

    Args:
        s1: Sum of the observed data points.
        ndata: Number of observed data points.
        prec: Precision (inverse variance) of the likelihood.

    Returns:
        A single draw from the posterior over the mean.
    """
    # The flattened original had a stray "##" before the return; the
    # sampling statement is made unambiguously live here.
    return rn.normal(s1 / ndata, 1 / np.sqrt(prec * ndata))
35,775
def copy_tree(
        src, dst,
        preserve_mode=1,
        preserve_times=1,
        preserve_symlinks=0,
        update=0,
        verbose=0,
        dry_run=0,
        condition=None):

    """
    Copy an entire directory tree 'src' to a new location 'dst'.  Both
    'src' and 'dst' must be directory names.  If 'src' is not a
    directory, raise DistutilsFileError.  If 'dst' does not exist, it is
    created with 'mkpath()'.  The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'.  Return the list of files that were
    copied or might have been copied, using their output name.  The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.

    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories.  If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.

    'condition' is an optional per-path predicate; entries for which it
    returns false are skipped (defaults to 'skipscm', which skips SCM
    metadata directories).
    """
    # NOTE(review): 'unicode' only exists on Python 2 — this block predates
    # a py3 port.
    assert isinstance(src, (str, unicode)), repr(src)
    assert isinstance(dst, (str, unicode)), repr(dst)

    # Imported lazily so merely importing this module does not pull in
    # distutils.
    from distutils.dir_util import mkpath
    from distutils.dep_util import newer
    from distutils.errors import DistutilsFileError
    from distutils import log

    src = fsencoding(src)
    dst = fsencoding(dst)

    if condition is None:
        condition = skipscm

    # In dry-run mode a missing source is tolerated (names just ends up
    # empty below); otherwise it is an error.
    if not dry_run and not zipio.isdir(src):
        raise DistutilsFileError(
              "cannot copy tree '%s': not a directory" % src)
    try:
        names = zipio.listdir(src)
    except os.error as exc:
        (errno, errstr) = exc.args
        if dry_run:
            names = []
        else:
            raise DistutilsFileError(
                  "error listing files in '%s': %s" % (src, errstr))

    if not dry_run:
        mkpath(dst)

    outputs = []

    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)
        # Skip entries rejected by the caller-supplied predicate.
        if (condition is not None) and (not condition(src_name)):
            continue

        # Note: using zipio's internal _locate function throws an IOError on
        # dead symlinks, so handle it here.
        if os.path.islink(src_name) \
                and not os.path.exists(os.readlink(src_name)):
            continue

        if preserve_symlinks and zipio.islink(src_name):
            # Recreate the symlink itself rather than copying its target.
            link_dest = zipio.readlink(src_name)
            log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                if update and not newer(src, dst_name):
                    pass
                else:
                    make_symlink(link_dest, dst_name)
            outputs.append(dst_name)

        elif zipio.isdir(src_name) and not os.path.isfile(src_name):
            # ^^^ this odd tests ensures that resource files that
            # happen to be a zipfile won't get extracted.
            # XXX: need API in zipio to clean up this code
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          dry_run=dry_run, condition=condition))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, dry_run=dry_run)
            outputs.append(dst_name)

    return outputs
35,776
def pipeline_report_build(submission: Submission, stdout: str, passed: bool, **_):
    """
    Record the result of a submission's build step.

    POSTed json should be of the shape:

    {
      "stdout": "build logs...",
      "passed": True
    }

    :param submission: submission row whose build is being reported
    :param stdout: captured build logs
    :param passed: whether the build succeeded
    :return: success response payload
    """
    # Clamp the logs so they fit in the MySQL TEXT column.
    stdout = stdout[:MYSQL_TEXT_MAX_LENGTH]

    # Log the build being reported.
    logger.info(
        "submission build reported",
        extra={
            "type": "build_report",
            "submission_id": submission.id,
            "assignment_id": submission.assignment_id,
            "owner_id": submission.owner_id,
            "passed": passed,
            "stdout": stdout,
        },
    )

    # Record the build outcome on the submission.
    submission.build.stdout = stdout
    submission.build.passed = passed

    # A failed build terminates the pipeline for this submission.
    if passed is False:
        submission.processed = True
        submission.state = "Build did not succeed"

    # Persist both rows.
    db.session.add(submission)
    db.session.add(submission.build)
    db.session.commit()

    return success_response("Build successfully reported.")
35,777
def flatten(episode, context_length, include_labels=True, delimiter='\n'):
    """
    Flatten the data into single example episodes.

    Each example's text becomes the concatenation of the rolling context
    (bounded by `context_length`; unbounded when it is <= 0), and every
    example is marked as the end of its episode. When `include_labels` is
    set, a randomly chosen label is appended to the context after each turn.
    Used to make conditional training easier and for fair method comparison.
    """
    history = deque(maxlen=context_length if context_length > 0 else None)
    flattened = []
    for ex in episode:
        history.append(ex.get('text', ''))
        # Only rewrite the text once there is real context to prepend.
        if len(history) > 1:
            ex.force_set('text', delimiter.join(history))
        # Every flattened example stands alone as a full episode.
        ex.force_set('episode_done', True)
        labels = ex.get('labels', ex.get('eval_labels', None))
        if include_labels and labels is not None:
            history.append(random.choice(labels))
        flattened.append(ex)
    return flattened
35,778
def calc_binsize(num_bins, t_start, t_stop):
    """
    Calculates the size of bins from the given parameters.

    The bin size :attr:`binsize` is derived from the three parameters
    :attr:`num_bins`, :attr:`t_start` and :attr:`t_stop`.

    Parameters
    ----------
    num_bins: int
        Number of bins
    t_start: quantities.Quantity
        Start time
    t_stop: quantities.Quantity
        Stop time

    Returns
    -------
    binsize : quantities.Quantity
        Size of bins calculated from given parameters, or None if any
        parameter is missing.

    Raises
    ------
    ValueError :
        Raised when :attr:`t_stop` is smaller than :attr:`t_start`.
    """
    # Without all three inputs the bin size is undefined.
    if num_bins is None or t_start is None or t_stop is None:
        return None
    if t_stop < t_start:
        raise ValueError("t_stop (%s) is smaller than t_start (%s)"
                         % (t_stop, t_start))
    return (t_stop - t_start) / num_bins
35,779
def flash_leds(dur):
    """Turn the LEDs on for `dur` seconds, then off.

    The switch-off happens in a ``finally`` block so the LEDs are not left
    on if the sleep is interrupted (e.g. by KeyboardInterrupt).

    Args:
        dur: Flash duration in seconds.
    """
    leds.on()
    try:
        time.sleep(dur)
    finally:
        leds.off()
35,780
def salutation(phrase='Bonjour', nom=''):
    """Greet someone by printing "<phrase> <nom> !"."""
    print(f"{phrase} {nom} !")
35,781
def d_within(geom, gdf, distance):
    """Return the subset of a GeoDataFrame lying within `distance` of a shapely geometry."""
    nearby = _intersects(geom, gdf, distance)
    return nearby
35,782
def make_params(args, nmax=None):
    """Format GET parameters for the API endpoint.

    The endpoint requires parameters sorted alphabetically by name, and
    permits filtering on at most `nmax` parameters when multiple filters
    are offered.

    Raises:
        ValueError: if more than `nmax` parameters are supplied.
    """
    if nmax and len(args) > nmax:
        raise ValueError("Too many parameters supplied")
    # Iterating the dict directly yields its keys; sort them for the API.
    return [(key, stringify(args[key])) for key in sorted(args)]
35,783
def visualize_multiple_categories(): """Example to show how to visualize images that activate multiple categories """ # Build the VGG16 network with ImageNet weights model = VGG16(weights='imagenet', include_top=True) print('Model loaded.') # The name of the layer we want to visualize # (see model definition in vggnet.py) layer_name = 'predictions' layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D) indices = [20, [20, 71]] idx_label_map = dict((idx, utils.get_imagenet_label(idx)) for idx in [20, 71]) vis_img = visualize_activation(model.input, layer_dict[layer_name], max_iter=500, filter_indices=indices, idx_label_map=idx_label_map) cv2.imshow('Multiple category visualization', vis_img) cv2.waitKey(0)
35,784
def debug(msg: str, always: bool = False, icon: str = "🐞") -> None:
    """Emit a debug message in magenta via the shared echo helper."""
    _echo(msg=msg, icon=icon, always=always, fg=typer.colors.MAGENTA)
35,785
def create_authors(filename, obj, database):
    """
    Extract all authors and their affiliations from the json object `obj`,
    merge them into the graph `database`, connect authors to their
    affiliations, and finally connect every author to the paper node
    identified by `filename`.
    """
    def _strip_quotes(text):
        # Single/double quotes inside the f-string Cypher literals would
        # break the query, so they are removed.
        return text.translate({ord(c): '' for c in '\'\"'})

    authors = []
    for item in obj['metadata']['authors']:
        # Build "First M. Last": a single-letter middle name is rendered as
        # an initial with a trailing dot; longer ones are space-joined.
        middle = item['middle']
        if middle == []:
            middle_part = ''
        elif len(middle[0]) == 1:
            middle_part = middle[0] + '. '
        else:
            middle_part = ' '.join(middle) + ' '
        name = item['first'] + ' ' + middle_part + item['last']

        # Affiliation fields default to '' when the json omits them.
        affiliation = item['affiliation']
        if affiliation == {} or affiliation['location'] == {} \
                or 'country' not in affiliation['location']:
            location = ''
        else:
            location = affiliation['location']['country']
        institution = '' if affiliation == {} else affiliation['institution']
        laboratory = '' if affiliation == {} else affiliation['laboratory']

        # Remove quotes that would make the create queries fail.
        location = _strip_quotes(location)
        institution = _strip_quotes(institution)
        laboratory = _strip_quotes(laboratory)

        # Merge the author node into the database.
        query = f'MERGE (a:Author {{name: "{name}"}}) '
        database.execute(query, 'w')
        # Save all authors into a list for the current paper.
        authors.append(name)

        if location != '':
            database.execute(f'MERGE (l:Location {{name: "{location}"}})', 'w')
            query = (
                f'MATCH (a:Author {{name: "{name}"}}) '
                f'MATCH (l:Location {{name: "{location}"}}) '
                f'MERGE (a)-[:affiliates_with]->(l)'
            )
            database.execute(query, 'w')
        if institution != '':
            database.execute(f'MERGE (i:Institution {{name: "{institution}"}})', 'w')
            query = (
                f'MATCH (a:Author {{name: "{name}"}}) '
                f'MATCH (i:Institution {{name: "{institution}"}}) '
                f'MERGE (a)-[:affiliates_with]->(i)'
            )
            database.execute(query, 'w')
        if laboratory != '':
            database.execute(f'MERGE (l:Laboratory {{name: "{laboratory}"}})', 'w')
            query = (
                f'MATCH (a:Author {{name: "{name}"}}) '
                f'MATCH (l:Laboratory {{name: "{laboratory}"}}) '
                f'MERGE (a)-[:affiliates_with]->(l)'
            )
            database.execute(query, 'w')

    # Connect all authors of the paper with the paper itself.
    # FIX: match the paper by the `filename` argument — the previous version
    # hard-coded a placeholder literal and left the parameter unused.
    query = (f'MATCH (a:Author) WHERE a.name IN {authors} '
             'WITH collect(a) as authors '
             f'MATCH (p:Paper {{filename: "{filename}"}}) '
             'UNWIND authors as author '
             'CREATE (author)-[:writes]->(p)')
    database.execute(query, 'w')
    return
35,786
def list_available_providers():
    """Print the available cloud providers and their regions, one per line."""
    regions = config.AVAILABLE_CLOUD_REGIONS
    # dict.iteritems() is Python-2-only; .items() works on both 2 and 3,
    # and the value is used directly instead of re-indexing regions[k].
    for provider, provider_regions in regions.items():
        print('{0:12s}: {1}'.format(provider, ', '.join(provider_regions.keys())))
35,787
def remap(value, oldMin, oldMax, newMin, newMax):
    """
    Linearly map `value` from the range [oldMin, oldMax] onto
    [newMin, newMax].

    Args:
        value: value to remap
        oldMin: old min of range
        oldMax: old max of range
        newMin: new min of range
        newMax: new max of range

    Returns:
        The remapped value in the new range
    """
    normalized = (value - oldMin) / (oldMax - oldMin)
    return newMin + normalized * (newMax - newMin)
35,788
def test_suspend_user_by_id():
    """Will suspend the user for user id 1.

    :return: Should return: suspended
    """
    # NOTE(review): integration test — requires a live Syncope instance at
    # this address with the hard-coded admin credentials; it will fail (not
    # skip) when the server is unreachable.
    syn = syncope.Syncope(syncope_url="http://192.168.1.145:9080", username="admin", password="password")
    user_data = syn.suspend_user_by_id(1)
    # The REST payload is expected to echo the new account status.
    assert user_data['status'] == "suspended"
35,789
def test_sha_key():
    """
    Ensure we conform to https://tools.ietf.org/html/rfc3414#appendix-A.3.2
    """
    engine_id = unhexlify("000000000000000000000002")
    # Key localization: SHA-1, 20-byte output, per the RFC test vector.
    derive_key = password_to_key(hashlib.sha1, 20)
    localized = derive_key(b"maplesyrup", engine_id)
    expected = unhexlify("6695febc9288e36282235fc7151f128497b38f3f")
    assert localized == expected
35,790
async def construct_unit_passport(unit: Unit) -> str:
    """Build the unit's own passport, dump it as a .yaml file and return its path."""
    passport_data = _get_passport_dict(unit)
    passport_path = f"unit-passports/unit-passport-{unit.uuid}.yaml"
    _save_passport(unit, passport_data, passport_path)
    return passport_path
35,791
def certification_to_csv(stats, filepath, product_id):
    """Writes certification outputs to the file specified.

    Parameters
    ----------
    stats : list of dict
        list of statistical outputs from the function
        `thermostat.compute_summary_statistics()`
    filepath : str
        filepath specification for location of output CSV file.
    product_id : str
        product identifier written into every output row.

    Returns
    -------
    df : pd.DataFrame
        DataFrame containing data output to CSV, or None when `stats`
        is missing.
    """
    if stats is None:
        warnings.warn("No certification data to export.")
        return None

    labels = [entry.get("label") for entry in stats]
    # The software version is the same for every row; read it once from a
    # known entry.
    sw_version = stats[labels.index("all_tau_cvrmse_savings_p01_filter_heating")][
        "sw_version"
    ]

    rows = []
    for column_filter, column_data in DATA_COLUMNS:
        entry = stats[labels.index(column_filter)]
        rows.append([
            product_id,
            sw_version,
            COLUMN_LOOKUP[column_data]["metric"],
            FILTER_LOOKUP[column_filter]["filter"],
            FILTER_LOOKUP[column_filter]["region"],
            COLUMN_LOOKUP[column_data]["statistic"],
            FILTER_LOOKUP[column_filter]["season"],
            entry.get(column_data, None),
        ])

    output_dataframe = pd.DataFrame(rows, columns=CERTIFICATION_HEADERS)
    output_dataframe.to_csv(
        filepath, index=False, columns=CERTIFICATION_HEADERS, float_format="%.2f"
    )
    return output_dataframe
35,792
def get_ipns_link(name: str) -> str:
    """Get the ipns link with the name of it which we remember it by.

    Args:
        name (str): Name the IPNS key was registered under
            (matched case-insensitively against lowercase key names).

    Returns:
        str: The IPNS url for the first matching key.

    Raises:
        ValueError: if no key with the given name exists.
    """
    keys = IPFS_CLIENT.key.list()
    # List comprehension instead of a lambda bound to a name + filter().
    wanted = name.lower()
    matches = [key for key in keys['Keys'] if key['Name'] == wanted]
    if not matches:
        raise ValueError(f'IPNS link not found with name: "{name}"!')
    ipns_id = matches[0]['Id']  # get first match
    return f'{IPNS_PATH}{ipns_id}'
35,793
def joint_sim(num_samp, num_dim, noise=0.5):
    """
    Generate a joint-normal simulation.

    :param num_samp: number of samples for the simulation
    :param num_dim: number of dimensions for the simulation
    :param noise: noise level of the simulation, defaults to 0.5
    :return: the data matrix and a response array
    """
    gauss_noise = np.random.normal(loc=0, scale=1, size=(num_samp, 1))
    # Noise is only injected in the multivariate case.
    kappa = 1 if num_dim > 1 else 0
    rho = 1 / (2 * num_dim)
    # Block covariance: identity with constant cross-correlation rho
    # between the first and second halves of the coordinates.
    sig = np.diag(np.ones(shape=(2 * num_dim)))
    sig[num_dim:(2 * num_dim), 0:num_dim] = rho
    sig[0:num_dim, num_dim:(2 * num_dim)] = rho
    samp = np.random.multivariate_normal(cov=sig, mean=np.zeros(2 * num_dim),
                                         size=num_samp)
    # NOTE(review): the y-slice starts at num_dim + 1 when num_dim > 1 but
    # at num_dim when num_dim == 1 — looks like an off-by-one; preserved
    # as-is to keep behavior identical.
    if num_dim == 1:
        y = samp[:, num_dim:(2 * num_dim)] + kappa * noise * gauss_noise
        x = samp[:, 0:num_dim]
    else:
        y = samp[:, (num_dim + 1):(2 * num_dim)] + kappa * noise * gauss_noise
        x = samp[:, 0:num_dim]
    return x, y
35,794
def main(phases=['SKS','SKKS'],batch=False,evt_sta_list=None):
    """
    Main - call this function to run the interface to sheba.

    Depends on the existence of a station list file specified by statlist and
    on sac data having already been downloaded. Path should point to the
    directory that you want the sheba processing directories to be stored
    under.

    Phases - [list]: Phases that you want to process for shear wave splitting
             using sheba. Traces should contain the expected arrivals. The
             run loops over all phases provided and runs sheba accordingly.
    batch  - [bool]: True - run in batch mode (multi station), using python's
             multiprocessing library to improve performance. False - run for
             a single station (name read interactively from stdin).
    evt_sta_list - [str]: path (relative to <path>/Data) of the event-station
             list. Each row holds a date/time and station corresponding to an
             event time and the station where arrivals are observed.

    NOTE(review): the mutable default `phases=['SKS','SKKS']` is shared
    across calls — harmless here since it is never mutated, but fragile.
    """
    # Hard-coded workspace root; everything below is resolved against it.
    path = '/Users/ja17375/DiscrePy'
    #################### Time run #################
    start = time.time()
    ################## Start Process #############
    if batch is True:
        # Multi-station batch mode.
        #print(__name__)
        # Guard so the pool workers (which re-import this module) do not
        # recursively spawn pools.
        if __name__ == 'interact_sheba':
            ## Set of pool for mapping
            statlist ='{}/Data/{}'.format(path,evt_sta_list)
            print('Processing Data from the Event-Station List {}'.format(statlist))
            # Unique station names from the whitespace-delimited list file.
            stations = pd.read_csv(statlist,delim_whitespace=True).STAT.unique()
            out_pre = input('Enter SDB file name: ')
            # Four worker processes; closing() ensures the pool is shut down.
            with contextlib.closing( Pool(processes = 4) ) as pool:
                # Iterate over stations in the station list.
                pool.map(run_sheba,stations)
            # pool.map(tidyup,stations) ??? Maybe this would work???
            for phase in phases:
                """ Loop over phases process and tidyup results """
                # NOTE(review): this path has no leading '/' — looks like a
                # typo for '/Users/...'; confirm before relying on it.
                tidy_path = 'Users/ja17375/DiscrePy/Sheba/Runs/Jacks_Split'
                outfile = '{}_{}_sheba_results.sdb'.format(out_pre,phase)
                tidyup(tidy_path,phase,outfile)
    elif batch is False:
        # Single-station mode: station name is read interactively.
        station = input('Input Station Name > ')
        run_sheba(station,path=path,phases=phases)
    end = time.time()
    runtime = end - start
    print('The runtime of main is {} seconds'.format(runtime))
35,795
def test_vector_interpolation_cross_section():
    """Test cross section interpolation.

    X_DATA..ZSLICE and the expected `vector_cross_section` array are
    module-level fixtures defined outside this chunk — presumably small
    particle arrays; confirm in the test module header.
    """
    vec = plonk.visualize.interpolation.vector_interpolation(
        x_data=X_DATA,
        y_data=Y_DATA,
        x_position=XX,
        y_position=YY,
        z_position=ZZ,
        extent=EXTENT,
        smoothing_length=HH,
        particle_mass=MM,
        number_of_pixels=PIX,
        cross_section=ZSLICE,
    )

    # Loose relative tolerance: interpolation is float32-heavy.
    np.testing.assert_allclose(vec, vector_cross_section, rtol=1e-5)
35,796
def calculate_seasonal_tilt(axial_tilt, degrees):
    """Find the seasonal tilt offset from axial tilt and orbit (in degrees).

    axial_tilt -- The planet's tilt. e.g. Earth's tilt is 23.44 degrees.
    degrees -- How far along is the planet in its orbit around its star?
        (between 0 and 360. 0/360 and 180 are equinoxes. 90 and 270 are
        solstices.)
    """
    # NOTE: IRL the tilt of a planet doesn't actually change as it orbits.
    # What does change is the *relative* angle of incoming sunlight.
    orbit_radians = degrees * np.pi / 180
    return np.sin(orbit_radians) * axial_tilt
35,797
def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary {'bard': 'train',
        'dog': 'man'} may return ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x
    try:
        # Nothing to yield for empty/None choices; generators without a
        # length fall through to the TypeError branch and are iterated.
        if choices is None or len(choices) == 0:
            return
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perform a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    # (these scorers call full_process internally on their inputs).
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice:
    # bind full_process into pre_processor and disable it inside the scorer.
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        # Unicode variants keep non-ASCII characters.
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)
35,798
def mark_dts_nn(marked_dict):
    """Mark, for every focus-marked determiner, its noun with ``focus: 2``.

    Walks a dictionary representation of the XML-text in which determiners
    have already been marked with ``focus: 1``. For each such determiner it
    scans the words from the determiner to the end of its sentence and marks
    the first noun of a matching type with ``focus: 2``, then moves on to the
    next determiner. This add-on makes the second approach — marking both
    determiners and their nouns — possible.

    Single-word sentences (often just a bracket or similar character in the
    XML-text) make ``sentence['w']`` a bare value instead of a list of
    dicts; the isinstance() checks skip those entries.

    Args:
        marked_dict: dictionary representation of the XML-text, with the
            word metadata attribute ``focus`` added (determiners only).

    Returns:
        A deep copy of `marked_dict` with ``focus`` set for both
        determiners (1) and their nouns (2); the input is not modified.
    """
    # Noun msd tags that qualify as a determiner's noun.
    noun_tags = ('NN.NEU.SIN.IND.NOM', 'NN.UTR.SIN.IND.NOM',
                 'NN.UTR.SIN.IND.GEN')

    def _mark_first_noun(words, start):
        # Scan forward from the determiner; mark the first matching noun
        # with focus=2, then stop looking for this determiner.
        for noun_meta in words[start:]:
            if isinstance(noun_meta, dict) and noun_meta.get('msd') in noun_tags:
                noun_meta['focus'] = 2
                return

    def _process_words(words):
        # enumerate() gives the true position of each determiner; the old
        # list.index(word_meta) lookup returned the *first* equal dict and
        # could restart the noun search at the wrong place on duplicates.
        for idx, word_meta in enumerate(words):
            if isinstance(word_meta, dict) and word_meta.get('focus') == 1:
                _mark_first_noun(words, idx)

    nn_marked_dict = deepcopy(marked_dict)
    for paragraph in nn_marked_dict['corpus']['text']['lessontext']['paragraph']:
        sentence_lvl = paragraph['sentence']
        if isinstance(sentence_lvl, dict):
            # Single-sentence paragraph.
            _process_words(sentence_lvl['w'])
        elif isinstance(sentence_lvl, list):
            for sentence in sentence_lvl:
                _process_words(sentence['w'])
        else:
            print("Found something that is not a dict/list!")
    return nn_marked_dict
35,799