content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def _aggregate_pop_simplified_comix( pop: pd.Series, target: pd.DataFrame ) -> pd.DataFrame:
    """
    Aggregates the population matrix based on the CoMix table.

    :param pop: 1-year based population
    :param target: target dataframe we will want to multiply or divide with
    :return: Returns a dataframe that can be multiplied with the comix matrix
        to get a table of contacts or it can be used to divide the contacts
        table to get the CoMix back
    """
    # Collapse the 1-year population into the three CoMix age bands.
    # NOTE(review): if the slicing here is positional, pop[17:69] covers ages
    # 17-68 while the label says "[17,70)"; if the series has an integer label
    # index, label slicing is endpoint-inclusive and 69 is included — confirm
    # which indexing applies to `pop`.
    agg = pd.DataFrame(
        {
            "[0,17)": [pop[:17].sum()],
            "[17,70)": [pop[17:69].sum()],
            "70+": [pop[70:].sum()],
        }
    )
    # Repeat the single aggregate row once per target column, reuse the
    # target's index, and transpose so the result aligns with the CoMix matrix.
    return pd.concat([agg] * len(target.columns)).set_index(target.index).T
11ddfb103c95416b1a93577f90e12aa6159123eb
21,900
def authenticate():
    """
    Uses HTTP basic authentication to generate an authentication token.

    Any resource that requires authentication can use either basic auth
    or this token.
    """
    # serialize_token / basic_auth / jsonify are provided elsewhere in the app.
    auth_token = serialize_token(basic_auth.current_user())
    payload = {'token': auth_token.decode('ascii')}
    return jsonify(payload)
adfd4d80bb08c6a0c3175495b4b2ab1aa0b898c6
21,901
import torch


def split_image(image, N):
    """
    Split an image tensor of shape (B, C, W, H) into N x N spatial tiles.

    Tiles are produced row-major: first split along dim 2, then each strip
    along dim 3.
    """
    tiles = []
    for strip in torch.split(image, N, dim=2):
        tiles += list(torch.split(strip, N, dim=3))
    return tiles
da51c3520dfee740a36d5e0241f3fd46a07f2752
21,902
def _upsample_add(x, y): """Upsample and add two feature maps. Args: x: (Variable) top feature map to be upsampled. y: (Variable) lateral feature map. Returns: (Variable) added feature map. Note in PyTorch, when input size is odd, the upsampled feature map with `F.upsample(..., scale_factor=2, mode='nearest')` maybe not equal to the lateral feature map size. e.g. original input size: [N,_,15,15] -> conv2d feature map size: [N,_,8,8] -> upsampled feature map size: [N,_,16,16] So we choose bilinear upsample which supports arbitrary output sizes. """ _, _, H, W = y.size() return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
facac22b50906509138a479074a7744b737d554d
21,903
def get_eta_and_mu(alpha):
    """Get the value of eta and mu.

    See (4.46) of the PhD thesis of J.-M. Battini.

    Parameters
    ----------
    alpha: float
        the angle of the rotation.

    Returns
    -------
    The first coefficient eta: float.
    The second coefficient mu: float.
    """
    # The closed-form expressions below are singular at alpha == 0;
    # use the analytic limits there instead.
    if alpha == 0.:
        return 1 / 12, 1 / 360
    s = sin(alpha)
    half_s = sin(alpha / 2)
    eta = (2 * s - alpha * (1 + cos(alpha))) / (2 * alpha ** 2 * s)
    mu = (alpha * (alpha + s) - 8 * half_s ** 2) / (4 * alpha ** 4 * half_s ** 2)
    return eta, mu
4f1a215a52deda250827d4ad3d06f8731c69dc9d
21,904
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df_all - Pandas DataFrame containing city data with no filters
        df - Pandas DataFrame containing city data filtered by month and day
    """
    print('Loading city data...')
    # Load DataFrame for city (CITY_DATA maps city name -> CSV path; defined elsewhere)
    df = pd.read_csv(CITY_DATA[city])
    # Convert start and end times to datetime type
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['End Time'] = pd.to_datetime(df['End Time'])
    # Create multiple new DataFrame Time Series
    df['month'] = df['Start Time'].dt.month
    # NOTE(review): Series.dt.weekday_name was removed in pandas 1.0; newer
    # pandas needs dt.day_name() — confirm the pinned pandas version.
    df['day_str'] = df['Start Time'].dt.weekday_name
    df['day_int'] = df['Start Time'].dt.weekday
    df['hour'] = df['Start Time'].dt.hour
    # Create side copy of df without filters
    df_all = df.copy()
    # Filter DataFrame by month (month_list is a module-level list whose
    # positions line up with calendar month numbers).
    # NOTE(review): the code compares against 'All' while the docstring says
    # "all" — confirm the sentinel's capitalisation with the callers.
    month_idx = month_list.index(month)
    if month != 'All':
        df = df[df['month'] == month_idx]
    # Filter DataFrame by day of week
    if day != 'All':
        df = df[df['day_str'] == day]
    print('-'*40)
    return df_all, df
3d1c1ab7b2f346dab0fe3f01a262983c880eda34
21,905
def write_error_row(rowNum, errInfo):
    """Google Sheets API Code.

    Writes all team news link data from RSS feed to the NFL Team Articles
    speadsheet.
    https://docs.google.com/spreadsheets/d/1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ/edit#gid=0
    """
    # get_credentials and the `mgs` module (googleapiclient wrappers) are
    # provided elsewhere in this project.
    credentials = get_credentials()
    http = credentials.authorize(mgs.httplib2.Http())
    # Explicit discovery URL pins the Sheets v4 REST API description.
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = mgs.discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)
    spreadsheet_id = '1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ'
    # RAW: values are written as-is, without Sheets input parsing.
    value_input_option = 'RAW'
    # Error rows land on the ERROR sheet starting at the requested row.
    rangeName = 'ERROR!A' + str(rowNum)
    values = errInfo
    body = {
        'values': values
    }
    result = service.spreadsheets().values().update(spreadsheetId=spreadsheet_id,
                                                    range=rangeName,
                                                    valueInputOption=value_input_option,
                                                    body=body).execute()
    return result
5a3d071d78656f6991103cac72c7ce46f025d689
21,906
import json


def get_last_transaction():
    """Return the last transaction from the blockchain.

    Reads the first transaction (index 0) of the latest block through the
    module-level web3 client ``w3`` and serializes it with ``HexJsonEncoder``
    (defined elsewhere) so hex byte values survive JSON encoding.

    Returns:
        str: JSON-encoded transaction on success.
        dict: ``{'error': ...}`` when the lookup or serialization fails.
    """
    try:
        transaction = w3.eth.get_transaction_by_block(w3.eth.blockNumber, 0)
        tx_dict = dict(transaction)
        tx_json = json.dumps(tx_dict, cls=HexJsonEncoder)
        return tx_json
    except Exception as err:
        # Deliberate best-effort: any web3/serialization failure is reported
        # to the caller as an error payload instead of raising.
        print("Error '{0}' occurred.".format(err))
        return {'error':'Error while fetching transaction'}
89a70c474670b6e691fde8424ffc78b739ee415e
21,907
def get_tp_model() -> TargetPlatformModel:
    """
    Generate the default target platform model: a base 8-bit quantization
    configuration plus an 8, 4, 2 bits configuration list for mixed-precision
    quantization.

    NOTE: to generate a target platform model with different configurations
    but the same Operators Sets (for tests, experiments, etc.), use this
    implementation as a template: override 'get_op_quantization_configs' and
    feed its output to 'generate_tp_model' with your configurations.

    Returns: A TargetPlatformModel object.
    """
    base_config, mixed_precision_cfg_list = get_op_quantization_configs()
    tp_model = generate_tp_model(
        default_config=base_config,
        base_config=base_config,
        mixed_precision_cfg_list=mixed_precision_cfg_list,
        name='qnnpack_tp_model',
    )
    return tp_model
5a501f18a090927f7aeba9883f3676ede595944c
21,908
import os


def IsARepoRoot(directory):
    """Returns True if directory is the root of a repo checkout."""
    # Expand "~" and resolve symlinks before probing for the .repo marker.
    resolved = os.path.realpath(os.path.expanduser(directory))
    return os.path.exists(os.path.join(resolved, '.repo'))
b7b5fd57907c98835f4962ab095ed11cdd4975a4
21,909
from typing import Optional


def check_sparv_version() -> Optional[bool]:
    """Check if the Sparv data dir is outdated.

    Returns:
        True if up to date, False if outdated, None if version file is missing.
    """
    # `paths`, VERSION_FILE and __version__ are module-level names.
    version_file = paths.get_data_path() / VERSION_FILE
    if not version_file.is_file():
        return None
    return version_file.read_text() == __version__
2c2ebeee0ad0c8a08c841b594ca45676a22407d7
21,910
def grav_n(expt_name, num_samples, num_particles, T_max, dt, srate, noise_std, seed):
    """2-body gravitational problem.

    Generates a dataset of noisy, near-circular two-body orbits (G=1, unit
    masses) by integrating Newtonian gravity with solve_ivp, then packaging
    coordinates, their time derivatives, and energies.

    State layout per body: [mass, qx, qy, px, py].
    """
    ##### ENERGY #####
    def potential_energy(state):
        '''U=sum_i,j>i G m_i m_j / r_ij'''
        # state: [bodies, properties, timesteps]
        tot_energy = np.zeros((1, 1, state.shape[2]))
        for i in range(state.shape[0]):
            for j in range(i + 1, state.shape[0]):
                r_ij = ((state[i:i + 1, 1:3] - state[j:j + 1, 1:3]) ** 2).sum(1, keepdims=True) ** .5
                m_i = state[i:i + 1, 0:1]
                m_j = state[j:j + 1, 0:1]
                tot_energy += m_i * m_j / r_ij
        U = -tot_energy.sum(0).squeeze()
        return U

    def kinetic_energy(state):
        '''T=sum_i .5*m*v^2'''
        energies = .5 * state[:, 0:1] * (state[:, 3:5] ** 2).sum(1, keepdims=True)
        T = energies.sum(0).squeeze()
        return T

    def total_energy(state):
        return potential_energy(state) + kinetic_energy(state)

    ##### DYNAMICS #####
    def get_accelerations(state, epsilon=0):
        # shape of state is [bodies x properties]
        net_accs = []  # [nbodies x 2]
        for i in range(state.shape[0]):  # number of bodies
            other_bodies = np.concatenate([state[:i, :], state[i + 1:, :]], axis=0)
            displacements = other_bodies[:, 1:3] - state[i, 1:3]  # indexes 1:3 -> pxs, pys
            distances = (displacements ** 2).sum(1, keepdims=True) ** 0.5
            masses = other_bodies[:, 0:1]  # index 0 -> mass
            pointwise_accs = masses * displacements / (distances ** 3 + epsilon)  # G=1
            net_acc = pointwise_accs.sum(0, keepdims=True)
            net_accs.append(net_acc)
        net_accs = np.concatenate(net_accs, axis=0)
        return net_accs

    def update(t, state):
        # Right-hand side for solve_ivp over the flattened state vector.
        state = state.reshape(-1, 5)  # [bodies, properties]
        # print(state.shape)
        deriv = np.zeros_like(state)
        deriv[:, 1:3] = state[:, 3:5]  # dx, dy = vx, vy
        deriv[:, 3:5] = get_accelerations(state)
        return deriv.reshape(-1)

    ##### INTEGRATION SETTINGS #####
    def get_orbit(state, update_fn=update, t_points=100, t_span=[0, 2], **kwargs):
        # NOTE(review): t_span=[0, 2] is a mutable default; it is only read
        # here, but confirm no caller mutates it.
        if not 'rtol' in kwargs.keys():
            kwargs['rtol'] = 1e-12
            # kwargs['atol'] = 1e-12
            # kwargs['atol'] = 1e-9
        orbit_settings = locals()
        nbodies = state.shape[0]
        t_eval = np.arange(t_span[0], t_span[1], dt)
        # np.arange can overshoot by one sample; trim to the requested count.
        if len(t_eval) != t_points:
            t_eval = t_eval[:-1]
        orbit_settings['t_eval'] = t_eval
        path = solve_ivp(fun=update_fn, t_span=t_span, y0=state.flatten(),
                         t_eval=t_eval, method='DOP853', **kwargs)
        orbit = path['y'].reshape(nbodies, 5, t_points)
        return orbit, orbit_settings
        # spring_ivp = rk(update_fn, t_eval, state.reshape(-1), dt)
        # spring_ivp = np.array(spring_ivp)
        # print(spring_ivp.shape)
        # q, p = spring_ivp[:, 0], spring_ivp[:, 1]
        # dydt = [dynamics_fn(y, None) for y in spring_ivp]
        # dydt = np.stack(dydt).T
        # dqdt, dpdt = np.split(dydt, 2)
        # return spring_ivp.reshape(nbodies,5,t_points), 33

    ##### INITIALIZE THE TWO BODIES #####
    def random_config(orbit_noise=5e-2, min_radius=0.5, max_radius=1.5):
        state = np.zeros((2, 5))
        state[:, 0] = 1
        pos = np.random.rand(2) * (max_radius - min_radius) + min_radius
        r = np.sqrt(np.sum((pos ** 2)))
        # velocity that yields a circular orbit
        vel = np.flipud(pos) / (2 * r ** 1.5)
        vel[0] *= -1
        # make the circular orbits SLIGHTLY elliptical
        vel *= 1 + orbit_noise * np.random.randn()
        state[:, 1:3] = pos
        state[:, 3:5] = vel
        # Second body mirrors the first so the center of mass stays at rest.
        state[1, 1:] *= -1
        return state

    ##### HELPER FUNCTION #####
    def coords2state(coords, nbodies=2, mass=1):
        # [timesteps, coords] -> [bodies, (mass + coords), timesteps]
        timesteps = coords.shape[0]
        state = coords.T
        state = state.reshape(-1, nbodies, timesteps).transpose(1, 0, 2)
        mass_vec = mass * np.ones((nbodies, 1, timesteps))
        state = np.concatenate([mass_vec, state], axis=1)
        return state

    ##### INTEGRATE AN ORBIT OR TWO #####
    def sample_orbits(timesteps=50, trials=1000, nbodies=2, orbit_noise=5e-2,
                      min_radius=0.5, max_radius=1.5, t_span=[0, 20],
                      verbose=False, **kwargs):
        orbit_settings = locals()
        if verbose:
            print("Making a dataset of near-circular 2-body orbits:")
        x, dx, e, ks, ms = [], [], [], [], []
        # samps_per_trial = np.ceil((T_max / srate))
        # N = samps_per_trial * trials
        np.random.seed(seed)
        for _ in range(trials):
            state = random_config(orbit_noise, min_radius, max_radius)
            orbit, _ = get_orbit(state, t_points=timesteps, t_span=t_span, **kwargs)
            print(orbit.shape)
            # [timesteps, bodies, properties] -> rows of flattened states.
            batch = orbit.transpose(2, 0, 1).reshape(-1, 10)
            # Subsample to the requested sampling rate.
            ssr = int(srate / dt)
            # (batch.shape)
            batch = batch[::ssr]
            # print('ssr')
            # print(batch.shape)
            sbx, sbdx, sbe = [], [], []
            for state in batch:
                dstate = update(None, state)
                # reshape from [nbodies, state] where state=[m, qx, qy, px, py]
                # to [canonical_coords] = [qx1, qx2, qy1, qy2, px1,px2,....]
                coords = state.reshape(nbodies, 5).T[1:].flatten()
                dcoords = dstate.reshape(nbodies, 5).T[1:].flatten()
                # print(coords.shape)
                coords += np.random.randn(*coords.shape) * noise_std
                dcoords += np.random.randn(*dcoords.shape) * noise_std
                x.append(coords)
                dx.append(dcoords)
                shaped_state = state.copy().reshape(2, 5, 1)
                e.append(total_energy(shaped_state))
                ks.append(np.ones(num_particles))
                ms.append(np.ones(num_particles))
        # print(len(x))
        # Reorder columns to interleave the two bodies' coordinates.
        data = {'x': np.stack(x)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'dx': np.stack(dx)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'energy': np.stack(e),
                'ks': np.stack(ks),
                'mass': np.stack(ms)}
        return data

    return sample_orbits(timesteps=int(np.ceil(T_max / dt)), trials=num_samples,
                         nbodies=2, orbit_noise=5e-2, min_radius=0.5,
                         max_radius=1.5, t_span=[0, T_max], verbose=False)
05f8aa5f6b864440a8a69be35f23d3938114e133
21,911
def fit_spline_linear_extrapolation(cumul_observations, smoothing_fun=simple_mirroring,
                                    smoothed_dat=None, plotf=False, smoothep=True,
                                    smooth=0.5, ns=3, H=7):
    """
    Linear extrapolation by splines on log daily cases.

    Input: cumul_observations: cumulative observations,
           smoothed_dat: optional precomputed trend of incremental history
               (``None`` or empty -> computed via ``smoothing_fun``),
           ns: optional smoothing window parameter,
           H: forecasting horizon,
           smooth: spline smoothing factor,
           smoothep: extrapolate from the smoothed (True) or raw (False) diffs
    Output: forecast on horizon H in terms of cumulative numbers starting
            from the last observation
    """
    # FIX: the original declared ``smoothed_dat=[]`` — a mutable default
    # argument. ``None`` is the sentinel now; passing [] still works.
    if smoothed_dat is None or len(smoothed_dat) == 0:
        smoothed_dat = smoothing_fun(cumul_observations, Ws=ns)
    val_start = smoothed_dat[-1]
    # Work on log(1 + trend) so the extrapolation is multiplicative.
    dat = np.log(list(smoothed_dat + 1))
    spl = csaps.UnivariateCubicSmoothingSpline(range(len(dat)), dat, smooth=smooth)
    dat_diff = np.diff(spl(np.arange(len(dat))))
    x = np.arange(len(dat_diff))
    spl = csaps.UnivariateCubicSmoothingSpline(x, dat_diff, smooth=smooth)
    dat_diff_sm = spl(x)
    # Extrapolate the log daily increment linearly with the final slope.
    step = dat_diff_sm[-1] - dat_diff_sm[-2]
    if smoothep:
        dat_forecast = dat_diff_sm[-1] + step * np.arange(1, H + 1)  # + seasonality
    else:
        dat_forecast = dat_diff[-1] + step * np.arange(1, H + 1)  # + seasonality
    # Back to cumulative counts, anchored at the last observed value.
    forecast = np.insert(np.exp(np.cumsum(dat_forecast)) * val_start, 0, val_start)
    return forecast
62c0feacf87096a3889e63a2193bc93092b9dc02
21,912
def compute_dl_target(location):
    """
    When the location is empty, set the location path to /usr/sys/inst.images

    return:
        return code : 0 - OK
                      1 - if error
        dl_target value or msg in case of error
    """
    # Blank/whitespace-only (or None) locations fall back to the default
    # image directory; otherwise trailing slashes are stripped.
    if location and location.strip():
        dl_target = location.rstrip('/')
    else:
        dl_target = "/usr/sys/inst.images"
    return 0, dl_target
419b9fcad59ca12b54ad981a9f3b265620a22ab1
21,913
def hz_to_angstrom(frequency):
    """Convert a frequency in Hz to a wavelength in Angstroms.

    Parameters
    ----------
    frequency: float
        The frequency in Hz.

    Returns
    -------
    The wavelength in Angstroms.
    """
    # lambda = c / nu, then metres -> Angstroms via the module constants
    # C and ANGSTROM (defined elsewhere).
    wavelength = C / frequency
    return wavelength / ANGSTROM
9fed63f7933c6d957a35de7244464d0303abf3ce
21,914
from re import T


def is_literal(token):
    """Return True when the token is a literal (string or number)."""
    # NOTE(review): ``from re import T`` binds the regex TEMPLATE flag, which
    # has no ``Literal`` attribute — this line would raise AttributeError.
    # The import almost certainly should target the SQL parser's token-type
    # tree (e.g. ``from sqlparse import tokens as T``); confirm against the
    # module this was extracted from.
    return token.ttype in T.Literal
46527a24660f8544951b999ec556a4cf12204087
21,915
def PutObject(*, session, bucket, key, content, type_="application/octet-stream"):
    """Saves data to S3 under specified filename and bucketname

    :param session: The session to use for AWS connection
    :type session: boto3.session.Session
    :param bucket: Name of bucket
    :type bucket: str
    :param key: Name of file
    :type key: str
    :param content: Data to save
    :type content: bytes | str
    :param type_: Content type of the data to put
    :type type_: str
    :return: The new S3 object
    :rtype: boto3.core.resource.S3Object
    """
    s3conn = session.connect_to("s3")
    # Make sure, we have the bucket to add object to
    try:
        b = GetOrCreateBuckets(session, bucket)
    except Exception as e:
        # There is a chance that the user trying to PutObject does not have permissions
        # to Create/List Buckets. In such cases and error is thrown. We can still try to
        # save and assume the bucket already exists.
        # (Deliberate best-effort: the result `b` and error `e` are unused.)
        pass
    # Now we can create the object
    S3Objects = session.get_collection("s3", "S3ObjectCollection")
    s3objects = S3Objects(connection=s3conn, bucket=bucket, key=key)
    # Text content is stored UTF-8 encoded; bytes pass through unchanged.
    if isinstance(content, str):
        bindata = content.encode("utf-8")
    else:
        bindata = content
    # Now we create the object
    return s3objects.create(key=key, acl="private", content_type=type_, body=bindata)
908581b7d61c3cce9a976b03a0bf7d3ed8c691ca
21,916
from io import StringIO


def insert_sequences_into_tree(aln, moltype, params={}, write_log=True):
    """Returns a tree from Alignment object aln.

    aln: an xxx.Alignment object, or data that can be used to build one.

    moltype: cogent.core.moltype.MolType object

    params: dict of parameters to pass in to the RAxML app controller.

    The result will be an xxx.Alignment object, or None if tree fails.
    """
    # NOTE(review): ``params={}`` is a mutable default argument; it is only
    # read here, but confirm callers never mutate it.
    # convert aln to phy since seq_names need fixed to run through pplacer
    new_aln=get_align_for_phylip(StringIO(aln))
    # convert aln to fasta in case it is not already a fasta file
    aln2 = Alignment(new_aln)
    seqs = aln2.toFasta()
    # Input handler name for the pplacer app controller.
    ih = '_input_as_multiline_string'
    pplacer_app = Pplacer(params=params,
                          InputHandler=ih,
                          WorkingDir=None,
                          SuppressStderr=False,
                          SuppressStdout=False)
    pplacer_result = pplacer_app(seqs)
    # write a log file
    if write_log:
        log_fp = join(params["--out-dir"],'log_pplacer_' + \
                      split(get_tmp_filename())[-1])
        log_file=open(log_fp,'w')
        log_file.write(pplacer_result['StdOut'].read())
        log_file.close()
    # use guppy to convert json file into a placement tree
    guppy_params={'tog':None}
    new_tree=build_tree_from_json_using_params(pplacer_result['json'].name, \
                                               output_dir=params['--out-dir'], \
                                               params=guppy_params)
    # Remove pplacer's temporary working files.
    pplacer_result.cleanUp()
    return new_tree
d81167b49f2e375f17a227d708d5115af5d18549
21,917
from azure.cli.core.azclierror import CLIInternalError


def billing_invoice_download(client, account_name=None, invoice_name=None, download_token=None, download_urls=None):
    """
    Get URL to download invoice

    :param account_name: The ID that uniquely identifies a billing account.
    :param invoice_name: The ID that uniquely identifies an invoice.
    :param download_token: The download token with document source and document ID.
    :param download_urls: An array of download urls for individual.
    """
    # Dispatch on the supplied argument combination, most specific first.
    # The order of these checks is significant and must be preserved.
    if account_name and invoice_name and download_token:
        return client.download_invoice(account_name, invoice_name, download_token)
    if account_name and download_urls:
        return client.download_multiple_modern_invoice(account_name, download_urls)
    if download_urls:
        return client.download_multiple_billing_subscription_invoice(download_urls)
    if invoice_name and download_token:
        return client.download_billing_subscription_invoice(
            invoice_name, download_token
        )
    # No known combination matched — internal error rather than user error.
    raise CLIInternalError(
        "Uncaught argument combinations for Azure CLI to handle. Please submit an issue"
    )
a75326953188e0aaf0145ceeaa791460ec0c0823
21,918
import re


def find_classes(text):
    """
    find line that contains a top-level open brace
    then look for class { in that line
    """
    nest_level = 0
    # NOTE: these patterns are non-raw strings; prefer r"..." to avoid
    # invalid-escape-sequence warnings on newer Pythons.
    brace_re = re.compile("[\{\}]")
    classname_re = "[\w\<\>\:]+"
    # Matches "class Foo {" and "struct Foo : public Base, public Base2 {".
    class_re = re.compile(
        "(?:class|struct)\s*(\w+)\s*(?:\:\s*public\s*" + classname_re +
        "(?:,\s*public\s*" + classname_re + ")*)?\s*\{")
    classes = []
    lines = text.split("\n")
    for (i,line) in enumerate(lines):
        # NOTE(review): the nesting/template filter is disabled ("if True:"),
        # so nest_level is tracked below but never consulted — confirm
        # whether the commented-out condition should be restored.
        if True:#nest_level == 0 and (i==0 or "template" not in lines[i-1]):
            classes.extend(class_re.findall(line))
        braces = brace_re.findall(line)
        for brace in braces:
            if brace == "{":
                nest_level += 1
            elif brace == "}":
                nest_level -= 1
    return classes
126bc091a809e152c3d447ffdd103c764bc6c9ac
21,919
def kineticEnergyCOM(robot : object, symbolic = False):
    """This function calculates the total kinetic energy, with respect to each center of mass, given linear and angular velocities

    Args:
        robot (object): serial robot (this won't work with other type of robots)
        symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.

    Returns:
        K (SymPy Matrix): kinetic matrix (symbolical)
    """
    # Inertia matrix with respect to the centers of mass.
    D = inertiaMatrixCOM(robot, symbolic)
    # K = (1/2) * qd^T * D * qd, in either symbolic or numeric form.
    if symbolic:
        return 0.5 * (robot.qdSymbolic.T * D * robot.qdSymbolic)
    qd = robot.jointsVelocities
    return 0.5 * qd.T.dot(D).dot(qd)
5f0559e55a389741ad0591b5ac3f220ffdb76a2c
21,920
def get_input(request) -> str:
    """Get the input song from the request form.

    The form behaves like a dict; a missing 'input' key yields None.
    """
    form = request.form
    return form.get('input')
de237dc0ad3ce2fa6312dc6ba0ea9fe1c2bdbeb3
21,921
from typing import Hashable import math def _unit_circle_positions(item_counts: dict[Hashable, tuple[int, int]], radius=0.45, center_x=0.5, center_y=0.5) -> dict[Hashable, tuple[float, float]]: """ computes equally spaced points on a circle based on the radius and center positions :param item_counts: item dict LinkedNetwork.get_item_link_count_dict() :param radius: radius of the circle :param center_x: x center position :param center_y: y center position :return: dict of items and their corresponding positions """ r = radius cx, cy = center_x, center_y a = math.radians(360) / len(item_counts) points = {} i = 0 for key, _ in item_counts.items(): points[key] = (math.cos(a * i) * r + cx, math.sin(a * i) * r + cy) i += 1 return points
66f60f5b90f7825f2abfdd2484375c9558786250
21,922
import re


def rate_table_download(request, table_id):
    """
    Download a calcification rate table as CSV.
    """
    # Local helper: render the generic permission-denied page with a message.
    def render_permission_error(request, message):
        return render(request, 'permission_denied.html', dict(error=message))

    table_permission_error_message = \
        f"You don't have permission to download table of ID {table_id}."

    try:
        rate_table = CalcifyRateTable.objects.get(pk=table_id)
    except CalcifyRateTable.DoesNotExist:
        # Technically the error message isn't accurate here, since it
        # implies the table ID exists. But users don't really have any
        # business knowing which table IDs exist or not outside their source.
        # So this obfuscation makes sense.
        return render_permission_error(request, table_permission_error_message)

    if rate_table.source:
        if not rate_table.source.visible_to_user(request.user):
            # Table belongs to a source, and the user doesn't have access to
            # that source.
            return render_permission_error(
                request, table_permission_error_message)

    # The source_id parameter tells us to limit the downloaded CSV to the
    # entries in the specified source's labelset, rather than including all
    # the rows of the rate table. This is particularly useful when downloading
    # a default rate table.
    if 'source_id' in request.GET:
        source_id = request.GET['source_id']
        source_permission_error_message = \
            f"You don't have permission to access source of ID {source_id}."
        try:
            source = Source.objects.get(pk=source_id)
        except Source.DoesNotExist:
            return render_permission_error(
                request, source_permission_error_message)
        if not source.visible_to_user(request.user):
            return render_permission_error(
                request, source_permission_error_message)
    else:
        source = None

    # At this point we do have permission, so proceed.

    # Convert the rate table's name to a valid filename in Windows and
    # Linux/Mac (or at least make a reasonable effort to).
    # Convert chars that are problematic in either OS to underscores.
    #
    # Linux only disallows / (and the null char, but we'll ignore that case).
    # Windows:
    # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
    non_filename_chars_regex = re.compile(r'[<>:"/\\|?*]')
    csv_filename = non_filename_chars_regex.sub('_', rate_table.name)

    # Make a CSV stream response and write the data to it.
    response = create_csv_stream_response('{}.csv'.format(csv_filename))
    rate_table_json_to_csv(response, rate_table, source=source)

    return response
d62eb09d11c0e91cca3eb36388f1165e2a4433ee
21,923
from os.path import normpath, sep


def normalize_path(path):
    """
    Normalize a pathname by collapsing redundant separators and up-level
    references, then force the platform separator throughout.
    """
    normalized = normpath(path)
    # normpath leaves foreign separators alone, so unify both kinds to the
    # platform separator before handing off to adapt_path (defined elsewhere).
    for ch in ("/", "\\"):
        normalized = normalized.replace(ch, sep)
    return adapt_path(normalized)
e422a76e1fe19401db34dfe80ff1d8ce2f411af2
21,924
def rgb_to_RGB255(rgb: RGBTuple) -> RGB255Tuple:
    """
    Convert from Color.rgb's 0-1 range to ANSI RGB (0-255) range.

    >>> rgb_to_RGB255((1, 0.5, 0))
    (255, 128, 0)
    """
    # map_interval (defined elsewhere) rescales [0, 1] -> [0, 255];
    # each channel is rounded to the nearest integer.
    return tuple(int(round(map_interval(0, 1, 0, 255, channel)))
                 for channel in rgb)
1fe460a4716244efbbacbc6d44f10a3fc6ba8d3f
21,925
import requests
import re


def check_rest_version(host="http://www.compbio.dundee.ac.uk/jpred4/cgi-bin/rest", suffix="version", silent=False):
    """Check version of JPred REST interface.

    :param str host: JPred host address.
    :param str suffix: Host address suffix.
    :param silent: Should the work be done silently?
    :type silent: :py:obj:`True` or :py:obj:`False`
    :return: Version of JPred REST API.
    :rtype: :py:class:`str`
    """
    version_url = "{}/{}".format(host, suffix)
    response = requests.get(version_url)
    # The endpoint returns plain text containing e.g. "VERSION=v.1.5".
    # NOTE(review): .group(1) raises AttributeError if the pattern is absent;
    # confirm the endpoint contract before hardening.
    version = re.search(r"VERSION=(v\.[0-9]*.[0-9]*)", response.text).group(1)
    if not silent:
        print(version)
    return version
f9c5e858e4a4681d8b5045e8ed08738f1c32016a
21,926
def analyzer_options(*args):
    """
    analyzer_options()

    Allow the user to set analyzer options. (show a dialog box)
    ( 'ui_analyzer_options' )
    """
    # Thin SWIG wrapper: forwards directly to the IDA kernel binding.
    return _ida_kernwin.analyzer_options(*args)
e0b78523d32cce303fe8048012ae1e57c7c422bd
21,927
import torch


def vector_vector_feature(v_a, v_b, weight, p_idx, frames, symmetric):
    """
    Taking outer product, create matrix feature per pair, average, express in SO2 feature.

    :param v_a: [E, 3]
    :param v_b: [E, 3]
    :param weight: [E]
    :param p_idx: [E] index [0, V)
    :param frames: [V, 3, 3] per vertex, rows are (X, Y, normal) vectors.
    :param symmetric: bool
    :return: [V, 2/3, 5] (2 channels if symmetric)
    """
    # Weighted outer product per edge: [E, 3, 3].
    m_pair = torch.einsum("ni,nj,n->nij", v_a, v_b, weight)
    # Weighted average of the pair matrices per vertex (scatter_sum is a
    # project helper).
    m_p = scatter_sum(m_pair, p_idx, dim=0) / scatter_sum(weight, p_idx)[:, None, None]
    # Express each per-vertex matrix in its local tangent frame.
    m_p_gauge = frames @ m_p @ frames.transpose(1, 2)
    # Symmetric matrices use the 2-channel SO2 conversion, general ones the
    # 3-channel conversion (both defined elsewhere).
    return (three_sym_matrix_to_so2_features if symmetric else three_matrix_to_so2_features)(
        m_p_gauge
    )
6b583cc834d79d463fc5e6b68b98cd027c4969ec
21,928
def parse_steps(filename):
    """
    Read each line of FILENAME and return a dict where the key is the step
    and the value is a list of prerequisite steps.
    """
    prereqs = defaultdict(list)
    seen = set()
    with open(filename) as fh:
        for line in fh:
            # Line shape: "Step X must be finished before step Y can begin."
            tokens = line.split(' ')
            prereqs[tokens[7]].append(tokens[1])
            seen.add(tokens[1])
    # Steps that are never a dependent still need an (empty) entry.
    for step in seen:
        if step not in prereqs:
            prereqs[step] = []
    return prereqs
450d93cb72cf92c186cbcecc1992c6e4391ca428
21,929
import glob
import json


def sitestructure(config, path, extra):
    """Read all markdown files and make a site structure file"""
    # no error handling here, because compile_page has it
    pages = []
    for md_file in glob.iglob(path + '**/*.md', recursive=True):
        merged = compile_page(None, config, md_file, extra)
        if 'tags' in merged:
            merged['tags'] = [t.strip() for t in merged['tags'].split(',')]
        if 'content_raw' in merged:
            merged['snippet'] = merged['content_raw'][:200] + "..."
        # remove bulky fields before serializing
        for field in ('content', 'content_raw', 'templates'):
            if field in merged:
                del merged[field]
        pages.append(merged)
    return json.dumps(pages)
6c7d21bce30ebb418a8146f302ce62bbe0386bbf
21,930
import uuid


def _create_component(tag_name, allow_children=True, callbacks=[]):
    """
    Create a component for an HTML Tag

    Examples:
        >>> marquee = _create_component('marquee')
        >>> marquee('woohoo')
        <marquee>woohoo</marquee>
    """
    # NOTE(review): ``callbacks=[]`` is a mutable default argument; it is
    # only iterated below, but confirm callers never mutate it.
    def _component(*children, **kwargs):
        # Children may be given positionally or via the ``children`` kwarg.
        if 'children' in kwargs:
            children = kwargs.pop('children')
        else:
            # Flatten children under specific circumstances
            # This supports the use case of div([a, b, c])
            # And allows users to skip the * operator
            if len(children) == 1 and isinstance(children[0], list):
                # We want children to be tuples and not lists, so
                # they can be immutable
                children = tuple(children[0])
        if 'style' in kwargs:
            style = kwargs.pop('style')
        else:
            style = None
        # Remaining keyword args double as HTML attributes unless an explicit
        # ``attributes`` dict was supplied.
        if 'attributes' in kwargs:
            attributes = kwargs['attributes']
        else:
            attributes = dict(**kwargs)
        # Anchors need an href to render as links.
        if (tag_name == 'a') and ('href' not in attributes):
            attributes['href'] = '#'
        if not allow_children and children:
            # We don't allow children, but some were passed in
            raise ValueError(
                '<{tag_name} /> cannot have children'.format(tag_name=tag_name))
        # Wire up declared callbacks: each callback-valued attribute is
        # registered and replaced by a JS snippet that invokes it through the
        # vdomr bridge (register_callback / VDOM are defined elsewhere).
        for cb in callbacks:
            cbname = cb['name']
            if cbname in attributes:
                if attributes[cbname] is not None:
                    # from google.colab import output as colab_output
                    callback_id = cbname + 'callback-' + str(uuid.uuid4())
                    register_callback(callback_id, attributes[cbname])
                    # js="google.colab.kernel.invokeFunction('{callback_id}', [], {kwargs})"
                    js = "window.vdomr_invokeFunction('{callback_id}', [], {kwargs})"
                    js = js.replace('{callback_id}', callback_id)
                    js = js.replace('{kwargs}', cb['kwargs'])
                    attributes[cbname] = js
                else:
                    attributes[cbname] = ''
        v = VDOM(tag_name, attributes, style, children)
        return v
    return _component
a11572de6d079b35ffe0492154939cceb953b199
21,931
def typeof(val, purpose=Purpose.argument): """ Get the Numba type of a Python value for the given purpose. """ # Note the behaviour for Purpose.argument must match _typeof.c. c = _TypeofContext(purpose) ty = typeof_impl(val, c) if ty is None: msg = _termcolor.errmsg( "cannot determine Numba type of %r") % (type(val),) raise ValueError(msg) return ty
83d8e84fca58ce78b15e9106a14ff95c86ccac68
21,932
import logging


def scale_site_by_jobslots(df, target_score, jobslot_col=Metric.JOBSLOT_COUNT.value, count_col=Metric.NODE_COUNT.value):
    """
    Scale a resource environment (data frame with node type information) to the supplied share.
    This method uses the number of jobslots in each node as a target metric.
    """
    # Warn (but do not abort) when the node description is incomplete.
    if df[jobslot_col].isnull().sum() > 0 or df[count_col].isnull().sum() > 0:
        logging.warning("Node description has null values for jobslots or node target scores!")
    # Total slots across the site = slots per node type * node count.
    slots_per_type = df[jobslot_col] * df[count_col]
    total_slots = slots_per_type.sum()
    # Fraction of the current site that yields the requested target score.
    share = target_score / total_slots
    return scale_dataframe(df, share, count_col, jobslot_col)
c46129ebf4761fbcb7c3e40165c83259d0eb24e0
21,933
def primesfrom2to(n):
    """Input n>=6, Returns a array of primes, 2 <= p < n

    Wheel-mod-6 sieve: the boolean array tracks only numbers congruent to
    1 or 5 (mod 6); 2 and 3 are prepended to the result at the end.
    """
    # FIXES vs. original: floor division (``//``) instead of Python 2 true
    # division (``/`` now yields floats and breaks indexing), ``range``
    # instead of the removed ``xrange``, and ``bool`` instead of the
    # removed ``np.bool`` alias.
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
    sieve[0] = False  # index 0 maps to the number 1, which is not prime
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1  # the wheel value this index represents
            # Strike out multiples of k in both residue classes.
            sieve[k * k // 3::2 * k] = False
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0] + 1) | 1)]
e66a8dd1bb23f1aab8786d4832e382d07e5973e0
21,934
import subprocess


def file_types_diff(cwd, old_ver, new_ver):
    """
    NB: Uses Git and the magic/ directory
    Select only files that are Copied (C), Modified (M), Renamed (R), have
    their type (i.e. regular file, symlink, submodule, ...) changed (T)

    Returns a list of changed file types!
    """
    # diff only Modified and Type changed
    cmdline = "git diff -M --diff-filter=MT %s..%s" % (old_ver, new_ver)
    proc = subprocess.Popen(cmdline.split(' '),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            cwd=cwd)
    text = proc.communicate()[0]
    # keep leading white space (only trailing newlines are stripped)
    text = text.decode('UTF8', 'replace').strip('\n')
    # FAIL/PASS are module-level status constants defined elsewhere.
    if text:
        return (FAIL, text)
    else:
        return (PASS, "None")
47ddee3bd7bfe0cbeb1af642016f3836900cd0a9
21,935
import time


def TimeFromTicks(ticks):
    """construct an object holding a time value from the given ticks value."""
    # localtime()[3:6] -> (hour, minute, second); Time is the DB-API 2.0
    # time constructor defined elsewhere in this module.
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)
96651ff5e0da5e88988640b5e3f544ad82dd90b2
21,936
def check_key_exists(file_location, section, key):
    """
    Searches an INI Configuration file for the existance of a section & key

    :param file_location: The file to get a key value from
    :param section: The section to find the key value
    :param key: The key that can contain a value to retrieve
    :return: The boolean value of whether or not the key exists
    """
    parser = ConfigParser()
    parser.read(file_location)
    # has_option is False for a missing section as well as a missing key.
    return parser.has_option(section, key)
afdc8bad295cd2b0ed0576eab0ff633fb1f854b3
21,937
def replace_na(str_value: str, ch: str = "0") -> str:
    """Replace occurrences of "0" with "na" in a category-list string.

    Specifically designed for category lists; may not work for other needs.

    Args:
        str_value (str): category list
        ch (str, optional): replacement char. Defaults to "0".

    Returns:
        str: cleaned category name
    """
    if str_value is not None:
        len_str = len(str_value)
        if len_str > 0:
            # The whole string being "0" maps straight to "na".
            if str_value == "0":
                return "na"
            # Positions of every occurrence of ``ch``.
            all_indices = [i for i, ltr in enumerate(str_value) if ltr == ch]
            if all_indices:
                for i in all_indices:
                    # NOTE(review): str_value[1] assumes len_str > 1 here; a
                    # single-character input equal to ch (but not "0") would
                    # raise IndexError — confirm possible inputs.
                    if i == 0 and str_value[1].isalpha():
                        str_value = "na"+str_value[1:]
                    # NOTE(review): str_value[:len_str] is the whole string,
                    # so this branch APPENDS "na" rather than replacing the
                    # trailing char — verify that is the intent.
                    elif i == (len_str - 1) and (str_value[len_str-2].isalpha() or str_value[len_str-2] != "."):
                        str_value = str_value[:len_str] + "na"
                    elif str_value[len_str-2] != ".":
                        str_value = str_value[:i] + "na" + str_value[(i+1):]
    return str_value
d8e6dfe6806c7a008163ba92c62e7b2b18633538
21,938
def intent_requires():
    """
    This view encapsulates the method get_intent_requirement

    It requires an Intent.

    :return: A dict containing the different entities required for an Intent
    """
    # Flask view: expects a JSON body with an "intent" key; ``kg`` is the
    # knowledge-graph client defined elsewhere.
    data = request.get_json()
    if "intent" in data:
        return kg.get_intent_requirements(data["intent"])
    else:
        # NOTE(review): the 404 lives only inside the response body; the HTTP
        # status code itself stays 200 — confirm that is intended.
        return {"message": "Must provide an intent name", "status": 404}
75901abc3d0833eba39c229cc35249c8cb3e6162
21,939
def standardize_df_off_tr(df_tr: pd.DataFrame, df_te: pd.DataFrame):
    """Standardize train and test frames using the TRAINING statistics.

    Every column except 'target' is centred on the training mean and
    scaled by the training standard deviation (floored at .001 when it
    is numerically zero).  Both frames are modified in place and
    returned as (df_tr, df_te).
    """
    for col in df_tr.keys():
        if col == 'target':
            continue
        std = df_tr[col].values.std()
        # guard against (near-)constant columns: avoid division by ~0
        if np.abs(std) < .0001:
            std = .001
        mean = df_tr[col].values.mean()
        # test data is scaled with the training mean/std
        df_te[col] = (df_te[col].values - mean) / std
        df_tr[col] = (df_tr[col].values - mean) / std
    return df_tr, df_te
04438b7f31efbe80129ef1cda488ea3c93bcf55e
21,940
def filter_clusters(aoi_clusters, min_ratio, max_deviation, message, run=None):
    """
    Keep only clusters that plausibly match the four screen-corner AOIs.

    min_ratio: Has to have more than x % of all dots in the corner within
    the cluster
    max_deviation: Should not deviate more than x % of the screen size from
    the respective AOI
    """
    # NOTE(review): subtracting pd.Series([...]) aligns on the row index,
    # so this assumes aoi_clusters carries a default 0..3 RangeIndex that
    # matches quadrant order -- confirm upstream.
    aoi_clusters = aoi_clusters \
        .sort_values(by='quadrant') \
        .assign(n_ratio=aoi_clusters['n_cluster'] / \
                aoi_clusters['n_total']) \
        .assign(x_deviation=aoi_clusters['x'] - \
                pd.Series([0.25, 0.75, 0.25, 0.75])) \
        .assign(y_deviation=aoi_clusters['y'] - \
                pd.Series([0.75, 0.75, 0.25, 0.25]))
    # distance of each cluster centre from its expected AOI position
    aoi_clusters['euclid_deviation'] = np.sqrt(
        aoi_clusters['x_deviation'] ** 2 +
        aoi_clusters['y_deviation'] ** 2)
    # a cluster is "realistic" when it passes BOTH criteria
    realistic_clusters = aoi_clusters[
        (aoi_clusters['n_ratio'] > min_ratio) &
        (aoi_clusters['euclid_deviation'] < max_deviation)]
    # diagnostics: which of the two criteria failed for any of the 4 corners
    not_enough_gaze_points = len(aoi_clusters[
        (aoi_clusters['n_ratio'] > min_ratio)]) < 4
    too_far_away = len(aoi_clusters[
        aoi_clusters[
            'euclid_deviation'] < max_deviation]) < 4
    if message:
        if not_enough_gaze_points | too_far_away:
            print(f"""\nRun {run} could not be clustered: """)
            if not_enough_gaze_points:
                print(f""" <{min_ratio * 100}% gaze point within """
                      f"""the AOIs for each corner""")
            if too_far_away:
                print(f""" >{max_deviation * 100}% from where the AOI """
                      f"""is supposed to be \n""")
        else:
            print(f"""\nRun {run} can be clustered: """)
            print(f"""{aoi_clusters[[
                'quadrant', 'n_cluster', 'cluster',
                'n_ratio', 'x_deviation', 'y_deviation']]} \n"""
                  f"""Notes: """)
    return realistic_clusters
2767afd05093e364cf6be22b98b4821c6811165e
21,941
def set_to_true():
    """matches v1, which assign True to v1"""
    # Generator-style parser combinator: yielding `symbol` hands control to
    # the parsing driver, which sends back the matched identifier token.
    # NOTE(review): `symbol` and `Assign` are defined elsewhere in this
    # module -- presumably a token parser and an AST node; confirm.
    key = yield symbol
    res = Assign(key, True)
    return res
e1a8eb62be409252475ad39d9d72a087b0344f9f
21,942
def fit_spectrum(spectrum, lineshapes, params, amps, bounds, ampbounds,
                 centers, rIDs, box_width, error_flag, verb=True, **kw):
    """
    Fit a NMR spectrum by regions which contain one or more peaks.

    Parameters
    ----------
    spectrum : array_like
        NMR data. ndarray or emulated type, must be slicable.
    lineshapes : list
        List of lineshapes by label (str) or a lineshape class.  See
        :py:func:`fit_NDregion` for details.
    params : list
        P-length list (P is the number of peaks in region) of N-length lists
        of tuples where each tuple is the optimization starting parameters
        for a given peak and dimension lineshape.
    amps : list
        P-length list of amplitudes.
    bounds : list
        List of bounds for parameter of same shape as params.  If none of
        the parameters in a given dimension have limits None can be used,
        otherwise each dimension should have a list or tuple of (min, max)
        or None for each parameter.  min or max may be None when there is no
        bound in a given direction.
    ampbounds : list
        P-length list of bounds for the amplitude with format similar to
        bounds.
    centers : list
        List of N-tuples indicating peak centers.
    rIDs : list
        P-length list of region numbers.  Peaks with the same region number
        are fit together.
    box_width : tuple
        Tuple of length N indicating box width to add and subtract from peak
        centers to form regions around peaks to fit.
    error_flag : bool
        True to estimate errors for each lineshape parameter and amplitude.
    verb : bool, optional
        True (the default) to print a summary of each region fit.
    **kw : optional
        Additional keywords passed to the scipy.optimize.leastsq function.

    Returns
    -------
    params_best : list
        Optimal values for lineshape parameters with same format as params
        input parameter.
    amp_best : list
        List of optimal peak amplitudes.
    param_err : list, only returned when error_flag is True
        Estimated lineshape parameter errors with same format as params.
    amp_err : list, only returned when error_flag is True
        Estimated peak amplitude errors.
    iers : list
        List of integer flags from scipy.optimize.leastsq indicating if the
        solution was found for a given peak.  1, 2, 3, 4 indicate that a
        solution was found; other values indicate an error.
    """
    # per-peak output slots, filled in region by region below
    pbest = [[]] * len(params)
    pbest_err = [[]] * len(params)
    abest = [[]] * len(params)
    abest_err = [[]] * len(params)
    iers = [[]] * len(params)
    shape = spectrum.shape
    # normalize the lineshape specifications: strings -> lineshape classes
    ls_classes = []
    for l in lineshapes:
        if isinstance(l, str):
            ls_classes.append(ls_str2class(l))
        else:
            ls_classes.append(l)
    cIDs = set(rIDs)    # region values to loop over
    for cID in cIDs:
        # indices of the peaks belonging to this region
        cpeaks = [i for i, v in enumerate(rIDs) if v == cID]
        # select the per-peak inputs for this region
        cparams = [params[i] for i in cpeaks]
        camps = [amps[i] for i in cpeaks]
        cbounds = [bounds[i] for i in cpeaks]
        campbounds = [ampbounds[i] for i in cpeaks]
        ccenters = [centers[i] for i in cpeaks]
        # find the box edges around each peak center
        bcenters = np.round(np.array(ccenters).astype('int'))
        bmin = bcenters - box_width
        bmax = bcenters + box_width + 1
        # correct for spectrum edges (clip boxes to the data extent)
        for i in range(len(shape)):
            bmin[:, i][np.where(bmin[:, i] < 0)] = 0
        for i, v in enumerate(shape):
            bmax[:, i][np.where(bmax[:, i] > v)] = v
        # find the region limits: the bounding box of all peak boxes
        rmin = edge = np.array(bmin).min(0)
        rmax = np.array(bmax).max(0)
        # cut the spectrum down to the region
        s = tuple([slice(mn, mx) for mn, mx in zip(rmin, rmax)])
        region = spectrum[s]
        # express the box limits relative to the region origin
        ebmin = bmin - edge
        ebmax = bmax - edge
        # create the weight mask array: True inside any peak box
        wmask = np.zeros(region.shape, dtype='bool')
        for bmn, bmx in zip(ebmin, ebmax):
            s = tuple([slice(mn, mx) for mn, mx in zip(bmn, bmx)])
            wmask[s] = True
        # shift the initial parameters into region coordinates
        ecparams = [[ls.add_edge(p, (mn, mx)) for ls, mn, mx, p in
                     zip(ls_classes, rmin, rmax, g)] for g in cparams]
        # TODO make this better...
        ecbounds = [[zip(*[ls.add_edge(b, (mn, mx)) for b in zip(*db)])
                     for ls, mn, mx, db in zip(ls_classes, rmin, rmax, pb)]
                    for pb in cbounds]
        # fit the region
        t = fit_NDregion(region, ls_classes, ecparams, camps, ecbounds,
                         campbounds, wmask, error_flag, **kw)
        if error_flag:
            ecpbest, acbest, ecpbest_err, acbest_err, ier = t
            cpbest_err = [[ls.remove_edge(p, (mn, mx)) for ls, mn, mx, p in
                           zip(ls_classes, rmin, rmax, g)]
                          for g in ecpbest_err]
        else:
            ecpbest, acbest, ier = t
        # remove edges from best fit parameters (back to spectrum coords)
        cpbest = [[ls.remove_edge(p, (mn, mx)) for ls, mn, mx, p in
                   zip(ls_classes, rmin, rmax, g)] for g in ecpbest]
        if verb:
            print("-----------------------")
            print("cID:", cID, "ier:", ier, "Peaks fit", cpeaks)
            print("fit parameters:", cpbest)
            print("fit amplitudes", acbest)
        # scatter the region results back into the per-peak output slots
        for i, pb, ab in zip(cpeaks, cpbest, acbest):
            pbest[i] = pb
            abest[i] = ab
            iers[i] = ier
        if error_flag:
            for i, pb, ab in zip(cpeaks, cpbest_err, acbest_err):
                pbest_err[i] = pb
                abest_err[i] = ab
    if error_flag is False:
        return pbest, abest, iers
    return pbest, abest, pbest_err, abest_err, iers
e13959eb2fbca2146172d9c1f345c2715d28471b
21,943
def d1_to_q1(A, b, mapper, cnt, M):
    """
    Constraints for d1 to q1

    One constraint row is written into A/b per (key, i<=j) pair, using the
    hermiticity of the upper triangle; returns the updated A, b and row
    counter.
    """
    for key in mapper['ck']:
        for i in range(M):
            for j in range(i, M):
                if i == j:
                    # diagonal entry: single ck + kc term, RHS of 1
                    A[cnt, mapper['ck'][key](i, i)] += 1.0
                    A[cnt, mapper['kc'][key](i, i)] += 1.0
                    b[cnt, 0] = 1.0
                else:
                    # off-diagonal: symmetrized (hermitian) half-weights
                    for blk, r, c in (('ck', i, j), ('ck', j, i),
                                      ('kc', j, i), ('kc', i, j)):
                        A[cnt, mapper[blk][key](r, c)] += 0.5
                    b[cnt, 0] = 0.0
                cnt += 1
    return A, b, cnt
1ee9ec17f4464ef280aa22780d6034309941954e
21,944
import argparse
import logging


def handle_args():
    """
    Gathers command line options and sets up logging according to the
    verbose param.

    Returns the parsed args.
    """
    parser = argparse.ArgumentParser(description='Checks the queue for new messages and calculates the calendar as needed')
    # default=0 is required: with action='count' the default is otherwise
    # None, so omitting -v made `args.verbose >= 3` raise TypeError on
    # Python 3.
    parser.add_argument('--verbose', '-v', action='count', default=0)
    args = parser.parse_args()
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO)
    elif args.verbose == 2:
        logging.basicConfig(level=logging.DEBUG)
    elif args.verbose >= 3:
        # -vvv also turns on SQL statement logging
        logging.basicConfig(level=logging.DEBUG)
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
    return args
cb01af59300841c96fda7b1a78a7d4f671a127e3
21,945
import os


def relative_of(base_path: str, relative_path: str) -> str:
    """Resolve *relative_path* against the directory containing *base_path*.

    Returns the normalized full path.
    """
    base_dir = os.path.dirname(base_path)
    return os.path.normpath(os.path.join(base_dir, relative_path))
b35e580ff2afc4cf196f6e53eedd5f4383579c6e
21,946
from typing import OrderedDict def _get_dict_roi(directory=None): """Get all available images with ROI bounding box. Returns ------- dict : {<image_id>: <ROI file path>} """ d = OrderedDict() for f in listdir(directory or IJ_ROI_DIR): d[splitext(f)[0]] = join(directory or IJ_ROI_DIR, f) return d
56c2b6a8cb3cb296489050a5465e19e6829ee383
21,947
from operator import index


def geol_units(img, lon_w, lat, legend=None):
    """Get geological units based on (lon, lat) coordinates.

    Parameters
    ----------
    img: 2d-array
        2D geol map image centered at 180°.
    lon_w: float or array
        Point west longitude(s).
    lat: float or array
        Point latitude(s).
    legend: dict, optional
        Table to mapping geol units to values.

    Returns
    -------
    float, str or array
        Geological unit(s).
    """
    # NOTE(review): `index` is called with (img, lon_w, lat) but the stdlib
    # operator.index is unary -- a project helper mapping lon/lat to array
    # indices must shadow this import; confirm.
    units = img[index(img, lon_w, lat)]
    # without a legend, return the raw unit values
    if not isinstance(legend, dict):
        return units
    # scalar lookup returns a single legend entry
    if np.ndim(units) == 0:
        return legend[units]
    geol = np.vectorize(legend.get)(units)
    # propagate any input mask onto the output array
    if np.ma.is_masked(lon_w) or np.ma.is_masked(lat):
        mask = np.ma.getmask(lon_w) | np.ma.getmask(lat)
        return np.ma.array(geol, mask=mask)
    return geol
d564ee29139d6a8c5d2235da10acb11b24866d80
21,948
def Water_Mask(shape_lsc, Reflect):
    """
    Calculates the water and cloud mask

    A pixel is flagged (1.0) when reflectance band 3 < band 2 AND
    band 4 < band 1; everything else stays 0.0.
    """
    rows, cols = shape_lsc[1], shape_lsc[0]
    water = np.zeros((rows, cols))
    is_water = (Reflect[:, :, 3] < Reflect[:, :, 2]) & (Reflect[:, :, 4] < Reflect[:, :, 1])
    water[is_water] = 1.0
    return np.copy(water)
6bcf7b4a96c4de9938c1520253d81460dd7a8025
21,949
from typing import Iterable
from typing import Tuple
from typing import Any


def unzip(sequence: Iterable) -> Tuple[Any]:
    """Opposite of zip. Unzip is shallow.

    >>> unzip([[1,'a'], [2,'b'], [3,'c']])
    ((1, 2, 3), ('a', 'b', 'c'))

    >>> unzip([ [1,'a','A'], [2, 'b','B'], [3,'c','C'] ])
    ((1, 2, 3), ('a', 'b', 'c'), ('A', 'B', 'C'))

    shallow nature of unzip.

    >>> unzip([ [[1,'num'],['a','str']], [[2,'num'],['b','str']] ])
    (([1, 'num'], [2, 'num']), (['a', 'str'], ['b', 'str']))

    Added in version: 0.1.0
    """
    # zip(*sequence) transposes the rows into columns, which is exactly the
    # shallow unzip.  This also fixes the empty-input case: the previous
    # reduce-based implementation returned an unconsumed generator for an
    # empty sequence, whereas this returns ().
    return tuple(zip(*sequence))
8f1b71b2dfc4d6e67729e0aac012031579956d81
21,950
def get_gs_distortion(dict_energies: dict):
    """Calculates energy difference between Unperturbed structure and most
    favourable distortion.

    Returns energy drop of the ground-state relative to Unperturbed (in eV)
    and the BDM distortion that led to the ground-state.

    Args:
        dict_energies (dict):
            Dictionary matching distortion to final energy, as produced by
            organize_data()

    Returns:
        (energy_difference, BDM_ground_state_distortion)
    """
    unperturbed_E = dict_energies['Unperturbed']
    distortions = dict_energies['distortions']
    if len(distortions) == 1:
        # only the rattled structure (no BDM distortions) was sampled
        energy_diff = distortions['rattled'] - unperturbed_E
        gs_distortion = 'rattled' if energy_diff < 0 else "Unperturbed"
    else:
        lowest_E_RBDM = min(distortions.values())  # lowest E obtained with RBDM
        energy_diff = lowest_E_RBDM - unperturbed_E
        if lowest_E_RBDM < unperturbed_E:
            # first distortion key whose energy equals the minimum
            gs_distortion = next(k for k, v in distortions.items()
                                 if v == lowest_E_RBDM)
        else:
            gs_distortion = "Unperturbed"
    return energy_diff, gs_distortion
2f23103ccac8e801cb6c2c4aff1fb4fc08341e78
21,951
def parse_accept_language(data: str = None):
    """Parse HTTP header `Accept-Language`

    Returns a tuple like below:
    ```
    ((1.0, Locale('zh_Hant_TW')),
     (0.9, Locale('en')),
     (0.0, _fallback_ns))
    ```
    """
    # the fallback entry is always present with quality 0.0
    langs = {(0.0, _fallback_ns)}
    if data is None:
        return tuple(langs)
    for part in data.split(","):
        pieces = part.strip().split(";")
        locale = Locale.parse(pieces[0], sep="-")
        # "q=0.9" -> 0.9; missing q-value defaults to 1.0
        quality = float(pieces[1][2:]) if len(pieces) > 1 else 1.0
        langs.add((quality, locale))
    # highest quality first
    return tuple(sorted(langs, reverse=True))
fd2d9aef4825dc0d7fd7a84b69391c69353e9f86
21,952
def stop_service():
    """
    Stopping the service

    Signals the service loop to shut down, joins the worker thread and
    clears the module-level handle.  Returns True.
    """
    global __service_thread
    # dbg/info and shutdown_service are module-level helpers defined
    # elsewhere in this file.
    dbg("Trying to stop service thread")
    shutdown_service()
    # block until the worker thread actually exits
    __service_thread.join()
    __service_thread = None
    info("Server stopped")
    return True
97f7b9fb60a7a271f3c234be43b2b513c42ce77e
21,953
def get_constants_name_from_value(constant_dict, value):
    """
    @param constant_dict : constant dictionary to consider
    @param value : value's constant name to retrieve
    @rtype : a string
    """
    if value in constant_dict:
        return constant_dict[value]
    # unknown value: log it and hand back the sentinel name
    log.error("The constant name corresponding to the value '%s' can not be found in the dictionary '%s'" % (value, constant_dict))
    return ERROR_CONSTANT_NAME_NOT_FOUND
3848e3e83946196250f3987a976b5a74da016a34
21,954
def rotxyz(x_ang, y_ang, z_ang):
    """Build a 3x3 numpy rotation matrix for rotations applied in the order
    x, y, z in the local coordinate frame as it rotates.

    The three columns are the new basis vectors (expressed in global
    coordinates) of a frame rotated by this matrix.

    Args:
        x_ang: angle for rotation about the x axis in radians
        y_ang: angle for rotation about the y axis in radians
        z_ang: angle for rotation about the z axis in radians

    Returns:
        The 3D rotation matrix for a x, y, z rotation
    """
    xy = np.matmul(rotx(x_ang), roty(y_ang))
    return np.matmul(xy, rotz(z_ang))
779c4ca37d5636ad7cff38d9200a9b50b3b0fffe
21,955
def hpdi(proba, array):
    """
    Give the highest posterior density interval.

    For example, the 95% HPDI is a lower bound and upper bound such that:

    1. they contain 95% probability, and
    2. in total, have higher peaks than any other bound.

    Parameters:
        proba: float
            A value between 0 and 1, inclusive. For example, if proba is
            0.95, then we'll get a 95% HPDI.
        array: np.array
            An array of samples.

    Returns:
        tuple(value, value)
            First item is the lower bound, second the upper bound.

    Raises:
        ValueError: if proba is outside [0, 1].
    """
    if proba < 0 or proba > 1:
        raise ValueError(
            f"Proba {proba} should be between 0 and 1, inclusive."
        )
    sorted_array = np.array(sorted(array))
    length = sorted_array.shape[0]
    # number of samples excluded from the interval
    limit = int((1 - proba) * length)
    if limit == 0:
        # the interval must contain (essentially) all samples; previously
        # this case crashed with indices left at None
        return (sorted_array[0], sorted_array[-1])
    # scan every window containing length-limit+1 samples and keep the
    # narrowest one (initialising with +inf, not the array sum, so
    # negative-valued samples are handled correctly)
    minimum_width = np.inf
    start_index_to_return = None
    end_index_to_return = None
    for start_index in range(limit):
        end_index = length - limit + start_index
        diff = sorted_array[end_index] - sorted_array[start_index]
        if diff <= minimum_width:
            minimum_width = diff
            start_index_to_return = start_index
            end_index_to_return = end_index
    return (
        sorted_array[start_index_to_return],
        sorted_array[end_index_to_return]
    )
a417b6adba19ef6206326791250c880e3b2a28a1
21,956
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks


def PAMI_for_delay(ts, n = 5, plotting = False):
    """This function calculates the mutual information between permutations with tau = 1 and tau = delay

    Args:
       ts (array):  Time series (1d).

    Kwargs:
       plotting (bool): Plotting for user interpretation. Default is False.

       n (int): dimension for calculating delay. Default is 5 as explained in
           "On the Automatic Parameter Selection for Permutation Entropy".

    Returns:
       (int): tau, The embedding delay for permutation formation.
    """
    cutoff = 0.01     # MI threshold below which a delay is accepted
    max_delay = 100   # give up and relax the cutoff past this delay
    m = 2             # permutation dimension used for the MI computation
    MP = []
    tau_a = []
    window_a = []
    flag = False
    delay = 0
    while flag == False:
        delay = delay+1
        tau_a.append(delay)
        window_a.append(delay*(n-1))
        # NOTE(review): mutualPerm is defined elsewhere in this module --
        # presumably the permutation mutual information; confirm.
        MI_Perm = mutualPerm(ts, delay, m)
        MP.append(MI_Perm)  # calculates mutual information
        # negated curve: peaks of -MP are local minima of MP above -cutoff
        peaks, _ = find_peaks(-np.array(MP), height=-cutoff)
        # accept once MI is below the cutoff AND a local minimum exists
        if MI_Perm < cutoff and len(peaks) > 0:
            flag = True
        if delay > max_delay:
            # no acceptable delay found: relax the cutoff and restart
            delay = 0
            cutoff = cutoff*10
            MP = []
            tau_a = []
            window_a = []
    delay_2 = delay
    # convert the window-based delay into a per-dimension embedding delay
    delay_n = int(delay_2/(n-1))
    if plotting == True:
        TextSize = 12
        plt.figure(1)
        plt.plot(tau_a, MP, label = 'n = ' + str(m), linewidth = 2)
        plt.xlabel(r'$\tau(n-1)$', size = TextSize)
        plt.ylabel(r'$I_p(\tau,n)$', size = TextSize)
        plt.xticks(size = TextSize)
        plt.yticks(size = TextSize)
        plt.legend(loc = 'upper right', fontsize = TextSize)
        plt.ylim(0)
        plt.show()
    return delay_n
fd681ba0e3121be8ac709c2dc1f4d3a36358f84a
21,957
import urllib


def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a lists: easy to alter
    - change done here will be propagated to both `capabilities` and `hello`
      command without any other action needed.
    """
    # NOTE: Python 2 code (urllib.quote); `proto` is unused here but kept
    # for the wire-protocol command signature.
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - set(('revlogv1',)):
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    if repo.ui.configbool('experimental', 'bundle2-exp', False):
        # advertise the experimental bundle2 capability blob, URL-quoted
        capsblob = bundle2.encodecaps(repo.bundle2caps)
        caps.append('bundle2-exp=' + urllib.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return caps
764c59bbbe525b8d825dfded871643bb88a1588f
21,958
import types
from typing import Dict
import collections


def dep(doclike: types.DocLike) -> Dict[str, int]:
    """
    Count the number of times each syntactic dependency relation appears
    as a token annotation in ``doclike``.

    Args:
        doclike

    Returns:
        Mapping of dependency relation to count of occurrence.
    """
    counts = collections.Counter()
    for tok in doclike:
        counts[tok.dep_] += 1
    return dict(counts)
e4fcb5a54578b2b001eda4255cd3a22b89a6d195
21,959
def fix_phonology_table(engine, phonology_table, phonologybackup_table, user_table):
    """Give each phonology UUID and modifier_id values; also give the phonology
    backups of existing phonologies UUID values.

    Returns a list of warning messages accumulated during the migration.
    NOTE: Python 2 code (trailing `print` statement).
    """
    print_('Fixing the phonology table ... ')
    msgs = []
    #engine.execute('set names latin1')
    engine.execute('set names utf8;')
    users = engine.execute(user_table.select()).fetchall()
    phonologybackups = engine.execute(phonologybackup_table.select()).fetchall()
    buffer1 = []  # per-phonology update values
    buffer2 = []  # per-backup UUID assignments
    for row in engine.execute(phonology_table.select()):
        values = row2dict(row)
        values['UUID'] = str(uuid4())
        # this phonology's backups, oldest to newest
        backups = sorted([pb for pb in phonologybackups
                          if pb['phonology_id'] == values['id']],
                         key=lambda pb: pb['datetimeModified'])
        if backups:
            try:
                # the most recent backup records who last modified it
                most_recent_backuper = json.loads(backups[-1]['backuper'])['id']
                if [u for u in users if u['id'] == most_recent_backuper]:
                    values['modifier_id'] = most_recent_backuper
                else:
                    # backuper no longer exists; fall back to the enterer
                    values['modifier_id'] = values['enterer_id']
                    msgs.append('There is no user %d to serve as the most recent backuper for phonology %d' % (most_recent_backuper, values['id']))
            except Exception:
                msgs.append('''WARNING: there are %d backups for phonology %d; however, it was not possible to extract a backuper from the most recent one (backuper value: %s)'''.replace('\n', ' ') % (len(backups), values['id'], backups[-1]['backuper']))
                values['modifier_id'] = values['enterer_id']
        else:
            values['modifier_id'] = values['enterer_id']
        buffer1.append(values)
        # propagate the new UUID to every backup of this phonology
        for pb in backups:
            buffer2.append({'pb_id': pb['id'], 'UUID': values['UUID']})
    update = phonologybackup_table.update().where(phonologybackup_table.c.id==bindparam('pb_id')).\
        values(UUID=bindparam('UUID'))
    engine.execute(update, buffer2)
    if buffer1:
        engine.execute('set names utf8;')
        update = phonology_table.update().where(phonology_table.c.id==bindparam('id_')).\
            values(modifier_id=bindparam('modifier_id'), UUID=bindparam('UUID'))
        engine.execute(update, buffer1)
    print 'done.'
    return msgs
746ca4a479b450f3320b4c04a3ce6013beb88ed4
21,960
def D(field, dynkin):
    """A derivative.

    Returns a new field with additional dotted and undotted indices.

    Example:
        >>> D(L, "01")
        DL(01001)(-1/2)

        >>> D(L, "21")
        DL(21001)(-1/2)

    """
    # change in undotted / dotted index counts relative to `field`
    undotted_delta = int(dynkin[0]) - field.dynkin_ints[0]
    dotted_delta = int(dynkin[1]) - field.dynkin_ints[1]
    # derivative can only change one dotted and one undotted index
    assert abs(undotted_delta) == 1
    assert abs(dotted_delta) == 1
    # other info to construct field instance
    deriv_symbol = "D"
    symbol = deriv_symbol + field.label
    # keep the remaining (non-Lorentz) Dynkin digits of the original field
    new_field_dynkin = dynkin + field.dynkin[2:]
    rest = {
        "charges": field.charges,
        "comm": field.comm,
        "is_conj": field.is_conj,
        "nf": field.nf,
        "stripped": field.stripped,
    }
    new_field = Field(symbol, dynkin=new_field_dynkin, **rest)
    new_field.latex = f"(D{strip_parens(field.get_latex())})"
    new_field.derivs = field.derivs + 1
    # only add this information for the first derivative: `stripped`
    # remembers the underived field so it can be recovered later
    if new_field.stripped is None:
        new_field.stripped = {
            "label": field.label,
            "dynkin": field.dynkin,
            "symmetry": field.symmetry,
            "charges": field.charges,
            "latex": field.latex,
        }
    return new_field
1ee408a8cc2923b141c5ffe1c41d919a501111ae
21,961
def _det(m, n):
    """Recursive determinant of an n x n matrix via cofactor (Laplace)
    expansion along row 0, using the _get_cofact helper."""
    if n == 1:
        return m[0][0]
    det = 0
    sign = 1
    # scratch buffer reused for every cofactor minor
    minor = [n * [0] for _ in range(n)]
    for col in range(n):
        _get_cofact(m, minor, 0, col, n)
        det += sign * m[0][col] * _det(minor, n - 1)
        sign = -sign
    return det
9019dd9dc1054fc4c36e72a6e3c9a3c478afa4ad
21,962
from typing import List


def get_vcps() -> List[LinuxVCP]:
    """
    Interrogates I2C buses to determine if they are DDC-CI capable.

    Returns:
        List of all VCPs detected.
    """
    vcps = []
    # iterate I2C devices
    for device in pyudev.Context().list_devices(subsystem="i2c"):
        vcp = LinuxVCP(device.sys_number)
        try:
            # probe: entering/leaving the context raises when the bus is
            # not DDC-CI capable
            with vcp:
                pass
        except (OSError, VCPIOError):
            # not a usable monitor bus; skip it silently
            pass
        else:
            vcps.append(vcp)
    return vcps
ce3766c695fe9ffb0a6ebcced4ac04808987f340
21,963
def toGoatLatin(S):
    """
    Translate a sentence to Goat Latin.

    :type S: str
    :rtype: str
    """
    words = []
    for position, word in enumerate(S.split(), start=1):
        # consonant-initial words rotate their first letter to the end
        if not is_vowel(word[0]):
            word = word[1:] + word[0]
        # append "ma" plus one "a" per 1-based word position
        words.append(word + "ma" + "a" * position)
    return " ".join(words)
5ed41084a0d35d69e65b2821b43c2373cf289d26
21,964
def list_startswith(_list, lstart):
    """
    Check if a list (_list) starts with all the items from another list (lstart)

    :param _list: list
    :param lstart: list
    :return: bool, True if _list starts with all the items of lstart.
    """
    if _list is None:
        return False
    prefix_len = len(lstart)
    # a shorter list can never start with a longer prefix
    if len(_list) < prefix_len:
        return False
    return _list[:prefix_len] == lstart
6f8952a80da81381464521fec55abaaee4a04881
21,965
import os
import warnings


def _get_default_scheduler():
    """Determine which scheduler system is being used.

    It tries to determine it by running both PBS and SLURM commands.

    If both are available then one needs to set an environment variable
    called 'SCHEDULER_SYSTEM' which is either 'PBS' or 'SLURM'.
    For example add the following to your `.bashrc`

    ```bash
    export SCHEDULER_SYSTEM="PBS"
    ```

    By default it is "SLURM".
    """
    # NOTE(review): find_executable is presumably
    # distutils.spawn.find_executable imported elsewhere -- confirm.
    has_pbs = bool(find_executable("qsub")) and bool(find_executable("qstat"))
    has_slurm = bool(find_executable("sbatch")) and bool(find_executable("squeue"))
    DEFAULT = SLURM
    default_msg = f"We set DefaultScheduler to '{DEFAULT}'."
    # an explicit environment override wins over auto-detection
    scheduler_system = os.environ.get("SCHEDULER_SYSTEM", "").upper()
    if scheduler_system:
        if scheduler_system not in ("PBS", "SLURM"):
            warnings.warn(
                f"SCHEDULER_SYSTEM={scheduler_system} is not implemented."
                f"Use SLURM or PBS. {default_msg}"
            )
            return DEFAULT
        else:
            return {"SLURM": SLURM, "PBS": PBS}[scheduler_system]
    elif has_slurm and has_pbs:
        # ambiguous: both present and no override -> default to SLURM
        msg = f"Both SLURM and PBS are detected. {default_msg}"
        warnings.warn(msg)
        return DEFAULT
    elif has_pbs:
        return PBS
    elif has_slurm:
        return SLURM
    else:
        msg = f"No scheduler system could be detected. {default_msg}"
        warnings.warn(msg)
        return DEFAULT
6a8d6518d7e9b561f763a9268fc3557e09db3fb5
21,966
def get_subscribers(subreddit_, *args):
    """Gets current sub count for one or more subreddits.

    Inputs
    -------
    str: Desired subreddit name(s)

    Returns
    -------
    int: sub count or dict:{subreddit: int(sub count)}
    """
    # single-subreddit call returns a bare integer
    if not args:
        return reddit.subreddit(subreddit_).subscribers
    # multiple subreddits: map each name to its subscriber count
    counts = {subreddit_: reddit.subreddit(subreddit_).subscribers}
    for name in args:
        counts[name] = reddit.subreddit(name).subscribers
    return counts
2648eb7db5fe0ebc9940f714fab8770947960463
21,967
def pattern_match(value, pattern, env=None):
    """
    Pattern match a value and a pattern.

    Args:
        value: the value to pattern-match on
        pattern: a pattern, consisting of literals and/or locally bound
                 variables
        env: a dictionary of local variables bound while matching

    Returns:
        (True, env) if the match is successful, and (False, env) otherwise

    Raises:
        SyntaxError, if a variable name is used multiple times in the same
        pattern
    """
    env = {} if env is None else env
    if isinstance(pattern, PatternMatchBind):
        # variable pattern: bind the whole value to the name
        if pattern.name in env:
            raise SyntaxError("Conflicting definitions for %s" % pattern.name)
        env[pattern.name] = value
        return True, env
    elif isinstance(pattern, PatternMatchListBind):
        # head/tail (cons-style) list pattern: split, match head, recurse
        # into the tail only on success
        head, tail = list(value[:len(pattern.head)]), value[len(pattern.head):]
        matches, env = pattern_match(head, pattern.head, env)
        if matches:
            return pattern_match(tail, pattern.tail, env)
        return False, env
    elif type(value) == type(pattern):
        if isinstance(value, ADT):
            # algebraic data types are compared field-by-field as tuples
            return pattern_match(nt_to_tuple(value), nt_to_tuple(pattern), env)
        elif hasattr(value, "__iter__"):
            matches = []
            if len(value) != len(pattern):
                return False, env
            # element-wise recursive match, threading env through
            for v, p in zip(value, pattern):
                match_status, env = pattern_match(v, p, env)
                matches.append(match_status)
            return all(matches), env
        elif value == pattern:
            # literal pattern
            return True, env
    return False, env
145ef26283f4e21f7ab763317174c5e6da043d84
21,968
import sys
import heapq


def dijkstra(adjacency_list, source_vertex, cull_distance = sys.maxsize):
    """
    Implementation of Dijkstra's Algorithm for finding shortest path to all
    vertices in a graph.

    Parameters
    ----------
    adjacency_list (dict of int : (dict of int : int))
        Maps vertices to a dictionary of neighboring vertices as keys and
        whose data is the distance between them.
        *** Distances must be non-negative. ***
    source_vertex (int)
        The vertex to start the algorithm (distance zero vertex)
    cull_distance (int) *optional, defaults to sys.maxsize
        The maximum distance desired to traverse plus 1
        (Represents infinite distance)

    Returns
    -------
    dict(int : int)
        A dictionary whose keys are the reachable vertices of the adjacency
        list and data is the distance required to reach that vertex from the
        source vertex.
    """
    # NOTE(review): `namedtuple` is presumably imported from collections at
    # the top of this module -- confirm.
    pq = []           # Priority Queue (Min-Heap) holding vertices to traverse
    distance = {}     # Distance Map (Return Value)
    count = 0         # Counter for Creating Unique IDs
    valid_ids = {}    # Maps Vertices to Their Valid ID
    # Named tuple to be used in the priority queue
    DistVtxId = namedtuple('DistVtxId', 'distance vertex id')
    ### SETUP
    # Add each vertex in the adjacency list to the priority queue
    for vertex in adjacency_list.keys():
        id = count    # Unique ID for each vertex in the priority queue
        count += 1
        temp = None   # <- for name scope
        if (vertex == source_vertex):
            # Source vertex gets distance zero from itself
            temp = DistVtxId(0, vertex, id)
            # Add the source vertex to the final result
            distance[source_vertex] = 0
        else:
            # Non-Source vertices start at infinite distance
            temp = DistVtxId(cull_distance, vertex, id)
        # Push the vertex onto the priority queue
        heapq.heappush(pq, temp)
        valid_ids[vertex] = temp.id
        # Add this vertex's initial distance to the return value
        distance[vertex] = temp.distance
    ### TRAVERSAL
    # Iterates (at most) the number of vertices times
    for i in range(0, len(adjacency_list)):
        # Get the lowest edge distance from the priority queue
        u_star = heapq.heappop(pq)
        # Ignore this element if it does not have a valid ID
        # Occurs when the priority of a vertex has been "updated"
        # (lazy deletion: stale heap entries are skipped here)
        if (valid_ids[u_star.vertex] != u_star.id):
            continue
        # For every neighboring vertex
        for vertex, edge_weight in adjacency_list[u_star.vertex].items():
            new_distance = u_star.distance + edge_weight
            old_distance = distance[vertex]
            # If we can reach the neighbor covering less distance from
            # the source
            if (new_distance < old_distance):
                distance[vertex] = new_distance
                # (Effectively) Update the priority (distance) of the
                # vertex in the priority queue by pushing a fresh entry
                # with a new unique ID
                temp = DistVtxId(new_distance, vertex, count)
                heapq.heappush(pq, temp)
                valid_ids[temp.vertex] = temp.id
                count += 1
    # Cull the vertices that were unreachable (or farther away from
    # the source than the cull_distance)
    distance = {vtx : dist for vtx, dist in distance.items()
                if dist != cull_distance}
    return distance
067ecb9a0dd94631fb252ad98c1aae59d33e592f
21,969
import os


def process_existing_fiber(country):
    """
    Load and process existing fiber data.

    Reads the global 'afterfiber' shapefile, keeps only live fiber segments
    for this country, and writes them to
    <DATA_INTERMEDIATE>/<iso3>/network_existing/core_edges_existing.shp.

    Parameters
    ----------
    country : dict
        Contains all country specific information (needs 'iso3' and 'iso2').
    """
    iso3 = country['iso3']
    iso2 = country['iso2'].lower()
    folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
    if not os.path.exists(folder):
        os.makedirs(folder)
    filename = 'core_edges_existing.shp'
    path_output = os.path.join(folder, filename)
    # skip work if the output shapefile already exists
    if os.path.exists(path_output):
        return print('Existing fiber already processed')
    path = os.path.join(DATA_RAW, 'afterfiber', 'afterfiber.shp')
    shape = fiona.open(path)
    data = []
    for item in shape:
        # keep only this country's features (case-insensitive iso2 match)
        if item['properties']['iso2'].lower() == iso2.lower():
            if item['geometry']['type'] == 'LineString':
                # 'live' == 1 means the fiber link is in service
                if int(item['properties']['live']) == 1:
                    data.append({
                        'type': 'Feature',
                        'geometry': {
                            'type': 'LineString',
                            'coordinates': item['geometry']['coordinates'],
                        },
                        'properties': {
                            'operators': item['properties']['operator'],
                            'source': 'existing'
                        }
                    })
            if item['geometry']['type'] == 'MultiLineString':
                if int(item['properties']['live']) == 1:
                    try:
                        # explode the multi-geometry into single LineStrings
                        geom = MultiLineString(item['geometry']['coordinates'])
                        for line in geom:
                            data.append({
                                'type': 'Feature',
                                'geometry': mapping(line),
                                'properties': {
                                    'operators': item['properties']['operator'],
                                    'source': 'existing'
                                }
                            })
                    except:
                        # some geometries are incorrect from data source
                        # exclude to avoid issues
                        pass
    if len(data) == 0:
        return print('No existing infrastructure')
    data = gpd.GeoDataFrame.from_features(data)
    data.to_file(path_output, crs='epsg:4326')
    return print('Existing fiber processed')
77d6c034a48f1ca54e17227d55287ed308f53579
21,970
import functools
import six


def wraps(wrapped):
    """A functools.wraps helper that handles partial objects on Python 2."""
    # functools.partial objects lack __name__/__doc__ etc., so only the
    # attributes they actually carry may be copied
    # (_PARTIAL_VALID_ASSIGNMENTS is a module-level constant).
    # https://github.com/google/pytype/issues/322
    if isinstance(wrapped, functools.partial):  # pytype: disable=wrong-arg-types
        return six.wraps(wrapped, assigned=_PARTIAL_VALID_ASSIGNMENTS)
    else:
        return six.wraps(wrapped)
8e3762c9d7f50c8e26df0f0de545de7991d59e92
21,971
def byte_to_bits(byte):
    """Convert a byte to an tuple of 8 bits for use in Merkle-Hellman.

    The first element of the returned tuple is the most significant bit.

    Usage::

        byte_to_bits(65)  # => (0, 1, 0, 0, 0, 0, 0, 1)
        byte_to_bits(b'ABC'[0])  # => (0, 1, 0, 0, 0, 0, 0, 1)
        byte_to_bits('A')  # => raises TypeError

    :param byte: The byte to convert.
    :type byte: int between 0 and 255, inclusive.
    :raises: BinaryConversionError if byte is not in [0, 255].
    :returns: An 8-tuple of bits representing this byte's value.
    """
    if not 0 <= byte <= 255:
        raise BinaryConversionError(byte)
    # extract bits MSB-first by shifting from position 7 down to 0
    return tuple((byte >> shift) & 1 for shift in range(7, -1, -1))
231272c60a3d06de0a914b38fee4f50a0209bcd4
21,972
def KK_RC48_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-

    Impedance of a series resistance Rs plus 48 parallel RC elements,
    where element i contributes R_i / (1 + j*w*tau_i).

    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)

    Parameters
    ----------
    params : mapping
        Fitted parameters; must provide 'Rs' and 'R1'..'R48'
        (works with both plain dicts and lmfit Parameters).
    w : float or array-like
        Angular frequency.
    t_values : sequence of float
        The 48 fixed time constants tau_1..tau_48.

    Returns
    -------
    Complex impedance of the network (scalar or array, following `w`).
    """
    # Accumulate the 48 RC terms in a loop instead of 48 copy-pasted
    # expressions; numerically identical to the original explicit sum.
    z = params["Rs"]
    for i in range(48):
        z = z + params["R%d" % (i + 1)] / (1 + w * 1j * t_values[i])
    return z
1395f182880db7f42d43eba05605673eab83770b
21,973
def race(deer, seconds):
    """Distance covered by *deer* after *seconds*, alternating between
    flying and resting phases per the global ``reindeer`` table.

    The table entry provides (speed, fly_duration, rest_duration).
    """
    stats = reindeer[deer]
    speed, fly_time, rest_time = stats[0], stats[1], stats[2]

    distance = 0
    remaining = seconds
    flying = True
    while True:
        if flying:
            # A partial (or final) flying phase ends the race.
            if remaining <= fly_time:
                distance += remaining * speed
                break
            distance += fly_time * speed
            remaining -= fly_time
        else:
            # Resting covers no distance; a final rest ends the race.
            if remaining <= rest_time:
                break
            remaining -= rest_time
        flying = not flying
    return distance
ea7cb0577cdfa4aab558ca8ad6f4ddde2d79e996
21,974
def AsdlEqual(left, right):
    """Check if generated ASDL instances are equal.

    We don't use equality in the actual code, so this is relegated to
    test_lib.

    Handles None, primitives/SimpleObj enums, lists (element-wise) and
    CompoundObj nodes (field-wise via __slots__). Raises AssertionError
    for any other type.
    """
    # Both absent counts as equal.
    if left is None and right is None:
        return True

    # Primitives and ASDL "simple" (enum-like) objects compare by value.
    if isinstance(left, (int, str, bool, pybase.SimpleObj)):
        return left == right

    # Lists must match in length and be pairwise-equal.
    if isinstance(left, list):
        if len(left) != len(right):
            return False
        for a, b in zip(left, right):
            if not AsdlEqual(a, b):
                return False
        return True

    # Compound nodes: same tag, then recursively compare every field.
    if isinstance(left, pybase.CompoundObj):
        if left.tag != right.tag:
            return False

        field_names = left.__slots__  # hack for now
        for name in field_names:
            # Special case: we are not testing locations right now.
            if name == 'span_id':
                continue

            a = getattr(left, name)
            b = getattr(right, name)
            if not AsdlEqual(a, b):
                return False

        return True

    # Unknown type: fail loudly rather than guessing.
    raise AssertionError(left)
ac5752cd30ff31488ecc000426b6f2430acb1718
21,975
def FindPriority(bug_entry):
    """Return the priority of a bug entry.

    Scans the entry's labels for ones of the form 'Pri-<n>'
    (case-insensitive); when several match, the last one wins.

    Args:
      bug_entry: The provided bug, a IssueEntry instance.

    Returns:
      A string containg the priority of the bug ("1", "2", etc...), or ''
      when no priority label is present.
    """
    priority = ''
    for label in bug_entry.label:
        text = label.text
        if text.lower().startswith('pri-'):
            priority = text[len('pri-'):]
    return priority
a56838fd90b0e46048e5e39c66e5bcb429270c21
21,976
import torch


def attention_padding_mask(q, k, padding_index=0):
    """Generate mask tensor for padding value

    Args:
        q (Tensor): (B, T_q) or (B, T_q, D); only the length T_q is used
        k (Tensor): (B, T_k)
        padding_index (int): padding index. Default: 0

    Returns:
        (torch.BoolTensor): Mask with shape (B, T_q, T_k). True element
        stands for requiring masking.

    Notes:
        Assume padding_index is 0:
        k.eq(0) -> BoolTensor (B, T_k)
        k.eq(0).unsqueeze(1) -> (B, 1, T_k)
        ...expand(-1, T_q, -1) -> (B, T_q, T_k)
    """
    # Only the query length is needed. The original collapsed a 3-D q with
    # torch.mean just to call q.size(-1); reading q.size(1) directly is
    # equivalent for 3-D input and additionally supports the 2-D (B, T_q)
    # shape the docstring advertises.
    len_q = q.size(1)
    return k.eq(padding_index).unsqueeze(1).expand(-1, len_q, -1)
49d1dc8dd4e59284eb090711545cf70c9ba5fad4
21,977
import sys


def get_pcap_bytes(pcap_file):
    """Get the raw bytes of a pcap file, or of stdin when given "-"."""
    if pcap_file != "-":
        with open(pcap_file, "rb") as handle:
            return handle.read()
    # "-" means: consume the whole binary stdin stream.
    return sys.stdin.buffer.read()
51abbefeb918016edef6f8f40c7c40cb973e2fc0
21,978
import subprocess


def run(s, output_cmd=True, stdout=False):
    """Runs a subprocess.

    Args:
        s: shell command string to execute.
        output_cmd: when True, echo the command before running it.
        stdout: when True, return the decoded, stripped stdout text;
            otherwise return the CompletedProcess object.
    """
    if output_cmd:
        print(f"Running: {s}")
    completed = subprocess.run(
        s,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        check=False,
    )
    if not stdout:
        return completed
    return completed.stdout.decode("utf-8").strip()
efcfe30a536789a69662642d2b2da1afc04ebe57
21,979
import itertools


def process_params(request, standard_params=STANDARD_QUERY_PARAMS,
                   filter_fields=None, defaults=None):
    """Parse query params.

    Parses, validates, and converts query into a consistent format.

    :keyword request: the bottle request
    :keyword standard_params: query params that are present in most of our
        (opinionated) APIs (ex. limit, offset, sort, q, and facets)
    :keyword filter_fields: list of field names to allow filtering on
    :keyword defaults: dict of params and their default values
    :returns: dict of query params with supplied values (string or list)
    """
    if not filter_fields:
        filter_fields = []
    # Reject any query key that is neither a standard param nor an allowed
    # filter field.
    unfilterable = (set(request.query.keys()) - set(filter_fields) -
                    set(standard_params))
    if unfilterable:
        bottle.abort(400, "The following query params were invalid: %s. "
                     "Try one (or more) of %s." %
                     (", ".join(unfilterable),
                      ", ".join(filter_fields)))
    query_fields = defaults or {}
    for key in request.query:
        if key in filter_fields:
            # turns ?netloc=this.com&netloc=that.com,what.net into
            # {'netloc': ['this.com', 'that.com', 'what.net']}
            matches = request.query.getall(key)
            matches = list(itertools.chain(*(k.split(',') for k in matches)))
            # Single match stays a plain string; multiple become a list.
            if len(matches) > 1:
                query_fields[key] = matches
            else:
                query_fields[key] = matches[0]
    # 'sort' and 'q' are always normalized to lists, splitting on commas
    # via the project helper comma_separated_strings.
    if 'sort' in request.query:
        sort = request.query.getall('sort')
        sort = list(itertools.chain(*(
            comma_separated_strings(str(k)) for k in sort)))
        query_fields['sort'] = sort
    if 'q' in request.query:
        search = request.query.getall('q')
        search = list(itertools.chain(*(
            comma_separated_strings(k) for k in search if k)))
        query_fields['q'] = search
    return query_fields
06de4c5df0bdcfcc091aefa12cc8aa7fd4c06597
21,980
import inspect


def all_attributes(cls):
    """
    Collect the ``attrs`` declared on *cls* and on all of its parent classes.

    Each object will have the attributes declared directly on the object in
    the attrs dictionary. In addition there may be attributes declared by a
    particular object's parent classes. This function walks the class
    hierarchy (MRO) to collect the attrs in the object's parent classes.

    For example if Location.City is a subclass of Location and Location has
    the attribute GPS_COORDS then this function would combine GPS_COORDS and
    the existing attributes on the Location.City object and return the
    combination.

    Attributes defined closer to *cls* in the MRO shadow same-named
    attributes defined further up.
    """
    attrs = cls.attrs.copy()

    # walk the class hierarchy.  `.iteritems()` was Python-2 only and
    # raises AttributeError on Python 3; `.items()` is equivalent here.
    for sub in inspect.getmro(cls):
        for name, prop in getattr(sub, 'attrs', {}).items():
            if name in attrs:
                continue
            attrs[name] = prop
    return attrs
3d1a1013fe36cef776b6a9842f774f5394aaeff5
21,981
def _reshape_model_inputs(model_inputs: np.ndarray, num_trajectories: int, trajectory_size: int) -> np.ndarray: """Reshapes the model inputs' matrix. Parameters ---------- model_inputs: np.ndarray Matrix of model inputs num_trajectories: int Number of trajectories trajectory_size: int Number of points in a trajectory Returns ------- input_matrix: np.ndarray Reshaped input matrix. """ num_vars = model_inputs.shape[1] input_matrix = model_inputs.reshape(num_trajectories, trajectory_size, num_vars) return input_matrix
2562d143c5dbf7b6c1c018afe2f87df6297752da
21,982
from types import ModuleType import sys def _create_module(module_name): """ex. mod = _create_module('tenjin.util')""" mod = ModuleType(module_name.split('.')[-1]) sys.modules[module_name] = mod return mod
bfc1092bce61f7716a42ceeccc0604ced3696cdd
21,983
def VIS(img, **normalization):
    """Unmixes according to the Vegetation-Impervious-Soil (VIS) approach.

    Args:
        img: the ee.Image to unmix.
        **normalization: keyword arguments to pass to fractionalCover(),
            like shade_normalize=True.

    Returns:
        unmixed: a 3-band image file in order of (soil-veg-impervious).
    """
    # Module-level endmember spectra; order must line up with
    # endmember_names below.
    endmembers = [soil, pv, urban]
    endmember_names = ["soil", "pv", "impervious"]
    unmixed = fractionalCover(img, endmembers, endmember_names, **normalization)
    return unmixed
2c85aa894f6ccfae3da8650cb9c32cc125a19a45
21,984
def create_labels(mapfile, Nodes=None):
    """
    Mapping from the protein identifier to the group.

    Format : ##protein start_position end_position orthologous_group protein_annotation

    :param Nodes: set -- create mapping only for these set of nodes
    :param mapfile: file that contains the mapping for the organism
    :return: defaultdict mapping protein id (column 0) -> group (column 3)
    """
    labels = defaultdict(str)
    # `with` guarantees the handle is closed (the original leaked it).
    with open(mapfile) as f:
        while True:
            # NOTE: reading stops at the first blank line (or EOF),
            # matching the original behaviour.
            line = f.readline().strip()
            if not line:
                break
            sp = line.split("\t")
            # No node filter means "keep everything".
            if not Nodes or sp[0] in Nodes:
                labels[sp[0]] = sp[3]
    return labels
634eefc5a837e484059278939ba34fd2482846bf
21,985
import os
import json


def provide(annotation_path=None, images_dir=None):
    """Return image_paths and class labels.

    Args:
        annotation_path: Path to an annotation's .json file.
        images_dir: Path to images directory.

    Returns:
        image_files: A list containing the paths of images.
        annotation_dict: A dictionary containing the class labels of each
            image.

    Raises:
        ValueError: If annotation_path does not exist.
    """
    if not os.path.exists(annotation_path):
        raise ValueError('`annotation_path` does not exist.')

    # `with` closes the handle; the original leaked the open file.
    with open(annotation_path, 'r') as annotation_json:
        annotation_list = json.load(annotation_json)

    image_files = []
    annotation_dict = {}
    for d in annotation_list:
        image_name = d.get('image_id')
        disease_class = d.get('disease_class')
        # Optionally prefix each image name with the images directory.
        if images_dir is not None:
            image_name = os.path.join(images_dir, image_name)
        image_files.append(image_name)
        annotation_dict[image_name] = disease_class
    return image_files, annotation_dict
02fd2584568bdaedbb1a605b30092ceb462948f4
21,986
def sortKSUID(ksuidList):
    """Sort a list of ksuids by their timestamp, oldest first.

    NOTE(review): the original docstring claimed "recent in the front",
    but ``reverse=False`` sorts ascending (oldest first); the docstring
    now matches the actual behaviour.
    """
    return sorted(ksuidList, key=lambda x: x.getTimestamp(), reverse=False)
0476bc0ef19f8730488041ac33598ba7471f96e7
21,987
import os


def is_installable_dir(path):
    # type: (str) -> bool
    """Return True if `path` is a directory containing a setup.py file."""
    return os.path.isdir(path) and os.path.isfile(os.path.join(path, "setup.py"))
b80929ac5ea558064bbcb3e47754c24cc548e478
21,988
from typing import Counter


def get_vocabulary(list_):
    """
    Computes the vocabulary for the provided list of sentences.

    :param list_: a list of sentences (each an iterable of words)
    :return: a list of all words sorted by descending count, and a
        dictionary with key, val = word, count
    """
    counts = Counter(word for sentence in list_ for word in sentence)
    by_frequency = sorted(counts.items(), key=lambda item: -item[1])
    return [word for word, _ in by_frequency], counts
d6c357a5768c2c784c7dfe97743d34795b2695c0
21,989
import logging


def lod_build_config(slurm_nodes, mds_list, oss_list, fsname, mdtdevs,
                     ostdevs, inet, mountpoint, index):
    """
    Build lod configuration for LOD instance.

    :param slurm_nodes: Slurm-allocated hosts; used as both the node list
        and the client list.
    :param mds_list: MDS hosts passed through to LodConfig.
    :param oss_list: OSS hosts passed through to LodConfig.
    :param fsname: filesystem name; LOD_DEFAULT_FSNAME when None.
    :param mdtdevs: MDT device spec; required (returns None when missing).
    :param ostdevs: OST device spec; required (returns None when missing).
    :param inet: network type; LOD_DEFAULT_NET when None.
    :param mountpoint: client mount point; LOD_DEFAULT_MOUNTPOINT when
        falsy.
    :param index: index passed through to LodConfig.
    :return: a LodConfig instance, or None when mdtdevs/ostdevs is absent.
    """
    # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
    # take slurm nodes directly if found
    node_list = slurm_nodes
    client_list = slurm_nodes
    if slurm_nodes:
        logging.debug("Slurm node: %s.", slurm_nodes)
    if inet is not None:
        net = inet
    else:
        net = LOD_DEFAULT_NET
    # `device` is intentionally left unset; LodConfig receives it as None.
    device = None
    # MDT and OST device specs are mandatory: bail out with None (and an
    # error log) when either is missing.
    if mdtdevs:
        mdt_device = mdtdevs
    else:
        logging.error("no mdtdevs found")
        return None
    if ostdevs:
        ost_device = ostdevs
    else:
        logging.error("no ostdevs found")
        return None
    if fsname is not None:
        fs_name = fsname
    else:
        fs_name = LOD_DEFAULT_FSNAME
    if mountpoint:
        mount_point = mountpoint
    else:
        mount_point = LOD_DEFAULT_MOUNTPOINT
    return LodConfig(node_list, device, mdt_device, ost_device, mds_list,
                     oss_list, client_list, net, fs_name, mount_point,
                     index)
dc17de21a64409425192d8e660124b396f7a743f
21,990
def check_field(rule: tuple, field: int) -> bool:
    """Return True when *field* lies inside any (min, max) range of *rule*
    (bounds inclusive)."""
    return any(low <= field <= high for low, high in rule)
32e34da10fff12e765dd6d48472acf0ac5ad72af
21,991
import math


def split(value, precision=1):
    """
    Split `value` into value and "exponent-of-10", where "exponent-of-10"
    is a multiple of 3. This corresponds to SI prefixes.

    Returns tuple, where the second value is the "exponent-of-10" and the
    first value is `value` divided by the "exponent-of-10".

    Args
    ----
    value : int, float
        Input value.
    precision : int
        Kept for backward compatibility; the returned mantissa is not
        rounded, so this argument has no effect. (The original computed a
        ``digits`` count from it that was never used.)

    Returns
    -------
    tuple
        The second value is the "exponent-of-10" and the first value is
        `value` divided by the "exponent-of-10".

    Examples
    --------
    .. code-block:: python

        si_prefix.split(0.04781)   # -> (47.81, -3)
        si_prefix.split(4781.123)  # -> (4.781123, 3)
    """
    negative = False
    if value < 0.:
        value = -value
        negative = True
    elif value == 0.:
        return 0., 0

    # Round the power of ten down to a multiple of 3 (SI prefix steps).
    expof10 = int(math.log10(value))
    if expof10 > 0:
        expof10 = (expof10 // 3) * 3
    else:
        expof10 = (-expof10 + 3) // 3 * (-3)

    value *= 10 ** (-expof10)

    # log10 truncation can leave the mantissa one prefix step too high.
    if value >= 1000.:
        value /= 1000.0
        expof10 += 3

    if negative:
        value *= -1

    return value, int(expof10)
776ded073807773b755dcd7ab20c47d1f33ca1e1
21,992
import os
import json


def load_settings():
    """Load JSON data from settings file.

    Falls back to a fresh default structure when the settings file does
    not exist yet (e.g. first run).

    :return: dictionary with settings details
    :rtype: dict
    """
    if os.path.exists(config.SETTINGS_FILE):
        with open(config.SETTINGS_FILE, 'r') as sfile:
            settings = json.loads(sfile.read())
    else:
        # No settings file yet: seed with the configured chapters and an
        # empty status list.
        settings = {
            'Chapters': config.CHAPTERS,
            'Status': [],
        }
    return settings
2b13eb5f671f88cd0883a20f99d4d80aa346cd29
21,993
import requests


def test_cert(host, port=443, timeout=5, **kwargs):
    """Test that a cert is valid on a site.

    Args:
        host (:obj:`str`): hostname to connect to. can be any of:
            "scheme://host:port", "scheme://host", or "host".
        port (:obj:`str`, optional): port to connect to on host.
            If no :PORT in host, this will be added to host.
            Defaults to: 443
        timeout (:obj:`str`, optional): Timeout for connect/response.
            Defaults to: 5.
        kwargs: passed thru to requests.get()

    Returns:
        (:obj:`tuple` of (:obj:`bool`, :obj:`Exception`)):
            True / False if cert was valid. Exception that was thrown if
            cert not valid, or None if successfull.
    """
    # Caller-supplied timeout/url take precedence over the defaults.
    kwargs.setdefault("timeout", timeout)
    kwargs.setdefault("url", build_url(host=host, port=port))
    try:
        requests.get(**kwargs)
    except requests.exceptions.SSLError as exc:
        # Certificate validation failed: report the exception to the caller.
        return (False, exc)
    return (True, None)
3d0e0098b5f654305c187f2a566c25f8c87a5ce3
21,994
def get_predicates():  # noqa: E501
    """get_predicates

    Get a list of predicates used in statements issued by the knowledge
    source # noqa: E501

    :rtype: List[BeaconPredicate]
    """
    # Thin controller shim: delegates straight to the implementation module.
    return controller_impl.get_predicates()
7f3f89b300a0e43449a1860cff8200af6d33a3b1
21,995
def noisy_job_stage3(aht, ht, zz, exact=False):
    """Adds noise to decoding circuit.

    Args:
    =====
    aht, ht, zz : numeric
        Circuit parameters for decoding circuit
    exact : bool
        If True, works with wavefunction

    Returns:
    ========
    noisy_circuit : cirq.Circuit
        Noisy version of input circuit
    param_resolvers : list
    """
    # Wrap the decoder circuit in a Job so the noise channel can act on it.
    job = Job(decoder_circuit(aht, ht, zz, exact))
    # Depolarizing noise at the module-level `noise_level` probability.
    noisy = DepolarizerChannel(probability=noise_level)
    noisy_job = noisy.transform_job(job)
    # One ParamResolver per parameter assignment in the job's sweep; each
    # `e` is an iterable of (symbol, value) pairs.
    param_resolvers = [ParamResolver({k: v for k, v in e})
                       for e in noisy_job.sweep.param_tuples()]
    return noisy_job.circuit, param_resolvers
a5f1bcb8cced41b2b6179d2eeb68e8b8939aca96
21,996
import math


def buy_and_hold_manager_factory(mgr, j: int, y, s: dict, e=1000):
    """
    Ignores manager preference except every j data points.

    For this to make any sense, 'y' must be changes in log prices.
    For this to be efficient, the manager must respect the "e" convention.
    That is, the manager must do little work when e<0.

    :param mgr: underlying manager callable, invoked as
        mgr(y=y, s=state, e=effort) and returning (weights, new state)
    :param j: only consult the manager every j-th data point
    :param y: latest observation (changes in log prices)
    :param s: State
    :param e: effort hint forwarded to the manager; only passed through
        when j == 1
    :return: w Portfolio weights (plus the updated state dict)
    """
    if j == 1:
        # Special case: just use the manager
        # This is the only time the user's e parameter is passed on.
        s_mgr = s['s_mgr']
        w, s_mgr = mgr(y=y, s=s_mgr, e=e)
        s['s_mgr'] = s_mgr
        return w, s
    else:
        if s.get('w') is None:
            # Initialization: first call always consults the manager at
            # full effort and records its weights.
            s['count'] = 0
            s_mgr = {}
            w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
            s['s_mgr'] = s_mgr
            s['w'] = w
            return w, s
        else:
            s['count'] = s['count'] + 1
            if s['count'] % j == 0:
                # Sporadically use the manager
                s_mgr = s['s_mgr']
                w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
                s['s_mgr'] = s_mgr
                s['w'] = w
                return w, s
            else:
                # Tell the manager not to worry too much about this data
                # point, as the weights won't be used ...
                s_mgr = s['s_mgr']
                _ignore_w, s_mgr = mgr(y=y, s=s_mgr, e=-1)
                s['s_mgr'] = s_mgr
                # ... instead we let it ride: drift the previous weights
                # by the realized returns and renormalize (buy-and-hold).
                w_prev = s['w']
                w = normalize([wi * math.exp(yi)
                               for wi, yi in zip(w_prev, y)])
                s['w'] = w
                return w, s
2225b6f41979e1781a778f397b699751456dc2a4
21,997
def explicit_wait_visibility_of_element_located(browser, xpath, timeout=35):
    """Explicitly wait until visibility on element.

    :param browser: selenium WebDriver instance to poll.
    :param xpath: XPath locating the target element.
    :param timeout: maximum seconds to wait (default 35).
    :return: the located element once visible, or False on timeout.
    """
    locator = (By.XPATH, xpath)
    condition = expected_conditions.visibility_of_element_located(locator)
    try:
        wait = WebDriverWait(browser, timeout)
        result = wait.until(condition)
    except TimeoutException:
        # Swallow the timeout and signal failure to the caller instead of
        # propagating the exception.
        print("Timeout Exception in explicit wait")
        return False
    return result
2fd6fe951d1d55121909e2a326b72af4524f577b
21,998
def list_all_resources():
    """Return a list of all known resources.

    :param start_timestamp: Limits resources by last update time >= this
        value. (optional)
    :type start_timestamp: ISO date in UTC
    :param end_timestamp: Limits resources by last update time < this
        value. (optional)
    :type end_timestamp: ISO date in UTC
    :param metadata.<key>: match on the metadata within the resource.
        (optional)
    """
    # Restrict the listing to the caller's project, derived from the
    # request headers by the ACL layer.
    return _list_resources(
        project=acl.get_limited_to_project(flask.request.headers))
c2b42abd7c03d2f2b6541a45b7b45b2cb420ebc4
21,999