content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import requests


def get_message(message_id):
    """Fetch the details of a single message from the Spark API.

    :param message_id: Specify the message ID in the messageId parameter in the URI.
    :return: message details formatted in JSON
    """
    endpoint = "{}messages/{}".format(SPARK_API_URL, message_id)
    response = requests.get(endpoint, headers=set_spark_header())
    # Report (but do not raise on) a non-OK status; caller still gets the body.
    if response.status_code != 200:
        print("Error querying API: {} {}".format(response.status_code, response.text))
        print()
    return response.json()
abd124781eb8002c2ff27f5c603e9061523ff352
3,632,800
def testsuite(*args, **kwargs):
    """Annotate a class as being a test suite

    An :py:func:`@testsuite <testsuite>`-annotated class must have one or
    more :py:func:`@testcase <testcase>`-annotated methods. These methods
    will be executed in their order of definition. If a ``setup(self, env)``
    and ``teardown(self, env)`` methods are present on the
    :py:func:`@testsuite <testsuite>`-annotated class, then they will be
    executed respectively before and after the
    :py:func:`@testcase <testcase>`-annotated methods have executed.

    It is possible to assign tags to a suite via `@testsuite(tags=...)`
    syntax:

    .. code-block:: python

      @testsuite(tags=('server', 'keep-alive'))
      class SampleSuite(object):
          ...
    """
    # Dispatch to the bare-decorator or parameterized-decorator path.
    dispatch = _selective_call(
        decorator_func=_testsuite,
        meta_func=_testsuite_meta,
        wrapper_func=testsuite,
    )
    return dispatch(*args, **kwargs)
ad1885ff95a43823ee6411c50b03d0d460b5f7f1
3,632,801
def GetClusterAdjcency(clusters, facedge):
    """Build a sparse boolean cluster adjacency matrix.

    Two clusters are adjacent when some face edge has one endpoint in each.

    :param clusters: per-vertex cluster labels (indexable by facedge entries)
    :param facedge: (n, 2) array of vertex-index pairs, one per face edge
    :return: symmetric scipy.sparse.csr_matrix of dtype bool
    """
    # Cluster labels at both ends of every edge.
    pair = clusters[facedge]
    # Keep only boundary edges, i.e. edges whose endpoints lie in
    # different clusters.
    boundary = pair[pair[:, 0] != pair[:, 1]]
    # Insert each adjacency in both directions to make the matrix symmetric.
    rows = np.hstack((boundary[:, 0], boundary[:, 1]))
    cols = np.hstack((boundary[:, 1], boundary[:, 0]))
    vals = np.ones(rows.size, dtype='bool')
    return sparse.csr_matrix((vals, (rows, cols)), dtype='bool')
b335d48069bda241208f81b80462097ec5fa927c
3,632,802
def _strip(g, base, orbits, transversals):
    """
    Attempt to decompose a permutation using a (possibly partial) BSGS
    structure.

    This is done by treating the sequence ``base`` as an actual base, and
    the orbits ``orbits`` and transversals ``transversals`` as basic orbits
    and transversals relative to it.

    This process is called "sifting". A sift is unsuccessful when a certain
    orbit element is not found or when after the sift the decomposition
    doesn't end with the identity element.

    The argument ``transversals`` is a list of dictionaries that provides
    transversal elements for the orbits ``orbits``.

    Parameters
    ==========

    ``g`` - permutation to be decomposed
    ``base`` - sequence of points
    ``orbits`` - a list in which the ``i``-th entry is an orbit of
    ``base[i]`` under some subgroup of the pointwise stabilizer of
    ``base[0], base[1], ..., base[i - 1]``. The groups themselves are
    implicit in this function since the only information we need is
    encoded in the orbits and transversals
    ``transversals`` - a list of orbit transversals associated with the
    orbits ``orbits``.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation
    >>> Permutation.print_cyclic = True
    >>> from sympy.combinatorics.named_groups import SymmetricGroup
    >>> from sympy.combinatorics.permutations import Permutation
    >>> from sympy.combinatorics.util import _strip
    >>> S = SymmetricGroup(5)
    >>> S.schreier_sims()
    >>> g = Permutation([0, 2, 3, 1, 4])
    >>> _strip(g, S.base, S.basic_orbits, S.basic_transversals)
    ((4), 5)

    Notes
    =====

    The algorithm is described in [1], pp. 89-90. The reason for returning
    both the current state of the element being decomposed and the level at
    which the sifting ends is that they provide important information for
    the randomized version of the Schreier-Sims algorithm.

    References
    ==========

    [1] Holt, D., Eick, B., O'Brien, E.
    "Handbook of computational group theory"

    See Also
    ========

    sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
    sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random

    """
    # Work on the array form of the permutation for speed.
    h = g._array_form
    base_len = len(base)
    for i in range(base_len):
        # Image of the i-th base point under the current residue.
        beta = h[base[i]]
        if beta == base[i]:
            # Base point already fixed; nothing to strip at this level.
            continue
        if beta not in orbits[i]:
            # Sift fails at level i: the image is not in the i-th basic orbit.
            return _af_new(h), i + 1
        # Multiply by the inverse of the transversal element to push the
        # residue one level deeper into the stabilizer chain.
        u = transversals[i][beta]._array_form
        h = _af_rmul(_af_invert(u), h)
    # Complete sift: h is the final residue (identity iff g is in the group).
    return _af_new(h), base_len + 1
999f5ed33d895dae446d8aa8eabf58eb82bcb30b
3,632,803
def list_physical_devices(device_type=None):
    """Return a list of physical devices visible to the runtime.

    Physical devices are hardware devices locally present on the current
    machine. By default all discovered CPU and GPU devices are considered
    visible. The `list_physical_devices` allows querying the hardware prior
    to runtime initialization.

    The following example ensures the machine can see at least 1 GPU.

    >>> physical_devices = tf.config.experimental.list_physical_devices('GPU')
    >>> print("Num GPUs:", len(physical_devices))
    Num GPUs: ...

    Args:
      device_type: (optional) Device type to filter by such as "CPU" or "GPU"

    Returns:
      List of PhysicalDevice objects
    """
    ctx = context.context()
    return ctx.list_physical_devices(device_type)
d9683db64be013df5c258aa6573456005863e74e
3,632,804
def awards_grants_honors(p):
    """Make sorted awards grants and honors list.

    Parameters
    ----------
    p : dict
        The person entry

    Returns
    -------
    list of dict
        Entries with 'description', 'year' and a '_key' sort value,
        sorted newest first.
    """
    aghs = []
    for x in p.get('funding', ()):
        d = {'description': '{0} ({1}{2:,})'.format(
            latex_safe(x['name']),
            # raw string fixes the invalid '\$' escape sequence
            # (DeprecationWarning in modern Python); rendered text unchanged
            x.get('currency', '$').replace('$', r'\$'),
            x['value']),
            'year': x['year'],
            '_key': date_to_float(x['year'], x.get('month', 0)),
        }
        aghs.append(d)
    for x in p.get('service', []) + p.get('honors', []):
        d = {'description': latex_safe(x['name']),
             'year': x['year'],
             '_key': date_to_float(x['year'], x.get('month', 0)),
             }
        aghs.append(d)
    # Newest entries first; items without a key sort to the end.
    aghs.sort(key=(lambda x: x.get('_key', 0.0)), reverse=True)
    return aghs
c1b0f2626109fe59ca71654a86f46e34a1da8a7d
3,632,805
import re
from datetime import datetime, timedelta


def string_to_time(course_time):  # '二1-2 三3-4'
    """Convert a course time description into concrete class start/end times.

    :param course_time: e.g. '二1-2' (weekday + period range), optionally
        followed by a week specifier such as '4-6周' or '1,6周'
    :return: class start/end times over ten weeks (or the specified weeks),
        [{start_time, end_time}, ...]

    NOTE(review): relies on module-level ``term_start_date`` and
    ``string_to_int`` defined elsewhere in this module — confirm.
    """
    course_times = []
    # Minutes from the start of the day at which each class period begins.
    course_minutes = [0, 55, 120, 175, 250, 295, 370, 425, 480, 535, 600, 655, 710]
    available_weeks = range(1, 11)
    text_times = re.findall(r"[一|二|三|四|五|六|七|八|九|十]\d?\d-\d?\d", course_time)  # ['二1-2', '三3-4']
    # part of weeks like: 4 - 6
    if re.findall(r"\d-\d?\d周", course_time):
        part_week = re.findall(r"\d-\d?\d周", course_time)
        weeks = re.findall(r"\d\d*", part_week[0])
        start_week = int(weeks[0])
        end_week = int(weeks[1])
        available_weeks = range(start_week, end_week + 1)
    # some of weeks like: 1, 6
    elif re.findall(r"\d,\d?\d周", course_time):
        part_week = re.findall(r"\d,\d?\d周", course_time)
        week1 = re.findall(r"\d", part_week[0])[0]
        week2 = re.findall(r"\d?\d", part_week[0])[1]
        available_weeks = [int(week1), int(week2)]
    # otherwise: full ten weeks
    for text_time in text_times:
        day = re.findall(r"[一|二|三|四|五|六|七|八|九|十]", text_time)
        times = re.findall(r"\d\d*", text_time)
        time_start = int(times[0])
        time_end = int(times[1])
        for i in available_weeks:
            course_times.append({
                # BUG FIX: the original called `datetime.timedelta`, which
                # does not exist when only the `datetime` class is imported
                # (AttributeError at runtime); use `timedelta` directly.
                'start_time': term_start_date + timedelta(
                    days=string_to_int(day[0]), weeks=i - 1,
                    minutes=course_minutes[time_start - 1]),
                'end_time': term_start_date + timedelta(
                    days=string_to_int(day[0]) + (i - 1) * 7,
                    minutes=course_minutes[time_end - 1] + 45)
            })
    return course_times
be4722163f776d44bdf80257664d25be8da0b571
3,632,806
def get_parameters(model: str, group_id: int = 0, t_in: int = 0,
                   t_out: int = 0, p_th: int = 0) -> pd.DataFrame:
    """
    Loads the content of the database for a specific heat pump model
    and returns a pandas ``DataFrame`` containing the heat pump parameters.

    Parameters
    ----------
    model : str
        Name of the heat pump model or "Generic".
    group_id : numeric, default 0
        only for model "Generic": Group ID for subtype of heat pump. [1-6].
    t_in : numeric, default 0
        only for model "Generic": Input temperature :math:`T` at primary
        side of the heat pump. [°C]
    t_out : numeric, default 0
        only for model "Generic": Output temperature :math:`T` at secondary
        side of the heat pump. [°C]
    p_th : numeric, default 0
        only for model "Generic": Thermal output power at setpoint t_in,
        t_out (and for water/water, brine/water heat pumps t_amb = -7°C). [W]

    Returns
    -------
    parameters : pd.DataFrame
        Data frame containing the model parameters.
    """
    # Load the database and keep only the rows of the requested model.
    df = pd.read_csv(cwd()+r'/hplib_database.csv', delimiter=',')
    df = df.loc[df['Model'] == model]
    parameters = pd.DataFrame()
    parameters['Manufacturer']=(df['Manufacturer'].values.tolist())
    parameters['Model'] = (df['Model'].values.tolist())
    # Fit-quality (error metric) columns are not present in every
    # database version; silently skip them when missing.
    try:
        parameters['MAPE_COP']=df['MAPE_COP'].values.tolist()
        parameters['MAPE_P_el']=df['MAPE_P_el'].values.tolist()
        parameters['MAPE_P_th']=df['MAPE_P_th'].values.tolist()
    except:
        pass
    # Heating-mode reference values and fitted polynomial coefficients.
    parameters['P_th_h_ref [W]'] = (df['P_th_h_ref [W]'].values.tolist())
    parameters['P_el_h_ref [W]'] = (df['P_el_h_ref [W]'].values.tolist())
    parameters['COP_ref'] = (df['COP_ref'].values.tolist())
    parameters['Group'] = (df['Group'].values.tolist())
    parameters['p1_P_th [1/°C]'] = (df['p1_P_th [1/°C]'].values.tolist())
    parameters['p2_P_th [1/°C]'] = (df['p2_P_th [1/°C]'].values.tolist())
    parameters['p3_P_th [-]'] = (df['p3_P_th [-]'].values.tolist())
    parameters['p4_P_th [1/°C]'] = (df['p4_P_th [1/°C]'].values.tolist())
    parameters['p1_P_el_h [1/°C]'] = (df['p1_P_el_h [1/°C]'].values.tolist())
    parameters['p2_P_el_h [1/°C]'] = (df['p2_P_el_h [1/°C]'].values.tolist())
    parameters['p3_P_el_h [-]'] = (df['p3_P_el_h [-]'].values.tolist())
    parameters['p4_P_el_h [1/°C]'] = (df['p4_P_el_h [1/°C]'].values.tolist())
    parameters['p1_COP [-]'] = (df['p1_COP [-]'].values.tolist())
    parameters['p2_COP [-]'] = (df['p2_COP [-]'].values.tolist())
    parameters['p3_COP [-]'] = (df['p3_COP [-]'].values.tolist())
    parameters['p4_COP [-]'] = (df['p4_COP [-]'].values.tolist())
    # Cooling-mode parameters only exist for some models; skip when missing.
    try:
        parameters['P_th_c_ref [W]'] = (df['P_th_c_ref [W]'].values.tolist())
        parameters['P_el_c_ref [W]'] = (df['P_el_c_ref [W]'].values.tolist())
        parameters['p1_Pdc [1/°C]'] = (df['p1_Pdc [1/°C]'].values.tolist())
        parameters['p2_Pdc [1/°C]'] = (df['p2_Pdc [1/°C]'].values.tolist())
        parameters['p3_Pdc [-]'] = (df['p3_Pdc [-]'].values.tolist())
        parameters['p4_Pdc [1/°C]'] = (df['p4_Pdc [1/°C]'].values.tolist())
        parameters['p1_P_el_c [1/°C]'] = (df['p1_P_el_c [1/°C]'].values.tolist())
        parameters['p2_P_el_c [1/°C]'] = (df['p2_P_el_c [1/°C]'].values.tolist())
        parameters['p3_P_el_c [-]'] = (df['p3_P_el_c [-]'].values.tolist())
        parameters['p4_P_el_c [1/°C]'] = (df['p4_P_el_c [1/°C]'].values.tolist())
        parameters['p1_EER [-]'] = (df['p1_EER [-]'].values.tolist())
        parameters['p2_EER [-]'] = (df['p2_EER [-]'].values.tolist())
        parameters['p3_EER [-]'] = (df['p3_EER [-]'].values.tolist())
        parameters['p4_EER [-]'] = (df['p4_EER [-]'].values.tolist())
    except:
        pass

    if model == 'Generic':
        # Keep the single generic subtype row selected by group_id (1-based).
        parameters = parameters.iloc[group_id - 1:group_id]
        # Fit the reference thermal power to the requested setpoint.
        p_th_ref = fit_p_th_ref(t_in, t_out, group_id, p_th)
        parameters.loc[:, 'P_th_h_ref [W]'] = p_th_ref
        t_in_hp = [-7,0,10]  # air/water, brine/water, water/water
        t_out_fix = 52
        t_amb_fix = -7
        p1_cop = parameters['p1_COP [-]'].array[0]
        p2_cop = parameters['p2_COP [-]'].array[0]
        p3_cop = parameters['p3_COP [-]'].array[0]
        p4_cop = parameters['p4_COP [-]'].array[0]
        # Guard against a physically implausible COP at the chosen setpoint.
        if (p1_cop * t_in + p2_cop * t_out + p3_cop + p4_cop * t_amb_fix)<=1.0:
            raise ValueError('COP too low! Increase t_in or decrease t_out.')
        # Reference inlet temperature depends on the heat-source group.
        if group_id == 1 or group_id == 4:
            t_in_fix = t_in_hp[0]
        if group_id == 2 or group_id == 5:
            t_in_fix = t_in_hp[1]
        if group_id == 3 or group_id == 6:
            t_in_fix = t_in_hp[2]
        # Derive electrical reference power from the fitted COP polynomial.
        cop_ref = p1_cop * t_in_fix + p2_cop * t_out_fix + p3_cop + p4_cop * t_amb_fix
        p_el_ref = p_th_ref / cop_ref
        parameters.loc[:, 'P_el_h_ref [W]'] = p_el_ref
        parameters.loc[:, 'COP_ref'] = cop_ref
        if group_id==1:
            # Cooling references are only defined for group 1 (air/water);
            # skip silently when EER columns are absent.
            try:
                p1_eer = parameters['p1_EER [-]'].array[0]
                p2_eer = parameters['p2_EER [-]'].array[0]
                p3_eer = parameters['p3_EER [-]'].array[0]
                p4_eer = parameters['p4_EER [-]'].array[0]
                eer_ref = p1_eer * 35 + p2_eer * 7 + p3_eer + p4_eer * 35
                parameters.loc[:,'P_th_c_ref [W]'] = p_el_ref * 0.6852 * eer_ref
                parameters['P_el_c_ref [W]'] = p_el_ref * 0.6852  # average value from real Heatpumps (P_el35/7 to P_el-7/52)
                parameters.loc[:, 'EER_ref'] = eer_ref
            except:
                pass
    return parameters
041d55d2ed839413a8213f3c4c8cbdb09759b933
3,632,807
def gauss(x, mu, var, a=1):
    """Evaluate a (scaled) Gaussian density at x.

    :param x: point(s) at which to evaluate
    :param mu: expected value
    :param var: variance, (sigma^2)
    :param a: coefficient in cases total area != 1
    :return: a * N(x; mu, var)
    """
    normalizer = a / np.sqrt(2 * var * np.pi)
    exponent = -((x - mu) ** 2) / (2 * var)
    return normalizer * np.exp(exponent)
6b0843ab4372a7f3f75fd709fbdc32be826a2626
3,632,808
def build_backbone(
    image_size: tuple, out_channels: int, model_config: dict, method_name: str
) -> tf.keras.Model:
    """
    Backbone model accepts a single input of shape (batch, dim1, dim2, dim3, ch_in)
    and returns a single output of shape (batch, dim1, dim2, dim3, ch_out)

    :param image_size: tuple, dims of image, (dim1, dim2, dim3)
    :param out_channels: int, number of out channels, ch_out
    :param method_name: str, one of ddf | dvf | conditional
    :param model_config: dict, model configuration, returned from parser.yaml.load
    :return: tf.keras.Model
    """
    if method_name not in ["ddf", "dvf", "conditional"]:
        raise ValueError(
            "method name has to be one of ddf / dvf / conditional in build_backbone, "
            "got {}".format(method_name)
        )

    # Output head configuration depends on what the network predicts.
    if method_name in ["ddf", "dvf"]:
        # TODO try random init with smaller number
        out_kernel_initializer = "zeros"  # to ensure small ddf and dvf
        out_activation = None
    elif method_name in ["conditional"]:
        out_kernel_initializer = "glorot_uniform"
        out_activation = "sigmoid"  # output is probability
    else:
        raise ValueError("Unknown method name {}".format(method_name))

    # Dispatch on the configured backbone; the config key matches the name.
    backbone_classes = {"local": LocalNet, "global": GlobalNet, "unet": UNet}
    backbone_name = model_config["backbone"]
    if backbone_name not in backbone_classes:
        raise ValueError("Unknown model name")
    return backbone_classes[backbone_name](
        image_size=image_size,
        out_channels=out_channels,
        out_kernel_initializer=out_kernel_initializer,
        out_activation=out_activation,
        **model_config[backbone_name],
    )
31d1f329bf7344a6db6d9fbf0e417ff0f3d20de8
3,632,809
def create_subnet(client, cidr_blk, vpc_id):
    """
    Create a subnet in the given CIDR block and VPC using client.

    :param client: a valid boto3 EC2 client.
    :param cidr_blk: a valid IP range in the format 'a.b.c.d/XX'
    :type cidr_blk: str
    :param vpc_id: the VpcID of the Databricks VPC.
    :type vpc_id: str
    :return: returns the subnet ID of the created subnet
    :raises Exception: if the EC2 API call fails
    """
    try:
        # create subnet on the Databricks VPC
        response = client.create_subnet(
            VpcId=vpc_id,
            CidrBlock=cidr_blk,
        )
    except ClientError as e:
        # BUG FIX: the original passed two arguments to Exception
        # ('Error ... {}', format(e)) — a stray comma instead of calling
        # .format on the message string.
        raise Exception('Error when creating subnet: {}'.format(e))
    subnet_id = response.get('Subnet').get('SubnetId')
    print("Create subnet with subnetID {}".format(subnet_id))
    return subnet_id
ff34f2c2ac89edcbc568a80890c90ca0b3616c09
3,632,810
import math


def get_hertz_feed(reference_timestamp, current_timestamp, period_days, phase_days, reference_asset_value, amplitude):
    """Compute the current HERTZ feed value as a sine wave about a reference price.

    Given the reference timestamp, the current timestamp, the period (in
    days), the phase (in days), the reference asset value (ie 1.00) and the
    amplitude (> 0 && < 1), output the current hertz value.

    You can use this formula for an alternative HERTZ asset! Be aware though
    that extreme values for amplitude|period will create high volatility
    which could cause black swan events. BSIP 18 should help, but best tread
    carefully!

    NOTE(review): relies on a module-level `pendulum` import not visible
    here — confirm.
    """
    # Retrieving the Bitshares2.0 genesis block timestamp
    hz_reference_timestamp = pendulum.parse(reference_timestamp).timestamp()
    hz_period = pendulum.SECONDS_PER_DAY * period_days
    hz_phase = pendulum.SECONDS_PER_DAY * phase_days
    elapsed = current_timestamp - (hz_reference_timestamp + hz_phase)
    # Only change for an alternative HERTZ ABA.
    hz_waveform = math.sin((((elapsed / hz_period) % 1) * hz_period) * ((2 * math.pi) / hz_period))
    return reference_asset_value + ((amplitude * reference_asset_value) * hz_waveform)
4da9ae370e4a68119a8fe64b2442275f249d5ed2
3,632,811
def find_list_in_list(reference_array, inp):
    """
    ---------------------------------------------------------------------------
    Find occurrences of input list in a reference list and return indices
    into the reference list

    Inputs:

    reference_array [list or numpy array] One-dimensional reference list or
                    numpy array in which occurrences of elements in the input
                    list or array will be found

    inp             [list or numpy array] One-dimensional input list whose
                    elements will be searched in the reference array and
                    the indices into the reference array will be returned

    Output:

    ind             [numpy masked array] Indices of occurrences of elements
                    of input array in the reference array. It will be of same
                    size as input array. For example, inp = reference_array[ind].
                    Indices for elements which are not found in the reference
                    array will be masked.
    ---------------------------------------------------------------------------
    """
    # NOTE: the original guarded the parameters with a
    # `try: reference_array, inp except NameError` block; function
    # parameters are always bound, so that branch could never trigger
    # and has been removed.
    if not isinstance(reference_array, (list, NP.ndarray)):
        raise TypeError('Input reference_array must be a list or numpy array')
    reference_array = NP.asarray(reference_array).ravel()

    if not isinstance(inp, (list, NP.ndarray)):
        raise TypeError('Input inp must be a list or numpy array')
    inp = NP.asarray(inp).ravel()

    if (inp.size == 0) or (reference_array.size == 0):
        raise ValueError('One or both inputs contain no elements')

    # Binary-search the sorted reference, then map sorted positions back
    # to indices in the original (unsorted) reference array.
    sortind_ref = NP.argsort(reference_array)
    sorted_ref = reference_array[sortind_ref]
    ind_in_sorted_ref = NP.searchsorted(sorted_ref, inp)
    # mode='clip' keeps out-of-range positions valid; such entries are
    # masked below because the looked-up value will not match.
    ii = NP.take(sortind_ref, ind_in_sorted_ref, mode='clip')

    # Mask entries whose matched reference element differs from the input
    # (i.e. the input value is absent from the reference array).
    mask = reference_array[ii] != inp
    ind = NP.ma.array(ii, mask=mask)

    return ind
7a4db527c6f73dcaf3436afa46317ffe443bc5bc
3,632,812
import torch


def accuracy(output, target):
    """Computes the accuracy over the top predictions"""
    with torch.no_grad():
        n = target.size(0)
        predictions = output.data.argmax(dim=1)
        num_correct = (predictions == target).sum().item()
        return num_correct / n
8f4dfde0e00f12d889b403265d50a379930ba3c8
3,632,813
from bs4 import BeautifulSoup


def get_absolute_url(body_string: str):
    """Get absolute manga mangadex url

    Scans the document's <link> tags for the canonical link and returns its
    href; returns None implicitly when no canonical link is present.
    """
    soup = BeautifulSoup(body_string, 'html.parser')
    for link_tag in soup.find_all('link'):
        attrs = link_tag.attrs
        # aiming for canonical link; skip tags missing rel/href
        if 'rel' not in attrs or 'href' not in attrs:
            continue
        if 'canonical' in attrs['rel']:
            return attrs['href']
35ca71dba04c243ab7c4cfa4893326ddeb480336
3,632,814
def panel_grid(hspace, wspace, ncols, num_panels):
    """Init plot.

    Create a matplotlib figure sized to hold ``num_panels`` panels arranged
    in at most ``ncols`` columns, and a matching GridSpec.

    :param hspace: vertical space between panels (GridSpec hspace)
    :param wspace: horizontal space between panels; auto-chosen when None
    :param ncols: maximum number of panel columns
    :param num_panels: total number of panels to lay out
    :return: (fig, gs) — the Figure and its gridspec.GridSpec
    """
    n_panels_x = min(ncols, num_panels)
    # Number of rows needed to fit all panels.
    n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)
    if wspace is None:
        # try to set a wspace that is not too large or too small given the
        # current figure size
        wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02
    # each panel will have the size of rcParams['figure.figsize']
    fig = plt.figure(
        figsize=(
            n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),
            n_panels_y * rcParams['figure.figsize'][1],
        )
    )
    # Margins shrink proportionally as the panel count grows.
    left = 0.2 / n_panels_x
    bottom = 0.13 / n_panels_y
    gs = gridspec.GridSpec(
        nrows=n_panels_y,
        ncols=n_panels_x,
        left=left,
        right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,
        bottom=bottom,
        top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,
        hspace=hspace,
        wspace=wspace
    )
    return fig, gs
4da67af64bfcead3309f3ba3622d441301fcfbf6
3,632,815
import scipy.linalg as LA
import re


def ma_rhythm(ppath, recordings, ma_thr=20.0, min_dur = 160, band=[10,15],
              state=3, win=64, pplot=True, pflipx=True, pnorm=False):
    """
    calculate powerspectrum of EEG spectrogram to identify oscillations in
    sleep activity within different frequency bands; only contineous NREM
    periods are considered for

    @PARAMETERS:
    ppath       -   base folder of recordings
    recordings  -   single recording name or list of recordings
    @OPTIONAL:
    ma_thr      -   microarousal threshold; wake periods <= $min_dur are
                    transferred to NREM
    min_dur     -   minimal duration [s] of a NREM period
    band        -   frequency band used for calculation
    win         -   window (number of indices) for FFT calculation
    pplot       -   if True, plot window showing result
    pflipx      -   if True, plot wavelength instead of frequency on x-axis
    pnorm       -   if True, normalize spectrum (for each mouse) by its
                    total power

    @RETURN:
    SpecMx, f   -   ndarray [mice x frequencies], vector [frequencies]
    """
    # A NREM period must span at least 2.5 FFT windows.
    min_dur = np.max([win*2.5, min_dur])
    if type(recordings) != list:
        recordings = [recordings]

    # One spectrum list per mouse id (the prefix of the recording name).
    Spec = {}
    for rec in recordings:
        idf = re.split('_', rec)[0]
        Spec[idf] = []
    mice = list(Spec.keys())

    for rec in recordings:
        idf = re.split('_', rec)[0]

        # sampling rate and time bin for spectrogram
        #SR = get_snr(ppath, rec)
        #NBIN = np.round(2.5*SR)
        #dt = NBIN * 1/SR
        dt = 2.5  # NOTE(review): assumes a fixed 2.5 s state bin — confirm

        # load sleep state and build a binary wake(2)/non-wake sequence
        M = load_stateidx(ppath, "", ann_name=rec)[0]
        Mseq = M.copy()
        Mseq[np.where(M != 2)] = 0
        Mseq[np.where(M == 2)] = 1
        # contiguous runs of the requested state; microarousals shorter
        # than ma_thr do not break a run
        seq = get_sequences(np.where(M==state)[0], ibreak=int(np.round(ma_thr/dt))+1)
        seq = [range(s[0], s[-1]+1) for s in seq]

        # load frequency band
        #P = so.loadmat(os.path.join(ppath, rec, 'sp_' + rec + '.mat'));
        #SP = np.squeeze(P['SP'])
        #freq = np.squeeze(P['freq'])
        #ifreq = np.where((freq>=band[0]) & (freq<=band[1]))
        #pow_band = SP[ifreq,:].mean(axis=0)

        # keep only periods long enough for the FFT window
        seq = [s for s in seq if len(s)*dt >= min_dur]
        for s in seq:
            y,f = power_spectrum(Mseq[s], win, dt)
            #y = y.mean(axis=0)
            Spec[idf].append(y)

    # Transform Spec (dict of per-mouse spectra) to ndarray [mice x frequencies]
    SpecMx = np.zeros((len(Spec), len(f)))
    i=0
    for idf in Spec:
        # average all spectra of a mouse into one row
        SpecMx[i,:] = np.array(Spec[idf]).mean(axis=0)
        if pnorm==True:
            SpecMx[i,:] = SpecMx[i,:]/LA.norm(SpecMx[i,:])
        i += 1

    if pplot:
        plt.figure()
        ax = plt.axes([0.1, 0.1, 0.8, 0.8])
        x = f[1:]
        if pflipx == True:
            # plot wavelength (1/f) instead of frequency
            x = 1.0/f[1:]
        y = SpecMx[:,1:]
        if len(mice) <= 1:
            ax.plot(x, y.mean(axis=0), color='gray', lw=2)
        else:
            # error bars across mice
            ax.errorbar(x, y.mean(axis=0), yerr=y.std(axis=0), color='gray', fmt='-o')
        box_off(ax)
        if pflipx == True:
            plt.xlabel('Wavelength (s)')
        else:
            plt.xlabel('Frequency (Hz)')
        plt.ylabel('Power (uV^2)')
        plt.show()

    return SpecMx, f
f5fee2d602f5f186f0aaa881938a396ab5c9d977
3,632,816
from typing import Dict


async def find_file_ids(paths: Dict[str, int]) -> Dict[str, str]:
    """Parameter 1: dict of "file path" -> file size."""
    valid_paths = [path for path in paths if path]
    if not valid_paths:
        return {}
    # Parameterized IN (...) clause — one '?' placeholder per path.
    placeholders = ','.join('?' * len(valid_paths))
    query = "select path, size, file_id from file_ids where path in ({})".format(placeholders)
    db = await get_db()
    cursor = await db.execute(query, tuple(valid_paths))
    # Keep only rows whose stored size matches the caller's expected size.
    return {
        row['path']: row['file_id']
        async for row in cursor
        if row['size'] == paths[row['path']]
    }
c43567330b8a4ca7430dfbb337df518831c92a6f
3,632,817
def select_curve(message='Select one curve.'):
    """Select one curve in the Rhino view.

    Parameters
    ----------
    message : str, optional
        Instruction for the user.

    Returns
    -------
    System.Guid
        The identifier of the selected curve.
    """
    return rs.GetObject(
        message,
        preselect=True,
        select=True,
        filter=rs.filter.curve,
    )
65952f2f2ea76c196ec55db4766de0d07cfe1b5b
3,632,818
def fetch_cert(url: str) -> Certificate:
    """
    Fetch a certificate from a URL.

    :param url: the URL to the certificate file
    :return: a certificate object
    """
    # Read the raw bytes while the file handle is open, then deserialize.
    with fetch_file(url) as cert_file:
        raw = cert_file.read()
    return ssl_serializer.deserialize_cert(raw)
3a18eca6e5c8e51ad3ca96860972ead48b74c029
3,632,819
def import_execute(request, extra_context={}):
    """
    This is the view that actually processed the import based on the options
    set by the user in import_options (above). In addition to calling the
    appropriate import function (see below) this view also prepares the
    status information dictionary that will be used by those import
    functions and later used to render the results. The actual template
    used just immediately reloads the page to the results template.

    Customize template used by setting the BATCH_IMPORT_EXECUTE_TEMPLATE
    setting.

    NOTE(review): legacy Python 2 / old-Django code (backtick repr,
    render_to_response with context_instance). The mutable default for
    ``extra_context`` is shared across calls — safe only while it is
    never mutated in place.

    **Required arguments**

    none.

    **Optional arguments**

    ``extra_context``
        A dictionary of variables to add to the template context. Any
        callable object in this dictionary will be called to produce the
        end result which appears in the context.
    """
    # Get the name of the uploaded Excel file for processing and the model
    # for which we're trying to import. If either are missing, send the user
    # back to the beginning of the process.
    try:
        model_import_info = request.session['model_import_info']
        save_file_name = request.session['save_file_name']
    except KeyError:
        # Either we don't have a file or we don't know what we're importing.
        # So restart the process with a blank form (which will show the
        # model list).
        form = UploadImportFileForm()
        context = RequestContext(request)
        for key, value in extra_context.items():
            context[key] = callable(value) and value() or value
        return render_to_response(BATCH_IMPORT_START_TEMPLATE,
                                  {'form': form},
                                  context_instance=context)

    # Retrieve the "import mechanics options". These will be set from the
    # user-specified options or from the settings-based defaults.
    process_option_dict = request.session['process_options']

    # Prepare the context to be sent to the template so we can load it
    # as we go along.
    status_dict = {}

    # Prepare for the results processing.
    status_dict['start_row'] = process_option_dict['start_row']
    status_dict['end_row'] = process_option_dict['end_row']
    status_dict['num_rows_in_spreadsheet'] = 0
    status_dict['num_rows_processed'] = 0
    status_dict['num_items_imported'] = 0
    status_dict['num_items_updated'] = 0
    status_dict['num_errors'] = 0
    status_dict['combined_results_messages'] = []
    status_dict['import_results_messages'] = []
    status_dict['update_results_messages'] = []
    status_dict['error_results_messages'] = []

    # Open the uploaded Excel file and iterate over each of its rows starting
    # start_row and ending at end_row.
    filepath = join(BATCH_IMPORT_TEMPFILE_LOCATION, save_file_name)
    if not isfile(filepath):
        status_dict['error_results_messages'].append('Error opening file. Uploaded file was either not found or corrupt.')
        return _render_results_response(request, status_dict, extra_context)

    # Try to open the uploaded Excel file. If it fails, bomb out.
    try:
        book = xlrd.open_workbook(filepath)
        sheet = book.sheet_by_index(0)
        status_dict['num_rows_in_spreadsheet'] = sheet.nrows
    except:
        status_dict['error_results_messages'].append('Error opening Excel file: '+ `sys.exc_info()[1]`)
        return _render_results_response(request, status_dict, extra_context)

    # Determine the last row of the spreadsheet to be processed.
    # -1 means "process to the end of the sheet".
    if process_option_dict['end_row'] == -1:
        process_option_dict['end_row'] = sheet.nrows
        status_dict['end_row'] = process_option_dict['end_row']

    # Dispatch to the object- or relation-import routine.
    if model_import_info.import_mode == ModelImportInfo.OBJECT_IMPORT:
        status_dict = _do_batch_import(request, model_import_info, book, sheet,
                                       process_option_dict, status_dict)
    else:
        status_dict = _do_relation_import(request, model_import_info, book, sheet,
                                          process_option_dict, status_dict)

    # Clean up...
    del request.session['save_file_name']
    del request.session['model_for_import']
    del request.session['process_options']
    del request.session['model_import_info']
    filepath = join(BATCH_IMPORT_TEMPFILE_LOCATION, save_file_name)
    if isfile(filepath):
        os.remove(filepath)

    # Render the response.
    return _render_results_response(request, status_dict, extra_context)
0850d79c1d7f23b848e2e52cf7322485122f0e98
3,632,820
def get_default_view(source_type, source_name, menu_name=None,
                     source_transform=None, viewer_transform=None, **kwargs):
    """ Create default view metadata for a single source.

    Arguments:
        source_type [str] - type of the source, either "image" or "segmentation".
        source_name [str] - name of the source.
        menu_name [str] - menu name for this view (default: None)
        source_transform [dict] - dict with affine source transform.
            If given, must contain "parameters" and may contain "timepoints" (default: None).
        viewer_transform [dict] - dict with viewer transform (default: None)
        **kwargs - additional settings for this view
    """
    # Default menu name is the pluralized source type ("images"/"segmentations").
    if menu_name is None:
        menu_name = f"{source_type}s"

    source_transforms = None
    if source_transform is not None:
        source_transforms = [
            get_affine_source_transform(
                [source_name],
                source_transform["parameters"],
                source_transform.get("timepoints", None),
            )
        ]

    return get_view(
        [source_name], [source_type], [[source_name]], [kwargs],
        is_exclusive=False, menu_name=menu_name,
        source_transforms=source_transforms,
        viewer_transform=viewer_transform,
    )
056eb43104acea60c51a84da06aee036a1127c46
3,632,821
def PullToBrepFace(curve, face, tolerance, multiple=False):
    """
    Pull a curve to a BrepFace using closest point projection.

    Args:
        curve (Curve): Curve to pull.
        face (BrepFace): Brep face that pulls.
        tolerance (double): Tolerance to use for pulling.

    Returns:
        Curve[]: An array of pulled curves, or an empty array on failure.
    """
    url = "rhino/geometry/curve/pulltobrepface-curve_brepface_double"
    if multiple:
        # Batch mode: zip parallel argument sequences into tuples.
        url += "?multiple=true"
        args = list(zip(curve, face, tolerance))
    else:
        args = [curve, face, tolerance]
    raw = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(raw)
ac8ac6e5de10b7012c4650915fd5af1de52659f4
3,632,822
def floatX(X):
    """Cast X to a numpy array with theano's configured float dtype."""
    target_dtype = theano.config.floatX
    return np.asarray(X, dtype=target_dtype)
ee2e8863fcdc5475cef461b75c557f1e96a73457
3,632,823
def primes_sieve3(n):
    """
    Sieve method 3: Returns a list of primes < n

    >>> primes_sieve3(100)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
    """
    # BUG FIX: the original returned [2] for n <= 2 even though there are
    # no primes below 2.
    if n <= 2:
        return []
    # half-sieve over odd numbers only: index i represents 2*i + 1
    sieve = [True] * (n >> 1)
    upper = int(n ** 0.5) + 1
    for i in range(3, upper, 2):
        if sieve[i >> 1]:
            # mark odd multiples of i starting at i*i
            sieve[i * i >> 1::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n >> 1) if sieve[i]]
629942c738eb08624672e20af1923cf78d3dcec7
3,632,824
import os


def __detect_app_dir(name):
    """Resolve the application's root directory; raise if it does not exist.

    :param name: application name
    :return: path of the application root directory
    :raises ServiceException: when the resolved directory is missing
    """
    path = services.detect_app_dir(name)
    if os.path.exists(path):
        return path
    raise ServiceException("无法找到应用根目录")
be7549cbd57a254bb27d6c5ceda226400b1a7a32
3,632,825
def kl_mvg_diag(
    pm: jnp.ndarray, pv: jnp.ndarray, qm: jnp.ndarray, qv: jnp.ndarray
) -> jnp.ndarray:
    """
    Kullback-Leibler divergence from Gaussian pm,pv to Gaussian qm,qv.
    Also computes KL divergence from a single Gaussian pm,pv to a set
    of Gaussians qm,qv. Diagonal covariances are assumed. Divergence is
    expressed in nats.

    Args:
        pm: mean of starting distribution
        pv: diagonal of the covariance (the variances) of the starting
            distribution — DOC FIX: the original said "standard deviation",
            but the code uses these as variances (products form determinants)
        qm: mean of target distribution
        qv: diagonal of the covariance (the variances) of the target
            distribution

    Returns:
        KL divergence from p to q
    """
    # If qm/qv hold a batch of target Gaussians, reduce over axis 1.
    axis = 1 if qm.ndim == 2 else 0

    # Determinants of diagonal covariances pv, qv
    dpv = pv.prod()
    dqv = qv.prod(axis)

    # Inverse of diagonal covariance qv
    iqv = 1.0 / qv

    # Difference between means pm, qm
    diff = qm - pm

    return 0.5 * (
        jnp.log(dqv / dpv)                # log |\Sigma_q| / |\Sigma_p|
        + (iqv * pv).sum(axis)            # + tr(\Sigma_q^{-1} * \Sigma_p)
        + (diff * iqv * diff).sum(axis)   # + (\mu_q-\mu_p)^T\Sigma_q^{-1}(\mu_q-\mu_p)
        - len(pm)                         # - k (dimensionality)
    )
c0f28495a2b8c18b8563d66152dc62019b3f2e60
3,632,826
from typing import Any


def isstring(var: Any, raise_error: bool = False) -> bool:
    """Check if var is a string

    Args:
        var (str): variable to check
        raise_error (bool, optional): TypeError raised if set to `True`.
            Defaults to `False`.

    Raises:
        TypeError: raised if var is not string

    Returns:
        bool: `True` if var is a string
    """
    if isinstance(var, str):
        return True
    if raise_error:
        raise TypeError(f'String expected: {var=} is not a str')
    return False
897c43539099c3d0b9b38abccce88869a90b9d9e
3,632,827
def run_simulation(solution, times, conditions=None,
                   condition_type = 'adiabatic-constant-volume',
                   output_species = True,
                   output_reactions = True,
                   output_directional_reactions = False,
                   output_rop_roc = False,
                   atol = 1e-15,
                   rtol = 1e-9,
                   temperature_values=None):
    """
    This method iterates through the cantera solution object and outputs
    information about the simulation as a pandas.DataFrame object.

    This method returns a dictionary with the reaction conditions data,
    species data, net reaction data, forward/reverse reaction data, and
    the rate of production and consumption (or `None` if a variable not
    specified).

    `solution` = Cantera.Solution object
    `conditions` = tuple of temperature, pressure, and mole fraction initial
                   species (will be deprecated. Set parameters before running)
    `times` = an iterable of times which you would like to store information in
    `condition_type` = string describing the run type
    `output_species` = output a DataFrame of species' concentrations
    `output_reactions` = output a DataFrame of net reaction rates
    `output_directional_reactions` = output a DataFrame of directional
                                     reaction rates
    `output_rop_roc` = output a DataFrame of species rates of consumption
                       & production
    `atol` / `rtol` = absolute / relative integrator tolerances

    condition_types supported
    #########################
    'adiabatic-constant-volume' - assumes no heat transfer and no volume change
    'constant-temperature-and-pressure' - no solving energy equation or
        changing rate constants
    'constant-temperature-and-volume' - no solving energy equation but allows
        for pressure to change with reactions
    'specified-temperature-constant-volume' - the temperature profile specified
        `temperature_values`, which corresponds to the input `times`, alters
        the temperature right before the next time step is taken. Constant
        volume is assumed.
    """
    if conditions is not None:
        solution.TPX = conditions
    # Pick the reactor model matching the requested condition type;
    # energy='off' freezes the energy equation (isothermal runs).
    if condition_type == 'adiabatic-constant-volume':
        reactor = ct.IdealGasReactor(solution)
    elif condition_type == 'constant-temperature-and-pressure':
        reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
    elif condition_type == 'constant-temperature-and-volume':
        reactor = ct.IdealGasReactor(solution, energy='off')
    elif condition_type == 'specified-temperature-constant-volume':
        reactor = ct.IdealGasReactor(solution, energy='off')
        if temperature_values is None:
            raise AttributeError('Must specify temperature with `temperature_values` parameter')
        elif len(times) != len(temperature_values):
            raise AttributeError('`times` (len {0}) and `temperature_values` (len {1}) must have the same length.'.format(len(times), len(temperature_values)))
    else:
        supported_types = ['adiabatic-constant-volume',
                           'constant-temperature-and-pressure',
                           'constant-temperature-and-volume',
                           'specified-temperature-constant-volume']
        raise NotImplementedError('only {0} are supported. {1} input'.format(supported_types, condition_type))

    simulator = ct.ReactorNet([reactor])
    # Rebind `solution` to the reactor's internal kinetics object so all
    # subsequent state queries reflect the integrated reactor state.
    solution = reactor.kinetics
    simulator.atol = atol
    simulator.rtol = rtol

    # setup data storage
    outputs = {}
    outputs['conditions'] = pd.DataFrame()
    if output_species:
        outputs['species'] = pd.DataFrame()
    if output_reactions:
        outputs['net_reactions'] = pd.DataFrame()
    if output_directional_reactions:
        outputs['directional_reactions'] = pd.DataFrame()
    if output_rop_roc:
        outputs['rop'] = pd.DataFrame()

    for time_index, time in enumerate(times):
        if condition_type == 'specified-temperature-constant-volume':
            # Force the prescribed temperature at constant density, then
            # rebuild the reactor/network so the integrator restarts from
            # the adjusted state at the previous time point.
            solution.TD = temperature_values[time_index], solution.density
            reactor = ct.IdealGasReactor(solution, energy='off')
            simulator = ct.ReactorNet([reactor])
            solution = reactor.kinetics
            simulator.atol = atol
            simulator.rtol = rtol
            if time_index > 0:
                simulator.set_initial_time(times[time_index-1])
        simulator.advance(time)
        # save data
        # NOTE(review): DataFrame.append is deprecated in modern pandas;
        # consider collecting rows and using pd.concat once.
        outputs['conditions'] = outputs['conditions'].append(
            get_conditions_series(simulator,reactor,solution),
            ignore_index = True)
        if output_species:
            outputs['species'] = outputs['species'].append(
                get_species_series(solution),
                ignore_index = True)
        if output_reactions:
            outputs['net_reactions'] = outputs['net_reactions'].append(
                get_reaction_series(solution),
                ignore_index = True)
        if output_directional_reactions:
            outputs['directional_reactions'] = outputs['directional_reactions'].append(
                get_forward_and_reverse_reactions_series(solution),
                ignore_index = True)
        if output_rop_roc:
            outputs['rop'] = outputs['rop'].append(
                get_rop_and_roc_series(solution),
                ignore_index = True)

    # set indexes as time
    time_vector = outputs['conditions']['time (s)']
    for output in outputs.values():
        output.set_index(time_vector,inplace=True)

    return outputs
352516d021b17589d60de5645076c25258df5e9c
3,632,828
def first_second_person_density(doc):
    """Density of first/second-person tokens in *doc*.

    :param doc: Processed text
    :type doc: Spacy Doc
    :return: ratio of first/second-person count to word count
    :rtype: float
    """
    person_hits = first_second_person_count(doc)
    total_words = word_count(doc)
    return person_hits / total_words
29fa561361e1d3b4846accf206103fd3a99d774f
3,632,829
def create_map(*columns):
    """
    Build a new map column from alternating key/value columns.

    The input columns must be grouped as key-value pairs, e.g.
    (key1, value1, key2, value2, ...). All key columns must share one data
    type and may not be null; all value columns must share one data type.
    """
    map_expr = _with_expr(exprs.CreateMap, columns)
    return map_expr
40d09e0f5c16c935d741ef0c5cff2a07f62fbaa9
3,632,830
import six
import logging


def for_review_request_field(context, nodelist, review_request_details, fieldset):
    """Render the enclosed template block once per field in a fieldset.

    Template tag helper: instantiates each field class in the fieldset
    against *review_request_details* and renders *nodelist* with the field
    exposed as ``field`` in the template context.

    This can take a fieldset instance or a fieldset ID (string).
    Fields that fail to instantiate or whose ``should_render`` raises are
    logged and skipped rather than aborting the whole render.
    """
    s = []
    request = context.get('request')

    # Accept a fieldset ID and resolve it to the registered fieldset.
    if isinstance(fieldset, six.text_type):
        fieldset = get_review_request_fieldset(fieldset)

    for field_cls in fieldset.field_classes:
        try:
            field = field_cls(review_request_details, request=request)
        except Exception as e:
            logging.exception('Error instantiating field %r: %s',
                              field_cls, e)
            continue

        try:
            if field.should_render(field.value):
                # Push a scoped context so `field` doesn't leak past this
                # iteration; popped immediately after rendering.
                context.push()
                context['field'] = field
                s.append(nodelist.render(context))
                context.pop()
        except Exception as e:
            logging.exception(
                'Error running should_render for field %r: %s',
                field_cls, e)

    return ''.join(s)
592092f0c6909b18fb92309c72ced4d8c0ee8d7b
3,632,831
import warnings


def bayesian_optimization(f, gpr, acq_func, bounds, max_iter = None,
                          prop_kwargs = None, minimize = None,
                          verbose = False, noise=0.0):
    """
    Implement Bayesian optimization to maximize or minimize a scalar function.

    Arguments
    ---------
    f : callable
        A routine to compute the objective function. Called as
        ``f(X, noise)`` for each proposed point.
    gpr : GaussianProcessRegressor
        Regressor object, pre-fit to the sample data via the command
        gpr.fit(X_sample, Y_sample).
    acq_func : AcquisitionFunction
        The acquisition function used for generating proposal points.
        Whether the routine seeks a maximizer or a minimizer is determined
        by the value of acq_func.minimize_objective.
    bounds : array_like; shape (2, input_dimension)
        Lower and upper bounds defining a box in input space on which the
        acquisition function is optimized: bounds[0,:] holds the lower
        bounds, bounds[1,:] the corresponding upper bounds.
    max_iter : int, optional
        Maximum number of iterations to perform (new points to sample).
        If not specified, uses gpr.X_train_.shape[0].
    prop_kwargs : dict, optional
        Key-value pairs for optional parameters of the propose_location
        subroutine. The value of return_data is forced False.
    minimize : bool, optional
        Explicitly specifies whether the objective is to be minimized.
        If omitted, inferred from the acquisition function; if it
        contradicts the acquisition function, a ValueError is raised.
    verbose : bool, optional
        Whether to display a progress bar counting completed iterations.
    noise : float, optional
        Passed through to ``f`` as its second argument on every
        evaluation. Defaults to 0.0.

    Returns
    -------
    results : dict, with the following entries:
        "X": np.ndarray of shape (N,d), rows are sampled points;
        "y": np.ndarray of shape (N,1), objective measurements at the
             corresponding sample points;
        "args": dict containing the values passed to gpr, bounds and
                prop_kwargs;
        "warnings": list of warning messages raised during optimization.
    each_iter : dict
        Per-iteration snapshots of the proposed point and measurement.
        NOTE(review): the initial '0' entry is overwritten by the first
        iteration, and the stored "X" entries are views into the shared
        array -- confirm whether callers rely on this.
    """
    if prop_kwargs is None:
        prop_kwargs = dict()
    prop_kwargs["return_data"] = False

    if minimize is None:
        minimize = acq_func.minimize_objective
    elif int(minimize) + int(acq_func.minimize_objective) == 1:
        raise ValueError("Acquisition function contradicts 'minimize' value.")

    if gpr.X_train_.shape[0] == gpr.y_train_.size:
        N0 = gpr.y_train_.size  # Initial number of sample points
    else:
        raise ValueError("Input and output sample shapes don't match.")

    if max_iter is None:
        max_iter = N0
    elif max_iter <= 0:
        raise ValueError("Number of iterations must be positive.")

    # Pre-allocate room for the initial samples plus max_iter new points.
    X = np.r_[gpr.X_train_, np.zeros((max_iter, gpr.X_train_.shape[1]))]
    y = np.r_[gpr.y_train_.flatten(), np.zeros(max_iter)].reshape(-1, 1)

    if verbose:
        pbar = trange(N0, N0 + max_iter, ncols = 75)
    else:
        pbar = range(N0, N0 + max_iter)  # OG dim + num of iters

    each_iter = {'0': {"X": X, "y": y}}
    with warnings.catch_warnings(record = True) as w:
        warnings.simplefilter("always")
        for i, j in enumerate(pbar):
            # Refit the GPR on all points gathered so far, propose the
            # next sample via the acquisition function, then measure it.
            gpr.fit(X[:j,:], y[:j])
            next_X = propose_location(acq_func, gpr, bounds, **prop_kwargs)
            X[j,:] = np.array(next_X["best_X"])
            y[j] = f(next_X["best_X"], noise)
            each_iter['{}'.format(i)] = {"X": X[j,:], "y": y[j]}

    args = {"bounds": bounds, "gpr": gpr, "prop_kwargs": prop_kwargs}
    return {"args": args, "warnings": w, "X": X, "y": y}, each_iter
6f1eab7d21c6385532151656a9b9a80e78b78fa0
3,632,832
def to_nearest(num, tick_size):
    """
    Round *num* to the nearest multiple of *tick_size*.

    Very useful for sussing float error out of numbers: e.g.
    toNearest(401.46, 0.01) -> 401.46, whereas normal float processing
    would give you 401.46000000000004. Use this after
    adding/subtracting/multiplying numbers.
    """
    ticks = Decimal(round(num / tick_size, 0))
    return float(ticks * Decimal(str(tick_size)))
662a4e0cb2956161f5b776bc65cb6c35e32aaf32
3,632,833
from typing import List


def create_epub(raw_articles: List[Article], title: str) -> str:
    """
    Build an EPUB book from multiple articles.

    :returns temp path to created ebook
    """
    epub_path = mkdtemp()
    logger.debug(f"Creating epub folder in {epub_path}")
    prepared = [EPUBArticle(article, epub_path) for article in raw_articles]
    return EPUB(prepared, title, epub_path).build()
4befa8bb1a0b1d2d6efc3be4abc42e7dccb774dc
3,632,834
def placeholder(value, token):
    """
    Set the HTML ``placeholder`` attribute on a bound form widget,
    esp. for form inputs and textareas.
    """
    widget_attrs = value.field.widget.attrs
    widget_attrs["placeholder"] = token
    return value
16bb46a6e92c3a59972589ed28315e681a7580f3
3,632,835
def create_xml_element(connection, token, name):
    """Build an ``etree.Element`` carrying the shop credentials.

    :param connection: object providing ``shop_id`` and ``partner_id``
    :param token: API token attached to the element
    :param name: The name of the element
    :returns: etree.Element
    """
    credential_attrs = {
        "shop_id": connection.shop_id,
        "partner_id": connection.partner_id,
        "token": token,
    }
    return etree.Element(name, nsmap={None: XHTML_NAMESPACE}, **credential_attrs)
00427ffa2ce582674d78cf6597624c0bbe29edfc
3,632,836
from typing import Optional


def generate_categorical_dataframe(
    sm: nx.DiGraph,
    n_samples: int,
    distribution: str = "logit",
    n_categories: int = 3,
    noise_scale: float = 1.0,
    intercept: bool = False,
    seed: int = None,
    kernel: Optional[Kernel] = None,
) -> pd.DataFrame:
    """
    Generate a dataframe of samples from an SEM with categorical variables.

    Args:
        sm: A DAG in form of a networkx or StructureModel. Does not require
            weights.
        n_samples: The number of rows/observations to sample.
        kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1)
            or Matern(1) or any combinations thereof. The kernels are used
            to create the latent variable for the binary / categorical
            variables and are directly used for continuous variables.
        distribution: The type of distribution to use for the noise of a
            variable. Options: 'probit'/'normal' (alias), 'logit'/'gumbel'
            (alias). Logit is default.
        n_categories: Number of categories per variable/node.
        noise_scale: The standard deviation of the noise. The categorical
            features are created using a latent variable approach. The
            noise standard deviation determines how much weight the "mean"
            estimate has on the feature value.
        intercept: Whether to use an intercept for the latent variable of
            each feature.
        seed: Random state

    Returns:
        x_mat: [n_samples, d_nodes] sample matrix

    Raises:
        ValueError: if distribution is not 'probit', 'normal', 'logit',
            'gumbel'
    """
    # Arguments shared by both the linear and the kernel-based generator.
    shared_kwargs = dict(
        graph=sm,
        default_type=f"categorical:{n_categories}",
        n_samples=n_samples,
        distributions={"categorical": distribution},
        noise_std=noise_scale,
        seed=seed,
    )
    if kernel is None:
        return sem_generator(intercept=intercept, **shared_kwargs)
    return nonlinear_sem_generator(kernel=kernel, **shared_kwargs)
5cd72c10e6fdede53b2051eef2bdac82045ed7af
3,632,837
def bias_cross_func(data, *args, **kwargs):
    """Compute BIAS crossing / z-score indicator columns for one symbol.

    Works as a QA ``add_func`` callback: the second argument convention
    passes previously computed indicators via ``kwargs['indices']``.
    Only a single symbol should be processed here; a multi-symbol
    ``indices`` frame is sliced down to this symbol's rows.

    NOTE(review): the original (Chinese) docstring described "a simulated
    money-fund benchmark line with 4% annual rate", which does not match
    this code -- it appears copy-pasted from another function.
    """
    if (ST.VERBOSE in data.columns):
        print('Phase bias_cross_func', QA_util_timestamp_to_str())

    # For multi-symbol input, slice the shared `indices` frame down to
    # this symbol; the caller re-merges the results automatically.
    code = data.index.get_level_values(level=1)[0]
    if (len(kwargs.keys()) > 0):
        indices = kwargs['indices'].loc[(slice(None), code), :] if ('indices' in kwargs.keys()) else None

    # Rolling 21-bar percentile rank of BIAS3 (vectorized replacement for
    # the commented-out pandas rolling/rank version below).
    bias_zscore_21 = rolling_pctrank(indices[FLD.BIAS3].values, w=21)
    #bias_zscore_21 = indices[FLD.BIAS3].rolling(21).apply(lambda x:
    #    pd.Series(x).rank(pct=True).iat[-1],
    #    raw=True)

    if (ST.VERBOSE in data.columns):
        print('Phase bias_cross_func Phase 1', QA_util_timestamp_to_str())

    indices[FLD.BIAS3_ZSCORE] = bias_zscore_21
    # +1 where BIAS3 crosses above zero, -1 where it crosses below.
    indices[FLD.BIAS3_CROSS] = CROSS(indices[FLD.BIAS3], 0)
    indices[FLD.BIAS3_CROSS] = np.where(CROSS(0, indices[FLD.BIAS3]) == 1,
                                        -1, indices[FLD.BIAS3_CROSS])
    # Bars elapsed since the last golden (JX) / death (SX) cross.
    indices[FLD.BIAS3_CROSS_JX_BEFORE] = Timeline_duration(np.where(indices[FLD.BIAS3_CROSS] == 1, 1, 0))
    indices[FLD.BIAS3_CROSS_SX_BEFORE] = Timeline_duration(np.where(indices[FLD.BIAS3_CROSS] == -1, 1, 0))

    if (ST.VERBOSE in data.columns):
        print('Phase bias_cross_func Phase 2', QA_util_timestamp_to_str())

    bias = QA.QA_indicator_BIAS(data, 6, 12, 24)
    # +1 when all three BIAS lines are positive, -1 when all are negative.
    bias_cross = np.where((bias['BIAS3'] > 0) & \
                          (bias['BIAS2'] > 0) & \
                          (bias['BIAS1'] > 0), 1,
                          np.where((bias['BIAS3'] < 0) & \
                                   (bias['BIAS2'] < 0) & \
                                   (bias['BIAS1'] < 0), -1, 0))
    indices[FLD.BIAS_TREND_TIMING_LAG] = calc_event_timing_lag(bias_cross)
    indices[FLD.BIAS3_TREND_TIMING_LAG] = indices[FLD.BIAS3_CROSS_SX_BEFORE] - indices[FLD.BIAS3_CROSS_JX_BEFORE]
    bias3_delta = indices[FLD.BIAS3] - indices[FLD.BIAS3].shift(2)
    # Direction from cross timing; where flat, fall back to the sign of
    # the 2-bar BIAS3 delta.
    bias3_cross = np.where(indices[FLD.BIAS3_TREND_TIMING_LAG] > 0, 1,
                           np.where(indices[FLD.BIAS3_TREND_TIMING_LAG] < 0, -1,
                                    np.where(bias3_delta > 0, 1,
                                             np.where(bias3_delta < 0, -1, 0))))
    indices[FLD.BIAS3_TREND_TIMING_LAG] = calc_event_timing_lag(bias3_cross)

    if (ST.VERBOSE in data.columns):
        print('Phase bias_cross_func Done', QA_util_timestamp_to_str())

    return indices
7b89c7ab3be1c6d2079dd9cbbf1f0dde8044b042
3,632,838
from typing import Optional


def login(uid: str, pwd: str) -> Optional[Admin]:
    """
    Authenticate an administrator by uid and password.

    :return: the matching ``Admin``, or ``None`` when no row matches
    """
    sql = '''SELECT admins.id, admins.uid, admins.is_super FROM admins WHERE uid=%s AND pwd=%s LIMIT 1'''
    connect = get_connect()
    with connect.cursor() as cursor:
        cursor.execute(sql, (uid, pwd))
        row = cursor.fetchone()
    if not row:
        return None
    admin = Admin()
    admin.id, admin.uid, admin.is_super = row
    return admin
1c207ab29b6d793c7ac513cf901d5c9588c566d4
3,632,839
import aiohttp


async def catch_uniqueness_error(
    request: aiohttp.web.Request, handler: swift_browser_ui.common.types.AiohttpHandler
) -> aiohttp.web.Response:
    """Catch the exception arising from a non-unique primary key.

    Middleware: translates an asyncpg unique-constraint violation raised
    by the wrapped handler into an HTTP 409 Conflict response.
    """
    try:
        return await handler(request)
    except asyncpg.exceptions.UniqueViolationError:
        # Duplicate primary key -> report a conflict to the client.
        raise aiohttp.web.HTTPConflict(reason="Duplicate entries are not allowed.")
177faf3497fa8d048b6fc1c1dc8058ed78785cf4
3,632,840
def df_canonicalize_from_smiles(df, smiles_col: str, include_stereocenters=True)->pd.Series:
    """
    Canonicalize the SMILES strings in *smiles_col* with RDKit.

    Args:
        df: dataframe containing a SMILES column
        smiles_col: column name in df holding the SMILES strings
        include_stereocenters: whether to keep the stereochemical
            information in the canonical SMILES string

    Returns:
        Canonicalized SMILES per row; None entries for invalid molecules.
        NOTE(review): the annotation says ``pd.Series``, but ``df.apply``
        with a Series-returning function and ``axis=1`` produces a
        one-column DataFrame -- confirm which the callers expect.
    """
    def df_canonicalize_from_smiles_inner(row):
        # RDKit returns None for unparseable SMILES input.
        mol = Chem.MolFromSmiles(row[smiles_col])
        if mol is not None:
            return Chem.MolToSmiles(mol, isomericSmiles=include_stereocenters)
        else:
            return None

    return df.apply(lambda x: pd.Series(df_canonicalize_from_smiles_inner(x)), axis=1)
846c01b71201411ed36d5270b83f8f32d832e9f4
3,632,841
from re import T


def mse_loss(y, pred, w):
    """
    Regression loss function: weighted mean squared error.

    NOTE(review): the ``from re import T`` above shadows nothing useful
    here -- ``T`` is clearly meant to be a tensor backend (theano-style);
    confirm the intended import.
    """
    residual = y - pred
    return T.mean(w * residual ** 2)
bb6f127fd69dcbaa1e64dd811caa0b356de9f741
3,632,842
def compute_gradients(
    state,
    supermatrices,
    supergradients,
    super_oplabels,
    observables,
    observables_labels,
    num_discretes,
):
    """
    Compute the gradients of a symplectic acyclic_graph for the cost
    function <psi|sum_n H_n |psi>, with H_n the element at
    `observables[n]`, acting on discretes `observables_labels[n]`.

    Args:
        state: a random numpy ndarray of shape (2,)* num_discretes.
        supermatrices (list[np.ndarray]): list of supermatrices
        supergradients (list[dict]): list of dict of gradient matrices of
            each supermatrix. each dict maps sympy.Symbol to np.ndarray
        super_oplabels (list[tuple[int]]): the discrete labels of each
            large_block.
        observables (list[np.ndarray]): a list of observables (in tensor
            format).
        observables_labels (list[tuple[int]]): the discrete labels for
            each element in `observables`
        num_discretes (int): the number of discretes

    Returns:
        tuple: (dict mapping each gradient symbol to its accumulated
        scalar gradient value, final state transposed back into natural
        discrete ordering).
    """
    obs_and_labels = list(zip(observables, observables_labels))
    state_labels = tuple(range(num_discretes))
    # Forward pass: apply the full graph of supermatrices to the state.
    state = apply_supermatrices(
        state, state_labels, supermatrices, super_oplabels
    )
    # Accumulate psi = sum_n H_n |state>, one observable at a time.
    psi = np.zeros(state.shape, state.dtype)
    for ob, ob_labels in obs_and_labels:
        inds = [state_labels.index(l) for l in ob_labels]
        # Negative labels mark uncontracted (free) legs for ncon.
        cont_state_labels = list(range(-1, -len(state_labels) - 1, -1))
        cont_ob_labels = []
        for n, i in enumerate(inds):
            cont_ob_labels.append(cont_state_labels[i])
            cont_state_labels[i] = ob_labels[n] + 1
        shape = (2,) * (2 * len(ob_labels))
        psi += tn.ncon(
            [state, ob.reshape(shape)],
            [
                tuple(cont_state_labels),
                tuple([o + 1 for o in ob_labels]) + tuple(cont_ob_labels),
            ],
        )

    # Backward pass: peel the supermatrices off in reverse order,
    # accumulating gradient contributions per symbol.
    reversed_super_oplabels = list(reversed(super_oplabels))
    reversed_supergradients = list(reversed(supergradients))
    accumulated_gradients = {}
    psi = psi.conj()
    for n, building_block in enumerate(reversed(supermatrices)):
        building_block_labels = reversed_super_oplabels[n]
        # Undo this block on the state (apply the adjoint).
        state, tmp_labels = dot(state, state_labels,
                                building_block.T.conj(),
                                building_block_labels)
        for k, grad in reversed_supergradients[n].items():
            # Overlap of the gradient-perturbed psi with the rewound state.
            tmp, _ = dot(psi, state_labels, grad.T,
                         building_block_labels)
            if k in accumulated_gradients:
                accumulated_gradients[k] += np.dot(tmp.ravel(), state.ravel())
            else:
                accumulated_gradients[k] = np.dot(tmp.ravel(), state.ravel())
        # Move psi backwards through the same block.
        psi, state_labels = dot(psi, state_labels, building_block.T,
                                building_block_labels)
        assert (
            tmp_labels == state_labels
        ), "two identical building_block applications produced different label-ordering"

    # bring state back into natural discrete ordering (i.e. small to large)
    perm = [state_labels.index(i) for i in range(num_discretes)]
    return accumulated_gradients, state.transpose(perm)
378cbc6ee3e147ca67d62edbb1929459dfb2993a
3,632,843
import os
import subprocess


def running_from_pacman():
    """
    Return True if the parent process is pacman.
    """
    debug = InformantConfig().get_argv_debug()
    parent_pid = os.getppid()
    # Resolve the parent process's command name via ps.
    parent_name = subprocess.check_output(
        ['ps', '-p', str(parent_pid), '-o', 'comm=']
    ).decode().rstrip()
    if debug:
        debug_print('informant: running from: {}'.format(parent_name))
    return parent_name == 'pacman'
e85707aaf5e0596df74b4ec055e6100d78eee64e
3,632,844
def _cs_count_top_bottom(fragments): """Counting: top and bottom of the entire core sample""" cs_top, cs_bottom = 1e10, 0 for fragment in fragments: cs_top = min(cs_top, float(fragment['top'])) cs_bottom = max(cs_bottom, float(fragment['bottom'])) return cs_top, cs_bottom
3b9a98993a837ff7c08980f644abbb6aad13f908
3,632,845
def get_index_from_filename(
        file_name: str
) -> str:
    """
    Return the trailing numeric index of a reproducible JSON filename.

    :param file_name: `str` The name of the file without parent path.
    :returns: `str` The index of the chart (e.g., 1) or an empty string.
    """
    stem = file_name.replace(".json", "")
    # Walk backwards past the trailing digits.
    cut = len(stem)
    while cut > 0 and stem[cut - 1].isnumeric():
        cut -= 1
    return stem[cut:]
2cddcbcd9bf5079d58c75f19b5d2bf5b44ded173
3,632,846
def get_box_transformation_matrix(box):
    """Homogeneous 4x4 transformation matrix for a given box pose.

    ``box`` carries (tx, ty, tz, sl, sw, sh, heading): the matrix scales
    by the box dimensions, rotates by the heading about z, and translates
    to the box centre.

    NOTE(review): when reading det3d boxes, confirm the dimension order
    matches this (sl, sw, sh) convention.
    """
    tx, ty, tz = box[0], box[1], box[2]
    sl, sw, sh = box[3], box[4], box[5]
    c, s = np.cos(box[6]), np.sin(box[6])

    return np.array([
        [sl * c, -sw * s, 0, tx],
        [sl * s, sw * c, 0, ty],
        [0, 0, sh, tz],
        [0, 0, 0, 1],
    ])
dae86d3260d0463d8e974c4fdba055781b520c93
3,632,847
def get_redirect_target():
    """
    Pick a safe URL to redirect to.

    Tries the ``next`` query argument first, then the referrer, and
    returns the first candidate that passes ``is_safe_url``; implicitly
    returns ``None`` when neither qualifies.
    """
    candidates = (request.args.get('next'), request.referrer)
    for target in candidates:
        if target and is_safe_url(target):
            return target
2ff373be5ad9d44124304454a5f89dd7d1f7d939
3,632,848
def mixer_carrier_cancellation(SH, source, MC,
                               chI_par, chQ_par,
                               frequency: float=None,
                               SH_ref_level: float=-40,
                               init_stepsize: float=0.1,
                               x0=(0.0, 0.0),
                               label: str='Offset_calibration',
                               ftarget=-110, maxiter=300):
    """
    Varies the mixer offsets to minimize leakage at the carrier frequency.
    this is a generic version.

    Args:
        SH       (instr) : Signal hound used to measure power
        source   (instr) : mw_source that provides the leakage tone
        MC       (instr) : measurement control instrument
        chI_par   (par)  : offset parameter for the I channel
        chQ_par   (par)  : offset parameter for the Q channel
        frequency (float): the frequency in Hz at which to minimize
            leakage; when None, the source's current frequency is used
        SH_ref_level (float): Signal hound reference level
        init_stepsize (float): initial stepsize for the CMA optimizer
        x0       (tuple) : starting point for optimization
        ftarget  (float) : termination value (target power in dBm)
        maxiter   (int)  : maximum number of function evaluations

    Returns:
        tuple: (ch_1_min, ch_2_min) -- the optimal I and Q offsets.
    """
    source.on()
    # Either read the frequency off the source or program the requested one.
    if frequency is None:
        frequency = source.frequency()
    else:
        source.frequency(frequency)

    '''
    Make coarse sweeps to approximate the minimum
    '''
    SH.ref_lvl(SH_ref_level)
    # Fixed-frequency power detector: measures leakage at the carrier.
    detector = det.Signal_Hound_fixed_frequency(
        SH, frequency=(source.frequency()),
        Navg=5, delay=0.0, prepare_for_each_point=False)

    # CMA-ES adaptive search over the two offset parameters.
    ad_func_pars = {'adaptive_function': cma.fmin,
                    'x0': x0,
                    'sigma0': 1,
                    'options': {'maxiter': maxiter,  # maximum function cals
                                # Scaling for individual sigma's
                                'cma_stds': [init_stepsize]*2,
                                'ftarget': ftarget},
                    'minimize': True}
    MC.set_sweep_functions([chI_par, chQ_par])
    MC.set_detector_function(detector)  # sets test_detector
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name=label, mode='adaptive')
    a = ma.OptimizationAnalysis(label=label)
    # v2 creates a pretty picture of the optimizations
    ma.OptimizationAnalysis_v2(label=label)

    ch_1_min = a.optimization_result[0][0]
    ch_2_min = a.optimization_result[0][1]
    return ch_1_min, ch_2_min
792f998cf2382d889ecfc2e6335c55a805d234df
3,632,849
def create_single_wall_box(box_dimensions):
    """Build the cut/score shapes for a single-walled box.

    ``box_dimensions`` keys: width, depth, height, thickness,
    fold_margin, wing_width.

    returns ShapeArray
    """
    width = box_dimensions['width']
    height = box_dimensions['height']
    depth = box_dimensions['depth']
    thickness = box_dimensions['thickness']
    fold_margin = box_dimensions['fold_margin']
    wing = box_dimensions['wing_width']
    # Each fold consumes twice the fold margin of slope.
    wing_slope = fold_margin * 2
    # Clamp the wing so it never exceeds ~half the wall height.
    safe_wing = height/2 * 0.95
    if wing > safe_wing:
        wing = safe_wing

    shapes = ShapeArray()
    # Scored rectangle forming the box bottom panel.
    bottom = Shape(
        ((0, 0), (width, 0), (width, depth), (0, depth)),
        closed=True, layer=ScoreLayer)
    # Outer cut outline, expressed as relative points and mirrored
    # across the box centre line.
    outer_cut_left = Shape(
        RelPoints([
            (-(width / 2 - thickness), 0),
            (-wing, wing_slope),
            (0, (height - 2*wing_slope)),
            (wing, wing_slope),
            (-(height+thickness), 0),
            (0, depth),  # <-- Middle
            ((height + thickness), 0),
            (-wing, wing_slope),
            (0, (height-2*wing_slope)),
            (wing, wing_slope),
            ((width / 2 - thickness), 0),
        ], mirror=(1, -1)),
        move=((width / 2), -height),
        layer=CutLayer,
    )
    shapes.add_shape(bottom)
    shapes.add_shape(outer_cut_left)
    # Fold lines between the wing segments: scored, not cut.
    shapes.add_shape(outer_cut_left.create_internal_line(1, 4, layer=ScoreLayer))
    shapes.add_shape(outer_cut_left.create_internal_line(7, 10, layer=ScoreLayer))
    shapes.add_shape(outer_cut_left.create_internal_line(12, 15, layer=ScoreLayer))
    shapes.add_shape(outer_cut_left.create_internal_line(18, 21, layer=ScoreLayer))
    return shapes
d7ee5700cebd083bf6800d636563d08fb1e645d3
3,632,850
from typing import Optional


def correct_start_cell_number(
    start_cell_number: Optional[int], mcnp: Optional[str]
) -> int:
    """Define cell number to start with on output to accompanying excel.

    Args:
        start_cell_number: number from command line or configuration, optional.
        mcnp: MCNP file name, optional

    Returns:
        ``start_cell_number`` when set; otherwise the first cell number
        found in the MCNP file, falling back to 1 when no file is given.
    """
    if start_cell_number:
        return start_cell_number
    return find_first_cell_number(mcnp) if mcnp else 1
c5fa474970be0f4f23c02ef6dead6f5028c3fc88
3,632,851
from typing import List


def convert_labels_to_one_hot(
    label_list: List[List[str]], label_dict: Dictionary
) -> List[List[int]]:
    """
    Encode each label set as a multi-hot 0/1 vector over the dictionary.

    :param label_list: list of labels
    :param label_dict: label dictionary
    :return: converted label list
    """
    vocabulary = label_dict.get_items()
    encoded = []
    for labels in label_list:
        row = [1 if item in labels else 0 for item in vocabulary]
        encoded.append(row)
    return encoded
856ae66a591ec7c32bbd5cf91e97ff87852f5b83
3,632,852
def create(container_dir, distro_config):
    """Create a container using chocolatey."""
    container = _fetch_choco(container_dir, distro_config)
    return container
e342101c8723b62e79b7f455e5444f2e214d9621
3,632,853
def notas(*num, sit=False):
    """Build a report-card dictionary summarising a student's grades.

    :param num: the student's grades
    :param sit: when True, also include the student's situation
        (approved / failed / retake)
    :return: the completed dictionary
    """
    media = sum(num) / len(num)
    boletim = {
        'Quantidade de notas': len(num),
        'Maior': max(num),
        'Menor': min(num),
        'Média do aluno': media,
    }
    if sit:
        if media >= 7:
            boletim['Situação'] = 'APROVADO'
        elif media < 6:
            boletim['Situação'] = 'REPROVADO'
        else:
            boletim['Situação'] = 'RECUPERAÇÃO'
    return boletim
a154d39be15018ce764e71c7bc97d9b4b25575df
3,632,854
from operator import concat


def columnize(student: 'StudentResult', longest_user: str,
              max_hwk_num: int, max_lab_num: int, max_wst_num: int,
              highlight_partials: bool = True):
    """Build one row of the results table for *student*.

    NOTE(review): ``operator.concat`` takes two arguments, yet ``concat``
    is called here with three -- the project's own ``concat`` helper is
    presumably intended; confirm the import.
    """
    padded_name = '{0:<{1}}'.format(student.name, len(longest_user))
    if len(student.unmerged_branches) > 0:
        padded_name = colored(padded_name, attrs={'bold': True})

    # Assemble the per-category cells before branching on the error case,
    # matching the original evaluation order.
    homework_row = concat(student.homeworks, max_hwk_num, highlight_partials)
    lab_row = concat(student.labs, max_lab_num, highlight_partials)
    worksheet_row = concat(student.worksheets, max_wst_num, highlight_partials)

    if student.error:
        return '{name} {sep} {err}'.format(
            name=padded_name, sep=COL, err=student.error)
    return '{name} {sep} {hws} {sep} {labs} {sep} {wkshts}'.format(
        name=padded_name,
        hws=homework_row,
        labs=lab_row,
        wkshts=worksheet_row,
        sep=COL)
5d4d2c20713883e6380d16b1e55ea72487b4e2d5
3,632,855
from typing import Iterable
from typing import Callable
from typing import Iterator
from typing import Tuple


def relate_one_to_many(
        lhs: Iterable[Left],
        rhs: Iterable[Right],
        lhs_key: Callable[[Left], Key]=DEFAULT_KEY,
        rhs_key: Callable[[Right], Key]=DEFAULT_KEY,
        ) -> Iterator[Tuple[Left, Iterator[Right]]]:
    """
    Relates `rhs` items to each `lhs` items.

    Note that:

    - `Key` must be 'comparable' (supports `__lt__()` and `__gt__()`).
    - `lhs` must be sorted by keys that `lhs_key` provides.
    - `rhs` must be sorted by keys that `rhs_key` provides.

    `lhs_key` and `rhs_key` are optional. When not given, then relates
    `rhs` to `lhs` by their first items (`left[0]` and `right[0]`).

    Here are some normal cases. These collections are sorted by the first
    items.

    >>> lhs = [(0, 'a'), (1, 'b'), (2, 'c')]
    >>> rhs = [(1, 's'), (2, 't'), (2, 'u'), (3, 'v')]

    When not given any keys, then relates `rhs` to `lhs` by their first
    items.

    >>> relations = relate_one_to_many(lhs, rhs)
    >>> for left, right in relations:
    ...     left, list(right)
    ((0, 'a'), [])
    ((1, 'b'), [(1, 's')])
    ((2, 'c'), [(2, 't'), (2, 'u')])

    When given custom keys, then relates `rhs` to `lhs` by that keys.
    Note that the custom keys *must not* break the key ordering.

    >>> relations = relate_one_to_many(
    ...     lhs, rhs,
    ...     lhs_key=lambda l: l[0] * 2,
    ...     rhs_key=lambda r: r[0] - 1)
    >>> for left, right in relations:
    ...     left, list(right)
    ((0, 'a'), [(1, 's')])
    ((1, 'b'), [(3, 'v')])
    ((2, 'c'), [])

    Here are some seminormal cases.

    When given an empty `lhs`, then returns an empty iterator.

    >>> relations = relate_one_to_many([], [(1, 's')])
    >>> list(relations)
    []

    When given an empty `rhs`, then returns an iterator that relates
    nothing.

    >>> relations = relate_one_to_many([(1, 'a')], [])
    >>> for left, right in relations:
    ...     left, list(right)
    ((1, 'a'), [])

    When given unordered `lhs`, then stops relationing at reverse-ordering
    segments.

    >>> lhs = [(0, 'a'), (2, 'b'), (1, 'c'), (4, 'd'), (3, 'e')]
    >>> rhs = [(1, 's'), (2, 't'), (2, 'u'), (3, 'v'), (4, 'w')]
    >>> relations = relate_one_to_many(lhs, rhs)
    >>> for left, right in relations:
    ...     left, list(right)
    ((0, 'a'), [])
    ((2, 'b'), [(2, 't'), (2, 'u')])
    ((1, 'c'), [])
    ((4, 'd'), [(4, 'w')])
    ((3, 'e'), [])

    When given unordered `rhs`, then stops relationing at reverse-ordering
    segments.

    >>> lhs = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd'), (4, 'e')]
    >>> rhs = [(1, 's'), (3, 't'), (3, 'u'), (2, 'v'), (4, 'w')]
    >>> relations = relate_one_to_many(lhs, rhs)
    >>> for left, right in relations:
    ...     left, list(right)
    ((0, 'a'), [])
    ((1, 'b'), [(1, 's')])
    ((2, 'c'), [])
    ((3, 'd'), [(3, 't'), (3, 'u')])
    ((4, 'e'), [(4, 'w')])
    """
    # Lazily pair each left item with its matching right items. The
    # finder consumes `rhs` forward-only, which is why both inputs must
    # be pre-sorted by their respective keys.
    rhs_finder = _UnidirectionalFinder(rhs, rhs_key)
    return ((l, rhs_finder.find(lhs_key(l))) for l in lhs)
d49906eb86f645093b2b75f3a84f5a9f8faaba8d
3,632,856
def cut(vid, bx, by):
    """Scale video frame dimensions to cover the bx/by box.

    Keeps the aspect ratio while growing the frame to at least the size
    of the box; returns the integer (width, height).
    """
    src_w = vid.get(cv.CAP_PROP_FRAME_WIDTH)
    src_h = vid.get(cv.CAP_PROP_FRAME_HEIGHT)

    if bx / float(src_w) > by / float(src_h):
        # Width is the limiting dimension: fit to width.
        out_w = bx
        out_h = (bx / float(src_w)) * src_h
    else:
        # Fit to height.
        out_h = by
        out_w = (by / float(src_h)) * src_w

    return int(out_w), int(out_h)
7e66f170b54bc421389609efd9ce88f3b8c05b41
3,632,857
def _conserve_heat(m, t):
    """
    Heat-balance constraint at time *t*: generation plus storage flows
    minus consumption must sum to zero.
    """
    sources = m.NuCap
    sinks = -(m.Turbine[t] / ECONV_RATE) + m.NPP_unused[t]
    battery = sum(
        getattr(m, f'{tes}_charge')[t] + getattr(m, f'{tes}_discharge')[t]
        for tes in HEAT_TECHS
    )
    return sources + battery + sinks == 0
cd3ae71678c5342404cd08c8ed7715ecd091347e
3,632,858
import numpy as np
from datetime import datetime


def files_to_daisy_chain(ifg_names, figures = True):
    """Given a list of interferogram names (masterDate_slaveDate), it:
        - finds all the acquisition dates
        - forms a list of names of the simplest daisy chain of interferograms
        - lists which number acquisition each interferogram is between (e.g. (0, 3))

    Inputs:
        ifg_names | list of strings | of form 20180101_20190102 (for a 1 day ifg in January)
        figures | boolean | if True, plot the temporal baselines

    Returns:
        dates_acq | list of strings | sorted unique acquisition dates (YYYYMMDD)
        daisy_chain_ifgs | list of strings | consecutive-acquisition interferogram names
        ifg_acq_numbers | list of tuples | (master index, slave index) into dates_acq
    """
    # Collect the unique acquisition dates from both ends of every name.
    dates_acq = []
    for date in ifg_names:
        date1 = date[:8]
        date2 = date[9:]
        if date1 not in dates_acq:
            dates_acq.append(date1)
        if date2 not in dates_acq:
            dates_acq.append(date2)
    dates_acq = sorted(dates_acq)

    # Daisy chain: one interferogram between each consecutive acquisition pair.
    daisy_chain_ifgs = []
    for i in range(len(dates_acq) - 1):
        daisy_chain_ifgs.append(dates_acq[i] + '_' + dates_acq[i + 1])

    # NOTE: the original also built a `days_elapsed` array here that was never
    # read anywhere; that dead computation has been removed.

    # For each interferogram, the indices of its master and slave acquisitions.
    ifg_acq_numbers = []
    for ifg in ifg_names:
        master = ifg[:8]
        slave = ifg[9:]
        ifg_acq_numbers.append((dates_acq.index(master), dates_acq.index(slave)))

    if figures:
        # Imported lazily: matplotlib is heavy and only needed for the
        # optional temporal-baseline plot.
        import matplotlib.pyplot as plt
        first_acq = datetime.strptime(ifg_names[0][:8], '%Y%m%d')
        f, ax = plt.subplots(1)
        # One horizontal bar per interferogram, positioned by days elapsed.
        for i, file in enumerate(ifg_names):
            master = datetime.strptime(file[:8], '%Y%m%d')
            slave = datetime.strptime(file[9:], '%Y%m%d')
            master_xval = (master - first_acq).days
            slave_xval = (slave - first_acq).days
            ax.plot((master_xval, slave_xval), (i, i), '-')
        # The daisy chain is drawn in black below the real interferograms.
        for i in range(len(dates_acq) - 1):
            master = datetime.strptime(dates_acq[i], '%Y%m%d')
            slave = datetime.strptime(dates_acq[i + 1], '%Y%m%d')
            master_xval = (master - first_acq).days
            slave_xval = (slave - first_acq).days
            ax.plot((master_xval, slave_xval),
                    (-len(dates_acq) + i, -len(dates_acq) + i), '-', c='k')
        ax.set_ylabel('Ifg. #')
        ax.set_xlabel('Days since first acquisition')

    return dates_acq, daisy_chain_ifgs, ifg_acq_numbers
0c21bfc09fb93979715daaf8407ea9ed083cbad9
3,632,859
def find_region_candidates_volume(volume, peak_threshold, shifts = (0, 0)):
    """Find local-maximum candidate points in every z-slice of a volume.

    :param volume: 3-D array; slices are taken along the last axis
    :param peak_threshold: absolute intensity threshold for peak detection
    :param shifts: (x, y) offsets subtracted from each detected coordinate
    :return: list of [x, y, z] candidate coordinates
    """
    candidates = []
    shift_x, shift_y = shifts
    for z in range(volume.shape[-1]):
        plane = volume[..., z].copy()
        # Smooth in place to suppress single-pixel noise before peak picking.
        cv2.GaussianBlur(plane, (3, 3), 0.4, dst = plane)
        # peak_local_max returns rows as (y, x); swap to (x, y) on output.
        peaks = peak_local_max(plane, min_distance = 1, threshold_abs = peak_threshold)
        for p in peaks:
            candidates.append([int(p[1]) - shift_x, int(p[0]) - shift_y, z])
    return candidates
c0f61a6907bb1eb326daafc22274d90f281b5255
3,632,860
def summarize(text):
    """Return a short, single-line summary of ``text``.

    Strings longer than 20 characters are abbreviated to their first and
    last ten characters joined by ``" ... "``; newlines are escaped so the
    result always stays on one line.
    """
    abbreviated = text if len(text) <= 20 else "{} ... {}".format(text[:10], text[-10:])
    return abbreviated.replace("\n", "\\n")
5c37f7a50e2b533bf3e05b598ce68b2c4de88fe1
3,632,861
from datetime import datetime


def create_nic(fco_api, cluster_uuid, net_type, net_uuid, vdc_uuid, name=None):
    """
    Create NIC.

    :param fco_api: FCO API object
    :param cluster_uuid: Cluster UUID
    :param net_type: Network type; currently recommended 'IP'
    :param net_uuid: Network UUID
    :param vdc_uuid: VDC UUID
    :param name: NIC name; defaults to a timestamped name
    :return: Job-compatible object
    """
    # Default to a unique, human-readable name based on the current time.
    nic_name = name if name is not None else 'NIC ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    skeleton = cobjects.Nic(clusterUUID=cluster_uuid,
                            networkUUID=net_uuid,
                            vdcUUID=vdc_uuid,
                            resourceType=RT.NIC,
                            serverUUID=None,
                            sortOrder=None,
                            networkType=net_type,
                            resourceName=nic_name)
    return fco_api.createNetworkInterface(skeletonNIC=skeleton)
4709f215662bed8a1c76920ca0d5d49e501a50a6
3,632,862
def clean_logger(name = settings["app_name"]):
    """
    Removes all handlers associated with a given logger.

    Parameters
    ----------
    name : string
        name of the logger

    Returns
    -------
    logging.Logger
        The (now handler-free) logger, with its custom ``is_set`` flag reset.
    """
    # NOTE(review): the default is evaluated once, at import time, from the
    # module-level `settings` mapping -- confirm that is intended.
    logger = lg.getLogger(name)
    # BUG FIX: iterate over a *copy* of the handler list. removeHandler()
    # mutates logger.handlers, and removing while iterating the live list
    # silently skipped every other handler.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logger.is_set = False
    return logger
fee9743c57ef054a9dbbd53bf8ab0dcd1d2801bd
3,632,863
def model_from_json(json_string, custom_objects=None):
    """Reconstruct an (uncompiled) Keras model from a JSON config string.

    Args:
        json_string: JSON string produced by ``model.to_json()``.
        custom_objects: Optional dictionary mapping names (strings) to custom
            classes or functions consulted during deserialization.

    Returns:
        A Keras model instance (uncompiled).
    """
    model_config = json_utils.decode(json_string)
    # Imported here to avoid a circular import with the layers package.
    from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(model_config, custom_objects=custom_objects)
3e929b5f2c07cb214edea5ed042fe0e49f0971ad
3,632,864
def get_stock_stream(symbol, params=None):
    """
    Gets the stream of messages for the given symbol.

    :param symbol: ticker symbol to stream messages for
    :param params: optional dict of extra query parameters, merged over the
        base StockTwits parameters. BUG FIX: the original accepted this
        argument but silently ignored it (and used a shared mutable ``{}``
        default).
    :return: parsed JSON response

    copied from api.py (found on GitHub)
    """
    all_params = ST_BASE_PARAMS.copy()
    if params:
        all_params.update(params)
    return R.get_json(ST_BASE_URL + 'streams/symbol/{}.json'.format(symbol), params=all_params)
f6b5aa3601473eba52f3a736b4befa5ae1163be7
3,632,865
def get_free_swap_memory() -> int:
    """Get the free swap memory size in bytes."""
    # Thin wrapper over psutil's swap_memory() snapshot; only the `free`
    # field is exposed here.
    return swap_memory().free
141313acb49baff1a25d837daec73048b9accd53
3,632,866
def get_application(application_id: str = None):
    """
    Returns an application.

    :param application_id: The numeric ID of the application you're interested in.
    :returns: String containing xml or an lxml element.
    """
    # Thin wrapper: delegates to the anonymous (unauthenticated) API call.
    return get_anonymous('getApplication', application_id=application_id)
53142b0ac238876f52e26e00f3af190643443671
3,632,867
def sexastr2deci(sexa_str):
    """Convert a sexagesimal string to its decimal value.

    The components are separated by colons; a leading ``-`` negates the
    whole value.

    Examples:
      >>> sexastr2deci('15:30:00')
      15.5
      >>> sexastr2deci('-15:30:45')
      -15.5125
    """
    if sexa_str.startswith('-'):
        sign, body = -1.0, sexa_str[1:]
    else:
        sign, body = 1.0, sexa_str
    # Successive fields are degrees, minutes, seconds, ...: field i
    # contributes its value divided by 60**i.
    total = 0
    for position, field in enumerate(body.split(':')):
        total = total + float(field) / (60.0 ** position)
    return total * sign
46a9d8752b05b1579ecc2b85d94c28613a08ab3c
3,632,868
def run_extractor_on_dataset(dataset_path):
    """Run the feature extractor over a dataset, then consolidate the results.

    Note: extraction runs on all images inside SUB DIRECTORIES of
    ``dataset_path``. Post-processing consolidates the per-image outputs into
    a single features document per frame ID (with ``id``, ``model`` and one
    entry per detection holding its feature vector) and removes the emitted
    individual image feature files.

    Parameters
    ----------
    dataset_path: str
        Path to the dataset directory whose subdirectories contain the
        target image files.

    Returns
    -------
    dict
        Map of frame ID -> consolidated features document for that frame.
    """
    raw_features = extract_features_from_dataset(dataset_path)
    return utils.post_process_features_data(
        dataset_path, raw_features, FEATURES_MODEL_PATH)
79b816110cca7bf5ecf258f8380bbf285f0b9018
3,632,869
from typing import Callable
from typing import Mapping


def run_pipeline_func_on_cluster(
    pipeline_func: Callable,
    arguments: Mapping[str, str],
    run_name: str = None,
    experiment_name: str = None,
    kfp_client: Client = None,
    pipeline_conf: dsl.PipelineConf = None,
):
    """Run a pipeline function on a KFP-enabled Kubernetes cluster.

    Compiles the pipeline function, creates or reuses an experiment, and
    submits the pipeline for execution.

    Feature stage:
    [Alpha](https://github.com/kubeflow/pipelines/blob/07328e5094ac2981d3059314cc848fbb71437a76/docs/release/feature-stages.md#alpha)

    Args:
        pipeline_func: Function that describes a pipeline by calling
            components and composing them into an execution graph.
        arguments: Pipeline arguments as a dict.
        run_name: Optional run name shown in the UI.
        experiment_name: Optional experiment to add the run to.
        kfp_client: Optional kfp.Client configured for the target cluster;
            a default client is created when omitted.
        pipeline_conf: Optional kfp.dsl.PipelineConf with pipeline-level
            configuration (op transforms, image pull secrets, ...).
    """
    if kfp_client is None:
        kfp_client = Client()
    return kfp_client.create_run_from_pipeline_func(
        pipeline_func, arguments, run_name, experiment_name, pipeline_conf)
c44069b016706f5cdcbab1fd9319cf96ef4f2dfa
3,632,870
import os
import hashlib


def _file_md5(path):
    """Return the hex MD5 digest of the file at *path* (handle closed promptly)."""
    with open(path, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()


def find_duplicates(dirname, extension):
    """Search *dirname* and its subdirectories, recursively, for files with
    the given suffix and group duplicates.

    Files are first grouped by basename; within each group, files whose MD5
    matches the first file's MD5 are reported together.

    Returns
    -------
    dict
        Map of MD5 hex digest -> list of paths sharing that content.
    """
    possible_duplicates = dict()
    for file in filter_files_with_extension(dirname, extension):
        file_name = os.path.basename(file)
        possible_duplicates.setdefault(file_name, []).append(file)

    duplicates = dict()
    for file_name, items in possible_duplicates.items():
        if len(items) > 1:
            # BUG FIX: files were previously opened without ever being
            # closed (a descriptor leak); hashing now goes through a
            # context manager in _file_md5().
            checksum_first = _file_md5(items[0])
            duplicates[checksum_first] = [items[0]]
            for other in items[1:]:
                if _file_md5(other) == checksum_first:
                    duplicates[checksum_first].append(other)
    return duplicates
07e44327681975fbf95a7b011d5908a5ce706121
3,632,871
def reverb2mix_transcript_parse(path):
    """
    Parse the MLF-format transcript files used by the REVERB challenge dataset.

    The file starts with an ``#!MLF!#`` header; utterance blocks are
    separated by a line containing a single ``.``. Each block starts with a
    label line followed by one word per line.

    Returns
    -------
    dict
        Map of label -> {"utterance_id", "speaker_id", "transcript"}.
    """
    utterances = {}
    with open(path, "r") as f:
        everything = f.read()
    all_utt = everything.split("\n.\n")
    for i, utt in enumerate(all_utt):
        if i == 0:
            assert utt[:7] == "#!MLF!#"
            # BUG FIX: strip the header AND the newline that follows it.
            # Keeping the newline made words[0] empty, so the first
            # utterance's label parsed as "" and its label line leaked into
            # the transcript.
            utt = utt[7:].lstrip("\n")
        words = utt.split("\n")
        # The label sits between a 4-char prefix and a 6-char suffix on the
        # first line of the block.
        label = words[0][4:-6]
        sentence = " ".join(words[1:])
        speaker = label[:-5]
        utterance = label[-5:]
        utterances[label] = {
            "utterance_id": utterance,
            "speaker_id": speaker,
            "transcript": sentence,
        }
    return utterances
c8a1aa0c8a4d0dec6626cf8e9d2491336ee42d5a
3,632,872
def extract_qa_bits(qa_band, start_bit, end_bit):
    """Extracts the QA bitmask values for a specified bitmask (starting and
    ending bit).

    Parameters
    ----------
    qa_band : numpy array
        Array containing the raw QA values (base-2) for all bitmasks.
    start_bit : int
        First bit in the bitmask.
    end_bit : int
        Last bit in the bitmask.

    Returns
    -------
    qa_values : numpy array
        Array containing the extracted QA values (base-10) for the bitmask
        (0-1 for a single bit, 0-3 for 2 bits, 0-2^N - 1 for N bits).
    """
    # Build a mask with ones in positions start_bit..end_bit (inclusive).
    # BUG FIX: the original accumulated `bit ** 2` (the bit index squared)
    # instead of the bit's value `1 << bit`, producing a wrong mask for
    # most bit ranges.
    qa_bits = 0
    for bit in range(start_bit, end_bit + 1):
        qa_bits += 1 << bit

    # Keep only the bits of interest...
    qa_flags_set = qa_band & qa_bits

    # ...and shift them down so the result is the plain base-10 flag value.
    qa_values = qa_flags_set >> start_bit

    return qa_values
523dc1ee149af5c5e9a494b5fe3a3c14bc3186d2
3,632,873
def is_scalar(v,value=None):
    """Returns True if v evaluates to a scalar.  If value is provided, then
    returns True only if v evaluates to be equal to value"""
    if isinstance(v,Variable):
        # Symbolic variable: scalar iff its declared type is scalar; when a
        # target value is requested, its current value must match too.
        if not v.type.is_scalar():
            return False
        return value is None or v.value == value
    elif isinstance(v,ConstantExpression):
        # Unwrap the constant and test its underlying value recursively.
        return is_scalar(v.value,value)
    elif isinstance(v,Expression):
        # General expression: only the declared return type can be
        # inspected, so a requested `value` can never be confirmed here.
        rt = v.returnType()
        if rt is None:
            return False
        if not rt.is_scalar():
            return False
        return value is None
    else:
        # Plain Python number/bool, or a 0-d array-like (shape == ()).
        if not (isinstance(v,(float,int,bool)) or (hasattr(v,'shape') and v.shape == ())):
            return False
        return value is None or v == value
831d539c634d812b42a1c9716aab994145bf8cd2
3,632,874
def cluster_homogeneity(df:pd.DataFrame, edge_type="Edge", iteration_type="Iteration"):
    """
    # Create Graph
    from soothsayer_utils import get_iris_data
    df_adj = get_iris_data(["X"]).iloc[:5].T.corr() + np.random.RandomState(0).normal(size=(5,5))
    graph = nx.from_pandas_adjacency(df_adj)
    graph.nodes()
    # NodeView(('sepal_length', 'sepal_width', 'petal_length', 'petal_width'))

    # Community detection (network clustering)
    df_louvain = community_detection(graph, n_iter=10, algorithm="louvain")
    df_louvain
    # Partition 0 1 2 3 4 5 6 7 8 9
    # Node
    # iris_0    0 0 0 0 0 0 0 0 0 0
    # iris_1    1 1 1 1 1 1 1 1 1 1
    # iris_2    1 2 2 2 2 1 2 2 2 2
    # iris_3    0 1 1 1 1 0 1 1 1 1
    # iris_4    2 3 3 3 3 2 3 3 3 3

    # Determine cluster homogeneity
    df_homogeneity = cluster_homogeneity(df_louvain)
    df_homogeneity
    # Iteration        0 1 2 3 4 5 6 7 8 9
    # Edge
    # (iris_1, iris_0) 0 0 0 0 0 0 0 0 0 0
    # (iris_2, iris_0) 0 0 0 0 0 0 0 0 0 0
    # (iris_3, iris_0) 1 0 0 0 0 1 0 0 0 0
    # (iris_4, iris_0) 0 0 0 0 0 0 0 0 0 0
    # (iris_1, iris_2) 1 0 0 0 0 1 0 0 0 0
    # (iris_3, iris_1) 0 1 1 1 1 0 1 1 1 1
    # (iris_4, iris_1) 0 0 0 0 0 0 0 0 0 0
    # (iris_3, iris_2) 0 0 0 0 0 0 0 0 0 0
    # (iris_4, iris_2) 0 0 0 0 0 0 0 0 0 0
    # (iris_4, iris_3) 0 0 0 0 0 0 0 0 0 0

    df_homogeneity.mean(axis=1)[lambda x: x > 0.5]
    # Edge
    # (iris_3, iris_1)    0.8
    # dtype: float64
    """
    # Adapted from @code-different:
    # https://stackoverflow.com/questions/58566957/how-to-transform-a-dataframe-of-cluster-class-group-labels-into-a-pairwise-dataf

    # `x` is a table of (n=nodes, p=iterations)
    nodes = df.index
    iterations = df.columns
    x = df.values
    n,p = x.shape

    # `y` is an array of n tables, each having 1 row and p columns
    y = x[:, None]

    # Using numpy broadcasting, `z` contains the result of comparing each
    # table in `y` against `x`. So the shape of `z` is (n x n x p):
    # z[i, j, k] is True iff nodes i and j share a cluster label in
    # iteration k.
    z = x == y

    # Reshaping `z` by merging the first two dimensions: one row per ordered
    # node pair (i, j), one column per iteration.
    data = z.reshape((z.shape[0] * z.shape[1], z.shape[2]))

    # Redundant pairs: a node compared with itself collapses to a
    # single-element frozenset; these rows are dropped below.
    redundant_pairs = list(map(lambda node:frozenset([node]), nodes))

    # Create pairwise clustering matrix, indexed by unordered node pairs.
    df_pairs = pd.DataFrame(
        data=data,
        index=pd.Index(list(map(frozenset, product(nodes,nodes))), name=edge_type),
        columns=pd.Index(iterations, name=iteration_type),
        dtype=int,
    ).drop(redundant_pairs, axis=0)

    # Each unordered pair appears twice ((i,j) and (j,i) map to the same
    # frozenset); keep only the first occurrence of each.
    return df_pairs[~df_pairs.index.duplicated(keep="first")]
e077ff0f3e1660ea5788880282fbc3862b5737df
3,632,875
import json


def to_json_for_storage(desc: SomeRunDescriber) -> str:
    """
    Serialize the given RunDescriber to JSON as a RunDescriber of the
    version for storage
    """
    # Convert to the storage-version dict first; json.dumps then performs
    # the actual serialization.
    return json.dumps(to_dict_for_storage(desc))
cdda5322acf1da6e704bdea47cd1cee9019019ad
3,632,876
def astar(graph, start, end, heuristic={}):
    """
    Performs A-star search to find the shortest path from start to end.

    Args:
        graph (gennav.utils.graph): Graph where nodes map to lists of
            neighbouring nodes
        start (gennav.utils.RobotState): start state
        end (gennav.utils.RobotState): goal state
        heuristic (dict): optional heuristic values for all nodes; when
            empty, euclidean distance to the goal is used

    Returns:
        gennav.utils.Trajectory: The planned path as trajectory

    Raises:
        PathNotFound: if the search exhausts the graph without reaching end.
    """
    if not (start in graph.nodes and end in graph.nodes):
        return Trajectory([start])

    open_ = []
    closed = []

    # Seed the open list with the start node: g = 0, h from the caller's
    # heuristic or the euclidean distance to the goal.
    start_node = NodeAstar(state=start)
    if len(heuristic) == 0:
        start_node.h = compute_distance(start.position, end.position)
    else:
        start_node.h = heuristic[start]
    start_node.g = 0
    start_node.f = start_node.g + start_node.h
    open_.append(start_node)

    while len(open_) > 0:
        open_.sort()
        current_node = open_.pop(0)
        closed.append(current_node)

        # Goal reached: walk the parent links back to the start.
        if current_node.state.position == end.position:
            path = []
            while current_node.parent is not None:
                path.append(current_node.state)
                current_node = current_node.parent
            path.append(start_node.state)
            return Trajectory(path[::-1])

        for node in graph.edges[current_node.state]:
            neighbour = NodeAstar(state=node, parent=current_node)
            if neighbour in closed:
                continue

            # BUG FIX: g must be the accumulated cost from the start node,
            # not just the cost of the final edge, or the search is not A*.
            neighbour.g = current_node.g + compute_distance(
                node.position, current_node.state.position)
            if len(heuristic) == 0:
                neighbour.h = compute_distance(node.position, end.position)
            else:
                neighbour.h = heuristic[node]
            neighbour.f = neighbour.g + neighbour.h

            # If an equal node is already queued, keep whichever has the
            # lower total cost. BUG FIX: the original rebound the loop
            # variable (`new_node = neighbour`), which never actually
            # updated the entry inside the open list.
            for idx, queued in enumerate(open_):
                if neighbour == queued:
                    if neighbour.f < queued.f:
                        open_[idx] = neighbour
                    break
            else:
                open_.append(neighbour)

    # The open list is exhausted without reaching the goal.
    raise PathNotFound(Trajectory([start]), message="Path contains only one state")
90c60cc7b14e7d223889316a49223450fa4806f4
3,632,877
def extractFileName(form, id, cleanup=True, allowEmptyPostfix=False):
    """Extract the filename of the widget with the given id.

    Uploads from win/IE need some cleanup because the filename includes also
    the path. The option ``cleanup=True`` will do this for you.

    The option ``allowEmptyPostfix`` allows to have a filename without
    extensions. By default this option is set to ``False`` and will raise a
    ``ValueError`` if a filename doesn't contain a extension.
    """
    widget = getWidgetById(form, id)
    if cleanup or not allowEmptyPostfix:
        # Strip any Windows or POSIX path prefix. Even when the caller does
        # not want the cleaned name returned, we still need it in order to
        # inspect the filename extension.
        base_name = widget.filename.split('\\')[-1].split('/')[-1]
        if not allowEmptyPostfix and len(base_name.split('.')) <= 1:
            raise ValueError(_('Missing filename extension.'))
        if cleanup:
            return base_name
    return widget.filename
8aeccba3be2f4a4de781efaff88bf1835645c293
3,632,878
def find_pointing_start(asn_table_name):
    """Determine whether a visit starts with a direct image or a grism.

    Parameters:
        asn_table_name : string
            For example, 'gs2-01-189-g102'. Targname-visit-PA-filter

    Returns:
        0 : if visit starts with direct image (filter F105W)
        1 : if visit starts with grism (filter G102)
        None : if the first exposure is neither (falls off the end)
    """
    # Parse out the asn table's target and visit.
    path_to_outputs = set_paths.paths['path_to_outputs']

    # Read in files.info.
    data = ascii.read(os.path.join(path_to_outputs, 'files.info'),
                      guess=False, format='basic')

    # Makes lists out of FILE, TARGNAME, TIME-OBS.
    # Note that if this is not working, just open 'files.info' and convert
    # all tabs to spaces with your editor.
    filenames = np.array(data['FILE'])
    targnames = np.array(data['TARGNAME'])
    dateobses = np.array(data['DATE-OBS'])
    timeobses = np.array(data['TIME-OBS'])
    filters = np.array(data['FILTER'])

    # Sort out only the rows with same target and visit as the asn table.
    targname_sort = np.where(targnames == asn_table_name.split('-')[0].upper())
    visit_sort = []
    for i, filename in enumerate(filenames[targname_sort]):
        # Characters 4:6 of the rootname encode the visit number.
        if filename[4:6] == asn_table_name.split('-')[1]:
            visit_sort.append(i)
    visit_sort = np.array(visit_sort)

    dateobses_sorted = (dateobses[targname_sort])[visit_sort]
    timeobses_sorted = (timeobses[targname_sort])[visit_sort]
    filters_sorted = (filters[targname_sort])[visit_sort]

    # Now loop through the time-obs list and find which entry came first.
    # Initialize with the first entry (in most cases this remains true).
    first_time = Time(dateobses_sorted[0] + ' ' + timeobses_sorted[0],
                      format='iso', out_subfmt='date_hms')
    first_image = filters_sorted[0]
    # BUG FIX: the original used Python 2 print statements, a syntax error
    # under Python 3; converted to print() calls with identical output.
    print("Initial start image: {} and start time: {}".format(first_image, first_time))

    for dateobs, timeobs, filt in zip(dateobses_sorted[1:], timeobses_sorted[1:],
                                      filters_sorted[1:]):
        # Turn into a date-time object.
        next_time = Time(dateobs + ' ' + timeobs, format='iso', out_subfmt='date_hms')
        min_time = min(first_time, next_time)
        if min_time == next_time:
            first_time = next_time
            first_image = filt
            print("New start image: {} and start time: {}".format(first_image, first_time))

    # If a Direct image (filter=f105w) taken first, return 0.
    if first_image.lower() == 'f105w':
        return 0
    # If a Grism (filter=g102) taken first, return 1.
    elif first_image.lower() == 'g102':
        return 1
ee700ddc8903914956c42ec82763a2463186e38b
3,632,879
def data_logs(recipe_id=None):
    """
    Flask controller: show logs for a recipe.

    The minimum log level is taken from the ``level`` query parameter
    (default ``WARNING``).
    """
    log_level = flask.request.args.get('level', 'WARNING').upper()
    return flask.render_template(
        "data-logs.html",
        recipe=recipes.Recipe(recipe_id),
        level=log_level,
        in_logger=True,
    )
fc0bd90c04449eb2e0748e87490fb35eff991975
3,632,880
def slice_metadata_using_already_sliced_data_df(data_df, row_meta_df, col_meta_df):
    """Slice row_meta_df and col_meta_df down to only the row_ids and
    col_ids present in data_df.

    Args:
        data_df (pandas df)
        row_meta_df (pandas df)
        col_meta_df (pandas df)

    Returns:
        out_gct (GCToo object): with all dfs correctly sliced
    """
    remaining_rows = data_df.index.values
    remaining_cols = data_df.columns.values

    # A GCT with a single row or column would end up transposed (and ugly)
    # downstream, so require at least 2 of each.
    if len(remaining_rows) <= 1:
        err_msg = "Fewer than 2 rows remain after data processing. I don't like that!"
        logger.error(err_msg)
        raise Exception(err_msg)
    if len(remaining_cols) <= 1:
        err_msg = "Fewer than 2 columns remain after data processing. I don't like that!"
        logger.error(err_msg)
        raise Exception(err_msg)

    # Subset the metadata to exactly the surviving rows / samples.
    sliced_row_meta = row_meta_df.loc[remaining_rows, :]
    sliced_col_meta = col_meta_df.loc[remaining_cols, :]

    return GCToo.GCToo(data_df=data_df,
                       row_metadata_df=sliced_row_meta,
                       col_metadata_df=sliced_col_meta)
423ed7a9d50f5d831dc3eff58bcab120dd30d22e
3,632,881
def create_user(request, template_name='create_user.html',
                redirect_field_name=REDIRECT_FIELD_NAME,
                user_creation_form=UserCreationForm,
                current_app=None, extra_context=None):
    """
    Display the user-creation form and handle its submission.
    """
    # noinspection PyUnusedLocal
    current_app = current_app

    # NOTE(review): request.REQUEST merges GET and POST and is removed in
    # modern Django -- confirm the target Django version supports it.
    redirect_to = request.REQUEST.get(redirect_field_name, '')

    if request.method == "POST":
        form = user_creation_form(data=request.POST)
        if form.is_valid():
            # Only honour user-supplied redirect targets that stay on-site;
            # otherwise fall back to the configured post-login URL.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            form.save(commit=True)
            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()
            return HttpResponseRedirect(redirect_to)
    else:
        form = user_creation_form()

    # Re-set the test cookie for the (re-)rendered form.
    request.session.set_test_cookie()

    site = get_current_site(request)
    template_context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': site,
        'site_name': site.name,
    }
    if extra_context is not None:
        template_context.update(extra_context)
    return TemplateResponse(request, template_name, template_context)
1b551723d9dfcc3db407f5c10fb680d664e43ca2
3,632,882
def found_table(schema:str, table_name:str) -> bool:
    """Returns whether the given table is found in the list of cached tables.

    Exactly one match in the module-level ``tables`` cache counts as found.
    """
    matches = [
        cached
        for cached in tables
        if cached.table_name == table_name and cached.schema == schema
    ]
    return len(matches) == 1
eac5a1dbe43f07ca664ab6f0aeb40a7952f80823
3,632,883
import wget


def download_url(url, pth_download=None, speak=False):
    """Download data from URL.

    :param url: URL to fetch
    :param pth_download: optional destination path
    :param speak: when True, print a progress line on every wget callback
    :return: path of the downloaded file
    """
    if wget is None:
        raise ImportError('wget not available')

    progress = None
    if speak:
        def progress(current, total, width=80):
            print("Downloading %s to %s | Progress: %d%% (%d/%d bytes)" %
                  (url, pth_download, current / total * 100, current, total))

    return wget.download(url, pth_download, bar=progress)
715da8fecd5fe1442024acf8150314b5c2e64f99
3,632,884
def openurl(url):
    """Open *url* via :func:`urlopen` and return the response object.

    NOTE(review): the original docstring claimed this wrapper "retries", but
    no retry logic is implemented; it appears to exist only as a seam so
    callers/tests can patch URL opening in one place -- confirm whether
    retries were intended.

    :param url: url to open
    """
    return urlopen(url)
eae62fa336ac0eab462354a805c61ca0834ef3c4
3,632,885
from datetime import datetime
from typing import Tuple


def portfolio_return(holdings: float,
                     asset_percent: float,
                     bond_percent: float,
                     asset_etfs: pd.DataFrame,
                     bond_etfs: pd.DataFrame,
                     start_date: datetime,
                     end_date: datetime) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    An implementation of the 12% Solution ETF rotation.

    From start_date to end_date the code calculates the highest past three
    month return from the asset_etfs and bond_etfs. The ETF with the highest
    return is selected for the next month.

    Calculate the monthly period from start_date to end_date. This will
    provide the monthly periods for the calculation. The daily returns are
    calculated for the next month and applied to the current portfolio
    balance. Then the loop steps forward by one month.

    Returns a tuple of:
      - portfolio_df: daily portfolio value, indexed by date
      - choices_df: the Equity/Bond ETF chosen for each month
    """
    # Both price tables must cover the same trading days.
    assert asset_etfs.shape[0] == bond_etfs.shape[0]
    periods_df = find_month_periods(start_date, end_date, asset_etfs)
    # The selection window looks back three months from each month start.
    back_delta = relativedelta(months=3)
    date_index = asset_etfs.index
    bond_asset_l = list()
    equity_asset_l = list()
    month_index_l = list()
    portfolio_a = np.zeros(0)
    for row in range(periods_df.shape[0]):
        # Split the current balance by the fixed allocation percentages.
        asset_holdings = holdings * asset_percent
        bond_holdings = holdings * bond_percent
        period_info = periods_df[:][row:row+1]
        month_start_date = convert_date(period_info['start_date'].values[0])
        back_start_date = month_start_date - back_delta
        back_start_ix = findDateIndex(date_index, back_start_date)
        month_start_ix = period_info['start_ix'].values[0]
        month_end_ix = period_info['end_ix'].values[0]
        # Pick the best-performing ETF of each class over the look-back
        # window; it is held for the coming month.
        equity_asset = chooseAssetName(start=back_start_ix, end=month_start_ix, asset_set=asset_etfs)
        bond_asset = chooseAssetName(start=back_start_ix, end=month_start_ix, asset_set=bond_etfs)
        equity_asset_l.append(equity_asset)
        bond_asset_l.append(bond_asset)
        month_index_l.append(month_start_date)
        asset_month_prices_df = pd.DataFrame(asset_etfs[equity_asset][month_start_ix:month_end_ix + 1])
        bond_month_prices_df = pd.DataFrame(bond_etfs[bond_asset][month_start_ix:month_end_ix + 1])
        asset_month_return_df = return_df(asset_month_prices_df)
        bond_month_return_df = return_df(bond_month_prices_df)
        # Apply the month's daily returns to each sleeve, then recombine.
        asset_month_a = apply_return(asset_holdings, asset_month_return_df)
        bond_month_a = apply_return(bond_holdings, bond_month_return_df)
        portfolio_total_a = asset_month_a + bond_month_a
        # The month-end value becomes next month's starting balance.
        holdings = portfolio_total_a[-1]
        portfolio_a = np.append(portfolio_a, portfolio_total_a)
    portfolio_df = pd.DataFrame(portfolio_a)
    portfolio_df.columns = ['portfolio']
    # Re-attach the trading-day index covering the simulated span.
    num_rows = periods_df.shape[0]
    first_row = periods_df[:][0:1]
    last_row = periods_df[:][num_rows - 1:num_rows]
    start_ix = first_row['start_ix'].values[0]
    end_ix = last_row['end_ix'].values[0]
    portfolio_index = date_index[start_ix:end_ix + 1]
    portfolio_df.index = portfolio_index
    # Monthly record of which ETFs were selected.
    choices_df = pd.DataFrame()
    choices_df['Equity'] = pd.DataFrame(equity_asset_l)
    choices_df['Bond'] = pd.DataFrame(bond_asset_l)
    choices_df.index = month_index_l
    return portfolio_df, choices_df
dbf78f636e4f5f3a57baf15e94b8937582e63fcd
3,632,886
def rewofzt2(x,y):
    """Real part of an asymptotic representation of the wofz (Faddeeva)
    function, valid for |z|**2 > 111 (for e = 10e-6).

    See Zaghloul (2018) arxiv:1806.01656.

    Args:
        x: real part of z
        y: imaginary part of z

    Returns:
        f: Real(wofz(x+iy))
    """
    z = x + (1j) * y
    z2 = z * z
    # Rational asymptotic approximation: i*z*(z^2 - 5/2) /
    # (sqrt(pi) * (z^2*(z^2 - 3) + 3/4)).
    numerator = (1j) * z * (z2 - 2.5)
    denominator = jnp.sqrt(jnp.pi) * (z2 * (z2 - 3.0) + 0.75)
    return jnp.real(numerator / denominator)
3b820a6be8b4cccc220e8616a5f0b7807a4295de
3,632,887
import struct


def _macos_bundle_impl(ctx):
    """Implementation of the macos_bundle rule."""
    # App icons (when provided) are packaged as an extra resource set.
    additional_resource_sets = []
    additional_resources = depset(ctx.files.app_icons)
    if additional_resources:
        additional_resource_sets.append(AppleResourceSet(
            resources=additional_resources,
        ))

    # TODO(b/36557429): Add support for macOS frameworks.
    # The loadable-bundle binary produced by the rule's deps.
    binary_artifact = binary_support.get_binary_provider(
        ctx.attr.deps, apple_common.AppleLoadableBundleBinary).binary
    deps_objc_providers = providers.find_all(ctx.attr.deps, "objc")
    # Delegate the actual bundling (archive layout, resources, extra
    # contents) to the shared bundler.
    additional_providers, legacy_providers, additional_outputs = bundler.run(
        ctx,
        "MacosBundleArchive", "macOS executable bundle",
        ctx.attr.bundle_id,
        binary_artifact=binary_artifact,
        additional_bundlable_files=_additional_contents_bundlable_files(
            ctx, ctx.attr.additional_contents),
        additional_resource_sets=additional_resource_sets,
        deps_objc_providers=deps_objc_providers,
    )

    # TODO(b/36556789): Add support for "bazel run".
    return struct(
        files=additional_outputs,
        providers=[
            MacosBundleBundleInfo(),
        ] + additional_providers,
        **legacy_providers
    )
db90378ffece90caf08666f35892a78b970e1003
3,632,888
import logging


def combine_ecgs_and_clinical_parameters(ecgs, clinical_parameters):
    """
    Combines ECGs and their corresponding clinical parameters.

    Records without clinical parameters are skipped with a warning.

    :param ecgs: Mapping of record ID -> ECG data (itself a mapping)
    :param clinical_parameters: Mapping of record ID -> clinical parameters
    :return: Mapping of record ID -> merged ECG + clinical parameter dict
    """
    combined = {}
    for record_id, ecg in ecgs.items():
        if record_id not in clinical_parameters:
            logging.warning(
                'No clinical parameters available in datapipeline for record "{}". Skipping record.'.format(record_id))
            continue
        # Copy the ECG so the input mapping is never mutated, then overlay
        # the patient's clinical parameters.
        merged = dict(ecg)
        merged.update(clinical_parameters[record_id])
        combined[record_id] = merged
    return combined
91ca17f62ff36776980a74ecd5334ae9a55ade18
3,632,889
def is_territory_group(group: inkex.ShapeElement) -> bool:
    """
    Checks if element is a territory group.

    It is a territory group if it is a non-layer Group with two or three
    children: exactly one territory, exactly one center-point group, and --
    when there are three children -- exactly one title element.

    :param group: element to check
    :return: True iff the element satisfies all territory-group conditions
    """
    valid = isinstance(group, inkex.Group)
    valid = valid and not isinstance(group, inkex.Layer)
    valid = valid and len(group.getchildren()) in [2, 3]
    valid = valid and len(get_territories(group, is_recursive=False)) == 1
    valid = valid and len(group.xpath(f"./{Svg.GROUP}[{Svg.RECTANGLE} and {Svg.TEXT}]")) == 1
    # BUG FIX: `and` binds tighter than `or`, so the original
    #     valid = valid and (two children) or (one title)
    # could return True for an element that failed every earlier check but
    # happened to contain one title. The disjunction must be parenthesized
    # as a whole.
    valid = valid and ((len(group.getchildren()) == 2) or (len(group.xpath(f"./{Svg.TITLE}")) == 1))
    return valid
5fcb8ef53e915f6040e9d661076e7fa2867c2620
3,632,890
def filter_tracks(tracks, size_th, mask_file):
    """Remove, in place, tracks none of whose detections pass the filter.

    A track is kept when at least one of its dumped detections survives
    ``filter_detections(dets, size_th, mask_file)``; otherwise it is deleted
    from ``tracks``. The (mutated) list is also returned for convenience.
    """
    print('filtering tracks...')
    # Collect indices of tracks with no surviving detection.
    doomed = [
        idx for idx, track in enumerate(tracks)
        if not np.any(filter_detections(track.dump(), size_th, mask_file))
    ]
    print('deleting %d tracks...' % len(doomed))
    # Delete from the back so earlier indices stay valid.
    for idx in reversed(doomed):
        del tracks[idx]
    return tracks
767bd8cdef8a9a949b89e969b9c106d48c9a0fae
3,632,891
def disable_chat_bot_callback(message):
    """
    Disable the chat bot for the guild the message was sent in.

    :param message: The message whose guild identifies where to disable.
    :return: Whatever ``disable_chat_bot`` returns for that guild.
    """
    guild_name = message.guild.name
    return disable_chat_bot(guild_name)
c98d2fba6e91158df614a43b56b798947a88771e
3,632,892
import nibabel as nib
import os


def write_anat(t1w, bids_path, raw=None, trans=None, landmarks=None,
               deface=False, overwrite=False, verbose=False):
    """Put anatomical MRI data into a BIDS format.

    Given a BIDS directory and a T1 weighted MRI scan for a certain subject,
    format the MRI scan to be in BIDS format and put it into the correct
    location in the bids_dir. If a transformation matrix is supplied, a
    sidecar JSON file will be written for the T1 weighted data.

    Parameters
    ----------
    t1w : str | pathlib.Path | nibabel image object
        Path to a T1 weighted MRI scan of the subject. Can be in any format
        readable by nibabel. Can also be a nibabel image object of a T1
        weighted MRI scan. Will be written as a .nii.gz file.
    bids_path : BIDSPath
        The file to write. The `mne_bids.BIDSPath` instance passed here
        **must** have the ``root`` and ``subject`` attributes set.
    raw : instance of Raw | None
        The raw data of `subject` corresponding to `t1w`. If `raw` is None,
        `trans` has to be None as well.
    trans : instance of mne.transforms.Transform | str | None
        The transformation matrix from head coordinates to MRI coordinates.
        Can also be a string pointing to a .trans file containing the
        transformation matrix. If None, no sidecar JSON file will be written
        for `t1w`.
    landmarks : instance of DigMontage | str
        The DigMontage or filepath to a DigMontage with landmarks that can be
        passed to provide information for defacing. Landmarks can be
        determined from the head model using `mne coreg` GUI, or they can be
        determined from the MRI using `freeview`.
    deface : bool | dict
        If False, no defacing is performed. If True, deface with default
        parameters. `trans` and `raw` must not be `None` if True. If dict,
        accepts the following keys:

        - `inset`: how far back in millimeters to start defacing relative
          to the nasion (default 20)
        - `theta`: is the angle of the defacing shear in degrees relative
          to the normal to the plane passing through the anatomical
          landmarks (default 35).
    overwrite : bool
        Whether to overwrite existing files or data in files. Defaults to
        False. If overwrite is True, any existing files with the same BIDS
        parameters will be overwritten with the exception of the
        `participants.tsv` and `scans.tsv` files. For these files, parts of
        pre-existing data that match the current data will be replaced. If
        overwrite is False, no existing data will be overwritten or replaced.
    verbose : bool
        If verbose is True, this will print a snippet of the sidecar files.
        If False, no content will be printed.

    Returns
    -------
    bids_path : BIDSPath
        Path to the anatomical scan in the BIDS directory.
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')

    # Defacing needs landmark positions in MRI voxel space, which requires
    # either explicit landmarks or a (raw, trans) pair to derive them.
    if deface and (trans is None or raw is None) and landmarks is None:
        raise ValueError('The raw object, trans and raw or the landmarks '
                         'must be provided to deface the T1')

    # Check if the root is available
    if bids_path.root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')
    # Work on a copy so the caller's BIDSPath is left untouched.
    bids_path = bids_path.copy()

    # This file is anat.
    if bids_path.datatype is None:
        bids_path.update(datatype='anat')

    # Set extension and suffix hard-coded to T1w and compressed Nifti.
    bids_path.update(suffix='T1w', extension='.nii.gz')

    # Create the directory for the T1w dataset.
    bids_path.directory.mkdir(exist_ok=True, parents=True)

    # Try to read our T1 file and convert to MGH representation.
    try:
        t1w = _path_to_str(t1w)
    except ValueError:
        # t1w -> str conversion failed, so maybe the user passed an nibabel
        # object instead of a path.
        if type(t1w) not in nib.all_image_classes:
            raise ValueError('`t1w` must be a path to a T1 weighted MRI data '
                             'file , or a nibabel image object, but it is of '
                             'type "{}"'.format(type(t1w)))
    else:
        # t1w -> str conversion in the try block was successful, so load the
        # file from the specified location. We do this here and not in the
        # try block to keep the try block as short as possible.
        t1w = nib.load(t1w)

    t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)
    # XYZT_UNITS = NIFT_UNITS_MM (10 in binary or 2 in decimal)
    # seems to be the default for Nifti files
    # https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/xyzt_units.html
    if t1w.header['xyzt_units'] == 0:
        t1w.header['xyzt_units'] = np.array(10, dtype='uint8')

    # Check if we have necessary conditions for writing a sidecar JSON.
    if trans is not None or landmarks is not None:
        # MGH representation is what the landmark/voxel conversion helpers
        # operate on.
        t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)

        if landmarks is not None:
            if raw is not None:
                raise ValueError('Please use either `landmarks` or `raw`, '
                                 'which digitization to use is ambiguous.')
            if isinstance(landmarks, str):
                landmarks, coord_frame = read_fiducials(landmarks)
                landmarks = np.array([landmark['r'] for landmark in
                                      landmarks], dtype=float)  # unpack
            else:
                # Extract LPA/nasion/RPA from the montage's digitization.
                coords_dict, coord_frame = _get_fid_coords(landmarks.dig)
                landmarks = np.asarray((coords_dict['lpa'],
                                        coords_dict['nasion'],
                                        coords_dict['rpa']))

            # Branch on the coordinate frame the landmarks live in; the
            # sidecar needs them in MRI voxel space.
            if coord_frame == FIFF.FIFFV_COORD_HEAD:
                if trans is None:
                    raise ValueError('Head space landmarks provided, '
                                     '`trans` required')
                mri_landmarks = _meg_landmarks_to_mri_landmarks(
                    landmarks, trans)
                mri_landmarks = _mri_landmarks_to_mri_voxels(
                    mri_landmarks, t1_mgh)
            elif coord_frame in (FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                                 FIFF.FIFFV_COORD_MRI):
                if trans is not None:
                    raise ValueError('`trans` was provided but `landmark` '
                                     'data is in mri space. Please use '
                                     'only one of these.')
                if coord_frame == FIFF.FIFFV_COORD_MRI:
                    # MRI surface coords are in meters; convert mm -> voxels.
                    landmarks = _mri_landmarks_to_mri_voxels(landmarks * 1e3,
                                                             t1_mgh)
                mri_landmarks = landmarks
            else:
                raise ValueError('Coordinate frame not recognized, '
                                 f'found {coord_frame}')
        elif trans is not None:
            # Get trans and ensure it is from head to MRI.
            trans, _ = _get_trans(trans, fro='head', to='mri')

            if not isinstance(raw, BaseRaw):
                raise ValueError('`raw` must be specified if `trans` '
                                 'is not None')

            # Prepare to write the sidecar JSON:
            # extract MEG landmarks from the raw digitization.
            coords_dict, coord_fname = _get_fid_coords(raw.info['dig'])
            meg_landmarks = np.asarray((coords_dict['lpa'],
                                        coords_dict['nasion'],
                                        coords_dict['rpa']))
            mri_landmarks = _meg_landmarks_to_mri_landmarks(
                meg_landmarks, trans)
            mri_landmarks = _mri_landmarks_to_mri_voxels(
                mri_landmarks, t1_mgh)

        # Write sidecar.json with the anatomical landmarks in voxel space.
        t1w_json = dict()
        t1w_json['AnatomicalLandmarkCoordinates'] = \
            {'LPA': list(mri_landmarks[0, :]),
             'NAS': list(mri_landmarks[1, :]),
             'RPA': list(mri_landmarks[2, :])}
        fname = bids_path.copy().update(extension='.json')
        if op.isfile(fname) and not overwrite:
            raise IOError('Wanted to write a file but it already exists and '
                          '`overwrite` is set to False. File: "{}"'
                          .format(fname))
        _write_json(fname, t1w_json, overwrite, verbose)

        if deface:
            t1w = _deface(t1w, mri_landmarks, deface)

    # Save anatomical data.
    if op.exists(bids_path):
        if overwrite:
            os.remove(bids_path)
        else:
            raise IOError(f'Wanted to write a file but it already exists and '
                          f'`overwrite` is set to False. File: "{bids_path}"')

    nib.save(t1w, bids_path.fpath)

    return bids_path
985ac12171dc0681cbbf9b3d8587f164d4ba8fdb
3,632,893
def new_post(request):
    """Render the publication form; on a valid POST, save the post.

    The submitting user is recorded as the author, and a successful save
    redirects to the site root.
    """
    form = PostForm(request.POST or None, files=request.FILES or None)
    if not form.is_valid():
        # GET request or invalid submission: re-render the form.
        return render(request, 'new.html', {'form': form})
    form.instance.author = request.user
    form.save()
    return redirect('/')
31dab55dbbe450ff46a2a111ed9996ef6fbba2ae
3,632,894
import numpy as np


def get_fake_prediction(anchors, coords, config):
    """
    Generates the anchor labels with random noise to fake a good prediction.
    Used for testing non-model related code.

    anchors
        Array of shape (feature_size**2, 2) with anchor (x, y) coordinates
        in image space, as generated from get_anchors().
    coords
        The list of ground truth point coordinates as (x, y) pairs.
    config
        The configuration dictionary. See ppn.config.ppn_config. Keys used:
        'r_near', 'r_far' (distance thresholds in feature-map units),
        'image_size' and 'feature_size'.

    Returns (y_conf, y_reg):
        y_conf -- int8 array of shape (feature_size, feature_size), 1 for
        positive anchors, 0 otherwise.
        y_reg -- float array of shape (feature_size, feature_size, 2) with
        noisy regression offsets for positive anchors.
    """
    r_near = config['r_near']
    r_far = config['r_far']
    img_size = config['image_size']
    feature_size = config['feature_size']

    def noise():
        # Small Gaussian jitter so the fake regression targets are not exact.
        return np.random.normal(loc=0.0, scale=0.1)

    step = img_size / feature_size
    halfstep = step * 0.5

    y_conf = np.full(anchors.shape[0], 0.0, dtype=np.int8)
    y_reg = np.zeros(anchors.shape)

    # For each point, find the nearest anchor and set its regression target.
    # This ensures that most points have an associated anchor.
    for (x, y) in coords:
        x_norm = (x - halfstep) / step
        y_norm = (y - halfstep) / step
        r = int(np.round(y_norm))
        c = int(np.round(x_norm))
        anchor_index = r * feature_size + c
        y_conf[anchor_index] = 1
        # BUG FIX: the original called `nosie()` (typo), raising NameError
        # whenever coords was non-empty.
        y_reg[anchor_index][0] = noise() + (x - anchors[anchor_index][0]) / step
        y_reg[anchor_index][1] = noise() + (y - anchors[anchor_index][1]) / step

    # For each anchor, label positive/negative by the distance (in
    # feature-map units) to the nearest ground-truth point. Skipped when
    # there are no points (np.argmin on an empty list would raise).
    if coords:
        for i in range(0, len(anchors)):
            x, y = anchors[i]
            x /= step
            y /= step

            distances = []
            for (px, py) in coords:
                px /= step
                py /= step
                distances.append(np.sqrt((x - px)**2 + (y - py)**2))

            near = np.argmin(distances)
            dist = distances[near]
            if dist <= r_near:
                y_conf[i] = 1
                px, py = coords[near]
                px /= step
                py /= step
                y_reg[i][0] = noise() + (px - x)
                y_reg[i][1] = noise() + (py - y)
            elif dist > r_far:
                y_conf[i] = 0

    # Reshape for use in PPN training.
    y_conf = np.reshape(y_conf, (feature_size, feature_size))
    y_reg = np.reshape(y_reg, (feature_size, feature_size) + (2,))
    return y_conf, y_reg
e05f9746c4c6255401b02975c7a76e7b0c25a4e7
3,632,895
import requests


def service(schema):
    """Service fixture built from the parsed OData schema."""
    namespaces = schema.namespaces
    # A truthy (non-empty) collection means at least one namespace exists.
    assert namespaces
    return pyodata.v2.service.Service(URL_ROOT, schema, requests)
8b48865b57487493fdf829dad62551d76b40536d
3,632,896
def register_handlers(cls, nsmap, events, stanzas):
    """Register all special handlers in a plugin."""
    # Sanity check: a method registered as a stanza handler must not also
    # appear among the event callbacks.
    stanza_methods = {method for (_, (_, method)) in stanzas}
    for (_, callbacks) in events:
        overlap = stanza_methods & set(callbacks)
        if overlap:
            raise PluginError('Stanza handler duplicated as event handler.',
                              overlap)

    register(cls, 'EVENTS', merge_events, add_events, events)
    register(cls, 'STANZAS', merge_dicts, add_dicts, stanzas)
    cls.__nsmap__ = nsmap
    return cls
c8a5b6c69632cf5c6671b84055d1457a89d31f86
3,632,897
def get_question_types(container):
    """Return the question-type keys of *container* (a dict view)."""
    question_types = container.keys()
    return question_types
0c667e893323c106319038d19f333d233a5d1e07
3,632,898
import requests
import urllib3


def uploader(dict_in):
    """Post the global dictionary to the server.

    Sends ``dict_in`` as JSON to the flask server's upload endpoint and
    reports success or failure to the caller.

    Args:
        dict_in (dict): The global dictionary in the client_gui that stores
            all the information to be sent to the server and saved in
            mongoDB.

    Returns:
        str: "Uploaded" if the request is successfully made and "failed"
        if anything goes wrong while talking to the server.
    """
    try:
        r = requests.post("http://127.0.0.1:5000/api/upload", json=dict_in)
        print(r.status_code)
        print(r.text)
        return "Uploaded"
    except (requests.exceptions.RequestException, ConnectionRefusedError):
        # BUG FIX: the original caught raw urllib3 exceptions
        # (NewConnectionError, MaxRetryError) which requests never lets
        # escape -- it wraps them in requests.exceptions.ConnectionError --
        # while reachable failures such as Timeout went uncaught.
        # RequestException is the base class of every error requests raises,
        # so it covers all of them.
        return "failed"
a454bb7e0523e48d851efb2a46d877aea12dd38e
3,632,899