content
stringlengths
22
815k
id
int64
0
4.91M
def validators(*chained_validators):
    """Combine several validator callables into a single validator.

    The returned callable passes its argument to each validator in turn
    and reports True only when every one of them accepts it.

    :param chained_validators: validator callables, each taking one match
        argument and returning a truthy/falsy verdict
    :return: a callable ``validator_chain(match) -> bool``
    """
    def validator_chain(match):  # pylint:disable=missing-docstring
        return all(validator(match) for validator in chained_validators)
    return validator_chain
35,100
def jacobian(vf):
    """Compute the Jacobian of a 3-component vectorfield pointwise.

    For each component the spatial gradients (dz, dy, dx) are stacked into
    one row, and the three rows are concatenated along the second-to-last
    axis, yielding a 3x3 Jacobian matrix per voxel.
    """
    rows = []
    for component in range(3):
        dz, dy, dx = image_gradients(vf[..., component:component + 1])
        rows.append(
            tf.concat([dz[..., None], dy[..., None], dx[..., None]], axis=-1))
    return tf.concat(rows, axis=-2)
35,101
def insert_ones(y, segment_end_ms, Ty, steps=50, background_len=10000.0):
    """Set to 1 the labels strictly after the end of an audio segment.

    The label at the segment-end step itself stays 0; the `steps` following
    labels (clipped to the end of the vector) are set to 1.

    Args:
        y (ndarray): label array of shape (1, Ty) for one training example
        segment_end_ms (int): end time of the segment in milliseconds
        Ty (int): number of output time steps
        steps (int): number of output steps after the segment to label 1
        background_len (float): total duration of the sample in milliseconds

    Returns:
        y (ndarray): the same array, updated in place
    """
    # Convert milliseconds to spectrogram time-step units.
    start = int(segment_end_ms * Ty / background_len) + 1
    stop = min(start + steps, Ty)
    y[0, start:stop] = 1.0
    return y
35,102
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
    """Build the rename mapping for a stream's parameters.

    Maps every stream parameter name (except those in ``exclude_params``)
    to its rename, falling back to the original name when no rename is
    registered.  With ``reverse=True`` the mapping goes from renamed
    strings back to the original parameter names.
    """
    mapping = {
        name: stream._rename.get(name, name)
        for name in stream.param
        if name not in exclude_params
    }
    if reverse:
        mapping = {renamed: original for original, renamed in mapping.items()}
    return mapping
35,103
def newton(f, df, x0, tolx, tolf, nmax):
    """Newton's method for finding a zero of a function.

    :param f: the function whose zero is sought
    :param df: the derivative of ``f``
    :param x0: the starting guess
    :param tolx: tolerance on the relative increment
    :param tolf: tolerance on the function value
    :param nmax: maximum number of iterations
    :return: (zero of the function, number of iterations, list of iterates)
    :raises ZeroDivisionError: if the derivative vanishes at an iterate
    """
    def delta(value):
        # Newton increment f/f'.  The original compared df(value) itself
        # against np.spacing(1), which wrongly treated every *negative*
        # derivative as "zero"; compare its magnitude instead.  Also raise
        # instead of calling exit(), which killed the whole interpreter.
        d = df(value)
        if abs(d) <= np.spacing(1):
            raise ZeroDivisionError("Derivata nulla")  # zero derivative
        return f(value) / d

    def prossimax(value):
        # One Newton step: x_{k+1} = x_k - f(x_k) / f'(x_k)
        return value - delta(value)

    x = prossimax(x0)
    fx = f(x)
    it, xk = 1, [x]
    # Stop on iteration budget, small residual, or small relative increment.
    while it < nmax and abs(fx) >= tolf and abs(delta(x)) >= tolx * abs(x):
        x = prossimax(x)
        xk.append(x)
        fx = f(x)
        it += 1
    return x, it, xk
35,104
def preprocess(x, scale='std', clahe=True):
    """Preprocess a batch of input images.

    Args:
        x: batch of input images
        scale: 'norm'/'normalize' scales into [0, 1];
            'std'/'standardize' shifts to zero mean and unit deviation;
            None leaves the values untouched
        clahe: apply contrast-limited histogram equalization first
            (reduces images to a single channel)

    Returns:
        The preprocessed batch as float32.
    """
    if clahe is True:
        # rgb2clahe collapses each image to one channel; restore a channel axis.
        x = np.array([np.expand_dims(rgb2clahe(img), 2) for img in x])
    x = np.float32(x)
    mode = scale.lower() if scale is not None else None
    if mode in ('norm', 'normalize'):
        x /= x.max()
    elif mode in ('std', 'standardize'):
        # eps guards against a zero standard deviation.
        x = (x - x.mean()) / (x.std() + np.finfo(float).eps)
    return x
35,105
def process_commands(operators, log_level, log_file, mip, dry_run):
    """Result callback invoked with an iterable of all chained subcommands.

    Each subcommand returns a stream operator; chaining them feeds one into
    the next, similar to a unix pipe.  The final stream is drained so every
    stage actually executes.
    """
    # A tuple would not work correctly here, so start from a one-element list.
    pipeline = [get_initial_task(), ]
    # Thread the stream through every operator in order.
    for stage in operators:
        pipeline = stage(pipeline)
    # Evaluate the resulting stream, discarding the items.
    if pipeline:
        for _ in pipeline:
            pass
35,106
def endswith(s, tags):
    """Like ``str.endswith`` but *tags* may also be any iterable of suffixes.

    >>> endswith('a.dvi', ('.log', '.aux', '.dvi', 'busy'))
    True

    :param s: the string to test
    :param tags: a single suffix string, or an iterable of suffix strings
    :return: True if ``s`` ends with any of the given suffixes
    :raises TypeError: if *tags* is neither a string nor an iterable
    """
    if isinstance(tags, str):
        return s.endswith(tags)
    try:
        # str.endswith accepts a tuple of suffixes natively.  Converting via
        # tuple() also generalizes the original list/tuple-only check to any
        # iterable, which is what the docstring promises.
        return s.endswith(tuple(tags))
    except TypeError:
        raise TypeError('tags must be a string or an iterable of strings')
35,107
def parse(data):
    """
    Takes the byte string of an x509 certificate and returns a dict containing
    the info in the cert.

    :param data: The certificate byte string
    :return: A dict with keys: version, serialNumber, algorithm, signature,
        issuer, subject, notBefore, notAfter, publicKeyAlgorithm,
        subjectPublicKey and (when present) subjectAltName; or None when the
        outer structure is not the expected three-element SEQUENCE.
    """
    structure = load(data)
    # An X.509 certificate is a SEQUENCE of exactly three parts:
    # tbsCertificate, signatureAlgorithm, signatureValue.
    if structure[0][0] != Sequence:
        return None
    body = structure[0][1]
    if len(body) != 3:
        return None
    # Map RSA signature/public-key algorithm OIDs to readable names.
    algo_oid_map = {
        '1.2.840.113549.1.1.1': 'rsaEncryption',
        '1.2.840.113549.1.1.2': 'md2WithRSAEncryption',
        '1.2.840.113549.1.1.4': 'md5WithRSAEncryption',
        '1.2.840.113549.1.1.5': 'sha1WithRSAEncryption',
        '1.2.840.113549.1.1.11': 'sha256WithRSAEncryption',
        '1.2.840.113549.1.1.12': 'sha384WithRSAEncryption',
        '1.2.840.113549.1.1.13': 'sha512WithRSAEncryption'
    }
    cert_struct = body[0][1]
    output = {}
    output['algorithm'] = body[1][1][0][1]
    if output['algorithm'] in algo_oid_map:
        output['algorithm'] = algo_oid_map[output['algorithm']]
    output['signature'] = body[2][1]
    # Walk the tbsCertificate fields by running index i.
    i = 0
    # At least one CA cert on Windows was missing the version
    if cert_struct[i][0] == 0x00:
        # Stored value is 0-based (v3 is encoded as 2), hence the +1.
        output['version'] = cert_struct[i][1][0][1] + 1
        i += 1
    else:
        output['version'] = 3
    output['serialNumber'] = cert_struct[i][1]
    i += 1
    # The algorithm is repeated at cert_struct[i][1][0][1]
    i += 1
    output['issuer'] = parse_subject(cert_struct[i])
    i += 1
    # Validity window: (notBefore, notAfter) pair.
    output['notBefore'] = cert_struct[i][1][0][1]
    output['notAfter'] = cert_struct[i][1][1][1]
    i += 1
    output['subject'] = parse_subject(cert_struct[i])
    i += 1
    output['publicKeyAlgorithm'] = cert_struct[i][1][0][1][0][1]
    if output['publicKeyAlgorithm'] in algo_oid_map:
        output['publicKeyAlgorithm'] = algo_oid_map[output['publicKeyAlgorithm']]
    output['subjectPublicKey'] = cert_struct[i][1][1][1]
    i += 1
    # Optional context-tagged trailing fields of tbsCertificate.
    for j in range(i, len(cert_struct)):
        if cert_struct[j][0] == 0x01:
            # Issuer unique identifier
            pass
        elif cert_struct[j][0] == 0x02:
            # Subject unique identifier
            pass
        elif cert_struct[j][0] == 0x03:
            output['subjectAltName'] = parse_subject_alt_name(cert_struct[j])
    return output
35,108
def to_auto_diff(x):
    """Coerce *x* to an automatically differentiated function (ADF).

    ADF instances (including subclasses) are returned unchanged.  Known
    constant types are wrapped in an ADF with empty derivative maps, since
    constants carry no derivative information.  Anything else raises
    NotImplementedError.
    """
    if isinstance(x, ADF):
        return x
    #! In Python 2.6+, numbers.Number could be used instead, here:
    if not isinstance(x, CONSTANT_TYPES):
        raise NotImplementedError(
            'Automatic differentiation not yet supported for {0:} objects'.format(
                type(x))
            )
    # Constants have no derivatives to define.
    return ADF(x, {}, {}, {})
35,109
def voc_eval(class_recs: dict, detect: dict, iou_thresh: float = 0.5, use_07_metric: bool = False): """ recall, precision, ap = voc_eval(class_recs, detection, [iou_thresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. Please make sure that the class_recs only have one class annotations. precision = tp / (tp + fp) recall = tp / (tp + fn) Args: class_recalls: recalls dict of a class class_recs[image_name]={'bbox': []}. detection: Path to annotations detection={'image_ids':[], bbox': [], 'confidence':[]}. [iou_thresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) Returns: a dict of result including true_positive_number, false_positive_number, recall, precision and average_precision. Raises: TypeError: the data format is not np.ndarray. """ # format data # class_rec data load npos = 0 for imagename in class_recs.keys(): if not isinstance(class_recs[imagename]['bbox'], np.ndarray): raise TypeError detected_num = class_recs[imagename]['bbox'].shape[0] npos += detected_num class_recs[imagename]['det'] = [False] * detected_num # detections data load image_ids = detect['image_ids'] confidence = detect['confidence'] BB = detect['bbox'] if not isinstance(confidence, np.ndarray): raise TypeError if not isinstance(BB, np.ndarray): raise TypeError # sort by confidence sorted_ind = np.argsort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) iou_max = -np.inf BBGT = R['bbox'].astype(float) if BBGT.size > 0: overlaps = compute_overlaps(BBGT, bb) iou_max = np.max(overlaps) iou_max_index = np.argmax(overlaps) if iou_max > iou_thresh: if not R['det'][iou_max_index]: tp[d] = 1. R['det'][iou_max_index] = 1 else: fp[d] = 1. else: fp[d] = 1. 
# compute precision recall fp = np.cumsum(fp) tp = np.cumsum(tp) true_positive_number = tp[-1] false_positive_number = fp[-1] recall = tp / float(npos) # avoid divide by zero in case the first detection matches # a difficult ground truth precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) average_precision = voc_ap(recall, precision, use_07_metric) result = {} result['true_positive_number'] = true_positive_number result['false_positive_number'] = false_positive_number result['recall'] = recall result['precision'] = precision result['average_precision'] = average_precision return result
35,110
def data_upgrades():
    """Add any optional data upgrade migrations here!"""
    # Seed the ModuleGrids table with the grid/filter column definitions for
    # module 23: the ID, Name and description columns (with autocomplete
    # filter sources for the latter two).
    query ='''
    INSERT INTO public."ModuleGrids"(
    "ID","Module_ID", "TypeObj", "Name", "Label", "GridRender", "GridSize", "CellType", "GridOrder", "QueryName", "Options", "FilterOrder", "FilterSize", "IsSearchable", "FilterDefaultValue", "FilterRender", "FilterType", "FilterClass", "Status", "ColumnParams")
    VALUES
    (300,23,NULL,'ID','ID',2,'{"width" : 120,"maxWidth" : 350,"minWidth" : 100}','integer',1,NULL,NULL,10,2,true,NULL,4,'Text',NULL,NULL,'{"pinned" : "left" }'),
    (301,23,NULL,'Name','Nom',2,'{"width" : 120,"maxWidth" : 350,"minWidth" : 100}','string',2,NULL,'{"source": "autocomplete/clients/Name", "minLength" : 3}',10,2,true,NULL,4,'AutocompleteEditor',NULL,NULL,NULL),
    (302,23,NULL,'description','Description',2,'{"width" : 120,"maxWidth" : 750,"minWidth" : 100}','string',3,NULL,'{"source" : "autocomplete/clients/description", "minLength" : 3}',10,2,true,NULL,4,'AutocompleteEditor',NULL,NULL,NULL);
    '''
    op.execute(query)
35,111
def birth() -> character.Character:
    """Give birth to Krydort Wolverry with his full stat block."""
    krydort = character.Character('Krydort Wolverry')

    # Base attributes.
    attribute_values = {
        'INT': 8, 'REF': 6, 'DEX': 6, 'BODY': 6, 'SPD': 4,
        'EMP': 10, 'CRA': 7, 'WILL': 10, 'LUCK': 3,
    }
    for attribute, value in attribute_values.items():
        setattr(krydort.attributes, attribute, value)

    # Skills, grouped by their governing attribute.
    skill_values = {
        'INT': {'Business': 4, 'Education': 3, 'CommonSpeech': 8,
                'ElderSpeech': 4, 'Dwarven': 2, 'Streetwise': 4},
        'REF': {'DodgingEscape': 2, 'SmallBlades': 4, 'Swordsmanship': 2},
        'DEX': {'Athletics': 2},
        'BODY': {'Endurance': 2},
        'EMP': {'Charisma': 6, 'Deceit': 4, 'Gambling': 2,
                'GroomingAndStyle': 1, 'HumanPerception': 4, 'Persuasion': 6},
        'WILL': {'Courage': 2, 'ResistCoercion': 5},
    }
    for attribute, skills in skill_values.items():
        for skill, value in skills.items():
            setattr(krydort.skills[attribute], skill, value)

    return krydort
35,112
def imagePath(image):
    """Return the full Qt resource path for the given image name."""
    resource_prefix = ":/images"
    return os.path.join(resource_prefix, image)
35,113
def compress(from_name: str, to_name: Optional[str]=None, remove_original: bool=False) -> None:
    """Compress ``from_name`` with bzip2 and store the result as ``to_name``.

    ``to_name`` defaults to ``from_name`` with ``.bz2`` appended.  When
    ``remove_original`` is True, ``from_name`` is deleted once the
    compression finishes.
    """
    if to_name is None:
        to_name = from_name
    if not to_name.endswith('.bz2'):
        to_name += '.bz2'
    LOGGER.info('Compress %s to %s', from_name, to_name)
    with bz2.open(to_name, 'w') as outh, \
            open(from_name, 'rb') as inh:
        # Stream in fixed-size chunks so large files never sit in memory.
        for data in iter(lambda: inh.read(10240), b''):
            outh.write(data)
    if remove_original:
        LOGGER.debug('Remove %s', from_name)
        os.remove(from_name)
35,114
def LinearCombinationOfContVars(doc:NexDoc, resultName, contVar1:NexVar, coeff1, contVar2:NexVar, coeff2):
    """Calculates a linear combination of two continuous variables.

    Delegates to the NexRun bridge.  Note that ``locals()`` forwards every
    parameter by name, so the parameter names are part of the wire protocol
    and must not be renamed.
    """
    return NexRun("LinearCombinationOfContVars", locals())
35,115
def main():
    """Main function: parse CLI flags and run the requested sidebar jobs.

    Each flag triggers one independent job.  Errors from a job now propagate
    unchanged: the original ``except ValueError: raise ValueError`` blocks
    re-raised a bare ValueError, discarding the message and traceback, so
    they were removed.
    """
    config = get_config()
    reddit = init_reddit(config)
    subreddit = config['reddit']['subreddit']

    arg = argparse.ArgumentParser()
    arg.add_argument("--clock", help="set sidebar with the current time (utc)", action="store_true")
    arg.add_argument("--vos", help="set sidebar with the current Voice of Seren", action="store_true")
    arg.add_argument("--dxp", help="set sidebar with time until the end of dxp", action="store_true")
    arg.add_argument("--news", help="set sidebar with the last 3 RuneScape news", action="store_true")
    arg.add_argument("--wiki", help="download the subreddit's wiki pages", action="store_true")
    arg.add_argument("--traffic", help="get subreddits traffic", action="store_true")
    args = arg.parse_args()

    if args.clock:
        push_sidebar_update(reddit, 'clock', get_time(), subreddit)
        print("'clock' completed and pushed to %s" % subreddit)
    if args.vos:
        push_sidebar_update(reddit, 'vos',
                            get_active_vos(config['twitter']['consumer_key'],
                                           config['twitter']['consumer_secret'],
                                           config['twitter']['access_token'],
                                           config['twitter']['access_token_secret']),
                            subreddit)
        print("'vos' completed and pushed to %s" % subreddit)
    if args.news:
        push_sidebar_update(reddit, 'news', get_latest_news(), subreddit)
        print("'news' completed and pushed to %s" % subreddit)
    if args.dxp:
        push_sidebar_update(reddit, 'dxp',
                            get_dxp(config['dxp']['start'], config['dxp']['end'],
                                    config['dxp']['news_url'],
                                    config['dxp']['portables_url']),
                            subreddit)
        print("'dxp' completed and pushed to %s" % subreddit)
    if args.wiki:
        save_wiki(reddit, subreddit)
    if args.traffic:
        get_traffic(reddit, subreddit)
35,116
def handler404(request, exception):  # pylint: disable=unused-argument
    """404: NOT FOUND ERROR handler — render the site's 404 page."""
    context = get_base_context(request)
    body = render_to_string("404.html", request=request, context=context)
    return HttpResponseNotFound(body)
35,117
def get_notification_user(operations_shift):
    """Resolve who to notify for a shift: Shift > Site > Project > Reports to.

    Returns the user id of the first candidate (shift supervisor, then the
    site's account supervisor, then the project's account manager) that is
    not the document owner; implicitly returns None when nobody qualifies.

    NOTE(review): ``doc`` is not defined in this function's scope — it is
    presumably a module-level/global document set by the calling hook;
    confirm, otherwise this raises NameError at runtime.
    """
    if operations_shift.supervisor:
        supervisor = get_employee_user_id(operations_shift.supervisor)
        if supervisor != doc.owner:
            return supervisor
    operations_site = frappe.get_doc("Operations Site", operations_shift.site)
    if operations_site.account_supervisor:
        account_supervisor = get_employee_user_id(operations_site.account_supervisor)
        if account_supervisor != doc.owner:
            return account_supervisor
    if operations_site.project:
        project = frappe.get_doc("Project", operations_site.project)
        if project.account_manager:
            account_manager = get_employee_user_id(project.account_manager)
            if account_manager != doc.owner:
                return account_manager
35,118
def list_groups(
    namespace: str = "default", account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> List[Dict[str, Any]]:
    """List all QuickSight Groups.

    Parameters
    ----------
    namespace : str
        The namespace. Currently, you should set this to default .
    account_id : str, optional
        If None, the account ID will be inferred from your boto3 session.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    List[Dict[str, Any]]
        Groups.

    Examples
    --------
    >>> import awswrangler as wr
    >>> groups = wr.quicksight.list_groups()
    """
    list_kwargs = {
        "func_name": "list_groups",
        "attr_name": "GroupList",
        "account_id": account_id,
        "boto3_session": boto3_session,
        "Namespace": namespace,
    }
    return _list(**list_kwargs)
35,119
def find_tm5_output(path, expname=None, varname=None, freq=None):
    """Find TM5 output files of the form varname + "_AER"[freq] + * + dates + ".nc".

    :param path: directory to scan
    :param expname: optional experiment name (any name matches when omitted)
    :param varname: optional variable name; when omitted, match any
        alphanumeric variable followed by ``_AER``
    :param freq: optional frequency fragment (e.g. 'mon', 'fx')
    :return: list of full paths to matching files
    """
    subexpr = expname if expname else ".*"
    if varname is None:
        # Select an alphanumeric variable name followed by _AER + * + _expname_
        # + * + dates.nc, e.g. "emioa_AERmon_<expname>_*_185001-185012.nc"
        # (each date has 6-12 digits).
        expr = re.compile("(([0-9A-Za-z]+)\w_AER.*)_" + subexpr + "_.*_[0-9]{6,12}-[0-9]{6,12}\.nc$")
    else:
        # The original had separate but *identical* branches for freq == 'fx'
        # and everything else; they are merged here.  Guard against
        # freq=None, which previously raised a TypeError on concatenation.
        expr = re.compile(varname + "_.*" + (freq or "") + ".*_" + subexpr + "_.*.nc$")
    # List the directory once (the original scanned it twice and discarded
    # the first result).
    return [os.path.join(path, f) for f in os.listdir(path) if re.match(expr, f)]
35,120
def big_diagram(BFIELD=1000,output='S0'):
    """ Main code to plot 'big' diagram with the following components:
        - Theoretical absorption spectrum (top panel)
        - Breit Rabi diagram for 0 to specified B-field (left)
        - Energy levels for ground and excited states (bottom panel)
        - Arrows for each transition, underneath the corresponding part of
          the spectrum

    :param BFIELD: magnetic field strength in Gauss (1 G = 1e-4 T)
    :param output: which Stokes parameter to plot: 'S0', 'S1', 'S2' or 'S3'
    :return: the assembled matplotlib Figure
    """
    ##
    ## First part - calculate the absorption spectrum
    ##

    # Define the detuning axis based on what the magnetic field strength is (in GHz)
    # Values for BFIELD should be given in Gauss (1 G = 1e-4 T)
    Dmax = max(6,5 + (BFIELD/1e4 * 3 * mu_B))
    det_range = np.linspace(-Dmax,Dmax,int(3e4))

    # Input parameters to calculate the spectrum
    Bfield = BFIELD #alias
    ELEM = 'Rb'
    DLINE = 'D2'
    RB85FRAC = 0.0 # Pure Rb87
    LCELL = 1e-3
    TEMP = 100 # C ~ 373K

    # Voigt, horizontal polarisation
    pol = [1,0,0]
    p_dict = {'T':TEMP,'lcell':LCELL,'Elem':ELEM,'rb85frac':RB85FRAC,'Dline':DLINE,
        'Bfield':BFIELD,'Btheta':90*np.pi/180,'Bphi':45*np.pi/180,'BoltzmannFactor':True}
    [S0,S1,S2,S3] = get_spectra(det_range*1e3,pol,p_dict,outputs=['S0','S1','S2','S3'])

    # Transition energies and strengths for the sigma-minus (l), sigma-plus (r)
    # and pi (z) components.
    lenergy87, lstrength87, ltransno87, lgl87, lel87, \
    renergy87, rstrength87, rtransno87, rgl87, rel87, \
    zenergy87, zstrength87, ztransno87, zgl87, zel87 = calc_chi_energies([1], p_dict)

    ##
    ## Second part - calculate the Breit-Rabi diagram
    ##
    BreitRabiVals = np.linspace(0,BFIELD,2000)
    # Duplicate the last field value; it is later stretched to fill the
    # horizontal span of the level panels.
    BreitRabiVals = np.append(BreitRabiVals,BreitRabiVals[-1])
    Bstep = BreitRabiVals[1] - BreitRabiVals[0]

    # Calculate Zeeman-shifted energy levels in parallel (uses multiprocessing module)
    po = Pool()
    res = po.map_async(eval_energies,(("Rb87","D2",BreitRabiVals[k],) for k in range(len(BreitRabiVals))))
    energies = res.get()
    gnd_energies = np.zeros((len(energies[0][0]),len(BreitRabiVals)))
    exc_energies = np.zeros((len(energies[0][1]),len(BreitRabiVals)))
    for jj, energyB in enumerate(energies):
        gnd_energies[:,jj] = energyB[0]
        exc_energies[:,jj] = energyB[1]
    po.close()
    po.join()

    # Energies at largest B-field value
    final_gnd_energies, final_exc_energies = eval_energies(("Rb87","D2",BreitRabiVals[-1]))

    ##
    ## Third part - calculate state decomposition
    ##
    ## Below values are for Rb-87. **Change for other atoms**.
    I=3.0/2; L=0; S=1.0/2; J=1.0/2
    output_states = AM_StateDecomp(I,L,S,J,atom='Rb',B=BFIELD/1e4)
    print('\nState decomposition at B = ',BFIELD/1e4)
    print(output_states)

    ##
    ## Fourth part - arrange the plot panels
    ##
    fig = plt.figure("Big diagram at "+str(BFIELD/1e4)+' T',facecolor=None,figsize=(12,8))
    plt.clf()

    # Subplot arrangement (grid units): spectrum on top, Breit-Rabi panels
    # on the left, level diagrams bottom-right.
    xBR = 2
    xspec = 6
    yBRe = 3
    yBRg = 5
    yspec = 4
    xx = xBR + xspec
    yy = yBRe + yBRg + yspec
    ax_spec = plt.subplot2grid((yy,xx),(0,xBR),colspan=xspec,rowspan=yspec)
    ax_excBR = plt.subplot2grid((yy,xx),(yspec,0),colspan=xBR,rowspan=yBRe)
    ax_gndBR = plt.subplot2grid((yy,xx),(yspec+yBRe,0),colspan=xBR,rowspan=yBRg,sharex=ax_excBR)
    ax_eLev = plt.subplot2grid((yy,xx),(yspec,xBR),colspan=xspec,rowspan=yBRe,sharex=ax_spec,sharey=ax_excBR)
    ax_gLev = plt.subplot2grid((yy,xx),(yspec+yBRe,xBR),colspan=xspec,rowspan=yBRg,sharex=ax_spec,sharey=ax_gndBR)

    # Turn off axes for eLev and gLev axes
    for ax in [ax_eLev,ax_gLev]:
        ax.set_frame_on(False)
        for parameter in [ax.get_xticklabels(),ax.get_yticklabels(),ax.get_xticklines(),ax.get_yticklines()]:
            plt.setp(parameter,visible=False)
    plt.setp(ax_excBR.get_xticklabels(),visible=False)
    ax_excBR.spines['right'].set_color('none')
    ax_gndBR.spines['right'].set_color('none')
    ax_gndBR.spines['top'].set_color('none')
    ax_excBR.spines['top'].set_color('none')
    ax_excBR.spines['bottom'].set_color('none')
    ax_gndBR.xaxis.set_ticks_position('bottom')
    ax_excBR.xaxis.set_ticks_position('none')
    ax_excBR.tick_params(axis='y',left=True,right=False)
    ax_gndBR.tick_params(axis='y',left=True,right=False)

    # axis labels
    ax_spec.set_xlabel('Detuning (GHz)')
    ax_spec.xaxis.set_label_position('top')
    ax_spec.tick_params(axis='x',bottom=True,top=True,labelbottom=False,labeltop=True)
    ax_excBR.set_ylabel('$5P_{3/2}$ energy (GHz)')
    ax_gndBR.set_ylabel('$5S_{1/2}$ energy (GHz)')
    ax_gndBR.set_xlabel('Magnetic Field (T)')

    fig.subplots_adjust(left=0.07,right=0.98,top=0.93,bottom=0.085,hspace=0.34,wspace=0)

    #Ghost axes for actually plotting the Breit-Rabi data
    # (they span the Breit-Rabi panel *and* the level panel, so the level
    # lines continue seamlessly across both).
    eleft = ax_excBR.get_position().extents[0:2]
    eright = ax_eLev.get_position().extents[2:]
    gleft = ax_gndBR.get_position().extents[0:2]
    gright = ax_gLev.get_position().extents[2:]
    ax_e_bound = np.append(eleft,eright-eleft)
    ax_g_bound = np.append(gleft,gright-gleft)
    print('\nAxes bounds for B-R diagram:')
    print(ax_e_bound)
    print(ax_g_bound)
    ax_e = fig.add_axes(ax_e_bound,frameon=False,facecolor=None)
    ax_g = fig.add_axes(ax_g_bound,frameon=False,facecolor=None)
    ax_g.set_xticks([])
    ax_g.set_yticks([])
    ax_e.set_xticks([])
    ax_e.set_yticks([])

    ##
    ## Fifth part - Add the data to the figure
    ##

    # Edit last magnetic field value so the final level segment stretches
    # across the whole level-diagram panel.
    BreitRabiVals[-1] = BreitRabiVals[-2] * ((xspec + xBR) / xBR)
    print('\nMagnetic field values (Breit-Rabi diagram)')
    print(BreitRabiVals)

    if output == 'S0':
        ax_spec.set_ylabel('Transmission, $S_{0}$')
        ax_spec.plot(det_range,S0.real,lw=2,color=d_black)
    elif output == 'S1':
        ax_spec.set_ylabel('$S_{1}$')
        ax_spec.plot(det_range,S1.real,lw=2,color=d_black)
    elif output == 'S2':
        ax_spec.set_ylabel('$S_{2}$')
        ax_spec.plot(det_range,S2.real,lw=2,color=d_black)
    elif output == 'S3':
        ax_spec.set_ylabel('$S_{3}$')
        ax_spec.plot(det_range,S3.real,lw=2,color=d_black)

    #convert to GHz from MHz
    exc_energies /= 1e3
    gnd_energies /= 1e3
    final_exc_energies /= 1e3
    final_gnd_energies /= 1e3

    for energy in exc_energies[int(len(final_exc_energies)/3):]:
        ax_e.plot(BreitRabiVals/1e4,energy,color=d_black,lw=1)
    for energy in gnd_energies:
        ax_g.plot(BreitRabiVals/1e4,energy,color=d_black,lw=1.5)
    ax_excBR.set_xlim(0,(Bfield + 10*Bstep)/1e4)
    for ax in [ax_g,ax_e]:
        ax.set_ylim(ax.get_ylim()[0]*1.15,ax.get_ylim()[1]*1.15)
        ax.set_xlim(BreitRabiVals[0]/1e4, BreitRabiVals[-1]/1e4)
    ax_excBR.set_ylim(ax_e.get_ylim())
    ax_gndBR.set_ylim(ax_g.get_ylim())
    ax_spec.set_xlim(det_range[0],det_range[-1])
    ax_spec.set_ylim(ax_spec.get_ylim()[0],1.01)

    ##
    ## Sixth part - Add arrows for each transition
    ##
    print('Sigma minus transitions:')
    print(sorted(lenergy87))
    print('Sigma plus transitions:')
    print(sorted(renergy87))
    print('Pi transitions:')
    print(sorted(zenergy87))

    # Vertical guide lines on the spectrum at each transition energy.
    for energy in lenergy87:
        ax_spec.axvline(energy/1e3,color=d_purple,lw=1.5)
    for energy in renergy87:
        ax_spec.axvline(energy/1e3,color=d_blue,lw=1.5)
    for energy in zenergy87:
        ax_spec.axvline(energy/1e3,color=d_olive,lw=1.5,linestyle='dashed')

    # Coordinates for arrows - sigma minus transitions (purple)
    xy1s = zip(lenergy87/1e3,lgl87/1e3)
    xy2s = zip(lenergy87/1e3,lel87/1e3)
    ecol = d_purple
    fcol = 0.5 * (np.array(d_lightpurple) + np.array(d_purple))
    alpha = 0.9
    #styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
    for xy1,xy2,strength in zip(xy1s,xy2s,lstrength87):
        #if (xy1[0] > 15) or (xy1[0]<-15):
        coordsA = 'data'
        coordsB = 'data'
        con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
            arrowstyle="simple",shrinkB=0,
            axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
            ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
        ax_gLev.add_artist(con)

    # Coordinates for arrows - sigma plus transitions (blue)
    xy1s = zip(renergy87/1e3,rgl87/1e3)
    xy2s = zip(renergy87/1e3,rel87/1e3)
    ecol = d_blue
    fcol = 0.5 * (np.array(d_midblue) + np.array(d_blue))
    alpha = 0.9
    #styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
    for xy1,xy2,strength in zip(xy1s,xy2s,rstrength87):
        #if (xy1[0] > 15) or (xy1[0]<-15):
        coordsA = 'data'
        coordsB = 'data'
        con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
            arrowstyle="simple",shrinkB=0,
            axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
            ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
        ax_gLev.add_artist(con)

    # Coordinates for arrows - pi transitions (olive)
    xy1s = zip(zenergy87/1e3,zgl87/1e3)
    xy2s = zip(zenergy87/1e3,zel87/1e3)
    ecol = d_darkolive
    fcol = d_olive#darkyellow#olive #(0.16,0.85,0.16)
    alpha = 0.6
    #styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
    for xy1,xy2,strength in zip(xy1s,xy2s,zstrength87):
        #if (xy1[0] < 15) and (xy1[0]>-15):
        coordsA = 'data'
        coordsB = 'data'
        con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
            arrowstyle="simple",shrinkB=0,
            axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
            ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
        ax_gLev.add_artist(con)

    # Add B-field info to plot - top left
    fig.text(0.1,0.78-0.03,'L = '+str(LCELL*1e3)+' mm',size=18,ha='center')
    fig.text(0.1,0.82-0.03,r'T = '+str(TEMP)+' $^{\circ}$C',size=18,ha='center')
    fig.text(0.1,0.86-0.03,'B = '+str(Bfield/1e4)+' T',size=18,ha='center')
    fig.text(0.1,0.90-0.03,str(DLINE)+' Line',size=18,ha='center')
    fig.text(0.1,0.94-0.03,'$^{87}$Rb',size=18,ha='center')

    ##
    ## Finally - show the plot and save the figure
    ##
    ax_spec.set_xlim(-Dmax,Dmax)
    # NOTE(review): the commented savefig calls below are missing a '+'
    # before '.pdf'/'.png' and would be a SyntaxError if uncommented.
    # fig.savefig('./BR_plot_'+str(Bfield)+str(output)'.pdf',dpi=300)
    # fig.savefig('./BR_plot_'+str(Bfield)+str(output)'.png',dpi=300)
    plt.show()
    print('--- End of calculations ---')
    return fig
35,121
def _add_subject(subject_list: List[t_subject], subject: t_subject) -> None:
    """Convert one raw subject dict into a Subject and append it to the list."""
    value = Subject(uuid_ref=subject['uuid-ref'], type=subject['type'])
    if 'title' in subject:
        value.title = subject['title']
    if 'properties' in subject:
        # Each entry becomes an osco inventory-item property.
        properties = subject['properties']
        value.props = [
            _get_property('osco', 'inventory-item', name, properties[name])
            for name in properties
        ]
    subject_list.append(value)
35,122
def referenced_fmr(X=None, Y=None, Z=None, delta_x_idx:{"type": "int", "min":0, "max": 1, "hint": "Distance of the background signal (in x-index units)"}=0,):
    """Reference each Z row against the row ``delta_x_idx`` further along axis 0.

    Computes Z[i] / Z[i + delta_x_idx] for every valid row i; the last
    ``delta_x_idx`` rows are dropped from X, Y and the result so signal and
    background stay aligned.
    """
    n_rows = X.shape[0]
    signal = slice(0, n_rows - delta_x_idx)
    background = slice(delta_x_idx, n_rows)
    return X[signal, :], Y[signal, :], Z[signal, :] / Z[background, :]
35,123
def categorize_dish(dish_name, dish_ingredients):
    """
    :param dish_name: str
    :param dish_ingredients: list (or any iterable) of ingredient names
    :return: str "dish name: CATEGORY"

    Return which meal category the dish belongs to, checking the most
    restrictive category first.  Categories are imported from
    `sets_categories_data.py` (VEGAN, VEGETARIAN, PALEO, KETO, OMNIVORE).

    Note: the original compared the category sets against the raw argument
    with ``>=``, which raises TypeError for the documented list input; the
    ingredients are now converted to a set first (backward compatible for
    callers that already passed sets).
    """
    ingredients = set(dish_ingredients)
    # Most restrictive category first, mirroring the original elif order.
    categories = (
        (VEGAN, 'VEGAN'),
        (VEGETARIAN, 'VEGETARIAN'),
        (PALEO, 'PALEO'),
        (KETO, 'KETO'),
        (OMNIVORE, 'OMNIVORE'),
    )
    for members, label in categories:
        if members >= ingredients:
            return f"{dish_name}: {label}"
    # Unreachable for valid data; the original fell through to an
    # UnboundLocalError here — raise something meaningful instead.
    raise ValueError(f"{dish_name}: ingredients match no category")
35,124
def read_text_subset(
    subset: str, source_dir: str = "data/CUB_200_2011_with_text/text"
) -> Tuple[List[str], List[int], List]:
    """Load filenames, class info and pretrained caption embeddings for a subset.

    The embeddings were produced by a pretrained char-CNN-RNN network
    (https://arxiv.org/abs/1605.05396) for the birds and flowers datasets.
    """
    subset_dir = os.path.join(source_dir, subset)
    file_names = read_pickle(os.path.join(subset_dir, "filenames.pickle"))
    class_info = read_pickle(os.path.join(subset_dir, "class_info.pickle"))
    char_CNN_RNN_embeddings = read_pickle(
        os.path.join(subset_dir, "char-CNN-RNN-embeddings.pickle")
    )
    return file_names, class_info, char_CNN_RNN_embeddings
35,125
def generate_points_realistic(N=100, distortion_param=0, rng=None): """Generates two poses and the corresponding scene points and image points.""" # Check if a seed is used (for unittests) if not rng: rng = np.random.default_rng() # Relative translation t = 2 * rng.random((3, 1)) - 1 # Make sure the baseline is okay t = t / np.linalg.norm(t) # Calibration matrix f = rng.random() * 200 + 200 K = np.diag([f, f, 1.0]) Kinv = np.diag([1.0 / f, 1.0 / f, 1.0]) R1, _ = np.linalg.qr(rng.random((3, 3))) R2, _ = np.linalg.qr(rng.random((3, 3))) R = R2 @ R1.T P1 = K @ np.hstack((R1, np.zeros((3, 1)))) P2 = K @ np.hstack((R2, t)) # Fundamental matrix F = Kinv.T @ skew(t) @ R @ Kinv # Generate points with y-coordinate in front of scene X = np.vstack(( 6 * rng.random((1, N)) - 3, 5 * rng.random((1, N)) + 3, 6 * rng.random((1, N)) - 3, np.ones((1, N)))) # Generate point correspondences (pinhole) x1 = pflat(P1 @ X) x2 = pflat(P2 @ X) # Add radial distortion (if desired) x1u = x1 x2u = x2 if distortion_param < 0: x1 = radialdistort(x1, distortion_param) x2 = radialdistort(x2, distortion_param) return R1, R2, f, F, x1, x2, R, t, x1u, x2u
35,126
def truncated_step( x: array, f_x: float, grad: array, step_size: float = 0.1, search_direction: Optional[array] = None, step_lower_bound: float = 0.0, ): """Motivated by https://arxiv.org/abs/1903.08619 , use knowledge of a lower-bound on f_x to prevent from taking a step too large TODO: consider further damping? TODO: rather than truncating at absolute global bound on loss, consider truncating at relative bound, like, don't take a step that you predict would decrease the loss by more than X % ? X absolute increment? some combination of these? TODO: generalize to use local surrogates other than first-order Taylor expansions by refactoring to accept a callable `f_prime` directly, rather than constructing a default `f_prime` from `x`, `f_x`, `grad` arguments Notes ----- * search_direction not assumed normalized. for example, it could be the raw gradient * `step_size` is used to generate an initial proposal `x_proposed`. If `f_prime(x_proposed) < step_lower_bound`, then the step will be truncated. * The default `step_lower_bound=0` corresponds to a suggestion in the cited study, incorporating the knowledge that the loss is bounded below by 0. In the script, we pass in a non-default argument to the `step_lower_bound` to make the behavior of the method more conservative, and this is probably something we'll fiddle with a bit. * The default value `step_size=0.1` isn't very precisely chosen. The behavior of the method will be insensitive to picking `step_size` anywhere between like 1e-3 and +inf for our problems, since this will trigger the step-truncating logic on most every step. If the `step_size` is chosen sufficiently small that it rarely produces proposals that violate `step_lower_bound`, then that will start to have an effect on the behavior of the optimizer. 
""" # default search direction: SGD if search_direction is None: search_direction = -grad assert np.linalg.norm(search_direction) > 0 # if this vector is all zeros, doesn't make sense to proceed # default local surrogate model: linear f_prime = _taylor_first_order(x, f_x, grad) # default step: step_size * search_direction x_next = x + step_size * search_direction # if this is too optimistic, according to local surrogate f_prime if f_prime(x_next) < step_lower_bound: # TODO: replace f_prime bound with something more configurable x_proposed = x_next line_search_fxn = lambda alpha: f_prime(x + alpha * search_direction) - step_lower_bound result = root_scalar(line_search_fxn, x0=0, x1=step_size) alpha = result.root x_next = x + alpha * search_direction message = f""" f_prime(x_proposed) = {f_prime(x_proposed):.5f} using default step size {step_size:.5f} is lower than step_lower_bound = {step_lower_bound:.5f} truncating step size to {alpha:.5f}, so that the predicted f_prime(x_next) = {f_prime(x_next):.5f}""" print(message) x_increment = np.array(x_next - x) return x_increment
35,127
def test_single_feature_label():
    """
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_single_feature_label',
    ...                           has_feature('single feature')
    ...                           ))
    """
    # Intentionally empty: the doctest in the docstring above is what the
    # allure-report integration inspects, so it must be kept verbatim.
    pass
35,128
def search_covid_results(patient_id: str, covid_df: pd.DataFrame):
    """
    Given a patient ID and a dataframe of COVID-19 PCR results, return whether
    a patient had a positive result at any point and the date of their first
    positive. If no positives but negative results exist, return "N" for
    negative, otherwise "U" for unknown.

    Parameters
    ----------
    patient_id: str
        Patient ID
    covid_df: Pandas.DataFrame
        COVID-19 PCR results; assumes columns PATIENT_ID, TEXT,
        collection_datetime and test_datetime -- TODO confirm schema.

    Returns
    -------
    str, str or None
        Status flag ("P", "N" or "U") and, for positives, the ISO-8601
        timestamp of the first positive (None when unavailable).
    """
    # All of this patient's results, earliest collection first.
    pt_status = covid_df[covid_df.PATIENT_ID == patient_id].sort_values("collection_datetime", ascending=True).copy()
    positives = pt_status[pt_status.TEXT == "Positive"].copy()
    # Render the datetimes as ISO-8601 strings for the return value.
    for x in ["collection_datetime", "test_datetime"]:
        positives[x] = positives[x].dt.strftime("%Y-%m-%dT%H:%M:%SZ")
    if pt_status.shape[0] == 0:
        # No results at all for this patient.
        return "U", None
    if positives.shape[0] != 0:
        first_positive = positives.iloc[0]
        # Prefer the collection timestamp; fall back to the test timestamp.
        if pd.isnull(first_positive.collection_datetime):
            if pd.isnull(first_positive.test_datetime):
                return "P", None
            return "P", first_positive.test_datetime
        return "P", first_positive.collection_datetime
    negatives = pt_status[pt_status.TEXT == "Negative"]
    if negatives.shape[0] != 0:
        return "N", None
    return "U", None
35,129
def is_slow_test_hostile():
    """Return True when slow tests should be skipped.

    Used to disable some tests in CI environments where a 15-minute
    deadline applies, or when the user opts out via SKIP_SLOW_TEST.
    """
    skip_flags = ("CI", "SKIP_SLOW_TEST")
    return any(flag in os.environ for flag in skip_flags)
35,130
def str_is_path(p: str):
    """Detect whether *p* can be interpreted as an absolute filesystem path.

    Args:
        p: the candidate path value.

    Returns:
        True when *p* parses as an absolute path; False for relative paths
        or values that cannot be turned into a Path at all.
    """
    try:
        return Path(p).is_absolute()
    except TypeError:
        # Not path-like (e.g. an int) -- treat as "not a path".
        return False
35,131
def run_tests(requested_test_classes, serial, config):
    """Actually run the test suites, potentially in parallel.

    NOTE: Python 2 code (print statements); relies on mininet test helpers.
    """
    root_tmpdir = tempfile.mkdtemp(prefix='faucet-tests-')
    # Socket used by the ports server so parallel tests can allocate ports.
    ports_sock = os.path.join(root_tmpdir, 'ports-server')
    ports_server = threading.Thread(
        target=faucet_mininet_test_util.serve_ports, args=(ports_sock,))
    ports_server.setDaemon(True)
    ports_server.start()
    single_tests = unittest.TestSuite()
    parallel_tests = unittest.TestSuite()
    # Collect Faucet*Test classes from this module; FaucetSingle* classes
    # (or everything, when serial is requested) go to the serial suite.
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if not inspect.isclass(obj):
            continue
        if requested_test_classes and name not in requested_test_classes:
            continue
        if name.endswith('Test') and name.startswith('Faucet'):
            print 'adding test %s' % name
            test_suite = make_suite(obj, config, root_tmpdir, ports_sock)
            if serial or name.startswith('FaucetSingle'):
                single_tests.addTest(test_suite)
            else:
                parallel_tests.addTest(test_suite)
    print 'running %u tests in parallel and %u tests serial' % (
        parallel_tests.countTestCases(), single_tests.countTestCases())
    results = []
    if parallel_tests.countTestCases():
        max_parallel_tests = min(parallel_tests.countTestCases(), MAX_PARALLEL_TESTS)
        parallel_runner = unittest.TextTestRunner(verbosity=255)
        parallel_suite = ConcurrentTestSuite(
            parallel_tests, fork_for_tests(max_parallel_tests))
        results.append(parallel_runner.run(parallel_suite))
    # TODO: Tests that are serialized generally depend on hardcoded ports.
    # Make them use dynamic ports.
    if single_tests.countTestCases():
        single_runner = unittest.TextTestRunner(verbosity=255)
        results.append(single_runner.run(single_tests))
    os.remove(ports_sock)
    all_successful = True
    for result in results:
        if not result.wasSuccessful():
            all_successful = False
            print result.printErrors()
    pipeline_superset_report(root_tmpdir)
    # Keep the tmpdir around for debugging when any suite failed.
    if all_successful:
        shutil.rmtree(root_tmpdir)
35,132
def PLAY(command: Command) -> Command:
    """
    Moves clip from background to foreground and starts playing it.
    If a transition (see LOADBG) is prepared, it will be executed.
    """
    # Marker-style helper: the command object itself carries the AMCP
    # semantics; this function just returns it unchanged.
    return command
35,133
def deserialize(data: str) -> dict:
    """
    Deserialize a JSON string produced by the matching serializer.

    Bytestrings are encoded on the wire as lists whose first element is the
    sentinel string '__bytes__' followed by the integer byte values; they are
    converted back to ``bytes`` here, recursively.

    Args:
        data: JSON text, or None.

    Returns:
        The deserialized structure ({} when data is None).
    """
    if data is None:
        return {}

    def fix(jd: Any) -> Any:
        # isinstance instead of type(...) == ... (idiomatic; same behavior
        # for json.loads output, which only produces exact dict/list types).
        if isinstance(jd, dict):
            # Fix each value in the dictionary in place.
            for key in jd:
                jd[key] = fix(jd[key])
            return jd

        if isinstance(jd, list):
            # Could be a serialized bytestring or a normal list.
            if len(jd) >= 1 and jd[0] == '__bytes__':
                # Sentinel-tagged list: the remaining items are byte values.
                return bytes(jd[1:])

            # Recurse: any element may itself be a dict/list/bytestring.
            for i in range(len(jd)):
                jd[i] = fix(jd[i])
            return jd

        # Scalar value: its deserialized version is itself.
        return jd

    return fix(json.loads(data))
35,134
def seq_windows_df(
    df,
    target=None,
    start_index=0,
    end_index=None,
    history_size=1,
    target_size=1,
    step=1,
    single_step=False,
):
    """
    Create sliding window (X, label) tuples for training NNs on multivariate
    timeseries.

    Args:
        df: feature dataframe, one row per timestep.
        target: label source; defaults to ``df`` itself.
        start_index: first row eligible as a window end.
        end_index: last row (exclusive); defaults to len(df) - target_size.
        history_size: number of past timesteps in each window.
        target_size: offset (single_step) or length (multi-step) of labels.
        step: stride when sampling rows inside a window.
        single_step: when True, the label is a single future value.

    Returns:
        (data, labels): parallel lists of window frames and label slices.
    """
    data = []
    labels = []

    # Windows need history_size rows of lookback before them.
    start_index = start_index + history_size
    if target is None:
        target = df
    if end_index is None:
        end_index = df.shape[0] - target_size

    for i in range(start_index, end_index):
        indices = range(i - history_size, i, step)
        X = df.iloc[indices]
        data.append(X)
        if single_step:
            # NOTE(review): plain [] indexing on a DataFrame selects columns,
            # not rows -- this path looks intended for a Series target;
            # confirm callers never pass a DataFrame when single_step=True.
            label = target[i + target_size]
        else:
            label = target[i: i + target_size]
        labels.append(label)
    return data, labels
35,135
def get_cachable_provider(
    cachables: Optional[List[Cachable]] = None,
) -> Callable[[], List[Cachable]]:
    """
    Returns a cachable_provider.

    Args:
        cachables: the cachables the provider should yield. Defaults to a
            fresh ``[Collection1(), Collection2()]`` per call -- the
            previous mutable default argument shared one list (and the
            instances in it) across every call to this function.

    Returns:
        A zero-argument callable producing the cachables list.
    """
    if cachables is None:
        cachables = [Collection1(), Collection2()]
    return lambda: cachables
35,136
def getQuality(component, propertyName):
    # type: (JComponent, String) -> int
    """Return the data quality of a component property as an integer.

    Useful for checking the quality of a Tag binding on a component
    mid-script, so alternative actions can be taken on device
    disconnections.

    Args:
        component: The component whose property is being checked.
        propertyName: The name of the property as a string value.

    Returns:
        The data quality of the given property as an integer.
    """
    print(component, propertyName)
    good_quality = 192
    return good_quality
35,137
def init():
    """Initialize a directory with tasks.

    Copies the notebook templates shipped with mlpractice into a freshly
    created ./tasks directory, injects task sources into every notebook,
    and creates the user-progress statistics file.  Exits early when a
    tasks directory already exists.
    """
    if os.path.isdir('tasks'):
        print('Directory "tasks" already exists!')
        sys.exit(0)
    os.mkdir('tasks')
    os.chdir('tasks')
    # Templates live next to the installed mlpractice package.
    templates_dir = os.path.join(
        os.path.dirname(inspect.getfile(mlpractice)),
        'templates',
    )
    tasks_dir = os.getcwd()
    copy_tree(templates_dir, tasks_dir)
    # Fill in each copied notebook with the task sources.
    for dir_path, dir_names, filenames in os.walk(tasks_dir):
        for filename in filenames:
            file_path = os.path.join(dir_path, filename)
            if file_path.endswith('.ipynb'):
                inject_sources_into_template(file_path)
    print(f'Initialized a directory with tasks at {tasks_dir}')
    # initialize a file with statistics about user's progress
    _init_stats()
35,138
def norm1(x):
    """Project *x* onto the unit sphere along its last axis."""
    squared_norm = x.square().sum(axis=-1, keepdims=True)
    return x / squared_norm.sqrt()
35,139
def set_each_question_path(config: DictConfig):
    """Fill in per-model question (hed) file paths.

    For each of the timelag/duration/acoustic sections, fall back to the
    global ``config.question_path`` when the section does not specify its
    own question file.  Sections that already have a path keep it.

    (The previous implementation carried a redundant else-branch that
    assigned ``config[typ].question_path`` to itself; removed.)

    Args:
        config: hierarchical config with a global ``question_path`` and one
            sub-config per model type.
    """
    for typ in ('timelag', 'duration', 'acoustic'):
        if config[typ].question_path is None:
            config[typ].question_path = config.question_path
35,140
def markup_sentence(s, modifiers, targets, prune_inactive=True):
    """
    Run the full pyConText markup pipeline on a single sentence.

    Args:
        s: sentence text.
        modifiers: pyConText item data for modifier concepts.
        targets: pyConText item data for target concepts.
        prune_inactive: drop modifiers that did not modify any target.

    Returns:
        The populated ConTextMarkup graph.
    """
    markup = pyConText.ConTextMarkup()
    markup.setRawText(s)
    markup.cleanText()
    markup.markItems(modifiers, mode="modifier")
    markup.markItems(targets, mode="target")
    # Remove marks wholly contained inside other marks, then exclusions.
    markup.pruneMarks()
    markup.dropMarks('Exclusion')
    # apply modifiers to any targets within the modifiers scope
    markup.applyModifiers()
    markup.pruneSelfModifyingRelationships()
    if prune_inactive:
        markup.dropInactiveModifiers()
    return markup
35,141
def get_stream_info(stream_id):
    """
    Query the `/stream/info` endpoint for a single stream.

    e.g. stream_id="e83a515e-fe69-4b19-afba-20f30d56b719"
    """
    response = kickflip_session.post(
        KICKFLIP_API_URL + '/stream/info/',
        {'stream_id': stream_id},
    )
    return response.json()
35,142
def invchisquared_sample(df, scale, size):
    """Draw `size` samples from the scaled inverse-chi-squared distribution.

    Implemented by sampling the equivalent gamma distribution and
    inverting the draws.
    """
    # Scaled-inv-chi-squared(df, scale) == Inverse-Gamma(df/2, df*scale/2).
    shape = df / 2
    rate = df * scale / 2.
    # Inverse-Gamma(shape, rate) draws are reciprocals of
    # Gamma(shape, scale=1/rate) draws.
    gamma_draws = np.random.gamma(shape, 1. / rate, size)
    return 1. / gamma_draws
35,143
def get_data_from_matlab(file_url, index, columns, data):
    """Extract arrays from a Matlab .mat file into a pandas DataFrame.

    The structure of the mat file must be known in advance: loadmat returns
    a dictionary of arrays which are accessed by key name.

    Args:
        file_url: the location of the .mat file
        index: the key for the array of date-like strings to be used as the
            dataframe index
        columns: the key for the array of ticker names to be used as columns
        data: the key for the array to be used as data in the dataframe

    Returns:
        Pandas dataframe
    """
    import scipy.io as sio
    import datetime as dt
    # load mat file to dictionary
    mat = sio.loadmat(file_url)
    # define data to import, columns names and index
    cl = mat[data]
    stocks = mat[columns]
    dates = mat[index]

    # extract the ticker to be used as a column name in the dataframe;
    # the .mat cell array nests each name two levels deep.
    # to-do: list comprehension here
    columns = []
    for each_item in stocks:
        for inside_item in each_item:
            for ticket in inside_item:
                columns.append(ticket)

    # extract strings in the date array and convert to a DatetimeIndex
    # to-do: list comprehension here
    df_dates = []
    for each_item in dates:
        for inside_item in each_item:
            df_dates.append(inside_item)
    df_dates = pd.Series([pd.to_datetime(date, format='%Y%m%d') for date in df_dates], name='date')

    # construct the final dataframe
    data = pd.DataFrame(cl, columns=columns, index=df_dates)
    return data
35,144
def get_loop_end(header: bytes) -> int:
    """Return loop end position.

    Raises:
        AssertionError: when the unpacked value is not an int or is outside
            the open interval (0, 65535).  NOTE(review): assert-based
            validation disappears under ``python -O``; consider raising
            ValueError if that matters for callers.
    """
    assert isinstance(value := _unpack(header, "LOOP_END"), int), type(value)
    assert 0 < value < 65535, value
    return value
35,145
def report(filename: str, ignore_result: tuple, hide_empty_groups:bool):
    """{p}arse a report and do a simple output

    Prints a colourised summary, then each report, its groups, and the
    individual checks (skipping results listed in ignore_result).
    Returns False when the file is missing; raises ValueError when the
    data lacks a 'summary' key.
    """
    if not os.path.exists(filename):
        logger.error("Failed to find file {}, bailing", filename)
        return False
    if ignore_result:
        print(f"Ignoring the following result values: {', '.join(ignore_result)}")
    with open(filename, 'r', encoding="utf8") as file_handle:
        report_data = json_load(file_handle)
    if "summary" not in report_data:
        raise ValueError("Parsing fail - should include a summary key in data?")

    summary = report_data.get("summary")
    print("Report Summary")
    print_underline("Report Summary", underline="=")
    # One colourised line per summary key (pass/fail counts etc.).
    for key in summary:
        print(f"{COLOUR.get(key, COLOUR['default'])}{summary[key]}{COLOUR['end']}\t{key}")
    print("\n")

    if len(report_data.get('reports')) == 1:
        print(f"There is 1 report.\n")
    else:
        print(f"There are {len(report_data.get('reports'))} reports.\n")

    for report_index, report in enumerate(report_data.get("reports")):
        print(f"Report #:\t{COLOUR['white']}{report_index+1}{COLOUR['end']}")
        for key in REPORT_SUMMARY_KEYS:
            if report.get(key):
                print(f"{key}\t{COLOUR['white']}{report.get(key)}{COLOUR['end']}")
        print("")
        for group_index, group in enumerate(report.get("groups")):
            # check if all the items in this group have been skipped
            checks_without_skipped = [
                check for check in group.get("checks")
                if check.get("result") not in ignore_result]
            if hide_empty_groups and len(checks_without_skipped) == 0:
                continue
            print(f"\n{COLOUR['warning']}Check Group #{group_index+1} - {group.get('description')}{COLOUR['end']}")
            print_underline(group.get("description"))
            if len(checks_without_skipped) == 0:
                print("All checks in this group have been ignored.")
                print("="*20)
            for check in checks_without_skipped:
                print("="*20)
                result = check.get("result")
                print(f"Result: {COLOUR.get(result, COLOUR.get('default'))}{result}{COLOUR.get('end')}")
                # Flatten multi-line check descriptions onto one line.
                description = check.get('description').replace('\n', ' ')
                print(f"Check: {description}")  # {result}")
                # print(check.keys())
                messages = check.get("messages")
                for message in messages:
                    print(message.get("message"))
                    if message.get("filename"):
                        print(f"Filename: {message.get('filename')}")
    print("Done!")
35,146
def test_no_subfield(add_citation):
    """
    When no institution is linked, return None.
    """
    citation = add_citation()
    # Identity check with `is None` rather than `== None` (PEP 8 / E711).
    assert citation.subfield is None
35,147
def measuresegment(waveform, Naverage, minstrhandle, read_ch, mV_range=2000, process=True, device_parameters=None):
    """Wrapper to identify measurement instrument and run appropriate acquisition function.

    Supported instruments: m4i digitizer, ZI UHF-LI, scope reader,
    simulation digitizer and the 'dummy' test backend.

    Args:
        waveform (dict): waveform specification
        Naverage (int): number of averages to perform
        minstrhandle (str or Instrument): handle to acquisition device
        read_ch (list): channels to read from the instrument
        mV_range (float): range for input
        process (bool): If True, process the segment data from scope reader
        device_parameters (dict): dictionary passed as keyword parameters to the measurement methods

    Returns:
        data (numpy array): recorded and processed data
    """
    if device_parameters is None:
        device_parameters = {}

    # Determine which acquisition backend the handle refers to.
    is_m4i = _is_m4i(minstrhandle)
    is_uhfli = _is_measurement_device(minstrhandle, qcodes.instrument_drivers.ZI.ZIUHFLI.ZIUHFLI)
    is_scope_reader = _is_measurement_device(minstrhandle, AcquisitionScopeInterface)
    is_simulator = _is_measurement_device(minstrhandle, SimulationDigitizer)
    measure_instrument = get_instrument(minstrhandle)

    if is_m4i:
        data = measuresegment_m4i(minstrhandle, waveform, read_ch, mV_range, Naverage, process=process,
                                  **device_parameters)
    elif is_uhfli:
        data = measure_segment_uhfli(minstrhandle, waveform, read_ch, Naverage, **device_parameters)
    elif is_scope_reader:
        data = measure_segment_scope_reader(minstrhandle, waveform, Naverage, process=process,
                                            **device_parameters)
    elif is_simulator:
        data = measure_instrument.measuresegment(waveform, channels=read_ch)
    elif minstrhandle == 'dummy':
        # for testing purposes
        data = np.random.rand(100, )
    else:
        raise Exception(f'Unrecognized fast readout instrument {minstrhandle}')
    if np.array(data).size == 0:
        warnings.warn('measuresegment: received empty data array')
    return data
35,148
def user_list(ks_cli):
    """Print id, name and enabled-flag for every user.

    Requires ADMIN credentials on the keystone client.
    """
    listing = ks_cli.users.list()
    # print_structure(listing.data[0],geta=False)
    for user in listing.data:
        print(user.id, user.name, user.enabled)
    return
35,149
def grid_definition_proj():
    """Custom grid definition using a proj string.

    NOTE: relies on the module-level ``example_proj`` proj string.
    """
    return {
        "shape": (1, 1),
        "bounds": (-4000000.0, -4000000.0, 4000000.0, 4000000.0),
        "is_global": False,
        "proj": example_proj,
    }
35,150
def _compare_lines(line1, line2, tol=1e-14): """ Parameters ---------- line1: list of str line2: list of str Returns ------- bool """ if len(line1) != len(line2): return False for i, a in enumerate(line1): b = line2[i] if type(a) not in {int, float}: if a != b: return False elif type(a) is int and type(b) is int: if a != b: return False elif type(a) in {int, float} and type(b) in {int, float}: if abs(a - b) > tol: return False else: if a != b: return False return True
35,151
def helping_func(self, driver, value):
    """Helper for method-composition tests: return *value* incremented by one."""
    incremented = value + 1
    return incremented
35,152
def ffmpeg_video_write(data, video_path, fps=25):
    """Video writer based on FFMPEG.

    Args:
      data: A `np.array` with the shape of [seq_len, height, width, 3]
      video_path: A video file.
      fps: Use specific fps for video writing. (optional)
    """
    assert len(data.shape) == 4, f'input shape is not valid! Got {data.shape}!'
    _, height, width, _ = data.shape
    os.makedirs(os.path.dirname(video_path), exist_ok=True)
    # Raw RGB frames are piped to ffmpeg's stdin and encoded as yuv420p.
    writer = (
        ffmpeg
        .input('pipe:', framerate=fps, format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
        .output(video_path, pix_fmt='yuv420p')
        .overwrite_output()
        .run_async(pipe_stdin=True)
    )
    for frame in data:
        writer.stdin.write(frame.astype(np.uint8).tobytes())
    # Closing stdin signals EOF so ffmpeg can finalize the file.
    writer.stdin.close()
35,153
def add_languages_modify(schema, fields, locales=None):
    """Adds localized field keys to the given schema.

    For every (field, locale) pair a "<field>_<locale>" key is added,
    validated with ignore_missing/str and converted to a package extra.
    Returns the (mutated) schema.
    """
    if locales is None:
        locales = get_locales()
    ignore_missing = toolkit.get_validator('ignore_missing')
    convert_to_extras = toolkit.get_converter('convert_to_extras')
    for locale in locales:
        for field in fields:
            schema.update({"%s_%s" % (field, locale): [ignore_missing, str, convert_to_extras]})
    return schema
35,154
def package_tests(zip_file, robotium_cfg_file, test_apk, skp_dir=None, resource_dir=None):
    """Package all tests into a zip file.

    Bundles the test APK plus optional SKP and resource payloads into the
    zip, then writes a robotium config file listing the files that must be
    pushed to the device's sdcard.
    """
    sdcard_files = []
    # NOTE: the ZipFile object deliberately shadows the zip_file path arg.
    with zipfile.ZipFile(zip_file, 'w') as zip_file:
        zip_file.write(test_apk, os.path.basename(test_apk), zipfile.ZIP_DEFLATED)
        if skp_dir:
            skps_prefix = 'skps'
            write_to_zip_file(zip_file, skp_dir, skps_prefix)
            sdcard_files.extend(
                ['/'.join((skps_prefix, f)) for f in os.listdir(skp_dir)])
        if resource_dir:
            resources_prefix = 'resources'
            write_to_zip_file(zip_file, resource_dir, resources_prefix)
            sdcard_files.extend(
                ['/'.join((resources_prefix, f)) for f in os.listdir(resource_dir)])

    robotium_cfg = '''[robotium]
dumpsys=1
dumpstate=1
collect_artifacts=/sdcard/skia_results
host_test=%s
sdcard_files=%s
[appurify]
profiler=0
videocapture=0
''' % (os.path.basename(test_apk), ','.join(sdcard_files))

    with open(robotium_cfg_file, 'w') as f:
        f.write(robotium_cfg)
35,155
def apply_temporary_fixes(font, is_for_cros=False, is_for_web=False): """Apply some temporary fixes.""" # Fix usWeight: font_name = font_data.font_name(font) weight = noto_fonts.parse_weight(font_name) weight_number = noto_fonts.WEIGHTS[weight] # Chrome OS wants Thin to have usWeightClass=100 if is_for_cros and weight == 'Thin': weight_number = 100 font['OS/2'].usWeightClass = weight_number # Set bold bits for Black (macStyle bit 0, fsSelection bit 5) if is_for_web is False: name_records = font_data.get_name_records(font) family_name = name_records[1] if family_name.endswith('Black'): font['head'].macStyle |= (1 << 0) font['OS/2'].fsSelection |= (1 << 5) font['OS/2'].fsSelection &= ~(1 << 6)
35,156
def from_iterable(
    iterable: tp.Union[tp.Iterable[T], pypeln_utils.Undefined] = pypeln_utils.UNDEFINED,
    use_thread: bool = True,
) -> tp.Union[Stage[T], pypeln_utils.Partial[Stage[T]]]:
    """
    Creates a stage from an iterable.

    Arguments:
        iterable: A source Iterable.
        use_thread: If set to `True` (default) it will use a thread instead of a process to consume the iterable. Threads start faster and use thread memory to the iterable is not serialized, however, if the iterable is going to perform slow computations it better to use a process.

    Returns:
        Returns a `Stage` if the `iterable` parameters is given, else it returns a `Partial`.
    """
    # Curried form: no iterable supplied yet, return a Partial awaiting one.
    if isinstance(iterable, pypeln_utils.Undefined):
        return pypeln_utils.Partial(
            lambda iterable: from_iterable(iterable, use_thread=use_thread)
        )

    return Stage(
        process_fn=FromIterable(iterable),
        workers=1,
        maxsize=0,
        timeout=0,
        total_sources=1,
        dependencies=[],
        on_start=None,
        on_done=None,
        use_threads=use_thread,
        f_args=[],
    )
35,157
def get_consumer_secret():
    """This is entirely questionable. See settings.py

    Fetches the Twitter consumer secret over HTTP; returns None on failure.
    NOTE: Python 2 code (urllib2 and old except-comma syntax).
    """
    consumer_secret = None
    try:
        loc = "%s/consumer_secret.txt" % settings.TWITTER_CONSUMER_URL
        url = urllib2.urlopen(loc)
        consumer_secret = url.read().rstrip()
    except (urllib2.HTTPError, IOError), e:
        print "Unable to obtain consumer_secret from %s: %s" % (loc, e)
    return consumer_secret
35,158
def thread_keep_storing_one_File(syn, project, schedule_for_cleanup): """Makes one file and stores it over and over again.""" # Make a local file to continuously store path = utils.make_bogus_data_file() schedule_for_cleanup(path) myPrecious = File(path, parent=project, description='This bogus file is MINE', mwa="hahahah") while syn.test_keepRunning: stored = store_catch_412_HTTPError(syn, myPrecious) if stored is not None: myPrecious = stored elif 'id' in myPrecious: # only attempt to retrieve if the entity was initially saved above without encountering a 412 error # and thus has a retrievable synapse id myPrecious = syn.get(myPrecious) sleep_for_a_bit()
35,159
def preprocess(src, cutoff, shape=(240, 240)): """Pre-processes the image""" # Resizing the image, for computational reasons, else the algorithm will take too much time dst = cv2.resize(src, shape) # (automated) Canny Edge Detection dst = aced.detect(dst) # Binary or Adaptive thresholding dst = aced.thresh(dst, cutoff, method='bin') return dst
35,160
def test_main():
    """
    call the main
    :return:
    """
    # Smoke test: passes as long as visitor.main() does not raise.
    visitor.main()
35,161
def is_aware(value):
    """
    Determine whether *value* is a timezone-aware datetime.

    The concept is defined in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    ``value.utcoffset()`` already implements the appropriate logic: it is
    None both when tzinfo is missing and when tzinfo.utcoffset() is None.
    """
    offset = value.utcoffset()
    return offset is not None
35,162
def check_manifest(of: fsspec.core.OpenFile, manifest: str) -> bool: """ Check to see if a given string exists in a manifest file. Parameters ========== x: str The string to check. manifest: str The path to a manifest file. Returns ======= True if the file is *not* in the manifest, False if it is. """ # Check if the file actually exists. If not, return true. mf = fsspec.open(manifest, "r") if not mf.fs.exists(manifest): return True # If the file exists, check if the file exists in it. with mf as f: content = set(f.read().split("\n")) return of.path not in content
35,163
def loadExpObjectFast(filename):
    """Loads a CiPdeN object from a JSON file.

    Ignores generation data, except the first and the last.

    Parameters
    ----------
    filename : str
        includes path and filename

    Returns
    -------
    dict or None
        returns a dict if it worked, else None
    """
    try:
        # bigjson streams the file, so large result files stay out of memory.
        with open(filename, 'rb') as f:
            result = bigjson.load(f)
            obj_dict = dict()
            obj_dict["pde"] = result["pde"]
            obj_dict["kernel_type"] = result["kernel_type"]
            obj_dict["opt_algo"] = result["opt_algo"]
            obj_dict["exec_time"] = result["exec_time"]
            obj_dict["mem_consumption"] = result["mem_consumption"]
            obj_dict["normL2"] = result["normL2"]
            obj_dict["sol_kernel"] = np.array(result["sol_kernel"].to_python())
            return obj_dict
    except Exception as e:
        # Best-effort loader: report and signal failure with None.
        print(str(e))
        return None
35,164
def makeLoc(*args): """This function creates locators based on the number specified by the user""" # We query the number of arms given by the user start.armsValue = cmds.intSliderGrp(start.numArms, q=True, v=True) # We iterate over this number for i in range(1, start.armsValue+2): # We create a locator AND add it to the locator list start.locList.append(cmds.spaceLocator(n="ArmLocator%i"%i if i<=(start.armsValue) else "BucketLocator", p=(0,0,0), a=True)) # Center locator's pivot cmds.CenterPivot() # Move locator in world space so the user does not have to deal with local space cmds.move(0,0,(i-1)*5) # Modifies the UI so the buttons get enabled or disabled updateUI(True)
35,165
def Vij_beam_correct(j, Vij, centre=None):
    """Corrects Vij for the beam amplitude.

    This is required when beam correction has not been done during
    calibration. Assumes identical beam patterns. Assumes calibrator
    source is at centre of image.
    """
    my_shape = Vij[0, 0, :, :].shape
    if centre is None:
        centre = (my_shape[0] / 2, my_shape[1] / 2)  # Cal source at image centre
        logger.warning('Using centre of image as calibrator location')

    # Unpolarised instrumental response at the calibrator pixel.
    temp = beam_tools.makeUnpolInstrumentalResponse(j[:, :, centre[0], centre[1]], j[:, :, centre[0], centre[1]])
    XX = temp[0, 0]
    YY = temp[1, 1]
    # XY = temp[0, 1]
    # YX = temp[1, 0]

    correction = np.array([[XX, XX ** 0.5 * YY ** 0.5], [XX ** 0.5 * YY ** 0.5, YY]])
    # correction=np.array([[XX,XY],[YX,YY]])
    # correction=np.array([[XX,1],[1,YY]])
    logger.warning('Calibration correction factors: XX=%s, XY=%s, YX=%s, YY=%s' % (correction[0, 0],
                                                                                   correction[0, 1],
                                                                                   correction[1, 0],
                                                                                   correction[1, 1]))
    # Tile 2x2 correction matrix apply to Vij
    Vij_corrected = Vij * np.tile(correction[:, :, np.newaxis, np.newaxis], (my_shape[0], my_shape[1]))
    return Vij_corrected
35,166
def set_colorize(value: bool) -> None:
    """Globally turn colored terminal output on/off"""
    # Module-level flag consulted by the colour-printing helpers.
    global _COLOR
    _COLOR = value
35,167
def _calc_sc_1ph(net, bus):
    """
    calculation method for single phase to ground short-circuit currents
    """
    _add_auxiliary_elements(net)
    # pos. seq bus impedance
    ppc, ppci = _pd2ppc(net)
    _calc_ybus(ppci)
    # zero seq bus impedance
    ppc_0, ppci_0 = _pd2ppc_zero(net)
    _calc_ybus(ppci_0)

    if net["_options"]["inverse_y"]:
        # Full Zbus inversion for both sequence networks.
        _calc_zbus(net, ppci)
        _calc_zbus(net, ppci_0)
    else:
        # Factorization Ybus once
        ppci["internal"]["ybus_fact"] = factorized(ppci["internal"]["Ybus"])
        ppci_0["internal"]["ybus_fact"] = factorized(ppci_0["internal"]["Ybus"])

    _calc_rx(net, ppci, bus=bus)
    _add_kappa_to_ppc(net, ppci)
    _calc_rx(net, ppci_0, bus=bus)
    _calc_ikss_1ph(net, ppci, ppci_0, bus=bus)

    if net._options["branch_results"]:
        _calc_branch_currents(net, ppci, bus=bus)
    # Copy internal results back to the full ppc structures and extract.
    ppc_0 = _copy_results_ppci_to_ppc(ppci_0, ppc_0, "sc")
    ppc = _copy_results_ppci_to_ppc(ppci, ppc, "sc")
    _extract_results(net, ppc, ppc_0, bus=bus)
    _clean_up(net)
35,168
def get_data(github, selected_repos):
    """Generate json from the custom-cards org.

    For every (non-blacklisted, non-archived) repo, work out a version,
    a downloadable remote location for the card's JS file and a changelog
    link, returning everything keyed by repo name.
    """
    org = "custom-cards"
    data = {}
    repos = []
    if selected_repos:
        repos.append(selected_repos)
    else:
        for repo in list(github.get_user(org).get_repos()):
            repos.append(repo.name)
    for repo in repos:
        try:
            repo = github.get_repo(org + "/" + repo)
            if repo.name not in BLACKLIST and not repo.archived:
                print("Generating json for:", "{}/{}".format(org, repo.name))
                try:
                    release = list(repo.get_releases())[0]
                except Exception:  # pylint: disable=W0703
                    release = None
                name = repo.name
                version = None
                # Version: release tag when available, else a VERSION file.
                try:
                    if release and release.tag_name is not None:
                        version = release.tag_name
                    else:
                        content = repo.get_file_contents("VERSION")
                        content = content.decoded_content.decode()
                        version = content.split()[0]
                except Exception:  # pylint: disable=W0703
                    version = None
                if release:
                    remote_location = REUSE_TAG.format(org, name, version, name)
                else:
                    remote_location = REUSE.format(org, name, name)
                remote_location = remote_location + ".js"
                # Probe repo root, then dist/, then src/ for the JS file.
                testfile = requests.get(remote_location)
                if testfile.status_code != 200:
                    remote_location = remote_location.split(name + ".js")[0]
                    remote_location = remote_location + "dist/" + name + ".js"
                    testfile = requests.get(remote_location)
                    if testfile.status_code != 200:
                        remote_location = remote_location.split("dist/" + name + ".js")[0]
                        remote_location = remote_location + "src/" + name + ".js"
                        testfile = requests.get(remote_location)
                        if testfile.status_code != 200:
                            # No downloadable card file found; skip the repo.
                            continue
                visit_repo = VISIT.format(org, name)
                try:
                    changelog = list(repo.get_releases())[0].html_url
                    if "untagged" in list(repo.get_releases())[0].name:
                        changelog = None
                except Exception:  # pylint: disable=W0703
                    changelog = None
                if changelog is None:
                    changelog = VISIT.format(org, name)
                data[name] = {}
                data[name]["version"] = version
                data[name]["remote_location"] = remote_location
                data[name]["visit_repo"] = visit_repo
                data[name]["changelog"] = changelog
        except Exception as error:  # pylint: disable=W0703
            print(error)
    return data
35,169
def provider_pre_delete(sender, instance, **kwargs):
    """Forward signal to ChangeBuilder"""
    # Imports are local to avoid circular imports at module load time.
    from supervisr.core.providers.multiplexer import ProviderMultiplexer
    from supervisr.core.providers.objects import ProviderAction
    from supervisr.core.models import ProviderTriggerMixin
    if issubclass(instance.__class__, ProviderTriggerMixin):
        LOGGER.debug("ProviderTriggerMixin pre_delete")
        # Queue a DELETE change for this instance via the multiplexer.
        args = (ProviderAction.DELETE, class_to_path(instance.__class__), instance.pk)
        system_user.task_apply_async(ProviderMultiplexer(), *args)
35,170
def test_convert(pending_info, expected):
    """
    Test conversion from the minimum version supported to the last version
    supported.
    """
    # convert() mutates pending_info in place; DeepDiff reports mismatches.
    convert(pending_info)
    diff = DeepDiff(pending_info, expected)
    assert (
        not diff
    ), "diff found between converted pending_info and expected pending_info"
35,171
def fensemble_boosting_regressor(preds_valid, targs_valid, preds_train, targs_train, alpha=0.9):
    """
    Learn combination of ensemble members from training data using Gradient Boosting Regression.

    Also provides prediction intervals (using quantile regression).

    alpha = % prediction interval

    https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
    https://towardsdatascience.com/how-to-generate-prediction-intervals-with-scikit-learn-and-python-ab3899f992ed
    """
    ensemble_preds = []
    ensemble_lower = []
    ensemble_upper = []
    H = preds_valid.shape[2]

    # run for each day over horizon
    for h in range(H):
        X_train = preds_train[:,:,h].T
        y_train = targs_train[:,h]
        X_test = preds_valid[:,:,h].T
        y_test = targs_valid[:,h]

        # Quantile losses bound the interval; least-squares gives the mean.
        upper_model = GradientBoostingRegressor(loss="quantile", alpha=alpha)
        mid_model = GradientBoostingRegressor(loss="ls")
        lower_model = GradientBoostingRegressor(loss="quantile", alpha=(1.0-alpha))

        # fit models
        lower_model.fit(X_train, y_train)
        mid_model.fit(X_train, y_train)
        upper_model.fit(X_train, y_train)

        # store predictions
        ensemble_preds.append(mid_model.predict(X_test))
        ensemble_lower.append(lower_model.predict(X_test))
        ensemble_upper.append(upper_model.predict(X_test))

    return np.stack(ensemble_preds).T, np.stack(ensemble_lower).T, np.stack(ensemble_upper).T
35,172
def setup_land_units(srank):
    """
    Sets up our land forces for an effective social rank.

    We populate a dictionary of constants that represent the IDs of unit
    types and their quantities; that dict is then returned to setup_units
    for setting the base size of our army.

    Args:
        srank (int): Our effective social rank for determining army size.

    Returns:
        A dict of unit IDs to the quantity of those troops.
    """
    INF = unit_constants.INFANTRY
    PIK = unit_constants.PIKE
    CAV = unit_constants.CAVALRY
    ARC = unit_constants.ARCHERS
    # (infantry, pike, cavalry, archers) quantities for srank 1..6.
    quantities_by_srank = {
        6: (200, 70, 40, 70),
        5: (375, 125, 70, 125),
        4: (750, 250, 125, 250),
        3: (1500, 500, 250, 500),
        2: (3000, 1000, 500, 1000),
        1: (5000, 1500, 1000, 1500),
    }
    if srank > 6:
        troops = (75, 30, 15, 30)
    elif srank < 1:
        troops = (10000, 3000, 2000, 3000)
    else:
        # Non-integer ranks between 1 and 6 fall through to an empty army,
        # matching the original if/elif chain's behavior.
        troops = quantities_by_srank.get(srank)
    units = {}
    if troops is not None:
        units[INF], units[PIK], units[CAV], units[ARC] = troops
    return units
35,173
def one_hot_encode_test(test, txt_indexes_test):
    """Return the test dataframe with one-hot-encoded textual features.

    Keyword arguments:
    test -- the test dataframe (mutated in place: its object-dtype columns
            are dropped before the dummies are appended)
    txt_indexes_test -- ndarray of test textual column indexes
    """
    dummies = pd.get_dummies(test.iloc[:, txt_indexes_test])
    object_columns = test.select_dtypes('object').columns
    test.drop(object_columns, axis=1, inplace=True)
    return pd.concat([test, dummies], axis=1)
35,174
def get_with_label(label, tree):
    """
    Return the first child of *tree* whose ``label`` matches.

    Raises IndexError when no child carries the label.
    """
    matches = [child for child in tree.children if child.label == label]
    return matches[0]
35,175
def plot_image_retrieval(query_image, query_image_class, query_dataset, queried_dataset, top_distances,
                         top_indices):
    """Prints and plots the results of a retrieval query, showing the query image and the top results and distances.

    Args:
        query_image: tensor with the original image pixels.
        query_image_class: name of the image's class.
        query_dataset: the Dataset that contains the query image.
        queried_dataset: the Dataset that was queried.
        top_distances: one-dimensional tensor with the distances of the query image's embedding to the top k most
            similar images' embeddings.
        top_indices: list of the indices of the top k most similar images in the dataset.

    Returns:

    """
    # (image, class) pairs for the top-k retrieved items.
    aux = [queried_dataset[j] for j in top_indices]
    image_tensors = torch.stack([tup[0] for tup in aux])
    image_classes = [tup[1] for tup in aux]
    print("query image class = {}".format(query_dataset.classes[query_image_class]))
    print("distances = {}".format(top_distances))
    print("classes = {}".format([queried_dataset.classes[class_name] for class_name in image_classes]))
    # Plot the query first, then the retrieved batch.
    plot_image_batch([query_image, query_image_class])
    plot_image_batch([image_tensors, image_classes])
35,176
def test_convertcolor_pipeline(plot=False):
    """
    Test ConvertColor of transforms.

    Exercises the three supported conversion modes against their OpenCV
    equivalents; ``plot`` forwards a visual-inspection flag to the helper.
    """
    logger.info("test_convertcolor_pipeline")
    convert_color(mode.ConvertMode.COLOR_BGR2GRAY, cv2.COLOR_BGR2GRAY, plot)
    convert_color(mode.ConvertMode.COLOR_BGR2RGB, cv2.COLOR_BGR2RGB, plot)
    convert_color(mode.ConvertMode.COLOR_BGR2BGRA, cv2.COLOR_BGR2BGRA, plot)
35,177
def test_text_dataframe_csv():
    """
    Tests if the songs are written into the dataframe, if omitted clauses
    still exist or not, and if the .csv is created.
    """
    namelist = ['eric-clapton']
    loc = os.getcwd()
    df_ = text_dataframe_csv(namelist, loc)
    # No row may contain the placeholder shown for lyrics the site is not
    # authorized to display.
    str_unf = df_[df_['eric-clapton'].str.contains("Unfortunately, we are not authorized to show these lyrics")]
    assert len(str_unf) == 0
    # The CSV must have been written to disk (direct truthiness check
    # instead of comparing against True)...
    assert os.path.isfile('./eric-clapton.csv')
    # ...and contain at least one data row beyond the header.  Use a
    # context manager so the file handle is always closed.
    with open("./eric-clapton.csv") as csv_file:
        num_row = sum(1 for _ in csv_file)
    assert num_row > 1
35,178
def load_state_dicts(checkpoint_file, map_location=None, **kwargs):
    """Load torch items from saved state_dictionaries.

    Each keyword argument is an object exposing ``load_state_dict``; its
    state is restored from the checkpoint entry stored under the same
    keyword name.

    Returns the checkpoint's 'epoch' value when present and truthy,
    otherwise None.
    """
    # torch.load treats map_location=None exactly like omitting it, so a
    # single call covers both branches of the original.
    checkpoint = torch.load(checkpoint_file, map_location=map_location)

    for name, stateful in kwargs.items():
        stateful.load_state_dict(checkpoint[name])

    epoch = checkpoint.get('epoch')
    if epoch:
        return epoch
35,179
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Now it includes resizing and flipping.

    Args:
        cfg: config node; the INPUT.MIN_SIZE_* / MAX_SIZE_* settings are read.
        is_train (bool): training mode adds random color jitter and flipping.

    Returns:
        list[TransformGen]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        # Test time always uses "choice" sampling over the configured sizes.
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    if sample_style == "range":
        # "range" sampling requires exactly a (low, high) pair.
        assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(
            len(min_size)
        )

    logger = logging.getLogger("detectron2.data.detection_utils")
    tfm_gens = []
    tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    if is_train:
        # Training-only augmentations: color jitter in [0.5, 1.5] plus a
        # random flip.
        tfm_gens.append(T.RandomContrast(0.5, 1.5))
        tfm_gens.append(T.RandomBrightness(0.5, 1.5))
        tfm_gens.append(T.RandomSaturation(0.5, 1.5))
        tfm_gens.append(T.RandomFlip())
        logger.info("TransformGens used in training[Updated]: " + str(tfm_gens))

    return tfm_gens
35,180
def initialize_uninitialized_variables(session, var_list=None):
    """Initializes all uninitialized variables.

    Parameters
    ----------
    session: tf.Session
        The TensorFlow session to scan for uninitialized variables
    var_list: list(tf.Variable) or None
        The list of variables to filter for uninitialized ones.
        Defaults to None, in which case tf.all_variables() is used.
    """
    # Find the subset of variables that have not yet been initialized...
    uninit_vars = uninitialized_variables(session, var_list)
    # ...and initialize only those.  NOTE: tf.initialize_variables is the
    # legacy TF1 API (superseded by tf.variables_initializer in later
    # releases).
    session.run(tf.initialize_variables(uninit_vars))
35,181
def test_atomic_g_year_month_min_inclusive_1_nistxml_sv_iv_atomic_g_year_month_min_inclusive_2_3(mode, save_output, output_format):
    """
    Type atomic/gYearMonth is restricted by facet minInclusive with value
    2012-02.
    """
    # Generated NIST conformance case: bind the schema/instance pair and
    # verify the produced bindings for the given mode and output format.
    assert_bindings(
        schema="nistData/atomic/gYearMonth/Schema+Instance/NISTSchema-SV-IV-atomic-gYearMonth-minInclusive-2.xsd",
        instance="nistData/atomic/gYearMonth/Schema+Instance/NISTXML-SV-IV-atomic-gYearMonth-minInclusive-2-3.xml",
        class_name="NistschemaSvIvAtomicGYearMonthMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
35,182
def rain_specific_attenuation(R, f, el, tau):
    """Compute the specific attenuation γ_R (dB/km) given the rainfall rate.

    A method to compute the specific attenuation γ_R (dB/km) from rain. The
    value is obtained from the rainfall rate R (mm/h) using a power law
    relationship.

    .. math::
        \\gamma_R = k R^\\alpha

    Parameters
    ----------
    R : number, sequence, numpy.ndarray or Quantity
        Rain rate (mm/h)
    f : number or Quantity
        Frequency (GHz)
    el : number, sequence, or numpy.ndarray
        Elevation angle of the receiver points
    tau : number, sequence, or numpy.ndarray
        Polarization tilt angle relative to the horizontal (degrees).
        (Tau = 45 deg for circular polarization)

    Returns
    -------
    γ_R: numpy.ndarray
        Specific attenuation from rain (dB/km)

    References
    ----------
    [1] Rain height model for prediction methods:
    https://www.itu.int/rec/R-REC-P.838/en
    """
    # Coerce plain numbers/arrays into the expected units before delegating
    # to the underlying ITU-R model implementation; the result is tagged
    # with dB/km units.
    R = prepare_quantity(R, u.mm / u.hr, 'Rain rate')
    f = prepare_quantity(f, u.GHz, 'Frequency')
    return __model.rain_specific_attenuation(R, f, el, tau) * u.dB / u.km
35,183
def filter(dg, start=None, end=None, tasks=(), skip_with=states.SKIPPED.name):
    """Filters a graph

    Marks every node outside the selected subpath with ``skip_with``.
    NOTE: this shadows the builtin ``filter`` — callers import it by name.

    TODO(dshulyak) skip_with should also support NOOP, which will instead of
    blocking task, and its successors, should mark task as visited

    :param skip_with: SKIPPED or NOOP
    :return: a list of error messages when validation fails, else None
    """
    error_msgs = []
    # Default selection: every node in the graph.
    subpath = dg.nodes()
    if tasks:
        # An explicit task list overrides start/end selection entirely.
        subpath = tasks
    else:
        subgraph = dg
        if start:
            error_msgs = validate(subgraph, start, [], error_msgs)
            if error_msgs:
                return error_msgs
            # Restrict to nodes reachable from the start set.
            subpath = start_from(subgraph, start)
        subgraph = dg.subgraph(subpath)
        if end:
            error_msgs = validate(subgraph, start, end, error_msgs)
            if error_msgs:
                return error_msgs
            # Further restrict to nodes that lead to the end set.
            subpath = end_at(subgraph, end)
    # Everything not selected is marked (side effect on dg's node attrs).
    for node in dg:
        if node not in subpath:
            dg.node[node]['status'] = skip_with
    return None
35,184
def parse_input(usr_input):
    """Main logic of program.

    Parses one line of user input and dispatches the PAYE calculation.
    Returns False when the user asked to quit, True otherwise (the caller
    presumably loops while this returns True).
    """
    usr_input = usr_input.strip()
    if usr_input.upper() == QUIT_KEY:
        #exit logic
        return False
    else:
        usr_input = usr_input.split()
        if len(usr_input) == 1:
            #if only one argument supplied default to weekly
            pay = float(usr_input[0])
            pay_after_tax = round(pay - paye_funcs.calculate_PAYE(pay, "w"), 2)
            print(OUTPUT.format(pay_after_tax))
        elif len(usr_input) == 2:
            #two arguments check if expecting 3 arguments and calculate
            # "-n" requires a number after it, so two args with "-n" is an error.
            if usr_input[1] == '-n':
                print(ERROR_NO_N_NUMBER)
            else:
                decide_calculation(usr_input)
        elif len(usr_input) == 3:
            # Three-argument form: "<pay> -n <days>"
            if usr_input[1] == '-n':
                if usr_input[2].isnumeric():
                    calculate_paye_on_days(usr_input)
                else:
                    print(ERROR_NO_N_NUMBER)
            else:
                print(UNKNOWN_COMMAND)
        # NOTE(review): zero or more-than-three tokens fall through silently.
        return True
35,185
def plot_feature_importances(clf, title='Feature Importance',
                             feature_names=None, max_num_features=20,
                             order='descending', x_tick_rotation=0, ax=None,
                             figsize=None, title_fontsize="large",
                             text_fontsize="medium"):
    """Generates a plot of a classifier's feature importances.

    Args:
        clf: Classifier instance that implements ``fit`` and
            ``predict_proba`` methods. The classifier must also have a
            ``feature_importances_`` attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "Feature importances".

        feature_names (None, :obj:`list` of string, optional): Determines the
            feature names used to plot the feature importances. If None,
            feature names will be numbered.

        max_num_features (int): Determines the maximum number of features to
            plot. Defaults to 20.

        order ('ascending', 'descending', or None, optional): Determines the
            order in which the feature importances are plotted. Defaults to
            'descending'.

        x_tick_rotation (int, optional): Rotates x-axis tick labels by the
            specified angle. This is useful in cases where there are numerous
            categories and the labels overlap each other.

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot.plotters as skplt
        >>> rf = RandomForestClassifier()
        >>> rf.fit(X, y)
        >>> skplt.plot_feature_importances(
        ...     rf, feature_names=['petal length', 'petal width',
        ...                        'sepal length', 'sepal width'])
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_feature_importances.png
           :align: center
           :alt: Feature Importances
    """
    if not hasattr(clf, 'feature_importances_'):
        raise TypeError('"feature_importances_" attribute not in classifier. '
                        'Cannot plot feature importances.')

    importances = clf.feature_importances_

    # For ensembles, derive error bars from the spread of per-estimator
    # importances; otherwise no error bars are drawn.
    if hasattr(clf, 'estimators_')\
            and isinstance(clf.estimators_, list)\
            and hasattr(clf.estimators_[0], 'feature_importances_'):
        std = np.std([tree.feature_importances_ for tree in clf.estimators_],
                     axis=0)
    else:
        std = None

    # Resolve the plotting order into an index permutation.
    if order == 'descending':
        indices = np.argsort(importances)[::-1]
    elif order == 'ascending':
        indices = np.argsort(importances)
    elif order is None:
        indices = np.array(range(len(importances)))
    else:
        raise ValueError('Invalid argument {} for "order"'.format(order))

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    if feature_names is None:
        # Unnamed features are labeled by their (permuted) index.
        feature_names = indices
    else:
        # Reorder the provided names to match the chosen importance order.
        feature_names = np.array(feature_names)[indices]

    max_num_features = min(max_num_features, len(importances))

    ax.set_title(title, fontsize=title_fontsize)

    if std is not None:
        ax.bar(range(max_num_features),
               importances[indices][:max_num_features], color='r',
               yerr=std[indices][:max_num_features], align='center')
    else:
        ax.bar(range(max_num_features),
               importances[indices][:max_num_features],
               color='r', align='center')

    ax.set_xticks(range(max_num_features))
    ax.set_xticklabels(feature_names[:max_num_features],
                       rotation=x_tick_rotation)
    ax.set_xlim([-1, max_num_features])
    ax.tick_params(labelsize=text_fontsize)
    return ax
35,186
def license(soup):
    """Find the license text.

    Returns the extracted license paragraph text, or None when the document
    has no license section.  (Shadows the builtin ``license`` — kept for
    callers.)
    """
    try:
        section = get_license_section(soup)
        return extract_node_text(section[0], "license-p")
    except IndexError:
        # No license section present in the document.
        return None
35,187
def filter_nofix(df, NoFrames):
    """
    Filter for immobilized origami with DNA-PAINT based tracking handle (TH)
    as described in `spt`_. Positives are groups

    - with a trajectory within the first 5 frames after the start of the
      measurement
    - and number localizations within group are greater or equal to 20% of
      total measurement duration (in frames)

    Args:
        df(pandas.DataFrame): Immobile properties as calculated by
            apply_props()
        NoFrames(int): Total measurement duration in frames.
    Returns:
        pandas.DataFrame: Positives in ``df`` according to TH filter as
        described above.
    """
    # Trajectory must start within the first 5 frames...
    early_start = df.min_frame <= 5
    # ...and occupy at least 20% of the measurement duration.
    sufficient_occupancy = (df.n_locs / NoFrames) >= 0.2
    return df.loc[early_start & sufficient_occupancy, :]
35,188
def read_array(cls, start=None,end=None,weight=None,use_datetime = False, convert_delta = False):
    """
    Read arrays of values for start, end and weight values that represent
    either the cummulative value of the data steps or the direct step
    values seperately, indexed by the start and possibly end arrays.

    Parameters
    ==============
    start : array_like
        An array of step start location values.
    end : array_like, Optional
        An array of step end location values.
    weight : array_like, Optional
        An array of step weight values, if these are not provided, a value
        of 1 will be assigned for each row entry.
    use_datetime : bool, Optional
        Assume start and end fields are of datetime format
        (Numpy.datetime64, datetime or Pandas.Timestamp).
    convert_delta : bool, Optional
        Assume weight values are individual step weights (default), or
        convert values by performing a delta between adjacent values.

    The data is assumed to be sorted by the provided start values.

    Returns
    ==============
    Steps

    See Also
    ==============
    read_dataframe
    read_dict

    """

    if hasattr(start,'__iter__') or hasattr(end,'__iter__'):
        #needs to be an array like object
        if convert_delta:
            # Treat weights as cumulative values: the first entry is the
            # baseline, the rest become per-step deltas.
            # NOTE(review): this path dereferences weight[0], so weight must
            # be provided when convert_delta=True — confirm with callers.
            weight0 = 0
            if weight[0] !=0:
                weight0 = weight[0]
            if weight0 !=0 and not pd.isnull(start[0]):
                # Non-zero baseline: diff shortens weight by one (presumably
                # aligned with start[1:] by add_direct — TODO confirm), and
                # the baseline is added as a separate step at epoch start.
                weight = np.diff(weight)
                new_steps = cls(use_datetime).add_direct(start,end,weight)
                new_steps.add_steps([[get_epoch_start(False),1,weight0]])
            else:
                # Zero baseline: prepend 0 so the diff keeps the original
                # length and alignment.
                weight = np.diff(weight,prepend=0)
                new_steps = cls(use_datetime).add_direct(start,end,weight)
        else:
            # Weights are already direct per-step values.
            new_steps = cls(use_datetime).add_direct(start,end,weight)
        return new_steps
    else:
        raise TypeError("input data must be array like, python array or ndarray.")
35,189
def store_nugget_nodes(gold_nuggets, sys_nuggets, m_mapping):
    """
    Store nuggets as unified time ML nodes.

    Every gold nugget receives a node id "te<N>"; when the mapping pairs it
    with a system nugget (index != -1) the two share the same node.  System
    nuggets left unmapped get fresh node ids afterwards.

    :param gold_nuggets: gold nugget ids, ordered as in m_mapping
    :param sys_nuggets: system nugget ids
    :param m_mapping: per-gold-nugget (system_index, score) pairs
    :return: (gold node ids, system node ids,
              gold nugget id -> node id, system nugget id -> node id)
    """
    gold_nodes = []
    sys_nodes = []
    gold_nugget_to_node = {}
    system_nugget_to_node = {}
    mapped_system_mentions = set()

    next_tid = 0
    for gold_index, (system_index, _) in enumerate(m_mapping):
        node_id = "te%d" % next_tid
        next_tid += 1

        # Every gold nugget gets its own node.
        gold_nugget_to_node[gold_nuggets[gold_index]] = node_id
        gold_nodes.append(node_id)

        # A mapped system nugget shares the gold nugget's node.
        if system_index != -1:
            system_nugget_to_node[sys_nuggets[system_index]] = node_id
            sys_nodes.append(node_id)
            mapped_system_mentions.add(system_index)

    # Unmapped system nuggets get fresh nodes of their own.
    for system_index, system_nugget in enumerate(sys_nuggets):
        if system_index in mapped_system_mentions:
            continue
        node_id = "te%d" % next_tid
        next_tid += 1
        system_nugget_to_node[system_nugget] = node_id
        sys_nodes.append(node_id)

    return gold_nodes, sys_nodes, gold_nugget_to_node, system_nugget_to_node
35,190
def return_union_item(item):
    """union of statements, next statement"""
    return f" __result.update({item})"
35,191
def normalize_basename(s, force_lowercase=True, maxlen=255):
    """Replace filesystem-hostile characters in *s* and bound its length.

    Characters are substituted via a fixed translation table:

        trans_table = {" ": "_", "/": "_slash_", "\\\\": "_backslash_",
                       "?": "_question_", "%": "_percent_",
                       "*": "_asterisk_", ":": "_colon_", "|": "_bar_",
                       '"': "_quote_", "<": "_lt_", ">": "_gt_",
                       "&": "_amp_"}

    If the generated name is longer than maxlen, it is truncated and
    suffixed with "_" plus the 8-hex-digit hash of the full name (modulo
    0xffffffff), so the result never exceeds maxlen characters.
    (The previous version truncated to ``maxlen - 8`` and then appended 9
    characters, producing names of length ``maxlen + 1``.)

    Args:
        s: the input name.
        force_lowercase: lowercase the name before translating.
        maxlen: maximum length of the returned name.

    Returns:
        The normalized name, at most ``maxlen`` characters long.
    """
    trans_table = {" ": "_",
                   "/": "_slash_",
                   "\\": "_backslash_",
                   "?": "_question_",
                   "%": "_percent_",
                   "*": "_asterisk_",
                   ":": "_colon_",
                   "|": "_bar_",
                   '"': "_quote_",
                   "<": "_lt_",
                   ">": "_gt_",
                   "&": "_amp_"}
    l = s.lower() if force_lowercase else s
    n = "".join(trans_table.get(x, x) for x in l)
    if len(n) > maxlen:
        # Truncated stem (maxlen - 9) + "_" + 8 hex chars == maxlen total.
        h = format(hash(n) & 0xffffffff, "08x")
        n = n[:maxlen - 9] + "_" + h
    return n
35,192
def download_from_url(url, dst):
    """
    kindly used from
    https://gist.github.com/wy193777/0e2a4932e81afc6aa4c8f7a2984f34e2

    Downloads *url* to *dst*, resuming a partial download when the file
    already exists.

    @param: url to download file
    @param: dst place to put the file
    @return: total file size in bytes (also returned when nothing was
        downloaded because the local file is already complete)
    """
    # Total size advertised by the server (HEAD request).
    file_size = int(requests.head(url).headers["Content-Length"])
    # Resume from the size of any partially-downloaded local file.
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)
    else:
        first_byte = 0
    if first_byte >= file_size:
        # Already fully downloaded.
        return file_size
    # NOTE(review): per RFC 7233 the Range end should be file_size - 1;
    # servers generally tolerate the overlong end value, but confirm.
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(
        total=file_size, initial=first_byte,
        unit='B', unit_scale=True, desc=url.split('/')[-1])
    req = requests.get(url, headers=header, stream=True)
    with(open(dst, 'ab')) as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                # NOTE(review): the final chunk may be shorter than 1024
                # bytes, so the bar can slightly overshoot.
                pbar.update(1024)
    pbar.close()
    return file_size
35,193
def _get_toc_string_from_log(file_handle):
    """ Returns a toc string or None for a given log file (EAC or XLD)

    The TOC string is "1 <num_tracks> <leadout_offset> <offset...>", with
    all offsets in CD frames including the 150-frame pregap.

    Copyright (c) 2018 Konstantin Mochalov
    Released under the MIT License
    Original source: https://gist.github.com/kolen/765526
    """
    def _filter_toc_entries(file_handle):
        """ Take file handle, return iterator of toc entries """
        # First: scan forward until the 5-column TOC table header is found.
        while True:
            line = file_handle.readline()
            # TOC table header:
            if re.match(r""" \s*
                .+\s+ \| (?#track)
                \s+.+\s+ \| (?#start)
                \s+.+\s+ \| (?#length)
                \s+.+\s+ \| (?#start sec)
                \s+.+\s*$ (?#end sec)
                """, line, re.X):
                # Skip the separator line under the header.
                file_handle.readline()
                break

        # Then: yield one dict per table row until a non-matching line.
        while True:
            line = file_handle.readline()
            m = re.match(r"""
                ^\s*
                (?P<num>\d+)
                \s*\|\s*
                (?P<start_time>[0-9:.]+)
                \s*\|\s*
                (?P<length_time>[0-9:.]+)
                \s*\|\s*
                (?P<start_sector>\d+)
                \s*\|\s*
                (?P<end_sector>\d+)
                \s*$
                """, line, re.X)
            if not m:
                break
            yield m.groupdict()

    # Standard CD pregap length in frames (2 seconds at 75 frames/s).
    PREGAP = 150
    try:
        entries = list(_filter_toc_entries(file_handle))
        num_entries = len(entries)

        # Track numbers must be exactly 1..N, otherwise bail out.
        tracknums = [int(e['num']) for e in entries]
        if [x for x in range(1, num_entries+1)] != tracknums:
            # Non-standard track number sequence
            return None
        leadout_offset = int(entries[-1]['end_sector']) + PREGAP + 1
        offsets = [(int(x['start_sector']) + PREGAP) for x in entries]
        toc_numbers = [1, num_entries, leadout_offset] + offsets
        return " ".join(str(x) for x in toc_numbers)
    except Exception as e:
        # can fail if the log file is malformed (best-effort parser:
        # report and fall through to None)
        print("Ignoring log file because of the following error:")
        print(e)
        pass
    return None
35,194
def _matrix_M_entry(row, col): """Returns one entry for the matrix that maps alpha to theta. See Eq. (3) in `Möttönen et al. (2004) <https://arxiv.org/pdf/quant-ph/0407010.pdf>`_. Args: row (int): one-based row number col (int): one-based column number Returns: (float): transformation matrix entry at given row and column """ # (col >> 1) ^ col is the Gray code of col b_and_g = row & ((col >> 1) ^ col) sum_of_ones = 0 while b_and_g > 0: if b_and_g & 0b1: sum_of_ones += 1 b_and_g = b_and_g >> 1 return (-1) ** sum_of_ones
35,195
def get_environment_variable_names():
    """Helper to return names of environment variables queried.

    Returns:
        tuple: (name of the environment variable controlling the log level,
                name of the environment variable controlling logging to file)
    """
    log_file_var_name = mwi_env.get_env_name_log_file()
    log_level_var_name = mwi_env.get_env_name_logging_level()
    return log_level_var_name, log_file_var_name
35,196
def examine_vmx(dsname):
    """
    function to download any vmx file passed to it via the datastore browser
    and find the 'vc.uuid' and 'displayName'

    Only VMs whose vmware.log has been quiet since DATE_IN_PAST are
    recorded; results are written into the module-level DS_VM dict keyed by
    the normalized vc.uuid.
    """
    args = get_args()
    try:
        # VMX_PATH is a module-level list of datastore-browser URLs.
        for file_vmx in VMX_PATH:
            # print(file_vmx)
            username = args.user
            password = args.password
            # The vmware.log lives next to the .vmx; keep the datastore
            # query string from the original URL.
            log_url = urljoin(file_vmx, 'vmware.log') + '?' + urlsplit(file_vmx).query
            r = requests.get(log_url, auth=(username, password))
            if r.status_code == requests.codes.ok:
                logfile = r.text.splitlines()
                last_line = logfile[-1]
                # Timestamp of the last log entry; 'Z' is rewritten to 'UTC'
                # so %Z can parse it.
                log_timestamp = datetime.strptime(last_line.split('|')[0].replace('Z', 'UTC'), "%Y-%m-%dT%H:%M:%S.%f%Z")
                if log_timestamp < DATE_IN_PAST:
                    # print(log_timestamp.isoformat() + " is before " + DATE_IN_PAST.isoformat())
                    # VM has been idle long enough: fetch the .vmx itself.
                    vmxfile = requests.get(file_vmx, auth=(username, password)).text.splitlines()
                    for line in vmxfile:
                        if line.startswith("displayName"):
                            dn = line
                        elif line.startswith("vc.uuid"):
                            vcid = line
                            # print(line)
                    # NOTE(review): dn/vcid are unbound if the .vmx lacks
                    # these keys — the surrounding try/except would swallow
                    # the resulting NameError.
                    # Normalize 'vc.uuid = "...-..."' down to a bare hex id.
                    uuid = vcid.replace('"', "")
                    uuid = uuid.replace("vc.uuid = ", "")
                    uuid = uuid.strip("\n")
                    uuid = uuid.replace(" ", "")
                    uuid = uuid.replace("-", "")
                    # Normalize 'displayName = "..."' down to the bare name.
                    newdn = dn.replace('"', "")
                    newdn = newdn.replace("displayName = ", "")
                    newdn = newdn.strip("\n")
                    # Derive "<datastore>/<vm folder>" from the URL path.
                    vmfold = file_vmx.split("folder/")
                    vmfold = vmfold[1].split("/")
                    vmfold = vmfold[0]
                    dspath = "%s/%s" % (dsname, vmfold)
                    tempds_vm = [newdn, dspath]
                    DS_VM[uuid] = tempds_vm
                    # print(newdn + "'s last log entry was " + log_timestamp.isoformat())
    except Exception as e:
        print("Caught exception in examine_vmx function : " + str(e))
35,197
def get_device_total_memory(index=0):
    """Return total memory (bytes) of the CUDA device with the given index."""
    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(index)
    return pynvml.nvmlDeviceGetMemoryInfo(handle).total
35,198
def repeat_elements(x, rep, axis):
    """Repeats the elements of a tensor along an axis, like `np.repeat`.

    If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output will have
    shape `(s1, s2 * rep, s3)`.

    # Arguments
        x: Tensor or variable.
        rep: Python integer, number of times to repeat.
        axis: Axis along which to repeat.

    # Returns
        A tensor.
    """
    repeated = mx.sym.repeat(x.symbol, repeats=rep, axis=axis)
    return KerasSymbol(repeated)
35,199