content
stringlengths
22
815k
id
int64
0
4.91M
def create_background_consumers(count, before_start=None, target='run', *args, **kwargs):
    """Create Consumer instances on background threads, start them, and return them.

    :param count: The number of Consumer instances to start.
    :param before_start: A callable invoked with each consumer before any
        thread is started (e.g. for registration or configuration).
    :param target: Name of the Consumer method each thread will run.

    Remaining positional and keyword arguments are forwarded to the
    ``Consumer`` initializer.

    :returns: A tuple ``([consumer], [thread])``.
    """
    # Build all consumers first, then one thread per consumer.
    consumers = [Consumer(*args, **kwargs) for _ in range(count)]
    threads = [
        threading.Thread(target=partial(_consumer_run_and_close_connection, consumer, target))
        for consumer in consumers
    ]
    # Run the pre-start hook before any thread begins executing.
    if before_start:
        for consumer in consumers:
            before_start(consumer)
    for thread in threads:
        thread.start()
    return (consumers, threads)
5,326,700
def linkGen(year):
    """Generate the download link for a year's od-trips data archive.

    :param year: year as an int (or numeric string) between 2016 and the
        current year inclusive.
    :returns: the download URL string.

    On invalid input an error message is printed and the process exits
    (preserving the original behaviour).
    """
    current_year = datetime.datetime.now().year
    # BUG FIX: was a bare `except:` around the whole check, which swallowed
    # every error. Parse and range-check explicitly instead.
    try:
        year_int = int(year)
    except (TypeError, ValueError):
        year_int = None
    if year_int is not None and 2016 <= year_int <= current_year:
        return f"http://dev.hsl.fi/citybikes/od-trips-{year}/od-trips-{year}.zip"
    print(f"Incorrect input.\nThe value should be an integer between 2016 and {current_year}")
    quit()
5,326,701
def factory_dict(value_factory, *args, **kwargs):
    """Return a dict whose missing keys are computed lazily by `value_factory`.

    Only ``__getitem__`` triggers lazy computation; other accessors such as
    ``get`` will NOT compute missing values.

    :param value_factory: A function from dict key to value.
    :param *args: Any positional args to pass through to `dict`.
    :param **kwargs: Any kwargs to pass through to `dict`.
    :rtype: dict
    """
    class _LazyDict(collections.defaultdict):
        def __init__(self):
            # The default factory is a sentinel only; __missing__ does the work.
            super().__init__(_LazyDict._fail_fast, *args, **kwargs)

        @staticmethod
        def _fail_fast():
            raise AssertionError('The default factory should never be called since we override '
                                 '__missing__.')

        def __missing__(self, key):
            computed = value_factory(key)
            self[key] = computed
            return computed

    return _LazyDict()
5,326,702
def squash():
    """Squash an image and remove layers.

    Not supported for the singularity backend; only prints a notice.
    """
    notice = "[maple.image.squash] not available for singularity backend"
    print(notice)
5,326,703
def build_or_passthrough(model, obj, signal):
    """Build `obj` on `signal` with `model`, or pass `signal` through if `obj` is None."""
    if obj is None:
        return signal
    return model.build(obj, signal)
5,326,704
def dump(value, filename, *, compress=("zlib", 7), protocol=HIGHEST_PROTOCOL):
    """Dump a Python object to disk via joblib.

    :param value: object to serialize.
    :param filename: destination path; parent directories are created as needed.
    :param compress: joblib compression spec ``(codec, level)``.
    :param protocol: pickle protocol to use.

    Errors are logged (not raised), preserving the original best-effort contract.
    """
    filename = os.path.abspath(filename)
    try:
        try:
            # sometimes the latter won't work where the externals does despite same __version__
            from sklearn.externals.joblib import dump as jobdump
        except Exception:
            from joblib import dump as jobdump
        # IMPROVED: exist_ok avoids the check-then-create race.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        jobdump(value, filename, compress=compress, protocol=protocol)
    except Exception as e:
        # BUG FIX: `"..." + e` raised TypeError (str + Exception); format instead.
        logger.error("Unexpected error in dump: %s", e)
5,326,705
def load_generated_energy_gwh_yearly_irena():
    """Returns xr.DataArray with dims=year and integer as coords, not timestamp!"""
    csv_path = INPUT_DIR / "energy_generation_irena" / "irena-us-generation.csv"
    generation_df = pd.read_csv(
        csv_path,
        delimiter=";",
        names=("year", "generation"),
    )
    # Source file is in TWh; scale by 1e3 to return GWh.
    yearly = xr.DataArray(
        generation_df.generation,
        dims="year",
        coords={"year": generation_df.year},
    )
    return 1e3 * yearly
5,326,706
def warc_url(url):
    """Search the WARC archived version of the URL

    :returns: The WARC URL if found, else None
    :raises RuntimeError: when the availability API responds with an error status
    """
    query = "http://archive.org/wayback/available?url={}".format(url)
    response = requests.get(query)
    # requests.Response is falsy for non-2xx status codes.
    if not response:
        raise RuntimeError()
    snapshots = json.loads(response.text)["archived_snapshots"]
    return snapshots["closest"]["url"] if snapshots else None
5,326,707
def check_gpu():
    """Return True when GPUs are available on this machine, else False."""
    try:
        # Touching the device list raises CudaSupportError without CUDA support.
        cuda.gpus.lst
    except cuda.CudaSupportError:
        return False
    return True
5,326,708
def vm_impl_avg_pool(self):
    """Generate vm_impl function for AvgPool"""
    def vm_impl(x):
        # Pool on the numpy view, then re-wrap as a Tensor.
        data = x.asnumpy()
        pooled = vm.avg_pooling(data, self.ksize[-2], self.ksize[-1], self.strides[-2])
        return Tensor(pooled)
    return vm_impl
5,326,709
def daemonize(stdin=os.devnull, stdout=os.devnull, stderr=None, pidfile=None, exit=True, chdir='/'):
    """
    Does a double-fork to daemonize the current process using the technique
    described at http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 .

    If exit is True (default), parent exits immediately. If false, caller
    will receive the pid of the forked child.
    """
    target_dir = chdir if chdir else os.getcwd()
    # First fork.
    try:
        pid = os.fork()
        if pid > 0:
            if exit:
                # Exit from the first parent.
                sys.exit(0)
            # Wait for child to fork again (otherwise we have a zombie)
            os.waitpid(pid, 0)
            return pid
    except OSError:
        sys.exit(1)
    os.chdir(target_dir)
    os.setsid()
    # Second fork.
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from the second parent.
            sys.exit(0)
    except OSError:
        sys.exit(1)
    # Create new standard file descriptors.
    if not stderr:
        stderr = stdout
    stdin = open(stdin, 'rb')
    stdout = open(stdout, 'a+b')
    stderr = open(stderr, 'a+b', 0)
    if pidfile:
        # BUG FIX: was `open(pidfile, 'w+b').write("%d\n" % os.getpid())` —
        # writing a str to a binary-mode file raises TypeError on Python 3,
        # and the handle was never closed.
        with open(pidfile, 'w') as pf_handle:
            pf_handle.write("%d\n" % os.getpid())
    # Remap standard fds.
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
5,326,710
def plotAllPoints(x, y, z, f, x0, con):
    """Plot initial 3D points and SLSQP optimization results at several iteration caps.

    Args:
        x: initial x points
        y: initial y points
        z: initial z points
        f: objective function for optimization
        x0: flattened initial values passed to the objective function
        con: list of dicts of constraints placed on the values

    Returns:
        (fig, out01k, out1k, out2k, out4k): the figure and the four
        optimization results (100 / 1000 / 2000 / 4000 iteration caps).
    """
    fig = plt.figure(num=5006)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x, y, z, color='blue')
    plt.rc('axes', linewidth=2)
    plt.rc('lines', linewidth=2)
    plt.rc('font', weight='bold')
    plt.show(block=False)
    out01k = minimize(f, x0, method='SLSQP', constraints=(con),
                      options={'ftol': 1e-4, 'maxiter': 100})
    out01kx, out01ky, out01kz = splitOut(out01k)
    ax.scatter(out01kx, out01ky, out01kz, color='purple')
    out1k = minimize(f, x0, method='SLSQP', constraints=(con),
                     options={'ftol': 1e-4, 'maxiter': 1000})
    out1kx, out1ky, out1kz = splitOut(out1k)
    ax.scatter(out1kx, out1ky, out1kz, color='red')
    plt.show(block=False)
    out2k = minimize(f, x0, method='SLSQP', constraints=(con),
                     options={'ftol': 1e-4, 'maxiter': 2000})
    out2kx, out2ky, out2kz = splitOut(out2k)
    ax.scatter(out2kx, out2ky, out2kz, color='green')
    plt.show(block=False)
    out4k = minimize(f, x0, method='SLSQP', constraints=(con),
                     options={'ftol': 1e-4, 'maxiter': 4000})
    out4kx, out4ky, out4kz = splitOut(out4k)
    ax.scatter(out4kx, out4ky, out4kz, color='cyan')
    # BUG FIX: loc was 'uplter left', which matplotlib rejects as an invalid
    # legend location; the intended value is 'upper left'.
    plt.legend(['Initial', '100 iter.', '1k iter.', '2k iter.', '4k iter.'], loc='upper left')
    ax.set_xlabel('X', weight='bold')
    ax.set_ylabel('Y', weight='bold')
    ax.set_zlabel('Z', weight='bold')
    plt.title('Points Distributed on a Sphere', weight='bold')
    plt.show(block=False)
    # To Save this figure:
    # gca()
    # savefig('figurename.png')
    return fig, out01k, out1k, out2k, out4k
5,326,711
def plot_sfr_vout_correlation_with_binning(OIII_outflow_results, OIII_outflow_error, hbeta_outflow_results, hbeta_outflow_error, hbeta_no_outflow_results, hbeta_no_outflow_error, BIC_outflow, BIC_no_outflow, statistical_results, z, radius, header, data_descriptor, plot_data_fits=False):
    """
    Plot the Pearson correlation between SFR surface density and outflow
    velocity, as a function of (circularised) spaxel bin radius, for several
    data selections. Sigma_SFR is calculated using only the narrow component.

    Parameters
    ----------
    OIII_outflow_results : :obj:'~numpy.ndarray'
        per-dataset outflow results from KOFFEE for the OIII line; used to
        calculate the outflow velocity. Should be (7, statistical_results.shape)
    OIII_outflow_error : :obj:'~numpy.ndarray'
        per-dataset outflow result errors from KOFFEE for the OIII line
    hbeta_outflow_results : :obj:'~numpy.ndarray'
        per-dataset outflow results from KOFFEE for the Hbeta line; used to
        calculate Sigma_SFR. Should be (7, statistical_results.shape)
    hbeta_outflow_error : :obj:'~numpy.ndarray'
        per-dataset outflow result errors from KOFFEE for the Hbeta line
    hbeta_no_outflow_results : :obj:'~numpy.ndarray'
        per-dataset single-gaussian results from KOFFEE for the Hbeta line.
        Should be (4, statistical_results.shape)
    hbeta_no_outflow_error : :obj:'~numpy.ndarray'
        per-dataset single-gaussian result errors from KOFFEE for the Hbeta line
    BIC_outflow : :obj:'~numpy.ndarray'
        BIC values from the double gaussian fits (usually chi_square[1,:,:])
    BIC_no_outflow : :obj:'~numpy.ndarray'
        BIC values from the single gaussian fits (usually chi_square[0,:,:])
    statistical_results : :obj:'~numpy.ndarray'
        statistical results from KOFFEE; >0 marks spaxels with an outflow fit
    z : float
        redshift
    radius : :obj:'~numpy.ndarray'
        per-dataset galaxy radius values
    header : FITS header object
        per-dataset headers from the fits files (CD1_2/CD2_1 are read)
    data_descriptor : sequence of str
        per-dataset labels used in the printed summaries
    plot_data_fits : boolean
        whether to plot fits on top of the plot (default False)
        NOTE(review): currently unused in the body — confirm intent.

    Returns
    -------
    None; shows the figure and prints per-dataset summary statistics.
    """
    #create figure
    plt.rcParams.update(pf.get_rc_params())
    #fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True, sharex=True, figsize=(8,4), constrained_layout=True)
    fig, ax = plt.subplots(nrows=1, ncols=1, sharey=True, sharex=True, figsize=(5,4), constrained_layout=True)

    #get colours from cmasher
    colours = cmr.take_cmap_colors('cmr.gem', 3, cmap_range=(0.25, 0.85), return_fmt='hex')

    #create arrays to save results to (one entry per input dataset)
    spaxel_area = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_all = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_physical = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_strong = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_all_median = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_physical_median = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)
    corr_coeff_strong_median = np.full(len(OIII_outflow_results), np.nan, dtype=np.double)

    #calculate the proper distance (kpc per arcsec at redshift z)
    proper_dist = cosmo.kpc_proper_per_arcmin(z).to(u.kpc/u.arcsec)

    #iterate through all of the data sets
    for i in np.arange(len(OIII_outflow_results)):
        #calculate the outflow velocity
        vel_disp, vel_disp_err, vel_diff, vel_diff_err, vel_out, vel_out_err = calc_outvel.calc_outflow_vel(OIII_outflow_results[i], OIII_outflow_error[i], statistical_results[i], z)

        #calculate the sfr surface density - using just the systemic line, and
        #including the flux line. Don't include extinction since this was
        #included in the continuum subtraction using ppxf.
        sfr, sfr_err, total_sfr, sfr_surface_density, sfr_surface_density_err = calc_sfr.calc_sfr_koffee(hbeta_outflow_results[i], hbeta_outflow_error[i], hbeta_no_outflow_results[i], hbeta_no_outflow_error[i], statistical_results[i], z, header[i], include_extinction=False, include_outflow=False)

        #get the sfr for the outflow spaxels
        flow_mask = (statistical_results[i]>0) #& (sfr_surface_density>0.1)

        #flatten all the arrays and get rid of extra spaxels
        sig_sfr = sfr_surface_density[flow_mask]
        sig_sfr_err = sfr_surface_density_err[flow_mask]
        vel_out = vel_out[flow_mask]
        vel_out_err = vel_out_err[flow_mask]
        BIC_outflow_masked = BIC_outflow[i][flow_mask]
        BIC_no_outflow_masked = BIC_no_outflow[i][flow_mask]
        vel_disp = vel_disp[flow_mask]
        radius_masked = radius[i][flow_mask]

        #create BIC diff (more negative = double gaussian strongly preferred)
        BIC_diff = BIC_outflow_masked - BIC_no_outflow_masked
        #BIC_diff_weak = (BIC_diff < -10) & (BIC_diff >= -30)
        #BIC_diff_moderate = (BIC_diff < -30) & (BIC_diff >= -50)
        BIC_diff_strong = (BIC_diff < -50)

        #physical limits mask -
        #for the radius mask 6.1" is the 90% radius
        #also mask out the fits which lie on the lower limit of dispersion < 51km/s
        physical_mask = (radius_masked < 6.1) & (vel_disp>51)
        print(sig_sfr[physical_mask])
        print(sig_sfr[physical_mask].shape)

        #strong BIC and physical limits mask
        #clean_mask = (radius < 6.1) & (vel_disp > 51) & (BIC_diff < -50)

        #make sure none of the errors are nan values
        vel_out_err[np.where(np.isnan(vel_out_err)==True)] = np.nanmedian(vel_out_err)

        #do the calculations for all the bins
        num_bins = 5
        min_bin = None #-0.05
        max_bin = None #0.6
        bin_center_all, v_out_bin_medians_all, v_out_bin_lower_q_all, v_out_bin_upper_q_all = pf.binned_median_quantile_log(sig_sfr, vel_out, num_bins=num_bins, weights=None, min_bin=min_bin, max_bin=max_bin)
        bin_center_physical, v_out_bin_medians_physical, v_out_bin_lower_q_physical, v_out_bin_upper_q_physical = pf.binned_median_quantile_log(sig_sfr[physical_mask], vel_out[physical_mask], num_bins=num_bins, weights=None, min_bin=min_bin, max_bin=max_bin)
        bin_center_clean, v_out_bin_medians_clean, v_out_bin_lower_q_clean, v_out_bin_upper_q_clean = pf.binned_median_quantile_log(sig_sfr[BIC_diff_strong], vel_out[BIC_diff_strong], num_bins=num_bins, weights=None, min_bin=min_bin, max_bin=max_bin)

        #calculate the r value for the median values
        r_vel_out_med_all, p_value_v_out_all = pf.pearson_correlation(bin_center_all, v_out_bin_medians_all)
        r_vel_out_med_physical, p_value_v_out_physical = pf.pearson_correlation(bin_center_physical, v_out_bin_medians_physical)
        r_vel_out_med_clean, p_value_v_out_clean = pf.pearson_correlation(bin_center_clean, v_out_bin_medians_clean)

        #calculate the r value for all the values
        r_vel_out_all, p_value_v_out_all = pf.pearson_correlation(sig_sfr, vel_out)
        r_vel_out_physical, p_value_v_out_physical = pf.pearson_correlation(sig_sfr[physical_mask], vel_out[physical_mask])
        r_vel_out_clean, p_value_v_out_clean = pf.pearson_correlation(sig_sfr[BIC_diff_strong], vel_out[BIC_diff_strong])

        #save results to arrays; spaxel area in kpc^2 via the CD matrix terms
        spaxel_area[i] = ((header[i]['CD1_2']*60*60*header[i]['CD2_1']*60*60)*(proper_dist**2)).value
        corr_coeff_all[i] = r_vel_out_all
        corr_coeff_physical[i] = r_vel_out_physical
        corr_coeff_strong[i] = r_vel_out_clean
        corr_coeff_all_median[i] = r_vel_out_med_all
        corr_coeff_physical_median[i] = r_vel_out_med_physical
        corr_coeff_strong_median[i] = r_vel_out_med_clean

        #print average numbers for the different panels
        print(data_descriptor[i])
        print('Number of spaxels with outflows', vel_out.shape)
        print('All spaxels median v_out:', np.nanmedian(vel_out))
        print('All spaxels standard deviation v_out:', np.nanstd(vel_out))
        print('All spaxels median sigma_sfr:', np.nanmedian(sig_sfr))
        print('All spaxels standard deviation sigma_sfr:', np.nanstd(sig_sfr))
        print('')
        print('Number of spaxels with broad sigmas at the instrument dispersion:', vel_out[vel_disp<=51].shape)
        print('')
        print('Number of spaxels beyond R_90:', vel_out[radius_masked>6.1].shape)
        print('')
        print('Number of spaxels in the middle panel:', vel_out[physical_mask].shape)
        print('')
        print('Physical spaxels median v_out:', np.nanmedian(vel_out[physical_mask]))
        print('Physical spaxels standard deviation v_out:', np.nanstd(vel_out[physical_mask]))
        print('Physical spaxels median sigma_sfr:', np.nanmedian(sig_sfr[physical_mask]))
        print('Physical spaxels standard deviation sigma_sfr:', np.nanstd(sig_sfr[physical_mask]))
        print('')
        print('Number of spaxels with strong BIC differences:', vel_out[BIC_diff_strong].shape)
        print('')
        print('Clean spaxels median v_out:', np.nanmedian(vel_out[BIC_diff_strong]))
        print('Clean spaxels standard deviation v_out:', np.nanstd(vel_out[BIC_diff_strong]))
        print('Clean spaxels median sigma_sfr:', np.nanmedian(sig_sfr[BIC_diff_strong]))
        print('Clean spaxels standard deviation sigma_sfr:', np.nanstd(sig_sfr[BIC_diff_strong]))
        print('')

    #convert spaxel area to circularised radius Area = pi*r^2
    #so r = sqrt(Area/pi)
    circularised_radius = np.sqrt(spaxel_area/np.pi)

    #-------
    #plot it
    #-------
    ax.plot(circularised_radius, corr_coeff_all, marker='o', label='S/N>20 and $\delta_{BIC}$<-10', color=colours[0])
    ax.plot(circularised_radius, corr_coeff_physical, marker='o', label=r'$\delta_{BIC}$<-10, $r$<$r_{90}$ and $\sigma_{broad}$>$\sigma_{inst}$', color=colours[1])
    ax.plot(circularised_radius, corr_coeff_strong, marker='o', label='strongly likely BIC $\delta_{BIC}$<-50', color=colours[2])
    #ax[1].scatter(spaxel_area, corr_coeff_all_median, marker='s', s=20, color=colours[0])
    #ax[1].scatter(spaxel_area, corr_coeff_physical_median, marker='s', s=20, color=colours[1])
    #ax[1].scatter(spaxel_area, corr_coeff_strong_median, marker='s', s=20, color=colours[2])
    lgnd = ax.legend(frameon=True, fontsize='small', loc='upper left', framealpha=0.5)
    ax.set_ylabel('Pearson Correlation Coefficient')
    ax.set_xlabel('Circularised Bin Radius [kpc]')
    #ax[0].set_xlabel('Spaxel Area [kpc$^{2}$]')
    #ax[1].set_xlabel('Spaxel Area [kpc$^{2}$]')
    #ax[0].set_title('All points')
    #ax[1].set_title('Median values')
    plt.show()
5,326,712
def add_samples(request, product_id):
    """Adds passed samples (by request body) to product with passed id.

    POST keys of the form ``product-<id>`` identify the samples to add;
    all other keys are ignored. Responds with a JSON payload containing the
    re-rendered samples inline HTML and a success message.
    """
    parent_product = Product.objects.get(pk=product_id)
    for temp_id in request.POST.keys():
        # Only keys named like "product-<id>" carry sample ids.
        if temp_id.startswith("product") is False:
            continue
        temp_id = temp_id.split("-")[1]
        add_sample(parent_product, temp_id)
    # This isn't necessary but it cleans the cache. See lfs.cache listeners
    # for more
    parent_product.save()
    html = [["#samples-inline", manage_samples_inline(request, product_id, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Samples have been added.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
5,326,713
def array_to_top_genes(data_array, cluster1, cluster2, is_pvals=False, num_genes=10):
    """Return the top genes and their values for a cluster pair.

    Given a data_array of shape (k, k, genes), selects the slice for
    (cluster1, cluster2) and returns the indices and values of the top
    `num_genes` entries: ascending when `is_pvals` (smaller is better),
    descending otherwise.
    """
    scores = data_array[cluster1, cluster2, :]
    ranking = scores.argsort() if is_pvals else scores.argsort()[::-1]
    top = ranking[:num_genes]
    return top, scores[top]
5,326,714
def parse_fastani_write_prophage(CRISPR_mates):
    """Parse per-cluster fastANI output and write prophage annotation files.

    For each bacterial cluster in `CRISPR_mates`, reads
    ``fastani/bac<cluster>.viruses.fastani.txt`` (if present), keeps hits with
    ANI >= 95 and alignment coverage >= 75%, and writes one annotation line
    per hit to ``prophageannotations/<cluster>.prophage.annotations.txt``.

    Args:
        CRISPR_mates: iterable of bacterial cluster identifiers (strings used
            to build file names) — TODO confirm exact type against callers.

    Returns:
        dict: {viral_cluster: {bacterial_bin: 1}} recording which bacterial
        bins each viral cluster was found in as a prophage.
    """
    if not os.path.exists('prophageannotations'):
        os.system('mkdir -p prophageannotations')
    viral_cluster_prophages = dict()
    for bacterial_clusters in CRISPR_mates:
        fileout = os.path.join('prophageannotations',bacterial_clusters+'.prophage.annotations.txt')
        # The output file is (re)created even when there is no fastANI input.
        with open(fileout,'w') as outfile:
            fastanifile = os.path.join('fastani','bac'+bacterial_clusters+'.viruses.fastani.txt')
            if os.path.exists(fastanifile):
                with open(fastanifile,'r') as infile:
                    for line in infile:
                        # fastANI columns: query, reference, ANI, matched fragments, total fragments
                        line = line.strip().split('\t')
                        ANI = float(line[2])
                        coverage = (int(line[3])/int(line[4]))*100
                        if ANI >= 95 and coverage >= 75:
                            # bin ids are "<sample>_<cluster>" file basenames without .fna
                            binid = os.path.basename(line[0]).replace('.fna','')
                            viral_cluster = binid.split('_')[1]
                            viral_sample = binid.split('_')[0]
                            bacterialbin = os.path.basename(line[1]).replace('.fna','')
                            if not viral_cluster in viral_cluster_prophages:
                                viral_cluster_prophages[viral_cluster] = dict()
                                viral_cluster_prophages[viral_cluster][bacterialbin] = 1
                            else:
                                viral_cluster_prophages[viral_cluster][bacterialbin] = 1
                            sample = bacterialbin.split('_')[0]
                            lineout = '\t'.join([bacterial_clusters,bacterialbin,sample,viral_cluster,viral_sample,binid,str(ANI),str(coverage)])
                            outfile.write(lineout+'\n')
    return viral_cluster_prophages
5,326,715
def set_attrib(node, key, default):
    """Parse XML attribute `key` of `node`, returning `default` when absent."""
    return node.attrib.get(key, default)
5,326,716
def nested_hexagon_stretched():
    """A stretched, nested hexagon.

    Returns:
        (conv, 35, 24, 6): the ToPointsAndSegments converter loaded with the
        two nested hexagon rings, plus three expected-count constants
        (presumably points/segments/faces for the test harness — TODO confirm).
    """
    poly = [
        [
            (0.86603, -0.5),
            (0.86603, 1.5),
            (0.0, 2.0),
            (-0.86603, 1.5),
            (-0.86603, -0.5),
            (-0.0, -1.0),
            (0.86603, -0.5),
        ],
        [
            (1.29904, -0.75),
            (1.29904, 1.75),
            (0.0, 2.5),
            (-1.29904, 1.75),
            (-1.29904, -0.75),
            (-0.0, -1.5),
            (1.29904, -0.75),
        ],
    ]
    # convert to triangulation input
    # BUG FIX: the converter was built twice and the first instance (with the
    # polygon already added) was discarded; construct it exactly once.
    conv = ToPointsAndSegments()
    conv.add_polygon(poly)
    return conv, 35, 24, 6
5,326,717
def _patch_dynamodb_connection(**kwargs):
    """:class:`boto.dynamodb2.layer1.DynamoDBConnection` patcher.

    Partially applies the given keyword arguments to the
    :class:`boto.dynamodb2.layer1.DynamoDBConnection` initializer. Commonly
    used to patch host and port toward a local or remote DynamoDB as the
    project configuration changes. Idempotent: a second call is a no-op.
    """
    # Already patched — the original __init__ has been stashed.
    if hasattr(DynamoDBConnection, '__original_init__'):
        return
    DynamoDBConnection.__original_init__ = DynamoDBConnection.__init__

    def patched_init(self, **call_kwargs):
        # Patch-time kwargs win over call-time kwargs.
        call_kwargs.update(kwargs)
        self.__original_init__(**call_kwargs)

    DynamoDBConnection.__init__ = patched_init
5,326,718
def vector(math_engine, size, dtype):
    """Create a one-dimensional blob (vector) of `size` elements.

    :param math_engine: the math engine that will own the blob.
    :param size: number of elements; must be positive.
    :param dtype: element type, either 'float32' or 'int32'.
    :raises ValueError: if `dtype` is unsupported or `size` < 1.
    """
    # IMPROVED: membership test replaces the chained `!= and !=` comparison.
    if dtype not in ("float32", "int32"):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if size < 1:
        raise ValueError('The `size` must be > 0.')
    # Blobs are always 7-dimensional internally; a vector uses only the first axis.
    shape = (size, 1, 1, 1, 1, 1, 1)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
5,326,719
def images(arrays, labels=None, domain=None, width=None):
    """Display a list of images with optional labels.

    Args:
        arrays: A list of NumPy arrays representing images
        labels: A list of strings to label each image. Defaults to show index if None
        domain: Domain of pixel values, inferred from min & max values if None
        width: width of output image, scaled using nearest neighbor interpolation.
            size unchanged if None
    """
    parts = ['<div style="display: flex; flex-direction: row;">']
    for i, array in enumerate(arrays):
        label = i if labels is None else labels[i]
        img_html = _image_html(array, width=width, domain=domain)
        parts.append(
            """<div style="margin-right:10px; margin-top: 4px;"> {label} <br/> {img_html} </div>""".format(
                label=label, img_html=img_html))
    parts.append("</div>")
    # Join once instead of repeated string concatenation.
    _display_html("".join(parts))
5,326,720
def is_order_exist(context, symbol, side) -> bool:
    """Check whether an unfinished order with the same symbol and side exists.

    :param context: trading context holding `unfinished_orders` and a `logger`
    :param symbol: trading symbol
    :param side: trade direction
    :return: bool — True when a matching unfinished order already exists
    """
    for order in (context.unfinished_orders or []):
        if order.symbol == symbol and order.side == side:
            context.logger.info("同类型订单已存在:{} - {}".format(symbol, side))
            return True
    return False
5,326,721
def getBestMove(board, selectedFunction="minmax"):
    """Find the best move for the AI.

    Args:
        board (board): board object from chess
        selectedFunction (str): search algorithm to use, "minmax" (default)
            or "minmaxAlphaBeta"; any other value keeps the book/random move.

    Returns:
        UCI.move: piece movement on the board, i.e. "g2g4"
    """
    # IMPROVED: removed the unused `maxWeight` local and the redundant
    # pre-initialization of `deplacement`; translated comments to English.
    # Try the polyglot opening book first.
    deplacement = pg.bestMove(board)
    if not deplacement:
        # Fall back to a random legal move.
        deplacement = random.choice(list(board.legal_moves))
    # NOTE(review): when a search function is selected, its result overrides
    # the book/random move above — this matches the original behaviour.
    if selectedFunction == "minmax":
        val, deplacement = minmax(board, 3)
    elif selectedFunction == "minmaxAlphaBeta":
        val, deplacement = minmaxAlphaBeta(board, 5, -math.inf, math.inf)
    return deplacement
5,326,722
def newline_prep(target_str, do_escape_n=True, do_br_tag=True):
    """
    Set up the newlines in a block of text so that they will be processed
    correctly by Reportlab and logging.

    :param target_str: text whose newlines should be normalised
    :param do_escape_n: passed through to get_newline_str
    :param do_br_tag: passed through to get_newline_str
    :return: the text with all newline variants replaced by the target form
    """
    newline_str = get_newline_str(do_escape_n=do_escape_n, do_br_tag=do_br_tag)
    # Paired forms first (so "\n<br/>" collapses to ONE marker), then the
    # individual newline variants.
    for token in ("\n<br/>", "<br/>\n", "\n", "<br/>", "\r"):
        target_str = target_str.replace(token, "<newline>")
    # Collapse doubled markers, then substitute the target newline string.
    target_str = target_str.replace("<newline><newline>", "<newline>")
    return target_str.replace("<newline>", newline_str)
5,326,723
def train(
        seed, _log, size=100, window=5, min_count=5, epochs=5, use_fasttext=False,
        workers=1, vectors_only=True, save_to='vectors.txt'):
    """Train word2vec/fastText word vectors.

    Sacred-style captured function: `seed` and `_log` are injected.
    Hyperparameters (`size`, `window`, `min_count`, `epochs`, `workers`)
    are passed straight to gensim; `use_fasttext` selects FastText over
    Word2Vec. When `vectors_only` is True only the word vectors are saved
    (word2vec text format); otherwise the full model is saved to `save_to`.
    """
    # Without the compiled (Cython) routines training is drastically slower.
    if not FAST_VERSION:
        warnings.warn(
            "Gensim's FAST_VERSION is not set. Install C compiler before installing "
            "Gensim to get the fast version of word2vec.")
    cls = FastText if use_fasttext else Word2Vec
    _log.info('Start training')
    # NOTE: `size`/`iter` are the gensim<4 parameter names.
    model = cls(
        SentencesCorpus(read_corpus), size=size, window=window, min_count=min_count,
        workers=workers, iter=epochs, seed=seed)
    _log.info('Training finished, saving model to %s', save_to)
    if vectors_only:
        model.wv.save_word2vec_format(save_to)
    else:
        model.save(save_to)
5,326,724
def romb_extrap(sr, der_init, expon, compute_amp = False): """ Perform Romberg extrapolation for estimates formed within derivest. Arguments: sr : Decrease ratio between successive steps. der_init : Initial derivative estimates. expon : List of orders corresponding to the higher-order terms to be cancelled via Romberg step. The accepted parameter values of derivest will use a list of, at most, three values. A warning is issued if a longer list is received. compute_amp : Boolean specifying whether to also compute the noise amplification factor. (Default: False) Returns a 2-tuple or 3-tuple, containing: der_romb : Derivative estimates. err_est : Error estimates. amp : Computed noise amplification factor (if compute_amp == True). """ # Guarantee that expon is a one-dimensional array of floats: if isinstance(expon, list): expon = np.array(expon).flatten() elif not isinstance(expon, np.ndarray): expon = np.array([float(expon)]) else: expon = expon.flatten() num_expon = expon.size # Construct the Romberg matrix: sr_inv = 1.0/sr rmat = np.ones((num_expon + 2, num_expon + 1)) if num_expon > 3: warnings.warn("Ordinary use of derivest() should not need more than " "three terms to be cancelled.", RuntimeWarning) elif num_expon > 0: for i in range(1, num_expon + 2): # Compute QR factorization, uncertainty estimates: rmat[i, np.arange(1, num_expon + 1)] = sr_inv**(i*expon) (Q, R) = np.linalg.qr(rmat) # Extrapolate to a zero step-size: rhs = diag_tile(der_init, (num_expon + 2, max(1, der_init.size - num_expon - 2)), flatten = True) # Compute Romberg coefficients by solving linear systems: coeffs = np.linalg.lstsq(R, Q.T @ rhs, rcond = None)[0] der_romb = coeffs[0,:] # Extract derivative estimates. 
# Approximate the uncertainty: s = np.sqrt(np.sum((rhs - rmat @ coeffs)**2.0, axis = 0)) R_inv = np.linalg.lstsq(R, np.eye(num_expon + 1), rcond = None)[0] cov = np.sum(R_inv**2.0, axis = 1) err_est = 12.7062047361747*np.sqrt(cov[0])*s if compute_amp: return (der_romb, err_est, np.linalg.cond(R)) else: return (der_romb, err_est)
5,326,725
def cadmin_assign_coord(course_id):
    """ Set someone as course coordinator

    Expects the username in the POSTed "coord" form field. Grants the user
    both courseadmin and coursecoord permissions on the course, flashing a
    status message either way, then redirects back to the course config page.
    """
    course = Courses2.get_course(course_id)
    if not course:
        abort(404)
    if not "coord" in request.form:
        abort(400)
    new_uname = request.form['coord']
    # TODO: Sanitize username
    try:
        new_uid = Users2.uid_by_uname(new_uname)
    except KeyError:
        flash("User '%s' Not Found" % new_uname)
    else:
        # uid lookup may also return a falsy value for unknown users.
        if not new_uid:
            flash("User '%s' Not Found" % new_uname)
        else:
            Permissions.add_perm(new_uid, course_id, 3)  # courseadmin
            Permissions.add_perm(new_uid, course_id, 4)  # coursecoord
            flash("%s can now control the course." % (new_uname,))
    return redirect(url_for('cadmin_config', course_id=course_id))
5,326,726
def nativeTextOverline(self):
    """Return whether text overline is enabled (delegates to textOverline).

    :rtype: bool
    """
    overline_enabled = self.textOverline()
    return overline_enabled
5,326,727
def test_directory_without_notebooks(capsys: "CaptureFixture") -> None:
    """
    Check sensible error message is returned if none of the directories passed
    have notebooks.

    Parameters
    ----------
    capsys
        Pytest fixture to capture stdout and stderr.
    """
    # `docs` contains no .ipynb files, so main() is expected to exit.
    with pytest.raises(SystemExit):
        main(["black", "docs"])
    # The explanatory message must land on stderr.
    _, err = capsys.readouterr()
    assert err == "No .ipynb notebooks found in given directories: docs\n"
5,326,728
def ca_(arr, l_bound=4000, guard_len=4, noise_len=8):
    """Perform CFAR-CA detection on the input array.

    Args:
        arr (list or ndarray): Noisy array.
        l_bound (int): Additive lower bound of detection threshold.
        guard_len (int): Left and right side guard samples for leakage protection.
        noise_len (int): Left and right side noise samples after guard samples.

    Returns:
        threshold (ndarray): CFAR generated threshold based on inputs (Peak detected if arr[i] > threshold[i]) \
        for designated false-positive rate
        noise_floor (ndarray): noise values with the same shape as input arr.

    Example:
        >>> signal = np.random.randint(100, size=10)
        >>> signal
            array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])
        >>> threshold = mm.dsp.ca_(signal, l_bound=20, guard_len=1, noise_len=3)
        >>> threshold
            (array([70, 76, 64, 79, 81, 91, 74, 71, 70, 79]), array([50, 56, 44, 59, 61, 71, 54, 51, 50, 59]))
    """
    # IMPROVED: np.asarray accepts any array-like input (tuples included),
    # replacing the list-only conversion plus a hard `assert type(...) == ndarray`
    # that would be stripped under `python -O`.
    arr = np.asarray(arr)
    # Averaging kernel over the 2*noise_len noise cells; the guard cells and
    # the cell under test are zeroed out of the window.
    kernel = np.ones(1 + (2 * guard_len) + (2 * noise_len), dtype=arr.dtype) / (2 * noise_len)
    kernel[noise_len:noise_len + (2 * guard_len) + 1] = 0
    noise_floor = convolve1d(arr, kernel, mode='wrap')
    threshold = noise_floor + l_bound
    return threshold, noise_floor
5,326,729
def get():
    """ vert notifier get

    Render all stored notification routines as a table and exit the CLI.
    """
    table: object = Table(box=SQUARE)
    table.add_column('ID')
    table.add_column('Title')
    table.add_column('Message', justify='center')
    table.add_column('Hour')
    rows: tuple[tuple] = Routines.get()
    for row in rows:
        # row layout appears to be (id, title, message, hour) —
        # TODO confirm against Routines.get().
        table.add_row('%d' % (row[0]), row[1], row[2], time_format(row[3]))
    console.print(table)
    # Typer/Click-style clean exit after printing.
    raise Exit()
5,326,730
def test_graph_del_node_with_edges(test_graph):
    """Test graph del node with edges.

    Deleting node 'C' must remove the node and every edge touching it,
    leaving only edges between surviving nodes.
    """
    # Build edges into and out of 'C' (including a duplicate B->C).
    test_graph[2].add_edge('A', 'C')
    test_graph[2].add_edge('B', 'C')
    test_graph[2].add_edge('B', 'C')
    test_graph[2].add_edge('A', 'D')
    test_graph[2].del_node('C')
    assert sorted(test_graph[2].nodes()) == ['A', 'B', 'D', 'E']
    # Only the A->D edge survives; edges() here reports destination nodes.
    assert sorted(test_graph[2].edges()) == ['D']
5,326,731
def create_nodes_encoder(properties):
    """Create an one-hot encoder fitted on the node labels of `properties`."""
    encoder = OneHotEncoder(handle_unknown='ignore')
    # OneHotEncoder expects a 2-D (n_samples, 1) array.
    labels = np.array(list(get_nodes_labels(properties))).reshape((-1, 1))
    encoder.fit(labels)
    return encoder
5,326,732
def dummy():
    """Placeholder function.

    .. warning:: This function is not implemented yet!
    """
    message = "This feature is not implemented yet."
    raise NotImplementedError(message)
5,326,733
def _run_cli(
    input_file_path: Optional[str],
    molecule_smiles: Optional[str],
    output_file_path: str,
    force_field_path: Optional[str],
    spec_name: Optional[str],
    spec_file_name: Optional[str],
    directory: str,
    n_fragmenter_workers: int,
    n_qc_compute_workers: int,
    n_optimizer_workers: int,
    launch_redis_if_unavailable: bool,
):
    """Run bespoke optimization using a temporary executor.

    If you are running many bespoke optimizations it is recommended that you
    first launch a bespoke executor using the `launch` command and then
    submit the optimizations to it using the `submit` command.
    """
    pretty.install()
    console = rich.get_console()
    print_header(console)
    # Imported lazily so the CLI header prints before the (heavy) bespokefit
    # executor import completes.
    from openff.bespokefit.executor import BespokeExecutor, wait_until_complete
    executor_status = console.status("launching the bespoke executor")
    executor_status.start()
    # The context manager shuts the executor down again on exit — presumably
    # including any Redis instance it launched (TODO confirm).
    with BespokeExecutor(
        directory=directory,
        n_fragmenter_workers=n_fragmenter_workers,
        n_qc_compute_workers=n_qc_compute_workers,
        n_optimizer_workers=n_optimizer_workers,
        launch_redis_if_unavailable=launch_redis_if_unavailable,
    ):
        executor_status.stop()
        console.print("[[green]✓[/green]] bespoke executor launched")
        console.line()
        response = _submit(
            console=console,
            input_file_path=input_file_path,
            molecule_smiles=molecule_smiles,
            force_field_path=force_field_path,
            spec_name=spec_name,
            spec_file_name=spec_file_name,
        )
        # ``None`` signals the submission failed (already reported by _submit).
        if response is None:
            return
        console.print(Padding("3. running the fitting pipeline", (1, 0, 1, 0)))
        # Blocks until the fit finishes; ``None`` means it did not complete.
        results = wait_until_complete(response.id)
        if results is None:
            return
        with open(output_file_path, "w") as file:
            file.write(results.json())
5,326,734
def get_response(data_type, client_id=None, device_id=None):
    """Request GET from API server based on desired type, return raw response data.

    Args:
        data_type (str): Type of request, 'client', 'site', 'device', 'scan'
        client_id (int): Active client ID number (used by 'site' and 'device')
        device_id (int): Active device ID number (used by 'scan')

    Returns:
        str: Raw response body; for 'device' the workstation and server
        responses are concatenated.

    Raises:
        UnboundLocalError: if ``data_type`` is not one of the known values
        (unchanged from the original behavior).
    """
    # Build the base URL once instead of repeating it in every branch.
    base_url = 'https://%s/api/?apikey=%s&' % (query_server, api_key)
    if data_type == 'client':
        resp = requests.get(base_url, params={'service': 'list_clients'}).text
    elif data_type == 'site':
        payload = {'service': 'list_sites', 'clientid': client_id}
        resp = requests.get(base_url, params=payload).text
    elif data_type == 'device':
        # Devices are listed per type; concatenate both responses.
        resp = ''
        for dev_type in ('workstation', 'server'):
            payload = {'service': 'list_devices_at_client',
                       'clientid': client_id,
                       'devicetype': dev_type}
            resp += requests.get(base_url, params=payload).text
    elif data_type == 'scan':
        payload = {'service': 'list_mav_scans', 'deviceid': device_id,
                   'details': 'YES'}
        resp = requests.get(base_url, params=payload).text
    return resp
5,326,735
def average_pool(inputs, masks, axis=-2, eps=1e-10):
    """Masked average pooling over *axis*.

    ``inputs`` has shape [A, B, ..., Z, dim] and ``masks`` has shape
    [A, B, ..., Z]; ``inputs.shape[:-1]`` must match ``masks.shape``.
    ``eps`` guards against division by zero for all-zero masks.
    """
    assert inputs.shape[:-1] == masks.shape, f"inputs.shape[:-1]({inputs.shape[:-1]}) must be equal to masks.shape({masks.shape})"
    weights = masks.unsqueeze(-1)
    pooled_sum = (inputs * weights).sum(axis)
    counts = weights.sum(axis) + eps
    return pooled_sum / counts
5,326,736
def validate_graph_without_circle(data):
    """Validate that a pipeline graph contains no cycle.

    Returns ``{"result": True, "data": []}`` when the graph is acyclic,
    otherwise::

        {
            "result": False,
            "message": "error message",
            "error_data": ["node1_id", "node2_id", "node1_id"],
        }
    """
    node_ids = [data["start_event"]["id"], data["end_event"]["id"]]
    node_ids.extend(data["gateways"])
    node_ids.extend(data["activities"])
    edges = [[flow["source"], flow["target"]] for flow in data["flows"].values()]
    cycle = Graph(node_ids, edges).get_cycle()
    if not cycle:
        return {"result": True, "data": []}
    return {
        "result": False,
        "message": "pipeline graph has circle",
        "error_data": cycle,
    }
5,326,737
def generate_landsat_ndvi(src_info, no_data_value):
    """Generate Landsat NDVI

    Args:
        src_info <SourceInfo>: Information about the source data
        no_data_value <int>: No data (fill) value to use

    Returns:
        <numpy.2darray>: Generated NDVI band data
        list(<int>): Locations containing no data (fill) values
    """
    logger = logging.getLogger(__name__)
    logger.info('Building TOA based NDVI band for Landsat data')
    # NIR ----------------------------------------------------------------
    nir_data = emis_util.extract_raster_data(src_info.toa.nir.name, 1)
    nir_no_data_locations = np.where(nir_data == no_data_value)
    # Scale the raw digital numbers to reflectance before computing NDVI.
    nir_data = nir_data * src_info.toa.nir.scale_factor
    # RED ----------------------------------------------------------------
    red_data = emis_util.extract_raster_data(src_info.toa.red.name, 1)
    red_no_data_locations = np.where(red_data == no_data_value)
    red_data = red_data * src_info.toa.red.scale_factor
    # NDVI ---------------------------------------------------------------
    # NOTE(review): where nir + red == 0 this emits divide-by-zero warnings
    # and produces NaN/inf — confirm that such pixels are always fill.
    ndvi_data = ((nir_data - red_data) / (nir_data + red_data))
    # Cleanup no data locations
    ndvi_data[nir_no_data_locations] = no_data_value
    ndvi_data[red_no_data_locations] = no_data_value
    # Memory cleanup
    del red_data
    del nir_data
    del nir_no_data_locations
    del red_no_data_locations
    # Capture these before less than zero operation
    no_data_locations = np.where(ndvi_data == no_data_value)
    # Turn all negative values to zero
    # Use a realy small value so that we don't have negative zero (-0.0)
    ndvi_data[ndvi_data < 0.0000001] = 0
    return (ndvi_data, no_data_locations)
5,326,738
def plot_df1D(df_slice, x_feature_axis, y_feature_name="obj", sign=+1, **kwargs): """ Plots values from data frame in 1D. Args: df_slice data frame slice with known function values (in column y_feature_name) x_feature_axis name of the feature that the plot will go along """ # obtaining means and stds df_means = df_slice.groupby(x_feature_axis).mean().reset_index() arg = df_means[x_feature_axis] means = sign*df_means[y_feature_name] df_stds = df_slice.groupby(x_feature_axis).std().reset_index() stds = df_stds[y_feature_name] plot_primitives.plot_err(arg, means, stds, label=kwargs.get("label", ""), color=kwargs.get("color", "salmon"), lw=kwargs.get("lw", 2.5), ls=kwargs.get("ls", "-")) plt.xlabel(kwargs.get("xlabel", x_feature_axis)); plt.ylabel(kwargs.get("ylabel", y_feature_name)); plt.grid();
5,326,739
def ordinal(string):
    """
    Converts an ordinal word to an integer.

    Arguments:
    - string -- the word to parse.

    Returns:  an integer if successful; otherwise None.
    -----------------------------------------------------------------
    """
    try:
        # Full word.
        if string in ORD_LIST:
            # The word's position in ORD_LIST is its numeric value.
            return ORD_LIST.index(string)
        # end if
        try:
            # Presumably strips a two-character suffix such as "st"/"nd"/
            # "rd"/"th" ("21st" -> 21) — TODO confirm against ORD_LIST use.
            return int(string[:-2])
        except ValueError:
            return None
        # end if
    except Exception as err:
        # Delegate unexpected failures to the project's error handler.
        _z_exc("wl_resource.py/ordinal", err)
    # end try
5,326,740
def get_tables_stats(dbs=None,tables=None,period=365*86400):
    """
    obtains counts and frequencies stats from all data tables from all dbs

    Args:
        dbs: iterable of database names; defaults to all HDB++ databases.
        tables: iterable of table names; defaults to every table per db.
        period: time window in seconds (default one year); partitions whose
            date falls outside [now-period, now] are excluded.

    Returns:
        dict mapping (db, table) -> fn.Struct with counts, sizes and
        per-attribute / per-second derived frequencies.
    """
    dbs = dbs or pta.multi.get_hdbpp_databases()
    result = fn.defaultdict(fn.Struct)
    # Dates are compared as integers of the form YYYYMMDD (non-digits
    # stripped from the date string).
    date = int(fn.clsub('[^0-9]','',fn.time2str().split()[0]))
    if period:
        date0 = int(fn.clsub('[^0-9]','',
            fn.time2str(fn.now()-period).split()[0]))
    else:
        date0 = 0
    print(date0,date)
    for d in dbs:
        api = pta.api(d)
        dbtables = tables or api.getTables()
        for t in dbtables:
            result[(d,t)].db = d
            result[(d,t)].table = t
            # Partition names are assumed to parse to YYYYMMDD ints —
            # TODO confirm naming scheme.
            result[(d,t)].partitions = [p for p in api.getTablePartitions(t)
                                    if date0 < fn.str2int(p) < date]
            result[(d,t)].attributes = (api.get_attributes_by_table(t)
                if t in api.get_data_tables() else [])
            result[(d,t)].last = (api.get_last_partition(t,tref=fn.now())
                if t in api.get_data_tables() else '')
            # With multiple partitions, sum per-partition sizes/rows;
            # otherwise fall back to whole-table figures.
            if len(result[(d,t)].partitions) > 1:
                result[(d,t)].size = sum(api.getPartitionSize(t,p)
                                         for p in result[(d,t)].partitions)
                result[(d,t)].rows = sum(api.getPartitionRows(t,p)
                                         for p in result[(d,t)].partitions)
            else:
                result[(d,t)].size = api.getTableSize(t)
                result[(d,t)].rows = api.getTableRows(t)
    # Replace raw lists by counts and derive per-attribute / per-second
    # averages (guarding every division against zero denominators).
    for k,v in result.items():
        v.partitions = len(v.partitions)
        v.attributes = len(v.attributes)
        v.attr_size = float(v.size)/v.attributes if v.attributes else 0
        v.attr_rows = float(v.rows)/v.attributes if v.attributes else 0
        v.row_size = v.size/v.rows if v.rows else 0
        v.part_size = v.size/v.partitions if v.partitions else 0
        v.row_freq = v.rows/float(period) if period else 0
        v.size_freq = v.size/float(period) if period else 0
        v.attr_freq = v.row_freq/v.attributes if v.attributes else 0
    return result
5,326,741
def blend(image1, image2, factor):
    """Blend image1 and image2 using 'factor'.

    A factor of 0.0 yields image1, 1.0 yields image2, and values in
    between linearly interpolate the pixels. Factors outside [0, 1]
    extrapolate the difference; ``to_uint8`` clips the result to [0, 255].

    Args:
        image1: An image Tensor.
        image2: An image Tensor.
        factor: A floating point value above 0.0.

    Returns:
        A blended image Tensor.
    """
    base = tf.cast(image1, tf.float32)
    target = tf.cast(image2, tf.float32)
    blended = base + factor * (target - base)
    return to_uint8(blended)
5,326,742
def call_subprocess_Popen(command, **params):
    """
    Calls subprocess_Popen and discards the output, returning only the
    exit code.

    Args:
        command: the command (list or string) passed to subprocess_Popen.
        **params: extra Popen keyword arguments; ``stdout``/``stderr`` are
            forbidden because they are redirected to devnull here.

    Returns:
        int: the child process's return code.
    """
    if 'stdout' in params or 'stderr' in params:
        raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
    # Use a context manager so the devnull handle is closed even if Popen
    # raises — the previous version leaked the file descriptor.
    with open(os.devnull, 'wb') as null:
        # stdin to devnull is a workaround for a crash in a weird Windows
        # environment where sys.stdin was None
        params.setdefault('stdin', null)
        params['stdout'] = null
        params['stderr'] = null
        p = subprocess_Popen(command, **params)
        # wait() returns the process's exit code.
        return p.wait()
5,326,743
def _minmax_scaler(data, settings):
    """Normalize by min max mode.

    Applies min-max scaling to ``settings['input_col']`` of the fragment
    ``data``, using global per-feature minima/maxima precomputed elsewhere
    and carried in ``settings['model']`` — TODO confirm producer.
    """
    info = settings['model']          # (minimum, maximum) arrays per feature
    frag = settings['id_frag']        # fragment id, used for bookkeeping info
    features = settings['input_col']
    alias = settings.get('output_col', [])
    min_r, max_r = settings.get('feature_range', (0, 1))
    remove_input = settings.get('remove', False)
    # Fall back to in-place column names when no aliases are provided.
    if len(alias) != len(features):
        alias = features
    values = data[features].values
    # Drop pre-existing output columns (and optionally the inputs) so the
    # concat below does not create duplicates.
    to_remove = [c for c in alias if c in data.columns]
    if remove_input:
        to_remove += features
    data.drop(to_remove, axis=1, inplace=True)
    if len(data) > 0:
        from sklearn.preprocessing import MinMaxScaler
        minimum, maximum = info
        minimum = np.array(minimum)
        maximum = np.array(maximum)
        scale_ = (max_r - min_r) / (maximum - minimum)
        # Reconstruct a fitted MinMaxScaler by hand (no .fit() call) so the
        # transform uses the global min/max, not this fragment's.
        scaler = MinMaxScaler()
        scaler.data_min_ = minimum
        scaler.data_max_ = maximum
        scaler.scale_ = scale_
        scaler.data_range_ = maximum - minimum
        scaler.min_ = min_r - minimum * scale_
        res = scaler.transform(values)
        del values
        # NOTE(review): pd.concat aligns on index — assumes `data` carries a
        # default RangeIndex after the drop; confirm upstream guarantees it.
        data = pd.concat([data, pd.DataFrame(res, columns=alias)], axis=1)
    else:
        # Empty fragment: still create the output columns for schema parity.
        for col in alias:
            data[col] = np.nan
    info = generate_info(data, frag)
    return data, info
5,326,744
def find_by_id(widget_id):
    """ Get a widget by its ulid. """
    columns = ['widget_id', 'widget_name', 'user_id', 'user_email', 'description']
    criteria = {'widget_id': widget_id}
    return db.select_single('widgets', criteria, None, columns)
5,326,745
def sainte_lague(preliminary_divisor, data, total_available_seats):
    """Iterative Sainte-Lague procedure which applies core_sainte_lague

    Input:
        preliminary_divisor (float): Guess for the divisor
        data (pd.DataFrame): data processed with divisors (e.g. votes by party)
        total_available_seats (int): number of seats in parliament for the
            respective Bundesland, Germany, etc.

    Output:
        allocated_seats (DataFrame): seats by party, state, etc.
        preliminary_divisor (float): divisor that produced the exact total
    """
    allocated_seats, sum_of_seats = core_sainte_lague(preliminary_divisor, data)
    while sum_of_seats != total_available_seats:
        # Too many seats means the divisor is too small; too few means it
        # is too large. Adjust in steps of 50 and recompute.
        if sum_of_seats > total_available_seats:
            preliminary_divisor += 50
        else:
            preliminary_divisor -= 50
        allocated_seats, sum_of_seats = core_sainte_lague(preliminary_divisor, data)
    return allocated_seats, preliminary_divisor
5,326,746
def nonchangingdims(index, ndim, axes, shape=None):
    """nonchanging for particular dimensions

    Args:
        index(index): object used in slicing (expanded)
        ndim(num): dimensions before indexings
        axes(array): dimensions for which you want to know the index
        shape(Optional(tuple)): dimension before applying index

    Returns:
        tuple
    """
    # NOTE(review): this first assignment is immediately overwritten by the
    # next line — possibly `expand(index2, ndim)` was intended; confirm.
    index2 = [ind for ind in index if ind is not np.newaxis]
    index2 = expand(index, ndim)
    # Restrict the expanded index (and shape, if given) to the axes of
    # interest before the nonchanging check.
    index2 = tuple(listtools.listadvanced(index2, axes))
    if shape is not None:
        shape = tuple(listtools.listadvanced(list(shape), axes))
    b = nonchanging(index2, shape)
    axesorder, _ = axesorder_afterindexing(index, ndim)
    # Collapse a single grouped axis entry back to a scalar position.
    i = listtools.where(axesorder, lambda x: instance.islistgen(x))
    if len(i) == 1:
        i = i[0]
        if len(axesorder[i]) == 1:
            axesorder[i] = axesorder[i][0]
    try:
        # The selected axes must map back onto themselves for the index to
        # be order-preserving on those dimensions.
        b &= listtools.listadvanced(axesorder, axes) == axes
    except:
        # NOTE(review): bare except — presumably guards comparison failures
        # when axesorder still contains grouped entries; confirm intent.
        b = False
    return b
5,326,747
def test_longer_random_sequence_of_queue_ops(buffer_type):
    """A long random sequence of added and retrieved values"""
    buf = buffer_type(100, 80)
    for _ in six.moves.xrange(10000):
        if buf.can_add():
            batch = np.random.random((np.random.randint(1, 10),))
            _add_many(buf, batch)
        # Capacity plus one batch is the hard upper bound on the size.
        assert buf.size < 100 + 10
        for _ in range(np.random.randint(1, 10)):
            if not buf.can_retrieve():
                break
            # Make sure never get to less than `min_after_retrieve` elements
            assert 80 <= buf.size
            _retrieve(buf)
5,326,748
def get_voxels(df, center, config, rot_mat=np.eye(3, 3)):
    """ Generate the 3d grid from coordinate format.

    Args:
        df (pd.DataFrame): region to generate grid for; must carry
            'x', 'y', 'z' and 'element' columns.
        center (3x3 np.array): center of the grid.
        config: holds resolution, element_mapping and grid-size settings
            consumed via grid_size/grid_shape — TODO confirm fields.
        rot_mat (3x3 np.array): rotation matrix to apply to region before
            putting in grid.

    Returns:
        4-d numpy array representing an occupancy grid where last dimension
        is atom channel. First 3 dimension are of size radius_ang * 2 + 1.
    """
    size = grid_size(config)
    true_radius = size * config.resolution / 2.0
    # Select valid atoms.
    at = df[['x', 'y', 'z']].values.astype(np.float32)
    elements = df['element'].values
    # Center atoms.
    at = at - center
    # Apply rotation matrix.
    at = np.dot(at, rot_mat)
    # at = (np.around((at + true_radius) / config.resolution - 0.5)).astype(np.int16)
    # Bin coordinates into `size` voxels; np.digitize returns 1-based bins,
    # with 0 / size+1 marking out-of-range points.
    bins = np.linspace(-true_radius, true_radius, size+1)
    at_bin_idx = np.digitize(at, bins)
    # Prune out atoms outside of grid as well as non-existent atoms.
    sel = np.all(at_bin_idx > 0, axis=1) & np.all(at_bin_idx < size+1, axis=1) & (elements != '')
    # Shift back to 0-based voxel indices.
    at = at_bin_idx[sel] - 1
    # Form final grid.
    labels = elements[sel]
    # Keep only elements present in the configured element -> channel map.
    lsel = np.nonzero([_recognized(x, config.element_mapping) for x in labels])
    labels = labels[lsel]
    labels = np.array([config.element_mapping[x] for x in labels], dtype=np.int8)
    grid = np.zeros(grid_shape(config), dtype=np.float32)
    # One-hot occupancy: mark each atom's voxel in its element channel.
    grid[at[lsel, 0], at[lsel, 1], at[lsel, 2], labels] = 1
    return grid
5,326,749
def dispatch_factory(msg: Result, **kwargs) -> DispatchCallOutSchema:
    """Build a DispatchCallOutSchema from a message definition.

    Validates that the supplied keyword arguments exactly match the
    parameters declared on ``msg`` (names, types, and — for strings —
    min/max sizes) before constructing the result.

    Examples:
        from daschat_base.messages import MSGS, msg_factory
        msg_factory(MSGS.success)
        msg_factory(MSGS.not_logged_in, user="abner")

    Args:
        msg (Result): Any result type in the MSGS constant

    Raises:
        ValueError: Parameter name not allowed
        ValueError: Result don't accept params
        ValueError: Wrong number of params
        ValueError: Wrong parameter type
        ValueError: Wrong parameter size

    Returns:
        DispatchCallOutSchema
    """
    supplied = len(kwargs)
    declared = len(msg.params)
    validated: dict = {}
    if supplied > 0 and declared == 0:
        raise ValueError("This message do not accept params")
    if supplied != declared:
        raise ValueError(
            f"Wrong number of params. This message only accepts {declared} parameter(s)"
        )
    for name, value in kwargs.items():
        definition = next((item for item in msg.params if item.name == name), None)
        if definition is None:
            raise ValueError(f"This parameter name is not allowed: {name}")
        if not type(value) == definition.type:
            raise ValueError(
                f"Wrong parameter type: '{name}' must be {definition.type}"
            )
        if definition.type == str:
            if not definition.min_size <= len(value) <= definition.max_size:
                raise ValueError(
                    f"Wrong parameter size: '{name}' must be between {definition.min_size} and {definition.max_size}"
                )
        validated[name] = value
    return DispatchCallOutSchema(
        result=ResultFieldSchema(id=msg.id, status=msg.status, params=validated)
    )
5,326,750
def local_config(config_path=FIXTURE_CONFIG_PATH):
    """Return a Config instance (defaulting to the fixture config path),
    available as a module-scoped fixture."""
    return Config(config_path=config_path)
5,326,751
def ln_prior(theta, parameters_to_fit):
    """priors - we only reject obviously wrong models

    Returns -inf when t_E or t_star is negative, 0.0 otherwise.
    """
    for name in ("t_E", "t_star"):
        if theta[parameters_to_fit.index(name)] < 0.:
            return -np.inf
    return 0.0
5,326,752
def get_axis_letter_aimed_at_child(transform):
    """
    Returns the axis letter that is pointing to the given transform
    :param transform: str, name of a transform
    :return: str
    """
    aim_vector = get_axis_aimed_at_child(transform)
    return get_vector_axis_letter(aim_vector)
5,326,753
def computeLatitudePrecision(codeLength):
    """
    Compute the latitude precision value for a given code length.

    Lengths <= 10 have the same precision for latitude and longitude, but
    lengths > 10 have different precisions due to the grid method having
    fewer columns than rows.
    """
    if codeLength <= 10:
        exponent = math.floor((codeLength / -2) + 2)
        return pow(20, exponent)
    # Beyond 10 digits every extra digit refines latitude by GRID_ROWS_.
    return pow(20, -3) / pow(GRID_ROWS_, codeLength - 10)
5,326,754
def EmptyStateMat(nX, nU, nY):
    """Return zero-filled state-space matrices of the proper dimensions.

    Returns:
        (Xx, Xu, Yx, Yu) with shapes (nX,nX), (nX,nU), (nY,nX), (nY,nU)
        — the state (Ac), input, output (Gc) and feed-through (Jc) matrices.
    """
    shapes = ((nX, nX), (nX, nU), (nY, nX), (nY, nU))
    Xx, Xu, Yx, Yu = (np.zeros(shape) for shape in shapes)
    return Xx, Xu, Yx, Yu
5,326,755
def update_db():
    """Update all parts of the DB"""
    populators = (
        dbpopulate.ships,
        dbpopulate.equip,
        dbpopulate.items,
        dbpopulate.recipe_resources,
        dbpopulate.expeditions,
    )
    for populate in populators:
        populate()
    try:
        setup()
    except sqlalchemy.exc.IntegrityError:
        # Already set up — ignore duplicate-row errors.
        pass
5,326,756
def get_market(code):
    """
    Very rough heuristic that infers the trading market from a security code.

    :param code: security code string
    :return: market code string (e.g. "US", "HK", "CN"); falls back to "CN"
        on lookup failures
    """
    # Map currency codes to market codes for the real-time-quote fallback.
    trans = {
        "USD": "US",
        "GBP": "UK",
        "HKD": "HK",
        "CNY": "CN",
        "CHF": "CH",
        "JPY": "JP",
        "EUR": "DE",
        "AUD": "AU",
        "INR": "IN",
        "SGD": "SG",
    }
    try:
        # Fast path: explicit entry in the static market_info table.
        if code in market_info:
            return market_info[code]
        elif code.startswith("CNY/") or code.endswith("/CNY"):
            return "CM"  # "China money" central-parity market marker
        elif code.startswith("HK") and code[2:].isdigit():
            return "HK"
        # Fall back to the real-time quote, then to currency inference.
        market = get_rt(code)["market"]
        if market is None:
            market = get_currency(code)
        market = trans.get(market, market)
    except (TypeError, AttributeError, ValueError, IndexError):
        # Any lookup failure defaults to the mainland China market.
        market = "CN"
    return market
5,326,757
def rawPlot():
    """Build and return a demo matplotlib Figure with two stacked subplots.

    The figure is sized 495x344 pixels at 100 dpi; the top subplot shows a
    damped-cosine sample plot, the bottom a cosine. Commented-out lines are
    kept as alternative plotting experiments.
    """
    def f(t):
        # Damped cosine used as the demo signal.
        return numpy.exp(-t) * numpy.cos(2*numpy.pi*-t)
    plot_x = 495
    plot_y = 344
    fig = plt.figure(figsize=[plot_x * 0.01, plot_y * 0.01],  # Inches.
                     dpi=100,  # 100 dots per inch, so the resulting buffer is 395x344 pixels
                     )
    fig.set_size_inches(plot_x * 0.01, plot_y * 0.01)
    ax = fig.gca()
    plt.xlabel('xlabel')
    plt.ylabel('ylabel')
    plt.title("Title")
    # Margins tuned so the labels are not clipped at this small figure size.
    plt.gcf().subplots_adjust(bottom=0.15, top=0.90, left=0.14, right=0.95)
    #l1, = ax.plot([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [1, 2, 4, 8, 15, 17, 18, 22, 23, 23, 24, 24, 25, 25])
    #l1, = ax.plot(numpy.sin(numpy.linspace(0, 2 * numpy.pi)), 'r-o')
    t1 = numpy.arange(0.0, 5.0, 0.10)
    t2 = numpy.arange(0.0, 5.0, 0.02)
    #l1, = ax.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
    # NOTE(review): plt.figure(1)/subplot calls below operate on pyplot's
    # global state rather than `fig`/`ax` above — confirm intended.
    plt.figure(1)
    p1 = plt.subplot(211)
    l1, = plt.plot(t1, f(t1), 'o')
    p2 = plt.subplot(212)
    l2, = plt.plot(t2, numpy.cos(2*numpy.pi*t2), 'r--')
    l1.set_color((162/255, 19/255, 24/255))
    l2.set_color((0/255, 166/255, 56/255))
    #plt.xlabel('xlabel')
    #plt.ylabel('ylabel')
    #plt.title("Title")
    # Hide the top/right spines on both subplots for a cleaner look.
    p1.spines['right'].set_visible(False)
    p1.spines['top'].set_visible(False)
    p2.spines['right'].set_visible(False)
    p2.spines['top'].set_visible(False)
    return fig
5,326,758
def CanopyHeight(strPathLAS, strPathDTM, fltCellSize, strPathBEFile, strUTMZone,
                 strAdlSwitches=None, strParamBoss=None):
    """Build the FUSION CanopyModel command line string.

    args:
        strPathLAS    = input LAS file
        strPathDTM    = output canopy surface/height dtm
        fltCellSize   = cell size in map units
        strPathBEFile = bare earth dtm
        strUTMZone    = utm zone
        strAdlSwitches= optional additional switches
        strParamBoss  = optional full parameter string overriding the default

    Command Syntax:
        CanopyModel [switches] surfacefile cellsize xyunits zunits coordsys
        zone horizdatum vertdatum datafile1 datafile2
    """
    switches = '/ground:' + strPathBEFile
    if strAdlSwitches:
        switches = switches + ' ' + strAdlSwitches
    if strParamBoss:
        params = strParamBoss
    else:
        params = str(fltCellSize) + ' M M 1 ' + str(strUTMZone) + ' 2 2'
    command_parts = [strPathFuInstall + os.sep + 'CanopyModel',
                     switches, strPathDTM, params, strPathLAS]
    return ' '.join(command_parts)
5,326,759
def corrupt_single_relationship(triple: tf.Tensor, all_triples: tf.Tensor, max_range: int, name=None):
    """ Corrupt the relationship by __sampling from [0, max_range]

    Replaces the relation of ``triple`` with a negative sample that does not
    form any true (h, r', t) triple present in ``all_triples``.

    :param triple: 1-d [h, r, t] tensor to corrupt
    :param all_triples: 2-d tensor of all known true triples
    :param max_range: exclusive upper bound for sampled relation ids
    :param name: optional TF name scope
    :return: corrupted 1-d [h,r,t] triple
    """
    with tf.name_scope(name, 'corrupt_single_relation', [triple, all_triples]):
        h, r, t = tf.unstack(triple, name='unstack_triple')
        # Narrow all_triples to rows sharing this head...
        head_mask = tf.equal(all_triples[:, 0], h, name='head_mask')
        head_matched_triples = tf.boolean_mask(all_triples[:, 1:], head_mask, name='head_matched_triples')
        # ...then to rows also sharing this tail, yielding every relation
        # that truly links (h, t). These are excluded from sampling.
        tail_mask = tf.equal(head_matched_triples[:, 1], t, name='tail_mask')
        true_rels = tf.boolean_mask(head_matched_triples[:, 0], tail_mask)
        # Reshape to a scalar so tf.stack produces a flat [h, r', t].
        corrupted_rel = tf.reshape(single_negative_sampling(true_rels, max_range), ())
        return tf.stack([h, corrupted_rel, t], name='rel_corrupted_triple')
5,326,760
def save_ipynb_from_py(folder: str, py_filename: str) -> str:
    """Create an .ipynb next to a .py file by filling 'template.ipynb'.

    The python source is JSON-escaped line by line and substituted for the
    '{{TEMPLATE}}' placeholder in the template (read from the current
    working directory). Returns the notebook's file name.
    """
    source_path = f"{folder}/{py_filename}"
    with open(source_path) as pyfile:
        # Escape newlines and quotes so each line is a valid JSON string.
        escaped = [line.replace("\n", "\\n").replace('"', '\\"')
                   for line in pyfile.readlines()]
    pycode = '",\n"'.join(escaped)
    with open('template.ipynb') as template:
        template_body = ''.join(template.readlines())
    notebook_code = template_body.replace('{{TEMPLATE}}', pycode)
    notebook_path = source_path.replace('.py', '.ipynb')
    with open(notebook_path, "w") as ipynb_file:
        ipynb_file.write(notebook_code)
    return py_filename.replace('.py', '.ipynb')
5,326,761
def send():
    """
    Updates the database with the person who is responding and their busy
    times.

    Reads `invitee` and `busy_times` from the request query string and the
    meeting code from the flask session; moves the invitee from
    `participants` to `already_checked_in` and appends their busy intervals
    to the meeting record.
    """
    invitee = request.args.get('invitee')
    busy_times = request.args.get('busy_times')
    meetcode = flask.session['meetcode']
    # Get the record with this meet code.
    record = collection.find({"code": meetcode})[0]
    # First indicate the person who just responded.
    if "{}".format(invitee) in record['participants']:
        # The invitee should always be in the record unless
        # users are doing something wrong, like multiple people
        # choosing the same name at the same time.
        # Either way, this if statement protects in that case.
        record['participants'].remove("{}".format(invitee))
        record['already_checked_in'].append("{}".format(invitee))
    # Next append the new list of busy times to the list from the db.
    # First the new list will need to be converted from a str to a list.
    # NOTE(review): assumes busy_times arrives JSON-ish encoded as
    # [["a","b"],["c","d"]] — the slicing/splitting below depends on that
    # exact format; confirm against the client code.
    busy_times = busy_times[3:-3].split("\"],[\"")
    for i in range(len(busy_times)):
        record['busy'].append(busy_times[i].split("\",\""))
    # Now update the database with the new busy times,
    # and updated info on who has checked in.
    collection.find_one_and_update(
        {"code": meetcode},
        {'$set': {"participants": record['participants'],
                  "already_checked_in": record['already_checked_in'],
                  "busy": record['busy']}})
    result = {"meetcode": meetcode}
    return flask.jsonify(result=result)
5,326,762
def get_plos_article_type_list(article_list=None, directory=None):
    """Makes a list of of all internal PLOS article types in the corpus

    Sorts them by frequency of occurrence

    :param article_list: list of articles, defaults to None
    :param directory: directory of articles, defaults to get_corpus_dir()
    :returns: dictionary with each PLOS type matched to number of occurrences
    :rtype: dict
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    PLOS_article_type_list = []
    # tqdm gives a progress bar; parsing every article XML can be slow.
    for article_file in tqdm(article_list):
        article = Article.from_filename(article_file, directory=directory)
        PLOS_article_type_list.append(article.plostype)
    print(len(set(PLOS_article_type_list)), 'types of articles found.')
    # `counter` is presumably collections.Counter imported lowercase —
    # TODO confirm; most_common() yields (type, count) pairs sorted by
    # descending frequency.
    PLOS_article_types_structured = counter(PLOS_article_type_list).most_common()
    return PLOS_article_types_structured
5,326,763
def e_coordenada(arg):
    """tuplo -> Boole

    Return True when *arg* is a coordinate tuple: a pair whose line and
    column selectors both yield ints in the range 1..4.
    """
    if not (isinstance(arg, tuple) and len(arg) == 2):
        return False
    linha = coordenada_linha(arg)
    coluna = coordenada_coluna(arg)
    return (1 <= linha <= 4 and 1 <= coluna <= 4
            and isinstance(linha, int) and isinstance(coluna, int))
5,326,764
def make_coro(func):
    """Wrap a normal function with a coroutine."""
    async def wrapper(*args, **kwargs):
        """Call the wrapped synchronous function and return its result."""
        result = func(*args, **kwargs)
        return result
    return wrapper
5,326,765
def list_repos_by_owner(owner):
    """Print a table of the owner's repositories.

    :param owner: repository owner whose repos are listed.

    NOTE(review): the original docstring claimed a list was returned, but
    nothing is returned — confirm whether callers expect a value.
    """
    repos = get_repos_by_owner(owner)
    x = PrettyTable(['Name', 'ID', 'Last build ID'])
    x.align['Name'] = 'l'
    x.align['Description'] = 'l'
    for r in repos:
        # Build each row explicitly rather than abusing a list
        # comprehension for its append side effects.
        row = [r[field] for field in ['slug', 'id', 'last_build_id']]
        x.add_row(row)
    # Fixed: `print x` was Python-2-only syntax (a SyntaxError under
    # Python 3, which the rest of this file targets).
    print(x)
5,326,766
def eliminate(values):
    """Apply the eliminate strategy to a Sudoku puzzle

    The eliminate strategy says that if a box has a value assigned, then none
    of the peers of that box can have the same value.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the assigned values eliminated from peers
    """
    solved_boxes = [box for box in values.keys() if len(values[box]) == 1]
    for box in solved_boxes:
        digit = values[box]
        # `digit` is guaranteed single-character by the filter above, so the
        # redundant `len(digit) == 1` re-check has been removed.
        for peer_box in peers[box]:
            values[peer_box] = values[peer_box].replace(digit, '')
    return values
5,326,767
def cli(
    connection, path, all, table, skip, redact, sql, output, pk, index_fks, progress
):
    """
    Load data from any database into SQLite.

    PATH is a path to the SQLite file to create, e.c. /tmp/my_database.db

    CONNECTION is a SQLAlchemy connection string, for example:

        postgresql://localhost/my_database
        postgresql://username:passwd@localhost/my_database

        mysql://root@localhost/my_database
        mysql://username:passwd@localhost/my_database

    More: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
    """
    if not all and not table and not sql:
        raise click.ClickException("--all OR --table OR --sql required")
    if skip and not all:
        raise click.ClickException("--skip can only be used with --all")
    redact_columns = {}
    for table_name, column_name in redact:
        redact_columns.setdefault(table_name, set()).add(column_name)
    db = Database(path)
    db_conn = create_engine(connection).connect()
    inspector = inspect(db_conn)
    # Figure out which tables we are copying, if any
    tables = table
    if all:
        tables = inspector.get_table_names()
    if tables:
        foreign_keys_to_add = []
        for i, table in enumerate(tables):
            if progress:
                click.echo("{}/{}: {}".format(i + 1, len(tables), table), err=True)
            if table in skip:
                if progress:
                    # FIX: this string literal was previously broken across
                    # two lines (a SyntaxError); rejoined into one literal.
                    click.echo(" ... skipping", err=True)
                continue
            pks = inspector.get_pk_constraint(table)["constrained_columns"]
            if len(pks) == 1:
                pks = pks[0]
            fks = inspector.get_foreign_keys(table)
            foreign_keys_to_add.extend(
                [
                    (
                        # table, column, other_table, other_column
                        table,
                        fk["constrained_columns"][0],
                        fk["referred_table"],
                        fk["referred_columns"][0],
                    )
                    for fk in fks
                ]
            )
            count = None
            if progress:
                count = db_conn.execute(
                    "select count(*) from {}".format(table)
                ).fetchone()[0]
            results = db_conn.execute("select * from {}".format(table))
            redact_these = redact_columns.get(table) or set()
            rows = (redacted_dict(r, redact_these) for r in results)
            # Make sure generator is not empty
            try:
                first = next(rows)
            except StopIteration:
                pass
            else:
                rows = itertools.chain([first], rows)
                if progress:
                    with click.progressbar(rows, length=count) as bar:
                        db[table].insert_all(bar, pk=pks, replace=True)
                else:
                    db[table].insert_all(rows, pk=pks, replace=True)
        foreign_keys_to_add_final = []
        for table, column, other_table, other_column in foreign_keys_to_add:
            # Make sure both tables exist and are not skipped - they may not
            # exist if they were empty and hence .insert_all() didn't have a
            # reason to create them.
            if (
                db[table].exists()
                and table not in skip
                and db[other_table].exists()
                and other_table not in skip
                # Also skip if this column is redacted
                and ((table, column) not in redact)
            ):
                foreign_keys_to_add_final.append(
                    (table, column, other_table, other_column)
                )
        if foreign_keys_to_add_final:
            # Add using .add_foreign_keys() to avoid running multiple VACUUMs
            if progress:
                click.echo(
                    "\nAdding {} foreign key{}\n{}".format(
                        len(foreign_keys_to_add_final),
                        "s" if len(foreign_keys_to_add_final) != 1 else "",
                        "\n".join(
                            "  {}.{} => {}.{}".format(*fk)
                            for fk in foreign_keys_to_add_final
                        ),
                    ),
                    err=True,
                )
            db.add_foreign_keys(foreign_keys_to_add_final)
    if sql:
        if not output:
            raise click.ClickException("--sql must be accompanied by --output")
        results = db_conn.execute(sql)
        rows = (dict(r) for r in results)
        db[output].insert_all(rows, pk=pk)
    if index_fks:
        db.index_foreign_keys()
5,326,768
def test_mg_i019_mg_i019_v(mode, save_output, output_format):
    """
    TEST :model groups (ALL) : choice: with children 4 any, 4 elements
    """
    binding_args = dict(
        schema="msData/modelGroups/mgI019.xsd",
        instance="msData/modelGroups/mgI019.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_args)
5,326,769
def set_new_shortcut(name, command, binding):
    """Creates a new custom shortcut in Gnome's settings.

    The new identifier of this shortcut is
    `/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/<name>/`.

    Parameters
    ----------
    name: str
        Name referring to the shortcut's functionality.
    command: str
        Shell command to execute when this shortcut is triggered.
    binding: str
        Key presses to react to, e.g. `<Ctrl><Alt>f`. To learn more about
        possible bindings and their format, look into the module's
        `README.md`.
    """
    existing = query_all_shortcuts()
    shortcut_id_base = "/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings"
    identifier = f"{shortcut_id_base}/{name}/"
    existing.append(identifier)
    overwrite_shortcut_list(existing)
    # schema for setting a key is followed by the identifier
    schema = f"{shortcut_schema}.custom-keybinding:{identifier}"
    for key, value in (("name", name), ("command", command), ("binding", binding)):
        set_gsetting(schema, key, value)
5,326,770
def sqlpool_blob_auditing_policy_update(
        cmd,
        instance,
        workspace_name,
        resource_group_name,
        sql_pool_name,
        state=None,
        blob_storage_target_state=None,
        storage_account=None,
        storage_endpoint=None,
        storage_account_access_key=None,
        storage_account_subscription_id=None,
        is_storage_secondary_key_in_use=None,
        retention_days=None,
        audit_actions_and_groups=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub=None,
        is_azure_monitor_target_enabled=None,
        blob_auditing_policy_name=None):
    """
    Updates a sql pool blob auditing policy. Custom update function to apply
    parameters to instance.

    NOTE(review): ``blob_auditing_policy_name`` is accepted but never
    forwarded — confirm whether that is intentional.
    """
    forwarded = dict(
        cmd=cmd,
        instance=instance,
        workspace_name=workspace_name,
        resource_group_name=resource_group_name,
        sql_pool_name=sql_pool_name,
        state=state,
        blob_storage_target_state=blob_storage_target_state,
        storage_account=storage_account,
        storage_endpoint=storage_endpoint,
        storage_account_access_key=storage_account_access_key,
        storage_account_subscription_id=storage_account_subscription_id,
        is_storage_secondary_key_in_use=is_storage_secondary_key_in_use,
        retention_days=retention_days,
        category_name='SQLSecurityAuditEvents',
        log_analytics_target_state=log_analytics_target_state,
        log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
        event_hub_target_state=event_hub_target_state,
        event_hub_authorization_rule_id=event_hub_authorization_rule_id,
        event_hub_name=event_hub,
        audit_actions_and_groups=audit_actions_and_groups,
        is_azure_monitor_target_enabled=is_azure_monitor_target_enabled,
    )
    _audit_policy_update(**forwarded)
    return instance
5,326,771
def load_plugins(ginga):
    """Load the ``stginga`` plugins.

    Parameters
    ----------
    ginga
        The ginga app object that is provided to ``pre_gui_config`` in
        ``ginga_config.py``.
    """
    stglobal_plugins, stlocal_plugins = _get_stginga_plugins()

    # Register each plugin set with its matching "already loaded" registry
    # and registration callback, skipping duplicates.
    plugin_sets = (
        (stglobal_plugins, ginga.global_plugins, ginga.add_global_plugin),
        (stlocal_plugins, ginga.local_plugins, ginga.add_local_plugin),
    )
    for plugins, loaded, register in plugin_sets:
        for spec in plugins:
            if spec['module'] in loaded:
                ginga.logger.info('Plugin {0} already loaded in Ginga. Not adding '
                                  'again.'.format(spec['module']))
            else:
                register(spec)
5,326,772
def find_unique_ID(list_of_input_smpls):
    """Attempt to determine a unique ID shared among all input sample
    names/IDs, via a largest substring function performed combinatorially
    exhaustively pairwise among the input list.

    Parameters
    ----------
    list_of_input_smpls : list
        Sample names/IDs to compare.

    Returns
    -------
    list
        Unique set of all possible found shared uid's.  For a single-element
        input, the element itself is returned (there is no pair to compare).
    """
    if len(list_of_input_smpls) == 1:
        # BUG FIX: the original assigned ``uid`` here but then unconditionally
        # overwrote it with the (empty) pairwise-substring set, so a
        # one-element input always returned [].  Return early instead.
        return list(list_of_input_smpls)

    # Largest common substring of every pair; de-duplicate via a set.
    uid = list({
        largest_substr(a, b)
        for a, b in itl.combinations(list_of_input_smpls, 2)
    })
    return uid
5,326,773
def cartesian_to_polar(xy):
    """Convert :class:`np.ndarray` `xy` to polar coordinates `r` and `theta`.

    Generalized to accept any number of rows: for a single point the results
    are scalars (as before); for n > 1 points they are length-n arrays.
    The original used ``math.sqrt``/``math.atan2``, which raise on arrays,
    so multi-row input previously crashed.

    Args:
        xy (:class:`np.ndarray`): 2-dim array of x,y coordinates, one point
            per row.

    Returns:
        r, theta (tuple): step-length(s) and angle(s); scalars for a single
        point, arrays otherwise.
    """
    assert xy.ndim == 2, f"Dimensions are {xy.ndim}, expecting 2"
    # Split off the last column as y, everything before it as x.
    x, y = np.split(xy, [-1], axis=1)
    x, y = np.squeeze(x), np.squeeze(y)
    # np.hypot/np.arctan2 work elementwise and are numerically robust.
    r = np.hypot(x, y)
    theta = np.arctan2(y, x)
    return r, theta
5,326,774
def savefiles(linklist, outputflag, args):
    """Using links collected by getlinks() and parsed by bs4, this function
    saves file(s) of the html contents of the <body> tag of web pages in one
    of three ways: as separate html files, as separate PDF files (using the
    pdfkit package and wkhtmltopdf), or by default as a single appended html
    file.

    Parameters:
        linklist: iterable of URL strings to fetch.
        outputflag: 'html' for one file per link, 'pdf' for one PDF per
            link, anything else appends all bodies to ``args.outfile``.
        args: parsed CLI namespace; only ``args.outfile`` is read here.

    Fetch failures (missing URL scheme) and Unicode errors are reported to
    the console and skipped; they do not abort the loop.
    """
    for link in linklist:
        print('\n Getting ' + link)
        try:
            page = requests.get(link).content
        except requests.exceptions.MissingSchema: # Non-fatal error
            # ANSI bold red error text; continue with the next link.
            print('\033[1m\033[31m' + ' URL error: ' + link + ' cannot be \
processed' + '\033[0m')
            continue
        else:
            # Only the <body> element of the fetched page is kept.
            bsObject = BeautifulSoup(page, 'lxml')
            htmltext = bsObject.body
        try:
            if outputflag == 'html':
                # Use the last path segment of the URL as the file name.
                seperate_file = (link.rsplit(sep='/', maxsplit=1))[1] # Remove
                # url path from link
                print(' Saving <' + seperate_file + '> in current directory')
                with open(seperate_file, 'w') as outfile:
                    outfile.write(str(htmltext))
            elif outputflag == 'pdf':
                wk_options = {'quiet':None} # Dict for wkhtmltopdf options
                # Last URL segment, with its extension replaced by .pdf.
                temp_link = (link.rsplit(sep='/', maxsplit=1))[1]
                pdf_file = (temp_link.rsplit(sep='.', maxsplit=1))[0] + '.pdf'
                print(' Saving <' + pdf_file + '> in current directory')
                pdfkit.from_string(
                    str(htmltext), str(pdf_file), options=wk_options)
            else:
                # Default: append every body to the single output file.
                with open(args.outfile, 'a') as outfile:
                    outfile.write(str(htmltext))
            logging.info('Saved {}'.format(link))
        except UnicodeError: # Non-fatal error
            print('\033[1m\033[31m' + ' Unicode codec error. One or more \
characters may be corrupted in: ' + link + '\033[0m')
            continue
5,326,775
def return_manifold(name):
    """
    Returns a list of possible manifolds with name 'name'.

    Args:
        name: manifold name, str.

    Returns:
        list of manifolds, name, metrics, retractions
    """
    m_list = []
    descr_list = []

    # Manifolds parameterised by a metric only, with their supported metrics.
    metric_only = {
        'ChoiMatrix': (manifolds.ChoiMatrix, ('euclidean',)),
        'DensityMatrix': (manifolds.DensityMatrix, ('euclidean',)),
        'HermitianMatrix': (manifolds.HermitianMatrix, ('euclidean',)),
        'PositiveCone': (manifolds.PositiveCone,
                         ('log_euclidean', 'log_cholesky')),
    }
    if name in metric_only:
        factory, metrics = metric_only[name]
        for metric in metrics:
            m_list.append(factory(metric=metric))
            descr_list.append((name, metric))

    if name == 'StiefelManifold':
        # Stiefel additionally varies over the retraction scheme.
        for metric in ('euclidean', 'canonical'):
            for retraction in ('svd', 'cayley', 'qr'):
                m_list.append(manifolds.StiefelManifold(metric=metric,
                                                        retraction=retraction))
                descr_list.append((name, metric, retraction))

    return m_list, descr_list
5,326,776
def change_to_rgba_array(image, dtype="uint8"):
    """Converts an RGB array into RGBA with the alpha value opacity maxed.

    2-dim (grayscale) input is first given a channel axis, single-channel
    input is broadcast to three channels, and a fully opaque (255) alpha
    channel is appended to any 3-channel array.  4-channel input is
    returned unchanged.
    """
    arr = image
    if arr.ndim == 2:
        # Grayscale image: add a trailing channel axis.
        arr = arr.reshape(arr.shape + (1,))
    if arr.shape[2] == 1:
        # Single channel -> replicate to RGB.
        arr = arr.repeat(3, axis=2)
    if arr.shape[2] == 3:
        # Append a fully-opaque alpha channel.
        alpha = np.full(arr.shape[:2] + (1,), 255, dtype=dtype)
        arr = np.append(arr, alpha, axis=2)
    return arr
5,326,777
def spacetime_lookup(ra,dec,time=None,buffer=0,print_table=True):
    """
    Check for overlapping TESS ovservations for a transient.
    Uses the Open SNe Catalog for discovery/max times and coordinates.

    ------
    Inputs
    ------
    ra : float or str
        ra of object
    dec : float or str
        dec of object
    time : float
        reference time to use, must be in MJD
    buffer : float
        overlap buffer time in days

    -------
    Options
    -------
    print_table : bool
        if true then the lookup table is printed

    -------
    Returns
    -------
    tr_list : list
        list of ra, dec, and sector that can be put into tessreduce.
        Each entry is [ra, dec, sector, covered_flag].  Returns None when
        no TESS sector covers the position.
    """
    if time is None:
        print('!!! WARNING no MJD time specified, using default of 59000')
        time = 59000

    # Sexagesimal input: convert "hh:mm:ss dd:mm:ss"-style coords to degrees.
    if type(ra) == str:
        c = SkyCoord(ra,dec, unit=(u.hourangle, u.deg))
        ra = c.ra.deg
        dec = c.dec.deg
    # Map the sky position onto the TESS focal plane to find observing sectors.
    outID, outEclipLong, outEclipLat, outSecs, outCam, outCcd, outColPix, \
    outRowPix, scinfo = focal_plane(0, ra, dec)
    # Packaged table of per-sector observation start/end times (MJD).
    sec_times = pd.read_csv(package_directory + 'sector_mjd.csv')
    if len(outSecs) > 0:
        # Sector numbers are 1-based; the csv is 0-indexed.
        ind = outSecs - 1
        secs = sec_times.iloc[ind]
        # Offsets of each sector's window relative to the reference time.
        disc_start = secs['mjd_start'].values - time
        disc_end = secs['mjd_end'].values - time

        covers = []
        differences = []
        tr_list = []
        tab = []
        for i in range(len(disc_start)):
            ds = disc_start[i]
            de = disc_end[i]
            # Covered when the (buffered) window straddles the reference time.
            if (ds-buffer < 0) & (de + buffer> 0):
                cover = True
                dif = 0
            elif (de+buffer < 0):
                # Sector ended before the reference time; dif is negative.
                cover = False
                dif = de
            elif (ds-buffer > 0):
                # Sector starts after the reference time; dif is positive.
                cover = False
                dif = ds
            covers += [cover]
            differences += [dif]
            tab += [[secs.Sector.values[i], cover, dif]]
            tr_list += [[ra, dec, secs.Sector.values[i], cover]]
        if print_table:
            print(tabulate(tab, headers=['Sector', 'Covers','Time difference \n(days)'], tablefmt='orgtbl'))
        return tr_list
    else:
        print('No TESS coverage')
        return None
5,326,778
def main(): """ Main function. """ # First Specify all parameters domain_vars = [{'name': 'rw', 'type': 'float', 'min': 0.05, 'max': 0.15, 'dim': 1}, {'name': 'L_Kw', 'type': 'float', 'min': 0, 'max': 1, 'dim': 2}, {'name': 'Tu', 'type': 'int', 'min': 63070, 'max': 115600, 'dim': ''}, {'name': 'Tl', 'type': 'float', 'min': 63.1, 'max': 116}, {'name': 'Hu_Hl', 'type': 'int', 'min': 0, 'max': 240, 'dim': 2}, {'name': 'r', 'type': 'float', 'min': 100, 'max': 50000}, ] domain_constraints = [{'constraint': 'np.sqrt(rw[0]) + L_Kw[1] <= 0.9'}, {'constraint': 'r/100.0 + Hu_Hl[1] < 200'} ] fidel_vars = [{'name': 'fidel_0', 'type': 'float', 'min': 0.05, 'max': 0.25}, {'name': 'fidel_1', 'type': 'discrete_numeric', 'items': "0.1:0.05:1.01"}, ] fidel_space_constraints = [ {'name': 'fsc1', 'constraint': 'fidel_0 + fidel_1 <= 0.9'} ] fidel_to_opt = [0.1, 0.75] # Budget of evaluations max_num_evals = 100 # Optimisation budget (max number of evaluations) max_mf_capital = max_num_evals * mf_cost(fidel_to_opt) # Multi-fideltiy capital # First do the MF version config_params = {'domain': domain_vars, 'fidel_space': fidel_vars, 'domain_constraints': domain_constraints, 'fidel_space_constraints': fidel_space_constraints, 'fidel_to_opt': fidel_to_opt} config = load_config(config_params) # Optimise mf_opt_pt, mf_opt_val, history = maximise_multifidelity_function(mf_objective, config.fidel_space, config.domain, config.fidel_to_opt, mf_cost, max_mf_capital, config=config) print(mf_opt_pt, mf_opt_val) # Non-MF version config_params = {'domain': domain_vars, 'domain_constraints': domain_constraints} config = load_config(config_params) max_capital = 100 # Optimisation budget (max number of evaluations) # Optimise opt_pt, opt_val, history = maximise_function(objective, config.domain, max_num_evals, config=config) print(opt_pt, opt_val)
5,326,779
def utc2local(utc: Union[date, datetime]) -> Union[datetime, date]: """Returns the local datetime Args: utc: UTC type date or datetime. Returns: Local datetime. """ epoch = time.mktime(utc.timetuple()) offset = datetime.fromtimestamp(epoch) - datetime.utcfromtimestamp(epoch) return utc + offset
5,326,780
def getAsDateTimeStr(value, offset=0,fmt=_formatTimeStr()):
    """ return time as 2004-01-10T00:13:50.000Z

    Accepts a time tuple/struct_time, a numeric epoch timestamp, a string
    already in ``fmt``, or a ``datetime``.  ``offset`` shifts the result:
    seconds for numeric input, days for datetime input (fed to
    ``timedelta``).  A string ``offset`` is rejected with a message on
    stderr, in which case None is returned.

    NOTE: the default ``fmt=_formatTimeStr()`` is evaluated once at
    definition time, not per call.
    NOTE(review): some paths (e.g. datetime input with string offset, or an
    unsupported value type) fall through and return None implicitly.
    """
    import sys,time
    import types
    from datetime import datetime
    if (not isinstance(offset,str)):
        # struct_time / time tuple: format directly (offset is ignored here).
        if isinstance(value, (tuple, time.struct_time,)):
            return time.strftime(fmt, value)
        # Numeric epoch seconds: apply the offset in seconds, format as UTC.
        if isinstance(value, (int, float,)):
            secs = time.gmtime(value+offset)
            return time.strftime(fmt, secs)
        if isinstance(value, str):
            # Round-trip the string through strptime to normalise it.
            try:
                value = time.strptime(value, fmt)
                return time.strftime(fmt, value)
            except Exception as details:
                info_string = formattedException(details=details)
                sys.stderr.write('ERROR :: getDateTimeTuple Could not parse "%s".\n%s\n' % (value,info_string))
            # Fallback on parse failure: current UTC time plus offset.
            secs = time.gmtime(time.time()+offset)
            return time.strftime(fmt, secs)
        elif (isinstance(value,datetime)):
            from datetime import timedelta
            # For datetime input the offset is interpreted as days.
            if (offset is not None):
                value += timedelta(offset)
            ts = time.strftime(fmt, value.timetuple())
            return ts
    else:
        sys.stderr.write('ERROR :: offset must be a numeric type rather than string type.\n')
5,326,781
def gvisc(P, T, Z, grav):
    """Function to Calculate Gas Viscosity in cp

    Parameters:
        P: pressure, psia
        T: temperature, degrees Rankine
        Z: gas compressibility factor
        grav: gas specific gravity

    Returns the gas viscosity in centipoise.
    """
    mol_weight = 28.964 * grav
    x_param = 3.448 + 986.4 / T + 0.01009 * mol_weight
    y_param = 2.447 - 0.2224 * x_param
    # Gas density in g/cc from the real-gas law.
    density = (1.4926 / 1000) * P * mol_weight / Z / T
    k_param = (9.379 + 0.01607 * mol_weight) * T ** 1.5 / (209.2 + 19.26 * mol_weight + T)
    # Viscosity in micropoise, scaled to centipoise.
    return k_param * math.exp(x_param * density ** y_param) / 10000
5,326,782
def drawTime2avgFlow(df4time2flow: pd.DataFrame, save: bool = True) -> NoReturn: """生成车站-时段图(采用七天平均值) Args: df4time2flow (pd.DataFrame): 经generateTime2FlowDf函数生成的数据集 save (bool, optional): 是否将图像进行保存而非显示. Defaults to True. """ # 绘制车站-时段图(采用七天平均值) plt.figure(figsize=(13, 9)) fig = sns.heatmap( df4time2flow, cmap=sns.diverging_palette(230, 20, as_cmap=True), linewidths=0.5 ) # 设置x-y轴标签 plt.xlabel("时段") plt.ylabel("始发站") # 重新设定x轴标签 xlabels = [x for x in range(18)] plt.xlim([0, 17]) plt.xticks(xlabels) fig.set_xticklabels([f"{label+7:d} : 00" for label in xlabels]) if save: fig.get_figure().savefig( SEPERATOR.join([".", "result", "raw", "车站-时段图(采用七天平均值)"]) ) else: plt.show()
5,326,783
def get_closest_mesh_normal_to_pt(mesh, pt):
    """
    Finds the closest vertex normal to the point.

    Parameters
    ----------
    mesh: :class: 'compas.datastructures.Mesh'
    pt: :class: 'compas.geometry.Point'

    Returns
    ----------
    :class: 'compas.geometry.Vector'
        The closest normal of the mesh.
    """
    vkey = get_closest_mesh_vkey_to_pt(mesh, pt)
    nx, ny, nz = mesh.vertex_normal(vkey)
    return Vector(nx, ny, nz)
5,326,784
async def apiDiscordAssignrolesDelete(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
    """
    Default url: /api/discord/assignroles/delete

    Deletes one assign-role entry from a guild's configuration.  Requires a
    logged-in Discord web user who is a member of the guild with either
    administrator or manage-guild permission.  On success the row is removed
    from `discord_assignrole` and the deletion is logged asynchronously.
    """
    Data:WebRequestContent = WebRequestContent(WebRequest)
    await Data.load()

    # get required vars
    guild_id:str = Data.getStr("guild_id", UNDEFINED, must_be_digit=True)
    assignrole_id:str = Data.getStr("assignrole_id", UNDEFINED, must_be_digit=True)

    # checks
    if not guild_id:
        return await apiMissingData(cls, WebRequest, msg="missing or invalid 'guild_id'")

    if not assignrole_id:
        return await apiMissingData(cls, WebRequest, msg="missing or invalid 'assignrole_id'")

    # The bot must actually be in this guild.
    PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
    Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
    if not Guild:
        return await cls.Tree.Api.Discord.errors.apiDiscordGuildUnknown(cls, WebRequest)

    # get user info (web session -> Discord user)
    AuthDiscord:AuthDiscordWebUser = await authDiscordWebUser(cls, WebRequest)
    if not AuthDiscord.found:
        return await apiMissingAuthorisation(cls, WebRequest)

    # get member
    CheckMember:discord.Member = Guild.get_member(int(AuthDiscord.User.user_id))
    if not CheckMember:
        return await cls.Tree.Api.Discord.errors.apiDiscordMemberNotFound(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)

    # check permissions: admin or manage-guild is required to edit assign roles
    if not (CheckMember.guild_permissions.administrator or CheckMember.guild_permissions.manage_guild):
        return await cls.Tree.Api.Discord.errors.apiDiscordMissingPermission(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)

    # get assign roles; the entry must exist before we try to delete it
    res_assignroles:list = await getDiscordServerAssignRoles(PhaazeDiscord, guild_id=guild_id, assignrole_id=assignrole_id)

    if not res_assignroles:
        return await cls.Tree.Api.Discord.Assignroles.errors.apiDiscordAssignRoleNotExists(cls, WebRequest, assignrole_id=assignrole_id)

    AssignRoleToDelete:DiscordAssignRole = res_assignroles.pop(0)

    cls.BASE.PhaazeDB.deleteQuery("""
        DELETE FROM `discord_assignrole` WHERE `guild_id` = %s AND `id` = %s""",
        (AssignRoleToDelete.guild_id, AssignRoleToDelete.assignrole_id)
    )

    # logging: fire-and-forget on the Discord event loop
    GuildSettings:DiscordServerSettings = await getDiscordSeverSettings(PhaazeDiscord, guild_id, prevent_new=True)
    log_coro:Coroutine = loggingOnAssignroleDelete(PhaazeDiscord, GuildSettings, Deleter=CheckMember, assign_role_trigger=AssignRoleToDelete.trigger, )
    asyncio.ensure_future(log_coro, loop=cls.BASE.DiscordLoop)

    cls.BASE.Logger.debug(f"(API/Discord) Assignroles: {guild_id=} deleted {assignrole_id=}", require="discord:role")
    return cls.response(
        text=json.dumps(dict(msg="Assignroles: Deleted entry", deleted=AssignRoleToDelete.trigger, status=200)),
        content_type="application/json",
        status=200
    )
5,326,785
def handle_new_application(sender, instance, created, **kwargs):
    """ Send welcome message to learner introducing them to their facilitator

    Django post_save signal handler for Application: runs only on creation
    (not on updates).  Renders the localized welcome email (optionally with
    a random piece of advice, English only) and sends it to the learner,
    CC'ing the facilitator and setting them as reply-to.
    """
    if not created:
        return
    application = instance

    # get a random piece of advice
    # TODO only supported in English atm
    advice = None
    if application.study_group.language == 'en':
        advice = Advice.objects.order_by('?').first()

    # activate language and timezone for message reminder
    # so templates render in the study group's locale and local time
    with use_language(application.study_group.language), timezone.override(pytz.timezone(application.study_group.timezone)):
        # Send welcome message to learner
        learner_signup_subject = render_to_string_ctx(
            'studygroups/email/learner_signup-subject.txt',
            {
                'application': application,
                'advice': advice,
            }
        ).strip('\n')
        learner_signup_html = render_html_with_css(
            'studygroups/email/learner_signup.html',
            {
                'application': application,
                'advice': advice,
            }
        )
        # Plain-text alternative derived from the rendered HTML.
        learner_signup_body = html_body_to_text(learner_signup_html)
    to = [application.email]
    # CC facilitator and put in reply-to
    welcome_message = EmailMultiAlternatives(
        learner_signup_subject,
        learner_signup_body,
        settings.DEFAULT_FROM_EMAIL,
        to,
        cc=[application.study_group.facilitator.email],
        reply_to=[application.study_group.facilitator.email]
    )
    welcome_message.attach_alternative(learner_signup_html, 'text/html')
    welcome_message.send()
5,326,786
def plot_water_levels(station, dates, levels):
    """Display a plot of a station's water level data against time,
    overlaid with dashed lines marking the station's typical range."""
    earliest = min(dates, default=0)
    latest = max(dates, default=0)

    plt.plot(dates, levels, label="Fetched Data")
    plt.hlines(
        station.typical_range,
        earliest,
        latest,
        linestyles="dashed",
        label="Typical Range",
    )
    plt.xlabel("date")
    plt.ylabel("water level (m)")
    plt.title("{}, {}".format(station.name, station.town), )
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.legend()
    plt.show()
5,326,787
def algo_reg_deco(func):
    """
    Decorator for making registry of functions

    Registers *func* in the module-level ``algorithms`` table under its
    name and returns it unchanged.
    """
    registry_key = str(func.__name__)
    algorithms[registry_key] = func
    return func
5,326,788
def find_top_slices(metrics: List[metrics_for_slice_pb2.MetricsForSlice],
                    metric_key: Text,
                    statistics: statistics_pb2.DatasetFeatureStatisticsList,
                    comparison_type: Text = 'HIGHER',
                    min_num_examples: int = 10,
                    num_top_slices: int = 10,
                    rank_by: Text = 'EFFECT_SIZE'):
  """Finds top-k slices.

  Args:
    metrics: List of slice metrics protos. We assume that the metrics have
      MetricValue.confidence_interval field populated. This will be populated
      when the metrics computed with confidence intervals enabled.
    metric_key: Name of the metric based on which significance testing is done.
    statistics: Data statistics used to configure AutoSliceKeyExtractor.
    comparison_type: Type of comparison indicating if we are looking for slices
      whose metric is higher (`HIGHER`) or lower (`LOWER`) than the metric of
      the base slice (overall dataset).
    min_num_examples: Minimum number of examples that a slice should have.
    num_top_slices: Number of top slices to return.
    rank_by: Indicates how the slices should be ordered in the result.

  Returns:
    List of ordered slices (SliceComparisonResult), at most num_top_slices
    long, containing only statistically significant slices.
  """
  assert comparison_type in ['HIGHER', 'LOWER']
  assert min_num_examples > 0
  assert 0 < num_top_slices
  assert rank_by in ['EFFECT_SIZE', 'PVALUE']

  metrics_dict = {
      slicer_lib.deserialize_slice_key(slice_metrics.slice_key): slice_metrics
      for slice_metrics in metrics
  }
  # The empty slice key () denotes the overall (unsliced) dataset; it is the
  # baseline and must not compete with the candidate slices.
  overall_slice_metrics = metrics_dict[()]
  del metrics_dict[()]

  boundaries = auto_slice_key_extractor._get_bucket_boundaries(statistics)  # pylint: disable=protected-access
  overall_metrics_dict = _get_metrics_as_dict(overall_slice_metrics)
  to_be_sorted_slices = []
  for slice_key, slice_metrics in metrics_dict.items():
    slice_metrics_dict = _get_metrics_as_dict(slice_metrics)
    num_examples = slice_metrics_dict['example_count'].unsampled_value
    if num_examples < min_num_examples:
      continue
    # Prune non-interesting slices.
    if np.isnan(slice_metrics_dict[metric_key].unsampled_value):
      continue
    # A slice is "interesting" only if its metric is strictly on the requested
    # side of the overall metric; comparison_fn tests the opposite direction.
    if comparison_type == 'HIGHER':
      comparison_fn = operator.le
    else:
      comparison_fn = operator.ge
    if comparison_fn(slice_metrics_dict[metric_key].unsampled_value,
                     overall_metrics_dict[metric_key].unsampled_value):
      continue

    # Only consider statistically significant slices.
    is_significant, pvalue = _is_significant_slice(
        slice_metrics_dict[metric_key].unsampled_value,
        slice_metrics_dict[metric_key].sample_standard_deviation,
        slice_metrics_dict['example_count'].unsampled_value,
        overall_metrics_dict[metric_key].unsampled_value,
        overall_metrics_dict[metric_key].sample_standard_deviation,
        overall_metrics_dict['example_count'].unsampled_value, comparison_type)
    if not is_significant:
      continue
    # Format the slice info (feature names, values) in the proto into a
    # slice key.  Transformed (bucketized) features are mapped back to their
    # original name and value range for readability.
    transformed_slice_key = []
    for (feature, value) in slice_key:
      if feature.startswith(
          auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX):
        feature = feature[len(auto_slice_key_extractor
                              .TRANSFORMED_FEATURE_PREFIX):]
        value = _bucket_to_range(value, boundaries[feature])
      transformed_slice_key.append((feature, value))
    slice_key = slicer_lib.stringify_slice_key(tuple(transformed_slice_key))

    # Compute effect size for the slice.
    effect_size = _compute_effect_size(
        slice_metrics_dict[metric_key].unsampled_value,
        slice_metrics_dict[metric_key].sample_standard_deviation,
        overall_metrics_dict[metric_key].unsampled_value,
        overall_metrics_dict[metric_key].sample_standard_deviation)
    to_be_sorted_slices.append(
        SliceComparisonResult(slice_key, num_examples,
                              slice_metrics_dict[metric_key].unsampled_value,
                              overall_metrics_dict[metric_key].unsampled_value,
                              pvalue, effect_size))
  # Rank the slices: larger effect size first, or smaller p-value first.
  ranking_fn, reverse = operator.attrgetter('effect_size'), True
  if rank_by == 'PVALUE':
    ranking_fn, reverse = operator.attrgetter('pvalue'), False
  result = sorted(
      to_be_sorted_slices, key=ranking_fn, reverse=reverse)[:num_top_slices]
  return result
5,326,789
def parse_texts(texts):
    """
    Create a set of parsed documents from a set of texts.

    Parsed documents are sequences of tokens whose embedding vectors can be
    looked up.

    :param texts: text documents to parse
    :type texts: sequence of strings
    :return: parsed documents
    :rtype: sequence of spacy.Doc
    """
    parser = _load_text_parser()
    return parser.pipe(texts)
5,326,790
def dict_to_string(d):
    """Return the passed dict of items converted to a json string.
    All items should have the same type; each value is either None or an
    object exposing a ``to_data()`` method.

    Args:
        d (dict): Dictionary to convert

    Returns:
        str: JSON version of dict
    """
    serialisable = {
        key: (None if value is None else value.to_data())
        for key, value in d.items()
    }
    return _json.dumps(serialisable)
5,326,791
def domains(request):
    """
    A page with number of services and layers faceted on domains.

    Queries the configured search backend (Solr or Elasticsearch) for a
    facet pivot over (domain_name, service_id) and renders it together
    with total layer/service counts.

    NOTE(review): uses urllib2 and RequestContext — this is Python 2 /
    legacy Django code; no error handling if the search request fails.
    """
    url = ''
    query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
    # Build the backend-specific request URL; url stays '' for any other
    # SEARCH_TYPE value.
    if settings.SEARCH_TYPE == 'elasticsearch':
        url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
    if settings.SEARCH_TYPE == 'solr':
        url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
    LOGGER.debug(url)
    response = urllib2.urlopen(url)
    # Strip newlines so the JSON payload can be embedded in the template.
    data = response.read().replace('\n', '')
    # stats
    layers_count = Layer.objects.all().count()
    services_count = Service.objects.all().count()
    template = loader.get_template('aggregator/index.html')
    context = RequestContext(request, {
        'data': data,
        'layers_count': layers_count,
        'services_count': services_count,
    })
    return HttpResponse(template.render(context))
5,326,792
def load_test_data(path, var, years=slice('2017', '2018')):
    """
    Load and concatenate the test-period files for one variable.

    Args:
        path: Path to nc files
        var: variable. Geopotential = 'z', Temperature = 't'
        years: slice for time window

    Returns:
        dataset: Concatenated dataset for 2017 and 2018
    """
    assert var in ['z', 't'], 'Test data only for Z500 and T850'
    dataset = xr.open_mfdataset(f'{path}/*.nc', combine='by_coords')[var]
    pressure_level = 500 if var == 'z' else 850
    try:
        dataset = dataset.sel(level=pressure_level).drop('level')
    except ValueError:
        # No 'level' coordinate: data is already single-level.
        pass
    return dataset.sel(time=years)
5,326,793
def measure_of_dispersion(type_, df, col):
    """Calculate the measure of dispersion.

    This function accepts the measure of dispersion to be calculated, the
    data frame and the required column(s). It prints and returns the
    calculated measure.

    Keyword arguments:
    type_ -- type of measure to be calculated; one of 'range',
             'mean absolute deviation', 'standard deviation',
             'coefficient of variation', 'iqr', 'covariance'
    df -- the dataframe
    col -- the column(s) in the dataframe to do the calculations, this is a
           list with 2 elements if we want to calculate covariance

    Returns:
    disp -- the calculated measure of dispersion, rounded to 3 decimals
            (the original printed disp but never returned it — fixed)

    Raises:
    ValueError -- if type_ is not one of the supported measures
    """
    if type_ == 'range':
        # Range = max - min
        min_ = df[col].min()
        max_ = df[col].max()
        disp = round(max_ - min_, 3)
        print(f"Range for {col} is {disp}")
    elif type_ == 'mean absolute deviation':
        # MAD = 1/n*(summation(|x-mean|)).  Computed explicitly because
        # Series.mad() was removed in pandas 2.0.
        mean_absolute_deviation = (df[col] - df[col].mean()).abs().mean()
        disp = round(mean_absolute_deviation, 3)
        print(f"Mean Absolute Deviation for {col} is {disp}")
    elif type_ == 'standard deviation':
        # Sample standard deviation (pandas default, ddof=1).
        disp = round(df[col].std(), 3)
        print(f"Standard Deviation for {col} is {disp}")
    elif type_ == 'coefficient of variation':
        # coefficient of variation = (standard_deviation/mean)*100
        disp = round(df[col].std() / df[col].mean() * 100, 3)
        print(f"Coefficient of Variation for {col} is {disp}")
    elif type_ == 'iqr':
        # Interquartile range = Q3 - Q1
        q1 = df[col].quantile(q=0.25)
        q3 = df[col].quantile(q=0.75)
        disp = round(q3 - q1, 3)
        print(f"IQR for {col} is {disp}")
    elif type_ == 'covariance':
        # Covariance = 1/n*[summation(x-x_mean)*(y-y_mean)]
        # (population estimator, matching the original implementation)
        col1, col2 = col[0], col[1]
        products = (df[col1] - df[col1].mean()) * (df[col2] - df[col2].mean())
        disp = round(products.sum() / products.size, 3)
        print(f"Covariance for {col1} and {col2} is {disp}")
    else:
        # The original silently returned None on an unknown measure.
        raise ValueError(f"Unknown measure of dispersion: {type_!r}")
    # BUG FIX: the original never returned disp despite documenting it.
    return disp
5,326,794
def encode(state, b=None):
    """
    Encode a base-*b* array of integers into a single integer.

    This function uses a `big-endian`__ encoding scheme: the most
    significant digits of the encoded integer come from the left-most end
    of the unencoded state.

    >>> from pyinform.utils import *
    >>> encode([0,0,1], b=2)
    1
    >>> encode([0,1,0], b=3)
    3
    >>> encode([1,0,4], b=5)
    29

    If *b* is not provided (or is None), the base is inferred from the
    state with a minimum value of 2:

    >>> encode([0,0,2])
    2
    >>> encode([1,2,1])
    16

    See also :py:func:`.decode`.

    .. __: https://en.wikipedia.org/wiki/Endianness#Examples

    :param sequence state: the state to encode
    :param int b: the base in which to encode
    :return: the encoded state
    :rtype: int
    :raises ValueError: if the state is empty
    :raises InformError: if an error occurs in the ``inform`` C call
    """
    arr = np.ascontiguousarray(state, dtype=np.int32)
    if arr.size == 0:
        raise ValueError("cannot encode an empty array")
    if b is None:
        # Infer the base from the largest symbol, never dropping below 2.
        b = max(2, np.amax(arr)+1)
    buf = arr.ctypes.data_as(POINTER(c_int))
    err = ErrorCode(0)
    result = _inform_encode(buf, c_ulong(arr.size), c_int(b), byref(err))
    error_guard(err)
    return result
5,326,795
def get_equivalent(curie: str, cutoff: Optional[int] = None) -> Set[str]:
    """Get equivalent CURIEs.

    Walks the default canonicalizer's shortest-path graph from ``curie``
    (optionally depth-limited by ``cutoff``) and returns the reachable
    CURIEs as a set; empty when nothing is reachable.
    """
    paths = Canonicalizer.get_default().single_source_shortest_path(
        curie=curie, cutoff=cutoff)
    return set(paths) if paths else set()
5,326,796
def _standardize_input(y_true, y_pred, multioutput): """ This function check the validation of the input input should be one of list/tuple/ndarray with same shape and not be None input will be changed to corresponding 2-dim ndarray """ if y_true is None or y_pred is None: raise ValueError("The input is None.") if not isinstance(y_true, (list, tuple, np.ndarray, pd.DataFrame)): raise ValueError("Expected array-like input." "Only list/tuple/ndarray/pd.DataFrame are supported") if isinstance(y_true, (list, tuple)): y_true = np.array(y_true) if isinstance(y_pred, (list, tuple)): y_pred = np.array(y_pred) if isinstance(y_true, pd.DataFrame) and isinstance(y_pred, pd.DataFrame): y_true = y_true.to_numpy() y_pred = y_pred.to_numpy() original_shape = y_true.shape[1:] if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) else: y_true = y_true.reshape((y_true.shape[0], -1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) else: y_pred = y_pred.reshape((y_pred.shape[0], -1)) if y_true.shape[0] != y_pred.shape[0]: raise ValueError("y_true and y_pred have different number of samples " "({0}!={1})".format(y_true.shape[0], y_pred.shape[0])) if y_true.shape[1] != y_pred.shape[1]: raise ValueError("y_true and y_pred have different number of output " "({0}!={1})".format(y_true.shape[1], y_pred.shape[1])) allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. " "You provided multioutput={!r}" .format(allowed_multioutput_str, multioutput)) return y_true, y_pred, original_shape
5,326,797
def warning_si_demora(limite_segs, mensaje):
    """
    Context manager that times its body and emits a warning when the
    duration exceeds ``limite_segs`` seconds (no limit when None).
    """
    arranque = datetime.now()
    yield
    transcurrido = int((datetime.now() - arranque).total_seconds())
    if limite_segs is not None and transcurrido > limite_segs:
        warnings.warn(mensaje + f" [duración: {transcurrido} segundos]")
5,326,798
def disk_detach(vmdk_path, vm):
    """detach disk (by full path) from a vm and return None or err(msg)"""
    device = findDeviceByPath(vmdk_path, vm)
    if device:
        return disk_detach_int(vmdk_path, vm, device)
    # No such disk on this VM.  This can happen when the disk is attached to
    # a different VM (attach fails but docker still insists on sending
    # "unmount/detach", which also fails), or when the plugin retries an
    # operation after socket errors (#1076).  Report success since the disk
    # is not attached anyway.
    logging.warning("*** Detach disk={0} not found. VM={1}".format(
        vmdk_path, vm.config.uuid))
    return None
5,326,799