Dataset columns — content: string (length 22 to 815k); id: int64 (0 to 4.91M)
def setForceFieldVersion(version):
    """
    Set the forcefield parameters using a version number (string) and return
    a handle to the object holding them.

    Current versions are:
        '4.2':     emulating AutoDock4.2
        'default': future AutoDock5

    Parameters <- setForceFieldVersion(version)
    """
    _parameters.clearAll()
    if version == 'default':
        _parameters.loadDefaults()
    elif version == '4.2':
        _parameters.loadFromDatFile(os.path.join(ADFRcc.__path__[0], 'Data', 'AD42PARAM.DAT'))
    else:
        raise ValueError("ERROR: unknown forcefield version %s" % version)
    return _parameters
24,200
async def test_not_callable_modifier():
    """
    Test that a non-callable ``conn_modifier`` raises a specific ``TypeError``.
    """
    with pytest.raises(TypeError) as err:
        await Dispatcher().dispatch("otus", "update", {"test": True}, conn_modifier="abc")
    assert "conn_modifier must be callable" in str(err.value)
24,201
def run(args):
    """ Runs the simulator """
    for i in range(args.numtrack):
        run_time = 0
        init_x, init_y, init_z, velx, vely, velz, accx, accy, accz = initialize_track_creation()
        sim = Simulator(init_x, init_y, init_z, x_vel=velx, y_vel=vely, z_vel=velz,
                        x_acc=accx, y_acc=accy, z_acc=accz, time=run_time)
        f = None
        if args.filepath:
            f = open(args.filepath + str(i) + ".csv", 'w')
            f.write("# Measurement time, meas_x, meas_y, meas_z, true_x, true_y, true_z, "
                    "true_velx, true_vely, true_velz, true_accx, true_accy, true_accz\n")
        while run_time < args.time:
            # Grab the current time and predict the simulated measurement to this time
            updatetime = round(run_time)
            logging.debug(f"updatetime {updatetime}")
            measurement_update = sim.predict_at_time(updatetime)
            # use a separate loop variable so the track index `i` is not shadowed
            for j in range(len(measurement_update.measurements)):
                measurement = measurement_pb2.measurement()
                measurement.ParseFromString(measurement_update.measurements[j])
                print(f"measurement {measurement.time}")
                if f:
                    f.write(f"{measurement.time},{measurement.x},{measurement.y},{measurement.z},"
                            f"{measurement.true_x},{measurement.true_y},{measurement.true_z},"
                            f"{velx},{vely},{velz},"
                            f"{accx},{accy},{accz}\n")
            # repeat measurement production every 2 seconds
            run_time = run_time + 2
        if f:
            f.close()
24,202
def filter_words(data: TD_Data_Dictionary):
    """This function removes all instances of Key.ctrl from the list of keys
    and any repeats caused by Press and Release events"""
    # NOTE: We may just want to remove all instances of Key.ctrl from the list
    # and anything that follows that
    keys = data.get_letters()
    return keys
24,203
def check_url_secure(
    docker_ip: str,
    public_port: int,
    *,
    auth_header: Dict[str, str],
    ssl_context: SSLContext,
) -> bool:
    """
    Secure form of lovey/pytest/docker/compose.py::check_url() that checks when
    the secure docker registry service is operational.

    Args:
        docker_ip: IP address on which the service is exposed.
        public_port: Port on which the service is exposed.
        auth_header: HTTP basic authentication header to use when connecting to the service.
        ssl_context:
            SSL context referencing the trusted root CA certificates to use when
            negotiating the TLS connection.

    Returns:
        (bool) True when the service is operational, False otherwise.
    """
    try:
        https_connection = HTTPSConnection(
            context=ssl_context, host=docker_ip, port=public_port
        )
        https_connection.request("HEAD", "/v2/", headers=auth_header)
        return https_connection.getresponse().status < 500
    except Exception:  # pylint: disable=broad-except
        return False
24,204
def ResNet101(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet101
    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
                    If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
    Returns:
        model: nn.Layer. Specific `ResNet101` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld)
    return model
24,205
def LineTextInCurrentBuffer( line_number ):
    """ Returns the text on the 1-indexed line (NOT 0-indexed) """
    return vim.current.buffer[ line_number - 1 ]
24,206
def imported_instrumentor(library):
    """
    Convert a library name to that of the correlated auto-instrumentor in the
    libraries package.
    """
    instrumentor_lib = "signalfx_tracing.libraries.{}_".format(library)
    return get_module(instrumentor_lib)
24,207
def parse_cache_entry_into_seconds(isoTime):
    """
    Returns the number of seconds from the UNIX epoch.
    See :py:attribute:`synapseclient.utils.ISO_FORMAT` for the parameter's expected format.
    """
    # Note: The `strptime()` method is not thread-safe (http://bugs.python.org/issue7980)
    strptimeLock.acquire()
    cacheTime = time.strptime(isoTime, utils.ISO_FORMAT)
    strptimeLock.release()
    return calendar.timegm(cacheTime)
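The conversion itself only needs the standard library; a minimal, self-contained sketch, assuming an ISO format string of the shape used here (the exact `utils.ISO_FORMAT` value is not shown in this snippet):

import calendar
import time

ISO_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z"   # assumed format, standing in for utils.ISO_FORMAT

# time.strptime parses into a struct_time; calendar.timegm interprets it as UTC
entry = "2021-06-01T12:30:00.000Z"
seconds = calendar.timegm(time.strptime(entry, ISO_FORMAT))
print(seconds)  # seconds since the UNIX epoch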
24,208
def parse_runtime(log_file): """ Parse the job run-time from a log-file """ with open(log_file, 'r') as f: for line in f: l0 = line.rstrip("\n") break l1 = tail(log_file, 1)[0].rstrip("\n") l0 = l0.split()[:2] l1 = l1.split()[:2] try: y0, m0, d0 = list(map(int, l0[0].split('-'))) h0, min0, s0 = list(map(float, l0[1][:-1].split(':'))) except ValueError as e: print(log_file) print(l0) raise e try: y1, m1, d1 = list(map(int, l1[0].split('-'))) h1, min1, s1 = list(map(float, l1[1][:-1].split(':'))) except ValueError as e: print(log_file) print(l1) raise e date0 = datetime.datetime(y0, m0, d0, int(h0), int(min0), int(s0)) date1 = datetime.datetime(y1, m1, d1, int(h1), int(min1), int(s1)) diff = (date1 - date0).total_seconds() return diff
24,209
def load_answerer():
    """Loads the answerer model."""
    global _answerer
    if _model is None:
        load_model()
    _answerer = _model
24,210
def calculateEMA(coin_pair, period, unit):
    """
    Returns the Exponential Moving Average for a coin pair
    """
    closing_prices = getClosingPrices(coin_pair, period, unit)
    previous_EMA = calculateSMA(coin_pair, period, unit)
    constant = 2 / (period + 1)   # smoothing constant
    current_EMA = (closing_prices[-1] * constant) + (previous_EMA * (1 - constant))
    return current_EMA
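For reference, the update above is the standard EMA recurrence; a small plain-Python sketch with made-up prices (`ema` here is illustrative, not part of the original module):

def ema(prices, period):
    """Exponential moving average over a price series (plain-Python sketch)."""
    k = 2 / (period + 1)                   # smoothing constant, as in calculateEMA
    value = sum(prices[:period]) / period  # seed with a simple moving average
    for price in prices[period:]:
        value = price * k + value * (1 - k)
    return value

print(ema([10, 11, 12, 13, 14, 15], period=3))  # 14.0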
24,211
def euler_method(r0, N):
    """
    euler_method function description:

    This method computes the vector r(t)'s using Euler's method.

    Args:
        r0 - the initial r-value
        N  - the number of steps in each period
    """
    delta_t = (2*np.pi)/N              # delta t
    r = np.zeros((5*N, 2))             # 5Nx2 array
    r[0] = r0                          # initial r-value
    J = np.array(([0, 1], [-1, 0]))    # antisymmetric matrix (its transpose equals its negative)
    for i in range(1, 5*N):
        r[i] = r[i-1] + delta_t*(J @ r[i-1])  # Euler's method
    return r
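A quick usage sketch: with dr/dt = J·r the exact trajectory is a circle, so the Euler iterates should stay near radius 1 for large N (the starting point below is arbitrary):

import numpy as np

r = euler_method(np.array([1.0, 0.0]), N=1000)
radii = np.linalg.norm(r, axis=1)
print(radii[0], radii[-1])  # forward Euler slowly spirals outward, so the final radius drifts above 1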
24,212
def main_menu(found_exists):
    """prints main menu and asks for user input

    returns task that is chosen by user input"""
    show_main_menu(found_exists)
    inp = input(">> ")
    if inp == "1":
        return "update"
    elif inp == "2":
        return "show_all"
    elif inp == "3":
        return "show_waypoints"
    elif inp == "4":
        return "map-menu"
    elif inp == "5":
        return "show_one"
    elif inp == "6":
        return "search"
    elif inp == "7" and found_exists:
        return "show_founds"
    elif inp == "8" and found_exists:
        return "exit"
    elif inp == "7" and not found_exists:
        return "exit"
    else:
        print("Invalid input!")
24,213
def fromPsl(psl, qCdsRange=None, inclUnaln=False, projectCds=False, contained=False):
    """generate a PairAlign from a PSL. cdsRange is None or a tuple.
    If inclUnaln is True, then include Block objects for unaligned regions"""
    qCds = _getCds(qCdsRange, psl.qStrand, psl.qSize)
    qSeq = _mkPslSeq(psl.qName, psl.qStart, psl.qEnd, psl.qSize, psl.qStrand, qCds)
    tSeq = _mkPslSeq(psl.tName, psl.tStart, psl.tEnd, psl.tSize, psl.tStrand)
    aln = PairAlign(qSeq, tSeq)
    prevBlk = None
    for i in range(psl.blockCount):
        prevBlk = _addPslBlk(psl.blocks[i], aln, prevBlk, inclUnaln)
    if projectCds and (aln.qSeq.cds is not None):
        aln.projectCdsToTarget(contained)
    return aln
24,214
def drawMatrix(ax, mat, **kwargs):
    """Draw a view of a matrix into the axes.

    TODO
    ----
        * pg.core.BlockMatrix

    Parameters
    ----------
    ax : mpl axis instance, optional
        Axis instance where the matrix will be plotted.

    mat: obj
        obj can be so far:
            * pg.core.*Matrix
            * scipy.sparse

    Returns
    -------
    ax:
    """
    mat = pg.utils.sparseMatrix2coo(mat)
    ax.spy(mat)
    return ax
24,215
def test_template_observation(workbench, template_sequence, app_dir, caplog): """Test that new templates are properly detected. """ import logging caplog.set_level(logging.WARNING) plugin = workbench.get_plugin('exopy.pulses') assert template_sequence in plugin.sequences template_path = os.path.join(app_dir, 'pulses', 'templates') prof = ConfigObj(os.path.join(template_path, 'template.temp_pulse.ini')) prof.write() from time import sleep sleep(1) assert template_sequence in plugin.sequences assert 'template' in plugin.sequences os.remove(os.path.join(template_path, 'template.temp_pulse.ini')) sleep(1) assert template_sequence in plugin.sequences assert 'template' not in plugin.sequences plugin.templates_folders = [''] assert caplog.records plugin.templates_folders = [os.path.join(app_dir, 'pulses', 'templates')] assert template_sequence in plugin.sequences
24,216
def get_graph(identifier: str, *, rows: Optional[int] = None) -> pybel.BELGraph:
    """Get the graph surrounding a given GO term and its descendants."""
    graph = pybel.BELGraph()
    enrich_graph(graph, identifier, rows=rows)
    return graph
24,217
def parse_custom_variant(self, cfg): """Parse custom variant definition from a users input returning a variant dict an example of user defined variant configuration 1) integrated: cpu=2 ram=4 max_nics=6 chassis=sr-1 slot=A card=cpm-1 slot=1 mda/1=me6-100gb-qsfp28 2) distributed: cp: cpu=2 ram=4 chassis=ixr-e slot=A card=cpm-ixr-e ___ lc: cpu=2 ram=4 max_nics=34 chassis=ixr-e slot=1 card=imm24-sfp++8-sfp28+2-qsfp28 mda/1=m24-sfp++8-sfp28+2-qsfp28 """ def _parse(cfg, obj, is_cp=False): timos_line = [] chassis = None xiom = None for elem in cfg.split(): # skip cp: lc: markers if elem in ["cp:", "lc:"]: continue if "cpu=" in elem: obj["cpu"] = int(elem.split("=")[1]) continue if "ram=" in elem: obj["min_ram"] = int(elem.split("=")[1]) continue if not is_cp and "max_nics=" in elem: obj["max_nics"] = int(elem.split("=")[1]) continue # JvB for SR-xs check supported MDA type and determine #nics if "chassis=" in elem: chassis = elem.split("=")[1].lower() elif chassis in SRS_CHASSIS: # SR-1s through SR-14s if "xiom/" in elem: # /x1 or /x2 if is_cp: raise ValueError(f"Cannot configure XIOM for control plane VM: {elem}") xiom = elem.split("=")[1] elif "mda/" in elem: if is_cp: raise ValueError(f"Cannot configure MDA for control plane VM: {elem}") mda = elem.split("=")[1] supported = SR_S_MDA_VARIANTS_IOM_S if xiom else SR_S_MDA_VARIANTS_NO_XIOM if mda not in supported: raise ValueError(f"Unsupported {chassis} MDA type for XIOM {xiom}: {mda}") obj["max_nics"] = supported[mda] timos_line.append(elem) obj["timos_line"] = " ".join(timos_line) # set default cpu and ram if "cpu" not in obj: obj["cpu"] = 2 if "min_ram" not in obj: obj["min_ram"] = 4 return obj # init variant object that gets returned variant = { "max_nics": 40 } # some default value for num nics if it is not provided in user cfg # parsing distributed custom variant if "___" in cfg: variant["deployment_model"] = "distributed" for hw_part in cfg.split("___"): if "cp:" in hw_part: logging.debug( f"Parsing cp:{hw_part}" ) variant["cp"] = _parse(hw_part.strip(),obj={},is_cp=True) elif "lc:" in hw_part: logging.debug( f"Parsing lc:{hw_part}" ) variant["lc"] = _parse(hw_part.strip(),obj={}) if "max_nics" in variant["lc"]: variant["max_nics"] = int(variant["lc"]["max_nics"]) variant["lc"].pop("max_nics") else: raise ValueError(f"Missing 'cp:' or 'lc:' in distributed config string: {cfg}") else: # parsing integrated mode config variant["deployment_model"] = "integrated" variant = _parse(cfg.strip(),obj=variant) return variant
24,218
def test_update_w_up_to_date(version, supplied_version, latest_version, mocker): """Tests footing.update.update when the template is already up to date""" mocker.patch('footing.check.not_has_branch', autospec=True) mocker.patch('footing.check.in_git_repo', autospec=True) mocker.patch('footing.check.in_clean_repo', autospec=True) mocker.patch('footing.check.is_footing_project', autospec=True) mocker.patch( 'footing.utils.read_footing_config', autospec=True, return_value={'_version': version, '_template': 't'}, ) mocker.patch( 'footing.update._get_latest_template_version', autospec=True, return_value=latest_version, ) assert not footing.update.update(new_version=supplied_version)
24,219
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5, gammaval=1, min_spat_freq=0.5, max_spat_freq=3, fwhm=4, debug=False, recenter_median=False, fit_type='gaus', negative=True, crop=True, subframesize=21, mask=None, imlib='vip-fft', interpolation='lanczos4', border_mode='reflect', plot=True, full_output=False): """ Registers frames based on the median speckle pattern. Optionally centers based on the position of the vortex null in the median frame. Images are filtered to isolate speckle spatial frequencies. Parameters ---------- cube_sci : numpy ndarray Science cube. cube_ref : numpy ndarray Reference cube (e.g. for NIRC2 data in RDI mode). alignment_iter : int, optional Number of alignment iterations (recomputes median after each iteration). gammaval : int, optional Applies a gamma correction to emphasize speckles (useful for faint stars). min_spat_freq : float, optional Spatial frequency for low pass filter. max_spat_freq : float, optional Spatial frequency for high pass filter. fwhm : float, optional Full width at half maximum. debug : bool, optional Outputs extra info. recenter_median : bool, optional Recenter the frames at each iteration based on a 2d fit. fit_type : str, optional If recenter_median is True, this is the model to which the image is fitted to for recentering. 'gaus' works well for NIRC2_AGPM data. 'ann' works better for NACO+AGPM data. negative : bool, optional If True, uses a negative gaussian fit to determine the center of the median frame. crop: bool, optional Whether to calculate the recentering on a cropped version of the cube that is speckle-dominated (recommended). subframesize : int, optional Sub-frame window size used. Should cover the region where speckles are the dominant noise source. mask: 2D np.ndarray, optional Binary mask indicating where the cross-correlation should be calculated in the images. If provided, should be the same size as array frames. imlib : str, optional Image processing library to use. interpolation : str, optional Interpolation method to use. border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'} Points outside the boundaries of the input are filled accordingly. With 'reflect', the input is extended by reflecting about the edge of the last pixel. With 'nearest', the input is extended by replicating the last pixel. With 'constant', the input is extended by filling all values beyond the edge with zeros. With 'mirror', the input is extended by reflecting about the center of the last pixel. With 'wrap', the input is extended by wrapping around to the opposite edge. Default is 'reflect'. plot : bool, optional If True, the shifts are plotted. full_output: bool, optional Whether to return more variables, useful for debugging. Returns ------- if full_output is False, returns: cube_reg_sci: Registered science cube (numpy 3d ndarray) If cube_ref is not None, also returns: cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray) If full_output is True, returns in addition to the above: cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray) cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray) cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array) cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array) And if cube_ref is not None, also returns: cum_x_shifts_ref: Vector of x shifts for ref. frames. cum_y_shifts_ref: Vector of y shifts for ref. frames. 
""" n, y, x = cube_sci.shape check_array(cube_sci, dim=3) if recenter_median and fit_type not in {'gaus','ann'}: raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'") if crop and not subframesize < y/2.: raise ValueError('`Subframesize` is too large') if cube_ref is not None: ref_star = True nref = cube_ref.shape[0] else: ref_star = False if crop: cube_sci_subframe = cube_crop_frames(cube_sci, subframesize, verbose=False) if ref_star: cube_ref_subframe = cube_crop_frames(cube_ref, subframesize, verbose=False) else: subframesize = cube_sci.shape[-1] cube_sci_subframe = cube_sci.copy() if ref_star: cube_ref_subframe = cube_ref.copy() ceny, cenx = frame_center(cube_sci_subframe[0]) print('Sub frame shape: {}'.format(cube_sci_subframe.shape)) print('Center pixel: ({}, {})'.format(ceny, cenx)) # Filtering cubes. Will be used for alignment purposes cube_sci_lpf = cube_sci_subframe.copy() if ref_star: cube_ref_lpf = cube_ref_subframe.copy() cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf)) if ref_star: cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf)) median_size = int(fwhm * max_spat_freq) # Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt', median_size=median_size, verbose=False) if min_spat_freq>0: cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss', fwhm_size=min_spat_freq * fwhm, verbose=False) else: cube_sci_lpf = cube_sci_hpf if ref_star: cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt', median_size=median_size, verbose=False) if min_spat_freq>0: cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss', fwhm_size=min_spat_freq * fwhm, verbose=False) else: cube_ref_lpf = cube_ref_hpf if ref_star: alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize)) alignment_cube[1:(n + 1), :, :] = cube_sci_lpf alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf else: alignment_cube = np.zeros((1 + n, subframesize, subframesize)) alignment_cube[1:(n + 1), :, :] = cube_sci_lpf n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref cum_y_shifts = 0 cum_x_shifts = 0 for i in range(alignment_iter): alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0) if recenter_median: # Recenter the median frame using a 2d fit if fit_type == 'gaus': crop_sz = int(fwhm) else: crop_sz = int(6*fwhm) if not crop_sz%2: crop_sz+=1 sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz, y=ceny, x=cenx, position=True) if fit_type == 'gaus': if negative: sub_image = -sub_image + np.abs(np.min(-sub_image)) y_i, x_i = fit_2dgaussian(sub_image, crop=False, threshold=False, sigfactor=1, debug=debug, full_output=False) elif fit_type == 'ann': y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False, hole_rad=0.5, sampl_cen=0.1, sampl_rad=0.2, ann_width=0.5, unc_in=2.) 
yshift = ceny - (y1 + y_i) xshift = cenx - (x1 + x_i) alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift, xshift, imlib=imlib, interpolation=interpolation, border_mode=border_mode) # center the cube with stretched values cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval) if mask is not None and crop: mask_tmp = frame_crop(mask, subframesize) else: mask_tmp = mask res = cube_recenter_dft_upsampling(cube_stret, (ceny, cenx), fwhm=fwhm, subi_size=None, full_output=True, verbose=False, plot=False, mask=mask_tmp, imlib=imlib, interpolation=interpolation) _, y_shift, x_shift = res sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2)) print('Square sum of shift vecs: ' + str(sqsum_shifts)) for j in range(1, n_frames): alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j], x_shift[j], imlib=imlib, interpolation=interpolation, border_mode=border_mode) cum_y_shifts += y_shift cum_x_shifts += x_shift cube_reg_sci = cube_sci.copy() cum_y_shifts_sci = cum_y_shifts[1:(n + 1)] cum_x_shifts_sci = cum_x_shifts[1:(n + 1)] for i in range(n): cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i], cum_x_shifts_sci[i], imlib=imlib, interpolation=interpolation, border_mode=border_mode) if plot: plt.figure(figsize=vip_figsize) plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5) plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5) plt.legend(loc='best') plt.grid('on', alpha=0.2) plt.ylabel('Pixels') plt.xlabel('Frame number') plt.figure(figsize=vip_figsize) b = int(np.sqrt(n)) la = 'Histogram' _ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X') _ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y') plt.legend(loc='best') plt.ylabel('Bin counts') plt.xlabel('Pixels') if ref_star: cube_reg_ref = cube_ref.copy() cum_y_shifts_ref = cum_y_shifts[(n + 1):] cum_x_shifts_ref = cum_x_shifts[(n + 1):] for i in range(nref): cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i], cum_x_shifts_ref[i], imlib=imlib, interpolation=interpolation, border_mode=border_mode) if ref_star: if full_output: return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret, cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref, cum_y_shifts_ref) else: return (cube_reg_sci, cube_reg_ref) else: if full_output: return (cube_reg_sci, cube_sci_lpf, cube_stret, cum_x_shifts_sci, cum_y_shifts_sci) else: return cube_reg_sci
24,220
def root_histogram_shape(root_hist, use_matrix_indexing=True):
    """
    Return a tuple corresponding to the shape of the histogram.

    If use_matrix_indexing is true, the tuple is in 'reversed' zyx order.
    (Matrix order is the layout used in the internal buffer of the root
    histogram - keep True if reshaping the array.)
    """
    dim = root_hist.GetDimension()
    # +2 accounts for the under- and overflow bins along each axis
    shape = np.array([root_hist.GetNbinsZ(),
                      root_hist.GetNbinsY(),
                      root_hist.GetNbinsX()][3 - dim:]) + 2
    if not use_matrix_indexing:
        shape = reversed(shape)
    return tuple(shape)
24,221
def json_file_to_dict(fname): """ Read a JSON file and return its Python representation, transforming all the strings from Unicode to ASCII. The order of keys in the JSON file is preserved. Positional arguments: fname - the name of the file to parse """ try: with io.open(fname, encoding='ascii', errors='ignore') as file_obj: return json.load( file_obj, object_pairs_hook=_ordered_dict_collapse_dups ) except (ValueError, IOError) as e: sys.stderr.write("Error parsing '%s': %s\n" % (fname, e)) raise
24,222
def get_weather_sensor_by( weather_sensor_type_name: str, latitude: float = 0, longitude: float = 0 ) -> Union[WeatherSensor, ResponseTuple]: """ Search a weather sensor by type and location. Can create a weather sensor if needed (depends on API mode) and then inform the requesting user which one to use. """ # Look for the WeatherSensor object weather_sensor = ( WeatherSensor.query.filter( WeatherSensor.weather_sensor_type_name == weather_sensor_type_name ) .filter(WeatherSensor.latitude == latitude) .filter(WeatherSensor.longitude == longitude) .one_or_none() ) if weather_sensor is None: create_sensor_if_unknown = False if current_app.config.get("FLEXMEASURES_MODE", "") == "play": create_sensor_if_unknown = True # either create a new weather sensor and post to that if create_sensor_if_unknown: current_app.logger.info("CREATING NEW WEATHER SENSOR...") weather_sensor = WeatherSensor( name="Weather sensor for %s at latitude %s and longitude %s" % (weather_sensor_type_name, latitude, longitude), weather_sensor_type_name=weather_sensor_type_name, latitude=latitude, longitude=longitude, ) db.session.add(weather_sensor) db.session.flush() # flush so that we can reference the new object in the current db session # or query and return the nearest sensor and let the requesting user post to that one else: nearest_weather_sensor = WeatherSensor.query.order_by( WeatherSensor.great_circle_distance( latitude=latitude, longitude=longitude ).asc() ).first() if nearest_weather_sensor is not None: return unrecognized_sensor( nearest_weather_sensor.latitude, nearest_weather_sensor.longitude, ) else: return unrecognized_sensor() return weather_sensor
24,223
def _npy_loads(data):
    """
    Deserializes npy-formatted bytes into a numpy array
    """
    logger.info("Inside _npy_loads fn")
    stream = six.BytesIO(data)
    return np.load(stream, allow_pickle=True)
24,224
def parse_string(string):
    """Parse the string to a datetime object.

    :param str string: The string to parse
    :rtype: `datetime.datetime`
    :raises: :exc:`InvalidDateFormat` when date format is invalid
    """
    try:
        # Try to parse string as a date
        value = dateutil.parser.parse(string)
    except (OverflowError, TypeError, ValueError):
        raise InvalidDateFormat("Invalid date format %r" % (string, ))
    return value
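For context, `dateutil.parser.parse` accepts most common date/time spellings; a small usage sketch (the sample strings are made up):

from dateutil import parser

print(parser.parse("2021-03-01T12:00:00"))  # 2021-03-01 12:00:00
print(parser.parse("1 March 2021 12:00"))   # 2021-03-01 12:00:00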
24,225
def get_elfs_oriented(atoms, density, basis, mode, view = serial_view()):
    """ Outdated, use get_elfs() with "mode='elf'/'nn'" instead.

    Like get_elfs, but returns real, oriented elfs

    mode = {'elf': Use the ElF algorithm to orient fingerprint,
            'nn': Use nearest neighbor algorithm}
    """
    return get_elfs(atoms, density, basis, view, orient_mode = mode)
24,226
def test_medicinalproduct_1(base_settings): """No. 1 tests collection for MedicinalProduct. Test File: medicinalproduct-example.json """ filename = base_settings["unittest_data_dir"] / "medicinalproduct-example.json" inst = medicinalproduct.MedicinalProduct.parse_file( filename, content_type="application/json", encoding="utf-8" ) assert "MedicinalProduct" == inst.resource_type impl_medicinalproduct_1(inst) # testing reverse by generating data from itself and create again. data = inst.dict() assert "MedicinalProduct" == data["resourceType"] inst2 = medicinalproduct.MedicinalProduct(**data) impl_medicinalproduct_1(inst2)
24,227
def on_new_thread_with_config(match, state, logger): """It happens when a new DB GC thread is created.""" kind = match[0] name = match[1] if match[1][:4] != "rDsp" else "rDsp" priority = int(match[2]) stack_size = int(match[3], 16) if "threads" not in state: state['threads'] = {} state['threads'][name] = { 'name': name, 'kind': kind, 'priority': priority, 'stack_size': stack_size, 'tid': -1, 'affinity': '??'}
24,228
def get_alliances_alliance_id_contacts(*, alliance_id, token, if_none_match=None, page='1'): """ :param alliance_id: An EVE alliance ID :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag :param page: Which page of results to return :param token: Access token to use if unable to set a header Return contacts of an alliance --- Alternate route: `/dev/alliances/{alliance_id}/contacts/` Alternate route: `/latest/alliances/{alliance_id}/contacts/` --- This route is cached for up to 300 seconds """ ESI_request.request(alliance_id=alliance_id, if_none_match=if_none_match, page=page, token=token, data_source='tranquility', version='v2', HTTP_method='GET', path=f'/alliances/{alliance_id}/contacts/')
24,229
def _get_corpora_json_contents(corpora_file):
    """
    Get the contents of corpora.json, or an empty dict
    """
    exists = os.path.isfile(corpora_file)
    if not exists:
        print("Corpora file not found at {}!".format(corpora_file))
        return dict()
    with open(corpora_file, "r") as fo:
        return json.loads(fo.read())
24,230
def error_logger(param=None):
    """
    Function to get an error logger, object of Logger class.

    @param param : Custom parameter that can be passed to the logger.
    @return: custom logger
    """
    logger = Logger('ERROR_LOGGER', param)
    return logger.get_logger()
24,231
def _run_make_examples(pipeline_args): """Runs the make_examples job.""" def get_region_paths(regions): return filter(_is_valid_gcs_path, regions or []) def get_region_literals(regions): return [ region for region in regions or [] if not _is_valid_gcs_path(region) ] def get_extra_args(): """Optional arguments that are specific to make_examples binary.""" extra_args = [] if pipeline_args.gvcf_outfile: extra_args.extend( ['--gvcf', '"${GVCF}"/gvcf_output.tfrecord@"${SHARDS}".gz']) if pipeline_args.gvcf_gq_binsize: extra_args.extend( ['--gvcf_gq_binsize', str(pipeline_args.gvcf_gq_binsize)]) if pipeline_args.regions: num_localized_region_paths = len(get_region_paths(pipeline_args.regions)) localized_region_paths = map('"${{INPUT_REGIONS_{0}}}"'.format, range(num_localized_region_paths)) region_literals = get_region_literals(pipeline_args.regions) extra_args.extend([ '--regions', '\'%s\'' % ' '.join(region_literals + localized_region_paths) ]) if pipeline_args.sample_name: extra_args.extend(['--sample_name', pipeline_args.sample_name]) if pipeline_args.hts_block_size: extra_args.extend(['--hts_block_size', str(pipeline_args.hts_block_size)]) return extra_args if pipeline_args.gcsfuse: command = _MAKE_EXAMPLES_COMMAND_WITH_GCSFUSE.format( EXTRA_ARGS=' '.join(get_extra_args())) else: command = _MAKE_EXAMPLES_COMMAND_NO_GCSFUSE.format( EXTRA_ARGS=' '.join(get_extra_args())) machine_type = 'custom-{0}-{1}'.format( pipeline_args.make_examples_cores_per_worker, pipeline_args.make_examples_ram_per_worker_gb * 1024) num_workers = min(pipeline_args.make_examples_workers, pipeline_args.shards) shards_per_worker = pipeline_args.shards / num_workers threads = multiprocessing.Pool(num_workers) results = [] for i in range(num_workers): outputs = [ 'EXAMPLES=' + _get_staging_examples_folder_to_write(pipeline_args, i) + '/*' ] if pipeline_args.gvcf_outfile: outputs.extend(['GVCF=' + _get_staging_gvcf_folder(pipeline_args) + '/*']) inputs = [ 'INPUT_BAI=' + pipeline_args.bai, 'INPUT_REF=' + pipeline_args.ref, 'INPUT_REF_FAI=' + pipeline_args.ref_fai, ] + [ 'INPUT_REGIONS_%s=%s' % (k, region_path) for k, region_path in enumerate( get_region_paths(pipeline_args.regions)) ] if pipeline_args.ref_gzi: inputs.extend([pipeline_args.ref_gzi]) env_args = [ '--set', 'SHARDS=' + str(pipeline_args.shards), '--set', 'SHARD_START_INDEX=' + str(int(i * shards_per_worker)), '--set', 'SHARD_END_INDEX=' + str(int((i + 1) * shards_per_worker - 1)) ] if pipeline_args.gcsfuse: env_args.extend([ '--set', 'GCS_BUCKET=' + _get_gcs_bucket(pipeline_args.bam), '--set', 'BAM=' + _get_gcs_relative_path(pipeline_args.bam) ]) else: inputs.extend(['INPUT_BAM=' + pipeline_args.bam]) job_name = pipeline_args.job_name_prefix + _MAKE_EXAMPLES_JOB_NAME output_path = os.path.join(pipeline_args.logging, _MAKE_EXAMPLES_JOB_NAME, str(i)) run_args = _get_base_job_args(pipeline_args) + env_args + [ '--name', job_name, '--vm-labels', 'dv-job-name=' + job_name, '--image', pipeline_args.docker_image, '--output', output_path, '--inputs', ','.join(inputs), '--outputs', ','.join(outputs), '--machine-type', machine_type, '--disk-size', str(pipeline_args.make_examples_disk_per_worker_gb), '--command', command ] results.append(threads.apply_async(_run_job, [run_args, output_path])) _wait_for_results(threads, results)
24,232
def get_iou(mask, label):
    """
    :param mask: predicted mask with 0 for background and 1 for object
    :param label: label
    :return: iou
    """
    # mask = mask.numpy()
    # label = labels.numpy()
    size = mask.shape
    mask = mask.flatten()
    label = label.flatten()
    m = mask + label
    i = len(np.argwhere(m == 2))   # pixels where both prediction and label are 1 (intersection)
    u = len(np.argwhere(m != 0))   # pixels where either is 1 (union)
    if u == 0:
        u = size[0] * size[1]
    iou = float(i) / u
    if i == 0 and u == 0:
        iou = 1
    return iou
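A small self-contained check of the IoU computation with made-up masks:

import numpy as np

pred  = np.array([[1, 1, 0],
                  [0, 1, 0],
                  [0, 0, 0]])
truth = np.array([[1, 0, 0],
                  [0, 1, 1],
                  [0, 0, 0]])

# intersection = 2 (pixels where both are 1), union = 4 -> IoU = 0.5
print(get_iou(pred, truth))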
24,233
async def test_route_events( mock_socket: MagicMock, mock_subscriber: AsyncGenerator, topic_event ) -> None: """Test that an event is read from subscriber and sent to websocket.""" with patch.object(handle_subscriber, "send") as mock_send: await handle_subscriber.route_events(mock_socket, mock_subscriber) mock_send.assert_called_once_with(mock_socket, topic_event)
24,234
def mean_jaccard_distance(sets: List[Set[Any]]) -> float:
    r"""
    Compute the mean Jaccard distance for sets A_1, \dots, A_n:

        d = \frac{2}{n(n-1)} \sum_{i=1}^{n-1} \sum_{j=i+1}^{n} (1 - J(A_i, A_j))

    where J(A, B) is the Jaccard index between sets A and B and 1 - J(A, B) is
    the Jaccard distance; the sum runs over all n(n-1)/2 unordered pairs.
    """
    n = len(sets)
    assert n > 0
    if n == 1:
        return 0
    else:
        d = 0.0
        for i in range(n - 1):
            for j in range(i + 1, n):
                d += 1 - jaccard_index(sets[i], sets[j])
        d /= n * (n - 1) / 2
        return d
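A short usage sketch; `jaccard_index` is not shown in this snippet, so a plain set-based implementation is assumed here:

from typing import Any, List, Set

def jaccard_index(a: Set[Any], b: Set[Any]) -> float:
    # assumed helper: |A ∩ B| / |A ∪ B|
    return len(a & b) / len(a | b) if (a or b) else 1.0

sets = [{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]
# pairwise distances: 0.5, 0.8, 0.5 -> mean 0.6
print(mean_jaccard_distance(sets))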
24,235
def nx_find_connected(graph, start_set, end_set, cutoff=np.inf):
    """Return the nodes in end_set connected to start_set."""
    reachable = []
    for end in end_set:
        if nx_is_reachable(graph, end, start_set):
            reachable.append(end)
        if len(reachable) >= cutoff:
            break
    return reachable
24,236
def svn_client_proplist(*args):
    """
    svn_client_proplist(char target, svn_opt_revision_t revision, svn_boolean_t recurse,
        svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
    """
    return _client.svn_client_proplist(*args)
24,237
def dev_transform(signal, input_path='../data/', is_denoised=True):
    """
    Normalization function that transforms each feature based on the scaling
    of the training set. This transformation should be done on the test set
    (developmental set), or any new input for a trained neural network.
    Due to the existence of a denoising step in the normal_seq function, this
    transformation cannot reproduce the exact same initial sequences; instead
    it transforms to the scale of the denoised version of the training set.

    Parameters
    ----------
    signal : numpy array or pandas dataframe in the shape of (n_samples, n_features)

    input_path : str, default='../data/'

    is_denoised : boolean
        it specifies whether the original sequence is denoised by a threshold;
        if it's set to False it means that the user used q=None in the normal_seq function.

    Returns
    -------
    transformed : numpy array
        a normalised sequence or features
    """
    transformed = []
    if isinstance(signal, pd.DataFrame):
        signal = signal.to_numpy(copy=True)
    elif isinstance(signal, list):
        signal = np.array(signal)
    scales = pd.read_csv(input_path + 'min_max_inputs.csv')
    max_element = scales.to_numpy(copy=True)[1, 1:]
    min_element = scales.to_numpy(copy=True)[0, 1:]
    if signal.ndim == 1:
        if is_denoised is True:
            mask = signal > max_element
            signal[mask] = max_element[mask]   # clip each feature at its training maximum
        transformed.append((signal - min_element) / (max_element - min_element))
    else:
        for i in range(signal.shape[1]):
            if is_denoised is True:
                # clip only feature i at its training maximum
                signal[signal[:, i] > max_element[i], i] = max_element[i]
            transformed.append((signal[:, i] - min_element[i]) /
                               (max_element[i] - min_element[i]))
    transformed = np.array(transformed).T  # transpose for correspondence
    return transformed
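The transformation itself is ordinary min-max scaling with the training-set extrema; a minimal sketch of the idea with made-up numbers:

import numpy as np

train_min, train_max = np.array([0.0, 10.0]), np.array([5.0, 20.0])  # per-feature extrema from training
new_sample = np.array([[2.5, 25.0]])                                 # 1 sample, 2 features

clipped = np.minimum(new_sample, train_max)             # optional "denoising" clip at the training max
scaled = (clipped - train_min) / (train_max - train_min)
print(scaled)  # [[0.5 1.0]]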
24,238
def moveTo(self, parent):
    """Move this element to new parent, as last child"""
    self.getParent().removeChild(self)
    parent.addChild(self)
    return self
24,239
def csv2timeseries_graphs(directories_dic={}, output_dir='', base_name=None, plot_numbers='', quantities=['stage'], extra_plot_name='', assess_all_csv_files=True, create_latex=False, verbose=False): """ Read in csv files that have the right header information and plot time series such as Stage, Speed, etc. Will also plot several time series on one plot. Filenames must follow this convention, <base_name><plot_number>.csv eg gauge_timeseries3.csv NOTE: relies that 'elevation' is in the csv file! Each file represents a location and within each file there are time, quantity columns. For example: if "directories_dic" defines 4 directories and in each directories there is a csv files corresponding to the right "plot_numbers", this will create a plot with 4 lines one for each directory AND one plot for each "quantities". ??? FIXME: unclear. Usage: csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0], 'fixed_wave'+sep:['Fixed Wave',0,0]}, output_dir='fixed_wave'+sep, base_name='gauge_timeseries_', plot_numbers='', quantities=['stage','speed'], extra_plot_name='', assess_all_csv_files=True, create_latex=False, verbose=True) this will create one plot for stage with both 'slide' and 'fixed_wave' lines on it for stage and speed for each csv file with 'gauge_timeseries_' as the prefix. The graghs will be in the output directory 'fixed_wave' and the graph axis will be determined by assessing all the ANOTHER EXAMPLE new_csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0], 'fixed_wave'+sep:['Fixed Wave',0,0]}, output_dir='fixed_wave'+sep, base_name='gauge_timeseries_', plot_numbers=['1-3'], quantities=['stage','speed'], extra_plot_name='', assess_all_csv_files=False, create_latex=False, verbose=True) This will plot csv files called gauge_timeseries_1.csv and gauge_timeseries3.csv from both 'slide' and 'fixed_wave' directories to 'fixed_wave'. There will be 4 plots created two speed and two stage one for each csv file. There will be two lines on each of these plots. And the axis will have been determined from only these files, had assess_all_csv_files = True all csv file with 'gauges_timeseries_' prefix would of been assessed. ANOTHER EXAMPLE csv2timeseries_graphs({'J:'+sep+'anuga_validation'+sep:['new',20,-.1], 'J:'+sep+'conical_island'+sep:['test',0,0]}, output_dir='', plot_numbers=['1','3'], quantities=['stage','depth','bearing'], base_name='gauge_b', assess_all_csv_files=True, verbose=True) This will produce one plot for each quantity (therefore 3) in the current directory, each plot will have 2 lines on them. The first plot named 'new' will have the time offseted by 20secs and the stage height adjusted by -0.1m Inputs: directories_dic: dictionary of directory with values (plot legend name for directory), (start time of the time series) and the (value to add to stage if needed). For example {dir1:['Anuga_ons',5000, 0], dir2:['b_emoth',5000,1.5], dir3:['b_ons',5000,1.5]} Having multiple directories defined will plot them on one plot, therefore there will be 3 lines on each of these plot. If you only want one line per plot call csv2timeseries_graph separately for each directory, eg only have one directory in the 'directories_dic' in each call. output_dir: directory for the plot outputs. Only important to define when you have more than one directory in your directories_dic, if you have not defined it and you have multiple directories in 'directories_dic' there will be plots in each directory, however only one directory will contain the complete plot/graphs. 
base_name: Is used a couple of times. 1) to find the csv files to be plotted if there is no 'plot_numbers' then csv files with 'base_name' are plotted 2) in the title of the plots, the length of base_name is removed from the front of the filename to be used in the title. This could be changed if needed. Note is ignored if assess_all_csv_files=True plot_numbers: a String list of numbers to plot. For example [0-4,10,15-17] will read and attempt to plot the follow 0,1,2,3,4,10,15,16,17 NOTE: if no plot numbers this will create one plot per quantity, per gauge quantities: Will get available quantities from the header in the csv file. Quantities must be one of these. NOTE: ALL QUANTITY NAMES MUST BE lower case! extra_plot_name: A string that is appended to the end of the output filename. assess_all_csv_files: if true it will read ALL csv file with "base_name", regardless of 'plot_numbers' and determine a uniform set of axes for Stage, Speed and Momentum. IF FALSE it will only read the csv file within the 'plot_numbers' create_latex: NOT IMPLEMENTED YET!! sorry Jane.... OUTPUTS: saves the plots to <output_dir><base_name><plot_number><extra_plot_name>.png """ # try: # import pylab # except ImportError: # msg='csv2timeseries_graphs needs pylab to be installed correctly' # raise Exception(msg) # #ANUGA don't need pylab to work so the system doesn't # #rely on pylab being installed # return try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as pylab except: #print "Couldn't import module from matplotlib, probably you need to update matplotlib" return from os import sep from anuga.utilities.file_utils import get_all_files_with_extension seconds_in_hour = 3600 seconds_in_minutes = 60 quantities_label={} # quantities_label['time'] = 'time (hours)' quantities_label['time'] = 'time (minutes)' quantities_label['stage'] = 'wave height (m)' quantities_label['speed'] = 'speed (m/s)' quantities_label['momentum'] = 'momentum (m^2/sec)' quantities_label['depth'] = 'water depth (m)' quantities_label['xmomentum'] = 'momentum (m^2/sec)' quantities_label['ymomentum'] = 'momentum (m^2/sec)' quantities_label['bearing'] = 'degrees (o)' quantities_label['elevation'] = 'elevation (m)' if extra_plot_name != '': extra_plot_name = '_' + extra_plot_name new_plot_numbers=[] #change plot_numbers to list, eg ['0-4','10'] #to ['0','1','2','3','4','10'] for i, num_string in enumerate(plot_numbers): if '-' in num_string: start = int(num_string[:num_string.rfind('-')]) end = int(num_string[num_string.rfind('-') + 1:]) + 1 for x in range(start, end): new_plot_numbers.append(str(x)) else: new_plot_numbers.append(num_string) #finds all the files that fit the specs provided and return a list of them #so to help find a uniform max and min for the plots... list_filenames=[] all_csv_filenames=[] if verbose: log.critical('Determining files to access for axes ranges.') for i,directory in enumerate(directories_dic.keys()): all_csv_filenames.append(get_all_files_with_extension(directory, base_name, '.csv')) filenames=[] if plot_numbers == '': list_filenames.append(get_all_files_with_extension(directory, base_name,'.csv')) else: for number in new_plot_numbers: filenames.append(base_name + number) list_filenames.append(filenames) #use all the files to get the values for the plot axis max_start_time= -1000. 
min_start_time = 100000 if verbose: log.critical('Determining uniform axes') #this entire loop is to determine the min and max range for the #axes of the plots # quantities.insert(0,'elevation') quantities.insert(0,'time') directory_quantity_value={} # quantity_value={} min_quantity_value={} max_quantity_value={} for i, directory in enumerate(directories_dic.keys()): filename_quantity_value = {} if assess_all_csv_files == False: which_csv_to_assess = list_filenames[i] else: #gets list of filenames for directory "i" which_csv_to_assess = all_csv_filenames[i] for j, filename in enumerate(which_csv_to_assess): quantity_value = {} dir_filename = join(directory,filename) attribute_dic, title_index_dic = load_csv_as_dict(dir_filename + '.csv') directory_start_time = directories_dic[directory][1] directory_add_tide = directories_dic[directory][2] if verbose: log.critical('reading: %s.csv' % dir_filename) #add time to get values for k, quantity in enumerate(quantities): quantity_value[quantity] = [float(x) for x in attribute_dic[quantity]] #add tide to stage if provided if quantity == 'stage': quantity_value[quantity] = num.array(quantity_value[quantity], num.float) + directory_add_tide #condition to find max and mins for all the plots # populate the list with something when i=0 and j=0 and # then compare to the other values to determine abs max and min if i==0 and j==0: min_quantity_value[quantity], \ max_quantity_value[quantity] = \ get_min_max_values(quantity_value[quantity]) if quantity != 'time': min_quantity_value[quantity] = \ min_quantity_value[quantity] *1.1 max_quantity_value[quantity] = \ max_quantity_value[quantity] *1.1 else: min, max = get_min_max_values(quantity_value[quantity]) # min and max are multipled by "1+increase_axis" to get axes # that are slighty bigger than the max and mins # so the plots look good. increase_axis = (max-min)*0.05 if min <= min_quantity_value[quantity]: if quantity == 'time': min_quantity_value[quantity] = min else: if round(min,2) == 0.00: min_quantity_value[quantity] = -increase_axis # min_quantity_value[quantity] = -2. #min_quantity_value[quantity] = \ # -max_quantity_value[quantity]*increase_axis else: # min_quantity_value[quantity] = \ # min*(1+increase_axis) min_quantity_value[quantity]=min-increase_axis if max > max_quantity_value[quantity]: if quantity == 'time': max_quantity_value[quantity] = max else: max_quantity_value[quantity] = max + increase_axis # max_quantity_value[quantity]=max*(1+increase_axis) #set the time... ??? 
if min_start_time > directory_start_time: min_start_time = directory_start_time if max_start_time < directory_start_time: max_start_time = directory_start_time filename_quantity_value[filename]=quantity_value directory_quantity_value[directory]=filename_quantity_value #final step to unifrom axis for the graphs quantities_axis={} for i, quantity in enumerate(quantities): quantities_axis[quantity] = (float(min_start_time) \ / float(seconds_in_minutes), (float(max_quantity_value['time']) \ + float(max_start_time)) \ / float(seconds_in_minutes), min_quantity_value[quantity], max_quantity_value[quantity]) if verbose and (quantity != 'time' and quantity != 'elevation'): log.critical('axis for quantity %s are x:(%s to %s)%s ' 'and y:(%s to %s)%s' % (quantity, quantities_axis[quantity][0], quantities_axis[quantity][1], quantities_label['time'], quantities_axis[quantity][2], quantities_axis[quantity][3], quantities_label[quantity])) cstr = ['b', 'r', 'g', 'c', 'm', 'y', 'k'] if verbose: log.critical('Now start to plot') i_max = len(list(directories_dic.keys())) legend_list_dic = {} legend_list = [] for i, directory in enumerate(directories_dic.keys()): if verbose: log.critical('Plotting in %s %s' % (directory, new_plot_numbers)) # FIXME THIS SORT IS VERY IMPORTANT # Without it the assigned plot numbers may not work correctly # there must be a better way list_filenames[i].sort() for j, filename in enumerate(list_filenames[i]): if verbose: log.critical('Starting %s' % filename) directory_name = directories_dic[directory][0] directory_start_time = directories_dic[directory][1] directory_add_tide = directories_dic[directory][2] # create an if about the start time and tide height if don't exist attribute_dic, title_index_dic = load_csv_as_dict(directory + sep + filename + '.csv') #get data from dict in to list #do maths to list by changing to array t = old_div((num.array(directory_quantity_value[directory][filename]['time']) + directory_start_time), seconds_in_minutes) #finds the maximum elevation, used only as a test # and as info in the graphs max_ele=-100000 min_ele=100000 elevation = [float(x) for x in attribute_dic["elevation"]] min_ele, max_ele = get_min_max_values(elevation) if min_ele != max_ele: log.critical("Note! Elevation changes in %s" % dir_filename) # creates a dictionary with keys that is the filename and attributes # are a list of lists containing 'directory_name' and 'elevation'. # This is used to make the contents for the legends in the graphs, # this is the name of the model and the elevation. All in this # great one liner from DG. If the key 'filename' doesn't exist it # creates the entry if the entry exist it appends to the key. legend_list_dic.setdefault(filename,[]) \ .append([directory_name, round(max_ele, 3)]) # creates a LIST for the legend on the last iteration of the # directories which is when "legend_list_dic" has been fully # populated. Creates a list of strings which is used in the legend # only runs on the last iteration for all the gauges(csv) files # empties the list before creating it if i == i_max - 1: legend_list = [] for name_and_elevation in legend_list_dic[filename]: legend_list.append('%s (elevation = %sm)'\ % (name_and_elevation[0], name_and_elevation[1])) #skip time and elevation so it is not plotted! 
for k, quantity in enumerate(quantities): if quantity != 'time' and quantity != 'elevation': pylab.figure(int(k*100+j)) pylab.ylabel(quantities_label[quantity]) pylab.plot(t, directory_quantity_value[directory]\ [filename][quantity], c = cstr[i], linewidth=1) pylab.xlabel(quantities_label['time']) pylab.axis(quantities_axis[quantity]) pylab.legend(legend_list,loc='upper right') pylab.title('%s at %s gauge' % (quantity, filename[len(base_name):])) if output_dir == '': figname = '%s%s%s_%s%s.png' \ % (directory, sep, filename, quantity, extra_plot_name) else: figname = '%s%s%s_%s%s.png' \ % (output_dir, sep, filename, quantity, extra_plot_name) if verbose: log.critical('saving figure here %s' % figname) pylab.savefig(figname) if verbose: log.critical('Closing all plots') pylab.close('all') del pylab if verbose: log.critical('Finished closing plots')
24,240
def coerce_to_end_of_day_datetime(value):
    """
    gets the end of day datetime equivalent of given date object.
    if the value is not a date, it returns the same input.

    :param date value: value to be coerced.

    :rtype: datetime | object
    """
    if not isinstance(value, datetime) and isinstance(value, date):
        return end_of_day(value)
    return value
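`end_of_day` is defined elsewhere in that codebase; a plausible stand-in plus two usage lines, for illustration only:

from datetime import date, datetime, time

def end_of_day(value: date) -> datetime:
    # assumed helper: combine the date with the last representable time of that day
    return datetime.combine(value, time.max)

print(coerce_to_end_of_day_datetime(date(2021, 5, 1)))  # 2021-05-01 23:59:59.999999
print(coerce_to_end_of_day_datetime("not a date"))      # returned unchanged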
24,241
def GenerateOutput(target_list, target_dicts, data, params): """Called by gyp as the final stage. Outputs results.""" config = Config() try: config.Init(params) if not config.files: raise Exception('Must specify files to analyze via config_path generator ' 'flag') toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir)) if debug: print ('toplevel_dir', toplevel_dir_) if _WasGypIncludeFileModified(params, config.files): result_dict = { 'status': all_changed_string, 'test_targets': list(config.test_target_names), 'compile_targets': list( config.additional_compile_target_names | config.test_target_names) } _WriteOutput(params, **result_dict) return calculator = TargetCalculator(config.files, config.additional_compile_target_names, config.test_target_names, data, target_list, target_dicts, toplevel_dir, params['build_files']) if not calculator.is_build_impacted(): result_dict = { 'status': no_dependency_string, 'test_targets': [], 'compile_targets': [] } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) return test_target_names = calculator.find_matching_test_target_names() compile_target_names = calculator.find_matching_compile_target_names() found_at_least_one_target = compile_target_names or test_target_names result_dict = { 'test_targets': test_target_names, 'status': found_dependency_string if found_at_least_one_target else no_dependency_string, 'compile_targets': list( set(compile_target_names) | set(test_target_names)) } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) except Exception as e: _WriteOutput(params, error=str(e))
24,242
def greybody(nu, temperature, beta, A=1.0, logscale=0.0, units='cgs', frequency_units='Hz', kappa0=4.0, nu0=3000e9, normalize=max): """ Same as modified blackbody... not sure why I have it at all, though the normalization constants are different. """ h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c'] modification = (1. - exp(-(nu/nu0)**beta)) I = blackbody(nu,temperature,units=units,frequency_units=frequency_units,normalize=normalize)*modification if normalize and hasattr(I,'__len__'): if len(I) > 1: return I/normalize(I) * 10.**logscale else: return I * 10.**logscale else: return I * 10.**logscale
24,243
def GCLarsen_v0(WF, WS, WD, TI, pars=[0.435449861, 0.797853685, -0.124807893, 0.136821858, 15.6298, 1.0]): """Computes the WindFarm flow and Power using GCLarsen [Larsen, 2009, A simple Stationary...] Inputs ---------- WF: WindFarm Windfarm instance WS: list Rotor averaged Undisturbed wind speed [m/s] for each WT WD: float Rotor averaged Undisturbed wind direction [deg] for each WT Meteorological axis. North = 0 [deg], clockwise. TI: float Rotor averaged turbulence intensity [-] for each WT Returns ------- P_WT: ndarray Power production of the wind turbines (nWT,1) [W] U_WT: ndarray Wind speed at hub height (nWT,1) [m/s] Ct: ndarray Thrust coefficients for each wind turbine (nWT,1) [-] """ Dist, nDownstream, id0 = WF.turbineDistance(np.mean(WD)) zg = WF.vectWTtoWT[2,:,:] # Initialize arrays to NaN Ct = np.nan * np.ones([WF.nWT]) U_WT = copy(WS) P_WT = np.nan * np.ones([WF.nWT]) # Initialize first upstream turbine Ct[id0[0]] = WF.WT[id0[0]].get_CT(WS[id0[0]]) P_WT[id0[0]] = WF.WT[id0[0]].get_P(WS[id0[0]]) U_WT[id0[0]] = WS[id0[0]] for i in range(1, WF.nWT): cWT = id0[i] # Current wind turbine (wake operating) cR = WF.WT[cWT].R LocalDU = np.zeros([WF.nWT, 1]) for j in range(i-1, -1, -1): # Loop on the upstream turbines (uWT) of the cWT uWT = id0[j] uWS = U_WT[uWT] # Wind speed at wind turbine uWT uR = WF.WT[uWT].R uCT = Ct[uWT] if np.isnan(uCT): uCT = WF.WT[uWT].get_CT(uWS) # WT2WT vector in wake coordinates Dist, _,_ = WF.turbineDistance(WD[uWT]) x = Dist[0, uWT, cWT] y = Dist[1, uWT, cWT] z = zg[uWT, cWT] r = np.sqrt(y**2.+z**2.) # Calculate the wake width of uWT at the position of cWT Rw = get_Rw(x, uR, TI[uWT], uCT, pars)[0] if (r <= Rw+cR or uWS > 0): LocalDU[uWT] = uWS*get_dUeq(x,y,z,cR,uR,uCT,TI[uWT],pars) # Wake superposition DU = LocalDU.sum() U_WT[cWT] = U_WT[cWT] + DU if U_WT[cWT] > WF.WT[cWT].u_cutin: Ct[cWT] = WF.WT[cWT].get_CT(U_WT[cWT]) P_WT[cWT] = WF.WT[cWT].get_P(U_WT[cWT]) else: Ct[cWT] = WF.WT[cWT].CT_idle P_WT[cWT] = 0.0 return (P_WT,U_WT,Ct)
24,244
def log(message, exception=None):
    """Log a message (with optional exception) to a file."""
    if exception:
        message = message[0:-1] + ': ' + str(exception)
    time = datetime.now()
    print "Log", time, ":", message
    logdir = autoplatform.personaldir()
    if not os.path.exists(logdir):
        mkdir(logdir)
    logfile = os.path.join(logdir, "log.txt")
    with open(logfile, 'a') as fil:
        fil.write("{0}:{1}\n".format(time, message))
24,245
def guess_table_address(*args):
    """
    guess_table_address(insn) -> ea_t

    Guess the jump table address (ibm pc specific)

    @param insn (C++: const insn_t &)
    """
    return _ida_ua.guess_table_address(*args)
24,246
def xmind_to_excel_file(xmind_file):
    """Convert XMind file to an excel (xls) file"""
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to excel file...', xmind_file)
    testcases = get_xmind_testcase_list(xmind_file)
    fileheader = ["所属模块", "用例标题", "前置条件", "步骤", "预期", "关键词", "优先级", "用例类型", "适用阶段"]

    wbk = xlwt.Workbook()
    sheet1 = wbk.add_sheet('测试用例', cell_overwrite_ok=False)
    # enable word wrap in cells
    style1 = xlwt.easyxf('align: wrap on, vert top')
    sheet1.col(0).width = 256 * 30
    sheet1.col(1).width = 256 * 40
    sheet1.col(2).width = 256 * 30
    sheet1.col(3).width = 256 * 40
    sheet1.col(4).width = 256 * 40

    # write the test case header row
    for i in range(0, len(fileheader)):
        sheet1.write(0, i, fileheader[i])

    # write test cases starting from the second row
    case_index = 1
    for testcase in testcases:
        row = gen_a_testcase_row(testcase)
        for i in range(0, len(row)):
            sheet1.write(case_index, i, row[i], style1)
        case_index = case_index + 1

    excel_file = xmind_file[:-5] + 'xls'
    if os.path.exists(excel_file):
        logging.info('The excel file already exists, return it directly: %s', excel_file)
        return excel_file

    if excel_file:
        wbk.save(excel_file)
    logging.info('Convert XMind file(%s) to an excel file(%s) successfully!', xmind_file, excel_file)
    return excel_file
24,247
def reverse_file(filename):
    """reverse txt file contents with index"""
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
    lines.reverse()
    index = 0
    with open(filename, "w", encoding="utf-8") as f:
        for line in lines:
            index += 1
            # replace numbers at start of line with index
            line = re.sub(r"^\d+", f"{index:05d}", line)
            f.write(line)
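A quick illustration of just the renumbering step, on made-up lines that are already reversed:

import re

lines = ["00002 second entry\n", "00001 first entry\n"]  # already reversed
for index, line in enumerate(lines, start=1):
    print(re.sub(r"^\d+", f"{index:05d}", line), end="")
# 00001 second entry
# 00002 first entry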
24,248
def test_6_1_9_etc_gshadow_dash_user(host):
    """
    CIS Ubuntu 20.04 v1.1.0 - Rule # 6.1.9
    Tests if /etc/gshadow- is owned by user root
    """
    assert host.file(ETC_GSHADOW_DASH).user == 'root'
24,249
def get_elbs(account, region): """ Get elastic load balancers """ elb_data = [] aws_accounts = AwsAccounts() if not account: session = boto3.session.Session(region_name=region) for account_rec in aws_accounts.all(): elb_data.extend( query_elbs_for_account(account_rec, region, session)) elif account.isdigit() and len(account) == 12: session = boto3.session.Session() aws_account = aws_accounts.with_number(account) if aws_account: elb_data.append( query_elbs_for_account( aws_account, region, session)) else: return dict(Message="Account not found"), 404 # print(elb_data) return dict(LoadBalancers=elb_data), 200
24,250
def handle_commit_from_git_author(repository, commit, signed, missing): """ Helper method to triage commits between signed and not-signed user signatures. This method deals with non-GitLab users found in the commit information. :param repository: The repository this commit belongs to. :type repository: cla.models.model_interfaces.Repository :param commit: Commit object that we're handling. :type commit: gitlab.ProjectCommit :param signed: List of information on contributors who have signed. Should be modified in-place to add a signer if found. :type signed: [dict] :param missing: List of information on contributors who have not yet signed. Should be modified in-place to add a missing signer if found. :type missing: [dict] """ user = cla.utils.get_user_instance().get_user_by_email(commit.author_email) if user is None: # Git commit author not in system yet, signature does not exist for this user. cla.log.info('Git commit author not found: %s <%s>', commit.author_name, commit.author_email) missing.append((commit.id, commit.author_name)) else: cla.log.info('Git commit author found: %s <%s>', user.get_user_name(), user.get_user_email()) if cla.utils.user_signed_project_signature(user, repository): signed.append((commit.id, user.get_user_name())) else: missing.append((commit.id, None))
24,251
def make_call_context(sender_address: Address,
                      gas: int = None,
                      value: int = None,
                      gas_price: int = None,
                      data: bytes = None) -> Generator[Tuple[str, Any], None, None]:
    """
    Makes the context for message call.
    """
    if not is_address(sender_address):
        raise ValueError('Message call sender provided is not an address')

    # 'from' is required in eth_tester
    yield 'from', to_checksum_address(sender_address)

    if gas is not None:
        yield 'gas', gas

    if value is not None:
        yield 'value', value

    if gas_price is not None:
        yield 'gas_price', gas_price

    if data is not None:
        yield 'data', data
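Since this is a generator of key/value pairs, callers presumably collect it into a dict; a hedged usage sketch (the sender address is a made-up example, and `is_address`/`to_checksum_address` are assumed to behave like the eth_utils helpers of the same names):

sender = '0x' + 'ab' * 20   # hypothetical 20-byte address
context = dict(make_call_context(sender, gas=21000, value=0))
# e.g. {'from': '0x...checksummed address...', 'gas': 21000, 'value': 0}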
24,252
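A minimal usage sketch for make_call_context; it assumes the helpers the original module imports (is_address, to_checksum_address) are in scope, and the zero address below is just a placeholder sender.

zero_address = "0x" + "00" * 20  # placeholder sender, not a real account

call_context = dict(make_call_context(zero_address, gas=21000, value=0))
print(call_context)
# {'from': '0x0000000000000000000000000000000000000000', 'gas': 21000, 'value': 0}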
def nyul_normalize(img_dir, mask_dir=None, output_dir=None, standard_hist=None, write_to_disk=True): """ Use Nyul and Udupa method ([1,2]) to normalize the intensities of a set of MR images Args: img_dir (str): directory containing MR images mask_dir (str): directory containing masks for MR images output_dir (str): directory to save images if you do not want them saved in same directory as data_dir standard_hist (str): path to output or use standard histogram landmarks write_to_disk (bool): write the normalized data to disk or nah Returns: normalized (np.ndarray): last normalized image from img_dir References: [1] N. Laszlo G and J. K. Udupa, “On Standardizing the MR Image Intensity Scale,” Magn. Reson. Med., vol. 42, pp. 1072–1081, 1999. [2] M. Shah, Y. Xiao, N. Subbanna, S. Francis, D. L. Arnold, D. L. Collins, and T. Arbel, “Evaluating intensity normalization on MRIs of human brain with multiple sclerosis,” Med. Image Anal., vol. 15, no. 2, pp. 267–282, 2011. """ input_files = io.glob_nii(img_dir) if output_dir is None: out_fns = [None] * len(input_files) else: out_fns = [] for fn in input_files: _, base, ext = io.split_filename(fn) out_fns.append(os.path.join(output_dir, base + '_hm' + ext)) if not os.path.exists(output_dir): os.mkdir(output_dir) mask_files = [None] * len(input_files) if mask_dir is None else io.glob_nii(mask_dir) if standard_hist is None: logger.info('Learning standard scale for the set of images') standard_scale, percs = train(input_files, mask_files) elif not os.path.isfile(standard_hist): logger.info('Learning standard scale for the set of images') standard_scale, percs = train(input_files, mask_files) np.save(standard_hist, np.vstack((standard_scale, percs))) else: logger.info('Loading standard scale ({}) for the set of images'.format(standard_hist)) standard_scale, percs = np.load(standard_hist) normalized = None for i, (img_fn, mask_fn, out_fn) in enumerate(zip(input_files, mask_files, out_fns)): _, base, _ = io.split_filename(img_fn) logger.info('Transforming image {} to standard scale ({:d}/{:d})'.format(base, i + 1, len(input_files))) img = io.open_nii(img_fn) mask = io.open_nii(mask_fn) if mask_fn is not None else None normalized = do_hist_norm(img, percs, standard_scale, mask) if write_to_disk: io.save_nii(normalized, out_fn, is_nii=True) return normalized
24,253
def likelihood_params(ll_mode, mode, behav_tuple, num_induc, inner_dims, inv_link, tbin, jitter, J, cutoff, neurons, mapping_net, C): """ Create the likelihood object. """ if mode is not None: kernel_tuples_, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims) if ll_mode =='hZIP': inv_link_hetero = 'sigmoid' elif ll_mode =='hCMP': inv_link_hetero = 'identity' elif ll_mode =='hNB': inv_link_hetero = 'softplus' else: inv_link_hetero = None if inv_link_hetero is not None: mean_func = np.zeros((inner_dims)) kt, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims) gp_lvms = GP_params(ind_list, kt, num_induc, neurons, inv_link, jitter, mean_func, None, learn_mean=True) else: gp_lvms = None inv_link_hetero = None if ll_mode == 'IBP': likelihood = mdl.likelihoods.Bernoulli(tbin, inner_dims, inv_link) elif ll_mode == 'IP': likelihood = mdl.likelihoods.Poisson(tbin, inner_dims, inv_link) elif ll_mode == 'ZIP' or ll_mode =='hZIP': alpha = .1*np.ones(inner_dims) likelihood = mdl.likelihoods.ZI_Poisson(tbin, inner_dims, inv_link, alpha, dispersion_mapping=gp_lvms) #inv_link_hetero = lambda x: torch.sigmoid(x)/tbin elif ll_mode == 'NB' or ll_mode =='hNB': r_inv = 10.*np.ones(inner_dims) likelihood = mdl.likelihoods.Negative_binomial(tbin, inner_dims, inv_link, r_inv, dispersion_mapping=gp_lvms) elif ll_mode == 'CMP' or ll_mode =='hCMP': log_nu = np.zeros(inner_dims) likelihood = mdl.likelihoods.COM_Poisson(tbin, inner_dims, inv_link, log_nu, J=J, dispersion_mapping=gp_lvms) elif ll_mode == 'IG': # renewal process shape = np.ones(inner_dims) likelihood = mdl.likelihoods.Gamma(tbin, inner_dims, inv_link, shape, allow_duplicate=False) elif ll_mode == 'IIG': # renewal process mu_t = np.ones(inner_dims) likelihood = mdl.likelihoods.invGaussian(tbin, inner_dims, inv_link, mu_t, allow_duplicate=False) elif ll_mode == 'LN': # renewal process sigma_t = np.ones(inner_dims) likelihood = mdl.likelihoods.logNormal(tbin, inner_dims, inv_link, sigma_t, allow_duplicate=False) elif ll_mode == 'U': likelihood = mdl.likelihoods.Universal(inner_dims//C, C, inv_link, cutoff, mapping_net) else: raise NotImplementedError return likelihood
24,254
def is_num_idx(k): """Return True if this key corresponds to a numeric index entry, i.e. it ends with "_x" and starts with either "tap_x" or "sig".""" return k.endswith("_x") and (k.startswith("tap_x") or k.startswith("sig"))
24,255
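A few hypothetical keys showing which names is_num_idx accepts; the key strings are made up purely for illustration.

for key in ("tap_x_3_x", "sig_mean_x", "tap_x_3_y", "other_x"):
    print(key, is_num_idx(key))
# tap_x_3_x True    (starts with "tap_x" and ends with "_x")
# sig_mean_x True   (starts with "sig" and ends with "_x")
# tap_x_3_y False   (does not end with "_x")
# other_x False     (ends with "_x" but has neither prefix)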
def convert_to_boolarr(int_arr, cluster_id): """ :param int_arr: array of integers which relate to no, one or multiple clusters cluster_id: 0=Pleiades, 1=Meingast 1, 2=Hyades, 3=Alpha Per, 4=Coma Ber """ return np.array((np.floor(int_arr/2**cluster_id) % 2), dtype=bool)
24,256
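A minimal usage sketch for convert_to_boolarr; the integer codes below are hypothetical and simply encode cluster membership bitwise (bit 0 = Pleiades, bit 1 = Meingast 1, and so on), which is what the floor-divide-and-mod in the function decodes.

import numpy as np

member_codes = np.array([0, 1, 2, 3, 6])  # hypothetical per-star membership codes
print(convert_to_boolarr(member_codes, cluster_id=0))  # [False  True False  True False]
print(convert_to_boolarr(member_codes, cluster_id=1))  # [False False  True  True  True]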
def serialize(root): # """Serialization is the process of converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment. Design an algorithm to serialize and deserialize a binary tree. There is no restriction on how your serialization/deserialization algorithm should work. You just need to ensure that a binary tree can be serialized to a string and this string can be deserialized to the original tree structure. Input: 1 / \ 2 3 / \ 4 5 1 / \ 2 3 / \ 4 56 7 1 / \ 10 11 / \ 100 101 110 111 Output: [1,2,3,null,null,4,5] 0 = 0 1 = 2**0 + 1 2 = 2**0 + 2 3 = 2**1 + 1 4 = 2**1 + 2 5 = 2**2 + 1 6 = 2**2 + 2**1 7 = 2**2 + 2**1 + 2**0 """ queue = [(root, "1")] indices = {} max_location = 0 while queue: node, location = queue.pop(0) current_location = int(location, 2) max_location = max(max_location, current_location) indices[int(location, 2)] = node.val if node.left: queue.append((node.left, location + "0")) if node.right: queue.append((node.right, location + "1")) result = [None] * (max_location + 1) for k, v in indices.items(): result[k] = v return result[1:]
24,257
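A minimal usage sketch for serialize with a hypothetical Node class (val/left/right attributes); it builds the first example tree from the docstring, where 4 and 5 hang off node 3.

class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = Node(1, Node(2), Node(3, Node(4), Node(5)))
print(serialize(root))  # [1, 2, 3, None, None, 4, 5]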
def test_sort(): """Test sort container by specific index""" a = (10, 3, 3) b = (5, 1, -1) c = (1, -3, 4) list0 = (a, b, c) # Test sort on 1st entry list1 = floodsystem.utils.sorted_by_key(list0, 0) assert list1[0] == c assert list1[1] == b assert list1[2] == a # Test sort on 2nd entry list1 = floodsystem.utils.sorted_by_key(list0, 1) assert list1[0] == c assert list1[1] == b assert list1[2] == a # Test sort on 3rd entry list1 = floodsystem.utils.sorted_by_key(list0, 2) assert list1[0] == b assert list1[1] == a assert list1[2] == c
24,258
def ensure_daemon(f): """A decorator for running an integration test with and without the daemon enabled.""" def wrapper(self, *args, **kwargs): for enable_daemon in [False, True]: enable_daemon_str = str(enable_daemon) env = { "HERMETIC_ENV": "PANTS_PANTSD,PANTS_SUBPROCESSDIR", "PANTS_PANTSD": enable_daemon_str, } with environment_as(**env): try: f(self, *args, **kwargs) except Exception: print(f"Test failed with enable-pantsd={enable_daemon}:") if not enable_daemon: print( "Skipping run with pantsd=true because it already " "failed with pantsd=false." ) raise finally: kill_daemon() return wrapper
24,259
def create_auth_token(sender, instance=None, created=False, **kwargs): """ Tokens for users would be created as soon as they register through signals """ if created: Token.objects.create(user=instance)
24,260
def reproject_dataset(dataset, out_srs, in_srs=None): """Standalone function to reproject a given dataset with the option of forcing an input reference system :param out_srs: The desired output format in WKT. :type out_srs: str :param in_srs: The input format in WKT from which to convert. The default is the dataset's current reference system. :type in_srs: str :return: A reprojected dataset :rtype: adaptivefiltering.DataSet """ from adaptivefiltering.pdal import execute_pdal_pipeline from adaptivefiltering.pdal import PDALInMemoryDataSet dataset = PDALInMemoryDataSet.convert(dataset) if in_srs is None: in_srs = dataset.spatial_reference config = { "type": "filters.reprojection", "in_srs": in_srs, "out_srs": out_srs, } pipeline = execute_pdal_pipeline(dataset=dataset, config=config) spatial_reference = json.loads(pipeline.metadata)["metadata"][ "filters.reprojection" ]["comp_spatialreference"] return PDALInMemoryDataSet( pipeline=pipeline, spatial_reference=spatial_reference, )
24,261
def conditional(condition, decorator): """ Decorator for a conditionally applied decorator. Example: @conditional(get_config('use_cache'), ormcache) def fn(): pass """ if condition: return decorator else: return lambda fn: fn
24,262
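A minimal usage sketch for conditional; cache_decorator below is a stand-in for the ormcache decorator mentioned in the docstring, and use_cache plays the role of get_config('use_cache').

def cache_decorator(fn):
    cache = {}
    def wrapper(x):
        if x not in cache:
            cache[x] = fn(x)
        return cache[x]
    return wrapper

use_cache = True

@conditional(use_cache, cache_decorator)
def square(x):
    return x * x

print(square(4))  # 16; cached when use_cache is True, a plain call otherwise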
def variance(data, mu=None): """Compute variance over a list.""" if mu is None: mu = statistics.mean(data) return sum([(x - mu) ** 2 for x in data]) / len(data)
24,263
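A minimal usage sketch for variance; note that it divides by n, so it matches the population variance from the standard library.

import statistics  # variance falls back to statistics.mean when mu is omitted

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(variance(data))              # 4.0
print(statistics.pvariance(data))  # 4.0, the stdlib population variance agrees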
def make_note(outfile, headers, paragraphs, **kw):
    """Builds a pdf file named outfile based on headers and paragraphs, formatted according to parameters in kw.

    :param outfile: outfile name
    :param headers: <OrderedDict> of headers
    :param paragraphs: <OrderedDict> of paragraphs
    :param kw: keyword arguments for formatting
    """
    story = [Paragraph(x, headers[x]) for x in headers.keys()]
    for headline, paragraph in paragraphs.items():
        story.append(Paragraph(headline, paragraph.get("style", h3)))
        if "tpl" not in paragraph:
            for sub_headline, sub_paragraph in paragraph.items():
                story.append(Paragraph(sub_headline, paragraph.get("style", h4)))
                story.append(Paragraph(sub_paragraph.get("tpl").render(**kw), p))
        else:
            if isinstance(paragraph.get("tpl"), Template):
                story.append(Paragraph(paragraph.get("tpl").render(**kw), p))
            elif isinstance(paragraph.get("tpl"), Table):
                story.append(Spacer(1, 0.2 * inch))
                story.append(paragraph.get("tpl"))
                story.append(Spacer(1, 0.2 * inch))
            else:
                pass
    doc = SimpleDocTemplate(outfile)
    doc.build(story, onFirstPage=formatted_page, onLaterPages=formatted_page)
    return doc
24,264
def output_file_exists(filename): """Check if a file exists and its size is > 0""" if not file_exists(filename): return False st = stat(filename) if st[stat_module.ST_SIZE] == 0: return False return True
24,265
def IMF_N(m,a=.241367,b=.241367,c=.497056): """ returns number of stars with mass m """ # a,b,c = (.241367,.241367,.497056) # a=b=c=1/3.6631098624 if .1 <= m <= .3: res = c*( m**(-1.2) ) elif .3 < m <= 1.: res = b*( m**(-1.8) ) elif 1. < m <= 100.: # res = a*( m**(-1.3)-100**(-1.3) )/1.3 res = a*( m**(-2.3) ) else: res = 0 return res
24,266
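A minimal usage sketch for IMF_N, evaluating the piecewise initial mass function at a few masses (in solar masses); values outside the 0.1 to 100 range fall through to 0.

for m in (0.2, 0.5, 2.0, 200.0):
    print(m, IMF_N(m))  # the last mass lies outside [0.1, 100] and returns 0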
def features_disable(partial_name, partial_name_field, force, **kwargs): """Disable a feature""" mode = "disable" params = {"mode": "force"} if force else None feature = _okta_get("features", partial_name, selector=_selector_field_find(partial_name_field, partial_name)) feature_id = feature["id"] rv = okta_manager.call_okta(f"/features/{feature_id}/{mode}", REST.post, params=params) return rv
24,267
def _get_images(): """Get the official AWS public AMIs created by Flambe that have tag 'Creator: flambe@asapp.com' ATTENTION: why not just search the tags? We need to make sure the AMIs we pick were created by the Flambe team. Because of tags values not being unique, anyone can create a public AMI with 'Creator: flambe@asapp.com' as a tag. If we pick that AMI, then we could potentially be creating instances with unknown AMIs, causing potential security issues. By filtering by our acount id (which can be public), then we can make sure that all AMIs that are being scanned were created by Flambe team. """ client = boto3.client('ec2') return client.describe_images(Owners=[const.AWS_FLAMBE_ACCOUNT], Filters=[{'Name': 'tag:Creator', 'Values': ['flambe@asapp.com']}])
24,268
def ar_coefficient(x, param): """ This feature calculator fits the unconditional maximum likelihood of an autoregressive AR(k) process. The k parameter is the maximum lag of the process .. math:: X_{t}=\\varphi_0 +\\sum _{{i=1}}^{k}\\varphi_{i}X_{{t-i}}+\\varepsilon_{t} For the configurations from param which should contain the maxlag "k" and such an AR process is calculated. Then the coefficients :math:`\\varphi_{i}` whose index :math:`i` contained from "coeff" are returned. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"coeff": x, "k": y} with x,y int :type param: list :return x: the different feature values :return type: pandas.Series """ calculated_ar_params = {} x_as_list = list(x) calculated_AR = AR(x_as_list) res = {} k = param["k"] p = param["coeff"] column_name = "k_{}__coeff_{}".format(k, p) if k not in calculated_ar_params: try: calculated_ar_params[k] = calculated_AR.fit(maxlag=k, solver="mle").params except (np.linalg.LinAlgError, ValueError): calculated_ar_params[k] = [np.NaN]*k mod = calculated_ar_params[k] if p <= k: try: res[column_name] = mod[p] except IndexError: res[column_name] = 0 else: res[column_name] = np.NaN return [value for key, value in res.items()][0]
24,269
def measure_ir( sweep_length=1.0, sweep_type="exponential", fs=48000, f_lo=0.0, f_hi=None, volume=0.9, pre_delay=0.0, post_delay=0.1, fade_in_out=0.0, dev_in=None, dev_out=None, channels_input_mapping=None, channels_output_mapping=None, ascending=False, deconvolution=True, plot=True, ): """ Measures an impulse response by playing a sweep and recording it using the sounddevice package. Parameters ---------- sweep_length: float, optional length of the sweep in seconds sweep_type: SweepType, optional type of sweep to use linear or exponential (default) fs: int, optional sampling frequency (default 48 kHz) f_lo: float, optional lowest frequency in the sweep f_hi: float, optional highest frequency in the sweep, can be a negative offset from fs/2 volume: float, optional multiply the sweep by this number before playing (default 0.9) pre_delay: float, optional delay in second before playing sweep post_delay: float, optional delay in second before stopping recording after playing the sweep fade_in_out: float, optional length in seconds of the fade in and out of the sweep (default 0.) dev_in: int, optional input device number dev_out: int, optional output device number channels_input_mapping: array_like, optional List of channel numbers (starting with 1) to record. If mapping is given, channels is silently ignored. channels_output_mapping: array_like, optional List of channel numbers (starting with 1) where the columns of data shall be played back on. Must have the same length as number of channels in data (except if data is mono, in which case the signal is played back on all given output channels). Each channel number may only appear once in mapping. ascending: bool, optional wether the sweep is from high to low (default) or low to high frequencies deconvolution: bool, optional if True, apply deconvolution to the recorded signal to remove the sweep (default 0.) plot: bool, optional plot the resulting signal Returns ------- Returns the impulse response if `deconvolution == True` and the recorded signal if not """ if not sounddevice_available: raise ImportError( "Sounddevice package not availble. Install it to use this function." 
) N = int(sweep_length * fs) + 1 sweep_func = _sweep_types[sweep_type] sweep = sweep_func( sweep_length, fs, f_lo=f_lo, f_hi=f_hi, fade=fade_in_out, ascending=ascending ) # adjust the amplitude sweep *= volume # zero pad as needed pre_zeros = int(pre_delay * fs) post_zeros = int(post_delay * fs) test_signal = np.concatenate((np.zeros(pre_zeros), sweep, np.zeros(post_zeros))) # setup audio interface parameters if channels_input_mapping is None: channels_input_mapping = [1] if channels_output_mapping is None: channels_output_mapping = [1] if dev_in is not None: sd.default.device[0] = dev_in if dev_out is not None: sd.default.device[1] = dev_out # repeat if we need to play in multiple channels if len(channels_output_mapping) > 1: play_signal = np.tile( test_signal[:, np.newaxis], (1, len(channels_output_mapping)) ) else: play_signal = test_signal recorded_signal = sd.playrec( test_signal, samplerate=fs, input_mapping=channels_input_mapping, output_mapping=channels_output_mapping, blocking=True, ) h = None if deconvolution: h = np.array( [ wiener_deconvolve(recorded_signal[:, c], sweep) for c in range(len(channels_input_mapping)) ] ).T if plot: try: import matplotlib.pyplot as plt except ImportError: import warnings warnings.warn("Matplotlib is required for plotting") return if h is not None: plt.figure() plt.subplot(1, 2, 1) plt.plot(np.arange(h.shape[0]) / fs, h) plt.title("Impulse Response") plt.subplot(1, 2, 2) freq = np.arange(h.shape[0] // 2 + 1) * fs / h.shape[0] plt.plot(freq, 20.0 * np.log10(np.abs(np.fft.rfft(h, axis=0)))) plt.title("Frequency content") plt.figure() plt.subplot(1, 2, 1) plt.plot(np.arange(recorded_signal.shape[0]) / fs, recorded_signal) plt.title("Recorded signal") plt.subplot(1, 2, 2) freq = ( np.arange(recorded_signal.shape[0] // 2 + 1) * fs / recorded_signal.shape[0] ) plt.plot(freq, 20.0 * np.log10(np.abs(np.fft.rfft(recorded_signal, axis=0)))) plt.title("Frequency content") plt.show() if deconvolution: return recorded_signal, h else: return recorded_signal
24,270
def address_factory(sqla): """Create a fake address.""" fake = Faker() # Use a generic one; others may not have all methods. addresslines = fake.address().splitlines() areas = sqla.query(Area).all() if not areas: create_multiple_areas(sqla, random.randint(3, 6)) areas = sqla.query(Area).all() current_area = random.choice(areas) address = { 'name': fake.name(), 'address': addresslines[0], 'city': addresslines[1].split(",")[0], 'area_id': current_area.id, 'country_code': current_area.country_code, 'latitude': random.random() * 0.064116 + -2.933783, 'longitude': random.random() * 0.09952 + -79.055411 } return address
24,271
def generate_test_bench(file_path): """ Main function to generate a testbench from a VHDL source file. The script takes the input path of the file and generates a new file in that directory with a prefix defined by testbench_prefix. :param file_path: path to the .vhd file :return: nothing """ output_file_path = get_output_file_path(file_path) # print(output_file_path) testbench_metadata = dict() testbench_metadata["header_description"] = list() testbench_metadata["libraries"] = list() testbench_metadata["entity_name"] = "" testbench_metadata["generics"] = list() testbench_metadata["ports"] = list() testbench_metadata["archs"] = list() with open(file_path) as fd: entity_name = None header_description = list() libraries = list() data_generics = list() data_ports = list() arch_names = list() entity_complete = False for line in fd: if entity_name is None: # look for header result = regex_header.findall(line) if result: header_description.append(result[0]) # look for libraries result = regex_libraries.findall(line) if result: libraries.append(result[0]) # look for entity name result = regex_entity.findall(line) if result: entity_name = result[0] if entity_name and not entity_complete: # generics: result = regex_generics.findall(line) result = list(result[0]) if result else None if result: data_generics.append(result) # ports: # result = regex_ports.findall(line.lower()) result = regex_ports.findall(line) result = list(result[0]) if result else None if result: data_ports.append(result) # end of the entity description if ("end " + entity_name in line) or ("end entity" in line): entity_complete = True if entity_complete: # architecture name result = regex_arch.findall(line) result = result[0] if result else None if result: arch_names.append(result) testbench_metadata["header_description"] = header_description testbench_metadata["libraries"] = libraries testbench_metadata["entity_name"] = entity_name testbench_metadata["generics"] = data_generics testbench_metadata["ports"] = data_ports testbench_metadata["archs"] = arch_names write_testbench(output_file_path, testbench_metadata)
24,272
def discover_performance_dfg(log: Union[EventLog, pd.DataFrame], business_hours: bool = False, worktiming: List[int] = [7, 17], weekends: List[int] = [6, 7]) -> Tuple[dict, dict, dict]: """ Discovers a performance directly-follows graph from an event log Parameters --------------- log Event log business_hours Enables/disables the computation based on the business hours (default: False) worktiming (If the business hours are enabled) The hour range in which the resources of the log are working (default: 7 to 17) weekends (If the business hours are enabled) The weekends days (default: Saturday (6), Sunday (7)) Returns --------------- performance_dfg Performance DFG start_activities Start activities end_activities End activities """ general_checks_classical_event_log(log) if check_is_pandas_dataframe(log): check_pandas_dataframe_columns(log) from pm4py.util import constants properties = get_properties(log) from pm4py.algo.discovery.dfg.adapters.pandas.df_statistics import get_dfg_graph activity_key = properties[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in properties else xes_constants.DEFAULT_NAME_KEY timestamp_key = properties[constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] if constants.PARAMETER_CONSTANT_TIMESTAMP_KEY in properties else xes_constants.DEFAULT_TIMESTAMP_KEY case_id_key = properties[constants.PARAMETER_CONSTANT_CASEID_KEY] if constants.PARAMETER_CONSTANT_CASEID_KEY in properties else constants.CASE_CONCEPT_NAME dfg = get_dfg_graph(log, activity_key=activity_key, timestamp_key=timestamp_key, case_id_glue=case_id_key, measure="performance", perf_aggregation_key="all", business_hours=business_hours, worktiming=worktiming, weekends=weekends) from pm4py.statistics.start_activities.pandas import get as start_activities_module from pm4py.statistics.end_activities.pandas import get as end_activities_module start_activities = start_activities_module.get_start_activities(log, parameters=properties) end_activities = end_activities_module.get_end_activities(log, parameters=properties) else: from pm4py.algo.discovery.dfg.variants import performance as dfg_discovery properties = get_properties(log) properties[dfg_discovery.Parameters.AGGREGATION_MEASURE] = "all" properties[dfg_discovery.Parameters.BUSINESS_HOURS] = business_hours properties[dfg_discovery.Parameters.WORKTIMING] = worktiming properties[dfg_discovery.Parameters.WEEKENDS] = weekends dfg = dfg_discovery.apply(log, parameters=properties) from pm4py.statistics.start_activities.log import get as start_activities_module from pm4py.statistics.end_activities.log import get as end_activities_module start_activities = start_activities_module.get_start_activities(log, parameters=properties) end_activities = end_activities_module.get_end_activities(log, parameters=properties) return dfg, start_activities, end_activities
24,273
def mifs(data, target_variable, prev_variables_index, candidate_variable_index, **kwargs): """ This estimator computes the Mutual Information Feature Selection criterion. Parameters ---------- data : np.array matrix Matrix of data set. Columns are variables, rows are observations. target_variable : int or float Target variable. Can not be in data! prev_variables_index: list of ints, set of ints Indexes of previously selected variables. candidate_variable_index : int Index of candidate variable in data matrix. beta: float Impact of redundancy segment in MIFS approximation. Higher the beta is, higher the impact. Returns ------- j_criterion_value : float J_criterion approximated by the Mutual Information Feature Selection. """ assert isinstance(data, np.ndarray), "Argument 'data' must be a numpy matrix" assert isinstance(target_variable, np.ndarray), "Argument 'target_variable' must be a numpy matrix" assert isinstance(candidate_variable_index, int), "Argument 'candidate_variable_index' must be an integer" assert len(data.shape) == 2, "For 'data' argument use numpy array of shape (n,p)" assert data.shape[0] == len(target_variable), "Number of rows in 'data' must equal target_variable length" assert candidate_variable_index < data.shape[1], "Index 'candidate_variable_index' out of range in 'data'" for i in prev_variables_index: assert isinstance(i, int), "All previous variable indexes must be int." if kwargs.get('beta') is None: beta = 1 warnings.warn("Parameter `beta` not provided, default value of 1 is selected.", Warning) else: beta = kwargs.pop('beta') assert isinstance(beta, int) or isinstance(beta, float), "Argument 'beta' must be int or float" candidate_variable = data[:, candidate_variable_index] if len(prev_variables_index) == 0: redundancy_sum = 0 else: redundancy_sum = np.apply_along_axis(mutual_information, axis=0, arr=data[:, prev_variables_index], vector_2=candidate_variable).sum() return mutual_information(candidate_variable, target_variable) - beta*redundancy_sum
24,274
async def cmd_denybnc(text: str, message, bnc_queue, conn: 'Conn'): """<user> - Deny [user]'s BNC request""" nick = text.split()[0] if nick not in bnc_queue: message(f"{nick} is not in the BNC queue.") return conn.rem_queue(nick) message( f"SEND {nick} Your BNC auth could not be added at this time", "MemoServ" ) conn.chan_log(f"{nick} has been denied. Memoserv sent.")
24,275
def main(args): """Main function""" args_check(args) mkdir(TMP) mkdir(RESULT) # set_cpu_mode or set_gpu_mode decides whether using # CPU/GPU to do weights calibration, but activation calibration is # controled by caffe APIs: caffe.set_mode_cpu() or set_mode_gpu(). # Need to set amct mode before the whole calibration process, # default using CPU mode to do weights calibration. # amct.set_gpu_mode() does not set which GPU card to use. Users can # set GPU card in two ways: # 1) use pycaffe API set_device(gpu_id) # 2) use environment variable CUDA_VISIBLE_DEVICES if args.gpu_id is not None and not args.cpu_mode: caffe.set_mode_gpu() caffe.set_device(args.gpu_id) amct.set_gpu_mode() else: caffe.set_mode_cpu() # Run pre model test if args.pre_test: if not args.benchmark: run_caffe_model( args.model_file, args.weights_file, args.iterations) else: do_benchmark_test( args, args.model_file, args.weights_file, args.iterations) print('[AMCT][INFO]Run ResNet-50 without quantize success!') return # Quantize configurations config_json_file = os.path.join(TMP, 'config.json') skip_layers = [] batch_num = 2 if args.cfg_define is not None: # do weights calibration with non uniform quantize configure amct.create_quant_config(config_json_file, args.model_file, args.weights_file, skip_layers, batch_num, config_defination=args.cfg_define) else: amct.create_quant_config(config_json_file, args.model_file, args.weights_file, skip_layers, batch_num) # Phase0: Init amct task scale_offset_record_file = os.path.join(TMP, 'scale_offset_record.txt') graph = amct.init(config_json_file, args.model_file, args.weights_file, scale_offset_record_file) # Phase1: do conv+bn+scale fusion, weights calibration and fake # quant, insert quant and dequant layer modified_model_file = os.path.join(TMP, 'modified_model.prototxt') modified_weights_file = os.path.join(TMP, 'modified_model.caffemodel') amct.quantize_model(graph, modified_model_file, modified_weights_file) # Phase2: run caffe model to do activation calibration if not args.benchmark: run_caffe_model( modified_model_file, modified_weights_file, batch_num) else: do_benchmark_test( args, modified_model_file, modified_weights_file, batch_num) # Phase3: save final model, one for caffe do fake quant test, one # deploy model for GE result_path = os.path.join(RESULT, 'ResNet50') amct.save_model(graph, 'Both', result_path) # Phase4: do final fake quant model test fake_quant_model = os.path.join( RESULT, 'ResNet50_fake_quant_model.prototxt') fake_quant_weights = os.path.join( RESULT, 'ResNet50_fake_quant_weights.caffemodel') if not args.benchmark: run_caffe_model( fake_quant_model, fake_quant_weights, args.iterations) else: do_benchmark_test( args, fake_quant_model, fake_quant_weights, args.iterations) print('[AMCT][INFO]Run ResNet-50 with quantize success!')
24,276
def input_thing(): """Read an item's information (name, price, weight) from user input.""" name_str, price_str, weight_str = input('请输入物品信息(名称 价格 重量):').split() return name_str, int(price_str), int(weight_str)
24,277
def extract_features_mask(img, mask): """Computes Laws texture features for the masked area of an image.""" preprocessed_img = laws_texture.preprocess_image(img, size=15) law_images = laws_texture.filter_image(preprocessed_img, LAW_MASKS) law_energy = laws_texture.compute_energy(law_images, 10) energy_features_list = [] for energy in law_energy.values(): # extract features for mask energy_masked = energy[np.where(mask != 0)] energy_feature = np.mean(energy_masked, dtype=np.float32) energy_features_list.append(energy_feature) return energy_features_list
24,278
def construct_grid_with_k_connectivity(n1,n2,k,figu = False): """Constructs directed grid graph with side lengths n1 and n2 and neighborhood connectivity k""" """For plotting the adjacency matrix give fig = true""" def feuclidhorz(u , v): return np.sqrt((u[0] - (v[0]-n2))**2+(u[1] - v[1])**2) def feuclidvert(u , v): return np.sqrt((u[0] - (v[0]))**2+(u[1] - (v[1]-n1))**2 ) def fperiodeuc(u , v): return np.sqrt((u[0] - (v[0]-n2))**2 + (u[1] - (v[1]-n1))**2 ) def finvperiodic(u,v): return fperiodeuc(v,u) def finvvert(u,v): return feuclidvert(v,u) def finvhorz(u,v): return feuclidhorz(v,u) def fperiodeucb(u , v): return np.sqrt((u[0]-n2 - (v[0]))**2 + (u[1] - (v[1]-n1))**2 ) def fperiodeucc(v, u): return np.sqrt((u[0]-n2 - (v[0]))**2 + (u[1] - (v[1]-n1))**2 ) def fchhorz(u , v): return max(abs(u[0] - (v[0]-n2)), abs(u[1] - v[1])) def fchvert(u , v): return max(abs(u[0] - (v[0])),abs(u[1] - (v[1]-n1)) ) def fperiodch(u , v): return max(abs(u[0] - (v[0]-n2)) , abs(u[1] - (v[1]-n1)) ) def finvperiodicch(u,v): return fperiodch(v,u) def finvvertch(u,v): return fchvert(v,u) def finvhorzch(u,v): return fchhorz(v,u) def fperiodchb(u , v): return max(abs(u[0]-n2 - (v[0])) , abs(u[1] - (v[1]-n1))) def fperiodchc(v, u): return max(abs(u[0]-n2 - (v[0])) , abs(u[1] - (v[1]-n1)) ) def fperiodchd(u , v): return max(abs(n2-u[0] - (v[0])) , abs(u[1] - (n1-v[1]))) def fperiodche(v, u): return max(abs(n2-u[0] - (v[0])) , abs(u[1] - (n1-v[1])) ) #distF = distance Function #distM = distance meter for case in switch(k): if case(4): distF = 'euclidean' distM = 1 #.41 break if case(8): distF = 'euclidean' distM = 1.5 break if case(12): distF = 'euclidean' distM = 2 break if case(20): distF = 'euclidean' distM = 2.3 #2.5 break if case(24): #check this again distF = 'chebyshev' distM = 2 #or euclidean 2.9 break if case(36): distF = 'euclidean' distM = 3.5 break if case(44): distF = 'euclidean' distM = 3.8 break if case(28): distF = 'euclidean' distM = 3 break if case(48): distF = 'euclidean' distM = 4 break x = np.linspace(1,n1,n1) y = np.linspace(1,n2,n2) X,Y = np.meshgrid(x,y) XY = np.vstack((Y.flatten(), X.flatten())) adj = squareform( (pdist(XY.T, metric = distF)) <= distM ) if k!= 24: adjb = squareform( (pdist(XY.T, metric = feuclidhorz)) <= distM ) adjc = squareform( (pdist(XY.T, metric = feuclidvert)) <= distM ) adjd = squareform( (pdist(XY.T, metric = fperiodeuc)) <= distM ) adje = squareform( (pdist(XY.T, metric = finvperiodic)) <= distM ) adjf = squareform( (pdist(XY.T, metric = finvvert)) <= distM ) adjg = squareform( (pdist(XY.T, metric = finvhorz)) <= distM ) adjx = squareform( (pdist(XY.T, metric = fperiodeucc)) <= distM ) adjy = squareform( (pdist(XY.T, metric = fperiodeucb)) <= distM ) Adj = ( adj + adjb +adjc+adjd+adje+adjf+adjg+adjx+adjy >=1) if k == 24: adjb = squareform( (pdist(XY.T, metric = fchhorz)) <= distM ) adjc = squareform( (pdist(XY.T, metric = fchvert)) <= distM ) adjd = squareform( (pdist(XY.T, metric = fperiodch)) <= distM ) adje = squareform( (pdist(XY.T, metric = finvperiodicch)) <= distM ) adjf = squareform( (pdist(XY.T, metric = finvvertch)) <= distM ) adjg = squareform( (pdist(XY.T, metric = finvhorzch)) <= distM ) adjx = squareform( (pdist(XY.T, metric = fperiodchb)) <= distM ) adjy = squareform( (pdist(XY.T, metric = fperiodchc)) <= distM ) Adj = ( adj + adjb +adjc+adjd+adje+adjf+adjg+adjx+adjy >=1) #Adj = ( adj+adjb >=1 ) #print adj #plt.plot(sum(Adj)) if figu: plt.figure(figsize=(1000,1000)) plt.imshow(Adj,interpolation = 'none', extent = [0,n1*n2 , n1*n2,0] ) 
plt.xticks(np.arange(n1*n2)) plt.yticks(np.arange(n1*n2)) plt.grid(ls = 'solid') #plt.colorbar() """ #text portion min_val = 0 max_val = n1*n2 diff = 1 ind_array = np.arange(min_val, max_val, diff) x, y = np.meshgrid(ind_array, ind_array) for x_val, y_val in zip(x.flatten(), y.flatten()): c = adj[x_val,y_val] plt.text(x_val+0.5, y_val+0.5, '%.2f' % c, fontsize=8,va='center', ha='center') """ G = nx.from_numpy_matrix(Adj) return (G,Adj)
24,279
def test_remove_duplicate_events(): """Test the remove_duplicate_events function by ensuring that duplicate events are removed.""" one = EventRecord(received_at=datetime(2018, 2, 19, 0, 0, 11), source_type="datastoretest", owner="a", data={}) one.fingerprint = "f1" two = EventRecord(received_at=datetime(2018, 2, 20, 0, 0, 11), source_type="datastoretest", owner="a", data={}) two.fingerprint = "f2" three = EventRecord(received_at=datetime(2018, 2, 21, 0, 0, 11), source_type="datastoretest2", owner="a", data={}) three.fingerprint = "f1" records = [one, two, three] records = remove_duplicate_events(records) assert three in records assert one not in records assert len(records) == 2
24,280
def get_tar_file(tar_url, dump_dir=os.getcwd()): """ Downloads and unpacks compressed folder Parameters ---------- tar_url : string url of world wide web location dump_dir : string path to place the content Returns ------- tar_names : list list of strings of file names within the compressed folder """ ftp_stream = urllib.request.urlopen(tar_url) tar_file = tarfile.open(fileobj=ftp_stream, mode="r|gz") tar_file.extractall(path=dump_dir) tar_names = tar_file.getnames() return tar_names
24,281
def check_auth(*args, **kwargs): """A tool that looks in config for 'auth.require'. If found and it is not None, a login is required and the entry is evaluated as a list of conditions that the user must fulfill""" conditions = cherrypy.request.config.get('auth.require', None) if conditions is not None: username = cherrypy.session.get(SESSION_KEY) if username: cherrypy.request.login = username for condition in conditions: # A condition is just a callable that returns true or false result = condition() #PP. will return a reason message instead of boolean if check failed if result != True: raise cherrypy.HTTPError(403, result) else: raise cherrypy.HTTPRedirect("/auth/login?destination=" + cherrypy.url())
24,282
def _optimal_shift(pos, r_pad, log): """ Find the shift for the periodic unit cube that would minimise the padding. """ npts, ndim = pos.shape # +1 whenever a region starts, -1 when it finishes start_end = empty(npts*2, dtype=np.int32) start_end[:npts] = 1 start_end[npts:] = -1 pad_min = [] # Go along each axis, find the point that would require least padding for ax in range(ndim): start_reg = pos[:,ax] - r_pad end_reg = pos[:,ax] + r_pad # make periodic start_reg -= floor(start_reg) end_reg -= floor(end_reg) # Order from 0-1, add 1 whenever we come into range of a new point, -1 # whenever we leave idx_sort = argsort(concatenate([start_reg, end_reg])) region_change = cumsum(start_end[idx_sort]) # Find the minimum min_chg = argmin(region_change) # Note since this is the minimum trough: # start_end[idx_sort[min_chg]==-1 (a trough) # start_end[idx_sort[min_chg+1]] == +1 (otherwise it wasnt the minimum) trough0 = end_reg[idx_sort[min_chg]-npts] # has to be a -1 (i.e. region end) if min_chg+1==2*npts: trough1 = start_reg[idx_sort[0]]+1 mid_trough = 0.5 * (trough0 + trough1) mid_trough -= floor(mid_trough) else: trough1 = start_reg[idx_sort[min_chg+1]] mid_trough = 0.5 * (trough0 + trough1) pad_min.append(mid_trough) shift = array([1.0-x for x in pad_min], dtype=pos.dtype) print("Best shift", ', '.join('%.3f'%x for x in shift), file=log) return shift
24,283
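A minimal usage sketch for _optimal_shift; it assumes the function and the bare NumPy names it uses (empty, argsort, cumsum, argmin, floor, array, concatenate) are importable from the original module, and that positions live in the periodic unit cube.

import sys
import numpy as np

pos = np.random.default_rng(0).random((200, 3))   # points in the unit cube
shift = _optimal_shift(pos, r_pad=0.05, log=sys.stdout)
shifted = (pos + shift) % 1.0                      # apply the periodic shift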
def test_i(kind): """Test I methods. :param str kind: Type of string to test. """ instance = get_instance(kind, 'tantamount') assert instance.index('t') == 0 assert instance.index('t', 0) == 0 assert instance.index('t', 0, 1) == 0 assert instance.index('t', 1) == 3 assert instance.index('t', 1, 4) == 3 assert instance.index('m') == 5 with pytest.raises(ValueError): assert instance.index('t', 1, 3) with pytest.raises(ValueError): assert instance.index('x') assert instance.isalnum() is True assert get_instance(kind, '123').isalnum() is True assert get_instance(kind, '.').isalnum() is False assert instance.isalpha() is True assert get_instance(kind, '.').isalpha() is False if sys.version_info[0] != 2: assert instance.isdecimal() is False assert get_instance(kind, '123').isdecimal() is True assert get_instance(kind, '.').isdecimal() is False assert instance.isdigit() is False assert get_instance(kind, '123').isdigit() is True assert get_instance(kind, '.').isdigit() is False if sys.version_info[0] != 2: assert instance.isnumeric() is False assert get_instance(kind, '123').isnumeric() is True assert get_instance(kind, '.').isnumeric() is False assert instance.isspace() is False assert get_instance(kind, ' ').isspace() is True assert instance.istitle() is False assert get_instance(kind, 'Test').istitle() is True assert instance.isupper() is False assert get_instance(kind, 'TEST').isupper() is True
24,284
def ShowActStack(cmd_args=None): """ Routine to print out the stack of a specific thread. usage: showactstack <activation> """ if cmd_args == None or len(cmd_args) < 1: print "No arguments passed" print ShowAct.__doc__.strip() return False threadval = kern.GetValueFromAddress(cmd_args[0], 'thread *') print GetThreadSummary.header print GetThreadSummary(threadval) print GetThreadBackTrace(threadval, prefix="\t") return
24,285
def upload(dbx, fullname, dbx_folder, subfolder, name, overwrite=False): """Upload a file. Return the request response, or None in case of error. """ path = '/%s/%s/%s' % (dbx_folder, subfolder.replace(os.path.sep, '/'), name) while '//' in path: path = path.replace('//', '/') mode = (dropbox.files.WriteMode.overwrite if overwrite else dropbox.files.WriteMode.add) mtime = os.path.getmtime(fullname) with open(fullname, 'rb') as f: data = f.read() with stopwatch('upload %d bytes' % len(data)): try: res = dbx.files_upload( data, path, mode, client_modified=datetime.datetime(*time.gmtime(mtime)[:6]), mute=True) except dropbox.exceptions.ApiError as err: print('*** API error', err) return None print('uploaded as', res.name.encode('utf8')) return res
24,286
def load_request(possible_keys): """Given list of possible keys, return any matching post data""" pdata = request.json if pdata is None: pdata = json.loads(request.body.getvalue().decode('utf-8')) for k in possible_keys: if k not in pdata: pdata[k] = None # print('pkeys: %s pdata: %s' % (possible_keys, pdata)) return pdata
24,287
def get_random_tcp_start_pos(): """ reachability area: x = [-0.2; 0.4] y = [-0.28; -0.1] """ z_up = 0.6 tcp_x = round(random.uniform(-0.2, 0.4), 4) tcp_y = round(random.uniform(-0.28, -0.1), 4) start_tcp_pos = (tcp_x, tcp_y, z_up) # start_tcp_pos = (-0.2, -0.28, z_up) return start_tcp_pos
24,288
def CalculateChiv3p(mol): """ ################################################################# Calculation of valence molecular connectivity chi index for path order 3 ---->Chiv3 Usage: result=CalculateChiv3p(mol) Input: mol is a molecule object. Output: result is a numeric value ################################################################# """ return _CalculateChivnp(mol,NumPath=3)
24,289
def coerce(version: str) -> Tuple[Version, Optional[str]]: """ Convert an incomplete version string into a semver-compatible Version object * Tries to detect a "basic" version string (``major.minor.patch``). * If not enough components can be found, missing components are set to zero to obtain a valid semver version. :param str version: the version string to convert :return: a tuple with a :class:`Version` instance (or ``None`` if it's not a version) and the rest of the string which doesn't belong to a basic version. :rtype: tuple(:class:`Version` | None, str) """ match = BASEVERSION.search(version) if not match: return (None, version) ver = { key: 0 if value is None else value for key, value in match.groupdict().items() } ver = Version(**ver) rest = match.string[match.end():] # noqa:E203 return ver, rest
24,290
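A minimal usage sketch for coerce; it assumes the module-level BASEVERSION regex and the semver-compatible Version class the function relies on are in scope, and the version strings are only illustrative.

version, rest = coerce("1.2")
print(version, repr(rest))   # roughly 1.2.0 and '' (missing patch filled with 0)

version, rest = coerce("3.7.1-rc.1+build.4")
print(version, repr(rest))   # 3.7.1 plus the non-basic tail '-rc.1+build.4'

print(coerce("no digits here"))  # (None, 'no digits here') when nothing matches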
def choose_wyckoff(wyckoffs, number): """ choose the wyckoff sites based on the current number of atoms rules 1, the number of newly added sites must be less than or equal to the required number 2, prefer the sites with large multiplicity """ for wyckoff in wyckoffs: if len(wyckoff[0]) <= number: return choose(wyckoff) return False
24,291
def _to_system(abbreviation): """Converts an abbreviation to a system identifier. Args: abbreviation: a `pronto.Term.id` Returns: a system identifier """ try: return { 'HP': 'http://www.human-phenotype-ontology.org/' }[abbreviation] except KeyError: raise RuntimeError( 'system abbreviation \'%s\' is not supported' % abbreviation)
24,292
def apply_gate(circ: QuantumCircuit, qreg: QuantumRegister, gate: GateObj, parameterise: bool = False, param: Union[Parameter, tuple] = None): """Applies a gate to a quantum circuit. More complicated gates such as RXX gates should be decomposed into single qubit gates and CNOTs prior to calling this function. If parameterise is True, then qiskit's placeholder parameter theta will be used in place of any explicit parameters. """ if not isinstance(gate.qubits, list): q = gate.qubits params = gate.params if gate.name == 'I': pass elif gate.name == 'H': circ.h(qreg[q]) elif gate.name == 'HSdag': circ.h(qreg[q]) circ.s(qreg[q]) circ.h(qreg[q]) elif gate.name == 'X': circ.x(qreg[q]) elif gate.name == 'Y': circ.y(qreg[q]) elif gate.name == 'Z': circ.z(qreg[q]) elif gate.name == 'RX': if parameterise: circ.rx(param, qreg[q]) else: circ.rx(params, qreg[q]) elif gate.name == 'RY': if parameterise: circ.ry(param, qreg[q]) else: circ.ry(params, qreg[q]) elif gate.name == 'RZ': if parameterise: circ.rz(param, qreg[q]) else: circ.rz(params, qreg[q]) elif gate.name == 'U3': if parameterise: _params = [i for i in param] circ.u3(_params[0], _params[1], _params[2], qreg[q]) else: circ.u3(params[0], params[1], params[2], qreg[q]) else: cntrl = gate.qubits[0] trgt = gate.qubits[1] circ.cx(qreg[cntrl], qreg[trgt]) return circ
24,293
def write_MOM6_solo_mosaic_file(mom6_grid): """Write the "solo mosaic" file, which describes to the FMS infrastructure where to find the grid file(s). Based on tools in version 5 of MOM (http://www.mom-ocean.org/).""" # NOTE: This function is very basic, since we're skipping the # finding of "contact regions" between the tiles that the real # make_solo_mosaic tool performs. It's not needed right now, # since we only have one (regional) tile, but I think this feature # will be needed if we ever use a tripolar grid. with netCDF4.Dataset(mom6_grid['filenames']['mosaic'], 'w', format='NETCDF3_CLASSIC') as mosaic_ds: # Dimenisons mosaic_ds.createDimension('ntiles', 1) mosaic_ds.createDimension('string', mom6_grid['netcdf_info']['string_length']) # Variables & Values hmosaic = mosaic_ds.createVariable('mosaic', 'c', ('string',)) hmosaic.standard_name = 'grid_mosaic_spec' hmosaic.children = 'gridtiles' hmosaic.contact_regions = 'contacts' hmosaic.grid_descriptor = '' dirname,filename = os.path.split(mom6_grid['filenames']['mosaic']) filename,ext = os.path.splitext(filename) hmosaic[:len(filename)] = filename hgridlocation = mosaic_ds.createVariable('gridlocation', 'c', ('string',)) hgridlocation.standard_name = 'grid_file_location' this_dir = mom6_grid['filenames']['directory'] hgridlocation[:len(this_dir)] = this_dir hgridfiles = mosaic_ds.createVariable('gridfiles', 'c', ('ntiles', 'string',)) hgridfiles[0, :len(mom6_grid['filenames']['supergrid'])] = mom6_grid['filenames']['supergrid'] hgridtiles = mosaic_ds.createVariable('gridtiles', 'c', ('ntiles', 'string',)) hgridtiles[0, :len(mom6_grid['netcdf_info']['tile_str'])] = mom6_grid['netcdf_info']['tile_str'] # Global attributes _add_global_attributes(mom6_grid, mosaic_ds)
24,294
def preprocess_datasets(data: str, seed: int = 0) -> Tuple: """Load and preprocess raw datasets (Yahoo! R3 or Coat).""" if data == 'yahoo': with codecs.open(f'../data/{data}/train.txt', 'r', 'utf-8', errors='ignore') as f: data_train = pd.read_csv(f, delimiter='\t', header=None) data_train.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True) with codecs.open(f'../data/{data}/test.txt', 'r', 'utf-8', errors='ignore') as f: data_test = pd.read_csv(f, delimiter='\t', header=None) data_test.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True) for _data in [data_train, data_test]: _data.user, _data.item = _data.user - 1, _data.item - 1 elif data == 'coat': col = {'level_0': 'user', 'level_1': 'item', 2: 'rate', 0: 'rate'} with codecs.open(f'../data/{data}/train.ascii', 'r', 'utf-8', errors='ignore') as f: data_train = pd.read_csv(f, delimiter=' ', header=None) data_train = data_train.stack().reset_index().rename(columns=col) data_train = data_train[data_train.rate.values != 0].reset_index(drop=True) with codecs.open(f'../data/{data}/test.ascii', 'r', 'utf-8', errors='ignore') as f: data_test = pd.read_csv(f, delimiter=' ', header=None) data_test = data_test.stack().reset_index().rename(columns=col) data_test = data_test[data_test.rate.values != 0].reset_index(drop=True) test = data_test.values train, val = train_test_split(data_train.values, test_size=0.1, random_state=seed) num_users, num_items = train[:, 0].max() + 1, train[:, 1].max() + 1 return train, val, test, num_users, num_items
24,295
def loadSentimentVector(file_name): """ Load sentiment vector [Surprise, Sorrow, Love, Joy, Hate, Expect, Anxiety, Anger] """ contents = [ line.strip('\n').split() for line in open(file_name, 'r').readlines() ] sentiment_dict = { line[0].decode('utf-8'): [float(w) for w in line[1:]] for line in contents } return sentiment_dict
24,296
def unique_boxes(boxes, scale=1.0): """Return indices of unique boxes.""" v = np.array([1, 1e3, 1e6, 1e9]) hashes = np.round(boxes * scale).dot(v) _, index = np.unique(hashes, return_index=True) return np.sort(index)
24,297
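A minimal usage sketch for unique_boxes; the boxes are hypothetical (x1, y1, x2, y2) rows, and exact duplicates collapse to the index of their first occurrence.

import numpy as np

boxes = np.array([[0, 0, 10, 10],
                  [0, 0, 10, 10],   # duplicate of row 0
                  [5, 5, 20, 20]], dtype=float)
print(unique_boxes(boxes))  # [0 2]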
def compscan_key(compscan): """List of strings that identifies compound scan.""" # Name of data set that contains compound scan path = compscan.scans[0].path filename_end = path.find('.h5') dataset_name = os.path.basename(path[:filename_end]) if filename_end > 0 else os.path.basename(path) # Time when compound scan is exactly half-way through its operation (i.e. 50% complete) middle_time = np.median(np.hstack([scan.timestamps for scan in compscan.scans]), axis=None) return compscan.dataset.antenna.name, dataset_name, compscan.target.name, str(katpoint.Timestamp(middle_time))
24,298
def HEX2DEC(*args) -> Function: """ Converts a signed hexadecimal number to decimal format. Learn more: https://support.google.com/docs/answer/3093192 """ return Function("HEX2DEC", args)
24,299