content
stringlengths
22
815k
id
int64
0
4.91M
def build_si(cp, instruction):
    """
    Build the integer representation of the source indices.

    :param cp: CoreParameters instance for the target architecture
    :param instruction: Instruction instance
    :return: integer representation of si
    :raises AssemblyException: if more than 3 sources are given, or the
        packed value does not fit in ``cp.si_width`` bits.
    """
    # Enforce the architectural limit on the number of sources.
    if len(instruction.source_indices) > 3:
        exception_string = f"The {len(instruction.source_indices)} sources exceed the architecture's specified " \
                           + f"maximum of 3."
        raise AssemblyException(exception_string)

    # Fold the indices into a single integer, highest index in the most
    # significant field, each field cp.single_si_width bits wide.
    si = 0
    for source_index in reversed(instruction.source_indices):
        si = (si << cp.single_si_width) | source_index

    # Check sizing, and return the integer.
    si = int(si)
    if si.bit_length() > cp.si_width:
        raise AssemblyException("si exceeds its allotted bit width.")
    return si
5,325,200
def getRegSampledPrfFitsByOffset(prfArray, colOffset, rowOffset):
    """Extract one regularly-sampled PRF image from an interleaved PRF array.

    The 13x13 pixel PRFs at each grid location are sampled on a 9x9
    intra-pixel grid, to describe how the PRF changes as the star moves by a
    fraction of a pixel in row or column. To extract a single PRF you must
    address the 117x117 array (117 = 13x9) by pulling out every 9th element,
    i.e.

    .. code-block:: python

        img = array[ [colOffset, colOffset+9, colOffset+18, ...],
                     [rowOffset, rowOffset+9, ...] ]

    Inputs
    ------
    prfArray
        117x117 interleaved PRF array (any shape divisible by 9 works)
    colOffset, rowOffset
        The offset used to address the column and row in the interleaved PRF

    Returns
    -------
    prf
        13x13 PRF image for the specified column and row offset
    """
    gridSize = 9
    assert colOffset < gridSize
    assert rowOffset < gridSize

    # Number of pixels in the regularly sampled PRF. Should be 13x13.
    # Fix: use integer floor division instead of float division + np.int;
    # np.int was deprecated in NumPy 1.20 and removed in 1.24.
    nColOut, nRowOut = prfArray.shape
    nColOut //= gridSize
    nRowOut //= gridSize

    # Every gridSize-th sample, starting at the requested sub-pixel offset.
    iCol = colOffset + np.arange(nColOut, dtype=int) * gridSize
    iRow = rowOffset + np.arange(nRowOut, dtype=int) * gridSize

    tmp = prfArray[iRow, :]
    prf = tmp[:, iCol]
    return prf
5,325,201
def generate_list_display(object, attrs):
    """Generate a display string for an object based on some attributes.

    Args:
        object: An object which has specific attributes.
        attrs: An iterable of strings naming attributes to read from the
            object.

    Returns:
        A newline-separated string with one "attr: value" entry per
        attribute, where the attribute name is rendered bold via click.
    """
    entries = []
    for attr in attrs:
        label = click.style(attr, bold=True)
        entries.append(label + ": %s" % getattr(object, attr))
    return "\n".join(entries)
5,325,202
def create_temporary_config_file():
    """Create a minimal TOML config file with default values.

    Returns the path of a NamedTemporaryFile (delete=False, so the caller
    is responsible for removing it) containing a single "name" key.
    """
    cfg = document()
    cfg.add("name", "Test Suite")
    handle = tempfile.NamedTemporaryFile(delete=False)
    with handle:
        handle.write(dumps(cfg).encode("utf-8"))
    return handle.name
5,325,203
def csv_to_sql(directory=RAW_TWEET_DIR, **kwargs):
    """ csv to sql pipeline using pandas

    Reads every CSV in `directory` not yet marked "_read", appends its rows
    to the MySQL `tweets` table, then renames the file with a "_read" suffix
    so it is skipped on subsequent runs.

    params:
        directory: str (file path to csv files)
        **kwargs: unused here; accepted for task/operator compatibility.
    """
    dbconn = MySqlHook(mysql_conn_id="mysql_default")
    conn = dbconn.get_connection()
    # NOTE(review): `cursor` is never used below — looks like leftover code;
    # confirm before removing.
    cursor = conn.cursor()
    for fname in glob.glob("{}/*.csv".format(directory)):
        # Files already processed carry "_read" in their name; skip them.
        if "_read" not in fname:
            try:
                df = pd.read_csv(fname)
                # NOTE(review): to_sql is handed the hook object rather than a
                # DBAPI connection / SQLAlchemy engine — verify this works with
                # the pinned pandas/Airflow versions.
                df.to_sql("tweets", dbconn, if_exists="append", index=False)
                # Rename so the file is not ingested twice.
                shutil.move(fname, fname.replace(".csv", "_read.csv"))
            except pd.io.common.EmptyDataError:
                # probably an io error with another task / open file
                continue
5,325,204
def shear(image, shear_factor, **kwargs):
    """Shear an image with scikit-image's AffineTransform.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float shear_factor: Shear factor [0, 1]
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
      warp function, e.g. order=1 for linear interpolation.
    :return: Sheared image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    tform = skt.AffineTransform(shear=shear_factor)
    warped = skt.warp(image, tform, preserve_range=True, **kwargs)
    return warped.astype('uint8')
5,325,205
def segments():
    """Yields all segments in the unannotated training Qatar-Living dataset."""
    return (seg for doc in documents() for seg in doc.segments)
5,325,206
def cleanup() -> None:
    """Public wrapper for `_onwiki_logger.cleanup()`."""
    performed = _onwiki_logger.cleanup()
    if performed:
        print("Cleaning up RfD log on-wiki")
5,325,207
def get_options_between_dates( start_date, end_date):
    """Return option dates between two dates, formatted as YYYY-MM-DD.

    :param start_date: start date (inclusive)
    :param end_date: end date (inclusive)
    """
    matches = []
    for record in historical_options():
        option_date = datetime.datetime.strptime(
            str(record), '%m-%d-%Y').date()
        if start_date <= option_date <= end_date:
            matches.append(option_date.strftime('%Y-%m-%d'))
    return matches
5,325,208
def setFontSizes(labelsize=20, fsize=17, titlesize=None, ax=None,):
    """ Update font sizes for a matplotlib plot.

    :param labelsize: font size for the x/y axis labels.
    :param fsize: font size for the tick labels.
    :param titlesize: optional axes-title font size; unchanged when None.
    :param ax: axes to modify; defaults to the current axes.
    """
    if ax is None:
        ax = plt.gca()
    # NOTE(review): `tick.label` was removed in recent matplotlib releases
    # (tick.label1 is the accessor) — confirm against the pinned version.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(fsize)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(fsize)
    for x in [ax.xaxis.label, ax.yaxis.label]: # ax.title,
        x.set_fontsize(labelsize)
    # Also set tick sizes via the pyplot state machine; overlaps the loops
    # above but covers the current axes.
    plt.tick_params(axis='both', which='major', labelsize=fsize)
    if titlesize is not None:
        ax.title.set_fontsize(titlesize)
    plt.draw()
5,325,209
def format_timestamp(timestamp_str, datetime_formatter):
    """Parse and stringify a timestamp to a specified format.

    Args:
        timestamp_str (str): A timestamp.
        datetime_formatter (str): A strftime format string.

    Returns:
        str: The formatted, stringified timestamp, or None if the input
        could not be parsed/formatted.
    """
    try:
        if '"' in timestamp_str or '\'' in timestamp_str:
            # Make sure the timestamp is not surrounded by any quotes
            timestamp_str = timestamp_str.replace('"', '')
            timestamp_str = timestamp_str.replace('\'', '')
        formatted_timestamp = (
            dateutil_parser.parse(timestamp_str).strftime(datetime_formatter))
    except (TypeError, ValueError) as e:
        # Fix: Logger.warn is a deprecated alias; warning() is the supported
        # spelling.
        LOGGER.warning('Unable to parse/format timestamp: %s\n,'
                       ' datetime_formatter: %s\n%s',
                       timestamp_str, datetime_formatter, e)
        formatted_timestamp = None
    return formatted_timestamp
5,325,210
def test_now():
    """Tests creating a Time object with the `now` class method."""
    before = datetime.datetime.utcnow()
    t = Time.now()

    assert t.format == 'datetime'
    assert t.scale == 'utc'

    # Allow a .1 second margin between the `utcnow` call and the `Time`
    # initializer — far more generous than the typical microsecond gap, but
    # safer on platforms with slow clock calls.
    elapsed = t.datetime - before  # a datetime.timedelta object
    assert elapsed.total_seconds() < 0.1
5,325,211
def post_worker_init(worker):
    """
    Launch a process to listen worker after gunicorn worker is initialized.

    Children processes of gunicorn worker should be killed when worker has
    been killed because gunicorn master murders this worker for some reasons
    such as worker timeout.

    Args:
        worker (ThreadWorker): worker instance.
    """
    def murder_worker_children_processes():
        # Exit this listener cleanly if it receives SIGTERM itself.
        signal.signal(
            signal.SIGTERM, lambda signal_num, handler: sys.exit(0))
        processes_to_kill = []
        # sleep 3 seconds so that all worker children processes have been launched.
        time.sleep(3)
        process = psutil.Process(worker.pid)
        for child in process.children(recursive=True):
            # Track every worker child except this listener process itself.
            if child.pid != os.getpid():
                processes_to_kill.append(child)
        while True:
            # When the worker dies this listener is re-parented, so its parent
            # pid stops matching the worker's pid.
            if os.getppid() != worker.pid:
                # Kill the remaining sub-processed after the worker process died
                _, alive = psutil.wait_procs(processes_to_kill, 0.1)
                current_worker_pid = os.getppid()
                for proc in alive:
                    worker.log.info("Original worker pid: %d, current worker pid: %d, stop process %d",
                                    worker.pid, current_worker_pid, proc.pid)
                    try:
                        proc.send_signal(signal.SIGKILL)
                    except psutil.NoSuchProcess:
                        # Already gone; nothing to do.
                        continue
                    except psutil.Error as ex:
                        worker.log.error("Stop process %d failed. Detail: %s.", proc.pid, str(ex))
                worker.log.info("%d processes have been terminated by listener.", len(alive))
                break
            time.sleep(1)

    listen_process = multiprocessing.Process(target=murder_worker_children_processes,
                                             name="murder_worker_children_processes")
    listen_process.start()
    # Keep a module-level handle so the listener can be managed elsewhere.
    global LISTEN_PROCESS
    LISTEN_PROCESS = listen_process
    worker.log.info("Service start state: success.")
    worker.log.info("Server pid: %d, start to listening.", worker.ppid)
5,325,212
def dyn_stdev(val, prev_stdev, prev_mean, n):
    """Incrementally update a standard deviation with one new sample.

    Computes the standard deviation from a previous stdev plus a new value,
    which avoids keeping the full sample array in memory.

    Keyword arguments:
    val -- new value to fold into the statistics
    prev_stdev -- previous stdev value
    prev_mean -- previous mean value
    n -- number of total elements including the new val
    """
    if n < 1:
        raise ValueError("n < 1, stdev only defined for a positive number of elements")
    if n == 1:
        # A single sample has zero spread by definition.
        return 0
    updated_mean = dyn_mean(val, prev_mean, n)
    # Welford-style update of the summed squared deviations.
    sum_sq = (n - 1) * prev_stdev * prev_stdev + (val - prev_mean) * (val - updated_mean)
    return math.sqrt(sum_sq / float(n))
5,325,213
def get_detail_backtest_results(input_df, features, return_col_name='returns',
                                equity_identifier='Equity Parent', date_col_name='date',
                                n_bins=5, bin_labels=None, corr_method='spearman',
                                items_per_bin_deviation_threshold=1,
                                drop_months_outside_of_threshold=False):
    """
    Generate detailed back-testing results for a list of features.

    Date-range subsetting is not handled here; subset `input_df` to the
    desired range before calling. `bin_labels` are assumed to be in
    descending order, e.g. ['Q1','Q2','Q3','Q4'] means Q1 is the highest
    portfolio and Q4 the lowest. Binning works best with a sufficient number
    of unique feature values; `items_per_bin_deviation_threshold` controls
    how strictly the difference between expected and actual bin sizes is
    treated, and `drop_months_outside_of_threshold` decides whether months
    breaching the threshold are excluded from back testing.

    :param input_df: Type pandas dataframe. long format dataframe.
    :param features: Type list. feature column names to back-test.
    :param return_col_name: Type str. Name of the return column.
    :param equity_identifier: Type str. Name of the equity identifier column.
    :param date_col_name: Type str. Name of the date column.
    :param n_bins: Type int. number of bins to split the equities into.
    :param bin_labels: Type list. bin labels in descending order;
        defaults to ['Q1', ..., 'Qn'].
    :param corr_method: Type string. correlation method being used.
    :param items_per_bin_deviation_threshold: Type int. Permissible deviation
        from the expected number of items per bin.
    :param drop_months_outside_of_threshold: Type boolean. Decision to drop
        months that deviate beyond the acceptable threshold.
    :return: Type pandas dataframe. detail backtesting results for each period.
    """
    if bin_labels is None:
        bin_labels = ['Q' + str(i + 1) for i in range(n_bins)]
    df_long = input_df.copy()
    long_cols = list(df_long.columns)
    # Promote the index to a 'date' column when no date column is present.
    if date_col_name not in long_cols:
        df_long = df_long.reset_index()
        df_long.rename(columns={'index': 'date'}, inplace=True)
    # NOTE(review): this mutates the caller's `features` list in place —
    # confirm that is intended.
    if return_col_name in features:
        features.remove(return_col_name)
    detail_results = []
    features = sorted(features)
    feature_cnt = 0
    total_features = len(features)
    print('Total features for processing: ' + str(total_features))
    warnings.formatwarning = custom_formatwarning
    for feature in features:
        # Category is the feature name up to any '_bshift' suffix.
        category = feature.split('_bshift')[0]
        feature_cols = [equity_identifier, date_col_name, return_col_name, feature]
        df_feature_detail = df_long[feature_cols].copy()
        df_feature_detail = get_ranks(df_feature_detail, date_col_name, feature)
        df_feature_detail = add_bins_col_to_rank_df(df_feature_detail, n_bins)
        # Months whose maximum bin number is below n_bins could not be split
        # into the requested number of bins.
        df_bin_check = pd.DataFrame(df_feature_detail.groupby(date_col_name)['bin_no'].max())
        bin_check_mask = df_bin_check['bin_no'] != n_bins
        insufficient_bins_dates = [item.date().strftime("%Y-%m-%d")
                                   for item in df_bin_check[bin_check_mask].index.tolist()]
        if len(insufficient_bins_dates) > 0:
            warnings.warn('\nInsufficient bins warning:\nFeature: ' + feature+'\n'+'\n'
                          + 'Months with insufficient bins:' + str(insufficient_bins_dates) + '\n' + '\n'
                          + 'These months are excluded from the back testing computation')
            df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(insufficient_bins_dates)]
            print(df_feature_detail.shape)
        total_no_of_items = df_feature_detail[equity_identifier].unique().shape[0]
        expected_no_of_items_per_bin = total_no_of_items/n_bins
        # Check only the extreme (top/bottom) portfolios against the
        # per-bin deviation threshold.
        mask_bin_lowest = df_feature_detail['bin_no'] == 1
        mask_bin_highest = df_feature_detail['bin_no'] == n_bins
        df_bin_lowest = df_feature_detail[mask_bin_lowest].copy()
        df_bin_highest = df_feature_detail[mask_bin_highest].copy()
        bin_lowest_bad_dates = get_dates_deviating_from_threshold(df_bin_lowest, date_col_name,
                                                                  equity_identifier,
                                                                  items_per_bin_deviation_threshold,
                                                                  expected_no_of_items_per_bin)
        bin_highest_bad_dates = get_dates_deviating_from_threshold(df_bin_highest, date_col_name,
                                                                   equity_identifier,
                                                                   items_per_bin_deviation_threshold,
                                                                   expected_no_of_items_per_bin)
        if len(bin_lowest_bad_dates) > 0 or len(bin_highest_bad_dates) > 0:
            warnings.warn('\nDeviation from threshold warning:\nFeature: ' + feature+'\n'+'\n'
                          + 'Top Portfolio - Months which deviate from threshold: '+str(bin_highest_bad_dates)+'\n'+'\n'
                          + 'Bottom Portfolio - Months which deviate from threshold: '+str(bin_lowest_bad_dates))
            if drop_months_outside_of_threshold:
                months_to_drop = bin_lowest_bad_dates + bin_highest_bad_dates
                warnings.warn('\nMonths dropped warning:\nFeature: ' + feature + '\n'+'\n'
                              + 'Months: '+str(months_to_drop) + ' will be dropped from computation')
                df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(months_to_drop)]
        df_feature_detail_agg = compute_date_level_metrics(df_feature_detail, bin_labels, date_col_name,
                                                           return_col_name, feature, corr_method)
        df_feature_detail_agg['feature'] = feature
        df_feature_detail_agg['category'] = category
        detail_results.append(df_feature_detail_agg)
        feature_cnt += 1
        if feature_cnt % 100 == 0:
            print(str(feature_cnt) + ' features completed')
    detail_results_df = pd.concat(detail_results)
    return detail_results_df
5,325,214
def check_one(filename): """Given a filename, return None or restart. """ # The file may have been removed from the filesystem. # =================================================== if not os.path.isfile(filename): if filename in mtimes: aspen.log("File deleted: %s" % filename) execute() else: # We haven't seen the file before. It has probably been loaded # from a zip (egg) archive. return # Or not, in which case, check the modification time. # =================================================== mtime = os.stat(filename).st_mtime if filename not in mtimes: # first time we've seen it mtimes[filename] = mtime if mtime > mtimes[filename]: aspen.log("File changed: %s" % filename) execute()
5,325,215
def test_create_flat_file(input_value):
    """
    Test whether the file is created with the given number of lines and
    the given number of characters per line.

    :param input_value: tuple/list of (file_name, _, number_of_lines,
        tot_num_char).
    :return:
    """
    file_name = input_value[0]
    number_of_lines = input_value[2]
    tot_num_char = input_value[3]
    create_flat_file.create_flat_file(file_name, number_of_lines, tot_num_char)
    assert os.path.exists(file_name)
    line_count = 0
    # Fix: open the file via a context manager so the handle is closed
    # (the original leaked it), and name the loop variable for what it is.
    with open(file_name) as handle:
        for line in handle:
            line_count += 1
            # Each line carries a trailing newline, hence len(line) - 1.
            assert len(line) - 1 == tot_num_char, "total characters not generated as expected"
    assert line_count == number_of_lines
5,325,216
def sid_invalid_retry(retry=1):
    """Decorator factory that retries an API call after re-login on auth failure.

    An authentication failure usually means the stored token was invalidated
    by a login on the phone. A decorated API method that raises
    JoyrunSidInvalidError is re-invoked after calling ``self.login()``, at
    most ``retry`` times.

    Args:
        retry (int): number of retry attempts; defaults to 1.

    Raises:
        JoyrunSidInvalidError: if authentication still fails after the retry
            limit is reached.
    """
    def func_wrapper(func):
        @wraps(func)
        def return_wrapper(self, *args, **kwargs):
            count = 0
            while True:
                try:
                    return func(self, *args, **kwargs)
                except JoyrunSidInvalidError:  # raised on authentication failure
                    count += 1
                    if count > retry:
                        break
                    self.logger.debug("sid invalid, retry %s" % count)
                    # Re-login, then loop to retry the wrapped call.
                    self.login()
                # Fix: removed the redundant `except Exception as err:
                # raise err` clause — unhandled exceptions propagate anyway,
                # and the explicit re-raise only added a traceback frame.
            raise JoyrunSidInvalidError("reach retry limit %s" % retry)
        return return_wrapper
    return func_wrapper
5,325,217
def _check_predictions(original_model, model, X, y):
    """
    Check to make sure both models produce the same predictions.

    :param original_model: fitted reference model exposing
        predict/predict_proba.
    :param model: parsed/converted model to validate against the reference;
        its `objective` selects the comparison mode.
    :param X: feature matrix fed to both models.
    :param y: labels; unused, kept for interface compatibility.
    """
    if model.objective == 'regression':
        p1 = original_model.predict(X)
        X = check_input_data(X)
        p2 = model.predict(X)[:, 0]
    elif model.objective == 'binary':
        # Compare the positive-class probability for binary classification.
        p1 = original_model.predict_proba(X)[:, 1]
        X = check_input_data(X)
        p2 = model.predict(X)[:, 0]
    else:
        assert model.objective == 'multiclass'
        p1 = original_model.predict_proba(X)
        X = check_input_data(X)
        p2 = model.predict(X)
    # Fix: the original wrapped the assert in a bare `except:`, which also
    # swallowed KeyboardInterrupt/SystemExit. Test the condition directly and
    # warn (rather than fail) when predictions drift beyond tolerance.
    if not np.allclose(p1, p2):
        max_diff = np.max(np.abs(p1 - p2))
        print(f'[WARNING] Parsed model predictions differ '
              f'significantly from original, max. diff.: {max_diff:.8f}')
5,325,218
def write_cfradial(filename, radar, format='NETCDF4', time_reference=None,
                   arm_time_variables=False):
    """
    Write a Radar object to a CF/Radial compliant netCDF file.

    The files produced by this routine follow the `CF/Radial standard`_.
    Attempts are also made to to meet many of the standards outlined in the
    `ARM Data File Standards`_.

    .. _CF/Radial standard: http://www.ral.ucar.edu/projects/titan/docs/radial_formats/cfradial.html
    .. _ARM Data File Standards: https://docs.google.com/document/d/1gBMw4Kje6v8LBlsrjaGFfSLoU0jRx-07TIazpthZGt0/edit?pli=1

    To control how the netCDF variables are created, set any of the
    following keys in the radar attribute dictionaries.

        * _Zlib
        * _DeflateLevel
        * _Shuffle
        * _Fletcher32
        * _Continguous
        * _ChunkSizes
        * _Endianness
        * _Least_significant_digit
        * _FillValue

    See the netCDF4 documentation for details on these settings.

    Parameters
    ----------
    filename : str
        Filename to create.
    radar : Radar
        Radar object.
    format : str, optional
        NetCDF format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
        'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. See netCDF4 documentation for
        details.
    time_reference : bool
        True to include a time_reference variable, False will not include
        this variable. The default, None, will include the time_reference
        variable when the first time value is non-zero.
    arm_time_variables : bool
        True to create the ARM standard time variables base_time and
        time_offset, False will not create these variables.
    """
    dataset = netCDF4.Dataset(filename, 'w', format=format)

    # determine the maximum string length
    max_str_len = len(radar.sweep_mode['data'][0])
    for k in ['follow_mode', 'prt_mode', 'polarization_mode']:
        if ((radar.instrument_parameters is not None) and
                (k in radar.instrument_parameters)):
            sdim_length = len(radar.instrument_parameters[k]['data'][0])
            max_str_len = max(max_str_len, sdim_length)
    str_len = max(max_str_len, 32)      # minimum string legth of 32

    # create time, range and sweep dimensions
    dataset.createDimension('time', None)
    dataset.createDimension('range', radar.ngates)
    dataset.createDimension('sweep', radar.nsweeps)
    dataset.createDimension('string_length', str_len)

    # global attributes
    # remove global variables from copy of metadata
    metadata_copy = dict(radar.metadata)
    global_variables = ['volume_number', 'platform_type', 'instrument_type',
                        'primary_axis', 'time_coverage_start',
                        'time_coverage_end', 'time_reference']
    for var in global_variables:
        if var in metadata_copy:
            metadata_copy.pop(var)

    # determine the history attribute if it doesn't exist, save for
    # the last attribute.
    if 'history' in metadata_copy:
        history = metadata_copy.pop('history')
    else:
        user = getpass.getuser()
        node = platform.node()
        time_str = datetime.datetime.now().isoformat()
        t = (user, node, time_str)
        history = 'created by %s on %s at %s using Py-ART' % (t)

    dataset.setncatts(metadata_copy)

    if 'Conventions' not in dataset.ncattrs():
        dataset.setncattr('Conventions', "CF/Radial")

    if 'field_names' not in dataset.ncattrs():
        dataset.setncattr('field_names', ', '.join(radar.fields.keys()))

    # history should be the last attribute, ARM standard
    dataset.setncattr('history', history)

    # arm time variables base_time and time_offset if requested
    if arm_time_variables:
        dt = netCDF4.num2date(radar.time['data'][0], radar.time['units'])
        td = dt - datetime.datetime.utcfromtimestamp(0)
        base_time = {
            'data': np.array([td.seconds + td.days * 24 * 3600], 'int32'),
            'string': dt.strftime('%d-%b-%Y,%H:%M:%S GMT'),
            'units': 'seconds since 1970-1-1 0:00:00 0:00',
            'ancillary_variables': 'time_offset',
            'long_name': 'Base time in Epoch',
        }
        _create_ncvar(base_time, dataset, 'base_time', ())
        time_offset = {
            'data': radar.time['data'],
            'long_name': 'Time offset from base_time',
            'units': radar.time['units'].replace('T', ' ').replace('Z', ''),
            'ancillary_variables': 'time_offset',
            'calendar': 'gregorian',
        }
        _create_ncvar(time_offset, dataset, 'time_offset', ('time', ))

    # standard variables
    _create_ncvar(radar.time, dataset, 'time', ('time', ))
    _create_ncvar(radar.range, dataset, 'range', ('range', ))
    _create_ncvar(radar.azimuth, dataset, 'azimuth', ('time', ))
    _create_ncvar(radar.elevation, dataset, 'elevation', ('time', ))

    # optional sensor pointing variables
    if radar.scan_rate is not None:
        _create_ncvar(radar.scan_rate, dataset, 'scan_rate', ('time', ))
    if radar.antenna_transition is not None:
        _create_ncvar(radar.antenna_transition, dataset,
                      'antenna_transition', ('time', ))

    # fields
    for field, dic in radar.fields.items():
        _create_ncvar(dic, dataset, field, ('time', 'range'))

    # sweep parameters
    _create_ncvar(radar.sweep_number, dataset, 'sweep_number', ('sweep', ))
    _create_ncvar(radar.fixed_angle, dataset, 'fixed_angle', ('sweep', ))
    _create_ncvar(radar.sweep_start_ray_index, dataset,
                  'sweep_start_ray_index', ('sweep', ))
    _create_ncvar(radar.sweep_end_ray_index, dataset,
                  'sweep_end_ray_index', ('sweep', ))
    _create_ncvar(radar.sweep_mode, dataset, 'sweep_mode',
                  ('sweep', 'string_length'))
    if radar.target_scan_rate is not None:
        _create_ncvar(radar.target_scan_rate, dataset, 'target_scan_rate',
                      ('sweep', ))
    if radar.rays_are_indexed is not None:
        _create_ncvar(radar.rays_are_indexed, dataset, 'rays_are_indexed',
                      ('sweep', 'string_length'))
    if radar.ray_angle_res is not None:
        _create_ncvar(radar.ray_angle_res, dataset, 'ray_angle_res',
                      ('sweep', ))

    # instrument_parameters
    if ((radar.instrument_parameters is not None) and
            ('frequency' in radar.instrument_parameters.keys())):
        size = len(radar.instrument_parameters['frequency']['data'])
        dataset.createDimension('frequency', size)

    if radar.instrument_parameters is not None:
        for k in radar.instrument_parameters.keys():
            if k in _INSTRUMENT_PARAMS_DIMS:
                dim = _INSTRUMENT_PARAMS_DIMS[k]
            else:
                # Unknown parameters are written as scalar variables.
                dim = ()
            _create_ncvar(radar.instrument_parameters[k], dataset, k, dim)

    # radar_calibration variables
    if radar.radar_calibration is not None and radar.radar_calibration != {}:
        # Size the r_calib dimension from any non-index, non-time entry.
        size = [len(d['data']) for k, d in radar.radar_calibration.items()
                if k not in ['r_calib_index', 'r_calib_time']][0]
        dataset.createDimension('r_calib', size)
        for key, dic in radar.radar_calibration.items():
            if key == 'r_calib_index':
                dims = ('time', )
            elif key == 'r_calib_time':
                dims = ('r_calib', 'string_length')
            else:
                dims = ('r_calib', )
            _create_ncvar(dic, dataset, key, dims)

    # latitude, longitude, altitude, altitude_agl
    if radar.latitude['data'].size == 1:
        # stationary platform
        _create_ncvar(radar.latitude, dataset, 'latitude', ())
        _create_ncvar(radar.longitude, dataset, 'longitude', ())
        _create_ncvar(radar.altitude, dataset, 'altitude', ())
        if radar.altitude_agl is not None:
            _create_ncvar(radar.altitude_agl, dataset, 'altitude_agl', ())
    else:
        # moving platform
        _create_ncvar(radar.latitude, dataset, 'latitude', ('time', ))
        _create_ncvar(radar.longitude, dataset, 'longitude', ('time', ))
        _create_ncvar(radar.altitude, dataset, 'altitude', ('time', ))
        if radar.altitude_agl is not None:
            _create_ncvar(radar.altitude_agl, dataset, 'altitude_agl',
                          ('time', ))

    # time_coverage_start and time_coverage_end variables
    time_dim = ('string_length', )
    units = radar.time['units']
    start_dt = netCDF4.num2date(radar.time['data'][0], units)
    if start_dt.microsecond != 0:
        # truncate to nearest second
        start_dt -= datetime.timedelta(microseconds=start_dt.microsecond)
    end_dt = netCDF4.num2date(radar.time['data'][-1], units)
    if end_dt.microsecond != 0:
        # round up to next second
        end_dt += (datetime.timedelta(seconds=1) -
                   datetime.timedelta(microseconds=end_dt.microsecond))
    start_dic = {'data': np.array(start_dt.isoformat() + 'Z', dtype='S'),
                 'long_name': 'UTC time of first ray in the file',
                 'units': 'unitless'}
    end_dic = {'data': np.array(end_dt.isoformat() + 'Z', dtype='S'),
               'long_name': 'UTC time of last ray in the file',
               'units': 'unitless'}
    _create_ncvar(start_dic, dataset, 'time_coverage_start', time_dim)
    _create_ncvar(end_dic, dataset, 'time_coverage_end', time_dim)

    # time_reference is required or requested.
    if time_reference is None:
        if radar.time['data'][0] == 0:
            time_reference = False
        else:
            time_reference = True
    if time_reference:
        ref_dic = {'data': np.array(radar.time['units'][-20:], dtype='S'),
                   'long_name': 'UTC time reference',
                   'units': 'unitless'}
        _create_ncvar(ref_dic, dataset, 'time_reference', time_dim)

    # global variables
    # volume_number, required
    vol_dic = {'long_name': 'Volume number', 'units': 'unitless'}
    if 'volume_number' in radar.metadata:
        vol_dic['data'] = np.array([radar.metadata['volume_number']],
                                   dtype='int32')
    else:
        vol_dic['data'] = np.array([0], dtype='int32')
    _create_ncvar(vol_dic, dataset, 'volume_number', ())

    # platform_type, optional
    if 'platform_type' in radar.metadata:
        dic = {'long_name': 'Platform type',
               'data': np.array(radar.metadata['platform_type'], dtype='S')}
        _create_ncvar(dic, dataset, 'platform_type', ('string_length', ))

    # instrument_type, optional
    if 'instrument_type' in radar.metadata:
        dic = {'long_name': 'Instrument type',
               'data': np.array(radar.metadata['instrument_type'], dtype='S')}
        _create_ncvar(dic, dataset, 'instrument_type', ('string_length', ))

    # primary_axis, optional
    if 'primary_axis' in radar.metadata:
        dic = {'long_name': 'Primary axis',
               'data': np.array(radar.metadata['primary_axis'], dtype='S')}
        _create_ncvar(dic, dataset, 'primary_axis', ('string_length', ))

    # moving platform geo-reference variables
    if radar.rotation is not None:
        _create_ncvar(radar.rotation, dataset, 'rotation', ('time', ))
    if radar.tilt is not None:
        _create_ncvar(radar.tilt, dataset, 'tilt', ('time', ))
    if radar.roll is not None:
        _create_ncvar(radar.roll, dataset, 'roll', ('time', ))
    if radar.drift is not None:
        _create_ncvar(radar.drift, dataset, 'drift', ('time', ))
    if radar.heading is not None:
        _create_ncvar(radar.heading, dataset, 'heading', ('time', ))
    if radar.pitch is not None:
        _create_ncvar(radar.pitch, dataset, 'pitch', ('time', ))
    if radar.georefs_applied is not None:
        _create_ncvar(radar.georefs_applied, dataset, 'georefs_applied',
                      ('time', ))

    dataset.close()
5,325,219
def test_hash_get():  ## 2
    """Retrieving based on a key returns the value stored."""
    table = Hashtable()
    expected = 'what ever you want'
    table.add('Saleh', expected)
    assert table.find('Saleh') == expected
5,325,220
def check_train_balance(df, idx_train, keys):
    """
    Check the balance of the training set. If one of the two classes has
    more than 2 extra instances over the other, randomly remove ids from the
    major class (all rows belonging to a randomly picked id at a time) until
    the class counts are close.

    :param df: dataframe with 'targets' and 'id' columns.
    :param idx_train: positional indices of the training rows in df.
    :param keys: the two class labels to balance between.
    :return: possibly reduced copy of idx_train (unchanged when already
        balanced).
    """
    Counts = dict(Counter(df.iloc[idx_train]['targets'].values))
    if np.abs(Counts[keys[0]] - Counts[keys[1]]) > 2:
        if Counts[keys[0]] > Counts[keys[1]]:
            key_major = keys[0]
            # NOTE(review): key_minor is assigned but never used below.
            key_minor = keys[1]
        else:
            key_major = keys[1]
            key_minor = keys[0]
        # ids belonging to the over-represented class.
        ids_major = df.iloc[idx_train]['id'][df.iloc[idx_train]['targets'] == key_major]
        idx_train_new = idx_train.copy()
        for n in range(len(idx_train_new)):
            # Drop every training row for one randomly chosen major-class id.
            random_pick = np.random.choice(np.unique(ids_major), size=1)[0]
            # print(random_pick,np.unique(ids_major))
            idx_train_new = np.array([item for item, id_temp in
                                      zip(idx_train_new, df.iloc[idx_train_new]['id'])
                                      if (id_temp != random_pick)])
            ids_major = np.array([item for item in ids_major
                                  if (item != random_pick)])
            new_counts = dict(Counter(df.iloc[idx_train_new]['targets']))
            # Still imbalanced: re-identify the major class and keep dropping.
            if np.abs(new_counts[keys[0]] - new_counts[keys[1]]) > 3:
                if new_counts[keys[0]] > new_counts[keys[1]]:
                    key_major = keys[0]
                    key_minor = keys[1]
                else:
                    key_major = keys[1]
                    key_minor = keys[0]
                ids_major = df.iloc[idx_train_new]['id'][df.iloc[idx_train_new]['targets'] == key_major]
            elif np.abs(new_counts[keys[0]] - new_counts[keys[1]]) < 3:
                # Balanced enough; stop removing.
                break
        return idx_train_new
    else:
        return idx_train
5,325,221
def print_ascii(file_name):
    """
    Print the full contents of a text file from the ./ascii directory,
    character for character.

    The parameter file_name is type string; when calling the function the
    argument should look like "filename.txt".
    """
    path = "./ascii/" + file_name
    with open(path) as source:
        print(source.read())
5,325,222
def cli_podcast_play(podcast_name, episode_id):
    """
    CLI facing function to play the podcast episode, when passed podcast
    name and episode id, (NOT episode number).

    :param podcast_name: name of the podcast.
    :param episode_id: id(index) of episode.
    :return:
    """
    episode = None
    print("Wait for a few seconds ...\n")
    episode_list = podcast_extractor(podcast_name)
    # Nothing to do when no episodes could be extracted.
    if not episode_list:
        return
    try:
        episode = episode_list[episode_id]
    except IndexError:
        print("Episode id is incorrect")
    except Exception as e:
        print(e)
        print("Something went wrong, when getting podcast episode data to play.")
    else:
        # episode title without new line characters.
        cleaned_episode_title = episode.episode_title.replace("\n", "")
        print(
            f"Episode:: {episode.episode_number} ->> Title:: {cleaned_episode_title}\nDate:: {episode.date}"
        )
        player(episode.stream_url)
5,325,223
def eye(w, n):
    """Create an n-by-n diagonal matrix with w on the diagonal.

    Off-diagonal entries are 0.0*w so they carry the same type/units as w.
    """
    rows = []
    for r in range(n):
        rows.append([w if c == r else 0.0 * w for c in range(n)])
    return np.array(rows)
5,325,224
def Messaging():
    """
    Messaging

    Creates JMS resources. Only to use in a resource block.
    """
    outside_resources = state().block and state().block != 'resources'
    if outside_resources:
        raise SyntaxError('Messaging can only be used in a resources block')
    return subscope('messaging')
5,325,225
def time_series_plot_from_json(time_series_dict, single_day=False, save=False):
    """Given a time series dictionary, plot the trend in likelihood

    Arguments:
        time_series_dict {dict} -- format shown as below
            {
                "[time]": {
                    [room_number]: [likelihood]
                },
                ...
            }
            Keys are timestamps formatted "%Y%m%d_%H%M%S"; each inner dict
            maps a label (e.g. room number) to a value for that interval.

    Keyword Arguments:
        single_day {bool} -- label every interval (True) or every other
            interval (False) on the x axis (default: {False})
        save {bool} -- also write the figure to ./time_series_plt.png
            (default: {False})
    """
    fig, ax = plt.subplots()
    sorted_intervals = sorted(list(time_series_dict.keys()))
    formatted_intervals = [datetime.strptime(t, "%Y%m%d_%H%M%S") for t in sorted_intervals]
    start_time = formatted_intervals[0]
    formatted_start_time = datetime_to_string(start_time)
    if single_day:
        xticks = range(len(formatted_intervals))
        # show ticks only for every half hour period
        xtick_labels = [str(t.hour) + ":" + str(t.minute) for t in formatted_intervals]
        title = formatted_start_time
    else:
        end_time = formatted_intervals[-1]
        formatted_end_time = datetime_to_string(end_time)
        temp_labels = [str(t.hour) + ":" + str(t.minute) for t in formatted_intervals]
        num_intervals = len(formatted_intervals)
        xticks = range(0, num_intervals, 2)  # show ticks only for every hour period
        # BUG FIX: labels must come from the same (even) indices as the
        # ticks; the old `i % 2 != 0` filter selected the odd indices, so
        # labels were shifted by one interval and the label count could
        # mismatch the tick count.
        xtick_labels = [temp_labels[i] for i in range(num_intervals) if i % 2 == 0]
        title = formatted_start_time + " to " + formatted_end_time
    ax.set_title(title)
    ax.set_xlabel("Time")
    ax.set_xticks(xticks)
    ax.set_xticklabels(xtick_labels)
    plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
    ax.set_ylabel("Percentage of time spent in 30min interval")
    # One line per label, ordered by label name of the first interval.
    ytick_labels = sorted(list(time_series_dict[sorted_intervals[0]].keys()))
    for label in ytick_labels:
        y_values = [time_series_dict[interval][label] for interval in sorted_intervals]
        ax.plot(y_values, marker='o', label=label)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    fig.tight_layout()
    if save:
        plt.savefig("./time_series_plt.png")
    plt.show()
5,325,226
def timestamp2WP(timestamp):
    """
    Converts a Unix Epoch-based timestamp (seconds since Jan. 1st 1970 GMT)
    timestamp to one acceptable by Wikipedia.

    :Parameters:
        timestamp : int
            Unix timestamp to be converted
    :Return:
        string Wikipedia style timestamp (``YYYYMMDDHHMMSS``, UTC)
    """
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return moment.strftime('%Y%m%d%H%M%S')
5,325,227
def txt(solution1:List[List[int]],solution2:List[List[int]],matrix:List[List[int]]): """ Reorder matrix based on solution. Parameters ---------- solution1: clustering of rows as a list solution2: clustering of columns as a list matrix: matrix of the original relation Returns ------- None. Just prints the matrix to the screen. """ solr_len = [len(sublist) for sublist in solution1] stop=0 stopr = [stop:=stop+s for s in solr_len] flatr_list = [item-1 for group in solution1 for item in group] solc_len = [len(sublist) for sublist in solution2] stop=0 stopc = [stop:=stop+s for s in solc_len] flatc_list = [item-1 for group in solution2 for item in group] for i,x in enumerate(flatr_list): if i in stopr: print(" \n") else: print() print(f"{x+1:2}\t", end="") for j,y in enumerate(flatc_list): if j in stopc: print(" ", end="") print(f"{matrix[x][y]}", end="")
5,325,228
def get_available_node_types(nodenet_uid):
    """ Return a dict of available built-in node types and native module types

    :param nodenet_uid: uid of the nodenet whose node types are queried
    :return: tuple ``(True, result)`` -- the leading ``True`` marks the
        call as successful for the RPC layer; ``result`` is whatever
        ``runtime.get_available_node_types`` returns (presumably a dict --
        confirm against the runtime module).
    """
    return True, runtime.get_available_node_types(nodenet_uid)
5,325,229
def dos_element_orbitals(
    folder,
    element_orbital_dict,
    output='dos_element_orbitals.png',
    fill=True,
    alpha=0.3,
    linewidth=1.5,
    sigma=0.05,
    energyaxis='x',
    color_list=None,
    legend=True,
    total=True,
    figsize=(4, 3),
    erange=[-6, 6],
    spin='up',
    soc_axis=None,
    combination_method='add',
    fontsize=12,
    save=True,
    shift_efermi=0,
):
    """
    This function plots the element projected density of states on specific
    orbitals.

    Parameters:
        folder (str): This is the folder that contains the VASP files
        element_orbital_dict (dict[str:list]): A dictionary that contains the
            individual elements and the corresponding orbitals to project onto.
            For example, if the user wants to project onto the s, py, pz, and px
            orbitals of In and the s orbital of As for an InAs structure then
            the dictionary would be {'In':[0,1,2,3], 'As':[0]}
        output (str): File name of the resulting plot.
        fill (bool): Determines whether or not to fill underneath the plot
        alpha (float): Alpha value for the fill
        linewidth (float): Linewidth of lines
        sigma (float): Standard deviation for gaussian filter
        energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
        color_list (list): List of colors that is the same length as the number
            of projections in the plot.
        legend (bool): Determines whether to draw the legend or not
        total (bool): Determines whether to draw the total density of states or
            not
        spin (str): Which spin direction to parse ('up' or 'down')
        soc_axis: Currently unused by this function -- kept for interface
            compatibility with related plotting helpers.
        figsize (list / tuple): Desired size of the image in inches
            (width, height)
        erange (list): Energy range for the DOS plot
            ([lower bound, upper bound])
        combination_method (str): If spin == 'both', this determines if the
            spin up and spin down densities are added or subtracted.
            ('add' or 'sub')
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or
            not. If not, the figure and axis are returned for further
            manipulation.
        shift_efermi: Energy shift applied to the Fermi level, forwarded
            unchanged to the Dos object.

    Returns:
        If save == True, this function will return nothing and directly save
        the image as the output name. If save == False, the function will
        return the matplotlib figure and axis for further editing.
    """
    # Parse the DOS data from the VASP output folder.
    dos = Dos(shift_efermi=shift_efermi, folder=folder, spin=spin, combination_method=combination_method)
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    # Shared axis/label styling used by all DOS plotting helpers.
    _figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
    dos.plot_element_orbitals(
        ax=ax,
        element_orbital_dict=element_orbital_dict,
        fill=fill,
        alpha=alpha,
        linewidth=linewidth,
        sigma=sigma,
        energyaxis=energyaxis,
        color_list=color_list,
        legend=legend,
        total=total,
        erange=erange,
    )
    plt.tight_layout(pad=0.4)
    if save:
        plt.savefig(output)
    else:
        return fig, ax
5,325,230
def get_ceph_nodes():
    """Query named relation 'ceph' to determine current nodes."""
    # One private-address per related unit across every 'ceph' relation id.
    return [
        relation_get('private-address', unit=unit, rid=r_id)
        for r_id in relation_ids('ceph')
        for unit in related_units(r_id)
    ]
5,325,231
def dotmm(a, b, out):
    """dotmm(a, b)

    Computes an efficient dot product of a 4x4, 2x2 or a less efficient
    general matrix multiplication.  The result is written into ``out``.
    """
    shape = (a.shape[0], a.shape[1])
    if shape == (2, 2):
        _dotmm2(a, b, out)
    elif shape == (4, 4):
        _dotmm4(a, b, out)
    else:
        # General (slow) fallback for any other shape.
        _dotmm(a, b, out)
5,325,232
def metadataAbstractElementIllegalChildElementTest1(): """ No child elements, child elements not allowed. >>> doctestMetadataAbstractElementFunction( ... testMetadataAbstractElementIllegalChildElements, ... metadataAbstractElementIllegalChildElementTest1()) [] """ metadata = """<?xml version="1.0" encoding="UTF-8"?> <test> </test> """ return ElementTree.fromstring(metadata)
5,325,233
def discover_fields(layout):
    """Discover all fields defined in a layout object

    This is used to avoid defining the field list in two places -- the
    layout object is instead inspected to determine the list.  Strings are
    fields; anything else is recursed into.
    """
    try:
        components = list(layout)
    except TypeError:
        # Not iterable -> contributes no fields.
        return []
    fields = []
    for component in components:
        if isinstance(component, str):
            fields.append(component)
        else:
            fields.extend(discover_fields(component))
    return fields
5,325,234
def replace(project_symbols):
    """
    replace old source with non annotated signatures

    :param project_symbols: symbols we will use to write out new source code
    :return: bool -- False as soon as any module fails to be written,
        True when every module succeeds
    """
    # all() short-circuits exactly like the original early return.
    return all(
        write_new_source(module_symbols, access_attribute("get_non_annotated_source"))
        for module_symbols in project_symbols
    )
5,325,235
def element_list_as_string(elements):
    """Flatten a list of elements into a space separated string."""
    def _render(element):
        # Groups carry an '@' prefix; plain AOVs use their variable name.
        if isinstance(element, AOVGroup):
            return "@{}".format(element.name)
        return element.variable

    return " ".join(_render(element) for element in elements)
5,325,236
def load_sample_metadata(path: str, sample_id: str) -> Dict[str, str]:
    """
    Read sample metadata from either an sqlite '.db' database or a
    tab-delimited file.

    The tab-file has to have one sample per line and a header with a column
    labelled 'Name' or 'SampleId' in which a match to function argument
    sample_id should be found.

    Args:
        path (str): path to sqlite database (.db) or tab-delimited metadata file
        sample_id (str): Id of sample to annotate

    Returns:
        A Dict of metadata keys -> values

    Raises:
        ValueError: if the file does not exist, the required id column is
            missing, or the sample id cannot be found.
    """
    if not os.path.exists(path):
        raise ValueError(f"Samples metadata file '{path}' not found.")

    if path.endswith(".db"):  # sqlite3 database, table 'sample'
        with sqlite.connect(path) as db:
            cursor = db.cursor()
            cursor.execute("SELECT * FROM sample WHERE name = ?", (sample_id,))
            keys = [x[0] for x in cursor.description]
            vals = cursor.fetchone()
            if vals is not None:
                return dict(zip(keys, vals))
        raise ValueError(f"SampleID '{sample_id}' was not found in the samples database.")
    else:
        result = {}
        with open(path) as f:
            # BUG FIX: strip the newline with rstrip('\n') instead of
            # slicing off the last character -- a file whose final line has
            # no trailing newline used to lose that line's last character.
            headers = [x.lower() for x in f.readline().rstrip("\n").split("\t")]
            if "sampleid" not in headers and 'name' not in headers:
                raise ValueError("Required column 'SampleID' or 'Name' not found in sample metadata file")
            if "sampleid" in headers:
                sample_metadata_key_idx = headers.index("sampleid")
            else:
                sample_metadata_key_idx = headers.index("name")
            sample_found = False
            for line in f:
                items = line.rstrip("\n").split("\t")
                if len(items) > sample_metadata_key_idx and items[sample_metadata_key_idx] == sample_id:
                    # Later matching lines overwrite earlier ones (last wins).
                    for i, item in enumerate(items):
                        result[headers[i]] = item
                    sample_found = True
            if not sample_found:
                raise ValueError(f"SampleID '{sample_id}' not found in sample metadata file")
        return result
5,325,237
def test_delete_nat_gateway():
    """
    This method tests the deletion Nat Gateway

    Creates a VPC, a subnet and a tagged NAT gateway, runs the zombie
    cleaner with delete=True, then asserts the gateway is gone.
    NOTE(review): relies on module-level ``region_name`` and ``tags`` and
    presumably on a mocked EC2 backend (e.g. moto) -- confirm the fixture
    setup before running against real AWS credentials.
    :return:
    """
    ec2_client = boto3.client('ec2', region_name=region_name)
    # Minimal network fixture: VPC + one subnet hosting the NAT gateway.
    vpc_response = ec2_client.create_vpc(CidrBlock='10.0.0.0/16')
    vpc_id = vpc_response['Vpc']['VpcId']
    subnet1 = ec2_client.create_subnet(VpcId=vpc_id, CidrBlock='10.0.1.0/24')['Subnet']['SubnetId']
    nat_gateway_id = ec2_client.create_nat_gateway(TagSpecifications=[{'ResourceType': 'nat-gateway', 'Tags': tags}],
                                                   SubnetId=subnet1)['NatGateway']['NatGatewayId']
    # delete=True means matching zombie resources are actually removed.
    zombie_cluster_resources = ZombieClusterResources(cluster_prefix='kubernetes.io/cluster/', delete=True,
                                                      cluster_tag='kubernetes.io/cluster/unittest-test-cluster',
                                                      region=region_name,
                                                      resource_name='zombie_cluster_nat_gateway')
    zombie_cluster_resources.zombie_cluster_nat_gateway()
    # find_nat_gateway is expected to report the gateway as deleted.
    assert EC2Operations(region_name).find_nat_gateway(nat_gateway_id)
5,325,238
def handle_rlscope_args(parser=None, args=None, directory=None, reports_progress=False, delay=False, delay_register_libs=False):
    """
    Initialize the RL-Scope profiler (:py:obj:`rlscope.api.prof`) using :py:obj:`sys.argv`.

    Arguments
    ---------
    parser : argparse.ArgumentParser
        The training script's argparse parser, *with* RL-Scope specific arguments added
        (i.e., you should call :py:func:`rlscope.api.add_rlscope_args` on parser before
        calling this function).
    args : dict, optional
        Parsed argument values; defaults are filled in via ``_add_rlscope_args``.
    directory : str
        The directory used by the training script for saving its own files
        (e.g., weight checkpoints).  If the user doesn't provide
        ``--rlscope-directory`` (i.e. a separate directory for storing profiling
        data), we fall back on this.
        NOTE(review): the code below actually *prefers* ``directory`` over the
        ``rlscope_directory`` argument whenever ``directory`` is given, which is
        the opposite precedence of what this docstring describes -- confirm
        which behaviour is intended.
    reports_progress : bool
        Forwarded to ``init_profiler``.
    delay : bool
        Forwarded to ``init_profiler``.
    delay_register_libs : bool
        If True, postpone registering wrapped C libraries until later.
    """
    # if args.rlscope_directory is not None:
    #     rlscope_directory = args.rlscope_directory
    # else:
    #     rlscope_directory = directory
    if args is None:
        args = dict()
    _add_rlscope_args(args)
    # Prefer the explicit directory argument; otherwise fall back on the
    # --rlscope-directory command-line value.
    if directory is not None:
        rlscope_directory = directory
    else:
        rlscope_directory = get_arg(args, 'rlscope_directory')
    if delay_register_libs:
        # Imported lazily so the clib wrapper is only touched when needed.
        from rlscope.profiler import clib_wrap as rlscope_clib_wrap
        rlscope_clib_wrap.delay_register_libs()
    # TODO: train.py apparently like to launch separate process all willy-nilly.
    # I'm not sure what it's doing this for, but it's certainly true that python-side RL-Scope stuff will do it too.
    # We do NOT want to wrap those separate processes with rls-prof.
    # We only want to wrap the "main" training script processes;
    # i.e. the processes we already explicitly handle via the python-api.
    #
    # TLDR: remove librlscope.so from env['LD_PRELOAD'] if it's there, re-add it when we use python-RL-Scope to
    # launch new training scripts.
    patch_environ()
    rlscope_enabled = not get_arg(args, 'rlscope_disable')
    if rlscope_directory is None and rlscope_enabled:
        raise RLScopeAPIError("You must provide a location to store trace files: --rlscope-directory <dir>")
    if rlscope_enabled:
        # Abort early if there are no usable GPUs / nvidia-smi is missing.
        success = profilers.check_avail_gpus()
        if not success:
            sys.exit(1)
        nvidia_gpu_query.check_nvidia_smi()
    init_profiler(
        directory=rlscope_directory,
        reports_progress=reports_progress,
        delay=delay,
        args=args,
    )
5,325,239
def delete(ctx, session_token):
    """Delete a session from Fl33t"""
    client = ctx.obj['get_fl33t_client']()
    session = client.get_session(session_token)
    if not session:
        click.echo('Session does not exist in Fl33t. Cannot proceed with '
                   'deletion.')
        return
    outcome = 'Session was deleted.' if session.delete() else 'Session failed to be deleted.'
    click.echo(outcome)
5,325,240
def encode_corpus(storage: LetterStorage, corpus: tuple) -> tuple:
    """
    Encodes sentences by replacing letters with their ids

    :param storage: an instance of the LetterStorage class
    :param corpus: a tuple of sentences
    :return: a tuple of the encoded sentences; empty tuple on bad input
    """
    if not isinstance(storage, LetterStorage) or not isinstance(corpus, tuple):
        return ()
    # sentence -> tuple of words; word -> tuple of letter ids.
    return tuple(
        tuple(
            tuple(storage.get_id_by_letter(letter) for letter in word)
            for word in sentence
        )
        for sentence in corpus
    )
5,325,241
def test_expected_cols():
    """Testing for matching columns"""
    # col_checker fetches the Auto CSV and returns a truthy value when its
    # columns match the expected schema.
    # NOTE(review): this test performs a live network request -- consider
    # vendoring the CSV locally to keep the suite hermetic.
    assert col_checker('http://www-bcf.usc.edu/~gareth/ISL/Auto.csv')
5,325,242
def _assert_strides_are_log2_contiguous(strides):
    """
    Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
    """
    for position in range(1, len(strides)):
        stride = strides[position]
        previous = strides[position - 1]
        assert stride == 2 * previous, \
            f"Strides {stride} {previous} are not log2 contiguous"
5,325,243
def setBaseTheme( theme ):
    """
    Sets the name for the current base theme to be used.

    :param      theme | <str>
        Any value is accepted; it is coerced with ``str`` before being
        stored in the module-level ``_BASE_THEME`` global.
    """
    global _BASE_THEME
    _BASE_THEME = str(theme)
5,325,244
def test_cert_option(parse_requirements, runner, option, attr, expected):
    """
    The options --cert and --client-cert have to be passed to the
    PyPIRepository.

    NOTE(review): ``option``/``attr``/``expected`` are presumably injected
    by a ``pytest.mark.parametrize`` decorator, and ``parse_requirements``
    looks like a mock/spy fixture -- confirm in the surrounding test module.
    """
    # The CLI requires a requirements.in file to exist; an empty one is fine.
    with open("requirements.in", "w"):
        pass
    runner.invoke(cli, [option, expected])
    # Ensure the options in parse_requirements has the expected option
    args, kwargs = parse_requirements.call_args
    assert getattr(kwargs["options"], attr) == expected
5,325,245
def aggregated_column_unique(chart: Type[BaseChart], data):
    """
    description: main function to calculate histograms
    input:
        - chart: chart object providing x (column name), min_value,
          max_value and stride
        - data: cudf DataFrame holding the column ``chart.x``
    output:
        list_of_unique_values -- unique bin values of ``chart.x`` after
        binwise reduction, as a plain Python list
    """
    # Clip range [min_value, max_value], copied to the GPU once up front.
    a_range = cuda.to_device(np.array([chart.min_value, chart.max_value]))
    temp_df = cudf.DataFrame()
    # NOTE(review): get_binwise_reduced_column presumably maps each value to
    # its bin given the stride and range -- confirm against its definition.
    temp_df.add_column(
        chart.x,
        get_binwise_reduced_column(
            data[chart.x].copy().to_gpu_array(), chart.stride, a_range
        ),
    )
    # Unique bins, brought back to host memory as a list.
    return temp_df[chart.x].unique().to_pandas().tolist()
5,325,246
def convert_image(qcow_image=None):
    """
    Convert a QCOW2 disk image to a stream-optimized VMDK.

    The VMDK is written next to the source image with the same stem and a
    ``.vmdk`` extension; conversion is skipped when it already exists.

    :param qcow_image: path to the source ``.qcow2`` file
    :return: path to the resulting ``.vmdk`` file
    """
    new_filename = Path(qcow_image).stem + ".vmdk"
    path_to_vmdk = str(Path(qcow_image).parent.joinpath(new_filename))
    if os.path.isfile(path_to_vmdk) is False:
        print("Creating vmdk image.")
        # BUG FIX: convert the image passed as an argument; the old code
        # always converted the module-level PATH_TO_QCOW regardless of the
        # `qcow_image` parameter.
        call(["/usr/local/bin/qemu-img", "convert", "-f", "qcow2", "-O", "vmdk",
              "-o", "subformat=streamOptimized",
              "-o", "adapter_type=lsilogic",
              qcow_image, path_to_vmdk])
        # Work around a qemu-img header bug by overwriting bytes 4..12 of
        # the VMDK header (the `with` block closes the file; no explicit
        # close needed).
        payload = b'\xe2\x80\x98\x78\x30\x33\xe2\x80\x99'
        with open(path_to_vmdk, "r+b") as f:
            f.seek(4)
            f.write(payload)
    return path_to_vmdk
5,325,247
def collocations_table_exist(con):
    """Return True if the collocations table exist"""
    query = con.query(
        "select 1 from information_schema.tables "
        "where table_name='collocations'")
    rows = list(query.dictresult())
    # Non-empty result set means the table is present.
    return len(rows) > 0
5,325,248
def ftp_profile(publish_settings):
    """Takes PublishSettings, extracts ftp user, password, and host"""
    soup = BeautifulSoup(publish_settings, 'html.parser')
    # First publish profile whose method is FTP.
    ftp = [profile for profile in soup.find_all('publishprofile')
           if profile['publishmethod'] == 'FTP'][0]
    # Host is the authority part of the ftp:// publish URL.
    match = re.search('ftp://(.+)/site/wwwroot', ftp['publishurl'])
    host = match.group(1) if match else ''
    # Percent-encode the backslash-dollar separator used by Azure usernames.
    username = ftp['username'].replace("\\$", "%5C%24")
    password = ftp['userpwd']
    return host, username, password, ftp['publishurl']
5,325,249
def main():
    """Given a .bed file with probe sequences, runs Jellyfish to screen
    these for high abundance k-mers.

    Parses the command line, delegates the actual filtering to
    ``runFilter`` and reports the wall-clock runtime.
    """
    startTime = timeit.default_timer()

    # Allow user to input parameters on command line.
    userInput = argparse.ArgumentParser(description=\
        '%s version %s. Requires a .bed file containing probe sequences in the '
        'fourth column. Also requires Jellyfish to be installed and in your '
        'PATH. Returns a .bed file in the same format as the input file '
        'containing only probes passing the specified kmer filter.' \
        % (scriptName, Version))
    requiredNamed = userInput.add_argument_group('required arguments')
    requiredNamed.add_argument('-f', '--file', action='store', required=True,
                               help='The probe file to do kmer tallying in')
    requiredNamed.add_argument('-j', '--jf', action='store', required=True,
                               default='hg38_18mer.jf', type=str,
                               help='The Jellyfish .jf file to use')
    requiredNamed.add_argument('-m', '--merLength', action='store',
                               required=True, default=18, type=int,
                               help='The length of kmer used to build the .jf '
                                    'being used, default=18')
    requiredNamed.add_argument('-k', '--kmerThreshold', action='store',
                               required=True, default=5, type=int,
                               help='Filter probes with kmers occurring => '
                                    'than this many times, default=5')
    userInput.add_argument('-o', '--output', action='store', default=None,
                           type=str, help='The output name prefix')
    userInput.add_argument('-I', '--ID', action='store', type=str,
                           default=None,
                           help='Specify an ID to be associated with temporary '
                                'file names & the default output name. Can be '
                                'useful for parallelization. Null by default. '
                                'Will not be automatically included in output '
                                'file name if -o is flagged')
    userInput.add_argument('-R', '--Report', action='store_true', default=False,
                           help='Write a Report file detailing the results of '
                                'the kmer filtering. Off by default. Note, '
                                'selecting this option will slow the script.')
    userInput.add_argument('-D', '--Debug', action='store_true', default=False,
                           help='The same as -Report, but prints info to '
                                'terminal instead of writing a log file. Off '
                                'by default')
    userInput.add_argument('-M', '--Meta', action='store_true', default=False,
                           help='Write a text file containing meta '
                                'information. Off by default. Reports input '
                                'file <tab> estimated runtime <tab> kmerFilter '
                                'version <tab> kmer occurrence threshold used '
                                '<tab> kmer length used <tab> number of input '
                                'probes passing kmer filter <tab> number of '
                                'input probes')

    # Import user-specified command line values.
    args = userInput.parse_args()
    inputFile = args.file
    outNameVal = args.output
    merLengthVal = args.merLength
    jellyfishFile = args.jf
    kVal = args.kmerThreshold
    IDval = args.ID
    reportVal = args.Report
    debugVal = args.Debug
    metaVal = args.Meta

    # Run the filter logic.
    runFilter(inputFile, outNameVal, merLengthVal, jellyfishFile, kVal,
              IDval, reportVal, debugVal, metaVal, startTime)

    # Print wall-clock runtime to terminal.
    # BUG FIX: the original used the Python 2 print *statement*, which is a
    # SyntaxError on Python 3; the parenthesized call works on both.
    print('Program took %f seconds' % (timeit.default_timer() - startTime))
5,325,250
def train(name,train_data:List[tuple],test_data=None)->tuple:
    """
    Train Naive Bayes Classifier for Multinomial Models

    :param name: unused; kept for interface compatibility
    :param list train_data: list train data of tuple (text,tag)
    :param list test_data: list test data of tuple (text,tag)
    :return: ``(model,)`` when no test data is given, otherwise
        ``(model, accuracy)``
    """
    texts = [sample[0] for sample in train_data]
    labels = [sample[1] for sample in train_data]
    nb.fit(texts, labels)
    if test_data is None:
        return (nb,)
    eval_texts = [sample[0] for sample in test_data]
    eval_labels = [sample[1] for sample in test_data]
    predictions = nb.predict(eval_texts)
    return (nb, accuracy_score(predictions, eval_labels))
5,325,251
def validate_article(article_json):
    """
    Validate the content of a raw article

    Returns False when the article is missing, lacks a required field, or
    any required field fails its minimum-length check via ``vstrlen``.
    """
    if article_json is None:
        return False
    # Required fields with their minimum lengths (None = vstrlen default).
    required = (
        ('title', 16),
        ('contents', 30),
        ('feedlabel', None),
        ('url', 10),
    )
    try:
        for field, min_len in required:
            value = article_json[field]
            ok = vstrlen(value) if min_len is None else vstrlen(value, min_len)
            if not ok:
                return False
    except KeyError:
        return False
    return True
5,325,252
def apply_overrides(
    posts: NewPostsInfo,
    overrides: GlobalOverridesConfig,
    subscriptions: Iterable[Subscription],
) -> None:
    """Apply global overrides to a set of notifications.

    Modifies notifications in-place.

    :param posts: The group of notifications to modify.
    :param overrides: Overrides to apply to the notifications, potentially
        muting (i.e. removing) them.
    :param subscriptions: A user's manual subscriptions, which unlike the
        name suggests, can be used to override an override.
    """
    # Split into explicit subscribes / unsubscribes in a single pass
    # (subscriptions may be a one-shot iterable).
    subs = []
    unsubs = []
    for subscription in subscriptions:
        (subs if subscription["sub"] else unsubs).append(subscription)

    posts["thread_posts"] = [
        post
        for post in posts["thread_posts"]
        if not mutes_notification(post, overrides, subs, unsubs, is_reply=False)
    ]
    posts["post_replies"] = [
        reply
        for reply in posts["post_replies"]
        if not mutes_notification(reply, overrides, subs, unsubs, is_reply=True)
    ]
5,325,253
def get_model_memory_usage(batch_size, model):
    """
    Estimate how much memory the model will take, assuming all parameters
    are float32 and float32 takes 4 bytes (32 bits) in memory.

    :param batch_size: number of samples processed per run
    :param model: a Keras model
    :return: estimated memory in gigabytes, rounded to 3 decimals
    """
    # Total number of activation outputs across all layers (None dims,
    # e.g. the batch dimension, are skipped).
    shapes_mem_count = 0
    for layer in model.layers:
        layer_mem = 1
        for dim in layer.output_shape:
            if dim is None:
                continue
            layer_mem *= dim
        shapes_mem_count += layer_mem

    # Trainable / non-trainable parameter counts.
    trainable_count = np.sum(
        [keras.backend.count_params(p) for p in set(model.trainable_weights)]
    )
    non_trainable_count = np.sum(
        [keras.backend.count_params(p) for p in set(model.non_trainable_weights)]
    )

    # total memory = 4 bytes * total number of numbers in each run
    #                * number of images in each run
    total_memory = 4.0 * batch_size * (
        shapes_mem_count + 2 * trainable_count + non_trainable_count
    )
    # convert to GB
    return np.round(total_memory / (1024.0 ** 3), 3)
5,325,254
def calculate_deltaangle_distance(
    org_ligs,
    smiles_keys,
    fg_factory,
    file_prefix=None
):
    """
    Calculate the change of bite angle of each ligand in the cage.

    This function will not work for cages built from FGs other than
    metals + AromaticCNC and metals + AromaticCNN.

    Parameters
    ----------
    org_lig : :class:`dict` of :class:`stk.BuildingBlock`
        Dictionary of building blocks where the key is the file name,
        and the value is the stk building block.

    smiles_keys : :class:`dict` of :class:`int`
        Key is the linker smiles, value is the idx of that smiles.

    fg_factory :
    :class:`iterable` of :class:`stk.FunctionalGroupFactory`
        Functional groups to asign to molecules.
        NN_distance calculator will not work for cages built from FGs
        other than metals + AromaticCNC and metals + AromaticCNN.

    file_prefix : :class:`str`, optional
        Prefix to file name of each output ligand structure.
        Eventual file name is:
        "file_prefix"{number of atoms}_{idx}_{i}.mol
        Where `idx` determines if a molecule is unique by smiles.

    Returns
    -------
    delta_angles : :class:`dict`
        Bite angle in cage - free optimised ligand for each ligand.
        Output is absolute values.
    """
    delta_angles = {}

    # Iterate over ligands.
    for lig in org_ligs:
        stk_lig = org_ligs[lig]
        smiles_key = stk.Smiles().get_key(stk_lig)
        idx = smiles_keys[smiles_key]
        sgt = str(stk_lig.get_num_atoms())
        # Get optimized ligand name that excludes any cage
        # information.
        if file_prefix is None:
            filename_ = f'organic_linker_s{sgt}_{idx}_opt.mol'
        else:
            filename_ = f'{file_prefix}{sgt}_{idx}_opt.mol'

        # Ligand as extracted from the cage, reduced to its two most
        # distant functional groups.
        _in_cage = stk.BuildingBlock.init_from_molecule(
            stk_lig,
            functional_groups=fg_factory
        )
        _in_cage = _in_cage.with_functional_groups(
            functional_groups=get_furthest_pair_FGs(_in_cage)
        )

        # The freely optimised ligand, loaded from disk, treated the same.
        _free = stk.BuildingBlock.init_from_file(
            filename_,
            functional_groups=fg_factory
        )
        _free = _free.with_functional_groups(
            functional_groups=get_furthest_pair_FGs(_free)
        )

        angle_in_cage = calculate_bite_angle(bb=_in_cage)
        angle_free = calculate_bite_angle(bb=_free)

        # Absolute bite-angle change caused by cage formation.
        delta_angles[lig] = abs(angle_in_cage - angle_free)

    return delta_angles
5,325,255
def wait_for_file_to_finish_writing(**args) -> tuple:
    """
    Sleep for ``config.BULK_IMPORT_WAIT`` seconds so a freshly written file
    can be flushed and unlocked, then report success.

    This wait shouldn't be required but appears to help with larger files.

    :return: ``(True, args)`` -- the kwargs are passed through unchanged.
    """
    config = args.get('config')
    wait_seconds = config.BULK_IMPORT_WAIT
    logging.info("waiting {} seconds for file to finish writing and unlock".format(wait_seconds))
    time.sleep(wait_seconds)
    return True, args
5,325,256
def get_max_id(connection, generic_sensor_type: str) -> int:
    """
    Get the max id of a given generic sensor type.

    :param connection: an open database connection
    :param generic_sensor_type: "asset", "market", or "weather_sensor"
    :return: the largest id in the table, or 0 when the table is empty
    """
    table = sa.Table(
        generic_sensor_type,
        sa.MetaData(),
        sa.Column("id", sa.Integer),
    )
    statement = sa.select(
        [
            sa.sql.expression.func.max(
                table.c.id,
            )
        ]
    )
    max_id = connection.execute(statement).scalar()  # None if there are none
    if max_id is None:
        return 0
    return max_id
5,325,257
def downloadEnclosures(i, q):
    """This is the worker thread function.
    It processes items in the queue one after
    another.  These daemon threads go into an
    infinite loop, and only exit when
    the main thread ends.

    :param i: worker index (also scales the simulated download delay)
    :param q: a Queue of URLs to "download"
    """
    while True:
        # BUG FIX: the original used Python 2 print *statements*, which are
        # SyntaxErrors on Python 3.  The parenthesized calls below produce
        # identical output on both interpreters.
        print('%s: Looking for the next enclosure' % i)
        url = q.get()
        print('%s: Downloading:' % i, url)
        # instead of really downloading the URL,
        # we just pretend and sleep
        time.sleep(i + 2)
        q.task_done()
5,325,258
def one(count, test_number, p):
    """
    create a return list.  IRAM have 128 byte, so you can fill most 64 PC
    value to IRAM.

    Appends one test's worth of 8051 assembly (a chain of AJMP segments at
    randomized addresses inside a 2 KiB code page) to ``p``.

    :param count: number of jump segments to generate
    :param test_number: selects the 2 KiB code page (``test_number << 11``)
    :param p: accumulated assembly source so far
    :return: the extended assembly source
    """
    instruction_len = 5
    code_page_start_addr = test_number << 11
    first_addr = code_page_start_addr + 3  # reserve space for ljmp
    end_addr = code_page_start_addr + 0x7FA
    jl = ntl.jl(count)
    start_seg_no = random.choice(jl)
    p += f'''
LJMP RET_SEG_{test_number}_{start_seg_no}'''
    p += f'''
;;;;;;;;;;;;;;;;;;; SEG {test_number}, {hex(code_page_start_addr)}'''
    start_limit_addr = first_addr
    for seg_no, next_seg_no in enumerate(jl):
        # must reserve enough code space for the remaining segments
        end_limit_addr = end_addr - (count - seg_no) * instruction_len
        addr = random.choice(list(range(start_limit_addr, end_limit_addr + 1)))
        start_limit_addr = addr + instruction_len
        p += f'''
CSEG AT {hex(addr)}
RET_SEG_{test_number}_{seg_no}:'''
        if next_seg_no != start_seg_no:
            target = f'RET_SEG_{test_number}_{next_seg_no}'
        else:
            target = f'RET_SEG_{test_number}_END'
        p += f'''
    AJMP {target}
{atl.crash()}
'''
    p += f'RET_SEG_{test_number}_END:'
    # BUG FIX: strings are immutable, so the local `p +=` operations were
    # invisible to the caller -- the built source must be returned.
    return p
5,325,259
def _temp_dict_file_name():
    """Name of the expected python dictionary as a json file from run_external_python().

    .. versionadded:: 9.1
    """
    shared_name = '__shared_dictionary__'
    return shared_name
5,325,260
def test_set_empty():
    """Test set on empty table."""
    string = "test"
    ht = HashTable()
    # Bucket index the key is expected to hash to.
    check = ht._hashing(string, ht._size)
    ht.set("test", "test")
    # After one insert, the bucket holds exactly one (hash, value) pair.
    # NOTE(review): the stored tuple is (check, "test"), i.e. the hash --
    # not the key string -- is kept; confirm this matches HashTable's
    # intended storage contract.
    assert ht._table[check] == [(check, "test")]
5,325,261
def pSpectrum(
    data: Union[np.ndarray, List, None], samplefreq: float = 44100
) -> (np.ndarray, np.ndarray):
    """Power spectrum computation.

    Compute the power spectrum of a data set using standard ffts, after
    padding the data set to the next higher power of 2. Assumes data is
    regularly sampled in the time domain.

    Parameters
    ----------
    data : list or numpy array
        the signal for which the power spectrum will be computed
    samplefreq : float
        The sample frequency for the data set, in Hz

    Returns
    -------
    (powerspectrum, frequency)
        Tuple of numpy arrays with the computed power spectrum, and the
        associated frequency array (in Hz).  Returns None when `data`
        is empty.
    """
    npts = len(data)
    # we should window the data here
    if npts == 0:
        print("? no data in pSpectrum")
        return
    # pad to the nearest higher power of 2
    (a, b) = np.frexp(npts)
    if a <= 0.5:
        # BUG FIX: a mantissa of 0.5 means npts is already an exact power
        # of 2, so the exponent must be reduced by one to get npad == 0.
        # The original line `b = b = 1` simply set b to 1, producing a
        # negative pad length and crashing np.zeros() for any
        # power-of-two input length.
        b = b - 1
    npad = 2 ** b - npts
    if debugFlag:
        print("npts: %d npad: %d npad+npts: %d" % (npts, npad, npad + npts))
    padw = np.append(data, np.zeros(npad))
    npts = len(padw)
    sigfft = spFFT.fft(padw)
    nUniquePts = int(np.ceil((npts + 1) / 2.0))
    sigfft = sigfft[0:nUniquePts]
    spectrum = abs(sigfft)
    spectrum = spectrum / float(npts)  # scale by the number of points so that
    # the magnitude does not depend on the length
    # of the signal or on its sampling frequency
    spectrum = spectrum ** 2  # square it to get the power
    spmax = np.amax(spectrum)
    spectrum = spectrum + 1e-12 * spmax
    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if npts % 2 > 0:  # we've got odd number of points fft
        spectrum[1 : len(spectrum)] = spectrum[1 : len(spectrum)] * 2
    else:
        spectrum[1 : len(spectrum) - 1] = (
            spectrum[1 : len(spectrum) - 1] * 2
        )  # we've got even number of points fft
    freqAzero = np.arange(0, nUniquePts, 1.0) * (samplefreq / npts)
    return (spectrum, freqAzero)
5,325,262
def main():
    """ Run when the program is being used standalone, uses stdin as the input

    Parses command-line options (input mode, color, encoding, hidden-file
    handling, target directory), builds a Tree either from walking the
    target directory (no-input mode) or from stdin lines, then prints the
    rendered tree plus a directory/file count summary.
    """
    parser = TreeArgumentParser()
    parser.add_argument('-i', '--mode', '--input-mode',
                        choices=map_mode,
                        default='auto',
                        nargs='?',
                        const='normal',
                        )
    parser.add_argument('-c', '--color', '--colour',
                        choices=['auto', 'always', 'none'],
                        default='auto',
                        )
    parser.add_argument('-e', '--encoding',
                        choices=['auto', 'utf-8', 'ascii'],
                        default='auto',
                        )
    parser.add_argument('-a', action='store_true')
    parser.add_argument('target', nargs='?', default='.')
    args = parser.parse_args()

    # Character set to use
    chars = SafeChars
    if (args.encoding == 'utf-8' or
            args.encoding == 'auto' and sys.stdout.encoding == 'UTF-8'):
        # Switch to UTF-8 Chars
        chars = Chars

    # Are we using ANSI Escape Codes for Color?
    use_color = (args.color == 'always' or
                 args.color == 'auto' and hasattr(sys.stdout, "isatty") and
                 sys.stdout.isatty())

    # Work out parsing mode (auto detect tty)
    args.mode = map_mode[args.mode]
    if args.mode == ParsingMode.Auto:
        # Interactive terminal -> walk the filesystem; piped stdin -> parse it.
        if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
            args.mode = ParsingMode.NoInput
        else:
            args.mode = ParsingMode.Normal

    # Create Tree
    t = Tree(args.mode, use_color, chars)

    # Setup colour dicts using environment variables
    if use_color:
        setup_color(args)

    # If the mode is ParsingMode.NoInput, we walk the directory structure
    # instead
    if args.mode == ParsingMode.NoInput:
        parser.print_usage()

        def recursive_add(target):
            # Depth-first walk, feeding every path into the tree.
            for child in sorted(os.listdir(target)):
                # Only show hiddent files if required
                if args.a or not child.startswith('.'):
                    full = os.path.join(target, child)
                    t.add_line(full)
                    if os.path.isdir(full):
                        try:
                            recursive_add(full)
                        except OSError as e:
                            print("Error Reading Target: {}".format(e),
                                  file=sys.stderr)

        try:
            recursive_add(args.target)
        except OSError as e:
            print("Error Reading Target: {}".format(e))
            exit(1)
    else:
        try:
            for line in sys.stdin:
                t.add_line(line)
        except KeyboardInterrupt:
            # Gracefully handle interrupt when stdin is not being piped and
            # user exits
            pass
        except UnicodeDecodeError:
            print("ERROR: tree.py could not read from stdin!")
            exit(1)

    for line in t.gen_lines():
        print(line)

    # "director{y,ies}" / "file{s}" pluralization via slice tricks.
    print ("\n{} director{}, {} file{}"
           .format(t.dir_count,
                   "y" if t.dir_count == 1 else "ies",
                   t.file_count,
                   "s"[t.file_count==1:]))
5,325,263
def accumulate_grad_ms(session, model, verbose=True):
    """Run one epoch of *model* to accumulate gradient mean-squares.

    Feeds the data epoch through every GPU tower, threading each tower's
    RNN state from one time batch to the next, while the fetched
    ``ms_update`` op accumulates global mean-square statistics.  After the
    epoch the accumulated values are averaged via
    ``model.dynamic_eval.average_global_ms()``.

    Args:
        session: active TF session used for all runs.
        model: model object exposing gpu_num, initial_state/final_state per
            device, input (epoch_size, time_steps, batch_size) and
            dynamic_eval ops.
        verbose: when True, prints progress/speed roughly every 10% of the
            epoch.
    """
    start_time = time.time()
    iters = 0

    # zeros initial state for all devices
    state = []
    for k in range(model.gpu_num):
        state.append(session.run(model.initial_state(k)))

    # evaluate loss and final state for all devices
    fetches = {
        "ms_update": model.dynamic_eval.accu_global_ms()
    }
    for k in range(model.gpu_num):
        fetches["final_state%d" % k] = model.final_state(k)

    for step in range(model.input.epoch_size):
        # pass states between time batches
        feed_dict = {}
        for i in range(model.gpu_num):
            gpu_state = model.initial_state(i)
            # Each layer's (c, h) placeholders get last batch's values.
            for j, (c, h) in enumerate(gpu_state):
                feed_dict[c] = state[i][j].c
                feed_dict[h] = state[i][j].h

        vals = session.run(fetches, feed_dict)

        # Carry the final states forward for the next time batch.
        for k in range(model.gpu_num):
            state[k] = vals["final_state%d" % k]

        iters += model.input.time_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            print("%.3f speed: %.0f wps" %
                  (step * 1.0 / model.input.epoch_size,
                   iters * model.input.batch_size /
                   (time.time() - start_time)))

    session.run(model.dynamic_eval.average_global_ms())
    # fetches = {"ms": model.dynamic_eval.global_ms()}
    # vals = session.run(fetches)
    # ms = vals["ms"]
    # print([np.sum(x) for x in ms])
    return
5,325,264
def run(vendor):
    """Execute a single vendor crawler class.

    Args:
        vendor (Type[Vendor]): Subclass of
            :class:`~pcapkit.vendor.default.Vendor` from :mod:`pcapkit.vendor`.

    Warns:
        VendorRuntimeWarning: If failed to initiate the ``vendor`` class.

    """
    banner = f'{vendor.__module__}.{vendor.__name__}: {vendor.__doc__}'
    print(banner)
    try:
        vendor()
    except Exception as error:
        message = f'{vendor.__module__}.{vendor.__name__} <{error!r}>'
        warnings.warn(message, VendorRuntimeWarning, stacklevel=2)
5,325,265
def save() -> None:
    """Persist the current image to a file via the module-level figure."""
    current_figure = figure
    current_figure.save_image()
5,325,266
def get_environment(config, stage):
    """Return the default environment name configured for *stage*.

    Exits the process with a descriptive message when the stage is
    unknown or lacks an ``environment`` entry.
    """
    stages = config.get('stages', {})
    data = get_stage_data(stage, stages)
    if not data:
        sys.exit(NO_STAGE_DATA.format(stage))
    try:
        return data['environment']
    except KeyError:
        sys.exit(NO_ENV_IN_STAGE.format(stage))
5,325,267
def all(a, axis=None):
    """
    all(a, [,axis=(axis0, axis1, ...)]) --> bool or Array of booleans

    Returns True if all the components of iterable a evaluate to True.
    If axis are specified will return an Array of all(x) for x in a.axisiter(*axis).

    >>> A = Array([[True,True,True],[False,True,False]])
    >>> print A.formated()
    [[True, True, True],
     [False, True, False]]
    >>> all(A)
    False
    >>> all(A, axis=(0, 1))
    False
    >>> all(A, axis=0)
    Array([False, True, False])
    >>> all(A, axis=1)
    Array([True, False])
    """
    # NOTE(review): placeholder only -- this shadows the builtin ``all`` and
    # is not implemented; the doctests above describe the intended
    # Array/axisiter semantics once a body is written.
    pass
5,325,268
def anisotropic_Gaussian(ksize=25, theta=np.pi, l1=6, l2=6):
    """
    https://github.com/cszn/KAIR/blob/master/utils/utils_sisr.py
    Generate an anisotropic Gaussian kernel

    Args:
        ksize : e.g., 25, kernel size
        theta : [0,  pi], rotation angle range
        l1    : [0.1,50], scaling of eigenvalues
        l2    : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.

    Returns:
        k     : kernel
    """
    # Rotate the unit x-axis by theta to get the principal direction.
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    v = rotation.dot(np.array([1., 0.]))
    # Orthogonal eigenvector basis built from the rotated direction.
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    # Covariance from eigendecomposition: V D V^-1.
    Sigma = V.dot(D).dot(np.linalg.inv(V))
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
5,325,269
def get_mail(db, messages_obj, msg_id, user_id='me'):
    """Fetch a message from GMail by id and adds it to passed db.

    Args:
        db: Local db connection object.
        messages_obj: Authenticated GMail user object.
        msg_id: Id of Gmail message.
        user_id: User's email address. The special value "me"
        can be used to indicate the authenticated user.
    """
    request = messages_obj.get(userId=user_id, id=msg_id, format='full')
    message = request.execute()

    # Skip the message entirely when it carries any configured skip-label.
    skip_labels = set(_cfg.labels_to_skip())
    if not skip_labels.isdisjoint(message['labelIds']):
        return False

    _log.logger.debug("adding message: %s" % (message['id']))
    if not _db.add_message(db, message):
        return False
    return True
5,325,270
def get_history(ticker, start_date, end_date = None, granularity=granularity):
    """Fetch/load historical data from Coinbase API at specified granularity

    params:
        start_date: (str) (see pandas.to_datetime for acceptable formats)
        end_date: (str)
        granularity: (int) seconds (default: 300)

    price data is saved by ticker and date and stored in audiolizer_temp_dir

    Each day is cached as a gzipped CSV named <ticker>-<YYYY-MM-DD>.csv.gz;
    only missing days are fetched from the API (via load_date).  After the
    first assembly pass, detected gaps are re-fetched day by day and the
    frame is rebuilt from the cache files.
    """
    # Normalize both endpoints to tz-naive timestamps; never ask for the
    # future beyond tomorrow.
    start_date = pd.to_datetime(start_date).tz_localize(None)
    today = pd.Timestamp.now().tz_localize(None)
    if end_date is None:
        end_date = today + pd.Timedelta('1D')
    else:
        end_date = min(today, pd.to_datetime(end_date).tz_localize(None))
    fnames = []
    # One interval (and one cache file) per day in the requested range.
    for int_ in pd.interval_range(start_date, end_date):
        fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
            ticker, int_.left.strftime('%Y-%m-%d'))
        if not os.path.exists(fname):
            # Cache miss: download this day and write it out.
            int_df = load_date(ticker, granularity, int_)
            int_df.to_csv(fname, compression='gzip')
        fnames.append(fname)
    df = pd.concat(map(lambda file: pd.read_csv(file, index_col='time',
                                                parse_dates=True),
                       fnames)).drop_duplicates()

    gaps = get_gaps(df, granularity)
    if len(gaps) > 0:
        print('found data gaps')
        # fetch the data for each date
        for start_date in gaps.groupby(pd.Grouper(freq='1d')).first().index:
            print('\tfetching {}'.format(start_date))
            int_ = pd.interval_range(start=start_date, periods=1, freq='1d')
            int_ = pd.Interval(int_.left[0], int_.right[0])
            int_df = load_date(ticker, granularity, int_)
            fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
                ticker, int_.left.strftime('%Y-%m-%d'))
            # Overwrite the cached day with the re-fetched data.
            int_df.to_csv(fname, compression='gzip')
        # Rebuild the frame from the (now repaired) cache files.
        df = pd.concat(map(lambda file: pd.read_csv(file, index_col='time',
                                                    parse_dates=True,
                                                    compression='gzip'),
                           fnames)).drop_duplicates()
    return df
5,325,271
def run_command(net, cmd_list, cmd_str):
    """
    This function returns the output of IOS command
    Exit script if there is no any network device.

    Parameters
    ----------
    net (list): network device ip and id, i.e. [ip, deviceId]
    cmd_list (list): available IOS commands in list
    cmd_str (string): available IOS commands in string

    Return:
    -------
    str: output of running IOS command ("Error" on timeout/parse failure)
    """
    # getting command input -- loop until a valid command (or 'exit') is typed
    while True:
        cmd = input('\n=> Enter IOS command you like to run on this device, ip='+net[0]+' or "exit" to exit: ')
        cmd = cmd.lstrip() # Ignore leading space
        cmd = cmd.lower() # to lower case
        if cmd == 'exit':
            sys.exit()
        elif cmd == "": # no input
            print ("Oops! command cannot be NULL please try again or enter 'exit'")
        elif cmd.split(' ')[0] not in cmd_list: # retrieve the first word of command string
            print("Invalid command, valid commands are the following (some of them maybe not available in certain devices) : ",cmd_str,'\n')
        else:
            break

    # JOSN for posting IOS command
    cmd_json = {
        "commands" : [cmd],
        "deviceUuids" : [net[1]] # net = [ip,deviceid]
        }

    try:
        # print (cmd_json)
        # print("\nExecuting \"",cmd,"\" please wait ........\n\n")
        # Submit the command; the poller runs it asynchronously and gives
        # back a task id to poll.
        resp = post(api="network-device-poller/cli/read-request", data=cmd_json)
        # prettyPrint(resp)
        response_json = resp.json()
        taskId = response_json["response"]["taskId"]
    # NOTE(review): bare except hides the real failure (network vs JSON
    # shape) -- consider narrowing.
    except:
        print ("\n For some reason cannot get taskId")
        sys.exit()
    else:
        r = get(api="task/"+taskId)
        response_json = r.json()
        progress = response_json["response"]["progress"]
        count = 0
        # We can only see fileId when tsak is finished
        while "fileId" not in progress:
            try:
                r = get(api="task/"+taskId)
                response_json = r.json()
                progress = response_json["response"]["progress"]
            except: # Something is wrong
                print ("\nSomething is wrong when executing get task/"+taskId)
                sys.exit()
            time.sleep(1)
            count += 1
            if count > 20: # timeout after ~20 seconds
                print ("\nTaking too long, script time out!")
                return ("Error")
                # NOTE(review): unreachable -- the return above exits first.
                sys.exit()
        # convert string to dict
        p = ast.literal_eval(progress)
        fileid = p["fileId"]
        # now retrieve the output of running IOS command
        r = get(api="file/"+fileid)
        response_json = r.json()
        # real output
        if response_json[0]["commandResponses"]["FAILURE"] != {}:
            print (response_json[0]["commandResponses"]["FAILURE"][cmd])
        else:
            try:
                output = response_json[0]["commandResponses"]["SUCCESS"][cmd]
                print (output)
                return output
            except: # Something is wrong
                # '?' help queries come back under FAILURE even on success.
                if cmd.split(' ')[1] == '?':
                    output = response_json[0]["commandResponses"]["FAILURE"][cmd]
                    print (output)
                else:
                    print ("Response from get task\n",json.dumps(response_json,indent=4))
                    print ("\nSomething is wrong when parsing the command output")
                    return ("Error")
                    # NOTE(review): unreachable -- the return above exits first.
                    sys.exit()
5,325,272
def fakemag_to_parallax(fakemag, mag, fakemag_err=None):
    """
    To convert fakemag to parallax, Magic Number will be preserved

    :param fakemag: astroNN fakemag
    :type fakemag: Union[float, ndarray]
    :param mag: apparent magnitude
    :type mag: Union[float, ndarray]
    :param fakemag_err: Optional, fakemag_err
    :type fakemag_err: Union[NoneType, float, ndarray]
    :return: array of parallax in mas with astropy Quantity (with additional return of propagated error if fakemag_err is provided)
    :rtype: astropy Quantity
    :History: 2018-Aug-11 - Written - Henry Leung (University of Toronto)
    """
    fakemag = np.array(fakemag)
    mag = np.array(mag)
    # treat non-positive fakemag as MAGIC_NUMBER, check for magic number and negative fakemag
    magic_idx = ((fakemag == MAGIC_NUMBER) | (mag == MAGIC_NUMBER) | (fakemag <= 0.)
                 | np.isnan(fakemag) | np.isnan(mag))

    with warnings.catch_warnings():  # suppress numpy Runtime warning caused by MAGIC_NUMBER
        warnings.simplefilter("ignore")
        # parallax (mas) = fakemag / 10^(0.2 * mag)
        parallax = fakemag / (10. ** (0.2 * mag))
    if fakemag.shape != ():  # check if its only 1 element
        parallax[magic_idx] = MAGIC_NUMBER
    else:  # for float
        # NOTE(review): ``magic_idx == [1]`` compares a 0-d bool array to a
        # list; it is truthy exactly when magic_idx is True -- confirm this
        # was the intent rather than a plain ``if magic_idx``.
        parallax = MAGIC_NUMBER if magic_idx == [1] else parallax

    if fakemag_err is None:
        return parallax * u.mas
    else:
        with warnings.catch_warnings():  # suppress numpy Runtime warning caused by MAGIC_NUMBER
            warnings.simplefilter("ignore")
            # relative error propagation: d(pi)/pi = d(fakemag)/fakemag
            parallax_err = (fakemag_err / fakemag) * parallax
        if fakemag.shape != ():  # check if its only 1 element
            parallax_err[magic_idx] = MAGIC_NUMBER
        else:  # for float
            parallax_err = MAGIC_NUMBER if magic_idx == [1] else parallax_err
        return parallax * u.mas, parallax_err * u.mas
5,325,273
def save(potential, f):
    """
    Write a :class:`~gala.potential.PotentialBase` object out to a text (YAML)
    file.

    Parameters
    ----------
    potential : :class:`~gala.potential.PotentialBase`
        The instantiated :class:`~gala.potential.PotentialBase` object.
    f : str, file_like
        A filename or file-like object to write the input potential object to.

    """
    serialized = to_dict(potential)

    # Accept either an already-open stream or a path to open ourselves.
    if hasattr(f, 'write'):
        yaml.dump(serialized, f, default_flow_style=False)
    else:
        with open(f, 'w') as stream:
            yaml.dump(serialized, stream, default_flow_style=False)
5,325,274
def provenance_stamp(routine):
    """Return dictionary satisfying QCSchema,
    https://github.com/MolSSI/QCSchema/blob/master/qcschema/dev/definitions.py#L23-L41
    with QCElemental's credentials for creator and version. The
    generating routine's name is passed in through `routine`.

    """
    stamp = {
        'creator': 'QCElemental',
        'version': get_versions()['version'],
        'routine': routine,
    }
    return stamp
5,325,275
async def test_simple_raise(event_loop, rpc_peer):
    """Check basic error routing -
    RpcMethodError is raised and contains info about the actual remote error.
    """
    # (remote exception type name, message) pairs raised by 'raise_builtin'.
    REMOTE_EXCEPTIONS = [
        ('RuntimeError', 'some message?'),
        ('ValueError', 'another message?')
    ]
    async with rpc_peer('util', event_loop) as peer:
        for exc_type, exc_msg in REMOTE_EXCEPTIONS:
            call = peer.connection.call_unary(
                'raise_builtin', [exc_type, exc_msg]
            )
            async with call:
                # The inner try/except inspects the error's metadata and then
                # re-raises so pytest.raises still sees RpcMethodError.
                with pytest.raises(prpc.RpcMethodError):
                    try:
                        await call.result
                    except prpc.RpcMethodError as ex:
                        assert ex.cause_type == exc_type
                        assert ex.cause_message == exc_msg
                        assert exc_type in ex.remote_traceback
                        raise
5,325,276
def index_of(y):
    """
    A helper function to get the index of an input to plot
    against if x values are not explicitly given.

    Tries to get `y.index` (works if this is a pd.Series), if that
    fails, return np.arange(y.shape[0]).

    This will be extended in the future to deal with more types of
    labeled data.

    Parameters
    ----------
    y : scalar or array-like
        The proposed y-value

    Returns
    -------
    x, y : ndarray
       The x and y values to plot.
    """
    try:
        # pd.Series-like: use its own index as x values.
        x_vals = y.index.values
        y_vals = y.values
    except AttributeError:
        # Plain array-like: synthesize a 0..n-1 float index.
        y_vals = _check_1d(y)
        return np.arange(y_vals.shape[0], dtype=float), y_vals
    return x_vals, y_vals
5,325,277
def get_centered_box(center: np.ndarray, box_size: np.ndarray):
    """
    Get box of size ``box_size``, centered in the ``center``.

    If ``box_size`` is odd, ``center`` will be closer to the right.
    """
    half = box_size // 2
    begin = center - half
    # The extra ``box_size % 2`` puts the odd leftover cell on the right.
    end = center + half + box_size % 2
    return begin, end
5,325,278
def save_all_editors():
    """ Saves all editors. """
    window = _window()
    window.save_all()
5,325,279
def _make_context(frames, cameras):
    """ Generate Context named tuple using camera, frame information

    Args:
    - cameras:
    - frames:

    Returns:
      A Context named tuple encapsulating given information
    """
    return Context(frames=frames, cameras=cameras)
5,325,280
def test_shift_array_if_len_even():
    """insert_shift_array should place the value at the middle index
    when the array has an even number of elements."""
    data = [0] * 4
    data = shift_array.insert_shift_array(data, 1)
    assert data[2] == 1
5,325,281
def isfile(hdfs_path, project=None):
    """
    Return True if path refers to a file.

    Args:
        :hdfs_path: You can specify either a full hdfs pathname or a relative one (relative to your Project's path in HDFS).
        :project: If this value is not specified, it will get the path to your project. If you need to path to another project, you can specify the name of the project as a string.

    Returns:
        True if path refers to a file.

    Raises: IOError
    """
    # Default to the caller's own project when none is given.
    if project is None:
        project = project_name()
    full_path = _expand_path(hdfs_path, project)
    return path.isfile(full_path)
5,325,282
def get_list_from_file(filename):
    """ Returns a list of containers stored in a file (one on each line) """
    with open(filename) as handle:
        lines = handle.read().splitlines()
    # Drop empty lines; keep everything else verbatim.
    return [line for line in lines if line]
5,325,283
def convert_hard_sigmoid(g, op, block):
    """Operator converter for hard_sigmoid.

    Emits ``clip(slope * x + 0.5, 0, 1)`` for the Paddle ``hard_sigmoid`` op.
    """
    # NOTE(review): only the ``slope`` attribute is read; the additive
    # constant 0.5 is hard-coded -- confirm the op's ``offset`` attribute can
    # never differ from 0.5 on this path.
    slope = op.attr("slope")
    x = g.get_node(op.input("X")[0])
    # Constants must match the input tensor's dtype for the elementwise ops.
    dtype = infer_type(x).checked_type.dtype
    out = x * _expr.const(slope, dtype) + _expr.const(0.5, dtype)
    out = _op.clip(out, 0, 1)
    g.add_node(op.output("Out")[0], out)
5,325,284
def import_recipe():
    """Import recipe from base64 encoded text.

    On POST with a valid form, decodes the base64 payload into JSON and
    inserts rows into the recipes, categories, ingredients and owners
    tables, then redirects to the index.  On failure (or plain GET),
    re-renders the import form (with an error message when decoding
    failed).
    """
    form = Import()
    errors = None
    if form.validate_on_submit():
        encoded = request.form["encoded"]
        try:
            # base64 text -> JSON dict describing the recipe.
            decoded = loads(b64decode(encoded.encode("utf-8")).decode("utf-8"))

            # recipe table
            title = decoded["title"]
            servings = decoded["servings"]
            source = decoded["source"]
            notes = decoded["notes"]
            # Directions are stored as a JSON string inside the row.
            directions = dumps(decoded["directions"])
            db.execute("INSERT INTO recipes (title, servings, source, notes, directions) VALUES "
                       "(?, ?, ?, ?, ?)", (title, servings, source, notes, directions))

            # categories table
            # Look the new row up by title to get its generated id.
            # NOTE(review): this assumes titles are unique -- confirm.
            recipe_id = db.execute("SELECT recipe_id FROM recipes WHERE title = ?",
                                   (title,))[0]["recipe_id"]
            for category in decoded["categories"]:
                db.execute("INSERT INTO categories (category, recipe_id) VALUES (?, ?)",
                           (category, recipe_id))

            # ingredients table
            for ingredient in decoded["ingredients"]:
                db.execute("INSERT INTO ingredients (ingredient, recipe_id) VALUES (?, ?)",
                           (ingredient, recipe_id))

            # owners table
            db.execute("INSERT INTO owners (recipe_id, user_id) VALUES (?, ?)",
                       (recipe_id, session["user_id"]))

            flash("Recipe imported!")
            return redirect(url_for("index"))
        except TypeError:
            # Raised when the payload is not valid base64/JSON text.
            errors = ["Invalid text."]
    return render_template("import.html", form=form, errors=errors)
5,325,285
def parse_xyz(filename, nbits):
    """Read xyz format point data and return header, points and points data.

    Lines beginning with ``#`` and blank lines are skipped; commas are
    treated as whitespace.  The first three columns of each line are the
    point coordinates; when ``nbits > 0`` the remaining columns are parsed
    as integer per-point data.

    Args:
        filename: path to the xyz text file.
        nbits: bit depth of the per-point data; ``<= 0`` means no data
            columns are read.

    Returns:
        ``(bbox, points, pointsdata)`` where ``bbox`` is
        ``[[minx, miny, minz], [maxx, maxy, maxz]]``, ``points`` is an
        ``(N, 3)`` float64 array, and ``pointsdata`` is an ``(N, D)`` int
        array, or ``None`` when ``nbits <= 0``.
    """
    pointstrings = []
    with open(filename) as points_file:
        for line in points_file:
            if not line.startswith('#'):
                if not line.isspace():
                    line = line.replace(',', ' ')
                    line = line.split()
                    pointstrings.append(line)

    # np.float / np.int were deprecated in NumPy 1.20 and removed in 1.24;
    # use the concrete dtypes they aliased.
    points = np.zeros((len(pointstrings), 3), dtype=np.float64)
    if nbits > 0:
        datalen = len(pointstrings[0][3:])
        pointsdata = np.zeros((len(pointstrings), datalen), dtype=np.int_)

    for idx, line in enumerate(pointstrings):
        coords = [float(i) for i in line[:3]]
        points[idx] = coords
        if nbits > 0:
            pointsdata[idx] = [int(i) for i in line[3:]]

    minvals = points.min(axis=0).tolist()
    maxvals = points.max(axis=0).tolist()
    bbox = [minvals, maxvals]
    if nbits > 0:
        return bbox, points, pointsdata
    return bbox, points, None
5,325,286
def compare_vecs(est, truth, zero_tol=0):
    """
    Parameters
    ----------
    est: array-like
        The estimated vector.

    truth: array-like
        The true vector parameter.

    zero_tol: float
        Zero tolerance for declaring an element equal to zero.
        An element is "in the support" when its absolute value is
        strictly greater than this tolerance.

    Output
    ------
    out: dict
        Dictonary containing various error measurements:
        L1/L2 residual norms (absolute and relative), MSE/MAE, max error,
        support comparison metrics, support AUC, and sign error rate.
    """
    est = np.array(est).reshape(-1)
    truth = np.array(truth).reshape(-1)

    # true norms
    true_L2 = np.sqrt((truth ** 2).sum())
    true_L1 = abs(truth).sum()

    # we will divide by this later so make sure it isn't zero
    true_L2 = max(true_L2, np.finfo(float).eps)
    true_L1 = max(true_L1, np.finfo(float).eps)

    # NOTE: stripped under ``python -O``; vectors must be the same length.
    assert len(est) == len(truth)
    support_est = abs(est) > zero_tol
    support_true = abs(truth) > zero_tol
    n = len(est)
    resid = est - truth

    out = {}

    ####################
    # size of residual #
    ####################
    out['L2'] = np.sqrt((resid ** 2).sum())
    out['L1'] = abs(resid).sum()
    out['L2_rel'] = out['L2'] / true_L2
    out['L1_rel'] = out['L1'] / true_L1
    out['MSE'] = out['L2'] / np.sqrt(n)
    out['MAE'] = out['L1'] / n
    out['max'] = abs(resid).max()

    # compare supports
    out.update(compare_supports(support_est, support_true))
    # AUC of recovering the true support from |est| as a score.
    out['support_auc'] = roc_auc_score(y_true=support_true, y_score=abs(est))

    #################
    # compare signs #
    #################
    # Zero out sub-tolerance entries so their signs compare as equal zeros.
    _est = deepcopy(est)
    _est[~support_est] = 0
    _truth = deepcopy(truth)
    _truth[~support_true] = 0
    out['sign_error'] = np.mean(np.sign(_est) != np.sign(_truth))

    # # only compute at true non-zero
    # est_at_true_nz = est[nz_mask_true]
    # true_at_true_nz = truth[nz_mask_true]
    # sign_misses = np.sign(est_at_true_nz) != np.sign(true_at_true_nz)
    # out['sign_error'] = np.mean(sign_misses)

    return out
5,325,287
def accumulator(init, update):
    """
    Generic accumulator step, suitable for :func:`functools.reduce`.

    When ``init`` is an :class:`int`, the *length* of ``update`` is added
    (a counting accumulator); otherwise the two values are combined with
    ``+`` (concatenation for strings/sequences, addition for numbers).

    .. code-block:: python

        # Simplest Form
        >>> accumulator('this ', 'that')
        'this that'

        # The type of the initial value determines output type.
        >>> accumulator(5, 'Hello')
        10

        # With functools.reduce (initial value goes last):
        >>> functools.reduce(accumulator, ['ab', 'cde'], 0)
        5

    :param init: Initial Value
    :param update: Value to accumulate
    :return: Combined Values
    """
    return (
        init + len(update)
        if isinstance(init, int)
        else init + update
    )
5,325,288
def process_po_folder(domain, folder, extra=''):
    """ Process each PO file in folder

    Compiles every ``*.po`` file in *folder* into ``<domain>.mo`` under
    ``MO_DIR/<name>/MO_LOCALE`` using the external msgfmt TOOL.
    Returns True when no compile produced warnings, False otherwise.
    NOTE: Python 2 syntax (print statements).
    """
    result = True
    for fname in glob.glob(os.path.join(folder, '*.po')):
        basename = os.path.split(fname)[1]
        name = os.path.splitext(basename)[0]
        mo_path = os.path.normpath('%s/%s%s' % (MO_DIR, name, MO_LOCALE))
        mo_name = '%s.mo' % domain
        if not os.path.exists(mo_path):
            os.makedirs(mo_path)

        # Create the MO file
        mo_file = os.path.join(mo_path, mo_name)
        print 'Compile %s' % mo_file
        ret, output = run('%s %s -o "%s" "%s"' % (TOOL, extra, mo_file, fname))
        if ret != 0:
            # Non-zero exit: assume the compiler tool itself is missing.
            print '\nMissing %s. Please install this package first.' % TOOL
            exit(1)
        if 'WARNING:' in output:
            # Warnings don't abort, but mark the run as not fully clean.
            print output
            result = False
    return result
5,325,289
def create_service(
    *,
    db_session: Session = Depends(get_db),
    service_in: ServiceCreate = Body(
        ...,
        example={
            "name": "myService",
            "type": "pagerduty",
            "is_active": True,
            "external_id": "234234",
        },
    ),
):
    """
    Create a new service.

    Rejects the request with a 400 when a service with the same external
    id already exists in the target project.
    """
    existing = get_by_external_id_and_project_name(
        db_session=db_session,
        external_id=service_in.external_id,
        project_name=service_in.project.name,
    )
    if existing:
        raise HTTPException(
            status_code=400,
            detail=f"A service with this identifier ({service_in.external_id}) already exists.",
        )
    return create(db_session=db_session, service_in=service_in)
5,325,290
def set_brightness_dimmed(deck_id: str, brightness_dimmed: int) -> None:
    """Sets the percentage value that will be used for dimming the full brightness"""
    deck_state = state.setdefault(deck_id, {})
    deck_state["brightness_dimmed"] = brightness_dimmed
    _save_state()
5,325,291
def test_get_homography_matrx():
    """
    Test that get_homography_matrix reproduces OpenCV's findHomography
    result exactly for a fixed set of point correspondences.
    """
    src = [[0, 6], [3, 40], [32, 54], [40, 60]]
    dst = [[0, 0], [0, 30], [30, 0], [30, 30]]
    actual = get_homography_matrix(src, dst)
    expected, _mask = cv2.findHomography(np.array(src), np.array(dst))
    assert str(actual) == str(expected)
5,325,292
def GetBuildRevisions(src_dir, webkit_dir=None, revision_dir=None):
    """Parses build revisions out of the provided directories.

    Args:
      src_dir: The source directory to be used to check the revision in.
      webkit_dir: Optional WebKit directory, relative to src_dir.
      revision_dir: If provided, this dir will be used for the build revision
        instead of the mandatory src_dir.

    Returns a tuple of the build revision and (optional) WebKit revision.
    NOTICE: These revisions are strings, since they can be both Subversion
    numbers and Git hashes.
    """
    abs_src_dir = os.path.abspath(src_dir)
    webkit_revision = None
    if webkit_dir:
        webkit_dir = os.path.join(abs_src_dir, webkit_dir)
        webkit_revision = GetHashOrRevision(webkit_dir)

    if revision_dir:
        revision_dir = os.path.join(abs_src_dir, revision_dir)
        build_revision = GetHashOrRevision(revision_dir)
    else:
        # Use the normalized absolute path for consistency with the
        # webkit_dir/revision_dir branches above (previously the raw,
        # possibly relative, src_dir was passed here).
        build_revision = GetHashOrRevision(abs_src_dir)
    return (build_revision, webkit_revision)
5,325,293
def _get_scheme_dict(distribution_name: str) -> Dict[str, str]: """Calculate the scheme dictionary for the current Python environment.""" scheme_dict = sysconfig.get_paths() installed_base = sysconfig.get_config_var("base") assert installed_base # calculate 'headers' path, not currently in sysconfig - see # https://bugs.python.org/issue44445. This is based on what distutils does. # TODO: figure out original vs normalised distribution names scheme_dict["headers"] = os.path.join( sysconfig.get_path("include", vars={"installed_base": installed_base}), distribution_name, ) return scheme_dict
5,325,294
def biokit_data(filename, where=None):
    """Simple utilities to retrieve data sets from biokit/data directory"""
    import os
    import easydev
    package_root = easydev.get_package_location('biokit')
    data_dir = os.sep.join([package_root, "biokit", 'data'])
    # in the code one may use / or \
    if where:
        parts = [data_dir, where, filename]
    else:
        parts = [data_dir, filename]
    full_path = os.sep.join(parts)
    if not os.path.exists(full_path):
        raise Exception('unknown file %s' % full_path)
    return full_path
5,325,295
def setup_module():
    """
    Build the required testing extension module

    Builds (once) the f2py 'test_array_from_pyobj_ext' extension from the
    bundled C sources and caches it in the module-level ``wrap`` global.
    Skips the whole module when no C compiler is available.
    """
    global wrap

    # Check compiler availability first
    if not util.has_c_compiler():
        pytest.skip("No C compiler available")

    # Only build on the first call; later calls reuse the cached module.
    if wrap is None:
        config_code = """
        config.add_extension('test_array_from_pyobj_ext',
                             sources=['wrapmodule.c', 'fortranobject.c'],
                             define_macros=[])
        """
        d = os.path.dirname(__file__)
        src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'),
               os.path.join(d, '..', 'src', 'fortranobject.c'),
               os.path.join(d, '..', 'src', 'fortranobject.h')]
        wrap = util.build_module_distutils(src, config_code,
                                          'test_array_from_pyobj_ext')
5,325,296
def get_handler(event, context):  # pylint: disable=unused-argument
    """REST API GET method to get data about a Minecraft game server."""
    # gather the server data using the 'name' path parameter
    path_params = event.get('pathParameters', {})
    server = gather(path_params.get('name'))

    # return the HTTP payload
    return {
        'statusCode': 200,
        'body': json.dumps(server),
    }
5,325,297
def get_item(dataframe: DataFrame, col: str, new_col: str, index: any) -> DataFrame:
    """Return DF with a column that contains one item for an array

    :param str col: name of the column
    :param str new_col: type of the new column
    :param any index: the index key

    Examples:
    ```
    SectionName:
      Type: transform::generic
      Input: InputBlock
      Properties:
        Functions:
          - get_item:
              col: name
              new_col: firstname
              index: 2
    ```
    """
    _validate_column_exists(dataframe, col)
    extracted = F.col(col).getItem(index)
    return dataframe.withColumn(new_col, extracted)
5,325,298
def appdatadirectory():
    """Attempt to retrieve the current user's app-data directory

    This is the location where application-specific
    files should be stored.  On *nix systems, this will
    be the ${HOME}/.config directory.
    On Win32 systems, it will be
    the "Application Data" directory.  Note that for
    Win32 systems it is normal to create a sub-directory
    for storing data in the Application Data directory.
    """
    if shell:
        # on Win32 and have Win32all extensions, best-case
        return shell_getShellFolder(shellcon.CSIDL_APPDATA)
    if winreg:
        # on Win32, but no Win32 shell com available, this uses
        # a direct registry access, likely to fail on Win98/Me
        return _winreg_getShellFolder('AppData')
    # okay, what if for some reason winreg is missing? would we want to allow ctypes?
    # default case, look for name in environ...
    # NOTE(review): '.config' is appended for APPDATA as well as HOME --
    # on Windows that yields %APPDATA%\.config, which is unusual; confirm
    # this is intended rather than returning %APPDATA% directly.
    for name in ['APPDATA', 'HOME']:
        if name in os.environ:
            return os.path.join(os.environ[name], '.config')
    # well, someone's being naughty, see if we can get ~ to expand to a directory...
    possible = os.path.abspath(os.path.expanduser('~/.config'))
    if os.path.exists(possible):
        return possible
    raise OSError(
        """Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""")
5,325,299