content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def crop_point_data_to_base_raster(raster_name, raster_directory, csv_file, EPSG_code = 0):
    """Create a new csv file with the point data cropped to the base raster.

    This can lower processing time when the point data covers a
    significantly larger area than the base raster.

    :param raster_name: name of the base raster (without extension).
    :param raster_directory: directory containing the raster and its .hdr file.
    :param csv_file: path to a csv file with `latitude`/`longitude` columns.
    :param EPSG_code: EPSG code of the raster's projected coordinate system.
    :return: path of the newly written, filtered csv file.
    """
    print("ok let me load your dataset and your hdr file")
    # Read the point data
    df = bamboo_bears.read_csv(csv_file)
    # Parse the ENVI .hdr file for the raster extent and resolution
    with open(raster_directory + raster_name + ".hdr", "r") as hdr_file:
        print("I got these")
        for line in hdr_file:
            # Flattened the original nested else/if chains into elif.
            if line[0:8] == "map info":
                info = line[12:-2]
                info = info.split(",")
                x_min = float(info[3])
                y_max = float(info[4])
                x_res = float(info[5])
                y_res = float(info[6])
                utm_zone = int(info[7])
                utm_hemisphere = info[8]
            elif line[0:7] == "samples":
                num_col = line.replace(" ", "").split("=")[1]
                print("there are " + str(num_col) + " columns")
                num_col = int(num_col)
            elif line[0:5] == "lines":
                num_lines = line.replace(" ", "").split("=")[1]
                print("there are " + str(num_lines) + " lines")
                num_lines = int(num_lines)
    # Now I calculate the size of the dem
    x_max = x_min + x_res * num_col
    y_min = y_max - y_res * num_lines
    # Conversion UTM to lat/long.  NOTE(review): Proj(init=...) is the
    # pyproj 1.x API, deprecated in pyproj>=2 — kept for compatibility.
    inProj = Proj(init='epsg:' + str(EPSG_code))
    outProj = Proj(init='epsg:4326')
    long_min, lat_min = transform(inProj, outProj, x_min, y_min)
    long_max, lat_max = transform(inProj, outProj, x_max, y_max)
    # Keep only the points strictly inside the raster extent
    df = df[df.longitude < long_max]
    df = df[df.latitude < lat_max]
    df = df[df.latitude > lat_min]
    df = df[df.longitude > long_min]
    # Build the output name once instead of duplicating the expression.
    out_name = csv_file[:-4] + "_" + raster_name + "_filtered.csv"
    df.to_csv(out_name, index=False)
    # return the name of the new csv file
    return out_name
0392d9633381948ef338c3233ed2f4b81d520678
23,000
def generate_schedule_report_data(pools_info, pools_allocated_mem):
    """Generate the schedule report data.

    :param pools_info: (dict) The information about the configuration and
        statistics of the pools participating in the scheduling.
    :param pools_allocated_mem: (dict) The allocated memory of the pools
        participating in the scheduling.
    :return: (DataFrame) A DataFrame object of report data.
    """
    columns = [ReportColumn.RESOURCE_POOL, ReportColumn.MEM_BEFORE_SCHEDULE,
               ReportColumn.MEM_AFTER_SCHEDULE, ReportColumn.MEM_MOVED,
               ReportColumn.MEM_USED, ReportColumn.MEM_LACK,
               ReportColumn.QUERY_NUMBER, ReportColumn.WORK_TIME,
               ReportColumn.QUEUED_TIME, ReportColumn.WEIGHT,
               ReportColumn.MIN_MEM, ReportColumn.MAX_MEM]
    rows = []
    for pool_info in pools_info.values():
        stat = pool_info.pool_stat
        mem_before = int(convert_mem_unit(pool_info.current_mem))
        allocated = pools_allocated_mem.get(pool_info.pool_name,
                                            pool_info.current_mem)
        mem_after = int(convert_mem_unit(allocated))
        mem_moved = int(convert_mem_unit(allocated - pool_info.current_mem))
        mem_lack = int(convert_mem_unit(stat.wait_mem_avg))
        # When no memory was lacking, report the average used memory;
        # otherwise the pool was saturated, so report its full size.
        if mem_lack == 0:
            mem_used = int(convert_mem_unit(stat.used_mem_avg))
        else:
            mem_used = int(convert_mem_unit(pool_info.current_mem))
        rows.append([pool_info.pool_name, mem_before, mem_after, mem_moved,
                     mem_used, mem_lack, stat.query_total,
                     int(stat.run_secs), int(stat.wait_secs),
                     pool_info.weight,
                     int(convert_mem_unit(pool_info.min_mem)),
                     int(convert_mem_unit(pool_info.max_mem))])
    return pd.DataFrame(rows, columns=columns)
e9fb9f517c1fe29d9f4c867b416969374f4acd36
23,001
def create_feature_rule_json(device, feature="foo", rule="json"):
    """Create a Feature/Rule mapping and return the rule.

    :param device: device whose platform the rule is bound to.
    :param feature: slug/name for the ComplianceFeature to get or create.
    :param rule: NOTE(review): currently unused — the original code
        immediately overwrote this parameter with the new ComplianceRule.
        Kept in the signature for backward compatibility.
    :return: the saved ComplianceRule instance.
    """
    feature_obj, _ = ComplianceFeature.objects.get_or_create(slug=feature, name=feature)
    # Use a distinct local name instead of shadowing the `rule` parameter.
    compliance_rule = ComplianceRule(
        feature=feature_obj,
        platform=device.platform,
        config_type=ComplianceRuleTypeChoice.TYPE_JSON,
        config_ordered=False,
    )
    compliance_rule.save()
    return compliance_rule
985dfccab39c54478ba36f10020779dbd1b6b466
23,002
def default_sv2_sciencemask():
    """Returns default mask of bits for science targets in the SV2 survey.

    (The original docstring said "SV1", but the function reads `sv2_mask`.)
    """
    sciencemask = 0
    # OR together the bits of every science target class.
    for target_class in ("LRG", "ELG", "QSO", "BGS_ANY", "MWS_ANY", "SCND_ANY"):
        sciencemask |= sv2_mask[target_class].mask
    return sciencemask
cf6b45d069ab8538350d35ce28d8fae4ed6525b2
23,003
def solver_softmax(K, R):
    """Choose an arm via softmax over mean past rewards.

    K = the number of arms (domains)
    R = the sequence of past rewards: dict mapping arm index -> iterable
        of rewards observed for that arm.

    :return: dict mapping each arm index to 1.0 if selected else 0.0.
    """
    means = np.zeros(K, dtype=float)
    for i, r in R.items():
        means[i] = np.mean(r)
    # Subtract the max before exponentiating: mathematically identical,
    # but avoids overflow in np.exp for large mean rewards.
    shifted = np.exp(means - means.max())
    probs = shifted / shifted.sum()
    si = np.random.choice(np.arange(0, K, 1), size=1, p=probs)[0]
    index = {i: 0.0 for i in range(K)}
    index[si] = 1.0
    return index
3ac8984f70c8594f48b00df4d9b15e69dad416ba
23,004
def mapview(request):
    """Render the map page with the base context."""
    return render(request, 'map.html', context=basecontext(request, 'map'))
9c03377c3d047b1672c4ac1972e5552ecdc7488a
23,005
def adapt_coastdat_weather_to_pvlib(weather, loc):
    """
    Adapt the coastdat weather data sets to the needs of the pvlib.

    Parameters
    ----------
    weather : pandas.DataFrame
        Coastdat2 weather data set.
    loc : pvlib.location.Location
        The coordinates of the weather data point.

    Returns
    -------
    pandas.DataFrame : Adapted weather data set.

    Examples
    --------
    >>> cd_id=1132101
    >>> cd_weather=fetch_coastdat_weather(2014, cd_id)
    >>> c=fetch_data_coordinates_by_id(cd_id)
    >>> location=pvlib.location.Location(**getattr(c, '_asdict')())
    >>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
    >>> 'ghi' in cd_weather.columns
    False
    >>> 'ghi' in pv_weather.columns
    True
    """
    adapted = pd.DataFrame(weather.copy())
    # Kelvin -> Celsius, as expected by pvlib.
    adapted["temp_air"] = adapted.temp_air - 273.15
    # Global horizontal irradiance = direct horizontal + diffuse horizontal.
    adapted["ghi"] = adapted.dirhi + adapted.dhi
    solar_position = pvlib.solarposition.get_solarposition(
        adapted.index, loc.latitude, loc.longitude
    )
    adapted["dni"] = pvlib.irradiance.dni(
        adapted["ghi"],
        adapted["dhi"],
        solar_position.zenith,
        clearsky_dni=loc.get_clearsky(adapted.index).dni,
    )
    return adapted
01a7c4340ed2542bb2fe624d6a02e4c82f3ff984
23,006
def bprop_distribute(arr, shp, out, dout):
    """Backpropagator for primitive `distribute`."""
    # Gradient w.r.t. the array: reduce the output gradient back to the
    # original shape; the shape argument itself gets a zero gradient.
    grad_arr = array_reduce(scalar_add, dout, shape(arr))
    grad_shp = zeros_like(shp)
    return (grad_arr, grad_shp)
5bfd9f1e6ec3b50e4fd13a3a26466ee57e7f084e
23,007
def ids_to_non_bilu_label_mapping(labelset: LabelSet) -> BiluMappings:
    """Mapping from ids to BILU and non-BILU mapping.

    This is used to remove the BILU labels to regular labels.

    NOTE(review): this function assigns `non_bilu_label_to_bilu_ids` and
    `non_bilu_label_to_id` as *class* attributes on `BiluMappings` and
    returns the class itself, so every call mutates shared global state —
    confirm callers do not rely on per-call instances.
    """
    # Label names in id order, e.g. ["B-upper_bound", "I-upper_bound", ...].
    target_names = list(labelset["ids_to_label"].values())
    # Strip the BILU prefix: "B-upper_bound" -> "upper_bound".
    wo_bilu = [bilu_label.split("-")[-1] for bilu_label in target_names]
    non_bilu_mapping = bilu_to_non_bilu(wo_bilu)
    BiluMappings.non_bilu_label_to_bilu_ids = {}
    BiluMappings.non_bilu_label_to_id = {}
    for target_name, labels_list in non_bilu_mapping.items():
        # 'upper_bound': ([1, 2, 3, 4], 1)
        BiluMappings.non_bilu_label_to_bilu_ids[target_name] = labels_list, labels_list[0]
        # 'upper_bound': 1
        BiluMappings.non_bilu_label_to_id[target_name] = labels_list[0]
    return BiluMappings
ed6b42784661a7db693a1ea5ba65e9a1f830a46a
23,008
def generate_input_types():
    """Return the list of input (raw material) types used in the factory.

    :return: list of input type names, in their canonical order.
    """
    return [
        "Angle_irons", "Tubes", "Channels", "Mig_wire", "Argon_gas",
        "Galvanised_sheets", "Budget_locks", "Welding_rods", "Body_filler",
        "Grinding_discs", "Drill_bits", "Primer", "Paints", "Thinner",
        "Sand_paper", "Masking_tapes", "Carpet", "Pop_rivets",
        "Electrical_wires", "Bulbs", "Switch", "Insulation_tapes",
        "Fasteners", "Adhesives", "Reflectors", "Accessories", "Rubbers",
        "Aluminum_mouldings", "Glasses", "Window_locks",
    ]
d9e10624daaf5dae92f15512c9b19c47af002139
23,009
def ase_tile(cell, tmat):
    """Create supercell from primitive cell and tiling matrix

    Args:
      cell (pyscf.Cell): cell object
      tmat (np.array): 3x3 tiling matrix e.g. 2*np.eye(3)
    Return:
      pyscf.Cell: supercell
    """
    # The import lives inside the try so a missing "ase"/"qharv" dependency
    # produces the friendly error below.  (The original had been mangled:
    # the import was hoisted above the def, leaving an empty `try:` body —
    # a SyntaxError.)
    try:
        from qharv.inspect.axes_elem_pos import ase_tile as atile
    except ImportError:
        msg = 'tiling with non-diagonal matrix require the "ase" package'
        raise RuntimeError(msg)
    # get crystal from cell object
    axes = cell.lattice_vectors()
    elem = [atom[0] for atom in cell._atom]
    pos = cell.atom_coords()
    axes1, elem1, pos1 = atile(axes, elem, pos, tmat)
    # re-make cell object
    cell1 = cell.copy()
    cell1.atom = list(zip(elem1, pos1))
    cell1.a = axes1
    # !!!! how to change mesh ????
    # Scale the FFT mesh by the diagonal tiling factors.
    ncopy = np.diag(tmat)
    cell1.mesh = np.array([ncopy[0]*cell.mesh[0],
                           ncopy[1]*cell.mesh[1],
                           ncopy[2]*cell.mesh[2]])
    cell1.build(False, False, verbose=0)
    cell1.verbose = cell.verbose
    return cell1
d37d5b5d2cab42d10e7495724bd5cba4391c71e4
23,010
import os
import errno
import signal
from functools import wraps  # was missing: `wraps` is used below


def timeout(timeout_sec, timeout_callback=None):
    """Decorator for timing out a function after 'timeout_sec' seconds.

    To be used like, for a 7 seconds timeout:
        @timeout(7, callback)
        def foo():
            ...

    Args:
        timeout_sec: duration to wait for the function to return before
            timing out
        timeout_callback: function to call in case of a timeout

    Returns:
        The decorated function's result, or None if it timed out.

    Note:
        Relies on SIGALRM, so it is Unix-only and must run in the main
        thread.
    """
    def decorator(f):
        def timeout_handler(signum, frame):
            raise TimeoutError(os.strerror(errno.ETIME))

        @wraps(f)
        def wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(timeout_sec)
            result = None
            try:
                result = f(*args, **kwargs)
            except TimeoutError:
                if timeout_callback:
                    timeout_callback()
            finally:
                # Always cancel the pending alarm.
                signal.alarm(0)
            return result
        return wrapper
    return decorator
c523b6d900109eebb8c4d4832bcae70c23fdeefc
23,011
def parse_chat_logs(input_path, user, self):
    """
    Get messages from a person, or between that person and yourself.
    "self" does not necessarily have to be your name.

    Args:
        input_path (str): Path to chat log HTML file
        user (str): Full name of person, as appears in Messenger app
        self (str): Your name, as appears in Messenger app

    Returns:
        list[str]: Each element is a message, i.e. what gets sent when the
            enter key is pressed
    """
    data = []
    current_user = None   # sender of the messages currently being read
    user_found = False    # True once `user` has spoken in this thread
    skip_thread = False   # True while inside a group (>2 people) thread
    # NOTE(review): assumes the "thread" div's text is a comma-separated
    # participant list and is never None — confirm against the export format.
    for element in etree.parse(input_path).iter():
        tag = element.tag
        content = element.text
        cls = element.get("class")
        if tag == "div" and cls == "thread":
            # Do not parse threads with more than two people
            skip_thread = content.count(",") > 1
            # Reset per-thread state when a new thread starts.
            if user_found:
                user_found = False
        elif tag == "span" and cls == "user" and not skip_thread:
            current_user = content
            if current_user == user:
                user_found = True
        elif tag == "p" and not skip_thread:
            # Keep messages from `user`, and messages from `self` only in
            # threads where `user` has already appeared.
            if (current_user == user) or (current_user == self and user_found):
                data.append(content)
    return data
b0d9a19d7f27589dac7757539c9d1595150ec0f4
23,012
def pressure_correction(pressure, rigidity):
    """
    function to get pressure correction factors, given a pressure time series
    and rigidity value for the station

    :param pressure: time series of pressure values over the time of the data
        observations
    :param rigidity: cut-off rigidity of the station making the observations
    :return: series of correction factors
    """
    # Reference pressure: mean over the observation period (NaN-safe).
    p_0 = np.nanmean(pressure)
    pressure_diff = pressure - p_0
    # g cm^-2. See Desilets & Zreda 2003
    mass_attenuation_length = attenuation_length(p_0, rigidity)
    # NOTE(review): the usual barometric correction is exp((P - P0) / L);
    # here the difference is *multiplied* by attenuation_length()'s return
    # value, which therefore presumably is an attenuation coefficient
    # (1/L) despite the name — confirm against its implementation.
    exponent = pressure_diff * mass_attenuation_length
    pressure_corr = np.exp(exponent)
    return pressure_corr
9a1baeacc7c954f8825dcd279518357534d84a06
23,013
import os


def get_file_size(file_name):
    """Return the size of ``file_name`` as a human-readable string.

    :param file_name: path of the file to measure.
    :return: the size formatted by ``humanize_bytes`` (presumably a string
        such as "1.2 MB" — the original docstring's ":rtype numeric" was
        inaccurate, since the raw byte count is passed through
        ``humanize_bytes`` before being returned).
    """
    # Renamed from `bytes` to avoid shadowing the builtin.
    num_bytes = os.path.getsize(file_name)
    return humanize_bytes(num_bytes)
203ee020108bf4d91f5494b9a56cb0dd78d64c20
23,014
import os


def get_project_path_info():
    """Return the project paths.

    ``project_path`` is the root directory of the whole git project;
    ``poseidon_path`` is the directory named "poseidon" inside it.
    """
    dirname = os.path.dirname
    # Three levels up from this file is the "poseidon" directory,
    # and its parent is the git project root.
    poseidon_dir = dirname(dirname(dirname(__file__)))
    project_dir = dirname(poseidon_dir)
    return {"project_path": project_dir, "poseidon_path": poseidon_dir}
758748df0befedc46ae913c0b9193d3ddb175d95
23,015
def prepare_data(song: dict) -> dict:
    """Prepare song data for database insertion to cut down on duplicates.

    (Fixes the "dataa" typo in the original docstring.)

    :param song: Song data with 'artist' and 'title' string values.
    :return: The same dict, with both fields upper-cased and stripped
        in place.
    """
    for key in ("artist", "title"):
        song[key] = song[key].upper().strip()
    return song
f8f8c9a3a0fe510cb3fb2e7d6d5bd361721337e7
23,016
def com(struct):
    """Calculates center of mass of the system."""
    coords = struct.get_geo_array()
    elements = struct.geometry['element']
    # Look up the IUPAC 2016 atomic mass for each atom symbol.
    masses = np.array(
        [atomic_masses_iupac2016[atomic_numbers[symbol]]
         for symbol in elements]
    ).reshape(-1)
    # Mass-weighted mean of the coordinates.
    weighted_sum = np.sum(coords * masses[:, None], axis=0)
    return weighted_sum / np.sum(masses)
239ff2d153739c80f6a4f723fc8060d7418a4862
23,017
def distance_matrix(values, metric):
    """Generate a matrix of distances based on the `metric` calculation.

    :param values: list of sequences, e.g. list of strings, list of tuples
    :param metric: function (value, value) -> number between 0.0 and 1.0"""
    progress = ProgressTracker(len(values))
    rows = []
    for idx, left in enumerate(values):
        progress.tick(idx)
        # Full row: distance from `left` to every value (incl. itself).
        rows.append([metric(left, right) for right in values])
    return np.array(rows)
339adc59d3b6198d9bc55d7c6504c5489e7770b2
23,018
import struct


def _Pack(content, offset, format_string, values):
    """Pack values to the content at the offset.

    NOTE(review): ''.join of slices with struct.pack output only works when
    `content` is a (byte-)string of the same type as struct.pack's return —
    this reads as Python 2 code (str == bytes); under Python 3 it would mix
    str and bytes. Confirm the target interpreter before reuse.

    Args:
        content: String to be packed.
        offset: Offset from the beginning of the file.
        format_string: Format string of struct module.
        values: Values to struct.pack.

    Returns:
        Updated content.
    """
    size = struct.calcsize(format_string)
    # Splice the packed bytes over content[offset:offset+size].
    return ''.join([content[:offset],
                    struct.pack(format_string, *values),
                    content[offset + size:]])
c164298e1e8963b20cfabcd38f3d8e44722751ae
23,019
import math


def get_rotation_matrix(orientation):
    """
    Get the rotation matrix for a rotation around the x axis of n radians

    Args:
        - (float) orientation in radian

    Return:
        - (np.array) rotation matrix for a rotation around the x axis
    """
    cos_t = math.cos(orientation)
    sin_t = math.sin(orientation)
    # Standard Rx(theta) matrix: the x axis is left unchanged.
    return np.array([[1, 0, 0],
                     [0, cos_t, -sin_t],
                     [0, sin_t, cos_t]])
0f795c974599382039106f28f20c4c48cdd77bb6
23,020
def machinesize(humansize):
    """Convert a human-readable size string (e.g. "5 MB") to bytes.

    :param humansize: string like "5 MB", the UNKNOWN_SIZE sentinel, or a
        plain number (anything without .split, e.g. int/float).
    :return: size in bytes as a float (0 for UNKNOWN_SIZE).
    """
    if humansize == UNKNOWN_SIZE:
        return 0
    try:
        size_str, size_unit = humansize.split(" ")
    except AttributeError:
        # Not a string: assume it is already a numeric byte count.
        return float(humansize)
    # Decimal (SI) exponents: value * 1000**exponent.
    unit_exponent = {
        'Byte': 0,
        'Bytes': 0,
        'kB': 1,
        'MB': 2,
        'GB': 3,
        'TB': 4,
        'PB': 5,
    }
    # Renamed the result variable: the original shadowed the function name.
    return float(size_str) * (1000 ** unit_exponent[size_unit])
8694d6ac3b2aa1b6624d2fea7a8ce4544f713c36
23,021
import networkx
import torch


def generate_erdos_renyi_netx(p, N):
    """Generate a random Erdos-Renyi graph.

    Returns the networkx graph together with its dense adjacency matrix
    as a float tensor.
    """
    graph = networkx.erdos_renyi_graph(N, p)
    adjacency = networkx.adjacency_matrix(graph).todense()
    return graph, torch.as_tensor(adjacency, dtype=torch.float)
fbb8e293a1b35958301c2e376a03c30012b0c33b
23,022
def kmc_algorithm(process_list):
    """Choose a process and a dwell time with the BKL kinetic Monte Carlo
    algorithm.

    :param process_list: List of elements dict(center, process, new molecule);
        each element exposes its rate via get_rate_constant(). The rate list
        built here is index-aligned with process_list.
    :return: (chosen process, time): the selected process/new molecule and
        the duration of the process. Two independent random numbers are
        drawn: one to pick the process, one for the time advance.
    """
    rate_constants = [proc.get_rate_constant() for proc in process_list]
    # Draw the process first, then the waiting time (preserves the order in
    # which random numbers are consumed).
    chosen_process = process_list[select_process(rate_constants)]
    elapsed_time = time_advance(rate_constants)
    return chosen_process, elapsed_time
5812498f83eede2f6de6f669bd87312705c13be3
23,023
def __matlab_round(x: float) -> int:
    """Round half up, working around MATLAB/Python rounding differences.

    Python's round() uses banker's rounding (round-half-to-even); this
    rounds .5 upward instead, matching MATLAB for positive values.

    NOTE(review): for negative halves this rounds toward +inf (e.g.
    -2.5 -> -2) whereas MATLAB's round(-2.5) == -3 — confirm intended.

    (The original signature had a default of ``x=None``, which could only
    raise TypeError when used; the argument is now required.)
    """
    if x - np.floor(x) < 0.5:
        return int(np.floor(x))
    return int(np.ceil(x))
d24298c9c072fc83a531fcd498f81c715accf229
23,024
def rayleightest(circ_data, dim='time'):
    """Returns the p-value for the Rayleigh test of uniformity

    This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting an unimodal deviation from uniformity. More
    precisely, it assumes the following hypotheses:
    - H0 (null hypothesis): The population is distributed uniformly around
      the circle.
    - H1 (alternative hypothesis): The population is not distributed
      uniformly around the circle.

    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        p-value

    Notes
    -----
    Weighted data is not currently supported: the `weights` hook is
    commented out below (the original docstring documented a `weights`
    parameter that the signature does not accept).
    """
    # Apply the scalar test along `dim`; dask='parallelized' lets this run
    # lazily on chunked arrays.
    p_value = xr.apply_ufunc(_rayleightest, circ_data,
                             #kwargs={'weights':weights},
                             input_core_dims=[[dim]],
                             dask='parallelized',
                             output_dtypes=[float])
    p_value.name = 'rayleigh_p'
    p_value.attrs.update(unit='', description='p-value for rayleigh test of uniformity')
    return p_value
74342adefe71f1e3193d52af6f716f20c538848f
23,025
from typing import Union
from pathlib import Path
import yaml


def load_cfg(cfg_file: Union[str, Path]) -> dict:
    """Load the PCC algs config file in YAML format with custom tag !join.

    Parameters
    ----------
    cfg_file : `Union[str, Path]`
        The YAML config file.

    Returns
    -------
    `dict`
        A dictionary object loaded from the YAML config file.
    """
    # [ref.] https://stackoverflow.com/a/23212524
    def join(loader, node):
        # Concatenate the tagged sequence into one string.
        parts = loader.construct_sequence(node)
        return ''.join(str(part) for part in parts)

    # Register the !join handler (global registration, re-done per call).
    yaml.add_constructor('!join', join)

    with open(cfg_file, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
c9137c5052adf8fa62913c352df2bfe9e79fc7ce
23,026
def pdist_triu(x, f=None):
    """Pairwise distance.

    Arguments:
        x: A set of points. shape=(n,d)
        f (optional): A kernel function that computes the similarity or
            dissimilarity between two vectors. The function must accept
            two matrices with shape=(m,d).

    Returns:
        Upper triangular pairwise distances in "unrolled" form.
    """
    if f is None:
        # Default kernel: row-wise Euclidean distance.
        def f(a, b):
            return np.sqrt(np.sum((a - b) ** 2, axis=1))
    # Indices of the strict upper triangle (diagonal excluded).
    rows, cols = np.triu_indices(x.shape[0], 1)
    return f(x[rows], x[cols])
19d8acb0b38b8dcb6b5b99a1bf7691e055c2ef6d
23,027
import buildingspy.simulate.Simulator as si
from buildingspy.io.outputfile import Reader
from scipy.interpolate import interp1d
from builtins import str
import getpass
import os
import tempfile


def simulate_in_dymola(heaPum, data, tableName, tableFileName):
    """ Evaluate the heat pump performance from the model in Dymola.

        :param heaPum: Heat pump model (object).
        :param data: Reference performance data (object).
        :param tableName: Name of the combiTimeTable.
        :param tableFileName: Name of the text file containing the
                              combiTimeTable.

        :return: Performance data of the modeled heat pump (object).

        .. note:: Performance data from the model is evaluated at the same
                  operating conditions (inlet water temperatures and mass flow
                  rates at the source and load sides) as in the reference
                  data.
    """
    # Find absolute path to buildings library
    packagePath = os.path.normpath(
        os.path.join(os.path.normpath(os.path.dirname(__file__)),
                     '..', '..', '..', '..', '..', '..'))
    # Create temporary directory for simulation files
    dirPrefix = tempfile.gettempprefix()
    tmpDir = tempfile.mkdtemp(prefix=dirPrefix + '-'
                              + 'HeatPumpCalibration' + '-'
                              + getpass.getuser() + '-')
    # Set parameters for simulation in Dymola
    calModelPath = heaPum.modelicaCalibrationModelPath()
    s = si.Simulator(calModelPath, 'dymola', outputDirectory=tmpDir,
                     packagePath=packagePath)
    s = heaPum.set_ModelicaParameters(s)
    # Nominal mass flow rates: the smallest observed flow on each side.
    m1_flow_nominal = min(data.flowSource)
    m2_flow_nominal = min(data.flowLoad)
    # Modelica expects forward slashes in file paths, even on Windows.
    tableFilePath = \
        str(os.path.join(tmpDir, tableFileName).replace(os.sep, '/'))
    s.addParameters({'m1_flow_nominal': m1_flow_nominal,
                     'm2_flow_nominal': m2_flow_nominal,
                     'calDat.fileName': tableFilePath})
    # Write CombiTimeTable for dymola
    data.write_modelica_combiTimeTable(tableName, tmpDir,
                                       tableFileName, heaPum.CoolingMode)
    # Simulation parameters: one simulated second per reference data point.
    s.setStopTime(len(data.EWT_Source))
    s.setSolver('dassl')
    # Kill the process if it does not finish in 2 minutes
    s.setTimeOut(120)
    s.showProgressBar(False)
    s.printModelAndTime()
    # s.showGUI(show=True)
    # s.exitSimulator(exitAfterSimulation=False)
    s.simulate()
    # Read results
    modelName = heaPum.modelicaModelName()
    ofr = Reader(os.path.join(tmpDir, modelName), 'dymola')
    (time1, QCon) = ofr.values('heaPum.QCon_flow')
    (time1, QEva) = ofr.values('heaPum.QEva_flow')
    (time1, P) = ofr.values('heaPum.P')
    # Resample each result at the mid-point of every one-second interval.
    t = [float(i) + 0.5 for i in range(len(data.EWT_Source))]
    f_P = interp1d(time1, P)
    P = f_P(t)
    f_QCon = interp1d(time1, QCon)
    QCon = f_QCon(t)
    f_QEva = interp1d(time1, QEva)
    QEva = f_QEva(t)
    # # Clean up
    # shutil.rmtree('calibrationModel')
    # Sign convention: capacity and heat rejection are reported positive.
    if heaPum.CoolingMode:
        Capacity = -QEva
        HR = QCon
    else:
        Capacity = QCon
        HR = -QEva
    dymRes = SimulationResults(data.EWT_Source, data.EWT_Load,
                               data.flowSource, data.flowLoad,
                               Capacity, HR, P, 'Modelica')
    return dymRes
22afe312252b8275c5b1dbbc271e6acacecc789f
23,028
def get_model_defaults(cls):
    """
    Return the default values declared on a Model class as a dict.

    If a default value is a callable, it is invoked with ``cls.db`` and the
    result is used instead. This is meant for simple factories such as
    datetime and uuid.

    Args:
        cls: (obj) : A Model class.

    Returns:
        defaults: (dict) : A dictionary of the default values.
    """
    defaults = {}
    for name, attr in cls.__dict__.items():
        # Only column-like attributes expose an `expression`.
        if not hasattr(attr, "expression"):
            continue
        default = attr.expression.default
        if default is None:
            continue
        arg = default.arg
        defaults[name] = arg(cls.db) if callable(arg) else arg
    return defaults
93c29af27446c558b165159cee4bb41bbb3cad4d
23,029
def add_response_headers(headers=None):
    """This decorator adds the headers passed in to the response"""
    extra_headers = headers or {}

    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Wrap whatever the view returns so headers can be attached.
            resp = make_response(f(*args, **kwargs))
            for name, value in extra_headers.items():
                resp.headers[name] = value
            return resp
        return decorated_function
    return decorator
9f26048dcff6de65d9a25ede6002c955f1551ff5
23,030
def restore(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin
    """Restore project.

    Args:
        project_id {int}: project id
    Returns:
        starlette.responses.JSONResponse
    """
    log_request(request, {'project_id': project_id})
    manager = ProjectManager()
    manager.restore(project_id)
    # Return the freshly restored project state.
    return JSONResponse(manager.get_project(project_id), HTTPStatus.OK)
0316132e42331ec9fe3b5c4ce73c364cc4726e2b
23,031
def _broadcast_arrays(x, y):
    """Broadcast arrays.

    Expands x (shape S) and y (shape T) to a common shape S+T by tiling,
    so both returned arrays are materialized copies of shape S+T.
    """
    # Ensure at least 1-D numpy arrays.
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    x_shape, y_shape = list(x.shape), list(y.shape)
    # Singleton axes that mimic the other array's shape.
    x_ones, y_ones = [1] * x.ndim, [1] * y.ndim
    # Append singleton axes, then tile along them.
    broad_x = np.tile(np.reshape(x, x_shape + y_ones), x_ones + y_shape)
    broad_y = np.tile(np.reshape(y, x_ones + y_shape), x_shape + y_ones)
    return broad_x, broad_y
8272e17a05803e529295ded70253b4c80615d426
23,032
from re import M


def Output(primitive_spec):
    """Mark a typespec as output.

    NOTE(review): the `from re import M` above looks mangled — `re.M` is the
    regex MULTILINE flag and has no `SignalDir` attribute, so the attribute
    access below would raise at runtime. `M` was presumably a project
    module (e.g. a metadata/signals module) in the original source; restore
    the correct import before using this code.
    """
    typespec = BuildTypespec(primitive_spec)
    # Tag the typespec's metadata with the OUTPUT signal direction.
    typespec.meta.sigdir = M.SignalDir.OUTPUT
    return typespec
737262c1414e7a33480a4512a9441d1b3eef45c8
23,033
from typing import Optional
from typing import Dict
from typing import Sequence
import os
import logging
from datetime import datetime


def save_predictions_df(predictions_df: np.ndarray,
                        directory: str,
                        last_observation_date: str,
                        forecast_horizon: int,
                        model_description: Optional[Dict[str, str]],
                        dataset_name: str,
                        dataset_index_key: str,
                        cadence: int,
                        extra_info: Optional[Dict[str, str]],
                        features_used: Optional[Sequence[str]] = None) -> str:
    """Saves a formatted predictions dataframe and updates a forecast indexer.

    Args:
      predictions_df: a dataframe of predictions, with columns
        ['date', 'site_id', 'prediction', 'target_name']
      directory: the base directory to store indexes and forecasts.
      last_observation_date: the date string corresponding to the last date
        of data that the model had access to during training.
      forecast_horizon: the maximum number of days into the future that the
        model predicts.
      model_description: optional description of the model.
      dataset_name: the name of the dataset.
      dataset_index_key: the unique key into the dataset index that contains
        the training dataset that the model was trained on.
      cadence: the cadence in days of the predictions. i.e. daily predictions
        have a cadence of 1, weekly predictions have a cadence of 7.
      extra_info: a dict of any additional information to store with the
        forecasts.
      features_used: the features that were used as inputs to produce the
        forecasts.

    Returns:
      the unique forecast ID that this forecast is saved under.
    """
    unique_key = base_indexing.get_unique_key()
    forecast_directory = os.path.join(directory, "forecasts")
    if not os.path.exists(forecast_directory):
        os.makedirs(forecast_directory)
    output_filepath = os.path.join(forecast_directory,
                                   f"forecasts_{unique_key}.csv")
    assert not os.path.exists(output_filepath), (
        f"Forecasts already exist at {output_filepath}")
    with open(output_filepath, "w") as fid:
        predictions_df.to_csv(fid, index=False)
    logging.info("Saved model forecasts with forecast ID %s to %s",
                 unique_key, output_filepath)
    extra_info = extra_info or {}
    extra_info["forecast_horizon"] = forecast_horizon
    if model_description is not None:
        extra_info["model_description"] = model_description
    # BUG FIX: with `from datetime import datetime` in scope, the original
    # `datetime.datetime.utcnow()` raised AttributeError; call utcnow()
    # directly on the imported class.
    current_datetime = datetime.utcnow()
    dataset_index = dataset_indexing.DatasetIndex(directory, dataset_name)
    dataset_location = dataset_index.get_entry(dataset_index_key)["file_location"]
    entry = build_entry(
        forecast_id=unique_key,
        file_location=output_filepath,
        dataset_name=dataset_name,
        last_observation_date=last_observation_date,
        creation_timestamp=current_datetime.strftime(constants.DATETIME_FORMAT),
        dataset_index_key=dataset_index_key,
        dataset_location=dataset_location,
        cadence=cadence,
        features_used=features_used,
        extra_info=extra_info)
    base_indexing.open_index_and_add_entry(
        directory,
        dataset_name,
        index_class=ForecastIndex,
        key=unique_key,
        entry=entry)
    return unique_key
3bb579f896419c7b2e94d08e40bbaefa424aefea
23,034
def partitionFromMask(mask):
    """
    Return the start and end address of the first substring without wildcards.

    NOTE(review): if `mask` is empty or consists only of '*' characters, the
    return statement references unbound locals and raises — confirm callers
    guarantee at least one non-wildcard character.
    """
    for i in range(len(mask)):
        # Skip leading wildcards until the first literal character.
        if mask[i] == '*':
            continue
        # Scan forward to the next wildcard (if any).
        for j in range(i+1, len(mask)):
            if mask[j] == '*':
                break
        else:
            # No further wildcard: the literal run extends to the end.
            if i+1 == len(mask):
                j = i+1
            else:
                j += 1
        break
    # End index is inclusive: last literal character before the wildcard.
    return i, (j-1)
1b77f68a223e36e8dc9ec4b70464924d6b1dbe4a
23,035
import logging


def add_documents_to_index(documents, index, retries=DEFAULT_NUM_RETRIES):
    """Adds a document to an index.

    Args:
      - documents: a list of documents. Each document should be a dictionary.
          Every key in the document is a field name, and the corresponding
          value will be the field's value. If there is a key named 'id', its
          value will be used as the document's id. If there is a key named
          'rank', its value will be used as the document's rank. By default,
          search results are returned ordered by descending rank. If there
          is a key named 'language_code', its value will be used as the
          document's language. Otherwise, constants.DEFAULT_LANGUAGE_CODE
          is used.
      - index: the name of the index to insert the document into, a string.
      - retries: the number of times to retry inserting the documents.

    Returns:
        returns a list of document ids of the documents that were added.

    Raises:
      - SearchFailureError: raised when the indexing fails. If it fails for
        any document, none will be inserted.
      - ValueError: raised when invalid values are given.
    """
    if not isinstance(index, python_utils.BASESTRING):
        raise ValueError(
            'Index must be the unicode/str name of an index, got %s'
            % type(index))
    index = gae_search.Index(index)
    gae_docs = [_dict_to_search_document(d) for d in documents]
    try:
        logging.debug('adding the following docs to index %s: %s',
                      index.name, documents)
        # deadline: seconds to wait for the put RPC before failing.
        results = index.put(gae_docs, deadline=5)
    except gae_search.PutError as e:
        logging.exception('PutError raised.')
        if retries > 1:
            for res in e.results:
                if res.code == gae_search.OperationResult.TRANSIENT_ERROR:
                    # Retry the whole batch if any result was transient.
                    new_retries = retries - 1
                    logging.debug('%d tries left, retrying.' % (new_retries))
                    return add_documents_to_index(
                        documents=documents,
                        index=index.name,
                        retries=new_retries)
        # At this point, either we don't have any tries left, or none of the
        # results has a transient error code.
        raise SearchFailureError(e)
    return [r.id for r in results]
07919f3cd5706f970df35a73e134e07398bcc033
23,036
def mask_to_bias(mask: Array, dtype: jnp.dtype) -> Array:
    """Converts a mask to a bias-like Array suitable for adding to other biases.

    Arguments:
        mask: <bool> array of arbitrary shape
        dtype: jnp.dtype, desired dtype of the returned array

    Returns:
        bias: array of the same shape as the input, with 0 in place of truthy
            values and -1e10 in place of falsy values of mask
    """
    # Pre-build both branches, then select element-wise on the mask.
    pass_through = jnp.full(mask.shape, 0).astype(dtype)
    blocked = jnp.full(mask.shape, -1e10).astype(dtype)
    return lax.select(mask, pass_through, blocked)
0e74765bde98fba50e224382e57acf35b7e35e55
23,037
import sys
import os


# NOTE: this is Python 2 code (`print >> sys.stderr` statements) and relies
# on the py library's deprecated `py.magic.autopath()` helper.
def external_dependency(dirname, svnurl, revision):
    """Check out (if necessary) a given fixed revision of a svn url.

    Returns True when the checkout is present and at the pinned revision,
    False when the checkout failed; exits the process if an existing
    checkout is at the wrong revision.
    """
    dirpath = py.magic.autopath().dirpath().join(dirname)
    # Marker file recording which svn revision was checked out.
    revtag = dirpath.join('-svn-rev-')
    if dirpath.check():
        if not revtag.check() or int(revtag.read()) != revision:
            print >> sys.stderr, ("Out-of-date benchmark checkout!"
                                  " I won't update it automatically.")
            print >> sys.stderr, ("To continue, move away or remove the "
                                  "%r directory." % (dirname,))
            sys.exit(1)
        return True
    # Pin the peg revision and the operative revision to the same number.
    CMD = "svn co -r%d %s@%d %s" % (revision, svnurl, revision, dirpath)
    print >> sys.stderr, CMD
    err = os.system(CMD)
    if err != 0:
        print >> sys.stderr, "* checkout failed, skipping this benchmark"
        return False
    revtag.write(str(revision))
    return True
5d2ace4acf46e90d0554fd45c39ddb9efeb99132
23,038
from typing import Union
from typing import Iterable
from typing import Optional
from typing import Dict


def optimize_clustering(
    data,
    algorithm_names: Union[Iterable, str] = variables_to_optimize.keys(),
    algorithm_parameters: Optional[Dict[str, dict]] = None,
    random_search: bool = True,
    random_search_fraction: float = 0.5,
    algorithm_param_weights: Optional[dict] = None,
    algorithm_clus_kwargs: Optional[dict] = None,
    evaluation_methods: Optional[list] = None,
    gold_standard: Optional[Iterable] = None,
    metric_kwargs: Optional[dict] = None,
) -> tuple:
    """
    Runs through many clusterers and parameters to get best clustering labels.

    Args:
        data: Dataframe with elements to cluster as index and examples as
            columns.
        algorithm_names: Which clusterers to try. Default is in
            variables_to_optimize. Can also put 'slow', 'fast' or 'fastest'
            for subset of clusterers. See hypercluster.constants.speeds.
        algorithm_parameters: Dictionary of str:dict, with parameters to
            optimize for each clusterer. Ex. structure:
            {'clusterer1':{'param1':['opt1', 'opt2', 'opt3']}}.
        random_search: Whether to search a random selection of possible
            parameters or all possibilities. Default True.
        random_search_fraction: If random_search is True, what fraction of
            the possible parameters to search, applied to all clusterers.
            Default 0.5.
        algorithm_param_weights: Dictionary of str: dictionaries. Ex format -
            {'clusterer_name': {'parameter_name':{'param_option_1':0.5,
            'param_option_2':0.5}}}.
        algorithm_clus_kwargs: Dictionary of additional kwargs per clusterer.
        evaluation_methods: Str name of evaluation metric to use. For options
            see hypercluster.categories.evaluations. Default silhouette.
        gold_standard: If using a evaluation needs ground truth, must provide
            ground truth labels. For options see
            hypercluster.constants.need_ground_truth.
        metric_kwargs: Additional evaluation metric kwargs.

    Returns:
        Tuple of (evaluation results DataFrame, combined labels DataFrame,
        dict of per-clusterer label DataFrames). NOTE(review): the original
        docstring listed "best labels" first, but the code returns the
        evaluations DataFrame first.
    """
    # NOTE(review): the default for algorithm_names is a dict view captured at
    # definition time; it is only iterated here, so this is safe as long as
    # variables_to_optimize is not mutated elsewhere.
    if algorithm_param_weights is None:
        algorithm_param_weights = {}
    if algorithm_clus_kwargs is None:
        algorithm_clus_kwargs = {}
    if algorithm_parameters is None:
        algorithm_parameters = {}
    if metric_kwargs is None:
        metric_kwargs = {}
    if evaluation_methods is None:
        evaluation_methods = inherent_metrics
    # Allow a category alias (e.g. 'fast') in place of an explicit list.
    if algorithm_names in list(categories.keys()):
        algorithm_names = categories[algorithm_names]

    clustering_labels = {}
    clustering_labels_df = pd.DataFrame()
    for clusterer_name in algorithm_names:
        label_df = (
            AutoClusterer(
                clusterer_name=clusterer_name,
                params_to_optimize=algorithm_parameters.get(clusterer_name, None),
                random_search=random_search,
                random_search_fraction=random_search_fraction,
                param_weights=algorithm_param_weights.get(clusterer_name, None),
                clus_kwargs=algorithm_clus_kwargs.get(clusterer_name, None),
            )
            .fit(data)
            .labels_
        )
        label_df.index = pd.MultiIndex.from_tuples(label_df.index)
        clustering_labels[clusterer_name] = label_df

        # Put all parameter labels into 1 for a big df
        label_df = label_df.transpose()
        cols_for_labels = label_df.index.to_frame()
        # Flatten clusterer name + parameter settings into a single index
        # label using the param_delim/val_delim separators.
        inds = cols_for_labels.apply(
            lambda row: param_delim.join(
                [clusterer_name]
                + ["%s%s%s" % (k, val_delim, v) for k, v in row.to_dict().items()]
            ),
            axis=1,
        )
        label_df.index = inds
        label_df = label_df.transpose()

        clustering_labels_df = pd.concat(
            [clustering_labels_df, label_df], join="outer", axis=1
        )

    # Score every labeling column with every requested evaluation method.
    evaluation_results_df = pd.DataFrame({"methods": evaluation_methods})
    for col in clustering_labels_df.columns:
        evaluation_results_df[col] = evaluation_results_df.apply(
            lambda row: evaluate_results(
                clustering_labels_df[col],
                method=row["methods"],
                data=data,
                gold_standard=gold_standard,
                metric_kwargs=metric_kwargs.get(row["methods"], None),
            ),
            axis=1,
        )

    return evaluation_results_df, clustering_labels_df, clustering_labels
7bf38c317a17c6803a12316eaa3960bb8198d701
23,039
def sql_fingerprint(query, hide_columns=True):
    """
    Simplify a query, taking away exact values and fields selected.

    Imperfect but better than super explicit, value-dependent queries.

    Args:
        query: raw SQL string to fingerprint.
        hide_columns: when True, the selected column lists are collapsed too.

    Returns:
        The simplified SQL as a string.
    """
    # `parse` yields one item per statement; only the first is fingerprinted.
    parsed_query = parse(query)[0]
    # Mutates the parsed statement tree in place.
    sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
    return str(parsed_query)
985f9a5afc9a9acddece29535954f25d29580b62
23,040
def get_account():
    """Return one account and cache account key for future reuse if needed"""
    global _account_key
    # Fast path: a previous call cached the datastore key, so fetch by key
    # instead of running a query.
    if _account_key:
        return _account_key.get()
    # Slow path: query for any single Account and remember its key.
    # NOTE(review): assumes at least one Account exists; acc.key would raise
    # AttributeError on an empty datastore -- confirm.
    acc = Account.query().get()
    _account_key = acc.key
    return acc
31f424c1c8e642f6c423f3c0e61896be4ad3b080
23,041
from typing import Sequence
from typing import Optional
import abc


def is_seq_of(
    seq: Sequence, expected_type: type, seq_type: Optional[type] = None
) -> bool:
    """Return True if ``seq`` is a sequence whose items all have a given type.

    Args:
        seq (Sequence): Sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence container type; defaults
            to ``abc.Sequence`` when omitted.
    """
    if seq_type is None:
        container_type = abc.Sequence
    elif isinstance(seq_type, type):
        container_type = seq_type
    else:
        raise TypeError(f"`seq_type` must be a valid type. But got: {seq_type}.")
    # Both the container and every item must match.
    return isinstance(seq, container_type) and all(
        isinstance(item, expected_type) for item in seq
    )
4937a23a91a507a18519109c1b473add0e263ca7
23,042
def mutate_single_residue(atomgroup, new_residue_name):
    """
    Mutates the residue into new_residue_name. The only atoms retained are
    the backbone and CB (unless the new residue is GLY). If the original
    resname == new_residue_name the residue is left untouched.
    """
    resnames = atomgroup.resnames()
    # Only mutate when the group spans exactly one residue name; otherwise
    # the group is returned unchanged.
    if len(resnames) == 1:
        if resnames[0] == new_residue_name:
            # Already the requested residue: nothing to do.
            edited_atomgroup = atomgroup
        else:
            if new_residue_name == 'GLY':
                # Glycine has no CB atom, so keep only the backbone.
                edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O"])
            else:
                edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O", "CB"])
            # Relabel every retained atom with the new residue name.
            for t in edited_atomgroup:
                t.resname = new_residue_name
    else:
        edited_atomgroup = atomgroup
    return edited_atomgroup
89ea175809fb518d778390867cb7f311343a06cc
23,043
from typing import Dict
from typing import OrderedDict
import warnings


def future_bi_end_f30_base(s: [Dict, OrderedDict]):
    """Futures 30-minute bi (stroke) ending factor.

    Scans the signal dict ``s`` for long-open and long-close factor patterns
    on the 30-minute level and returns the matching factor value, or
    Factors.Other when nothing matches.
    """
    v = Factors.Other.value
    # All three frequency levels must be present in the signal dict,
    # otherwise fall back to Other with a warning.
    for f_ in [Freq.F30.value, Freq.F5.value, Freq.F1.value]:
        if f_ not in s['级别列表']:
            warnings.warn(f"{f_} not in {s['级别列表']},默认返回 Other")
            return v

    # Long-open factors
    # --------------------------------------------------------------------------------------------------------------
    long_opens = {
        Factors.L2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BD0.value}"],
        ]
    }
    for name, factors in long_opens.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name

    # Long-close factors (evaluated after opens, so a close match wins)
    # --------------------------------------------------------------------------------------------------------------
    long_exits = {
        Factors.S2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BU0.value}"],
        ]
    }
    for name, factors in long_exits.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name
    return v
3a40cf658bf09ddea2347ce190c46f84c7cc1eb2
23,044
from typing import OrderedDict


def convert_pre_to_021(cfg):
    """Convert config standard 0.20 into 0.21.

    Revision 0.20 is the original standard, which lacked a revision tag.
    Top-level variables are moved into a 'variables' item, bare numeric
    thresholds and shorthand regional_range/profile_envelop entries are made
    explicit, and Ocean Sites names are mapped to CF standard names:

    - PRES -> sea_water_pressure
    - TEMP -> sea_water_temperature
    - PSAL -> sea_water_salinity

    Note: the per-variable sub-dicts of ``cfg`` are updated in place.
    """
    # Ocean Sites -> CF standard-name vocabulary.
    cf_names = {
        'PRES': 'sea_water_pressure',
        'TEMP': 'sea_water_temperature',
        'PSAL': 'sea_water_salinity',
    }

    pending = list(cfg.keys())
    output = OrderedDict()
    output['revision'] = '0.21'

    if 'inherit' in pending:
        output['inherit'] = cfg['inherit']
        pending.remove('inherit')

    # 'main' was renamed 'common'; accept either, preferring 'main'.
    if 'main' in cfg:
        output['common'] = cfg['main']
        pending.remove('main')
    elif 'common' in cfg:
        output['common'] = cfg['common']
        pending.remove('common')

    output['variables'] = OrderedDict()
    for varname in pending:
        var_cfg = cfg[varname]
        # Make bare numeric tests explicit: {test: 5} -> {test: {"threshold": 5}}
        for test in var_cfg:
            if isinstance(var_cfg[test], (int, float)):
                var_cfg[test] = {"threshold": var_cfg[test]}
        # Make the regions of regional_range explicit.
        if "regional_range" in var_cfg:
            var_cfg["regional_range"] = {"regions": var_cfg["regional_range"]}
        # Make the layers of profile_envelop explicit.
        if "profile_envelop" in var_cfg:
            var_cfg["profile_envelop"] = {"layers": var_cfg["profile_envelop"]}
        output['variables'][cf_names.get(varname, varname)] = var_cfg

    return output
874751b70481f50a1243791677a1c2ad0f354952
23,045
def get_alerts_alarms_object():
    """ helper function to get alert alarms

    Builds event/definition filters from the request query string, fetches
    the matching SystemEvent rows and returns their JSON representation
    (an empty list when nothing matches).
    """
    result = []
    # Get query filters, query SystemEvents using event_filters
    event_filters, definition_filters = get_query_filters(request.args)
    if event_filters is None:
        # No event filters supplied: return every alert/alarm.
        alerts_alarms = db.session.query(SystemEvent).all()
    else:
        alerts_alarms = db.session.query(SystemEvent).filter_by(**event_filters)
    # Process alert_alarm json output based on definition filters
    if alerts_alarms is not None:
        result_json = get_alert_alarm_json(alerts_alarms, definition_filters)
        if result_json is None:
            result = []
        else:
            result = result_json
    return result
7ab1c25cdaa30be0e70b110d47e7ea807713f404
23,046
import glob
import pickle


def data_cubes_combine_by_pixel(filepath, gal_name):
    """
    Grabs datacubes and combines them by pixel using addition, finding the
    mean and the median.

    Parameters
    ----------
    filepath : str
        Glob pattern matching the data-cube files. (The original docstring
        said "list of str", but the code treats this as a single glob string
        and later calls ``filepath.split('*')``.)
    gal_name : str
        galaxy name/descriptor

    Returns
    -------
    lamdas : :obj:'~numpy.ndarray'
        the wavelength vector for the cubes
    cube_added : :obj:'~numpy.ndarray'
        all cubes added
    cube_mean : :obj:'~numpy.ndarray'
        the mean of all the cubes
    cube_median : :obj:'~numpy.ndarray'
        the median of all the cubes
    header : FITS header object
        the header from the fits file (of the last file read)
    """
    #create list to append datas to
    all_data = []
    all_var = []
    all_lamdas = []

    #iterate through the filenames
    #they should all be from fits files, so we can just use that loading function
    for file in glob.glob(filepath):
        fits_stuff = read_in_data_fits(file)
        # Files may or may not carry a variance extension.
        if len(fits_stuff) > 3:
            lamdas, data, var, header = fits_stuff
            all_var.append(var)
        else:
            lamdas, data, header = fits_stuff

        #apply corrections to lambdas
        lamdas = air_to_vac(lamdas)
        lamdas = barycentric_corrections(lamdas, header)
        all_lamdas.append(lamdas)

        #apply Milky Way extinction correction
        data = milky_way_extinction_correction(lamdas, data)

        #append the data
        all_data.append(data)

    #check if var has the same number of cubes as the data, and if it doesn't, delete it
    if len(all_data) > len(all_var):
        del all_var

    #because the exposures are so close together, the difference in lamda between
    #the first to the last is only around 0.001A.  There's a difference in the
    #total length of about 0.0003A between the longest and shortest wavelength
    #vectors after the corrections.  So I'm taking the median across the whole
    #collection.  This does introduce some error, making the line spread function
    #of the averaged spectra larger.
    lamdas = np.median(all_lamdas, axis=0)

    #adding the data
    cube_added = np.zeros_like(all_data[0])

    for cube in all_data:
        cube_added += cube

    #finding the mean
    cube_mean = np.mean(all_data, axis=0)

    #finding the median
    cube_median = np.median(all_data, axis=0)

    #if all_var in locals():
        #adding the variances

    #pickle the results
    # NOTE(review): `date` is not imported in this chunk; presumably
    # `from datetime import date` at module level -- confirm.
    with open(filepath.split('*')[0]+'_'+gal_name+'_combined_by_pixel_'+str(date.today()),'wb') as f:
        pickle.dump([lamdas, cube_added, cube_mean, cube_median], f)
    f.close()

    return lamdas, cube_added, cube_mean, cube_median, header
1ae269ade8ac00b269fc52d474e038c9e2ca8d92
23,047
def usdm_bypoint_service(
    fmt: SupportedFormats,
):
    """Replaced above.

    Delegates to the shared handler and wraps its output in a Response with
    the media type registered for the requested format.
    """
    return Response(handler(fmt), media_type=MEDIATYPES[fmt])
8f957e8778aab81f94d52cbc07a78346f74ac0c2
23,048
import pandas as pd


def read_geonames(filename):
    """
    Parse geonames file to a pandas.DataFrame. File may be downloaded
    from http://download.geonames.org/export/dump/; it should be unzipped
    and in a "geonames table" format.

    Parameters
    ----------
    filename : str or file-like
        Path to the unzipped geonames dump file.

    Returns
    -------
    pandas.DataFrame
    """
    # Column names/dtypes for the geonames table layout live in the
    # module-level _GEONAMES_PANDAS_PARAMS dict.
    return pd.read_csv(filename, **_GEONAMES_PANDAS_PARAMS)
638fe3c02d61467fa47ee19e20f4f0022c8b57c2
23,049
def create_property_map(cls, property_map=None):
    """Build a property map for ``cls``.

    ``property_map`` may be:
      * falsy (``None``, empty dict, ...) -- a fresh empty dict is returned;
      * a callable -- it is invoked with ``cls`` and its result returned;
      * a mapping -- a shallow copy is returned so the caller's dict is
        never shared.
    """
    if not property_map:
        return {}
    if callable(property_map):
        return property_map(cls)
    return property_map.copy()
b67d0fdcd75c592f3443993f2948a2686e22322d
23,050
import Scientific
import Scientific.IO
import Scientific.IO.NetCDF


def readNetCDF(filename, varName='intensity'):
    """
    Reads a netCDF file and returns the varName variable.

    Parameters
    ----------
    filename : str
        Path to the netCDF file.
    varName : str
        Name of the variable to extract (default 'intensity').

    Returns
    -------
    Array of the variable's values converted to float.
    """
    ncfile = Scientific.IO.NetCDF.NetCDFFile(filename,"r")
    var1 = ncfile.variables[varName]
    # NOTE(review): `sp` is presumably scipy/numpy imported at module level;
    # it is not visible in this chunk -- confirm.
    data = sp.array(var1.getValue(),dtype=float)
    ncfile.close()
    return data
887b88f6cef8767be56d4bf828f048a2b7e09606
23,051
import click


def show(ctx, name_only, cmds, under, fields, format, **kwargs):
    """Show the parameters of a command"""
    # Default to every command with recorded parameters.
    cmds = cmds or sorted(config.parameters.readonly.keys())
    if under:
        cmds = [cmd for cmd in cmds if cmd.startswith(under)]
    with TablePrinter(fields, format) as tp, Colorer(kwargs) as colorer:
        for cmd_name in cmds:
            if name_only:
                click.echo(cmd_name)
            else:
                cmd = get_command_safe(cmd_name)

                def get_line(profile_name):
                    # Shell-quote each stored parameter so the printed line
                    # can be pasted back onto a command line.
                    return ' '.join(
                        [quote(p) for p in config.parameters.all_settings.get(profile_name, {}).get(cmd_name, [])])
                if config.parameters.readprofile == 'settings-file':
                    args = config.parameters.readonly.get(cmd_name, [])
                else:
                    values = {profile.name: get_line(profile.name)
                              for profile in config.all_enabled_profiles}
                    args = colorer.colorize(values, config.parameters.readprofile)
                if args == ['']:
                    # the command most likely has implicit settings and only
                    # explicit values are asked for. Skip it
                    continue
                if cmd is None:
                    LOGGER.warning('You should know that the command {} does not exist'.format(cmd_name))
                args = args or 'None'
                tp.echo(cmd_name, args)
6f4c959662cc75925cae82143913b7f2b7434a7b
23,052
def make_count_set(conds, r): """ returns an r session with a new count data set loaded as cds """ #r.assign('conds', vectors.StrVector.factor(vectors.StrVector(conds))) r.assign('conds', vectors.StrVector(conds)) r(''' require('DSS') cds = newSeqCountSet(count_matrix, conds) ''') return r
956ad076d1368cc5c0cf16365d6941157db1c664
23,053
def RunCommand(cmd, timeout_time=None, retry_count=3, return_output=True,
               stdin_input=None):
    """Spawn and retry a subprocess to run the given shell command.

    Args:
      cmd: shell command to run
      timeout_time: time in seconds to wait for command to run before aborting.
      retry_count: number of additional attempts after the first timeout
        (so up to retry_count + 1 attempts in total).
      return_output: if True return output of command as string. Otherwise,
        direct output of command to stdout.
      stdin_input: data to feed to stdin

    Returns:
      output of command
    """
    result = None
    while True:
        try:
            result = RunOnce(cmd, timeout_time=timeout_time,
                             return_output=return_output, stdin_input=stdin_input)
        except errors.WaitForResponseTimedOutError:
            # Only timeouts are retried; any other error propagates at once.
            if retry_count == 0:
                raise
            retry_count -= 1
            logger.Log("No response for %s, retrying" % cmd)
        else:
            # Success
            return result
cc1b4421a3a390bfa296faa279df0338985ff851
23,054
def get_clusters_low_z(min_mass = 10**4, basepath='/lustre/scratch/mqezlou/TNG300-1/output'):
    """Script to write the position of z ~ 0 large mass halos on file

    Loads the snapshot-98 group catalogue from `basepath`, selects halos with
    GroupMass above `min_mass` (in the catalogue's native mass units --
    confirm the unit convention for the chosen simulation) and writes their
    mass, R_200c and positions to 'clusters_TNG300-1.hdf5'. Returns 0.
    """
    halos = il.groupcat.loadHalos(basepath, 98, fields=['GroupMass', 'GroupPos','Group_R_Crit200'])
    ind = np.where(halos['GroupMass'][:] > min_mass)
    with h5py.File('clusters_TNG300-1.hdf5','w') as f :
        f['Mass'] = halos['GroupMass'][ind]
        f['Group_R_Crit200'] = halos['Group_R_Crit200'][ind]
        # ind is a tuple of index arrays; ind[0] indexes the halo axis while
        # the second subscript selects the x/y/z coordinate column.
        f['x'], f['y'], f['z'] = halos['GroupPos'][ind[0],0], halos['GroupPos'][ind[0],1], halos['GroupPos'][ind[0],2]
        f.close()
    return 0
5b868a8be11e109f126ad920b65d67984a7ffdca
23,055
import os


def create_env(idf_revision):
    """ Create ESP32 environment on home directory.

    Creates <root_directory>/<idf_revision> (plus the root directory itself
    if missing) and downloads the matching ESP-IDF tree and xtensa toolchain
    into it. An already-existing environment is left untouched. Always
    returns True.
    """
    if not os.path.isdir(root_directory):
        create_root_dir()
    fullpath = os.path.join(root_directory, idf_revision)
    if os.path.isdir(fullpath):
        print('Environment %s is already exists' % idf_revision)
    else:
        os.mkdir(fullpath)
        download_idf(idf_revision, fullpath)
        download_xtensa_toolchain(idf_revision, fullpath)
    return True
05451504b58a592ca0a20fb3896f172978054df6
23,056
def _serialize_key(key: rsa.RSAPrivateKeyWithSerialization) -> bytes:
    """Return the PEM bytes from an RSA private key"""
    # Unencrypted, traditional ("BEGIN RSA PRIVATE KEY") PEM encoding.
    return key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
f9064c1d7a1143d04e757d5ad3b3d7620e67e233
23,057
def ldns_key_algo_supported(*args):
    """Thin SWIG wrapper delegating to _ldns.ldns_key_algo_supported.

    NOTE(review): the original docstring ("LDNS buffer.") looks copy-pasted
    from another wrapper; presumably this reports whether a key algorithm is
    supported -- confirm against the ldns C API documentation.
    """
    return _ldns.ldns_key_algo_supported(*args)
e5184eb314aa315a852bb5bf0fe9b1ae01e4d9fe
23,058
def read_k_bytes(sock, remaining=0):
    """
    Read exactly `remaining` bytes from the socket.

    Blocks until the required bytes are available and returns the data read
    as raw bytes.

    Arguments
    ---------
    sock : Socket to read from.
    remaining : Number of bytes to read from socket.

    Raises
    ------
    ConnectionError
        If the peer closes the connection before `remaining` bytes arrive.
        (recv() returns b"" on a closed connection; the previous version
        looped forever in that case.)
    """
    chunks = []  # joined once at the end -- avoids quadratic bytes +=
    while remaining > 0:
        d = sock.recv(remaining)
        if not d:
            # Peer closed the connection mid-message.
            raise ConnectionError(
                "socket closed with %d bytes left to read" % remaining)
        chunks.append(d)
        remaining -= len(d)
    return b"".join(chunks)
3d75eaa43b84ac99ac37b4b1a048f1a6615901b1
23,059
def total_minutes(data):
    """Return the total number of minutes across all submitted talks.

    ``data`` maps a talk duration (in minutes) to the list of talks having
    that duration, so the total is duration * talk-count, summed over all
    durations.
    """
    return sum(duration * len(talks) for duration, talks in data.items())
c85f6ac0a1d58b67d1e53ae5ff87b8762e3d050c
23,060
def grid_to_vector(grid, categories):
    """Transform a grid of active classes into a vector of labels.

    In case several classes are active at time i, the label is set to
    'overlap'. See :func:`ChildProject.metrics.segments_to_grid` for a
    description of grids.

    :param grid: a NumPy array of shape ``(n, len(categories))``
    :type grid: numpy.array
    :param categories: the list of categories
    :type categories: list
    :return: the vector of labels of length ``n`` (e.g.
        ``np.array([none FEM FEM FEM overlap overlap CHI])``)
    :rtype: numpy.array
    """
    n_categories = grid.shape[1]
    # Index of the last active class per row; a row with no active class
    # maps to the final category (argmax of an all-zero row is 0).
    last_active = n_categories - np.argmax(grid[:, ::-1], axis=1) - 1
    lookup = np.vectorize(lambda idx: categories[idx])
    return lookup(last_active)
849c481ecf1dc608d7457875fef9b6f241d53e91
23,061
import optparse


def standard_script_options(usage, description):
    """Create option parser pre-populated with standard observation script options.

    Parameters
    ----------
    usage, description : string
        Usage and description strings to be used for script help

    Returns
    -------
    parser : :class:`optparse.OptionParser` object
        Parser populated with standard script options
    """
    # NOTE(review): `projections` and `default_proj` are module-level globals
    # not visible in this chunk -- confirm they are defined before this runs.
    parser = optparse.OptionParser(usage=usage, description=description)
    parser.add_option('--sb-id-code', type='string',
                      help='Schedule block id code for observation, '
                           'required in order to allocate correct resources')
    parser.add_option('-u', '--experiment-id',
                      help='Experiment ID used to link various parts of '
                           'experiment together (use sb-id-code by default, or random UUID)')
    parser.add_option('-o', '--observer',
                      help='Name of person doing the observation (**required**)')
    parser.add_option('-d', '--description', default='No description.',
                      help="Description of observation (default='%default')")
    parser.add_option('-f', '--centre-freq', type='float', default=1822.0,
                      help='Centre frequency, in MHz (default=%default)')
    parser.add_option('-r', '--dump-rate', type='float', default=1.0,
                      help='Dump rate, in Hz (default=%default)')
    # This option used to be in observe1, but did not make it to the
    # common set of options of observe1 / observe2
    # parser.add_option('-w', '--discard-slews', dest='record_slews', action='store_false', default=True,
    #                   help='Do not record all the time, i.e. pause while antennas are slewing to the next target')
    parser.add_option('-n', '--nd-params', default='coupler,10,10,180',
                      help="Noise diode parameters as '<diode>,<on>,<off>,<period>', "
                           "in seconds or 'off' for no noise diode firing (default='%default')")
    parser.add_option('-p', '--projection', type='choice', choices=projections,
                      default=default_proj,
                      help="Spherical projection in which to perform scans, "
                           "one of '%s' (default), '%s'" % (
                               projections[0], "', '".join(projections[1:])))
    parser.add_option('-y', '--dry-run', action='store_true', default=False,
                      help="Do not actually observe, but display script "
                           "actions at predicted times (default=%default)")
    parser.add_option('--stow-when-done', action='store_true', default=False,
                      help="Stow the antennas when the capture session ends")
    parser.add_option('--mode',
                      help="DBE mode to use for experiment, keeps current mode by default)")
    parser.add_option('--dbe-centre-freq', type='float', default=None,
                      help="DBE centre frequency in MHz, used to select coarse band for "
                           "narrowband modes (unchanged by default)")
    parser.add_option('--horizon', type='float', default=5.0,
                      help="Session horizon (elevation limit) in degrees (default=%default)")
    return parser
9d16aeb0481f03e5d19955744c7d29b1c42375b3
23,062
import sys


def connect():
    """ Connect to the PostgreSQL database server

    Liveness-style probe: opens a connection, runs SELECT version() and
    returns 1 on success, 0 on any failure. The connection is always closed.
    """
    conn = None
    try:
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        # Cluster-internal DNS name: <service>.<namespace>.svc.cluster.local
        # (sv, ns, db, port, user, password are module-level settings).
        url = ""+sv+"."+ns+"."+"svc.cluster.local"
        conn = psycopg2.connect(
            host=url,
            database=db,
            port=port,
            user=user,
            password=password
        )

        # create a cursor
        cur = conn.cursor()

        # execute a statement
        cur.execute('SELECT version()')

        # display the PostgreSQL database server version
        db_version = cur.fetchone()

        # close the communication with the PostgreSQL
        cur.close()
        return 1
    except (Exception, psycopg2.DatabaseError) as error:
        print('Liveness Failed')
        sys.stdout.flush()
        return 0
    finally:
        if conn is not None:
            conn.close()
593f06e72d38d4b958344ff1e82d5be1251b9632
23,063
def default_handler(request):
    """
    The default handler gets invoked if no handler is set for a request
    """
    # Echo back the 'Text' slot from the incoming Alexa request.
    return alexa.create_response(message=request.get_slot_map()["Text"])
ae4343c9de86141bb0b112123b9e420bbf1ac5c6
23,064
def ajax_available_variants_list(request):
    """Return variants filtered by request GET parameters.

    Response format is that of a Select2 JS widget.
    """
    # Only variants of published skills are offered.
    available_skills = Skill.objects.published().prefetch_related(
        'category', 'skill_type__skill_attributes')
    queryset = SkillVariant.objects.filter(
        skill__in=available_skills).prefetch_related(
            'skill__category', 'skill__skill_type__skill_attributes')

    search_query = request.GET.get('q', '')
    if search_query:
        # Match against SKU, variant name, or the parent skill's name.
        queryset = queryset.filter(
            Q(sku__icontains=search_query) |
            Q(name__icontains=search_query) |
            Q(skill__name__icontains=search_query))

    variants = [
        {'id': variant.id, 'text': variant.get_ajax_label(request.discounts)}
        for variant in queryset]
    return JsonResponse({'results': variants})
7093352368d975e3d3e663dd2541fc81a89ede0c
23,065
def jaccard2_coef(y_true, y_pred, smooth=SMOOTH):
    """Jaccard squared index coefficient

    :param y_true: true label
    :type y_true: int
    :param y_pred: predicted label
    :type y_pred: int or float
    :param smooth: smoothing parameter, defaults to SMOOTH
    :type smooth: float, optional
    :return: Jaccard coefficient
    :rtype: float
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # "Squared" soft union: |t|^2 + |p|^2 - t.p, hence the function name.
    union = K.sum(y_true_f * y_true_f) + K.sum(y_pred_f * y_pred_f) - intersection
    # smooth guards against division by zero on empty inputs.
    return (intersection + smooth) / (union + smooth)
dfd480814737a1d725874ec81287948dded3ba2e
23,066
def marginal_density_from_linear_conditional_relationship(
        mean1, cov1, cov2g1, Amat, bvec):
    """
    Compute the marginal density of P(x2).

    Given p(x1) normal with mean m1 and covariance C1, and p(x2|x1) normal
    with mean m_2|1 = A*x1 + b and covariance C_2|1, the marginal P(x2) is
    normal with

        m2 = A*m1 + b,    C2 = C_2|1 + A*C1*A.T

    Parameters
    ----------
    mean1 : np.ndarray (nvars1)
        The mean (m1) of the Gaussian distribution of x1
    cov1 : np.ndarray (nvars1, nvars1)
        The covariance (C1) of the Gaussian distribution of x1
    cov2g1 : np.ndarray (nvars2, nvars2)
        The covariance (C_2|1) of the Gaussian distribution of P(x2|x1)
    Amat : np.ndarray (nvars2, nvars1)
        The matrix (A) of the conditional distribution P(x2|x1)
    bvec : np.ndarray (nvars2)
        The vector (b) of the conditional distribution P(x2|x1)

    Returns
    -------
    mean2 : np.ndarray (nvars2)
        The mean (m2) of P(x2)
    cov2 : np.ndarray (nvars2, nvars2)
        The covariance (C2) of P(x2)
    """
    mean2 = np.dot(Amat, mean1) + bvec
    cov2 = cov2g1 + np.dot(np.dot(Amat, cov1), Amat.T)
    return mean2, cov2
3baa69910cd78a02bec5ba1517ca3a8ea189f845
23,067
def rowcount_fetcher(cursor):
    """ Return the rowcount returned by the cursor.

    DB-API cursors expose the affected/returned row count as ``rowcount``
    (may be -1 when undetermined, per PEP 249).
    """
    return cursor.rowcount
21b30665391aa16d158083ccb37149bd6ec0f548
23,068
from sys import version


def get_asdf_library_info():
    """
    Get information about pyasdf to include in the asdf_library entry
    in the Tree.
    """
    # NOTE(review): `version.version` requires a module exposing a `version`
    # attribute; `sys.version` is a plain str, so this presumably relied on a
    # package-local `version` module -- confirm the intended import.
    return Software({
        'name': 'pyasdf',
        'version': version.version,
        'homepage': 'http://github.com/spacetelescope/pyasdf',
        'author': 'Space Telescope Science Institute'
    })
e7cccee228eb315e747a8d1c1fd60a63f7e860c3
23,069
def view_hello_heartbeat(request): """Hello to TA2 with no logging. Used for testing""" # Let's call the TA2! # resp_info = ta2_hello() if not resp_info.success: return JsonResponse(get_json_error(resp_info.err_msg)) json_str = resp_info.result_obj # Convert JSON str to python dict - err catch here # - let it blow up for now--should always return JSON json_format_info = json_loads(json_str) if not json_format_info.success: return JsonResponse(get_json_error(json_format_info.err_msg)) json_info = get_json_success('success!', data=json_format_info.result_obj) return JsonResponse(json_info)
736c5b4d9832f16b6bac36abf2c1a6aa3443b768
23,070
import functools
import contextlib


def with_environment(server_contexts_fn):
    """Decorator factory that runs a test method inside an environment.

    ``server_contexts_fn`` is called (with no arguments) each time the
    decorated function runs; it must return an iterable of context managers.
    Each one is entered in order before the wrapped function executes, and
    all are exited (in reverse order) afterwards, even on failure.
    """
    def decorator_environment(fn):
        @functools.wraps(fn)
        def wrapper_environment(self):
            with contextlib.ExitStack() as env_stack:
                for server_ctx in server_contexts_fn():
                    env_stack.enter_context(server_ctx)
                fn(self)
        return wrapper_environment
    return decorator_environment
dbd4b435d920a08b97dd2921c534c14ce8d18acb
23,071
import ast


def get_number_of_unpacking_targets_in_for_loops(node: ast.For) -> int:
    """Get the number of unpacking targets in a `for` loop.

    The loop's ``target`` node carries the (possibly tuple-) unpacking
    expression; counting is delegated to the shared helper.
    """
    return get_number_of_unpacking_targets(node.target)
9ce94f93d18e87cddbd2e2bbbfb05b026901c0da
23,072
def dmp_degree(f, u):
    """Returns leading degree of `f` in `x_0` in `K[X]`.

    `f` is a dense multivariate polynomial in `u+1` variables; its degree in
    the outermost variable is one less than the length of the coefficient
    list. By convention the zero polynomial has degree -1.
    """
    if dmp_zero_p(f, u):
        return -1
    else:
        return len(f) - 1
da2df32019d1121c40424893773928225201e584
23,073
import requests


def fetch_remote_content(url: str) -> Response:
    """
    Executes a GET request to an URL.

    Returns the `requests.Response` unconditionally; no status check is
    performed, so callers must handle non-2xx responses themselves.
    """
    response = requests.get(url)  # automatically generates a Session object.
    return response
0b98315b8acf1f1a4ad7f177ef689af4c6a7ba63
23,074
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 decay_steps=[],
                 lr_decay_dict_file="",
                 lr_decay_ratio=0.1):
    """ optimization implementation

    Builds a learning-rate schedule, creates an Adam optimizer with global
    gradient-norm clipping (clip_norm=1.0), minimizes `loss`, and applies
    decoupled weight decay manually after the parameter update. Returns the
    scheduled learning-rate variable.

    NOTE(review): `decay_steps=[]` is a mutable default argument; it is only
    read here, but confirm no caller mutates it.
    """
    if warmup_steps > 0:
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler \
                .noam_decay(1 / (warmup_steps * (learning_rate ** 2)), warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        elif scheduler == 'manual_warmup_decay':
            scheduled_lr = manual_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps, decay_steps, lr_decay_ratio)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay' or 'manual_warmup_decay'")
    else:
        # No warmup: hold a constant LR in a persistable global variable.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)

    # Optional per-parameter LR multipliers loaded from a TSV file
    # (param_name <tab> decay_rate per line).
    lr_decay_dict = {}
    if lr_decay_dict_file != "":
        with open(lr_decay_dict_file) as f:
            for line in f:
                param, decay_rate = line.strip().split('\t')
                lr_decay_dict[param] = float(decay_rate)

    for param in fluid.default_main_program().block(0).all_parameters():
        if param.name in lr_decay_dict:
            print (param.name, lr_decay_dict[param.name])
            param.optimize_attr['learning_rate'] = lr_decay_dict[param.name]

    optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    optimizer._learning_rate_map[fluid.default_main_program(
    )] = scheduled_lr

    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))

    def exclude_from_weight_decay(name):
        """ Parameters not use weight decay """
        # LayerNorm weights and all bias parameters are exempt from decay.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False

    param_list = dict()
    for param in train_program.global_block().all_parameters():
        # Snapshot the pre-update parameter values so decay below is applied
        # to the weights as they were before the Adam step (decoupled decay).
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True

    _, param_grads = optimizer.minimize(loss)

    if weight_decay > 0:
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr * param.optimize_attr['learning_rate']
                fluid.layers.assign(output=param, input=updated_param)

    return scheduled_lr
f3b2e2311551d13d9e2930847afff38636ea2b27
23,075
def build_full_record_to(pathToFullRecordFile):
    """structure of full record:
    {commitID: {'build-time': time,
                files: {filename: {record},
                        filename: {record}}}}

    NOTE: `pathToFullRecordFile` is currently unused -- the on-disk cache was
    disabled (see commented-out code below) because reading/writing the repr
    consumed too much memory, so the record is always rebuilt in memory.
    """
    full_record = {}

    # this leads to being Killed by OS due to tremendous memory consumtion...
    #if os.path.isfile(pathToFullRecordFile):
    #    with open(pathToFullRecordFile, 'r') as fullRecordFile:
    #        print "loading full record from " + pathToFullRecordFile
    #        full_record = eval(fullRecordFile.read())
    #    print "read full record from " + pathToFullRecordFile
    #else:
    full_record = build_full_record()
    #    f = open(pathToFullRecordFile, 'w')
    #    try:
    #        f.write(repr(full_record) + "\n")
    #    except MemoryError as me:
    #        print me
    #        raise
    #    finally:
    #        print time.ctime()
    #        f.close()
    #    print "built full record, wrote to " + pathToFullRecordFile

    return full_record
8c9c070c14ffce848cb98a3e8a71b389418aadd0
23,076
def xsthrow_format(formula):
    """Rewrite a formula string to follow the xstool_throw convention for
    toy variables.

    Each known branch accessor indexed with ``[0]`` is re-indexed with
    ``[xstool_throw]``; all other text is left untouched.
    """
    replacements = (
        ('accum_level[0]', 'accum_level[xstool_throw]'),
        ('selmu_mom[0]', 'selmu_mom[xstool_throw]'),
        ('selmu_theta[0]', 'selmu_theta[xstool_throw]'),
    )
    for old, new in replacements:
        formula = formula.replace(old, new)
    return formula
b36183df77e681b967ce48a9164fe37861ffd11c
23,077
def scale_intensity(data, out_min=0, out_max=255):
    """Scale intensity of data in a range defined by [out_min, out_max],
    based on the 2nd and 98th percentiles.

    Using percentiles rather than min/max makes the stretch robust to
    outliers; values outside [p2, p98] are clipped by rescale_intensity
    (per the skimage documentation -- confirm the installed version).
    """
    p2, p98 = np.percentile(data, (2, 98))
    return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max))
57df2200fbefa4ab6f1c91f46063b1b1f147301e
23,078
def raises_regex_op(exc_cls, regex, *args): """ self.assertRaisesRegex( ValueError, "invalid literal for.*XYZ'$", int, "XYZ" ) asserts.assert_fails(lambda: int("XYZ"), ".*?ValueError.*izznvalid literal for.*XYZ'$") """ # print(args) # asserts.assert_fails(, f".*?{exc_cls.value.value}") invokable = _codegen.code_for_node( cst.Call( func=args[0].value, args=[ a.with_changes( whitespace_after_arg=cst.SimpleWhitespace(value="") ) for a in args[1:] ], ) ) regex = f'".*?{exc_cls.value.value}.*{regex.value.evaluated_value}"' return cst.parse_expression( f"asserts.assert_fails(lambda: {invokable}, {regex})" )
9b0e6aa0692d2285467578083f76c888de9874c1
23,079
def getParInfo(sourceOp, pattern='*', names=None,
               includeCustom=True, includeNonCustom=True):
    """Collect parameter info from *sourceOp*.

    Filters, applied in order:
      * pattern           -- pattern-match string passed to ``sourceOp.pars``
      * names             -- optional list of names to keep (None keeps all)
      * includeCustom     -- keep custom parameters
      * includeNonCustom  -- keep non-custom parameters

    Returns:
        dict mapping parameter name to
        [par.val, par.expr (or ''), par.mode name, par.bindExpr, par.default]
    """
    parInfo = {}
    for par in sourceOp.pars(pattern):
        if names is not None and par.name not in names:
            continue
        if par.isCustom and not includeCustom:
            continue
        if not par.isCustom and not includeNonCustom:
            continue
        parInfo[par.name] = [par.val,
                             par.expr if par.expr else '',
                             par.mode.name,
                             par.bindExpr,
                             par.default]
    return parInfo
01eafb065ef98e1fd4676898aeb8d0c5a7a74b9d
23,080
def generate_crontab(config):
    """Build a crontab line for running the backup job.

    Uses ``config.cron_schedule`` when present and valid, otherwise falls
    back to ``config.default_crontab_schedule``.
    """
    command = config.cron_command.strip()
    schedule = config.cron_schedule
    if not schedule:
        # No user-supplied schedule at all: take the default.
        return f'{config.default_crontab_schedule} {command}\n'
    schedule = strip_quotes(schedule.strip())
    if not validate_schedule(schedule):
        schedule = config.default_crontab_schedule
    return f'{schedule} {command}\n'
d958c47e0673d19dbd8d8eb2493995cdc2ada7ff
23,081
def bbox2wktpolygon(bbox):
    """Return the OGC WKT Polygon for a simple bbox list.

    :param bbox: sequence of four numbers or numeric strings
        ``[minx, miny, maxx, maxy]``
    :return: WKT POLYGON string tracing the box counter-clockwise from
        (minx, miny); invalid input yields a degenerate zero POLYGON.
    """
    try:
        minx = float(bbox[0])
        miny = float(bbox[1])
        maxx = float(bbox[2])
        maxy = float(bbox[3])
    # Catch only the failures that indexing/float() can raise instead of a
    # bare except, so genuine programming errors still surface.
    except (TypeError, ValueError, IndexError, KeyError):
        LOGGER.debug("Invalid bbox, setting it to a zero POLYGON")
        minx = 0
        miny = 0
        maxx = 0
        maxy = 0
    return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
        % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
60c79ff9cd3c59c1ebbc519d8d6e5864a0c70c59
23,082
def command_discord_profile(*_) -> CommandResult: """ Command `discord_profile` that returns information about Discord found in system ,(comma).""" # Getting tokens. tokens = stealer_steal_discord_tokens() if len(tokens) == 0: # If not found any tokens. # Error. return CommandResult("Discord tokens was not found in system!") # Getting profile. profile = stealer_steal_discord_profile(tokens) if profile: # Getting avatar. # TODO: Why there is some of IDs?. # Get avatar. if avatar := None and ("avatar" in profile and profile["avatar"]): avatar = "\n\n" + f"https://cdn.discordapp.com/avatars/636928558203273216/{profile['avatar']}.png" # Returning. return CommandResult( f"[ID{profile['id']}]\n[{profile['email']}]\n[{profile['phone']}]\n{profile['username']}" + avatar if avatar else "" ) # If can`t get. # Error. return CommandResult("Failed to get Discord profile!")
b86f02d9e8203b5e47e1558bdf3e00768c8655c5
23,083
import pandas as pd
import os


def aml(path):
    """Remission Times for Acute Myelogenous Leukaemia.

    The `aml` data frame has 23 rows and 3 columns, from a Stanford
    clinical trial (Embury et al., 1977) evaluating whether maintenance
    chemotherapy lengthens remission in acute myelogenous leukaemia.
    After reaching remission through chemotherapy, patients were
    randomized into a maintenance-chemotherapy group and a control group;
    the data here formed a preliminary analysis conducted in October 1974.

    Columns:

    `time`
        The length of the complete remission (in weeks).

    `cens`
        Right-censoring indicator. 1 means the patient relapsed, so
        `time` is the remission length; 0 means the remission length is
        right-censored (the patient left the study or was still in
        remission in October 1974).

    `group`
        Randomization group: 1 received maintenance chemotherapy,
        2 did not.

    The data were obtained from Miller, R.G. (1981) *Survival Analysis*.
    John Wiley.

    Args:
      path: str. Path to directory which either stores file or otherwise
        file will be downloaded and extracted there. Filename is
        `aml.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 23 rows and 3 columns and
      dictionary `metadata` of column headers (feature names).
    """
    path = os.path.expanduser(path)
    filename = 'aml.csv'
    csv_path = os.path.join(path, filename)
    if not os.path.exists(csv_path):
        url = 'http://dustintran.com/data/r/boot/aml.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='aml.csv',
                                   resume=False)
    frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
    return frame.values, {'columns': frame.columns}
85f000f2076494f41cfa40f215d8d5623a205b31
23,084
import attr


def to_dict(observation: Observation):
    """Convert an Observation object back to dict format."""
    raw = attr.asdict(observation)
    return _unprefix_attrs(raw)
4ffd5ad24fee6bd983d7cb85ac7d1b9eeb56e751
23,085
def _consolidate_extrapolated(candidates):
    """Get the best possible derivative estimate, given an error estimate.

    For every method in ``candidates``, the derivative estimate with the
    smallest Richardson-extrapolation error is selected element-wise over
    steps; the per-method winners are then compared element-wise again to
    produce a single consolidated estimate.

    See https://tinyurl.com/ubn3nv5 for corresponding code in numdifftools
    and https://tinyurl.com/snle7mb for an explanation of how errors of
    Richardson extrapolated derivative estimates can be estimated.

    Args:
        candidates (dict): Maps method name to a dict with entries
            "derivative" and "error" holding the per-step estimates.

    Returns:
        consolidated (np.ndarray): Element-wise best derivative estimate,
            same shape as the input derivative estimates.
        updated_candidates (tuple): Pair of dicts keyed by method —
            (best derivative per method, corresponding error per method).
    """
    # Step 1: per method, minimize the error over steps.
    best_der_by_method = {}
    best_err_by_method = {}
    for method, estimates in candidates.items():
        derivative, error = _select_minimizer_along_axis(
            estimates["derivative"], estimates["error"]
        )
        best_der_by_method[method] = derivative
        best_err_by_method[method] = error

    # Step 2: minimize over methods.
    stacked_der = np.stack(list(best_der_by_method.values()))
    stacked_err = np.stack(list(best_err_by_method.values()))
    consolidated, _ = _select_minimizer_along_axis(stacked_der, stacked_err)

    return consolidated, (best_der_by_method, best_err_by_method)
2641a56d852ed9e4065c7dfad4b1fd51ef581b91
23,086
import gzip
import re
import sys


def roget_graph():
    """
    Return the thesaurus graph from the roget.dat example in
    the Stanford Graph Base.

    Each data line has the form ``<number><name>: <tail> <tail> ...``;
    lines ending in a backslash continue onto the next line, and lines
    starting with ``*`` are comments.  Nodes are the leading word numbers
    and an edge head->tail is added for every cross-reference.
    """
    # Raw string avoids the invalid "\d" escape warning; compiling once
    # here avoids re-creating the pattern for every line.
    numfind = re.compile(r"^\d+")  # re to find the number of this word
    G = nx.DiGraph()
    oldline = ""  # buffer for backslash-continued lines
    # Context manager guarantees the gzip handle is closed (the original
    # leaked it).
    with gzip.open('roget_dat.txt.gz', 'r') as fh:
        for line in fh:
            line = line.decode()
            if line.startswith("*"):  # skip comments
                continue
            if line.startswith(" "):
                # this is a continuation line, append to the buffered one
                line = oldline + line
            if line.endswith("\\\n"):
                # continuation line, buffer, goto next
                oldline = line.strip("\\\n")
                continue
            (headname, tails) = line.split(":")
            head = numfind.findall(headname)[0]  # get the head word number
            G.add_node(head)
            for tail in tails.split():
                if head == tail:
                    # NOTE(review): despite the message, the original did
                    # NOT skip the self loop (no continue) — behavior kept;
                    # confirm intent before adding one.
                    print("skipping self loop", head, tail,
                          file=sys.stderr)
                G.add_edge(head, tail)
    return G
a109c9fcfdb784c56b19ec6a6474963acad023b5
23,087
import torch


def build_wideresnet_hub(
        num_class: int, name='wide_resnet50_2', pretrained=True):
    """Load a Wide-ResNet from the PyTorch hub and re-head it.

    The final fully-connected layer is replaced by a freshly initialized
    ``nn.Linear`` sized for ``num_class`` outputs.

    Expected input normalization (torchvision convention):
    mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]

    Args:
        num_class: number of output classes for the new classifier head.
        name (str, optional): hub model name. Defaults to
            'wide_resnet50_2'.
        pretrained (bool, optional): load pretrained weights. Defaults to
            True.

    Returns:
        The model with its ``fc`` layer replaced.
    """
    model = torch.hub.load('pytorch/vision:v0.6.0', name,
                           pretrained=pretrained)
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_class)
    return model
39f977a9ab368bd9fa15fb36c600c350afca7f53
23,088
def get_phoenix_model_wavelengths(cache=True):
    """
    Return the wavelength grid that the PHOENIX models were computed on,
    transformed into wavelength units in air (not vacuum).
    """
    url = ('ftp://phoenix.astro.physik.uni-goettingen.de/v2.0/'
           'HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')
    local_path = download_file(url, cache=cache, timeout=30)
    wavelengths_vacuum = fits.getdata(local_path)

    # The grid is tabulated at vacuum wavelengths.  For ground-based
    # observations convert to wavelengths in air, as described in
    # Husser 2013, Eqns. 8-10:
    sigma_2 = (10**4 / wavelengths_vacuum)**2
    refraction = (1.0 + 0.05792105 / (238.0185 - sigma_2) +
                  0.00167917 / (57.362 - sigma_2))
    return wavelengths_vacuum / refraction
ff5632086ffb3aa3eb6655c3ba18e182f0724bc4
23,089
import os


def get_skin_mtime(skin_name):
    """Return the last-modification time of the skin's skin.html file.

    :param skin_name: name of the skin whose html file is looked up
    :return: modification timestamp (seconds since the epoch)
    """
    html_path = get_skin_html_path(skin_name)
    return os.path.getmtime(html_path)
d54d2c1ed317d892057758a4890290cf5c2c3917
23,090
def accuracy_boundingbox(data, annotation, method, instance): ## NOT IMPLEMENTED
    """
    Calculate how far off each bounding box was.

    Runs ``method`` to get predicted boxes and compares them against the
    ground-truth boxes in a Pascal-VOC style annotation; a ground-truth
    box counts as "found" when all four of its corner coordinates lie
    within FOUND_THRESHOLD pixels of a predicted box's corners.

    Parameters
    ----------
    data: color_image, depth_image (unpacked into ``method``)
    annotation: pascal voc annotation (ElementTree element)
    method: function(instance, *data) returning predicted bounding boxes,
        each exposing a ``vertices`` iterable of (x, y) pairs
    instance: instance of object, passed through to ``method``

    Returns
    -------
    (int, int, int)
        boxes_found, boxes_missed, boxes_extra.

    Notes
    -----
    NOTE(review): one prediction can match several annotations (and vice
    versa), so boxes_found can be over-counted and boxes_missed /
    boxes_extra can go negative — presumably why this is flagged
    "NOT IMPLEMENTED"; confirm before relying on the counts.
    """
    FOUND_THRESHOLD = 5 # pixels -- max corner deviation still counted a match
    # run the detector on the input images to get the predicted boxes
    bounding_boxes = method(instance, *data)
    # tallies for the three returned counts
    boxes_found, boxes_missed, boxes_extra = 0, 0, 0
    for value in annotation.findall('object'):
        annotation_bounding_box = value.find('bndbox')
        # ground-truth corners (xmin, ymin, xmax, ymax)
        ax1, ay1, ax2, ay2 = [int(annotation_bounding_box.find(param).text) for param in ['xmin', 'ymin', 'xmax', 'ymax']]
        for bounding_box in bounding_boxes:
            X, Y, Z = [], [], []
            for x, y in bounding_box.vertices:
                X.append(x)
                Y.append(y)
            # axis-aligned extent of the predicted box
            X, Y = np.unique(X), np.unique(Y)
            bx1, by1, bx2, by2 = min(X), min(Y), max(X), max(Y)
            # each corner coordinate must be within the pixel threshold
            x1_close = bx1 - FOUND_THRESHOLD <= ax1 <= bx1 + FOUND_THRESHOLD
            y1_close = by1 - FOUND_THRESHOLD <= ay1 <= by1 + FOUND_THRESHOLD
            x2_close = bx2 - FOUND_THRESHOLD <= ax2 <= bx2 + FOUND_THRESHOLD
            y2_close = by2 - FOUND_THRESHOLD <= ay2 <= by2 + FOUND_THRESHOLD
            if all((x1_close, y1_close, x2_close, y2_close)):
                boxes_found += 1
    # annotations with no matching prediction / predictions with no match
    boxes_missed = len(annotation.findall('object')) - boxes_found
    boxes_extra = len(bounding_boxes) - boxes_found
    return boxes_found, boxes_missed, boxes_extra
78fa63d5e2cbdad843feaddd277b98886789a517
23,091
import requests


def http_get(url, as_json=False):
    """Perform an HTTP GET with automatic retries.

    Retries up to 5 times with exponential backoff (1, 2, 4, 8, ...
    seconds) on transient HTTP errors.

    Args:
        url: URL to fetch.
        as_json: when True, return the decoded JSON body instead of the
            raw bytes.

    Returns:
        Tuple ``(content, status_code)`` where content is ``bytes`` or,
        if ``as_json`` is set, the parsed JSON object.
    """
    retry_kwargs = dict(
        total=5,
        status_forcelist=[
            429,  # Too Many Requests
            500,  # Internal Server Error
            502,  # Bad Gateway
            503,  # Service Unavailable
            504   # Gateway Timeout
        ],
        backoff_factor=2  # wait 1, 2, 4, 8, ... seconds between retries
    )
    try:
        # urllib3 >= 1.26 renamed "method_whitelist" to "allowed_methods";
        # the old name was removed entirely in urllib3 2.0.
        retry_strategy = Retry(allowed_methods=["GET"], **retry_kwargs)
    except TypeError:
        # Fall back for urllib3 < 1.26 which only knows the old name.
        retry_strategy = Retry(method_whitelist=["GET"], **retry_kwargs)
    adapter = HTTPAdapter(max_retries=retry_strategy)
    http = requests.Session()
    http.mount("https://", adapter)
    http.mount("http://", adapter)
    response = http.get(url)
    content = response.json() if as_json else response.content
    return content, response.status_code
39fa8cf0af8e196f339ac00c9dd7ad00bbc7adb6
23,092
import logging def test_kbd_gpios(): """Test keyboard row & column GPIOs. Note, test only necessary on 50pin -> 50pin flex These must be tested differently than average GPIOs as the servo side logic, a 4to1 mux, is responsible for shorting colX to rowY where X == 1|2 and Y = 1|2|3. To test the flex traces I'll set the row to both high and low and examine that the corresponding column gets shorted correctly. Returns: errors: integer, number of errors encountered while testing """ errors = 0 # disable everything initially kbd_off_cmd = 'kbd_m1_a0:1 kbd_m1_a1:1 kbd_m2_a0:1 kbd_m2_a1:1 kbd_en:off' for col_idx in xrange(2): if not set_ctrls(kbd_off_cmd): logging.error('Disabling all keyboard rows/cols') errors += 1 break mux_ctrl = KBD_MUX_COL_IDX[col_idx] kbd_col = 'kbd_col%d' % (col_idx + 1) for row_idx in xrange(3): kbd_row = 'kbd_row%d' % (row_idx + 1) cmd = '%s1:%d %s0:%d ' % (mux_ctrl, row_idx>>1, mux_ctrl, row_idx & 0x1) cmd += 'kbd_en:on %s' % (kbd_col) (retval, ctrls) = get_ctrls(cmd, timeout=30) if not retval: logging.error('ctrls = %s', ctrls) errors += 1 for set_val in [GPIO_MAPS[ctrls[kbd_col]], ctrls[kbd_col]]: cmd = '%s:%s sleep:0.2 %s' % (kbd_row, set_val, kbd_col) (retval, ctrls) = get_ctrls(cmd) if not retval: logging.error('ctrls = %s', ctrls) errors += 1 if ctrls[kbd_col] != set_val: logging.error('After setting %s, %s != %s', kbd_row, kbd_col, set_val) errors += 1 return errors
237f26a5da5711c480ef9dadbaa46170ca97c884
23,093
def fields_for_model(model):
    """
    Return the fields for a schema that matches the provided nautilus
    model.

    Args:
        model (nautilus.model.BaseModel): The model to base the field
            list on

    Returns:
        (dict<field_name: str, graphqlType>): A mapping of field names
        to graphql types
    """
    # build the attribute arguments (no filters) with an explicit loop
    args = {}
    for field in model.fields():
        args[field.name.lower()] = convert_peewee_field(field)
    # use the field arguments, without the segments
    return args
9eb6f1a51513ff6b42ab720a1196cea1402cac23
23,094
def _landstat(landscape, updated_model, in_coords): """ Compute the statistic for transforming coordinates onto an existing "landscape" of "mountains" representing source positions. Since the landscape is an array and therefore pixellated, the precision is limited. Parameters ---------- landscape: nD array synthetic image representing locations of sources in reference plane updated_model: Model transformation (input -> reference) being investigated in_coords: nD array input coordinates Returns ------- float: statistic representing quality of fit to be minimized """ def _element_if_in_bounds(arr, index): try: return arr[index] except IndexError: return 0 out_coords = updated_model(*in_coords) if len(in_coords) == 1: out_coords = (out_coords,) out_coords2 = tuple((coords - 0.5).astype(int) for coords in out_coords) result = sum(_element_if_in_bounds(landscape, coord[::-1]) for coord in zip(*out_coords2)) ################################################################################ # This stuff replaces the above 3 lines if speed doesn't hold up # sum = np.sum(landscape[i] for i in out_coords if i>=0 and i<len(landscape)) # elif len(in_coords) == 2: # xt, yt = out_coords # sum = np.sum(landscape[iy,ix] for ix,iy in zip((xt-0.5).astype(int), # (yt-0.5).astype(int)) # if ix>=0 and iy>=0 and ix<landscape.shape[1] # and iy<landscape.shape[0]) ################################################################################ return -result
0205654ef8580a0d6731155d7d0c2b2c1a360e9c
23,095
def presence(label):
    """Higher-order function to test presence of a given label.

    Returns a function f(x, y) yielding 1.0 when *label* is contained in
    both or in neither of x and y, and 0.0 otherwise.
    """
    def _same_presence(x, y):
        return 1.0 * ((label in x) == (label in y))
    return _same_presence
49c7e0b4b7af69c808917af7ab4d6b56a7a4ef89
23,096
import os
import pkg_resources


def get_config(config_file=None, section=None):
    """Gets the user defined config and validates it.

    When ``config_file`` is None the file is resolved in this order:
      1. (only under pytest) the packaged test config;
      2. a project config in the current working directory;
      3. the system-wide project config;
      4. the packaged production default config.

    Args:
        config_file: Path to config file to use. If None, uses defaults.
        section (str): Name of section in the config to extract (i.e.,
            'fetchers', 'processing', 'pickers', etc.) If None, whole
            config is returned.

    Returns:
        dictionary: Configuration parameters.

    Raises:
        IndexError: If input section name is not found.
        OSError: If the resolved config file does not exist.
    """
    if config_file is None:
        # Try not to let tests interfere with actual system:
        if os.getenv("CALLED_FROM_PYTEST") is None:
            # Not called from pytest -- Is there a local project?
            local_proj = os.path.join(os.getcwd(), constants.PROJ_CONF_DIR)
            local_proj_conf = os.path.join(local_proj, "projects.conf")
            if os.path.isdir(local_proj) and os.path.isfile(local_proj_conf):
                # There's a local project
                config_file = __proj_to_conf_file(local_proj)
            else:
                # Is there a system project?
                sys_proj = constants.PROJECTS_PATH
                sys_proj_conf = os.path.join(sys_proj, "projects.conf")
                if os.path.isdir(sys_proj) and os.path.isfile(sys_proj_conf):
                    config_file = __proj_to_conf_file(sys_proj)
                else:
                    # Fall back on conf file in repository
                    data_dir = os.path.abspath(
                        pkg_resources.resource_filename("gmprocess", "data")
                    )
                    config_file = os.path.join(
                        data_dir, constants.CONFIG_FILE_PRODUCTION
                    )
        else:
            # When called by pytest
            data_dir = os.path.abspath(
                pkg_resources.resource_filename("gmprocess", "data")
            )
            config_file = os.path.join(data_dir, constants.CONFIG_FILE_TEST)

    if not os.path.isfile(config_file):
        fmt = "Missing config file: %s."
        raise OSError(fmt % config_file)
    else:
        with open(config_file, "r", encoding="utf-8") as f:
            # round-trip loader with preserved quotes so the config can be
            # written back without losing formatting
            yaml = YAML()
            yaml.preserve_quotes = True
            config = yaml.load(f)
        # schema validation happens before any section extraction
        CONF_SCHEMA.validate(config)
        if section is not None:
            if section not in config:
                raise IndexError(f"Section {section} not found in config file.")
            else:
                config = config[section]

    return config
3f52d30682cfc31d4f1239d0f544c834c98fd47e
23,097
def make_formula(formula_str, row, col, first_data_row=None):
    # noinspection SpellCheckingInspection
    """
    Build an Excel formula string from a "data-excel" directive.

    A cell will be written as a formula if the HTML tag has the attribute
    "data-excel" set.  This runs while the spreadsheet is being created:
    the cell knows its own position and the first data row.

    Supported directive strings:
        "SUM ROW A-C": sum the current row from A-C
        "SUM ROW A,C": sum cells A and C in the current row
        "SUM COL": sums current col from first_row to row - 1
        "FORMULA RAW <expr>": uses <expr> as is
        "FORMULA RELATIVE <expr>": resolves colmNNN/rowpNNN placeholders
            relative to the current cell, e.g. colm001 is one column to
            the left and rowp000 is the current row

    :param formula_str: the value of the "data-excel" attribute
    :param row: cell row (0-based)
    :param col: cell column (0-based)
    :param first_data_row: first data row, used by column formulas
    :return: the formula as a string ('' when the directive is unknown)
    """
    tokens = formula_str.split(' ')
    func = tokens[0]
    args = tokens[-1]
    formula = ''
    if func == 'SUM':
        kind = tokens[1]
        if kind == 'ROW':
            excel_row = row + 1  # Excel rows are 1-based
            if '-' in args:
                bounds = args.split('-')
                formula = '=SUM({}{}:{}{})'.format(
                    bounds[0], excel_row, bounds[1], excel_row)
            elif ',' in args:
                cells = [c.strip() + str(excel_row) for c in args.split(',')]
                formula = '=SUM({})'.format('+'.join(cells))
        elif kind == 'COL':
            formula = '=SUM({}:{})'.format(
                xl_rowcol_to_cell(first_data_row, col),
                xl_rowcol_to_cell(row - 1, col))
    elif func == 'FORMULA':
        kind = tokens[1]
        body = ' '.join(tokens[2:])
        if kind == 'RAW':
            formula = '=' + body
        elif kind == 'RELATIVE':
            formula = '=' + locate_cells(body, row, col)
    return formula
d9a41a2906151a050afa78e099278b7d5462faa9
23,098
def select(population, to_retain):
    """Select the fittest members of *population* to breed.

    The population is sorted ascending by fitness; the lower half is
    treated as females and the upper half as males.  The strongest
    ``to_retain // 2`` members of each sex are kept.

    :param population: iterable of comparable fitness values
    :param to_retain: total number of members to keep, split evenly
        between the sexes (an odd count keeps one fewer member)
    :return: tuple (strong_males, strong_females), each sorted ascending
    """
    sorted_pop = sorted(population)
    to_keep_by_sex = to_retain // 2
    members = len(sorted_pop) // 2
    females = sorted_pop[:members]
    males = sorted_pop[members:]
    # Guard the zero case explicitly: males[-0:] would return the WHOLE
    # list because -0 == 0, so retaining nobody used to retain everybody.
    strong_females = females[-to_keep_by_sex:] if to_keep_by_sex > 0 else []
    strong_males = males[-to_keep_by_sex:] if to_keep_by_sex > 0 else []
    return strong_males, strong_females
4dc1251f09e6bd976d170017bbd328563e9ef786
23,099