content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def _get_minimum_columns( nrows, col_limits, families, family_counts, random_state ):
    """Sample the columns required by the lower column limits.

    For every family whose entry in ``col_limits[0]`` is a tuple lower
    bound, draw exactly that many columns from the family so the bound is
    satisfied, recording the generated metadata and bumping the per-family
    counters.

    :param nrows: number of rows to sample for each column.
    :param col_limits: pair whose first element holds the per-family lower limits.
    :param families: column families, aligned with ``col_limits[0]``.
    :param family_counts: mutable mapping of family name -> columns drawn so far.
    :param random_state: RNG passed through to the families.
    :return: ``(columns, metadata, family_counts)``.
    """
    sampled_columns = []
    sampled_metadata = []
    for fam, lower_bound in zip(families, col_limits[0]):
        for _ in range(lower_bound):
            instance = fam.make_instance(random_state)
            sampled_columns.append(instance.sample(nrows, random_state))
            sampled_metadata.append(instance)
            family_counts[fam.name] += 1
    return sampled_columns, sampled_metadata, family_counts
56a0abc58a6e6c3356be08511e7a30da3c1c52e3
3,631,000
from typing import Union
from typing import Any
from typing import cast


def ensure_callable(x: Union[Any, Function]) -> Function:
    """Return *x* if it is callable, otherwise wrap it.

    Non-callables are wrapped in a function that ignores all arguments and
    simply returns the original value.
    """
    if callable(x):
        return cast(Function, x)

    def _constant(*_args: Any, **_kwargs: Any) -> Any:
        # Closure over the original non-callable value.
        return x

    return cast(Function, _constant)
5165a1b44de13f570454f223f3b9b1e799bd309d
3,631,001
from typing import List


def concat_dataframes(dataframes: List[pd.DataFrame],
                      remove_duplicates: bool = True,
                      keep: str = 'first'
                      ) -> pd.DataFrame:
    """Concatenate dataframes, optionally dropping rows with duplicated index keys.

    Args:
        dataframes (list): list of DataFrames to merge
        remove_duplicates (bool): remove duplicates.
        keep (str, bool): 'first', 'last', or False.

    Returns:
        df (pandas.DataFrame)
    """
    merged = pd.concat(dataframes)
    dup_mask = merged.index.duplicated(keep=keep)
    if np.any(dup_mask):
        print('Duplicates keys found:', np.sum(dup_mask))
        if remove_duplicates:
            print('Removing with keep=', keep)
            merged = merged[~dup_mask]
            print('Unique keys:', len(merged))
    return merged
fd21c0ca6e95bdc5e5c854ba0d3cd2e30f018966
3,631,002
def __split_pair(number: SnailfishNumber, left: bool) -> int | None:
    """Split the selected leaf of ``number`` into a pair when it is >= 10.

    :param number: Number node from the binary tree.
    :param left: True to inspect the left leaf, False for the right leaf.
    :return: 1 if a split happened, otherwise None.
    """
    leaf = number.left if left else number.right
    # Only plain integers >= 10 are splittable.
    if not isinstance(leaf, int) or leaf < 10:
        return None
    replacement = SnailfishNumber(
        parent=number,
        left=leaf // 2,          # floor half
        right=-(leaf // -2),     # ceiling half
        is_left=left,
    )
    if left:
        number.left = replacement
    else:
        number.right = replacement
    return 1
cd9a762fbad3148989fa2f823d83779ebd12e3ba
3,631,003
def def_pin_pout(m):
    # This could be better implemented.
    # In particular, it should not use m.p directly but m.find_component(name_var)
    """
    Defines power 'in' and 'out' variables on a Pyomo Block.

    Adds ``pmax``/``pmin`` parameters, non-negative ``pout``/``pin`` power
    variables, a binary direction variable ``u`` and the constraints tying
    them to the block's existing net power ``m.p`` over index set ``m.time``.

    :param m: Pyomo ``Block`` that already defines ``p`` (net power, indexed
        by ``time``) and a ``time`` index set.
    :return: None -- components are added to ``m`` in place.
    """
    assert isinstance(m, Block), f"argument 'm', must be an instance of Block, but is actually {type(m)}."
    assert hasattr(m, 'p'), f"model m does not have attribute named 'p'. This is needed. "
    # Mutable bounds so callers can tighten them after construction.
    # NOTE(review): pmin's doc says 'maximal power in' -- it is the upper
    # bound on inbound power despite the 'min' name; confirm naming intent.
    m.pmax = Param(initialize=UB, mutable=True, doc='maximal power out (kW)')
    m.pmin = Param(initialize=UB, mutable=True, doc='maximal power in (kW)')
    m.pout = Var(m.time, doc='power to the main grid', within=NonNegativeReals, initialize=None)
    m.pin = Var(m.time, doc='power from the main grid', within=NonNegativeReals, initialize=None)
    # u[t] selects the flow direction at time t (see _pmax/_pmin below).
    m.u = Var(m.time, doc='binary variable', within=Binary, initialize=None)

    def _power_balance(b, t):
        # Net power equals outflow minus inflow.
        return b.p[t] - b.pout[t] + b.pin[t] == 0

    def _pmax(b, t):
        # pout[t] <= pmax * u[t] : outbound power only when u[t] == 1.
        if b.pmax.value is None:
            return Constraint.Skip
        return b.pout[t] - b.u[t] * b.pmax <= 0

    def _pmin(b, t):
        # pin[t] <= pmin * (1 - u[t]) : inbound power only when u[t] == 0,
        # so in/out flows cannot be active simultaneously.
        if b.pmin.value is None:
            return Constraint.Skip
        return b.pin[t] + b.u[t] * b.pmin <= b.pmin

    m._pmin = Constraint(m.time, rule=_pmin, doc='low bound')
    m._pmax = Constraint(m.time, rule=_pmax, doc='up bound')
    m._p_balance = Constraint(m.time, rule=_power_balance, doc='power balance')
606d21f8b3204ca4394a891a260c88384e56c435
3,631,004
def get_user_choice() -> tuple[str, str]:
    """Prompt until the user picks two distinct headers; return them as (x, y)."""
    while True:
        # X
        console_display_choices(X_OPTIONS, "X")
        x_key = get_x_y_value(X_OPTIONS, "X")
        # Y
        console_display_choices(Y_OPTIONS, "Y")
        y_key = get_x_y_value(Y_OPTIONS, "Y")
        if X_OPTIONS[x_key] != Y_OPTIONS[y_key]:
            return X_OPTIONS[x_key], Y_OPTIONS[y_key]
        print("You cannot plot the same X and Y values.")
94ff044acfbd52752dbe625e2a3f43531692ac14
3,631,005
import torch
import test


def exp(args, model_type, vertex_label, b_true, X_train, X_test_ID, X_test_OOD):
    """Experiment function.

    Trains a model on ``X_train`` and then evaluates it on the
    in-distribution (ID) and out-of-distribution (OOD) test sets.

    :return: ``(model, w_est)`` -- the trained model and estimated weights.
    """
    # NOTE(review): ``test`` is imported as a module above but invoked as a
    # callable below; presumably the project exposes a callable ``test``
    # (or this should be ``from test import test``) -- confirm.
    torch.set_default_dtype(torch.double)
    # NOTE(review): ``np`` is assumed to be imported elsewhere in this file.
    np.set_printoptions(precision=3)
    model, w_est = train(
        X_train, vertex_label=vertex_label, b_true=b_true,
        model_type=model_type, args=args)
    # ID test
    test(
        model, X_test_ID, vertex_label=vertex_label, b_true=b_true,
        model_type=model_type, args=args, test_type='ID')
    # OOD test
    test(
        model, X_test_OOD, vertex_label=vertex_label, b_true=b_true,
        model_type=model_type, args=args, test_type='OOD')
    return model, w_est
245f0feea515cfa1c28c8d59c176a486ddf43675
3,631,006
def sliding_window(warped):
    """
    Using sliding window to find the lane line segments.

    This method will be used at the start of a video or when there is no lane
    line found on the previous video frame.

    Parameters:
        warped: binary, perspective-warped (bird's-eye) road image.

    Returns:
        Tuple of (out_img, left_poly, right_poly, plot_y, left_poly_para,
        right_poly_para, curvature): the annotated visualisation image, the
        fitted x positions of both lane lines, the y sample points, both
        second-order polynomial coefficient arrays and the curvature value
        from ``find_curvature``.
    """
    img_height, img_width = warped.shape[0], warped.shape[1]
    # Lane bases show up as the two peaks of the bottom-half column histogram.
    histogram = np.sum(warped[img_height//2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((warped, warped, warped))
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Choose the number of sliding windows (How to choose this???)
    n_windows = 9
    # Set height of windows
    window_height = img_height // n_windows
    # Identify the x and y positions of all nonzero pixels in the image.
    nonzero = warped.nonzero()  # This returns indices of nonzero value.
    X_nonzero = nonzero[1]
    Y_nonzero = nonzero[0]
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    min_pixel = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(n_windows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = img_height - (window + 1) * window_height
        win_y_high = img_height - (window) * window_height
        win_x_left_low = leftx_current - margin
        win_x_left_high = leftx_current + margin
        win_x_right_low = rightx_current - margin
        win_x_right_high = rightx_current + margin
        cv2.rectangle(out_img, (win_x_left_low, win_y_low), (win_x_left_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_x_right_low, win_y_low), (win_x_right_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels in x and y within the window
        left_lane_ind = ((Y_nonzero >= win_y_low) & (Y_nonzero < win_y_high) &
                         (X_nonzero >= win_x_left_low) & (X_nonzero < win_x_left_high)).nonzero()[0]
        right_lane_ind = ((Y_nonzero >= win_y_low) & (Y_nonzero < win_y_high) &
                          (X_nonzero >= win_x_right_low) & (X_nonzero < win_x_right_high)).nonzero()[0]
        left_lane_inds.append(left_lane_ind)
        right_lane_inds.append(right_lane_ind)
        # If you found > minpix pixels, recenter next window on their mean position.
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # it was an alias of the builtin int, which is used directly here.
        if len(left_lane_ind) > min_pixel:
            leftx_current = int(np.mean(X_nonzero[left_lane_ind]))
        if len(right_lane_ind) > min_pixel:
            rightx_current = int(np.mean(X_nonzero[right_lane_ind]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    left_x = X_nonzero[left_lane_inds]
    left_y = Y_nonzero[left_lane_inds]
    right_x = X_nonzero[right_lane_inds]
    right_y = Y_nonzero[right_lane_inds]
    # Fit a second order polynomial to each
    left_poly_para = np.polyfit(left_y, left_x, 2)
    right_poly_para = np.polyfit(right_y, right_x, 2)
    # Find curvature
    curvature = find_curvature(left_x, left_y, right_x, right_y)
    # Visualisation: draw the polynomial line
    plot_y = np.linspace(0, img_height-1, img_height)
    left_poly = left_poly_para[0] * plot_y**2 + left_poly_para[1] * plot_y + left_poly_para[2]
    right_poly = right_poly_para[0] * plot_y**2 + right_poly_para[1] * plot_y + right_poly_para[2]
    # Color the laneline pixels (left red, right blue)
    out_img[Y_nonzero[left_lane_inds], X_nonzero[left_lane_inds]] = [255, 0, 0]
    out_img[Y_nonzero[right_lane_inds], X_nonzero[right_lane_inds]] = [0, 0, 255]
    return (out_img, left_poly, right_poly, plot_y, left_poly_para, right_poly_para, curvature)
9c495a502b79e8708573a94a0015915a35d44e41
3,631,007
def get_config(n_layers, use_auto_acts, fp_quant):
    """Returns a ConfigDict instance for a WMT transformer.

    The ConfigDict is wired up so that changing a field at one level of the
    hierarchy changes the value of that field everywhere downstream in the
    hierarchy. For example, changing the top-level 'prec' parameter (eg,
    config.prec=4) will cause the precision of all layers to change. Changing
    the precision of a specific layer type (eg,
    config.mlp_block.dense_1.weight_prec=4) will cause the weight precision of
    all Dense1 layers to change, overriding the value of the global config.prec
    value.

    See config_schema_test.test_schema_matches_expected to see the structure
    of the ConfigDict instance this will return.

    Args:
      n_layers: Number of layers in the encoder and the decoder.
      use_auto_acts: Whether to use automatic clipping bounds for activations
        or fixed bounds. Unlike other properties of the configuration which can
        be overridden directly in the ConfigDict instance, this affects the
        immutable schema of the ConfigDict and so has to be specified before
        the ConfigDict is created.
      fp_quant: Whether to use floating point quantization. Defaults to False
        for integer quantization.

    Returns:
      A ConfigDict instance which parallels the hierarchy of TrainingHParams.
    """
    base_config = get_wmt_base_config(
        use_auto_acts=use_auto_acts, fp_quant=fp_quant)
    model_hparams = base_config.model_hparams
    # n_layers identical encoder blocks; each keeps live references into
    # base_config so top-level overrides propagate downstream.
    model_hparams.encoder = {
        "encoder_1d_blocks": [
            get_block_config(base_config, BlockKind.encoder)
            for _ in range(n_layers)
        ]
    }
    config_schema_utils.set_default_reference(model_hparams.encoder,
                                              base_config, "embedding")
    # Same wiring for the decoder stack.
    model_hparams.decoder = {
        "encoder_decoder_1d_blocks": [
            get_block_config(base_config, BlockKind.decoder)
            for _ in range(n_layers)
        ]
    }
    config_schema_utils.set_default_reference(model_hparams.decoder,
                                              base_config, "embedding")
    # The decoder's output projection ("logits") inherits from the shared
    # "dense" settings of the base config.
    config_schema_utils.set_default_reference(model_hparams.decoder,
                                              base_config, "logits",
                                              parent_field="dense")
    # Freeze the schema: no new fields can be added after this point.
    base_config.lock()
    return base_config
c3b30a30ed2e272cd5e9802508d3eab78a679b47
3,631,008
import torch


def get_train_loader(transform=None):
    """Build the CIFAR-10 training DataLoader.

    Args:
        transform (transform): Albumentations transform

    Returns:
        trainloader: DataLoader Object
    """
    if not transform:
        # No transform supplied: fetch the dataset with its defaults.
        trainset = Cifar10SearchDataset(root="~/data/cifar10", train=True,
                                        download=True)
    else:
        trainset = Cifar10SearchDataset(transform=transform)
    return torch.utils.data.DataLoader(trainset, batch_size=128,
                                       shuffle=True, num_workers=2)
c72222835c98b104fa4a04f4bf49de5850071365
3,631,009
def train_regression(train_data, model, criterion, optimizer, batch_size, device, scheduler=None, collate_fn=None):
    """Run one training epoch of a Pytorch regression model.

    Parameters
    ----------
    train_data : torch.utils.data.Dataset
        Pytorch dataset
    model: torch.nn.Module
        Pytorch Model
    criterion: function
        Loss function
    optimizer: torch.optim
        Optimizer
    batch_size : int
        Number of observations per batch
    device : str
        Name of the device used for the model
    scheduler : torch.optim.lr_scheduler
        Pytorch Scheduler used for updating learning rate
    collate_fn : function
        Function defining required pre-processing steps

    Returns
    -------
    Float
        Loss score
    Float:
        RMSE Score
    """
    model.train()  # training mode (enables dropout/batch-norm updates)
    running_loss = 0
    loader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
                        collate_fn=collate_fn)
    for features, targets in loader:
        optimizer.zero_grad()
        features, targets = features.to(device), targets.to(device)
        predictions = model(features)
        batch_loss = criterion(predictions, targets)
        running_loss += batch_loss.item()
        batch_loss.backward()
        optimizer.step()
        if scheduler:
            scheduler.step()
    # Average the accumulated batch losses over the dataset size.
    mean_loss = running_loss / len(train_data)
    return mean_loss, np.sqrt(mean_loss)
6a20c532137e32e1a56656b3cecf0c1686ec9f40
3,631,010
import re
import locale
import socket
import urllib.error
import urllib.request


def get_url_content(url):
    """Performs a HTTP GET on the url.

    The scheme defaults to ``http://`` when missing. Returns the decoded
    response body, or None if the url is invalid, unreachable, or times
    out (30 s).
    """
    # BUG FIX: 'import urllib' alone does not bind the 'urllib.request'
    # submodule; import it (and urllib.error, where URLError actually
    # lives) explicitly.
    if not re.search(r'^http', url):
        url = 'http://' + url
    try:
        req = urllib.request.urlopen(url, None, 30)
        try:
            # BUG FIX: locale.getlocale() may return (None, None); decode()
            # with None raises, so fall back to UTF-8.
            encoding = locale.getlocale()[1] or 'utf-8'
            res = req.read().decode(encoding)
        finally:
            # Close the connection even if reading/decoding fails.
            req.close()
    except urllib.error.URLError:
        return None
    except ValueError:
        # Malformed URL, or a UnicodeDecodeError (a ValueError subclass).
        return None
    except socket.timeout:
        return None
    return res
a6cebdb19eff1daa86e9f7aa18943c19854e5641
3,631,011
def _evaluate(cmd, dork):
    """Parse a command and execute it.

    Splits ``cmd`` into a verb and optional noun, resolves the verb against
    the module-level command tables and dispatches the resulting
    ``(method, arg)`` pair via ``dork``.

    NOTE(review): _CMDS/_MOVES/_META appear to map verbs to either a dict
    (noun -> action) or a tuple, and _ERRS maps error keys to (method, arg)
    pairs -- confirmed only by how they are used below.
    """
    # Keep at most one space-split: everything after the verb is the noun.
    cmd = cmd.strip().split(" ", 1)
    if cmd[0]:
        verb, *noun = cmd
        noun = noun[0] if noun else None
        # Look the verb up in each table in turn; unknown verbs resolve to
        # the "u" (unknown) error action.
        call = _CMDS.get(verb, _MOVES.get(verb, _META.get(verb, _ERRS["u"])))
        if isinstance(call, dict):
            # Dict-valued commands require a valid noun (e.g. a direction).
            method, arg = call.get(noun, _ERRS["which way"])
        elif call not in _ERRS.values():
            if verb == noun:
                # Degenerate "go go"-style repetition.
                method, arg = _ERRS["twice"]
            elif len(call) > 1:
                # Fully-specified (method, arg) pair: a noun is superfluous.
                if noun:
                    method, arg = _ERRS["which way"]
                else:
                    method, arg = call
            elif noun and len(call) == 1:
                # Single-method command: forward the user's noun as argument.
                method, arg = call[0], noun
            else:
                method, arg = call[0], None
        else:
            # The lookup already produced an error action.
            method, arg = call
    else:
        # Empty input.
        method, arg = _ERRS["?"]
    return dork(method, arg)
cfb6957cf1c10c18cc768c266992412c36222571
3,631,012
from collections import OrderedDict


def build_input_dict(feature_columns):
    """Build the Keras input dict from feature columns.

    :param feature_columns: list of feature-column objects (``DenseFeat``,
        ``SparseFeat`` or ``VarLenSparseFeat``).
    :return: ``OrderedDict`` mapping feature name -> ``keras.Input``; weighted
        variable-length features additionally get a float32 weight input.
    :raises ValueError: if a feature column has an unsupported type.
    """
    # BUG FIX: ``typing.OrderedDict`` is a generic alias and cannot be
    # instantiated ("Type OrderedDict cannot be instantiated"); the concrete
    # class lives in ``collections``.
    input_dict = OrderedDict()
    for fc in feature_columns:
        if isinstance(fc, DenseFeat):
            input_dict[fc.name] = keras.Input(shape=(fc.dimension,), name=fc.name, dtype=fc.dtype)
        elif isinstance(fc, SparseFeat):
            input_dict[fc.name] = keras.Input(shape=(1,), name=fc.name, dtype=fc.dtype)
        elif isinstance(fc, VarLenSparseFeat):
            input_dict[fc.name] = keras.Input(shape=(fc.maxlen,), name=fc.name, dtype=fc.dtype)
            if fc.weight_name is not None:
                # Per-position weights for the variable-length feature.
                input_dict[fc.weight_name] = keras.Input(shape=(fc.maxlen,), name=fc.weight_name, dtype='float32')
        else:
            raise ValueError('Invalid type in feature columns.')
    return input_dict
033a6cb0d73a9652a5a9ce83026a05a93756e85c
3,631,013
def factorial(n):
    """Return n! (the factorial of n).

    :param n: non-negative integer.
    :return: the product 1 * 2 * ... * n; ``factorial(0)`` is 1.
    :raises ValueError: if ``n`` is negative -- the factorial is undefined
        there (the original silently returned 1).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    fact = 1
    for i in range(1, n + 1):
        fact = fact * i
    return fact
b341682fbc13fd184af8551be37348bf36ec082c
3,631,014
def sigmoid(x):
    """Apply the sigmoid function element-wise.

    Parameters
    ----------
    x : tensor
        A Tensor with type float16, float32, float64, complex64, or complex128.

    Returns
    -------
        A Tensor with the same type as x.
    """
    # Build the primitive op and apply it in one step.
    return P.Sigmoid()(x)
314c13df8127e3bb602e3a3eab98b2c2b68fb82d
3,631,015
def conf_serializer(configuration: Configuration) -> str:
    """Serialize configuration to .conf files.

    Emits one ``key value`` line per property, terminated by a newline.
    """
    body = "\n".join(f"{key} {val}"
                     for key, val in configuration.Properties.items())
    return body + "\n"
8bf3104495cf849ad0b3576c3bf61482c51d04fe
3,631,016
def redness_greenness_response(C, e_s, N_c, N_cb):
    """
    Returns the redness / greenness response :math:`M_{rg}`.

    Parameters
    ----------
    C : array_like
        Colour difference signals :math:`C`.
    e_s : numeric or array_like
        Eccentricity factor :math:`e_s`.
    N_c : numeric or array_like
        Chromatic surround induction factor :math:`N_c`.
    N_cb : numeric or array_like
        Chromatic background induction factor :math:`N_{cb}`.

    Returns
    -------
    numeric or ndarray
        Redness / greenness response :math:`M_{rg}`.

    Examples
    --------
    >>> C = np.array([
    ...     -5.365865581996587e-05,
    ...     -0.000571699383647,
    ...     0.000625358039467
    ... ])
    >>> e_s = 1.110836504862630
    >>> N_c = 1.0
    >>> N_cb = 0.725000000000000
    >>> redness_greenness_response(C, e_s, N_c, N_cb)  # doctest: +ELLIPSIS
    -0.0001044...
    """
    C_1, C_2, _C_3 = tsplit(C)
    # Induction gain shared by the whole expression.
    gain = as_float_array(e_s) * (10 / 13) * as_float_array(N_c) * as_float_array(N_cb)
    return 100 * (C_1 - (C_2 / 11)) * gain
06ea4162ec83957db705a2903c8cd748f7a7c34a
3,631,017
import re
import os


def validate_file(file_type, path):
    """Ensure the data file contains parseable rows data.

    Returns an error-message string when validation fails, or None when the
    file is acceptable for the given ``file_type`` ('mirror',
    'undulatorTable' or 'sample').
    """
    # Pull the lower-cased extension off the path; reject extension-less files.
    match = re.search(r'\.(\w+)$', str(path))
    extension = None
    if match:
        extension = match.group(1).lower()
    else:
        return 'invalid file extension'
    if extension not in _FILE_TYPE_EXTENSIONS[file_type]:
        return 'invalid file type: {}'.format(extension)
    if file_type == 'mirror':
        # mirror file: every row must hold one or two tab-separated floats.
        try:
            count = 0
            with open(str(path)) as f:
                for line in f.readlines():
                    parts = line.split("\t")
                    if len(parts) > 0:
                        float(parts[0])
                    if len(parts) > 1:
                        float(parts[1])
                    count += 1
            if count == 0:
                return 'no data rows found in file'
        except ValueError as e:
            return 'invalid file format: {}'.format(e)
    elif file_type == 'undulatorTable':
        # undulator magnetic data file
        #TODO(pjm): add additional zip file validation
        try:
            template_common.validate_safe_zip(str(path), '.', validate_magnet_data_file)
        except AssertionError as err:
            # NOTE(review): AssertionError has no ``.message`` attribute on
            # Python 3 -- this looks Python 2 only; use str(err) if the
            # project runs on Python 3. Confirm.
            return err.message
    elif file_type == 'sample':
        filename = os.path.splitext(os.path.basename(str(path)))[0]
        # Save the processed file:
        srwl_uti_smp.SRWLUtiSmp(file_path=str(path), is_save_images=True, prefix=filename)
    return None
b2697cfc8105859bf6b78201d317f7203a80f5b0
3,631,018
import mimetypes


def upload(bucket_name, key, file_path):
    """Upload a local file to an S3 bucket under ``key``.

    The Content-Type is guessed from the file name and falls back to
    ``text/plain`` when it cannot be determined. Returns the S3
    ``put_object`` response.
    """
    s3 = boto3.client("s3")
    guessed, _ = mimetypes.guess_type(file_path, strict=False)
    content_type = "text/plain" if guessed is None else guessed
    with open(file_path, "rb") as handle:
        result = s3.put_object(
            Body=handle,
            Bucket=bucket_name,
            Key=key,
            ContentType=content_type,
        )
        LOGGER.debug(result)
        return result
9ad3500ea8631cb44e2ab35aa020ae7434181e2a
3,631,019
import platform


def _format_source(src, virtual_cells = None):
    # type: (Union[str, RuleTarget], str) -> str
    """
    Converts a 'source' to a string that can be used by buck native rules

    Args:
        src: Either a string (for a source file), or a RuleTarget that needs
             converted to a label
        virtual_cells: The virtual cells mapping used to convert RuleTarget
                       objects to labels

    Returns:
        A string with either the source path, or a full buck label
    """
    if target_utils.is_rule_target(src):
        # BUG FIX: the original message interpolated the unrelated
        # ``platform`` module; report the actual ``virtual_cells`` argument.
        if third_party.is_tp2_target(src) and virtual_cells == None:
            fail("Invalid RuleTarget ({}) and virtual_cells ({}) provided".format(src, virtual_cells))
        return target_utils.target_to_label(src, virtual_cells = virtual_cells)
    return src
530c13b7ce8239e97159f57ef794f036f958f9d4
3,631,020
def test_vnic_and_assign_vf(ip_addr, free_vnics_ips, backlisted_vfs=()):
    """
    Based on the IP address of an OCI VNIC, ensure that the VNIC is not
    already assigned to a virtual machine. If that VNIC is available, find
    a free virtual function on the appropriate physical interface and
    return the necessary information.

    Parameters
    ----------
    ip_addr : str
        The ip address.
    free_vnics_ips: list()
        The list of free vnic IPs
    backlisted_vfs: list of tuple
        list of ('pci id','vf num'). VF which should be filtered out during
        selection

    Returns
    -------
    tuple
        The virtual network interface, the pci id, the virtual function id
        on success, False,False,False otherwise.
    """
    # NOTE(review): several _logger.error calls below pass "{}"-style
    # placeholders with extra args; stdlib logging expects %-style lazy
    # arguments, so these would log unformatted -- confirm whether _logger
    # is a custom brace-style logger.
    _logger.debug('test_vnic_and_assign_vf called with (%s,%s,%s)' % (ip_addr, free_vnics_ips, backlisted_vfs))
    vnics = InstanceMetadata().refresh()['vnics']
    _logger.debug('vnics found in metadata: %s' % vnics)
    # Map every libvirt domain to the interfaces it currently owns.
    domains = virt_utils.get_domains_name()
    domain_interfaces = {d: virt_utils.get_interfaces_from_domain(
        virt_utils.get_domain_xml(d)) for d in domains}
    # First see if the given ip address belongs to a vnic
    vnic = find_vnic_by_ip(ip_addr, vnics)
    if vnic is None:
        _logger.error("{} is not the IP address of a VNIC.", ip_addr)
        _print_available_vnics(free_vnics_ips)
        return False, False, False
    _logger.debug('vnic found from IP : %s' % vnic)
    # Next check that the ip address is not already assigned to a vm
    vnic_mac = vnic['macAddr'].lower()
    _logger.debug('vnic found mac is %s' % vnic_mac)
    dom = _find_vlan(vnic_mac, domain_interfaces)
    if dom:
        _logger.error("{} is in use by \"{}\".", ip_addr, dom)
        _print_available_vnics(free_vnics_ips)
        return False, False, False
    # Resolve the physical NIC backing this VNIC, then pick a free VF on it
    # (skipping any blacklisted VFs).
    phys_nic = get_phys_by_index(vnic, vnics, get_interfaces())
    _logger.debug('physical intf found by index : %s' % phys_nic)
    vf_pci_id, vf_num = find_unassigned_vf_by_phys(phys_nic, domain_interfaces, vnic_mac, backlisted_vfs)
    _logger.debug('VF PCI id found [%s] VF number [%s]' % (vf_pci_id, vf_num))
    if vf_pci_id is None:
        # This should never happen. There are always at least as many virtual
        # Functions as there are potential creatable vnics
        _logger.error(
            "Could not find an unassigned virtual function on {}. Try using a "
            "VNIC on an alternate physical interface.", phys_nic)
        return False, False, False
    return vnic, vf_pci_id, vf_num
af1b60812c2ea03e4042de0560f852f932fb6337
3,631,021
from os import path


def read_config_file_step(
        info=None,
        config_file_path=path.join(path.dirname(path.dirname(__file__)), 'config', 'config.yml')):
    """Read the YAML configuration file.

    :param info: optional top-level key; when given, only that section of the
        configuration is returned.
    :param config_file_path: path to the configuration file, defaulting to
        ``<project root>/config/config.yml``.
    :return: the parsed YAML mapping, or just its ``info`` section.
    """
    # BUG FIX: ``path`` must come from ``os``, not ``sys`` -- ``sys.path``
    # is a list and has no ``join``/``dirname``, so the original default
    # argument raised AttributeError at import time.
    if info:
        res = FileOperate.read_yaml(config_file_path)[info]
    else:
        res = FileOperate.read_yaml(config_file_path)
    return res
0e934bb23da497ebe74e9ec8768f91770e33b213
3,631,022
def get_player():
    """Return (lazily creating) the driver that controls the VoiceHat speaker.

    The aiy modules automatically use this player, so you normally do not
    need to call this yourself -- prefer 'aiy.audio.play_wave' to play audio.
    """
    global _voicehat_player
    if not _voicehat_player:
        # First use: build and cache the singleton driver.
        _voicehat_player = aiy._drivers._player.Player()
    return _voicehat_player
f8c242083072392663a19bc720fac156e96cef7a
3,631,023
import json


def post_user_events():
    """Endpoint for users to post the events they want to add to their calendar.

    Reads the JSON request body (``calendar_id`` plus a ``basket`` of events),
    inserts the events into the user's Google calendar using the OAuth token
    stored in the session, then cleans up recurrences.

    Returns:
        Response: JSON with the success status of each event
    """
    body = request.json
    # Insert every basket event on behalf of the authenticated user.
    response = insert_user_calendar_events(
        session['google-idap']['access_token'],
        calendar_id=body['calendar_id'],
        events_array=body['basket'])
    response = {"data": response}
    # NOTE(review): ``response`` is built but never returned -- only the
    # recurrence-cleanup result below is sent back. Confirm whether the
    # insertion statuses should be part of the payload.
    response1 = clean_recurrences(
        session['google-idap']['access_token'],
        body['calendar_id'],
        False)
    return Response(json.dumps(response1), mimetype="application/json")
23dc6e8d4751e4c630b4049f1aec50ee6a8783b2
3,631,024
def zmq_version():
    """return the version of libzmq as a dotted string"""
    major, minor, patch = zmq_version_info()
    return "%i.%i.%i" % (major, minor, patch)
49fc037744e4215a583ccc8de07cd415a99fc17c
3,631,025
def parse_datetime_interval(period_from: str, period_to: str, strformat: str = None) -> date_tuple:
    """Parse a (from, to) period into datetimes or formatted strings.

    Args:
        period_from: YYYY-MM-DD or relative string supported by date parser
            e.g. 5 days ago
        period_to: YYYY-MM-DD or relative string supported by date parser
            e.g. 5 days ago
        strformat: A python strtime format in which output dates will be
            returned; when omitted, datetime objects are returned instead.

    Returns:
        start_date: datetime, end_date: datetime

    Raises:
        ValueError: when the parsed start date is after the end date.
    """
    start = dateparser.parse(period_from)
    end = dateparser.parse(period_to)
    if (end - start).days < 0:
        raise ValueError("start_date cannot exceed end_date.")
    if strformat is None:
        return start, end
    return start.strftime(strformat), end.strftime(strformat)
ddf97283465b04cc85bf74a079465439c49c82ff
3,631,026
def _paths_from_ls(recs):
    """Flatten the indented ``xenstore-ls`` listing into a list of paths.

    ``xenstore-ls`` encodes tree depth as leading whitespace; this walks
    that tree and returns one slash-joined path (e.g. ``"a/b/c"``) per
    listed key. (BUG FIX: the docstring previously promised a dict and a
    dead ``ret = {}`` assignment was immediately overwritten -- the
    function has always produced a list.)

    :param recs: raw multi-line output of ``xenstore-ls``.
    :return: list of slash-joined key paths, one per entry.
    """
    last_nm = ""
    level = 0
    path = []
    ret = []
    for ln in recs.splitlines():
        nm, _val = ln.rstrip().split(" = ")
        barename = nm.lstrip()
        # Depth == number of leading spaces before the key name.
        this_level = len(nm) - len(barename)
        if this_level == 0:
            # Back at the root: start a fresh path.
            ret.append(barename)
            level = 0
            path = []
        elif this_level == level:
            # child of same parent
            ret.append("%s/%s" % ("/".join(path), barename))
        elif this_level > level:
            # Descend: the previous entry is our parent.
            path.append(last_nm)
            ret.append("%s/%s" % ("/".join(path), barename))
            level = this_level
        else:
            # Pop back up to the matching depth.
            path = path[:this_level]
            ret.append("%s/%s" % ("/".join(path), barename))
            level = this_level
        last_nm = barename
    return ret
afa0fbe3e5c1773074569363a538587664a00a2f
3,631,027
import torch
import tqdm
import os
import pickle


def train(args, train_dataset, eval_dataset, model, tokenizer):
    """ Train the model.

    Runs the full training loop (with gradient accumulation, optional
    warmup/decay scheduling, periodic console printing, TensorBoard logging,
    checkpointing and in-training evaluation) and returns
    ``(global_step, tr_loss, loss_reports)``.

    NOTE(review): the model is expected to be wrapped (``model.module`` is
    used when saving) and many helpers (SummaryWriter, RandomSampler,
    DataLoader, AdamW, get_linear_schedule_with_warmup, trange, tqdm,
    set_seed, create_attention_mask, loss_fn, evaluate, _rotate_checkpoints,
    logger, np) come from elsewhere in this file -- confirm before reuse.
    """
    # ===== Setting up
    # summary writer
    tb_writer = SummaryWriter()
    print("DEBUGGING!")
    print("train_dataset: " + str(len(train_dataset)))
    print(train_dataset[0])
    print("train_batch_size: " + str(args.per_gpu_train_batch_size * max(1, args.n_gpu)))
    # Effective batch size scales with the number of GPUs.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # Fixed step budget: derive the epoch count from it.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    if args.from_checkpoint:
        # Resume the step counter and extend the schedule accordingly.
        global_step = args.start_step
        t_total += args.start_step
    else:
        global_step = 0
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # ===== Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * 1)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    if args.from_checkpoint:
        logger.info(" Starting from checkpoint {}, total optimization steps = {}".format(args.start_step, t_total))
    else:
        logger.info(" Total optimization steps = %d", t_total)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=False)
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    # create decoder_attention_mask here instead of in the loop to save running time
    decoder_attention_mask_onesample = create_attention_mask(0, args.block_size, args.gpt2_config, "decoder_mask")
    decoder_attention_mask_batchsize = torch.tensor([decoder_attention_mask_onesample] * args.train_batch_size)
    loss_report = []
    eval_loss_report = []
    eval_perplexity_loss_report = []
    eval_current_step = []
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
        for step, batch in enumerate(epoch_iterator):
            sentence_embedding = batch["sentence_embedding"].float()
            decoder_input = batch["decoder_input"].long()
            decoder_label = batch["decoder_label"].long()
            # Reuse the precomputed full-batch mask when possible; the last
            # (short) batch gets a freshly sized one.
            if len(batch["decoder_attention_mask"]) == args.train_batch_size:
                decoder_attention_mask = decoder_attention_mask_batchsize.long()
            else:
                decoder_attention_mask = torch.tensor([decoder_attention_mask_onesample] * len(batch["decoder_attention_mask"])).long()
            sentence_embedding = sentence_embedding.to(args.device)
            decoder_input = decoder_input.to(args.device)
            decoder_label = decoder_label.to(args.device)
            decoder_attention_mask = decoder_attention_mask.to(args.device)
            # forward pass (change and edit with VAE code)
            decoder_lm_logits = model(sentence_embedding, decoder_input, decoder_attention_mask, args.device)
            # compute loss (padding token id is excluded from the NLL)
            NLL_loss = loss_fn(decoder_lm_logits, decoder_label, tokenizer.convert_tokens_to_ids(["<|pad|>"])[0])  ## DEBUGGING
            loss = NLL_loss
            # (commented-out debugging aid removed for clarity: it decoded and
            # printed the batch inputs, labels and argmax predictions)
            # process loss across GPUs, batches then backwards
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss_report.append(loss.data.cpu().numpy())
            # run loss backward
            loss.backward()
            # accummulte enough step, step backward
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # print loss
                if args.printing_steps > 0 and global_step % args.printing_steps == 0:
                    # (commented-out debugging aid removed for clarity: it
                    # periodically decoded a sample prediction)
                    # Log metrics
                    print("Current training step: " + str(global_step))
                    print("Average current training loss of the latests {} steps: {}".format(str(args.printing_steps), str(np.mean(loss_report[-args.printing_steps:]))))
                # logging
                if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    # if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                    tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss
                # save checkpoints
                if args.save_steps > 0 and global_step % args.save_steps == 0:
                    checkpoint_prefix = 'checkpoint'
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    # evaluate
                    if args.evaluate_during_training:
                        # set model to eval
                        model.eval()
                        # running train function
                        eval_loss, eval_perplexity = evaluate(args, eval_dataset, model, tokenizer)
                        eval_loss_report.append(eval_loss)
                        eval_perplexity_loss_report.append(eval_perplexity)
                        eval_current_step.append(global_step)
                        # set model to train
                        model.train()
                    # save model
                    loss_reports = {"loss_report":loss_report, "eval_loss_report":eval_loss_report, "eval_perplexity_loss_report":eval_perplexity_loss_report, "eval_current_step":eval_current_step}
                    if args.from_checkpoint:
                        # concatenate with results from checkpoint if training from check point
                        loss_reports_from_checkpoint = pickle.load(open(args.output_dir + "/checkpoint-{}".format(str(args.start_step)) + "/loss_reports.pkl", "rb"))
                        for key in loss_reports.keys():
                            loss_reports[key] = loss_reports_from_checkpoint[key] + loss_reports[key]
                    model.module.save_pretrained(args, output_dir, loss_reports)
                    logger.info("Saving model checkpoint to %s", output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    # save final loss_reports
    loss_reports = {"loss_report":loss_report, "eval_loss_report":eval_loss_report, "eval_perplexity_loss_report":eval_perplexity_loss_report, "eval_current_step":eval_current_step}
    if args.from_checkpoint:
        # concatenate with results from checkpoint if training from check point
        loss_reports_from_checkpoint = pickle.load(open(args.output_dir + "/checkpoint-{}".format(str(args.start_step)) + "/loss_reports.pkl", "rb"))
        for key in loss_reports.keys():
            loss_reports[key] = loss_reports_from_checkpoint[key] + loss_reports[key]
    # close summary writer
    tb_writer.close()
    return global_step, tr_loss, loss_reports
f20c68d1f8c7b1d704f537c56bb0f6d8bbe46f93
3,631,028
def TRS_between_rounds(X1, X2):
    """Calculate the TRS (translate-rotate-scale) rotor between any pair
    of rounds of the same grade.

    Brings both rounds to the origin, lines up their carriers, and derives
    the scale from the ratio of their sphere radii.

    NOTE(review): relies on conformal-GA helpers defined elsewhere
    (generate_translation_rotor, rotor_between_objects, einf, I5, ...).
    """
    # Translate each round so its (down-projected) centre sits at the origin.
    T1 = generate_translation_rotor(-down((X1 * einf * X1)(1)))
    X1h = (T1 * X1 * ~T1).normal()
    T2 = generate_translation_rotor(-down((X2 * einf * X2)(1)))
    X2h = (T2 * X2 * ~T2).normal()
    # Carriers (flats through infinity) of the origin-centred rounds.
    X1f = (X1h ^ einf).normal()
    X2f = (X2h ^ einf).normal()
    # Rotor that aligns the two carriers.
    Rc = rotor_between_objects(X1f, X2f)
    # Dilation that matches the radii of the two rounds.
    S = generate_dilation_rotor(
        get_radius_from_sphere((X2h * X2f * I5).normal())
        / get_radius_from_sphere((X1h * X1f * I5).normal()))
    # Compose: bring X1 to origin, rotate, scale, translate out to X2.
    return ((~T2) * S * Rc * T1).normal()
6e25e89999e22a6f3e3ef853f7b4c8e6268824b6
3,631,029
def col_range_nb(col_arr, n_cols):
    """Build the [start, end) row range of every column in a sorted column array.

    Returns a (n_cols, 2) integer array: first column holds inclusive start
    indices, second column exclusive end indices. Columns that never appear
    keep -1 in both slots.

    !!! note
        Requires `col_arr` to be in ascending order. This can be done by
        sorting beforehand.
    """
    out = np.full((n_cols, 2), -1, dtype=np.int_)
    n_rows = col_arr.shape[0]
    prev = -1
    for row in range(n_rows):
        cur = col_arr[row]
        if cur < prev:
            raise ValueError("col_arr must be in ascending order")
        if cur != prev:
            # Close the previous column's range and open the new one.
            if prev != -1:
                out[prev, 1] = row
            out[cur, 0] = row
            prev = cur
        if row == n_rows - 1:
            # The final row closes the last column's range.
            out[cur, 1] = row + 1
    return out
f6ff35b01c34b917f9aaf69acfe4aa3d1e6d7548
3,631,030
def filterfiles(files, criteria):
    """Return only the files from *files* that match the criteria.

    The criteria should be a list of ``(unit, keyword, function)`` tuples,
    where *unit* is the FITS extension number, *keyword* the header keyword
    to inspect, and *function* returns True for desired keyword values.

    For example::

        criteria = ((0, 'OPT_ELEM', lambda x: x == 'G130M'),
                    (1, 'CENWAVE', lambda x: x < 1300.0 and x > 1200.0))

    NOTE(review): a file is appended once per criterion it satisfies, so a
    file matching several criteria appears multiple times in the result --
    confirm callers expect OR semantics with possible duplicates.
    """
    outfiles = list()

    # Unique FITS extension (header unit) numbers referenced by the criteria.
    hdus = [c[0] for c in criteria]
    hdus = list(set(hdus))  # keeps the unique elements

    for h in hdus:
        # select the criteria associated with keywords in header h
        hcriteria = [c for c in criteria if c[0] == h]
        for f in files:
            # Read only the header of extension h (cheaper than loading data).
            hdr = fits.getheader(f, h)
            for c in hcriteria:
                value = hdr[c[1]]
                # keep file if the keyword value is what we want
                if c[2](value):
                    outfiles.append(f)

    return outfiles
5a60a2a46944eeb8d70337b342824cad6156c33d
3,631,031
def module_start(ip, port, dev, module):
    """Start a module on a device via the control server.

    Args:
        ip (str): server IP address
        port (int): server port number
        dev (str): device name
        module (str): module name

    Returns:
        tuple: ``(err_no, error_info)`` -- the error code returned by the
        server and its human-readable description.
    """
    # NOTE: the literals below are part of the logged protocol text and are
    # intentionally left untranslated.
    command = "模块启动"
    AP.__class__.logout("发送{}{}{}指令".format(dev, module, command))
    # Fill the shared request template with the target device and module.
    MESSAGE_DICT["mod_start_request_message"]["data"]["device"] = dev
    MESSAGE_DICT["mod_start_request_message"]["data"]["mod"] = module
    request_message = MESSAGE_DICT["mod_start_request_message"]
    # Send the request and extract the server's error code.
    payload = client(ip, port, request_message)
    err_no = payload["response"]["err_no"]
    error_info = return_err_info(dev, err_no)
    AP.__class__.logout(
        "服务端执行{} {} {}指令结果:{}".format(dev, module, command,
                                       error_info if error_info else '未知({})'.format(err_no)))
    return (err_no, error_info)
9eb5f222cb3fb6f55398c2390fb67b9f823cae6e
3,631,032
def activate_bgp_neighbor(dut, local_asn, neighbor_ip, family="ipv4", config='yes', vrf='default', **kwargs):
    """Activate (or deactivate) a BGP neighbor for an address family on the DUT.

    :param dut: device under test handle
    :param local_asn: local BGP AS number
    :param neighbor_ip: neighbor IP address or interface name
    :param family: address family, 'ipv4' or 'ipv6' (anything else returns False)
    :param config: 'yes' to activate, anything else emits the 'no' form
    :param vrf: VRF name; 'default' omits the vrf clause
    :param kwargs: optional 'remote_asn', 'skip_error_check', CLI-type override
    :return: True when a command was issued, False for an unsupported family
        or CLI type
    """
    st.log("Activate BGP neigbor")
    cli_type = get_cfg_cli_type(dut, **kwargs)
    skip_error_check = kwargs.get('skip_error_check', True)
    remote_asn = kwargs.get('remote_asn', '')
    # Empty prefix activates; 'no' prefix deactivates.
    if config.lower() == 'yes':
        mode = ""
    else:
        mode = 'no'
    if family != 'ipv4' and family != 'ipv6':
        return False

    cmd = ''
    if cli_type == 'vtysh':
        if vrf != 'default':
            cmd = cmd + 'router bgp {} vrf {}\n'.format(local_asn, vrf)
        else:
            cmd = cmd + 'router bgp {}\n'.format(local_asn)
        if remote_asn != '':
            cmd = cmd + 'neighbor {} remote-as {}\n'.format(neighbor_ip, remote_asn)
        cmd = cmd + 'address-family {} unicast\n'.format(family)
        cmd = cmd + '{} neighbor {} activate\n'.format(mode, neighbor_ip)
        cmd = cmd + '\n end'
        st.config(dut, cmd, type='vtysh', skip_error_check=skip_error_check)
        return True
    elif cli_type == "klish":
        # klish addresses interface neighbors as "interface <type> <number>".
        neigh_name = get_interface_number_from_name(neighbor_ip)
        if vrf != 'default':
            cmd = cmd + 'router bgp {} vrf {}\n'.format(local_asn, vrf)
        else:
            cmd = cmd + 'router bgp {}\n'.format(local_asn)
        if neigh_name:
            if isinstance(neigh_name, dict):
                cmd = cmd + 'neighbor interface {} {}\n'.format(neigh_name["type"], neigh_name["number"])
            else:
                cmd = cmd + 'neighbor {}\n'.format(neigh_name)
        cmd = cmd + 'remote-as {}\n'.format(remote_asn)
        cmd = cmd + 'address-family {} unicast\n'.format(family)
        cmd = cmd + ' {} activate\n'.format(mode)
        cmd = cmd + 'exit\nexit\nexit\n'
        st.config(dut, cmd, type=cli_type, skip_error_check=skip_error_check, conf=True)
        return True
    else:
        st.log("Unsupported CLI TYPE - {}".format(cli_type))
        return False
8e335938c73b1cd816afc77b0a8af75c5dfdd893
3,631,033
def print_experiment_record_argtable(records):
    """
    Print a table comparing experiment arguments and their results.

    One row per record; columns show the function name, run time, arguments
    shared by all records, arguments that differ, and a one-line result.
    """
    funtion_names = [record.info.get_field(ExpInfoFields.FUNCTION) for record in records]
    args = [record.get_args() for record in records]
    # Split argument dicts into the part shared by every record and the
    # per-record remainders.
    common_args, different_args = separate_common_items(args)
    record_ids = [record.get_id() for record in records]

    # Cell renderer: maps (row record id, column name) -> display string.
    def lookup_fcn(record_id, column):
        index = record_ids.index(record_id)
        if column == 'Function':
            return funtion_names[index]
        elif column == 'Run Time':
            return records[index].info.get_field_text(ExpInfoFields.RUNTIME)
        elif column == 'Common Args':
            return ', '.join('{}={}'.format(k, v) for k, v in common_args.items())
        elif column == 'Different Args':
            return ', '.join('{}={}'.format(k, v) for k, v in different_args[index].items())
        elif column == 'Result':
            return get_oneline_result_string(records[index])
        else:
            bad_value(column)

    rows = build_table(lookup_fcn,
                       row_categories=record_ids,
                       column_categories=['Function', 'Run Time', 'Common Args', 'Different Args', 'Result'],
                       prettify_labels=False
                       )
    print(tabulate(rows))
04b8e5e07e19f6dae0f8b9d988a4bda0a0a1de17
3,631,034
def key_validator(*args, **kwargs):
    """Wraps hex_validator generator, to keep makemigrations happy.

    NOTE(review): presumably this gives Django migrations a stable,
    importable reference instead of serializing the closure returned by
    ``hex_validator()`` directly -- confirm against the field definitions.
    """
    return hex_validator()(*args, **kwargs)
59c6ddda60ac4d1a954f112e426bd2952800dd8e
3,631,035
def fuzzy_search(request):
    """
    ajax interface for fuzzy string search

    :param request: request of the web server; its validated data must hold
        'type', 'value', 'issue', 'statement_uid' and 'user'
    :return: json-set with all matched strings
    """
    LOG.debug("Fuzzy String search for AJAX: %s", request.json_body)
    mode = request.validated['type']
    value = request.validated['value']
    db_issue = request.validated['issue']
    statement_uid = request.validated['statement_uid']
    db_user = request.validated['user']
    prepared_dict = fuzzy_string_matcher.get_prediction(db_user, db_issue, value, mode, statement_uid)
    # Rewrite each hit's discussion URL in place before returning.
    for part_dict in prepared_dict['values']:
        __modifiy_discussion_url(part_dict)
    return prepared_dict
501425306babe15af0baae73b4f69dd89b4947ec
3,631,036
from . import routes
from . import auth


def create_app():
    """Initialize the core application (Flask application factory).

    Returns a configured Flask app with extensions initialized, blueprints
    registered and database tables created.
    """
    app = Flask(__name__, instance_relative_config=False)
    app.config.from_object(Config)

    # Initialize Plugins (extension objects created at module level elsewhere).
    db.init_app(app)
    login_manager.init_app(app)

    with app.app_context():
        # Register Blueprints
        app.register_blueprint(routes.main_bp)
        app.register_blueprint(auth.auth_bp)

        # Create Database Models
        db.create_all()

        return app
1aaffaf9ece821e6448670006871c143286ac37c
3,631,037
from typing import Optional


def plot_points_3D_mayavi(
    points: np.ndarray,
    bird: bool,
    fig: Figure,
    per_pt_color_strengths: np.ndarray = None,
    fixed_color: Optional[Color] = (1, 0, 0),
    colormap: str = "spectral",
) -> Figure:
    """Visualize points with Mayavi.

    Scale factor has no influence on point size rendering when calling
    `points3d()` with the mode="point" argument, so we ignore it altogether.
    The parameter "line_width" also has no effect on points, so we ignore
    it also.

    Args:
        points: The points to visualize, shape (N, 3)
        bird: when True, shade by the z coordinate (bird's-eye view);
            otherwise shade by the x coordinate
        fig: A Mayavi figure
        per_pt_color_strengths: An array of scalar values the same size as
            `points`
        fixed_color: Use a fixed color instead of a colormap
        colormap: different green to red jet for 'spectral' or 'gnuplot'

    Returns:
        Updated Mayavi figure, or None when `points` is empty
        (NOTE(review): the None return contradicts the `Figure` annotation).
    """
    if len(points) == 0:
        return None

    # Fall back to coordinate-based shading when no (valid) scalars given.
    if per_pt_color_strengths is None or len(per_pt_color_strengths) != len(points):
        # Height data used for shading
        if bird:
            per_pt_color_strengths = points[:, 2]
        else:
            per_pt_color_strengths = points[:, 0]

    mlab.points3d(
        points[:, 0],  # x
        points[:, 1],  # y
        points[:, 2],  # z
        per_pt_color_strengths,
        mode="point",  # Render each point as a 'point', not as a 'sphere' or 'cube'
        colormap=colormap,
        color=fixed_color,  # Used a fixed (r,g,b) color instead of colormap
        figure=fig,
    )
    return fig
e9f80d8de890cd4b1c51a9614dc69812c951cec5
3,631,038
import io


def read_binary_integer32_token(file_desc: io.BufferedReader) -> int:
    """
    Get next int32 value from file.
    The carriage moves forward 5 positions: one size byte plus the 4-byte
    payload it announces.
    :param file_desc: file descriptor positioned at the size byte
    :return: next uint32 value in file
    """
    # First byte encodes how many bytes the integer occupies (4 for int32).
    buffer_size = file_desc.read(1)
    return get_uint32(file_desc.read(buffer_size[0]))
81a0c6a592476a142e28ddbf65bddd59004486ad
3,631,039
def pop(key): """ 从缓存队列的后尾读取一条数据 :param key: 缓存key,字符串,不区分大小写 :return: 缓存数据 """ # 将key转换为小写字母 key = str(key).lower() try: value = r.rpop(key) except Exception as e: log_helper.info('读取缓存队列失败:key(' + key + ')' + str(e.args)) value = None return _str_to_json(value)
92cc254b9b2270ac2fa6c7d66b7f8a13b11b2522
3,631,040
def _one_q_pauli_prep(label, index, qubit):
    """Prepare the index-th eigenstate of the pauli operator given by label.

    :param label: one of 'X', 'Y', 'Z'
    :param index: 0 for the +1 eigenstate, 1 for the -1 eigenstate
    :param qubit: the qubit to act on
    :raises ValueError: for any other label or index
    """
    if index not in [0, 1]:
        raise ValueError(f'Bad Pauli index: {index}')

    if label == 'X':
        # NOTE(review): the X branch uses the private _RY wrapper while the
        # Y/Z branches call RX directly -- confirm this asymmetry is
        # intentional (e.g. a parametric-gate substitution).
        if index == 0:
            return Program(_RY(pi / 2, qubit))
        else:
            return Program(_RY(-pi / 2, qubit))
    elif label == 'Y':
        if index == 0:
            return Program(RX(-pi / 2, qubit))
        else:
            return Program(RX(pi / 2, qubit))
    elif label == 'Z':
        if index == 0:
            # |0> is already the +1 eigenstate of Z; nothing to do.
            return Program()
        else:
            return Program(RX(pi, qubit))

    raise ValueError(f'Bad Pauli label: {label}')
4c4f02c6e1ffcbb57ca161f3cb0f17a678563b0b
3,631,041
import logging


def run(camera: Camera):
    """
    Runs the PictogramDetector and announces the result via text-to-speech.

    :param camera: the camera the detector captures frames from
    :return: the pictogram which had the most hits, or None on RuntimeError
    """
    try:
        detector = PictogramDetector(camera)
        stats = detector.detect()
        logging.debug(stats)
        # The pictogram with the highest hit count wins.
        result = max(stats, key=stats.get)

        t2s = pyttsx3.init()
        # NOTE(review): getProperty('voices') returns a list of voices;
        # setting 'voice' to the whole list looks suspicious -- confirm
        # which voice was actually intended.
        t2s.setProperty('voice', t2s.getProperty('voices'))
        t2s.setProperty('volume', 1)
        # BUG FIX: was `result is 'paint'` -- identity comparison against a
        # string literal is implementation-dependent; compare by equality.
        comb = 'some' if result == 'paint' else 'a'
        t2s.say("I am looking for %s %s" % (comb, result))
        logging.info("detected: %s", result)
        t2s.runAndWait()
        return result
    except RuntimeError as e:
        # BUG FIX: logging.error was passed `e` as an extra positional arg
        # without a format placeholder; log the exception properly instead.
        logging.exception("Error in a_detect_pictogram: %s", e)
7479d84174130edc078a4e00fcfed39dadb014c4
3,631,042
import os
import pickle


def concat_claims_all_documents(index: list, file_name: str):
    """
    Merges all the lists of unique tags and saves the merged version as one
    pickle file with all unique tags.

    :param index: list of part-suffix strings identifying each per-part pickle
    :param file_name: base file name; each part path is rebuilt by stripping
        the suffix length from the stem and re-appending the suffix
    :return: the concatenated DataFrame (also written to
        ``<stem>_wclaims_all.pkl`` under ROOT_DIR)
    """
    list_of_dfs = []
    for i in index:
        # Rebuild "<stem-minus-suffix><i>_wclaims.pkl" for this part.
        location = '{0}{1}_wclaims.pkl'.format(file_name.split('.')[0][:-len(i)], i)
        with open(os.path.join(ROOT_DIR, location), "rb") as fp:
            df_claims = pickle.load(fp)
            list_of_dfs.append(df_claims)

    df_concat_all_claims = pd.concat(list_of_dfs)
    df_concat_all_claims = df_concat_all_claims.reset_index(drop=True)

    # Persist the merged frame next to the parts.
    with open(os.path.join(ROOT_DIR, '{0}_wclaims_all.pkl'.format(file_name.split('.')[0])), "wb") as fp:
        pickle.dump(df_concat_all_claims, fp)

    return df_concat_all_claims
09d155c5f14ee88bfc797e861551b12d987867e3
3,631,043
def get_movie_data_from_wikidata(slice_movie_set: pd.DataFrame):
    """
    Function that consults the wikidata KG for a slice of the movies set.

    :param slice_movie_set: slice of the movie data set with movie id as
        index and imdbId, Title, year and imdbUrl as columns (must provide a
        'full_imdbId' column)
    :return: JSON with the results of the query
    """
    # Build the space-separated, quoted IMDb-id list for the VALUES clause.
    imdbIdList = slice_movie_set['full_imdbId'].to_list()
    imdbs = ""
    for i in range(0, len(imdbIdList)):
        imdbId = imdbIdList[i]
        imdbs += " ""\"""" + imdbId + """\" """

    endpoint_url = "https://query.wikidata.org/sparql"

    # The FILTER restricts results to a fixed whitelist of movie-related
    # properties (title, series, genre, director, cast, composer, ...).
    query = """SELECT DISTINCT ?itemLabel ?propertyItemLabel ?valueLabel ?imdbId
    WHERE {
        ?item wdt:P345 ?imdbId .
        ?item ?propertyRel ?value.
        VALUES ?imdbId {""" + imdbs + """} .
        ?propertyItem wikibase:directClaim ?propertyRel .
        SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } .
        FILTER( ?propertyRel = wdt:P1476 || ?propertyRel = wdt:P179 ||
                ?propertyRel = wdt:P915 || ?propertyRel = wdt:P136 ||
                ?propertyRel = wdt:P170 || ?propertyRel = wdt:P495 ||
                ?propertyRel = wdt:P57 || ?propertyRel = wdt:P58 ||
                ?propertyRel = wdt:P161 || ?propertyRel = wdt:P725 ||
                ?propertyRel = wdt:P1431 || ?propertyRel = wdt:P1040 ||
                ?propertyRel = wdt:P86 || ?propertyRel = wdt:P162 ||
                ?propertyRel = wdt:P272 || ?propertyRel = wdt:P344 ||
                ?propertyRel = wdt:P166 || ?propertyRel = wdt:P1411 ||
                ?propertyRel = wdt:P2554 || ?propertyRel = wdt:P2515 ||
                ?propertyRel = wdt:P840 || ?propertyRel = wdt:P921 ||
                ?propertyRel = wdt:P175 )
    }
    ORDER BY ?imdbId"""

    # Wikidata requires a descriptive User-Agent for automated queries.
    user_agent = "WikidataExplanationBotIntegration/1.0 https://www.wikidata.org/wiki/User:Andrelzan) " \
                 "wiki-bot-explanation-integration/1.0"
    sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    results_dic = results_movies_to_dict(slice_movie_set, results)
    return results_dic
a4c3a9a7e7cce1a2eb85326422afcb4ff3463db4
3,631,044
def run_dpc(
    filename,
    i,
    j,
    ref_fx=None,
    ref_fy=None,
    start_point=[1, 0],
    pixel_size=55,
    focus_to_det=1.46,
    dx=0.1,
    dy=0.1,
    energy=19.5,
    zip_file=None,
    roi=None,
    bad_pixels=[],
    max_iters=1000,
    solver="Nelder-Mead",
    hang=True,
    reverse_x=1,
    reverse_y=1,
    load_image=load_timepix.load,
):
    """
    Fit one DPC-scan image against the reference curves and return the
    amplitude, x/y gradients and fit residuals as ``(a, gx, gy, rx, ry)``.

    All units in micron.

    pixel_size
    focus_to_det: focus to detector distance
    dx: scan step size x
    dy: scan step size y
    energy: in keV

    Sentinel tuples are returned when the file cannot be read (all zeros)
    or the image fails to load (all 1e-5).

    NOTE(review): `start_point` and `bad_pixels` are mutable default
    arguments -- safe only while callers never mutate them in place.
    """
    try:
        img, fx, fy = load_file(load_image, filename, hang=hang, zip_file=zip_file, roi=roi, bad_pixels=bad_pixels)
    except IOError as ie:
        print("%s" % ie)
        return 0.0, 0.0, 0.0, 0.0, 0.0

    if img is None:
        print("Image {0} was not loaded.".format(filename))
        return 1e-5, 1e-5, 1e-5, 1e-5, 1e-5

    # Fit the x profile; vx = (amplitude, shift).
    # vx = fmin(rss, start_point, args=(ref_fx, fx, get_beta(ref_fx)),
    #           maxiter=max_iters, maxfun=max_iters, disp=0)
    res = minimize(
        rss,
        start_point,
        args=(ref_fx, fx, get_beta(ref_fx)),
        method=solver,
        tol=1e-6,
        options=dict(maxiter=max_iters),
    )
    vx = res.x
    rx = res.fun
    a = vx[0]
    gx = reverse_x * vx[1]

    # Fit the y profile; only the shift (gradient) component is kept.
    # vy = fmin(rss, start_point, args=(ref_fy, fy, get_beta(ref_fy)),
    #           maxiter=max_iters, maxfun=max_iters, disp=0)
    res = minimize(
        rss,
        start_point,
        args=(ref_fy, fy, get_beta(ref_fy)),
        method=solver,
        tol=1e-6,
        options=dict(maxiter=max_iters),
    )
    vy = res.x
    ry = res.fun
    gy = reverse_y * vy[1]

    # print(i, j, vx[0], vx[1], vy[1])
    return a, gx, gy, rx, ry
e7d96201350cb84c323066434d8257631de353f7
3,631,045
from ostap.utils.cleanup import CleanUp
import os


def make_build_dir(build=None):
    """Create a proper temporary directory for ROOT builds.

    Falls back to a managed ``ostap-build-`` temp directory when *build* is
    falsy or not writeable, and creates the directory on disk if needed.
    """
    # Replace an unusable build location with a fresh managed temp dir.
    if not build or not writeable(build):
        build = CleanUp.tempdir(prefix='ostap-build-')
    if not os.path.exists(build):
        make_dir(build)
    return build
28a6b9de1b8b33235c4abce988624c3e6a56ba25
3,631,046
def api_converter():
    """
    Handler for conversion API request.

    :return: Text of response -- a formatted conversion result, or a JSON
        error object; always newline-terminated
    :rtype: str
    """
    try:
        parsed_args = parse_request_arguments()
        conversion_result = convert_core.convert_currency(**parsed_args)
    except (APIRequestError, CurrencyConversionError) as exc_msg:
        # Domain errors are echoed back verbatim as JSON.
        print(exc_msg)
        return simplejson.dumps({"error": str(exc_msg)}) + "\n"
    except BadRequest:
        return simplejson.dumps({"error": "Invalid request arguments"}) + "\n"
    else:
        return output_formatter(
            conversion_result, parsed_args["currency_amount"]
        ) + "\n"
591961f92a5e1e831976bcb09396630b515785cb
3,631,047
import pytz


def plot1(ctx):
    """Do main plotting logic.

    Builds a three-panel timeseries figure for one ISUSM station: soil
    moisture + hourly precipitation, soil temperatures, and air/soil
    temperature with solar radiation. Returns ``(fig, df)``.

    :raises NoDataFound: when the query returns no rows
    """
    df = read_sql("""
    SELECT * from sm_hourly WHERE station = %s and valid BETWEEN %s and %s
    ORDER by valid ASC
    """, ctx['pgconn'], params=(ctx['station'], ctx['sts'], ctx['ets']),
                  index_col='valid')
    if df.empty:
        raise NoDataFound("No Data Found for This Plot.")

    # Quality-controlled observation columns.
    slrkw = df['slrkw_avg_qc']
    d12sm = df['calc_vwc_12_avg_qc']
    d12t = df['t12_c_avg_qc']
    d24t = df['t24_c_avg_qc']
    d50t = df['t50_c_avg_qc']
    d24sm = df['calc_vwc_24_avg_qc']
    d50sm = df['calc_vwc_50_avg_qc']
    rain = df['rain_mm_tot_qc']
    tair = df['tair_c_avg_qc']
    tsoil = df['tsoil_c_avg_qc']
    valid = df.index.values

    (fig, ax) = plt.subplots(3, 1, sharex=True, figsize=(8, 8))

    # --- Panel 1: soil moisture (left axis) + precipitation (right axis) ---
    ax[0].grid(True)
    ax2 = ax[0].twinx()
    ax[0].set_zorder(ax2.get_zorder() + 1)
    ax[0].patch.set_visible(False)
    # arange leads to funky values
    ax2.set_yticks([-0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0])
    # Bars are drawn downward (negated), so labels flip the sign back.
    ax2.set_yticklabels([0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0])
    ax2.set_ylim(-0.6, 0)
    ax2.set_ylabel("Hourly Precipitation [inch]")
    # 25.4 mm per inch.
    b1 = ax2.bar(valid, 0 - rain / 25.4, width=0.04, fc='b', ec='b', zorder=4)

    l1 = None
    l2 = None
    l3 = None
    # Only plot depths that have at least one non-null value.
    if not d12sm.isnull().all():
        l1, = ax[0].plot(valid, d12sm * 100.0, linewidth=2, color='r',
                         zorder=5)
    if not d24sm.isnull().all():
        l2, = ax[0].plot(valid, d24sm * 100.0, linewidth=2, color='purple',
                         zorder=5)
    if not d50sm.isnull().all():
        l3, = ax[0].plot(valid, d50sm * 100.0, linewidth=2, color='black',
                         zorder=5)
    ax[0].set_ylabel("Volumetric Soil Water Content [%]", fontsize=10)

    # Tick density depends on the span of the plot.
    days = (ctx['ets'] - ctx['sts']).days
    if days >= 3:
        interval = max(int(days / 7), 1)
        ax[0].xaxis.set_major_locator(
            mdates.DayLocator(interval=interval,
                              tz=pytz.timezone("America/Chicago")))
        ax[0].xaxis.set_major_formatter(
            mdates.DateFormatter('%-d %b\n%Y',
                                 tz=pytz.timezone("America/Chicago")))
    else:
        ax[0].xaxis.set_major_locator(
            mdates.AutoDateLocator(maxticks=10,
                                   tz=pytz.timezone("America/Chicago")))
        ax[0].xaxis.set_major_formatter(
            mdates.DateFormatter('%-I %p\n%d %b',
                                 tz=pytz.timezone("America/Chicago")))
    ax[0].set_title(("ISUSM Station: %s Timeseries"
                     ) % (ctx['_nt'].sts[ctx['station']]['name'], ))
    # Shrink axes slightly to make room for the legend below.
    box = ax[0].get_position()
    ax[0].set_position([box.x0, box.y0 + box.height * 0.05, box.width,
                        box.height * 0.95])
    box = ax2.get_position()
    ax2.set_position([box.x0, box.y0 + box.height * 0.05, box.width,
                      box.height * 0.95])
    if None not in [l1, l2, l3]:
        ax[0].legend([l1, l2, l3, b1],
                     ['12 inch', '24 inch', '50 inch', 'Hourly Precip'],
                     bbox_to_anchor=(0.5, -0.15), ncol=4, loc='center',
                     fontsize=12)

    # --- Panel 2: soil temperatures ---
    if not d12t.isnull().all():
        ax[1].plot(valid, temperature(d12t, 'C').value('F'), linewidth=2,
                   color='r', label='12in')
    if not d24t.isnull().all():
        ax[1].plot(valid, temperature(d24t, 'C').value('F'), linewidth=2,
                   color='purple', label='24in')
    if not d50t.isnull().all():
        ax[1].plot(valid, temperature(d50t, 'C').value('F'), linewidth=2,
                   color='black', label='50in')
    ax[1].grid(True)
    ax[1].set_ylabel(r"Temperature $^\circ$F")
    box = ax[1].get_position()
    ax[1].set_position([box.x0, box.y0 + box.height * 0.05, box.width,
                        box.height * 0.95])

    # --- Panel 3: air/soil temperature (left) + solar radiation (right) ---
    ax2 = ax[2].twinx()
    l3, = ax2.plot(valid, slrkw, color='g', zorder=1, lw=2)
    ax2.set_ylabel("Solar Radiation [W/m^2]", color='g')
    l1, = ax[2].plot(valid, temperature(tair, 'C').value('F'), linewidth=2,
                     color='blue', zorder=2)
    l2, = ax[2].plot(valid, temperature(tsoil, 'C').value('F'), linewidth=2,
                     color='brown', zorder=2)
    ax[2].grid(True)
    ax[2].legend([l1, l2, l3],
                 ['Air', '4" Soil', 'Solar Radiation'],
                 bbox_to_anchor=(0.5, 1.1), loc='center', ncol=3)
    ax[2].set_ylabel(r"Temperature $^\circ$F")
    ax[2].set_zorder(ax2.get_zorder() + 1)
    ax[2].patch.set_visible(False)
    ax[0].set_xlim(df.index.min(), df.index.max())

    return fig, df
8c94e9b989bbf0f04db99cffdc54cc9ec46b8e40
3,631,048
from typing import Tuple
from typing import Optional


def is_royal_flush(hand: Tuple[Card]) -> Optional[Tuple[str, PokerHand, int]]:
    """Return the representation of a royal flush in *hand*, if present.

    A royal flush is any straight flush whose description contains
    "Ten to Ace". Returns None when the hand is not a royal flush.
    """
    straight_flush = is_straight_flush(hand)
    if straight_flush is None or "Ten to Ace" not in straight_flush[0]:
        return None
    # All royal straights are equal in hash value
    return "a royal flush", PokerHand.ROYAL_FLUSH, 0
9e530d3c1ff44b35e6e17bc068f4557e059e3415
3,631,049
import torch


def make_complex_matrix(x, y):
    """Combine a real matrix and an imaginary matrix into one complex tensor.

    :param x: The real part of your matrix.
    :type x: torch.doubleTensor
    :param y: The imaginary part of your matrix.
    :type y: torch.doubleTensor

    :raises ValueError: If x and y do not share the same dimensions.

    :returns: A double tensor of shape (2, rows, cols) whose first slice is
        the real part and whose second slice is the imaginary part.
    :rtype: torch.doubleTensor
    """
    rows, cols = x.size()[0], x.size()[1]
    if rows != y.size()[0] or cols != y.size()[1]:
        raise ValueError(
            'Real and imaginary parts do not have the same dimension.')

    combined = torch.zeros(2, rows, cols, dtype=torch.double)
    combined[0] = x
    combined[1] = y
    return combined
faae031b3aa6f4972c8f558f6b66e33d416dec71
3,631,050
def array(dtype, ndim):
    """
    :param dtype: the Numba dtype type (e.g. double)
    :param ndim: the array dimensionality (int)
    :return: an array type representation; for ndim == 0 the scalar dtype
        itself is returned
    """
    # A 0-dimensional "array" degenerates to the scalar dtype.
    if ndim == 0:
        return dtype
    return minitypes.ArrayType(dtype, ndim)
f9b89a414d9bfb7a1e154df34c1c75a47943bca5
3,631,051
def as_pandas(data):
    """Return *data* as a pandas DataFrame if possible, raise otherwise.

    :param data: a ``pd.DataFrame`` (returned unchanged) or a ``dict``
        (converted to a new DataFrame)
    :raises TypeError: for any other input type
    :returns: a DataFrame
    """
    if isinstance(data, pd.DataFrame):
        return data
    elif isinstance(data, dict):
        return pd.DataFrame(data)
    else:
        # BUG FIX: corrected typo in the error message ("insead" -> "instead").
        raise TypeError(
            f"Expected a DataFrame or dict type, got: {type(data)} instead"
        )
a9243c327f8f7851b0b8347a9df684db7b152561
3,631,052
def scale3(v, s):
    """Scale the first three components of vector ``v`` by scalar ``s``.

    Returns the result as a new 3-tuple.
    """
    x, y, z = v[0], v[1], v[2]
    return (x * s, y * s, z * s)
4993c072fb66a33116177023dde7b1ed2c8705fd
3,631,053
def _get_last_ext_comment_id(connection):
    """Returns last external comment id.

    Args:
        connection: An instance of SQLAlchemy connection.

    Returns:
        Integer of last comment id from external model, or None when the
        table is empty (MAX over no rows yields SQL NULL).
    """
    result = connection.execute(
        sa.text("""
            SELECT MAX(id) FROM external_comments
        """)).fetchone()[0]
    return result
5cc139e3c4490293ebb305b7cac03d4803c51df6
3,631,054
from typing import SupportsAbs

import math


def is_unit(v: SupportsAbs[float]) -> bool:  # <2>
    """'True' if the magnitude of 'v' is close to 1."""
    magnitude = abs(v)
    return math.isclose(magnitude, 1.0)
0b31da2e5a3bb6ce49705d5b2a36d3270cc5d802
3,631,055
def atom_eq(at1, at2):
    """Return True when the two literals are syntactically equal."""
    return at1 == at2
43aab77292c81134490eb8a1c79a68b38d50628d
3,631,056
def is_valid_month(val):
    """
    Check whether or not a two-digit string is a valid date month.

    Args:
        val (str): The string to check.

    Returns:
        bool: True if the string is a valid date month (01-12), otherwise
        False.
    """
    # Must be exactly two characters, both digits.
    if len(val) != 2 or count_digits(val) != 2:
        return False
    return 1 <= int(val) <= 12
53d825473cf497441d09e08402e833fa9c362a83
3,631,057
def env_repos(action=None):
    """
    Perform an action on each environment repository, specified by action.

    Valid actions are 'add', 'reset' and 'rm'; when *action* is omitted the
    user is prompted for one.

    NOTE(review): uses dict.iteritems(), which exists only on Python 2 --
    confirm the target interpreter (Python 3 requires .items()).
    """
    actions = {
        'add': _add_repo,
        'reset': _reset_repo,
        'rm': _rm_repo
    }

    # Raises for unknown actions; also serves as the prompt validator.
    # (The parameter name shadows the builtin `input`.)
    def validate_action(input):
        if input not in actions:
            raise Exception('Invalid action specified.')
        return input

    if action:
        validate_action(action)
    else:
        action = prompt(
            "Enter one of the following actions: <%s>" % ", ".join(actions),
            validate=validate_action)

    repos = get_repos()
    for env, details in ENVS.iteritems():
        actions[action](env, details['repo_url'], repos)
f9b9ab0e671757bbcdf7bf4f50fe05e079aca115
3,631,058
import re


def get_job_definition_name_by_arn(job_definition_arn):
    """
    Parse a Job Definition ARN and return its name.

    Args:
        job_definition_arn: something like
            arn:aws:batch:<region>:<account-id>:job-definition/<name>:<version>

    Returns:
        the job definition name (the segment between the final '/' and ':')
    """
    match = re.search(r".*/(.*):(.*)", job_definition_arn)
    return match.group(1)
d55bab5bbc62bf6d9f7907e26cb2a4a418bd9c50
3,631,059
def get_polyline_length(polyline: np.ndarray) -> float:
    """Calculate the total arc length of a polyline.

    Args:
        polyline: Numpy array of shape (N,2)

    Returns:
        The length of the polyline as a scalar (sum of all segment lengths)
    """
    assert polyline.shape[1] == 2
    # Per-segment displacement vectors, then their Euclidean lengths.
    segments = np.diff(polyline, axis=0)
    segment_lengths = np.linalg.norm(segments, axis=1)
    return float(segment_lengths.sum())
9fb76a611c961af8ca10fda33029a55eb8589be1
3,631,060
import sys


def GDALReadBlock(dataset, blocno, BSx=-1, BSy=-1, verbose=False):
    """Read one block of band 1 from a GDAL raster as a float array.

    :param dataset: an open GDAL dataset, or a path string to open read-only
    :param blocno: zero-based block index, counted row-major across the raster
    :param BSx: block width; <= 0 uses the band's native block size
    :param BSy: block height; <= 0 uses the band's native block size
    :param verbose: when True, writes a progress marker every 100 blocks
    :return: the block data with no-data replaced by NaN, or None when the
        dataset could not be opened
    """
    dataset = gdal.Open(dataset, gdal.GA_ReadOnly) if isstring(dataset) else dataset
    if dataset:
        band = dataset.GetRasterBand(1)
        BSx, BSy = (BSx, BSy) if BSx > 0 else band.GetBlockSize()
        M, N = int(dataset.RasterYSize), int(dataset.RasterXSize)
        # Number of blocks per row (last one may be partial).
        Nb = int(N / BSx) + (0 if N % BSx == 0 else 1)
        # Pixel origin of the requested block.
        x0 = (blocno % Nb) * BSx
        y0 = int(blocno / Nb) * BSy
        # Clip the block size at the right/bottom raster edges.
        QSx = BSx if x0 + BSx <= N else N % BSx
        QSy = BSy if y0 + BSy <= M else M % BSy
        data = band.ReadAsArray(x0, y0, QSx, QSy)
        # Manage No Data: promote integer bands to float so NaN is representable.
        nodata = band.GetNoDataValue()
        bandtype = gdal.GetDataTypeName(band.DataType)
        if bandtype in ('Byte', 'Int16', 'Int32', 'UInt16', 'UInt32', 'CInt16', 'CInt32'):
            data = data.astype("Float32", copy=False)
        if bandtype in ('Float32', 'Float64', 'CFloat32', 'CFloat64'):
            data[data == nodata] = np.nan
        if verbose and blocno % 100 == 0:
            sys.stdout.write('r')
        # NOTE(review): `dataset` was rebound to the opened handle above, so
        # this isstring() check can never be True here -- the handle is never
        # explicitly released; confirm intent.
        if isstring(dataset):
            dataset, band = None, None
        return data
    return None
09aa97b137e7c9f627af007d80610829a0c98827
3,631,061
def add_update_stock(symbol, is_held):
    """This function takes a stock symbol as a string, makes a call to
    yfinance, and gets back the necessary data to add the symbol to the
    database. `is_held` must also be specified, to mark the is_held flag
    in the database True/False.

    Returns the (existing and updated, or newly created) Stocks row.
    """
    session = connect_to_session()

    # I imagine it isn't terribly likely that a company will change industry/sectors
    # So all we will focus on updating is is_held and datetime_updated

    # Start by querying to see if the stock exists in the database
    for row in session.query(Stocks).filter(Stocks.symbol == symbol):
        # there should only be one entry here, so this should work
        # NOTE(review): the filter already guarantees row.symbol == symbol,
        # so the else/continue branch is unreachable dead code.
        if row.symbol == symbol:
            # As long as everything is correct, the function should exit here
            stock = row
            stock.is_held = is_held
            stock.datetime_updated = func.now()
            session.commit()
            return stock
        else:
            continue

    # Not in the database yet: fetch metadata from yfinance.
    data = yf.Ticker(symbol).info

    # Check to see if the existing Industry/Sectors exist in the Db
    # Fetch all Industry names
    ind_sect_dict = dict()
    ind_sect_dict['industry'] = fetch_industry_sector_data('Industry', session)
    ind_sect_dict['sector'] = fetch_industry_sector_data('Sector', session)

    # Attempting above as a for loop for less copy/paste
    # Resolve (or create) the industry/sector ids for this stock.
    ids = dict()
    for x in ind_sect_dict.keys():
        try:
            ids[f'{x}_id'] = ind_sect_dict[x][data[x]]
        except KeyError:
            new = add_industry_sector(x, data[x], session)
            ids[f'{x}_id'] = new.id

    # Create an object of the stock
    stock = Stocks(
        symbol=symbol,
        short_name=data['shortName'],
        long_name=data['longName'],
        # industry_id=data['industry'],
        # sector_id=data['sector'],
        industry_id=ids['industry_id'],
        sector_id=ids['sector_id'],
        is_held=is_held,
        datetime_updated=func.now())

    # Add entry to DB
    session.add(stock)
    session.commit()
    return stock
df5c7782fd07f916e61e4d5dc1f8f8cf9be86eec
3,631,062
import math


def k2(Ti, exp=math.exp):
    """Rate coefficient as a function of ion temperature Ti. [cm^3 / s]"""
    # Work in units of 300 K.
    t = Ti / 300
    return 2.78e-13 * exp(2.07 / t - 0.61 / t ** 2)
6c1f471b31767f2d95f3900a8811f47dc8c45086
3,631,063
async def add_source(request):
    """
    API Endpoint to add new datasets to an instance

    API Params:
        file: location of the json or hub file
        filetype: 'hub' if trackhub or 'json' if configuration file

    Args:
        request: a sanic request object

    Returns:
        success/fail after adding measurements
    """
    file = request.args.get("file")
    # Local renamed from `type` to avoid shadowing the builtin.
    filetype = request.args.get("filetype")

    # BUG FIX: the original used `is "json"` / `is "hub"`, which tests
    # object identity and is not guaranteed to match equal strings coming
    # from the request args; compare by equality instead.
    if filetype == "json":
        request.app.epivizMeasurementsManager.import_files(file, request.app.epivizFileHandler)
    elif filetype == "hub":
        request.app.epivizMeasurementsManager.import_trackhub(file, request.app.epivizFileHandler)

    param_id = request.args.get("requestId")
    return response.json({"requestId": int(param_id),
                          "type": "response",
                          "error": None,
                          "data": True,
                          "version": 5
                          }, status=200)
624218d1e773c43a35fa3579d892cc6207e1ee1d
3,631,064
def clean_counties_data():
    """Clean US Counties data from NY Times

    Returns:
        DataFrame -- clean us counties data

    Updates:
        database table -- NYTIMES_COUNTIES_TABLE
        database view -- COUNTIES_VIEW
    """
    _db = DataBase()
    data = _db.get_table(US_COUNTIES_TABLE, parse_dates=['date'])
    counties = _db.get_geotable(US_MAP_TABLE)
    states = _db.get_geotable(STATE_MAP_TABLE)
    _db.close()

    # Row count before cleaning, for the ignored-lines report below.
    start = len(data)

    # use new york county fips for new york city
    data.loc[data['county'] == 'New York City', 'fips'] = '36061'

    # add state ids
    lookup = states.set_index('name')['state_id'].to_dict()
    data['state_id'] = data['state'].map(lookup)

    # add county ids - first attempt: join on "<county name><state id>"
    data['id'] = data['county'].str.lower() + data['state_id']
    counties['id'] = counties['name'].str.lower() + counties['state_id']
    lookup = counties[['id', 'county_id']].set_index('id')['county_id'].to_dict()
    data['county_id'] = data['id'].map(lookup)

    # add county ids - last attempt: fall back to the fips code
    condition = (~data['fips'].isna()) & (data['county_id'].isna())
    data.loc[condition, 'county_id'] = data.loc[condition, 'fips']

    # get rid of data that is not in county meta data
    data = data[data['county_id'].isin(list(counties['county_id']))].copy(deep=True)

    # state ids base on county_ids
    lookup = counties.set_index('county_id')['state_id'].to_dict()
    data['state_id'] = data['county_id'].map(lookup)

    # days from lastest day
    delta_day = pd.to_timedelta(1, unit='days')
    data['day'] = (data['date'].max() - data['date']) / delta_day
    data['day'] = data['day'].astype('Int32')

    end = len(data)

    # ny times counties table
    cols = ['county_id', 'state_id', 'date', 'day', 'cases', 'deaths']
    data = data[cols].copy(deep=True)
    data.reset_index(drop=True, inplace=True)
    # Bucket case counts into discrete levels; unmatched values become 0.
    data['case_level'] = pd.cut(data['cases'], LEVELS, labels=range(1, len(LEVELS)))
    data['case_level'] = pd.to_numeric(data['case_level'], 'coerce').fillna(0)
    data['case_level'] = data['case_level'].astype('Int32')

    # ignored lines
    print(f'ignored lines: {start-end}/{start} = {(100*(start-end)/start):.01f}%')

    # tables to database
    _db = DataBase()
    _db.add_table(NYTIMES_COUNTIES_TABLE, data.set_index(['county_id', 'day']))
    _db.update(DROP_COUNTIES_VIEW)
    _db.update(COUNTIES_VIEW)
    _db.close()

    return data
083e9720de9bfa422984d01fec08c0c4b33b0861
3,631,065
def get_3D_hist(sub_img):
    """
    Take in a sub-image, compute the 3D histogram (4x4x4 bins) of its
    colors and return it.
    """
    bins_per_channel = 4
    height, width = sub_img.shape[:2]
    # Flatten the image into a list of (R, G, B) pixel triples.
    pixels = sub_img.reshape(height * width, 3)
    hist_3D, _ = np.histogramdd(pixels, (bins_per_channel,) * 3)
    return hist_3D
c582ec9b7d6bb24585ce5f95d2a770b4b7a06c37
3,631,066
import os
import time
from datetime import datetime


def cache_tree(config_age, location_suffix):
    """
    A decorator for caching pickle files based on the configuration file.
    It is currently set up to decorate a function that has a single
    parameter ``site``. The returned function also can be passed keyword
    arguments to override the ``location_suffix`` argument. This is done
    with the ``cache`` argument to the function.

    :param str config_age: The key from the config file to read the max age from
    :param str location_suffix: The ending of the main part of the file name
                                where the cached file is saved.
    :returns: A function that uses the caching configuration
    :rtype: func
    """
    def func_decorator(func):
        @wraps(func)
        def do_function(site, callback=None, **kwargs):
            config_dict = config.config_dict()

            # Make cache directory if it doesn't exist first
            cache_dir = os.path.join(config_dict['VarLocation'], 'cache', site)
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)

            # Overwrite location_suffix if that's desired
            cache_location = os.path.join(
                cache_dir, '%s.pkl' % kwargs.get('cache', location_suffix))

            LOG.info('Checking for cache at %s', cache_location)
            # Rebuild when the cache is missing or older than the configured
            # maximum age (in days).
            if not os.path.exists(cache_location) or \
                    (time.time() - os.stat(cache_location).st_mtime) > \
                    float(config_dict.get(config_age, 0)) * 24 * 3600:
                if int(config_dict.get('SaveCache')) and os.path.exists(cache_location):
                    # Archive the stale cache with a date suffix.
                    # BUG FIX: with `from datetime import datetime` in scope,
                    # the original `datetime.datetime.fromtimestamp(...)`
                    # raised AttributeError; call the classmethod directly.
                    os.rename(cache_location,
                              '%s.%s' % (cache_location,
                                         datetime.fromtimestamp(
                                             os.stat(cache_location).st_mtime
                                         ).strftime('%y%m%d')))

                LOG.info('Cache is no good, getting new tree')
                if callback is None:
                    tree = func(site)
                else:
                    tree = func(site, callback)

                LOG.info('Making hash')
                tree.setup_hash()
                LOG.info('Saving tree at %s', cache_location)
                tree.save(cache_location)
            else:
                LOG.info('Loading tree from cache')
                tree = datatypes.get_info(cache_location)

            return tree
        return do_function
    return func_decorator
445384fa353d0bf60d7e78a28951562880b24717
3,631,067
import math
import torch


def magnitude_prune(masking, mask, weight, name):
    """Prune the smallest-magnitude weights of a sparse layer.

    The sparse-learning library represents each layer's sparsity as a
    binary mask: 1s mark active weights, 0s block gradient flow and pin
    the weight to zero.  This function zeroes out the entries of ``mask``
    corresponding to the smallest absolute weights, removing
    ``prune_rate`` of the currently active weights.

    Args:
        masking: Masking object holding per-layer statistics
            (``prune_rate``, ``name2nonzeros``, ``name2zeros``, ...).
        mask: binary mask tensor wrapper; 1s indicate active weights.
        weight: torch parameter of the layer being pruned.
        name: layer name used to look up statistics on ``masking``.

    Returns:
        The pruned binary mask (modified in place), or a fresh boolean
        mask of nonzero weights when nothing is to be removed.
    """
    active = masking.name2nonzeros[name]
    inactive = masking.name2zeros[name]
    num_remove = math.ceil(masking.prune_rate * active)
    if num_remove == 0.0:
        # Nothing to prune: rebuild the mask from the nonzero pattern.
        return weight.data != 0.0

    # Zero out the k smallest-magnitude entries (the already-zero ones
    # plus the newly pruned ones).
    k = math.ceil(inactive + num_remove)
    _, order = torch.sort(weight.data.abs().view(-1))
    mask.data.view(-1)[order[:k]] = 0.0
    return mask
4bac89da952338e133ac0d85735e80631862c7da
3,631,068
import requests


def delete_post(post_id):
    """Authenticate the caller and proxy a delete request to the posts service.

    Returns the downstream response body and status code, or a 401 tuple
    when no user is signed in (get_user() returned None).
    """
    try:
        # get_user() returns None when unauthenticated; subscripting it
        # then raises TypeError, which we translate into a 401.
        author_id = get_user()['user_id']
        response = requests.delete(
            app.config['POSTS_ENDPOINT'] + post_id,
            data={'author_id': author_id})
    except TypeError:
        return 'Error: Not signed in', 401
    return response.text, response.status_code
a558ab58ac3129fb543c89c8b3c236126d26ddac
3,631,069
import collections


def file_based_convert_examples_to_features_single(examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file.

    Each example is converted by ``convert_single_example`` and serialized
    as one ``tf.train.Example`` holding the packed question/passage ids,
    token ids, masks, segment ids, labels and scores.
    """
    # Single writer for the whole output file; one record per input example.
    writer = tf.python_io.TFRecordWriter(output_file)
    ex_index = 0
    for example in examples:
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example " + str(ex_index))
        # NOTE(review): convert_single_example is assumed to return an object
        # with unique_ids ("<qid>-<pid>" strings), input_ids, input_masks,
        # segment_ids, label_ids and scores -- confirm against its definition.
        feature = convert_single_example(ex_index, example, max_seq_length, tokenizer, label_list)
        ex_index = ex_index + 1

        def create_int_feature(values):
            f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
            return f

        def create_float_feature(values):
            f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
            return f

        features = collections.OrderedDict()
        p_ids = []
        q_ids = []
        # Each unique id encodes "<question id>-<passage id>"; split them out
        # into parallel lists.
        for qp in feature.unique_ids:
            elems = qp.split("-")
            q_id = int(elems[0])
            p_id = int(elems[1])
            p_ids.append(p_id)
            q_ids.append(q_id)
        features["q_ids"] = create_int_feature(q_ids)
        features["cand_nums"] = create_int_feature(p_ids)
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_masks)
        features["segment_ids"] = create_int_feature(feature.segment_ids)
        features["label_ids"] = create_int_feature(feature.label_ids)
        features["scores"] = create_float_feature(feature.scores)
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    writer.close()
    print("Wrote " + str(ex_index) + " examples " + output_file)
444afd74b8bccd7e014e727d4be41be75fbdbb11
3,631,070
def less_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Arguments
        x: Tensor or variable.
        y: Tensor or variable.

    # Returns
        A bool tensor.

    # Raise
        TypeError: if inputs are not valid.
    """
    # Unwrap Keras symbols to raw MXNet symbols; remember that at least one
    # side was symbolic so the scalar-comparison path can be used.
    scalar = False
    if isinstance(x, KerasSymbol):
        x = x.symbol
        scalar = True
    if isinstance(y, KerasSymbol):
        y = y.symbol
        scalar = True
    if isinstance(x, mx.sym.Symbol) and isinstance(y, mx.sym.Symbol):
        out = KerasSymbol(mx.sym.Cast(mx.sym.broadcast_lesser_equal(lhs=x, rhs=y), dtype='uint8'))
    elif scalar:
        out = KerasSymbol(mx.sym.Cast(x <= y, dtype='uint8'))
    else:
        try:
            # BUG FIX: this op is less_equal, so the NumPy fallback must be
            # np.less_equal -- previously np.less (strict <) was used.
            out = np.less_equal(x, y)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            raise TypeError('MXNet Backend: The inputs are not valid for less_equal operation.')
    return out
566c46cc4882f167275cb6bc400800413efdc85b
3,631,071
from datetime import datetime


def annual_reports():
    """Render the annual-reports page: one summary row per month.

    Walks month by month from the earliest checklist to now, counting
    records and verified records in each (shifted) monthly window.
    """
    database = DataProvider()
    # Objects with a cafe count double towards the attainable total.
    total = count(database.objects, lambda x: x.with_cafe) * 2 + \
        count(database.objects, lambda x: not x.with_cafe)
    reports_list = list()
    # calculate count of annual reports
    items = sorted(database.checklists, key=lambda x: x.date)
    # Start at the first day of the month of the earliest checklist.
    start_date = items[0].date.replace(day=1)
    now_date = datetime.now()
    while start_date < now_date:
        next_date = add_one_month(start_date)
        # calculate_date_period maps the nominal month onto the actual
        # reporting window for that month.
        (shifted_start, shifted_end) = calculate_date_period(start_date)
        all_records = where(items,
                            lambda x: x.date > shifted_start and x.date < shifted_end)
        logger.info('All records between %s and %s: %s',
                    shifted_start.strftime('%Y-%m-%d'),
                    shifted_end.strftime('%Y-%m-%d'),
                    len(all_records))
        if len(all_records) == 0:
            # roll to next month
            start_date = next_date
            continue
        verified = where(all_records, lambda x: x.state == 'verified')
        logger.info('Verified records between %s and %s: %s',
                    shifted_start.strftime('%Y-%m-%d'),
                    shifted_end.strftime('%Y-%m-%d'),
                    len(verified))
        report_item = dict()
        report_item['date'] = start_date
        report_item['all'] = len(all_records)
        report_item['verified'] = len(verified)
        report_item['total'] = total
        reports_list.append(report_item)
        # roll to next month
        start_date = next_date
    # title is Russian for "Reports" (user-facing string, kept as-is).
    return render_template(
        'annual_list.html',
        model=reports_list,
        title='Отчеты'
    )
2e6daacd7150e47b299948b60866238589881ef5
3,631,072
import sys


def alpha_036(code, end_date=None, fq="pre"):
    """
    Formula: RANK(SUM(CORR(RANK(VOLUME), RANK(VWAP)), 6), 2)

    Inputs:
        code: stock pool (security code or list of codes)
        end_date: query date
        fq: price-adjustment mode ("pre" = pre-adjusted)
    Outputs:
        the factor value
    """
    end_date = to_date_str(end_date)
    # The function's own name ("alpha_036") is looked up via the frame object
    # and forwarded so the server-side dispatcher knows which factor to
    # compute; **locals() forwards the (normalized) arguments.  NOTE: renaming
    # this function or any local variable would change the RPC payload.
    func_name = sys._getframe().f_code.co_name
    return JQDataClient.instance().get_alpha_191(**locals())
0c65d8d0b7961fcf79dafd87e3ed683a11da6350
3,631,073
def definition_for_include(parsed_include, parent_definition_key):
    """
    Given a parsed <xblock-include /> element as a XBlockInclude tuple, get the
    definition (OLX file) that it is pointing to.

    Arguments:
        parsed_include: An XBlockInclude tuple
        parent_definition_key: The BundleDefinitionLocator for the XBlock whose
            OLX contained the <xblock-include /> (i.e. the parent).

    Returns: a BundleDefinitionLocator

    Raises:
        BundleFormatException: if the include names a link that does not exist
            in the parent's bundle.
    """
    # Both branches resolve to the same OLX path inside whichever bundle wins.
    olx_path = f"{parsed_include.block_type}/{parsed_include.definition_id}/definition.xml"
    if parsed_include.link_id:
        links = get_bundle_direct_links_with_cache(
            parent_definition_key.bundle_uuid,
            # And one of the following will be set:
            bundle_version=parent_definition_key.bundle_version,
            draft_name=parent_definition_key.draft_name,
        )
        try:
            link = links[parsed_include.link_id]
        except KeyError as err:
            # Chain the KeyError so tracebacks show the failed lookup
            # (replaces the previous raise-missing-from lint amnesty).
            raise BundleFormatException(f"Link not found: {parsed_include.link_id}") from err
        return BundleDefinitionLocator(
            bundle_uuid=link.bundle_uuid,
            block_type=parsed_include.block_type,
            olx_path=olx_path,
            bundle_version=link.version,
        )
    else:
        return BundleDefinitionLocator(
            bundle_uuid=parent_definition_key.bundle_uuid,
            block_type=parsed_include.block_type,
            olx_path=olx_path,
            bundle_version=parent_definition_key.bundle_version,
            draft_name=parent_definition_key.draft_name,
        )
325de830231c9b21a3c7cfce4262fef291ab6fbf
3,631,074
def verify_user(uid, token_value):
    """
    Verify the current user's account.

    Link should have been sent to the user's email.

    Args:
        token_value: the verification token value
    Returns:
        True on successful verification of the (uid, token_value) pair,
        False when the token does not belong to the current user
    Raises:
        PicoException if user is not logged in
    """
    db = api.db.get_conn()
    token_user = api.token.find_key_by_token("email_verification", token_value)
    if token_user is None:
        return False

    current_user = api.user.get_user(uid=uid)
    # The token must have been issued for this exact user.
    if token_user["uid"] != current_user["uid"]:
        return False

    db.users.find_one_and_update(
        {"uid": current_user["uid"]}, {"$set": {"verified": True}}
    )
    # One-shot token: remove it once the account is verified.
    api.token.delete_token({"uid": current_user["uid"]}, "email_verification")
    return True
e63e7044e66bb29e8f44d7fa0a08128597e7b07e
3,631,075
def cholesky_metric(chol: JAXArray, *, lower: bool = True) -> Metric:
    """A general metric parameterized by its Cholesky factor.

    The units of the Cholesky factor are length, unlike the dense metric,
    so ``cholesky_metric(jnp.diag(ell))`` and ``diagonal_metric(ell)`` are
    equivalent.

    Args:
        chol (JAXArray): Cholesky factor of the covariance-matrix metric;
            must be positive definite.
        lower (bool, optional): Whether ``chol`` is lower triangular.
    """
    def solve(arg):
        # Triangular solve against the factor, i.e. chol^{-1} @ arg.
        return linalg.solve_triangular(chol, arg, lower=lower)

    return compose(unit_metric, solve)
406d0db5d3f6f9317a0b45895823c3927564653a
3,631,076
import typing


def format_roman(value: int) -> str:
    """Render *value* (1..3999) as lowercase Roman numerals."""
    assert 0 < value < 4000
    # Collect one piece per decimal place, least significant first, then
    # reverse: the symbol tables are indexed by place (ones, tens, ...).
    pieces: typing.List[str] = []
    place = 0
    while value != 0:
        value, digit = divmod(value, 10)
        one = ROMAN_ONES[place]
        if digit == 9:
            piece = one + ROMAN_ONES[place + 1]
        elif digit == 4:
            piece = one + ROMAN_FIVES[place]
        elif digit >= 5:
            piece = ROMAN_FIVES[place] + one * (digit - 5)
        else:
            piece = one * digit
        pieces.append(piece)
        place += 1
    return ''.join(reversed(pieces))
259b205ffa25bbdeccb0ca6883c02c99d194f60d
3,631,077
import os


def find_top_directory():
    """
    Find the parent directory of the poky meta-layer

    :return: the base path
    """
    # COREBASE points at the poky layer; its parent is the project root.
    corebase = tinfoil.config_data.getVar("COREBASE", True)
    return os.path.dirname(corebase)
9978c6f0c0673c9fb42d47c15d24e17c684161d7
3,631,078
def isint(s):
    """Return True when *s* can be converted with int(), else False."""
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
dbcb20b437f1ccfb09f5cb969b7d5b9d369d2e38
3,631,079
def embed_vimeo(url):
    """
    Return HTML for embedding Vimeo videos or ``None`` if the argument is
    not a Vimeo link.

    The Vimeo ``<iframe>`` is wrapped in a
    ``<div class="responsive-embed widescreen vimeo">`` element.
    """
    match = VIMEO_RE.search(url)
    if match is None:
        return None
    code = match.groupdict()["code"]
    html = (
        '<div class="responsive-embed widescreen vimeo">'
        f'<iframe src="https://player.vimeo.com/video/{code}"'
        ' frameborder="0" allow="autoplay; fullscreen" allowfullscreen="">'
        '</iframe>'
        '</div>'
    )
    return mark_safe(html)
7923991466ec3eafa4c991bb93b2244ebcf99c47
3,631,080
def random_sample(random_state, size=None, chunk_size=None, gpu=None, dtype=None):
    """
    Return random floats in the half-open interval [0.0, 1.0).

    Results are from the "continuous uniform" distribution over the stated
    interval.  To sample :math:`Unif[a, b), b > a` multiply the output of
    `random_sample` by `(b-a)` and add `a`::

      (b - a) * random_sample() + a

    Parameters
    ----------
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case a
        single value is returned.
    chunk_size : int or tuple of int or tuple of ints, optional
        Desired chunk size on each dimension
    gpu : bool, optional
        Allocate the tensor on GPU if True, False as default
    dtype : data-type, optional
        Data-type of the returned tensor.

    Returns
    -------
    out : float or Tensor of floats
        Array of random floats of shape `size` (unless ``size=None``, in
        which case a single float is returned).

    Examples
    --------
    >>> import mars.tensor as mt

    >>> mt.random.random_sample((5,)).execute()
    array([ 0.30220482,  0.86820401,  0.1654503 ,  0.11659149,  0.54323428])

    Three-by-two array of random numbers from [-5, 0):

    >>> (5 * mt.random.random_sample((3, 2)) - 5).execute()
    array([[-3.99149989, -0.52338984],
           [-2.99091858, -0.79479508],
           [-1.23204345, -1.75224494]])
    """
    # Default to 64-bit floats, matching NumPy's random_sample.
    dtype = np.dtype('f8') if dtype is None else dtype
    normalized_size = random_state._handle_size(size)
    op = TensorRandomSample(state=random_state.to_numpy(),
                            size=normalized_size, gpu=gpu, dtype=dtype)
    return op(chunk_size=chunk_size)
441a8d1b6e972ab961cf5910cd3737e5a21578e7
3,631,081
def anndata_file():
    """Pytest fixture factory producing synthetic AnnData objects.

    Returns a callable taking ``nvals`` (number of observations/cells) that
    fabricates a sparse-ish expression matrix with random batch labels and
    a train/valid split.
    """
    def _create_file(nvals):
        # 15289 features (genes) per observation -- presumably matches the
        # reference datasets below; TODO confirm.
        size = 15289 * nvals
        vals = np.zeros(size, dtype=np.float32)
        # Target roughly 8% non-zero entries, jittered with Gaussian noise.
        non_zero = size - int(size * 0.92)
        non_zero = int(np.random.normal(loc=non_zero, scale=10, size=1))
        # Non-zero expression values: |N(1.38, 0.889)| + 1 (strictly > 1).
        rand = np.random.normal(loc=1.38, scale=0.889, size=non_zero)
        rand = np.abs(rand) + 1
        # Scatter the non-zero values at distinct random positions.
        idx = np.random.choice(np.arange(0, size), size=non_zero, replace=False)
        vals[idx] = rand
        vals = vals.reshape(-1, 15289)
        # Batch label proportions taken from the two source datasets.
        labels = {
            "pbmc_8k_new": 0.5382136602451839,
            "pbmc_cite_new": 0.4617863397548161
        }
        batch = np.random.choice(list(labels.keys()), p=list(labels.values()),
                                 replace=True, size=nvals)
        batch = pd.Categorical(batch)
        processed = anndata.AnnData(vals)
        processed.obs["batch"] = batch
        # 60/40 train/valid split, assigned independently per cell.
        processed.obs["split"] = np.random.choice(["train", "valid"],
                                                  p=[0.6, 0.4],
                                                  replace=True, size=nvals)
        return processed
    return _create_file
0f9c1ff260ae48837e9f925c65fd87c7b5f75768
3,631,082
def _heatmap_summary(pvals, coefs, plot_width=1200, plot_height=400):
    """ Plots heatmap of coefficients colored by pvalues

    Parameters
    ----------
    pvals : pd.DataFrame
        Table of pvalues where rows are balances and columns are covariates.
    coefs : pd.DataFrame
        Table of coefficients where rows are balances and columns are
        covariates.
    plot_width : int, optional
        Width of plot.
    plot_height : int, optional
        Height of plot.

    Returns
    -------
    bokeh.charts.Heatmap
        Heatmap summarizing the regression statistics.
    """
    c = coefs.reset_index()
    c = c.rename(columns={'index': 'balance'})

    # fix alpha in fdr to account for the number of covariates
    def fdr(x):
        return multipletests(x, method='fdr_bh',
                             alpha=0.05 / pvals.shape[1])[1]

    cpvals = pvals.apply(fdr, axis=0)

    # log scale for coloring
    log_p = -np.log10(cpvals + 1e-200)
    log_p = log_p.reset_index()
    log_p = log_p.rename(columns={'index': 'balance'})
    p = pvals.reset_index()
    p = p.rename(columns={'index': 'balance'})
    cp = cpvals.reset_index()
    cp = cp.rename(columns={'index': 'balance'})

    # Melt every table to long form so they can all be merged on
    # (balance, Covariate).
    cm = pd.melt(c, id_vars='balance', var_name='Covariate',
                 value_name='Coefficient')
    pm = pd.melt(p, id_vars='balance', var_name='Covariate',
                 value_name='Pvalue')
    cpm = pd.melt(cp, id_vars='balance', var_name='Covariate',
                  value_name='Corrected_Pvalue')
    logpm = pd.melt(log_p, id_vars='balance', var_name='Covariate',
                    value_name='log_Pvalue')
    m = pd.merge(cm, pm, left_on=['balance', 'Covariate'],
                 right_on=['balance', 'Covariate'])
    m = pd.merge(m, logpm, left_on=['balance', 'Covariate'],
                 right_on=['balance', 'Covariate'])
    m = pd.merge(m, cpm, left_on=['balance', 'Covariate'],
                 right_on=['balance', 'Covariate'])

    hover = HoverTool(
        tooltips=[("Pvalue", "@Pvalue"),
                  ("Corrected Pvalue", "@Corrected_Pvalue"),
                  ("Coefficient", "@Coefficient")]
    )

    # Integer grid coordinates for the heatmap cells.
    N, _min, _max = len(palette), m.log_Pvalue.min(), m.log_Pvalue.max()
    X = pd.Series(np.arange(len(pvals.index)), index=pvals.index)
    Y = pd.Series(np.arange(len(pvals.columns)), index=pvals.columns)
    m['X'] = [X.loc[i] for i in m.balance]
    m['Y'] = [Y.loc[i] for i in m.Covariate]

    # fill in nans with zero.  Sometimes the pvalue calculation fails.
    m = m.fillna(0)
    # Map each -log10(p) onto the palette by linear rescaling into [0, N-1].
    for i in m.index:
        x = m.loc[i, 'log_Pvalue']
        ind = int(np.floor((x - _min) / (_max - _min) * (N - 1)))
        m.loc[i, 'color'] = palette[ind]
    source = ColumnDataSource(ColumnDataSource.from_df(m))

    # BUG FIX: the figure previously hard-coded plot_width=1200 and
    # plot_height=400, silently ignoring the function's parameters.
    hm = figure(title='Regression Coefficients Summary',
                plot_width=plot_width, plot_height=plot_height,
                tools=[hover, PanTool(), BoxZoomTool(),
                       WheelZoomTool(), ResetTool(), SaveTool()])
    hm.rect(x='X', y='Y', width=1, height=1,
            fill_color='color', line_color="white", source=source)
    Xlabels = pd.Series(pvals.index, index=np.arange(len(pvals.index)))
    Ylabels = pd.Series(pvals.columns, index=np.arange(len(pvals.columns)))
    hm.xaxis[0].ticker = FixedTicker(ticks=Xlabels.index)
    hm.xaxis.formatter = FuncTickFormatter(code="""
    var labels = %s;
    return labels[tick];
    """ % Xlabels.to_dict())
    hm.yaxis[0].ticker = FixedTicker(ticks=Ylabels.index)
    hm.yaxis.formatter = FuncTickFormatter(code="""
    var labels = %s;
    return labels[tick];
    """ % Ylabels.to_dict())
    return hm
32dae78fbaa3e978d418255e387e63f6346315ff
3,631,083
from datetime import datetime


def get_warehouse_latest_modified_date(email_on_delay=False):
    """
    Return how fresh the data of the app_status warehouse model is.

    The end_datetime of a batch is used to filter forms by last_modified
    (received_on, edited_on, deleted_on).  When no completed batch exists,
    a far-past sentinel date is returned.  With ``email_on_delay`` set, a
    soft assert emails the SMS team about whether the lag is acceptable.
    """
    latest_batch = Batch.objects.filter(
        dag_slug='app_status_batch',
        completed_on__isnull=False
    ).order_by('completed_on').last()

    if not latest_batch:
        # Sentinel: no batch has ever completed.
        return datetime(2000, 1, 1)

    latest_date = latest_batch.end_datetime
    if email_on_delay:
        SMS_TEAM = ['{}@{}'.format('icds-sms-rule', 'dimagi.com')]
        _soft_assert = soft_assert(to=SMS_TEAM, send_to_ops=False)
        lag = (datetime.utcnow() - latest_date).total_seconds() / 60
        if lag > ACCEPTABLE_WAREHOUSE_LAG_IN_MINUTES:
            _soft_assert(
                False,
                "The weekly inactive SMS rule is skipped for this week. Warehouse lag is {} minutes"
                .format(str(lag))
            )
        else:
            _soft_assert(False,
                         "The weekly inactive SMS rule is successfully triggered for this week")
    return latest_date
541dba13fde93acc51a41a9079a6da5f0344514a
3,631,084
import subprocess


def retrieve_contents(repo, commit, path, encoding=None):
    """Retrieve contents of given file at given revision / tree

    Parameters
    ----------
    repo : str | git.Repo | pygit2.Repository
        Pathname to the repository, or either GitPython (git.Repo)
        or pygit2 (pygit2.Repository) repository object.  Type of this
        parameter selects which implementation is used.

        NOTE: Both GitPython and pygit2 backends raise KeyError
        if file or commit does not exist; error handling for
        git command based backend is not implemented yet.

    commit : str
        The commit for which to return file contents.  Defaults to 'HEAD',
        that is the current commit.

    path : str
        Path to a file, relative to the top-level of the repository

    encoding : str, optional
        Encoding of the file

    Returns:
    --------
    str | unicode
        Contents of the file with given path at given revision

        NOTE(review): the pygit2 branch returns raw ``blob.data`` without
        decoding, so its return type differs (bytes) -- confirm whether
        that is intentional.
    """
    if encoding is None:
        encoding = DEFAULT_FILE_ENCODING

    # Python 2 only ('basestring'); a path string selects the plain
    # `git` command-line backend.
    if isinstance(repo, basestring):
        cmd = [
            'git', '-C', repo, 'show',
            #'git', '-C', repo, 'cat-file', 'blob',
            # assumed that 'commit' is sane
            commit+':'+path
        ]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        result = process.stdout.read().decode(encoding)
        # NOTE: does not handle errors correctly yet
        return result

    elif isinstance(repo, git.repo.base.Repo):
        # first possible implementation, less flexible
        #blob = repo.commit(commit).tree / path
        # second possible implementation, more flexible
        blob = repo.rev_parse(commit + ':' + path)
        result = blob.data_stream.read().decode(encoding)
        return result

    elif isinstance(repo, pygit2.repository.Repository):
        blob = repo.revparse_single(commit + ':' + path)
        result = blob.data
        return result

    else:
        raise NotImplementedError('unsupported repository type %s (%s)' %
                                  (type(repo), repo))
834aabe9eca88f9a9f4b3ba377832aa24ae720b9
3,631,085
async def async_unload_entry(hass, config_entry):
    """Handle removal of an entry."""
    # No resources were allocated for this entry, so unloading always
    # succeeds.
    return True
28005ececbf0c43c562cbaf7a2b8aceb12ce3e41
3,631,086
def render_links(link_dict):
    """Render links to html

    Args:
        link_dict: dict where keys are names, and values are lists
            (url, text_to_display). For example::

                {"column_moistening.mp4": [(url_to_qv, "specific humidity"), ...]}
    """
    rendered = {}
    for name, links in link_dict.items():
        # Join the anchor tags for this entry with single spaces.
        anchors = [_html_link(url, tag) for (url, tag) in links]
        rendered[name] = " ".join(anchors)
    return rendered
c07f388e97f9e723cfc42ee66ef1eea654167820
3,631,087
import json


def is_valid_json(text: str) -> bool:
    """Return True when *text* parses as JSON, False otherwise."""
    try:
        json.loads(text)
    except json.JSONDecodeError:
        return False
    return True
3013210bafd5c26cacb13e9d3f4b1b708185848b
3,631,088
from scipy import signal


def peri_saccadic_response(spike_counts, eye_track, motion_threshold=5, window=15):
    """
    Computes the cell average response around saccades.

    params:
        - spike_counts: cells activity matrix of shape (t, n_cell)
        - eye_track: eye tracking data of shape (t, >=2); columns 0 and 1
          are the x/y positions (any extra columns are ignored)
        - motion_threshold: amount of motion in pixels to count as a saccade
        - window: number of samples before and after each saccade over which
          to average the response

    return:
        - peri-saccadic response of shape (window*2, n_cell): mean activity
          in the window around detected saccades (zeros if none detected)
    """
    # BUG FIXES vs. the previous version: the body referenced undefined
    # names `eye_tracking` / `spike_bins` instead of the parameters, and
    # imported the stdlib `signal` module which has no `find_peaks`
    # (scipy.signal is required).
    # Frame-to-frame gaze displacement; 0 for the first frame.
    eye_shifts = np.concatenate(
        ([0], np.linalg.norm(eye_track[1:, :2] - eye_track[:-1, :2], axis=1)))
    # Detect saccade onsets as displacement peaks above threshold, at least
    # 10 samples apart.
    peaks, _ = signal.find_peaks(eye_shifts, height=motion_threshold,
                                 distance=10)

    psr = np.zeros((window * 2, spike_counts.shape[1]))
    n_used = 0
    for peak in peaks:
        # Ignore peaks too close to the matrix edges for a full window.
        if peak < window or (peak + window) > len(spike_counts):
            continue
        psr += spike_counts[peak - window:peak + window]
        n_used += 1
    # Average only over the windows actually accumulated; also avoids a
    # division by zero when no saccade is found.
    if n_used:
        psr /= n_used
    return psr
85455106bb1cd438b2aeb25ee0fc3708a166d4b7
3,631,089
import operator


def assign_subpopulation_from_region(pop, region, criteria, verbose=False):
    """
    Compute required consistencies and assign subpopulations to a population
    of models based on results from a simulation region.

    Inputs:
        pop - a PopulationOfModels class
        region - a list of simulations
        criteria - has the format: {name:[firing pattern, opcode, value]}
                   E.g. {'single < 25':['single', '>=', 25]}
        verbose - print how many models ended up in the subpopulation

    Returns:
        DataFrame indexed by the models that satisfy every criterion.
    """
    # Map comparison opcodes onto the operator module's functions.
    opcodes = {'>':operator.gt, '>=':operator.ge, '<':operator.lt, '<=':operator.le}

    # Build consistencies for each criteria
    consistencies = pd.DataFrame(index=pop.results.index)
    for criterion in criteria:
        firing_pattern = criteria[criterion][0]
        # One consistency column per firing pattern, evaluated on step
        # stimuli (ramp amplitudes are scaled 10x relative to step).
        pom_consistency = pom_consistency_in_region(pop, region,
                                                    firing_pattern=firing_pattern,
                                                    stim_type='step',
                                                    amp_stim_relationship={'step':1,'ramp':10})
        consistencies[firing_pattern] = pom_consistency

    # Find the models that fulfill all consistency criteria
    models_passing_criteria = pd.DataFrame(index=consistencies.index)
    for criterion, criterion_params in criteria.items():
        firing_pattern = criterion_params[0]
        opcode = criterion_params[1]
        val = criterion_params[2]
        op = opcodes[opcode]
        consistency = consistencies[firing_pattern]
        # Boolean column: does each model satisfy this single criterion?
        models_passing_criteria[criterion] = op(consistency,val)
    models_passing_all_criteria = models_passing_criteria.all(axis=1)

    # Filter away models that don't pass all criteria
    subpopulation = pd.DataFrame(index=pop.results.index)
    subpopulation = subpopulation[models_passing_all_criteria]
    if verbose:
        print('{} models out of {} in population of models are in the subpopulation'.format(
            len(subpopulation.index), len(pop.results.index))
        )
    return subpopulation
5279d7e398ed164aed4131bb1c826c5f1604c4a4
3,631,090
def op_structure(ea, opnum, id, **delta):
    """Apply the structure identified by `id` to the instruction operand
    `opnum` at the address `ea`.

    If the offset `delta` is specified, shift the structure by that amount.

    `id` may identify either a structure or a structure member; in both
    cases the call is resolved to a (structure, member) pair and delegated
    to the list-taking overload of op_structure.  (NOTE: the parameter name
    `id` shadows the builtin; kept for API compatibility.)
    """
    # Structures can only be applied to operands of code items.
    ea = interface.address.inside(ea)
    if not database.type.is_code(ea):
        raise E.InvalidTypeOrValueError(u"{:s}.op_structure({:#x}, {:d}, {:#x}{:s}) : Item type at requested address is not of a code type.".format(__name__, ea, opnum, id, ", {:s}".format(utils.string.kwargs(delta)) if delta else ''))

    # Try interpreting `id` as a structure id first, then as a member id.
    offset, sptr, name = 0, idaapi.get_struc(id), idaapi.get_member_fullname(id)
    if sptr is not None:
        # `id` is a structure: use its first member as the anchor.
        offset = idaapi.get_struc_first_offset(sptr)
        sid, mptr = sptr.id, idaapi.get_member(sptr, offset)
        if mptr is None:
            raise E.DisassemblerError(u"{:s}.op_structure({:#x}, {:d}, {:#x}{:s}) : Unable to locate the first member of the structure with the specified id.".format(__name__, ea, opnum, id, ", {:s}".format(utils.string.kwargs(delta)) if delta else ''))
        mid = mptr.id
    elif name is not None:
        # `id` is a member: recover its owning structure.
        fn = idaapi.get_member_fullname(id)
        sptr = idaapi.get_member_struc(name)
        sid, mid = sptr.id, id
    else:
        raise E.InvalidParameterError(u"{:s}.op_structure({:#x}, {:d}, {:#x}{:s}) : Unable to locate the structure member for the specified id.".format(__name__, ea, opnum, id, ", {:s}".format(utils.string.kwargs(delta)) if delta else ''))

    # if an offset was specified such as if the first member of the structure
    # is not at offset 0, then adjust the delta by its value
    if offset:
        delta['delta'] = delta.get('delta', 0) - offset

    # Resolve ids to wrapper objects and delegate to the list overload.
    st = structure.by(sid)
    m = st.by_identifier(mid)
    return op_structure(ea, opnum, [st, m], **delta)
95d45003c86b4a99bb60d00eb584bb358f1cf350
3,631,091
from pathlib import Path


def get_config_path(root: str, idiom: str) -> Path:
    """Build the path to an idiom's JSON config file.

    Arguments:
        root {str} -- root directory of idiom configs
        idiom {str} -- basename of the idiom config (without extension)

    Returns:
        Path -- ``<root>/<idiom>.json``

    (Doc fix: the previous docstring claimed a ``Tuple[Path, Path]``
    return, but a single Path has always been returned.)
    """
    return Path(root) / f'{idiom}.json'
86d65f11fbd1dfb8aca13a98e129b085158d2aff
3,631,092
def status():
    """
    Method to get the list of components available.

    :return: It yields json string for the list of components.
    """
    # Fetch the component status payload and hand it to the template.
    return render_template('status.html', data=pgc.get_data("status"))
012431c843d051aec85df45fad005b9d17c71a5d
3,631,093
from typing import Union
from typing import Iterable
from typing import Tuple
from typing import List
import heapq


def dijkstra(
    graph: "LilMatrix", source: Union[int, Iterable[int]]
) -> Tuple[List[int], List[int]]:
    """Dijkstra shortest paths from one or several sources.

    Parameters
    ----------
    graph
        Weighted adjacency list: ``graph[u]`` yields ``(v, weight)`` pairs.
    source
        A single source vertex, or an iterable of source vertices.

    Returns
    -------
    (cost, prev)
        ``cost[v]`` is the distance from the nearest source (``IINF`` when
        unreachable); ``prev[v]`` is the predecessor on that shortest path
        (-1 for sources and unreachable vertices).

    Notes
    -----
    Time complexity O((V + E) log V) with a lazy-deletion binary heap.

    References
    ----------
    ..[1] 🐜 p.96
    """
    n = len(graph)
    cost = [IINF] * n
    prev = [-1] * n
    # BUG FIX: the iterable form of `source` was previously consumed twice
    # (once to build the heap, once to set cost[si] = 0), so a generator
    # source left all costs at IINF.  Materialize it exactly once.
    sources = [source] if isinstance(source, int) else list(source)
    que: List[Tuple[int, int]] = [(0, si) for si in sources]
    for si in sources:
        cost[si] = 0
    heapq.heapify(que)
    while que:
        top_cost, top = heapq.heappop(que)
        if cost[top] < top_cost:
            continue  # stale heap entry
        for dest, weight in graph[top]:
            nxt_cost = top_cost + weight
            if nxt_cost < cost[dest]:
                cost[dest] = nxt_cost
                prev[dest] = top
                heapq.heappush(que, (nxt_cost, dest))
    return cost, prev
62278adeda336344eadaac00b9240cddd747b496
3,631,094
def stdev(some_list):
    """
    Calculate the (population) standard deviation of a list.
    """
    center = mean(some_list)
    squared_deviations = [(value - center) ** 2 for value in some_list]
    return sqrt(mean(squared_deviations))
4a8cf5d19af1e07e8228d285d1f2fcfb702a2158
3,631,095
def get_hg19_chroms():
    """Chromosomes in the human genome

    Returns:
        list: list of chromosomes
    """
    # NOTE(review): delegates to get_hg38_chroms(); presumably the chromosome
    # *names* are identical between the hg19 and hg38 assemblies so the list
    # is shared -- confirm this is intentional and not a copy-paste slip.
    return get_hg38_chroms()
2565eb2fa1ca1dd1b513a2a7dc836777911c7fc8
3,631,096
def track(im0, im1, p0, lk_params_, fb_threshold=-1):
    """
    Main tracking method using sparse optical flow (LK)

    im0: previous image in gray scale
    im1: next image
    p0: points to track (OpenCV point array, shape (N, 1, 2))
    lk_params_: Lucas-Kanade params dict, passed through to OpenCV
    fb_threshold: minimum acceptable backtracking distance; values <= 0
        disable the forward-backward consistency check
    """
    # NOTE(review): when p0 is empty/None this returns a single empty array,
    # while the normal path returns a 3-tuple (p1, st1, err1) -- callers must
    # handle both shapes; consider unifying the return type.
    if p0 is None or not len(p0):
        return np.array([])

    # Forward flow
    p1, st1, err1 = cv2.calcOpticalFlowPyrLK(im0, im1, p0, None, **lk_params_)

    if fb_threshold > 0:
        # Backward flow: re-track p1 back onto im0 and compare with p0.
        p0r, st0, err0 = cv2.calcOpticalFlowPyrLK(im1, im0, p1, None, **lk_params_)
        # Invalidate points whose backward track failed.
        p0r[st0 == 0] = np.nan

        # Set only good tracks: forward-backward displacement below threshold.
        fb_good = np.fabs(np.linalg.norm(p0r[:,0,:] - p0[:,0,:], axis=1)) < fb_threshold
        p1[~fb_good] = np.nan
        st1[~fb_good] = 0
        err1[~fb_good] = np.nan

    return p1, st1, err1
4a35dbb3c206f3b2e967f2b15853b19c7d579eb9
3,631,097
def get_kernel(X, Y, type='linear', param=1.0):
    """Compute a kernel matrix between the columns of X and Y (dims x exms).

    type='linear' returns X^T Y; type='rbf' returns the Gaussian kernel
    exp(-||x_i - y_j||^2 / param) built from pairwise squared distances.
    (Parameter name `type` shadows the builtin but is part of the public
    signature, so it is kept.)
    """
    _, Xn = X.shape
    _, Yn = Y.shape
    kernel = 1.0
    if type == 'linear':
        kernel = X.T.dot(Y)
    if type == 'rbf':
        # Squared norms of each column, broadcast into (Xn, Yn) grids.
        sq_x = np.diag(X.T.dot(X)).reshape(1, Xn)
        sq_y = np.diag(Y.T.dot(Y)).reshape(1, Yn)
        Dx = (np.ones((Yn, 1)) * sq_x).T
        Dy = np.ones((Xn, 1)) * sq_y
        # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2
        sq_dists = Dx - 2. * np.array(X.T.dot(Y)) + Dy
        kernel = np.exp(-sq_dists / param)
    return kernel
98bd634456bbc4ec115de58fd58a1cd6df84b3a5
3,631,098
def MIDPOINT(ds, count, timeperiod=-2**31):
    """MidPoint over period"""
    # Delegates to the shared TA-Lib dispatch helper.  The -2**31 default
    # appears to be TA-Lib's TA_INTEGER_DEFAULT sentinel (INT_MIN), meaning
    # "use the indicator's default period" -- confirm against the helper.
    return call_talib_with_ds(ds, count, talib.MIDPOINT, timeperiod)
0315f5148bbd4621db30aa572fd984f23cb6ee79
3,631,099