content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def _conv2d(input, filter, bias=False, strides=[1, 1], pads=[1, 1, 1, 1], dilations=[1, 1], group=1, debugContext=''):
    """Wrap ``get_builder().aiOnnx.conv`` and return the output as a TTensor.

    Args:
        input: input tensor (project tensor wrapper).
        filter: convolution kernel tensor.
        bias: optional bias tensor; ``False`` (default) means "no bias".
            NOTE(review): despite the boolean default, a truthy value is used
            as a tensor (``bias.getIpuIndex()``) — presumably callers pass a
            TTensor here; confirm.
        strides: per-spatial-axis convolution strides.
        pads: padding as [top, left, bottom, right].
        dilations: per-spatial-axis kernel dilations.
        group: number of convolution groups (default 1).
        debugContext: debug name forwarded to the builder.
    """
    conv_args = [input.getIpuIndex(), filter.getIpuIndex()]
    if bias:
        conv_args.append(bias.getIpuIndex())
    conv_out = get_builder().aiOnnx.conv(conv_args,
                                         strides=strides,
                                         pads=pads,
                                         dilations=dilations,
                                         group=group,
                                         debugContext=debugContext)
    # Optionally cap the planner's memory budget for this op.
    if get_memory_proportion() is not None:
        get_builder().setAvailableMemoryProportion(conv_out, get_memory_proportion())
    return TTensor(conv_out)
ca80e49e218414e04fcfc81a8890546188765eb3
23,500
def split_data(n_samps, percent_test):
    """Randomly partition sample indices into train and validation sets.

    :param n_samps: number of data samples
    :param percent_test: fraction of the data to hold out for validation
    :return: (train_indices, validation_indices), two numpy index arrays
    """
    # Shuffle all indices in place so the split is random.
    indices = np.arange(n_samps)
    np.random.shuffle(indices)
    # Everything before the cut point trains; the remainder validates.
    cut = int(n_samps * (1 - percent_test))
    return indices[:cut], indices[cut:]
68d63d28b2aaab2697f2aab70fc7341a9a31811d
23,501
def compute_totals(songs, limit_n, save_file=None):
    """
    Return array of shape (4, 3, 35) representing counts for each group of
    each context type of each label.

    :param songs: iterable of (song_path, beatmap_ids) pairs; song_path is a
        .npy spectrogram file, beatmap_ids are keys for ``db.beatmap_data``.
    :param limit_n: forwarded to ``get_counts``.
    :param save_file: optional path; when given, the totals are saved there.
    """
    totals = np.zeros((4, 3, 35), dtype='int32')
    for index, (song_path, beatmap_ids) in enumerate(songs):
        print('song {}'.format(index))
        spectrogram = np.load(song_path)
        beatmaps = [db.beatmap_data(beatmap_id) for beatmap_id in beatmap_ids]
        # Accumulate in place so dtype stays int32.
        totals[:] = totals + get_counts(beatmaps, spectrogram, limit_n=limit_n)
    if save_file:
        np.save(save_file, totals)
    return totals
d8e845912d6e1b5e0fab864e8a19cdc08500b4c5
23,502
def _initialize_arrays(initial_values, num_steps):
    """Construct a structure of `TraceArray`s from initial values."""
    def _make_array(t):
        # Fixed size up front; keep entries readable after the trace loop so
        # the final value can be read back (and tiled) once tracing completes.
        return tf.TensorArray(dtype=t.dtype,
                              size=num_steps,
                              clear_after_read=False,
                              element_shape=t.shape)

    trace_arrays = tf.nest.map_structure(_make_array, initial_values)
    # Seed slot 0 of every array with its corresponding initial value.
    return tf.nest.map_structure(lambda ta, t: ta.write(0, t),
                                 trace_arrays, initial_values)
f63e13f35aade7979b4090964c593c2d222e94bd
23,503
def blend(image1, image2, factor):
    """Linearly combine two uint8 images by 'factor'.

    ``factor == 0.0`` returns image1 unchanged and ``factor == 1.0`` returns
    image2. Values strictly between 0 and 1 interpolate pixel-wise; values
    above 1.0 extrapolate past image2 and the result is clipped to [0, 255].

    Args:
        image1: An image Tensor of type uint8.
        image2: An image Tensor of type uint8.
        factor: A floating point value above 0.0.

    Returns:
        A blended image Tensor of type uint8.
    """
    # Trivial endpoints: no arithmetic needed.
    if factor == 0.0:
        return tf.convert_to_tensor(image1)
    if factor == 1.0:
        return tf.convert_to_tensor(image2)

    image1 = tf.to_float(image1)
    image2 = tf.to_float(image2)

    scaled = factor * (image2 - image1)
    # Do addition in float.
    blended = tf.to_float(image1) + scaled

    if 0.0 < factor < 1.0:
        # Pure interpolation can never leave [0, 255], so no clipping.
        return tf.cast(blended, tf.uint8)

    # Extrapolation may overshoot; clip before casting back to uint8.
    return tf.cast(tf.clip_by_value(blended, 0.0, 255.0), tf.uint8)
5012d34ab9974e88bfc7dae4683521313fd37cd0
23,504
def handle_debug(drive_file, node_id, show_all):
    """Handle the debug verb by toggling drive_file's debug flag.

    When debugging is already on, the call is traced to stdout before the
    flag is flipped. Always returns True (the verb was handled).
    """
    tracing = drive_file.debug
    if tracing:
        print("# handle_debug(node_id: " + str(node_id) + ",")
        print("# show_all: " + str(show_all))
    toggled = not drive_file.get_debug()
    drive_file.set_debug(toggled)
    return True
7afcecc36c9f0cc9104267dac68ff65eb0eb527a
23,505
def start_of_next_clk_period(time: float, clk_period: float):
    """
    :return: start time of next clk period
    """
    # One full period past the start of the period containing `time`
    # (`start_clk` is a sibling helper returning the current period index).
    current_period = start_clk(time, clk_period)
    return (current_period + 1) * clk_period
d59dafc3a8fdec9d199dcf379eefce52267ea4c1
23,506
import re


def eval_formula(formula, assignment):
    """Evaluate a string formula after substituting parameter values.

    **Attention**: Be extremely careful about what to pass to this function.
    All parameters are plugged into the formula and evaluated using `eval()`
    which executes arbitrary python code.

    Parameters
    ----------
    formula : str
        String representation of the formula to be evaluated.
    assignment : dict
        Dictionary containing parameter names and values as keys and values,
        respectively.

    Returns
    -------
    float
        Evaluation result.

    Examples
    --------
    >>> eval_formula('a + (1 - b) * a', {'a': 0.1, 'b': 0.8})
    0.12
    """
    expression = formula
    # Substitute longer parameter names first so e.g. 'ab' is not
    # clobbered by a prior replacement of 'a'.
    for param in sorted(assignment, reverse=True):
        expression = expression.replace(param, str(assignment[param]))
    # Strip a leading 0 produced by substitution (e.g. '1-05' -> '1-5'),
    # which would otherwise be an invalid/octal-looking literal.
    expression = re.sub(r'\d-0\d',
                        lambda match: match[0].replace('-0', '-'),
                        expression)
    # pylint: disable=eval-used
    return eval(expression)
    # pylint: enable=eval-used
c1f344fc0049e20e86feb2428a46d51f9eee5898
23,507
def soil_temperature(jth: int, states: States, weather: Weather):  # j = 1,2,..,5
    """
    Equation 2.4 / 8.4
    cap_soil_j * soil_j_t = sensible_heat_flux_soil_j_minus_soil_j - sensible_heat_flux_soil_j_soil_j_plus
    0 is Floor, 6 is SoOut
    """
    thicknesses = Coefficients.Soil.soil_thicknesses
    # Layer above: the floor for j=1, otherwise soil layer j-1.
    h_above = Coefficients.Floor.floor_thickness if jth == 1 else thicknesses[jth - 2]
    h_here = thicknesses[jth - 1]
    # Layer below: constant 1.28 under the deepest layer
    # (assumed by GreenLight's authors, line 83, setGlParams).
    h_below = 1.28 if jth == 5 else thicknesses[jth]

    heat_capacity = h_here * Coefficients.Soil.rho_c_p_So
    conductivity = Coefficients.Soil.soil_heat_conductivity
    # Heat exchange coefficients across the upper and lower interfaces.
    hec_above = 2 * conductivity / (h_above + h_here)
    hec_below = 2 * conductivity / (h_here + h_below)

    t_above = states.floor_t if jth == 1 else states.soil_j_t[jth - 2]
    t_here = states.soil_j_t[jth - 1]
    t_below = weather.soil_out_t if jth == 5 else states.soil_j_t[jth]

    flux_in = convective_and_conductive_heat_fluxes(hec_above, t_above, t_here)
    flux_out = convective_and_conductive_heat_fluxes(hec_below, t_here, t_below)
    return (flux_in - flux_out) / heat_capacity
ddd3e50b30dc1240d5f6c6200aea710beba6b498
23,508
def clean_user_data(model_fields):
    """
    Transforms the user data loaded from LDAP into a form suitable for
    creating a user.
    """
    # LDAP-backed accounts never authenticate locally, so store a password
    # hash that can never match any input (mutates and returns the dict).
    unusable = make_password(None)
    model_fields["password"] = unusable
    return model_fields
9b9f968c4a775527dac36597ecadee476549dc7d
23,509
import json


def case_structure_generator(path):
    """Create test cases from reference data files.

    Args:
        path: path-like object (must expose ``.name``) pointing at a JSON
            reference file containing ``namelists.SYSTEM`` and ``cell``.

    Returns:
        Tuple ``(ins, outs, error)``: generator inputs, a dict of expected
        output values (or None), and the expected exception type (or None).
    """
    with open(str(path), 'r') as in_f:
        case_data = json.load(in_f)

    system_dict = case_data['namelists']['SYSTEM']
    ibrav = system_dict['ibrav']

    # File names like 'case-6.2.json' encode the QE version after the dash;
    # the trailing '.json' suffix is stripped.
    if '-' in path.name:
        _, qe_version_with_suffix = path.name.split('-')
        qe_version, _ = qe_version_with_suffix.rsplit('.', 1)
    else:
        qe_version = None

    # FIX: the original assigned `ins` twice; the first assignment (without
    # 'qe_version') was dead code and has been removed.
    ins = {'ibrav': ibrav, 'cell': case_data['cell'], 'qe_version': qe_version}

    # ibrav == 0 is not supported by the structure generator and must raise.
    if ibrav == 0:
        return ins, None, ValueError

    outs = dict()
    for key in (['a', 'b', 'c', 'cosab', 'cosac', 'cosbc'] +
                ['celldm({})'.format(i) for i in range(1, 7)]):
        if key in system_dict:
            outs[key] = system_dict[key]
    return ins, outs, None
1c7249c207032ed623bbfe274ed117283cd6ef4d
23,510
from typing import Optional
from typing import Type
from typing import Dict
from typing import List
from typing import Any


def load_ascii(file: 'BinaryFile',  # pylint: disable=unused-argument,keyword-arg-before-vararg
               parser: 'Optional[Type[ASCIIParser]]' = None,
               type_hook: 'Optional[Dict[str, Type[BaseType]]]' = None,
               enum_namespaces: 'Optional[List[str]]' = None,
               bare: bool = False,
               *args: 'Any', **kwargs: 'Any') -> 'ASCIIInfo':
    """Parse ASCII log file.

    Args:
        file: Log file object opened in binary mode.
        parser (:class:`~zlogging.loader.ASCIIParser`, optional): Parser
            class; defaults to :class:`~zlogging.loader.ASCIIParser`.
        type_hook (:obj:`dict` mapping :obj:`str` and
            :class:`~zlogging.types.BaseType` class, optional): Bro/Zeek type
            parser hooks for customising parsing behaviours.
        enum_namespaces (:obj:`List[str]`, optional): Namespaces to be loaded.
        bare (:obj:`bool`, optional): If ``True``, do not load the ``zeek``
            namespace by default.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        The parsed ASCII log data.
    """
    parser_cls = ASCIIParser if parser is None else parser
    return parser_cls(type_hook, enum_namespaces, bare).parse_file(file)
fa7f0ba4a98dc295fb23651373a6489dad5c205e
23,511
def differences_dict(input_dict):
    """Create a dictionary of combinations of readers to create bar graphs.

    For every case, every pair of readers is compared and the element-wise
    absolute differences of their (R0, X0, R1, X1) tuples are collected.

    Args:
        input_dict: mapping case -> reader_name -> key -> 4-tuple of numbers.

    Returns:
        Mapping case -> "<a> vs <b>" -> {"R0"|"X0"|"R1"|"X1": list of diffs}.
    """
    # BUG FIX: `comp_values` was re-created inside the per-case loop, so only
    # the last case survived in the returned dict. Initialize it once.
    comp_values = {}
    for each_case in input_dict:
        comp_values[each_case] = {}
        for first, second in combinations(input_dict[each_case], 2):
            name = first.split("_")[0] + " vs " + second.split("_")[0]
            entry = {"R0": [], "X0": [], "R1": [], "X1": []}
            comp_values[each_case][name] = entry
            # Pair up the two readers' measurements in insertion order.
            for v, v1 in zip(
                input_dict[each_case][first].values(),
                input_dict[each_case][second].values(),
            ):
                entry["R0"].append(abs(v[0] - v1[0]))
                entry["X0"].append(abs(v[1] - v1[1]))
                entry["R1"].append(abs(v[2] - v1[2]))
                entry["X1"].append(abs(v[3] - v1[3]))
    return comp_values
a15ef7bab8a9abaf556e1ce97a4c695b50d5b460
23,512
import psutil


def available_memory():
    """
    Returns total system wide available memory in bytes
    """
    # psutil already accounts for platform-specific reclaimable caches.
    stats = psutil.virtual_memory()
    return stats.available
5071312f64aa37e1d777c8f20009fa38137381a4
23,513
def aslr_for_module(target, module):
    """
    Get the aslr offset for a specific module

    - parameter target: lldb.SBTarget that is currently being debugged
    - parameter module: lldb.SBModule to find the offset for
    - returns: the offset as an int
    """
    # The slide is simply how far the object-file header moved when loaded.
    header = module.GetObjectFileHeaderAddress()
    return header.GetLoadAddress(target) - header.GetFileAddress()
bd4e6ff8949da55d678f5d89b296b3a6256b6c8a
23,514
from typing import List
from typing import Set
import numpy


def get_hypergraph_incidence_matrix(node_list: List[Node],
                                    hyperedge_list: List[Set[Node]]
                                    ) -> numpy.array:
    """Get the incidence matrix of a hypergraph.

    Rows follow node_list order, columns follow hyperedge_list order;
    entry (i, j) is 1 iff node i belongs to hyperedge j, else 0.
    """
    index_of = {node: row for row, node in enumerate(node_list)}
    matrix = numpy.zeros((len(node_list), len(hyperedge_list)), dtype=int)
    for column, hyperedge in enumerate(hyperedge_list):
        rows = [index_of[node] for node in hyperedge]
        matrix[rows, column] = 1
    return matrix
706bdd53a1fefec3ee3f77fa79248361ffff0351
23,515
from re import X


def fformat(last_data, last_records):
    """
    @param last_data: dictionary(node_name => node's data segment)
    @param last_records: dictionary(node_name => timestamp, node when last transmitted)
    @return: html
    """
    # NOTE(review): `re.X` is the verbose-regex flag (an int), so `X % (...)`
    # below cannot work as written; X is presumably an HTML template string
    # imported from elsewhere and this import got garbled — confirm.
    # NOTE(review): `repr(map(...))` assumes Python-2 semantics; on Python 3
    # it yields '<map object ...>'. `last_records` is currently unused.
    nodelist = last_data.keys()
    names_repr = repr(map(str, nodelist))
    divs = ''.join(['<div id="' + x + '" class="node"></div>' for x in nodelist])
    return (X % (names_repr, divs)).encode('utf8')
28da148b43c616652872cabc7815cba51dafd16c
23,516
import math


def ceil(base):
    """Get the ceil of a number"""
    # Accept anything float() can parse: ints, floats, numeric strings.
    # NOTE: shadows the name of math.ceil by design of the original module.
    value = float(base)
    return math.ceil(value)
ebe78a5eb8fa47e6cfba48327ebb1bdc469b970d
23,517
# NOTE(review): whitespace-mangled dump of `process_raw_data` — a pipeline
# wrapper that optionally extracts top/bottom expression-level percentiles
# (with optional EL binarization), filters to modal-length sequences,
# removes constant flanks, inserts sequences into a pTpA/Abf1TATA scaffold,
# pads to uniform length, and writes the result (plus optional loss/timing
# report and random sample). Depends on project helpers not visible here:
# `organize`, `build`, `get_time_stamp`, `smart_open`, `get_seq_count`,
# `ROOT_DIR`, and `t` (presumably the `time` module — confirm).
# Left byte-identical: the original newlines/indentation were destroyed in
# extraction, so any re-indentation of this control-flow-heavy body would be
# guesswork.
import os def process_raw_data(input_seqs, scaffold_type=None, percentile=None, binarize_els=True, homogeneous=False, deflank=True, insert_into_scaffold=True, extra_padding=0, pad_front=False, report_loss=True, report_times=True, remove_files=True, create_sample_of_size=None): """ A wrapper function that: Takes raw data as retrieved from Carl de Boer's publication at https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE104878, and processes the sequences according to the custom arguments, pads them to same length, and writes them to an output file along with their expression levels (tab separated). The end of the file contains comments specifying the number of sequences in the file and the lengths of the padded sequences. Args: ----- input_seqs (str) -- the absolute pathname of the file that contains all of the input sequences and their expression levels (tab separated). scaffold_type (str) -- the scaffold type (pTpA or Abf1TATA) that the input sequences had their expression levels measured in. percentile (float) -- the proportion of the raw input data to extract from the sequences with the highest and lowest expression levels. i.e if 'percentile=0.1' then the top 10 % of sequences with highest expression levels, and the bottom 10 % of sequences with lowest expression levels will be extracted from the raw input data. The resulting data file will contain ~ 20 % of the data as the raw input data. binarize_els (bool) -- if (and only if) a 'percentile' value is passed, this argument determines whether the expression level values (Els) will be binarized or not. If True (defualt), sequences with ELs in the top percentile will have their ELs binarized to 1, and sequences with ELs in the bottom percentile will have their ELs binarized to 0. homogeneous (bool) -- if True, only sequences of modal length will be processed. If False, all sequences will be processed regardless of length. Default: False. 
deflank (bool) -- if True, removes the constant flanking regions of the input sequences. Default: True. insert_into_scaffold (bool) -- if True inserts the input sequences into the appropriate scaffold. If False, the sequences are encoded as they are. Default: True. extra_padding (int) -- the number of 'P' characters greater than the maximum sequence length to pad each sequence to. Default: 0. pad_front (bool) -- whether to pad out the front (left hand side) or end (right hand side) of the sequences. If True, pads the front. Default: False (will pad the end). report_loss (bool) -- if True, reports the number of lines of data lost at each step in the process. Default: False. report_times (bool) -- if True, reports the time each step in the cleaning process takes. Default: False. remove_files (bool) -- if True, will remove intermediate files created in the process of processing raw data. Default: False (i.e. intermediary files will be kept). create_sample_of_size (int) -- if a number is passed, a sample of this size will be taken by pseudo-random from the file containing processed data, and written to a separate file. Returns: ----- processed_data (str) -- the absolute path for the file containing processed sequences along with their expression levels. """ # Assertions assert isinstance(input_seqs, str), ('Input file path name must be ' 'passed as a string.') assert os.path.exists(input_seqs), 'Input file does not exist.' assert isinstance(scaffold_type, str), ('Scaffold type must be passed as ' 'a string if specified.') assert scaffold_type == 'pTpA' or scaffold_type == 'Abf1TATA', 'Scaffold \ type must be specified as either "pTpA" or "Abf1TATA".' 
assert isinstance(percentile, (float, type(None))), ('The "percentile" ' 'argument must be ' 'passed as a float.') if percentile is not None: assert percentile < 0.5, '"percentile" must be less that 0.5' assert isinstance(homogeneous, bool), ('The homogeneous argument must be ' 'passed as a bool.') assert isinstance(deflank, bool), ('The deflank argument must be passed ' 'as a bool.') assert isinstance(insert_into_scaffold, bool), ('insert_into_scaffold ' 'argument must be passed ' 'as a bool.') assert isinstance(extra_padding, int), ('The number of extra vectors to ' 'pad each sequence by should be ' 'passed as an integer.') assert extra_padding >= 0, ('extra_padding must be passed as a non-' 'negative integer.') assert isinstance(pad_front, bool), ('The pad_front argument must be ' 'passed as a bool.') assert isinstance(report_loss, bool), ('The report_loss argument must be ' 'passed as a bool.') assert isinstance(report_times, bool), ('The report_times argument must ' 'be passed as a bool.') assert isinstance(remove_files, bool), ('The remove_files argument must ' 'be passed as a bool.') if create_sample_of_size is not None: assert isinstance(create_sample_of_size, int), ('Sample size must be ' 'passed as an int') # Functionality print('Starting processing of raw data...') raw_data = input_seqs # Define final output file path time_stamp = get_time_stamp() relative_path = 'example/processed_data/' + time_stamp processed_data = os.path.join(ROOT_DIR, relative_path) # Create log file to write reports to if report_loss or report_times: report = smart_open(processed_data + '_process_report' + '.txt', 'w') # Initialize custom operations if specified (i.e loss + timing reports) if report_loss: loss_report = {} loss_report['Raw Data'] = get_seq_count(input_seqs) if report_times: t_init = t.time() t0 = t_init if remove_files: created_files = [] # keep track of the intermediate files created. 
# Pull out the top and bottom percentiles of data if percentile is not None: print('Pulling out the top and bottom percentiles...') df = organize.sort_by_exp_level(input_seqs) df = organize.discard_mid_data(df, percentile=percentile) processed_data += '_percentiles' if binarize_els: print('Binarizing expression levels...') df = organize.binarize_data(df) processed_data += '_els_binarized' input_seqs = organize.write_df_to_file(df) if report_loss: loss_report['Percentile Seqs'] = get_seq_count(input_seqs) if report_times: t1 = t.time() text = '\tFile created in %s s' % (t1 - t0) print(text) report.write('Top & bottom percentiles pulled...\n' + text + '\n') t0 = t1 if remove_files: created_files.append(input_seqs) # Create new file of only homogeneous (same length) seqs if homogeneous: print('Pulling homogeneous sequences from input file...') input_seqs = organize.pull_homogeneous_seqs(input_seqs, scaffold_type) processed_data += '_homogeneous' if report_loss: loss_report['Homogeneous Seqs'] = get_seq_count(input_seqs) if report_times: t1 = t.time() text = '\tFile created in %s s' % (t1 - t0) print(text) report.write('Homogeneous sequences pulled...\n' + text + '\n') t0 = t1 if remove_files: created_files.append(input_seqs) # Remove all of the flanking regions from the input sequences if deflank: print('Removing flank regions from sequences...') input_seqs = build.remove_flanks_from_all_seqs(input_seqs, scaffold_type) processed_data += '_deflanked' if report_loss: loss_report['Deflanked Seqs'] = get_seq_count(input_seqs) if report_times: t1 = t.time() text = '\tFile created in %s s' % (t1 - t0) print(text) report.write('Sequences deflanked...\n' + text + '\n') t0 = t1 if remove_files: created_files.append(input_seqs) processed_data += '_sequences' # Insert sequences into appropriate scaffold if insert_into_scaffold: print('Inserting sequences into %s scaffold...' 
% (scaffold_type)) input_seqs = build.insert_all_seq_into_one_scaffold(input_seqs, scaffold_type) processed_data += '_inserted_into_%s_scaffold' % (scaffold_type) if report_loss: loss_report['Scaffold-Inserted Seqs'] = get_seq_count(input_seqs) if report_times: t1 = t.time() text = '\tFile created in %s s' % (t1 - t0) print(text) report.write('Seqs inserted into ' + scaffold_type + 'scaffold...\n') report.write(text + '\n') t0 = t1 if remove_files: created_files.append(input_seqs) # Pad sequences if homogeneous and extra_padding == 0: pass else: print('Padding sequences...') input_seqs = build.pad_sequences(input_seqs, pad_front=pad_front, extra_padding=extra_padding) if not homogeneous: # then they will have been padded processed_data += '_padded_at' if pad_front: processed_data += '_front' else: processed_data += '_back' if extra_padding != 0: processed_data += '_%s_extra' % (extra_padding) if report_loss: loss_report['Padded Seqs'] = get_seq_count(input_seqs) if report_times: t1 = t.time() text = '\tFile created in %s s' % (t1 - t0) print(text) report.write('Padded sequences...\n') report.write(text + '\n') t0 = t1 # Remove intermediate files created in the process if remove_files: created_files.append(input_seqs) # Rename the final output file to reflect how data has been cleaned. processed_data += '_with_exp_levels.txt' # Report end of process and print final output file locations. if input_seqs != raw_data: # i.e. if data has been processed in some way os.rename(input_seqs, processed_data) # Report end of process and print absolute path of processed data. text = ('\nRaw data successfully processed.\nLocation: %s\n' % (processed_data)) print(text) if report_loss or report_times: report.write(text) else: # If no processing was performed. text = '\nNo processing performed.\n' text += 'Change processing specifications and try again.' print(text) report.write(text + '\n') text = 'Raw data remains unchanged.' 
print(text) report.write(text + '\n') text = 'Location : %s' % (raw_data) print(text) if report_loss or report_times: report.write(text + '\n') # Write the number of seqs and length of seqs to the start of file organize.write_num_and_len_of_seqs_to_file(processed_data) # Report loss if report_loss: report.write('\nLine counts at each step of the process:\n') for category in loss_report.keys(): curr_count = loss_report[category] if category == 'Raw Data': report.write('\t%s : %s\n' % (category, curr_count)) prev_count = curr_count else: report.write('\t%s : %s (%s lines lost since last step)\n' % (category, curr_count, (prev_count - curr_count))) prev_count = curr_count # Remove intermediate files if remove_files: print('\nRemoving intermediate files...') organize.remove_file_list(created_files) print('Files successfully removed.') print('Process complete.') # Report total time taken if report_times: t_final = t.time() text = '\nTotal processing time : %s s' % (t_final - t_init) print(text) report.write(text) print('Please find the process report in the same directory as the' ' output file for reports of data losses and timings.') if report_times or report_loss: report.close() # Create sample data if create_sample_of_size is not None: size = create_sample_of_size print('\n\nCreating sample of size %s ...' % str(size)) sample_seqs = organize.create_sample_data(processed_data, size) print('\nSample data successfully created.') print('\nLocation: %s \n' % (sample_seqs)) return processed_data
7dafb359dba280a56b6d9860cd23c4f1dde7cd02
23,518
def train(
    dir,
    input_s3_dir,
    output_s3_dir,
    hyperparams_file,
    ec2_type,
    volume_size,
    time_out,
    docker_tag,
    aws_role,
    external_id,
    base_job_name,
    job_name,
    use_spot_instances=False,
    metric_names=None,
    tags=None
):
    """Train ML model(s) on SageMaker.

    :param dir: [str], source root directory (note: shadows the builtin `dir`)
    :param input_s3_dir: [str], S3 location of the input data
    :param output_s3_dir: [str], S3 location for output (models, etc)
    :param hyperparams_file: [str], path to a hyperparams JSON file, or falsy
        to train without explicit hyperparameters
    :param ec2_type: [str], EC2 instance type
        (https://aws.amazon.com/sagemaker/pricing/instance-types/)
    :param volume_size: [int], EBS volume size in GB
    :param time_out: [int], training time-out in seconds
    :param docker_tag: [str], Docker tag of the training image
    :param aws_role: [str], AWS role assumed by SageMaker while training
    :param external_id: [str], optional external id for the IAM role
    :param base_job_name: [str], optional prefix for the training job name
    :param job_name: [str], optional explicit job name (overrides
        `base_job_name`)
    :param use_spot_instances: [bool, default=False], use SageMaker Managed
        Spot instances
        (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html)
    :param metric_names: [list[str], default=None], optional metric names
    :param tags: [optional[list[dict]], default=None], job tags, e.g.
        [{'Key': 'name', 'Value': value}, ...]
        (https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html)

    :return: [str], S3 model location
    """
    config = _read_config(dir)
    # Hyperparameters are optional; only parse the file when one is given.
    hyperparams_dict = (
        _read_hyperparams_config(hyperparams_file) if hyperparams_file else None
    )
    sage_maker_client = sagemaker.SageMakerClient(
        config.aws_profile, config.aws_region, aws_role, external_id
    )
    image_name = config.image_name + ':' + docker_tag
    return sage_maker_client.train(
        image_name=image_name,
        input_s3_data_location=input_s3_dir,
        train_instance_count=1,
        train_instance_type=ec2_type,
        train_volume_size=volume_size,
        train_max_run=time_out,
        output_path=output_s3_dir,
        hyperparameters=hyperparams_dict,
        base_job_name=base_job_name,
        job_name=job_name,
        use_spot_instances=use_spot_instances,
        tags=tags,
        metric_names=metric_names
    )
7cd92363bcde8dc86989a8932236cd4b2961b0e3
23,519
# NOTE(review): whitespace-mangled dump of `wikitext_page` — builds a per-page
# statistics infobox (Swedish labels) from d['pages'][title]['stats'], either
# as a wikitable (fmt='wikitext', using sibling helpers `table_start` and
# `colspan`) or as aligned plain text (fmt='print'). Parameter `e` is unused.
# Left byte-identical: the original line breaks were destroyed in extraction
# (one `table +=` statement is even split across the dump lines below), so
# reformatting would be guesswork.
def wikitext_page(d, e, title, fmt='wikitext'): """Create infobox with stats about a single page from a category. Create infobox with stats about a single page from a category. Currently only supports formatting as wikitext. Only returns the string of the text, does not save any files or modify other data structures.""" datum = d['stats']['scrape_start'][:10] date_from = d['stats']['date_from'] date_to = d['stats']['date_to'] date_days = d['stats']['pv_days'] desc = f"Sidvisningsstatistik {datum} för tidsperioden {date_from}--{date_to} ({date_days} dagar)\n\n" page_stats = d['pages'][title]['stats'] if fmt == 'wikitext': text = f"{desc}\n\n\n" table = table_start([colspan('Sidinformation')], [], cellpadding=3, cls='wikitable') table += f"|Visningar || align='right' | {page_stats['pageviews_sv']}\n|-\n" table += f"|Längd || align='right' | {page_stats['len_sv']}\n|-\n" table += f"|Kvalitet || align='right' | {page_stats['quality']}\n|-\n" if 'len_fi' in page_stats: table += f"|Visningar Finska || align='right' | {page_stats['pageviews_fi']}\n|-\n" table += f"|Längd Finska || align='right' | {page_stats['len_fi']}\n|-\n" if 'len_en' in page_stats: table += f"|Visningar Engelska || align='right' | {page_stats['pageviews_en']}\n|-\n" table += f"|Längd Engelska || align='right' | {page_stats['len_en']}\n|-\n" if 'len_de' in page_stats: table += f"|Visningar Tyska || align='right' | {page_stats['pageviews_de']}\n|-\n" table += f"|Längd Tyska || align='right' | {page_stats['len_de']}\n|-\n" table += f"|Kategorier || align='right' | {page_stats['categories_cnt']}\n|-\n" table += f"|Kontributörer || align='right' | {page_stats['contributors_tot']}\n|-\n" table += f"|Antal andra språk || align='right' | {page_stats['langlinks_cnt']}\n|-\n" table += f"|Externa länkar || align='right' | {page_stats['extlinks_cnt']}\n|-\n" table += f"|Bilder || align='right' | {page_stats['images_cnt']}\n|-\n" table += f"|Länkar || align='right' | {page_stats['links_cnt']}\n|-\n" table += 
f"|Omdirigeringar || align='right' | {page_stats['redirects_cnt']}\n|-\n" table += f"|Länkar till denna sida || align='right' | {page_stats['linkshere_cnt']}\n|-\n" table += "|}\n\n" text += table text += """Kvalitet räknas ut med formeln: Kvalitet = 3 * antalet kategorier + 4 * antalet bilder + 4 * antalet andra språk + 1 * antalet länkar + 1 * antalet länkar till denna sida + 2 * externa länkar + 3 * antalet omdirigeringar + 1 * antalet kontributörer """ return text elif fmt == 'print': text = f"Visningar---------------{page_stats['pageviews_sv']}\n" text += f"Längd-------------------{page_stats['len_sv']}\n" text += f"Kvalitet----------------{page_stats['quality']}\n" if 'len_fi' in page_stats: text += f"Visningar Finska--------{page_stats['pageviews_fi']}\n" text += f"Längd Finska------------{page_stats['len_fi']}\n" if 'len_en' in page_stats: text += f"Visningar Engelska------{page_stats['pageviews_en']}\n" text += f"Längd Engelska----------{page_stats['len_en']}\n" if 'len_de' in page_stats: text += f"Visningar Tyska---------{page_stats['pageviews_de']}\n" text += f"Längd Tyska-------------{page_stats['len_de']}\n" text += f"Kategorier--------------{page_stats['categories_cnt']}\n" text += f"Kontributörer-----------{page_stats['contributors_tot']}\n" text += f"Antal andra språk-------{page_stats['langlinks_cnt']}\n" text += f"Externa länkar----------{page_stats['extlinks_cnt']}\n" text += f"Bilder------------------{page_stats['images_cnt']}\n" text += f"Länkar------------------{page_stats['links_cnt']}\n" text += f"Omdirigeringar----------{page_stats['redirects_cnt']}\n" text += f"Länkar till denna sida--{page_stats['linkshere_cnt']}\n" return text
1288a1fea5bc54ef6243089eea0578cc43ec311e
23,520
from typing import Tuple


def _quadratic(
    self: qp.utils.Minimize[Vector],
    direction: Vector,
    step_size_test: float,
    state: qp.utils.MinimizeState[Vector],
) -> Tuple[float, float, bool]:
    """Take a quadratic step calculated from an energy-only test step.
    Adjusts step size to back off if energy increases.

    Returns a tuple ``(E, step_size_prev, ok)``: the final energy, the
    cumulative distance moved along ``direction``, and whether a
    successful step was taken.
    """
    # Check initial point:
    step_size_prev = 0.0  # cumulative progress along direction
    E = self._sync(float(state.energy))
    E_orig = E
    # Directional derivative along `direction`; must be negative for descent.
    g_d = self._sync(state.gradient.overlap(direction))
    if g_d >= 0.0:
        qp.log.info(
            f"{self.name}: Bad step direction with positive"
            " gradient component"
        )
        return E_orig, step_size_prev, False

    # Test step and quadratic step size prediction:
    for i_step in range(self.step_size.n_adjust):
        # Check test step size:
        if step_size_test < self.step_size.minimum:
            qp.log.info(f"{self.name}: Test step size below threshold.")
            return E, step_size_prev, False
        # Try test step (move only the remaining distance from current point):
        self.step(direction, step_size_test - step_size_prev)
        step_size_prev = step_size_test
        E_test = self._compute(state, energy_only=True)  # gradient not needed

        # Check if step left valid domain:
        if not np.isfinite(E_test):
            # Back off from difficult region
            step_size_test *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Test step failed with"
                f" {state.energy.name} = {E_test:.3e};"
                f" reducing test step size to {step_size_test:.3e}."
            )
            continue

        # Predict step size (quadratic based on gradient and two energies):
        step_size = (
            0.5
            * (step_size_test ** 2)
            * g_d
            / (step_size_test * g_d + E - E_test)
        )
        # Check reasonableness of predicted step:
        if step_size < 0.0:
            # Curvature has wrong sign, but E_test < E, so accept step
            # for now and try descending further next time:
            step_size_test *= self.step_size.grow_factor
            qp.log.info(
                f"{self.name}: Wrong curvature in test step,"
                f" growing test step size to {step_size_test:.3e}."
            )
            E = self._compute(state, energy_only=False)
            return E, step_size_prev, True
        if step_size / step_size_test > self.step_size.grow_factor:
            step_size_test *= self.step_size.grow_factor
            qp.log.info(
                f"{self.name}: Predicted step size growth"
                f" > {self.step_size.grow_factor},"
                f" growing test step size to {step_size_test:.3e}."
            )
            continue
        if step_size / step_size_test < self.step_size.reduce_factor:
            step_size_test *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Predicted step size reduction"
                f" < {self.step_size.reduce_factor},"
                f" reducing test step size to {step_size_test:.3e}."
            )
            continue
        # Successful test step:
        break
    if not np.isfinite(E_test):
        qp.log.info(
            f"{self.name}: Test step failed {self.step_size.n_adjust}"
            " times. Quitting step."
        )
        return E_orig, step_size_prev, False

    # Actual step (using the quadratically-predicted step size):
    for i_step in range(self.step_size.n_adjust):
        # Try the step:
        self.step(direction, step_size - step_size_prev)
        step_size_prev = step_size
        E = self._compute(state, energy_only=False)
        if not np.isfinite(E):
            step_size *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Step failed with"
                f" {state.energy.name} = {E:.3e};"
                f" reducing step size to {step_size:.3e}."
            )
            continue
        if E > E_orig + self.energy_threshold:
            step_size *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Step increased"
                f" {state.energy.name} by {E - E_orig:.3e};"
                f" reducing step size to {step_size:.3e}."
            )
            continue
        # Step successful:
        break
    if (not np.isfinite(E)) or (E > E_orig + self.energy_threshold):
        qp.log.info(
            f"{self.name}: Step failed to reduce {state.energy.name}"
            f" after {self.step_size.n_adjust} attempts."
            " Quitting step."
        )
        return E_orig, step_size_prev, False
    return E, step_size_prev, True
49b2e75d0ae39e968e7288690dea7d42c423a2df
23,521
def create_image(ds: "Dataset", data_element: "DataElement") -> "gdcm.Image":
    """Build and return a ``gdcm.Image`` configured from *ds*.

    Parameters
    ----------
    ds : dataset.Dataset
        The :class:`~pydicom.dataset.Dataset` containing the Image Pixel
        module.
    data_element : gdcm.DataElement
        The ``gdcm.DataElement`` *Pixel Data* element.

    Returns
    -------
    gdcm.Image
    """
    n_frames = getattr(ds, 'NumberOfFrames', 1)
    image = gdcm.Image()
    # Single-frame data is 2D; multi-frame data adds a third dimension.
    image.SetNumberOfDimensions(2 if n_frames == 1 else 3)
    image.SetDimensions((ds.Columns, ds.Rows, n_frames))
    image.SetDataElement(data_element)
    # Photometric interpretation (e.g. MONOCHROME2, RGB).
    pi = gdcm.PhotometricInterpretation(
        gdcm.PhotometricInterpretation.GetPIType(ds.PhotometricInterpretation)
    )
    image.SetPhotometricInterpretation(pi)
    # Transfer syntax comes from the file meta information.
    tsyntax = ds.file_meta.TransferSyntaxUID
    ts = gdcm.TransferSyntax(
        gdcm.TransferSyntax.GetTSType(str.__str__(tsyntax))
    )
    image.SetTransferSyntax(ts)
    image.SetPixelFormat(
        gdcm.PixelFormat(
            ds.SamplesPerPixel,
            ds.BitsAllocated,
            ds.BitsStored,
            ds.HighBit,
            ds.PixelRepresentation,
        )
    )
    if 'PlanarConfiguration' in ds:
        image.SetPlanarConfiguration(ds.PlanarConfiguration)
    return image
95f96b0f666903529811fbf3aaeb71305dfcb1bc
23,522
def linear_interpolate_by_datetime(datetime_axis, y_axis, datetime_new_axis,
                                   enable_warning=True):
    """A datetime-version that takes datetime object list as x_axis.

    Each datetime is converted to a numeric timestamp and the actual
    interpolation is delegated to :func:`linear_interpolate`.
    """
    x_numeric = [totimestamp(dt) for dt in datetime_axis]
    x_new_numeric = [totimestamp(dt) for dt in datetime_new_axis]
    return linear_interpolate(
        x_numeric, y_axis, x_new_numeric, enable_warning=enable_warning)
515eb1e389b711ff3add707abe91bf577b38192d
23,523
def calculate_index(
    target_ts: pd.Timestamp, timestamps: pd.DatetimeIndex
) -> pd.Timestamp:
    """Resolve *target_ts* against the available *timestamps*.

    Returns *target_ts* itself when it is present; otherwise the first
    timestamp after it. If the target lies beyond every available
    timestamp, the last one is returned.
    """
    if target_ts in timestamps:
        return target_ts
    later = timestamps[timestamps > target_ts]
    if len(later) == 0:
        # Target is beyond the available range: fall back to the newest.
        return timestamps[-1]
    return later[0]
db1ad3130b3763115cb88e8798618d9632996bd7
23,524
from typing import OrderedDict
import pydoc


def walk_through_package(package):
    """Get the documentation for each of the modules in the package.

    Args:
        package: An imported python package.

    Returns:
        output: An ordered dictionary mapping each module name to the
        documentation produced by ``getmodule``.
    """
    members = pydoc.inspect.getmembers(package, pydoc.inspect.ismodule)
    output = OrderedDict(
        (module_name, getmodule(module_name, reference))
        for module_name, reference in members
    )
    return output
9ad0e9d935a812608fb42af788c1ae6746b78684
23,525
import gzip


def extract_images_2(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

    Args:
        f: A file object that can be passed into a gzip reader.

    Returns:
        data: A 4D unit8 numpy array [index, y, x, depth].

    Raises:
        ValueError: If the bytestream does not start with 2051.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        # 2051 is the magic number of the MNIST image file format.
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                             (magic, f.name))
        num_images = np.int64(_read32(bytestream))
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        # The remaining bytes are raw pixel values, one byte per pixel.
        buf = bytestream.read(rows * cols * num_images)
        data = np.frombuffer(buf, dtype=np.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
eb3b44051c6cc3721a82641346c233d9d4bfe1da
23,526
import datetime


def slope_finder(station):
    """Fit a degree-1 least-squares polynomial to the last two days of water
    level data for *station* and report the sign of the fitted slope.

    Returns:
        True if the fitted slope is non-negative (level rising or flat),
        False if it is negative (level falling), or None when the data
        could not be fetched or fitted.
    """
    try:
        dt = 2  # look-back window in days
        # Bug fix: the old `from datetime import datetime` import made
        # `datetime.timedelta` an AttributeError, which the bare except
        # silently turned into a permanent None result.
        dates, levels = fetch_measure_levels(
            station.measure_id, dt=datetime.timedelta(days=dt))
        # polyfit returns coefficients highest degree first, so for a
        # degree-1 fit the slope is coefficient [0]; index [1] is the
        # intercept (which was checked here by mistake before).
        coeffs = polyfit(dates, levels, 1)
        return coeffs[0] >= 0
    except Exception:
        # Best-effort: any fetch/fit failure (e.g. a station with no recent
        # data) yields "unknown" rather than propagating.
        return None
035a78a4e54b94945837e97c6dce53bc36770956
23,527
def get_attr_counts(datas, attr):
    """
    Count how many times each value of an attribute occurs.

    :param datas: samples to inspect
    :type datas: list[BaseDataSample]
    :param attr: attribute name whose values are counted
    :type attr: str
    :return: dict mapping value to count; list-valued attributes contribute
        one "<attr>-<item>" key per list item
    """
    counts = {}
    for sample in datas:
        value = sample.get_value(attr)
        # List values are counted per item, namespaced with the attribute
        # name; scalar values are used as keys directly.
        if isinstance(value, list):
            keys = [attr + "-" + item for item in value]
        else:
            keys = [value]
        for key in keys:
            counts[key] = counts.get(key, 0) + 1
    return counts
bea8e6e1c99efe1ad18894831006f0e218517c74
23,528
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator
    (defaults to spaces), matching ``str.split(sep)`` for a single-character
    separator — including empty fields produced by leading, trailing, or
    consecutive separators.

    >>> split("apple#banana#cherry#orange", separator='#')
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    >>> split("11/22/63", separator='/')
    ['11', '22', '63']
    >>> split("12:43:39", separator=":")
    ['12', '43', '39']
    >>> split("a#", separator='#')
    ['a', '']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
    # Always append the final field. Bug fix: the old loop-end check dropped
    # the empty field produced by a trailing separator, so split("a#", "#")
    # returned ['a'] instead of ['a', ''].
    split_words.append(string[last_index:])
    return split_words
73e01d7ff9111d949f31f37b36c3b0656d06e340
23,529
import ast


def _find_class(name: str, target: ast.Module) -> t.Tuple[int, ast.ClassDef]:
    """Returns tuple containing index of classdef in the module and the
    ast.ClassDef object.

    Only the top level of ``target.body`` is scanned; nested classes are
    not found. NOTE(review): implicitly returns None when no class named
    *name* exists — callers must handle that, or the return annotation
    should be Optional.
    """
    for idx, definition in enumerate(target.body):
        if isinstance(definition, ast.ClassDef) and definition.name == name:
            return idx, definition
ad67d36772ef9541edb72a9d56f3553dc9eaffd2
23,530
def get_floor_reference_points():
    """Load the four floor/table reference points from the calibration file.

    These points were originally captured interactively by moving Baxter's
    arm to each reference position; they are now read back from the
    calibration file written by that procedure. Also sets the module-level
    ``Z`` coordinate of the working plane (the table is assumed to be
    level).

    Returns:
        list: four ``[x, y]`` coordinate pairs,
        ``[[x1, y1], [x2, y2], [x3, y3], [x4, y4]]``.
    """
    global Z  # This declaration is needed to modify the global variable Z
    global floor_reference_points  # Maybe erase.
    global floor_reference_orientations  # Maybe erase.
    # Calibrated height of the working plane.
    Z = -0.15113003072395247
    print(Z)  # bug fix: `print Z` was Python 2 syntax
    filename = "/home/sampath/midca/examples/_gazebo_baxter/calibration.txt"
    points = []
    # Each of the first four lines holds one space-separated point; only the
    # first two fields (x, y) are used. The context manager fixes the file
    # handle leak of the previous version.
    with open(filename, 'r') as f:
        for _ in range(4):
            parts = f.readline().split(' ')
            points.append([float(parts[0]), float(parts[1])])
    return points
2e058caedeabb23f4efc0a7119456db596421c30
23,531
def tidy_osx_command_line_tools_command(client: TidyClient, **kwargs) -> DemistoResult:
    """
    Install OSX command line tools

    Args:
        client: Tidy client object.
        **kwargs: command kwargs (not used by this command).

    Returns:
        DemistoResults: Demisto structured response.
    """
    # Delegate the installation to the client, then normalize the runner
    # output into Demisto's structured response format.
    runner: Runner = client.osx_command_line_tools()

    return parse_response(response=runner,
                          human_readable_name="OSx command line tools",
                          installed_software="command line tools",
                          additional_vars={})
0045848f0cef054dfab24a7698e4b3432843f747
23,532
def nav_entries(context):
    """
    Renders dynamic nav bar entries from nav_registry for the provided user.
    """
    # Injects the module-level `nav_registry` into the template context and
    # returns the (mutated) context; the template decides what to render.
    context['nav_registry'] = nav_registry
    return context
8af1917c04a9cbd17895c0fab0239d6fd7c009d2
23,533
from typing import Any


def get_largest_component(graph: ig.Graph, **kwds: Any) -> ig.Graph:
    """Return the subgraph induced by the largest connected component.

    ``**kwds`` are passed to :py:meth:`igraph.Graph.components`.
    """
    best = None
    best_size = -1
    for members in graph.components(**kwds):
        # Keep the first component of maximal size (ties keep the earlier).
        if len(members) > best_size:
            best, best_size = members, len(members)
    return graph.induced_subgraph(best)
24f04905c767f02a03b5a6fbf4ae0ba0b1f49269
23,534
def hiring_contests():
    """Gets all the hiring challenges from all the available platforms."""
    contests_data = get_contests_data()
    # Active contests first, then pending ones, preserving source order.
    all_contests = contests_data["active"] + contests_data["pending"]
    return [
        contest
        for contest in all_contests
        if "hiring" in contest["contest_name"].lower().split()
    ]
91566f0117fc5bc38db7bc930d5e4c7bd1bd2992
23,535
import torch def _find_quantized_op_num(model, white_list, op_count=0): """This is a helper function for `_fallback_quantizable_ops_recursively` Args: model (object): input model white_list (list): list of quantizable op types in pytorch op_count (int, optional): count the quantizable op quantity in this module Returns: the quantizable op quantity in this module """ quantize_op_num = op_count for name_tmp, child_tmp in model.named_children(): if type(child_tmp) in white_list \ and not (isinstance(child_tmp, torch.quantization.QuantStub) or isinstance(child_tmp, torch.quantization.DeQuantStub)): quantize_op_num += 1 else: quantize_op_num = _find_quantized_op_num( child_tmp, white_list, quantize_op_num) return quantize_op_num
c51b06e476ff4804d5bdfca5a187717536a0418f
23,536
import sys


def make_withdrawal(account):
    """Adjusts account balance for withdrawal.

    Script that verifies withdrawal amount is valid, confirms that withdrawal
    amount is less than account balance, and adjusts account balance.

    Arg:
        account(dict): contains pin and balance for account

    Return:
        account(dict): returns account with balance adjusted for withdrawal
    """
    # Use questionary to capture the withdrawal and set equal to amount variable
    amount = questionary.text("How much would you like to withdraw?").ask()
    # NOTE(review): float() raises ValueError on non-numeric input — confirm
    # whether that should be handled like the <= 0 case below.
    amount = float(amount)

    # Validates amount of withdrawal. If less than or equal to 0 system exits
    # with error message.
    if amount <= 0.0:
        sys.exit("This is not a valid withdrawal amount. Please try again.")

    # Validates if withdrawal amount is less than or equal to account balance,
    # processes withdrawal and returns account.
    # Else system exits with error messages indicating that the account is
    # short of funds.
    if amount <= account["balance"]:
        account["balance"] = account["balance"] - amount
        print("Your withdrawal was successful!")
        return account
    else:
        sys.exit(
            "You do not have enough money in your account to make this withdrawal. Please try again."
        )
94600c74b0aa61674c6e6d63cd1a541b8157cb47
23,537
def monta_reacao(coef, form):
    """
    Build a printable chemical-reaction string from parallel arrays produced
    by the reaction-sampling helpers.

    :param coef: coefficients of the substances (a coefficient of 1 is
        rendered as an empty string, preserving the leading space).
    :param form: formulas of the substances.
    :return: formatted string "A + B → C + D" ready for ``print()``, or
        None (with a message printed) when there are not exactly four
        substances.
    """
    # GENERAL TUPLE INDEX (combustion/salification):
    # 0: fuel/acid | 1: oxygen/base | 2: carbon dioxide/salt | 3: water
    if len(coef) != 4:
        print('Formatação de reações com mais ou menos que quatro substâncias ainda não gerada.')
        return None

    def termo(c, f):
        # Coefficient 1 is omitted, keeping the original spacing.
        return ('' if c == 1 else str(c)) + ' ' + f

    reagentes = termo(coef[0], form[0]) + ' + ' + termo(coef[1], form[1])
    produtos = termo(coef[2], form[2]) + ' + ' + termo(coef[3], form[3])
    return reagentes + ' → ' + produtos
9ead94969fbeec6f403f21dc8ebc9eec36ad9438
23,538
from unittest.mock import patch


def test_process_bulk_queue_errors(app, queue):
    """Test error handling during indexing."""
    with app.app_context():
        # Create a test record
        # r1 carries an unresolvable JSON reference and must fail to index;
        # r2 is well-formed and must be the only action that goes through.
        r1 = Record.create({
            'title': 'invalid',
            'reffail': {'$ref': '#/invalid'}})
        r2 = Record.create({
            'title': 'valid',
        })
        db.session.commit()

        RecordIndexer().bulk_index([r1.id, r2.id])

        ret = {}

        def _mock_bulk(client, actions_iterator, **kwargs):
            # Capture the generated bulk actions instead of hitting the
            # search backend.
            ret['actions'] = list(actions_iterator)
            return len(ret['actions'])

        with patch('invenio_indexer.api.bulk', _mock_bulk):
            # Exceptions are caught
            assert RecordIndexer().process_bulk_queue() == 1

        # Only the valid record produced an action.
        assert len(ret['actions']) == 1
        assert ret['actions'][0]['_id'] == str(r2.id)
83c4609eb62d65fb7d53117906a0d6f128fe7b30
23,539
def list_to_string(the_list):
    """Converts list into one string.

    Every element is stringified and followed by ", " — including the last
    element, so the result always ends with a trailing separator.
    """
    return "".join(str(item) + ", " for item in the_list)
f580dd8646526e64bb50297608e8ad8e338d9197
23,540
import os
import sys
from typing import get_args


def _op_select_format(kernel_info):
    """
    call op's op_select_format to get op supported format

    Args:
        kernel_info (dict): kernel info load by json string

    Returns:
        op supported format
    """
    # NOTE(review): get_args is called below as get_args(op_info, 'inputs'),
    # which does not match typing.get_args(tp) — confirm the intended import
    # source for this helper.
    try:
        op_name = kernel_info['op_info']['name']
        te_set_version(kernel_info["op_info"]["socVersion"])
        impl_path = build_in_impl_path
        custom_flag = False
        # A custom operator ships its own implementation file; switch the
        # import path (and the op name) to that file when one is provided.
        if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None:
            op_impl_path = os.path.realpath(kernel_info['impl_path'])
            if os.path.isfile(op_impl_path):
                path, file_name = os.path.split(op_impl_path)
                op_name, _ = os.path.splitext(file_name)
                impl_path = path
                custom_flag = True
        if impl_path not in sys.path:
            sys.path.insert(0, impl_path)

        if custom_flag:
            op_module = __import__(op_name)
        else:
            op_module = __import__("impl." + op_name, globals(), locals(),
                                   [op_name], 0)
        # get function
        if not hasattr(op_module, "op_select_format"):
            # Operator does not expose format selection: nothing to report.
            return ""
        op_func = getattr(op_module, "op_select_format", None)

        # call function
        inputs_args = get_args(kernel_info['op_info'], 'inputs')
        outputs_args = get_args(kernel_info['op_info'], 'outputs')
        attrs_args = get_args(kernel_info['op_info'], 'attrs')
        kernel_name = kernel_info['op_info']['kernel_name']
        ret = op_func(*inputs_args, *outputs_args, *attrs_args,
                      kernel_name=kernel_name)

    except Exception as e:
        # Wrap any failure in the TBE-specific exception type.
        raise TBEException(str(e))
    finally:
        # NOTE(review): this finally block is a no-op — confirm it can be
        # removed.
        pass
    return ret
8ab9a28e9e680ad181a317ba07cd120ca9dac762
23,541
from typing import Optional


def get_rate_plan(apiproduct_id: Optional[str] = None,
                  organization_id: Optional[str] = None,
                  rateplan_id: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRatePlanResult:
    """
    Gets the details of a rate plan.

    :param apiproduct_id: forwarded to the provider as ``apiproductId``.
    :param organization_id: forwarded as ``organizationId``.
    :param rateplan_id: forwarded as ``rateplanId``.
    :param opts: options controlling the Pulumi invoke.
    """
    __args__ = dict()
    __args__['apiproductId'] = apiproduct_id
    __args__['organizationId'] = organization_id
    __args__['rateplanId'] = rateplan_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the raw result is unwrapped field by
    # field into the typed awaitable wrapper below.
    __ret__ = pulumi.runtime.invoke('google-native:apigee/v1:getRatePlan', __args__, opts=opts, typ=GetRatePlanResult).value

    return AwaitableGetRatePlanResult(
        apiproduct=__ret__.apiproduct,
        billing_period=__ret__.billing_period,
        consumption_pricing_rates=__ret__.consumption_pricing_rates,
        consumption_pricing_type=__ret__.consumption_pricing_type,
        created_at=__ret__.created_at,
        currency_code=__ret__.currency_code,
        description=__ret__.description,
        display_name=__ret__.display_name,
        end_time=__ret__.end_time,
        fixed_fee_frequency=__ret__.fixed_fee_frequency,
        fixed_recurring_fee=__ret__.fixed_recurring_fee,
        last_modified_at=__ret__.last_modified_at,
        name=__ret__.name,
        revenue_share_rates=__ret__.revenue_share_rates,
        revenue_share_type=__ret__.revenue_share_type,
        setup_fee=__ret__.setup_fee,
        start_time=__ret__.start_time,
        state=__ret__.state)
c439d2b991174b2fa4137d0b88f04af0ba4a22b9
23,542
def detail_blotter(backtest, positions, holdings, mode='simplified'):
    """Per-symbol trade blotter: merge market data, trades, and account
    changes for every traded symbol.

    Args (as returned by the backtest engine):
        backtest: engine instance providing `data_handler` and
            `trade_record()`.
        positions: dict of per-symbol position series.
        holdings: dict of per-symbol holdings series.
        mode: 'simplified' keeps only the 'close' market-data column;
            anything else ('full') keeps all OHLCV columns.

    Returns:
        dict mapping symbol -> DataFrame.

    Example:
        blotter = detail_blotter(backtest, positions, holdings)
        blotter_rb = blotter['RB']
        blotter_rb.head()
    """
    blotter = dict()
    data_dict = backtest.data_handler.latest_symbol_data
    trades = backtest.trade_record()
    trades['direction'] = [1 if d == 'BUY' else -1 for d in trades['direction']]
    trades['cost'] = trades['direction'] * trades['fill_price'] * trades['quantity']
    for symb in data_dict.keys():
        data = pd.DataFrame(data_dict[symb],
                            columns=['symbol', 'datetime', 'open', 'high',
                                     'low', 'close', 'volume'])
        if mode == 'simplified':
            data = data[['datetime', 'close']].set_index('datetime')
        else:  # 'full'
            data = data.set_index('datetime')
        trades_symb = trades[trades['symbol'] == symb][['direction', 'fill_price',
                                                        'commission', 'cost']]
        holdings_symb = pd.Series(holdings[symb], name='holdings')
        positions_symb = pd.Series(positions[symb], name='positions')
        merge = data.join([positions_symb, holdings_symb, trades_symb],
                          how='outer').iloc[1:, :].fillna(0.)
        # P&L at the close of each bar.
        merge['pnl'] = merge['holdings'] - merge['holdings'].shift(1) - \
            merge['cost'].shift(1) - merge['commission'].shift(1)
        # Bug fix: `.ix` was removed in pandas 1.0; use positional `.iloc`
        # with an explicit column position instead.
        pnl_loc = merge.columns.get_loc('pnl')
        merge.iloc[0, pnl_loc] = 0.  # first row is NaN after the shifts
        # Extra adjustment for a possible forced close-out on the last bar.
        merge.iloc[-1, pnl_loc] = merge['holdings'].iloc[-1] - \
            merge['holdings'].iloc[-2] - merge['cost'].iloc[-1] - \
            merge['commission'].iloc[-1]
        # Use the close of the first bar as the starting capital.
        merge['adj_total'] = merge['pnl'].cumsum() + merge['close'].iloc[0]
        del merge['cost']
        blotter[symb] = merge
    return blotter
9a2d8168cfc9ee979be847c6dac783a70503503c
23,543
def parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status):
    """parameters_create_lcdm(double Omega_c, double Omega_b, double Omega_k, double h, double norm_pk, double n_s, int * status) -> parameters

    SWIG-generated wrapper: delegates directly to the C implementation in
    ``_ccllib``.
    """
    return _ccllib.parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status)
d0a623fcfbcee06a387a3bf7add96068a8205824
23,544
def _split_header_params(s):
    """Split header parameters.

    *s* is a bytes value starting with b';'. Returns the list of
    b';'-separated parameter chunks, each stripped of surrounding
    whitespace. Semicolons inside double-quoted strings do not split.
    """
    result = []
    while s[:1] == b';':
        s = s[1:]
        end = s.find(b';')
        # Skip separators that fall inside a quoted value: an odd number of
        # b'"' before the candidate b';' means it is inside quotes.
        while end > 0 and s.count(b'"', 0, end) % 2:
            end = s.find(b';', end + 1)
        if end < 0:
            # No further separator: the rest of the input is one parameter.
            end = len(s)
        f = s[:end]
        result.append(f.strip())
        s = s[end:]
    return result
fabbfb0959133e70019742c6661cb3bb443ca34d
23,545
def countDigits(string):
    """return number of digits in a string (Helper for countHaveTenDigits)

    Only the ASCII digits '0'-'9' are counted.
    """
    return sum(1 for char in string if char in "0123456789")
f8d2327e022efc7a117b744588dfe16a3a7ba75e
23,546
def get_process_entry(process_id: int) -> Process:
    """Get process entry.

    :raises AssertionError: When illegal state: Active processes != 1
    :param process_id: specify process
    :return: Process entry
    """
    query = db.session.query(Process).filter(Process.id == process_id)
    # Exactly one matching row is the documented invariant; the message
    # re-queries the count only on failure.
    assert query.count() == 1, \
        "Active processes != 1: " + str(query.count())
    return query.first()
442df14f1d032ff1fe8c40598f39a06255982da8
23,547
def shrink(filename):
    """
    The function will make the original image shrink to its half without
    losing too much quality.

    Each output pixel is the per-channel integer average of the 2x2 block of
    source pixels it covers. (Bug fix: the old corner branch averaged the
    blue channel with a misplaced parenthesis, producing values far above
    255; the old edge branches were redundant because x*2 == x and
    y*2 == y when x or y is 0.)

    :param filename: The directory of an image you want to process.
    :return img: SimpleImage, a shrink image that is similar to the
        original image.
    """
    img = SimpleImage(filename)
    # Create a blank image that its size is 1/2 of the original image.
    small = SimpleImage.blank(img.width // 2, img.height // 2)
    for x in range(small.width):
        for y in range(small.height):
            pixel = small.get_pixel(x, y)
            # The 2x2 block of source pixels that maps onto (x, y).
            p00 = img.get_pixel(x * 2, y * 2)
            p10 = img.get_pixel(x * 2 + 1, y * 2)
            p01 = img.get_pixel(x * 2, y * 2 + 1)
            p11 = img.get_pixel(x * 2 + 1, y * 2 + 1)
            pixel.red = (p00.red + p10.red + p01.red + p11.red) // 4
            pixel.green = (p00.green + p10.green + p01.green + p11.green) // 4
            pixel.blue = (p00.blue + p10.blue + p01.blue + p11.blue) // 4
    return small
0bff7d59bb7ae512883103bfe21ba80598c28c17
23,548
import getpass


def get_target_config():
    """ Get details of the target database (Postgres) """
    print('\n------------------------------------------')
    print('Enter target database settings:')
    print('------------------------------------------')

    config = {}
    # Interactive prompts; an empty answer falls back to the shown default.
    # NOTE(review): the port default is an int while a typed answer stays a
    # str — confirm downstream consumers accept both.
    config['username'] = input('- Username on target database (default "postgres"): ') or 'postgres'
    config['host'] = input('- Hostname for target database (default "localhost"): ') or 'localhost'
    config['port'] = input('- Port for target database (default "5432"): ') or 5432
    config['database'] = input("- Name of target database (default 'oracle_migration'): ") or "oracle_migration"
    # getpass keeps the password out of the terminal echo.
    config['password'] = getpass.getpass('- Password for target database: ')

    # Echo the collected settings back, with the password masked.
    print('\nUsername: {}'.format(config['username']))
    print('Hostname: {}'.format(config['host']))
    print('Port: {}'.format(config['port']))
    print('Database name: {}'.format(config['database']))
    print('Password: {}'.format('*'*len(config['password'])))
    return config
36820bae4af66b2db92ce1d467996b6e9a7a2624
23,549
def GetGPU():
    """Get the global index of GPU.

    Returns
    -------
    int
        The global index of GPU.
    """
    # Reads the module-level `option` mapping populated elsewhere.
    return option['device_id']
2c392c97da988c33ff12f59db4bb10f6b41e3bc1
23,550
def get_generic_explanation(exception_type):
    """Provides a generic explanation about a particular exception."""
    # Accept either an exception class or a plain name string.
    if hasattr(exception_type, "__name__"):
        exception_name = exception_type.__name__
    else:
        exception_name = exception_type
    if exception_name in GENERIC:
        return GENERIC[exception_name]()
    elif exception_name.endswith("Warning"):
        # Unknown warning types share one generic explanation.
        return GENERIC["UnknownWarning"]()
    elif hasattr(exception_type, "__name__") and issubclass(exception_type, OSError):
        # OSError subclasses (FileNotFoundError, ...) share one handler.
        return os_error_subclass(exception_type.__name__)
    else:
        return no_information()
b590be31518f3eabdc1cdeb31b1c66e66b47b253
23,551
import os


def _normalized_bam_coverage(name, bam_input, data):
    """Run bamCoverage from deeptools but produce normalized bigWig files"""
    cmd = ("{bam_coverage} --bam {bam_input} --outFileName {bw_output} "
           "--binSize 20 --effectiveGenomeSize {size} "
           "--smoothLength 60 --extendReads 150 --centerReads -p {cores} ")
    size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
    cores = dd.get_num_cores(data)
    try:
        bam_coverage = config_utils.get_program("bamCoverage", data)
    except config_utils.CmdNotFound:
        # bamCoverage is optional: skip quietly when deeptools is missing.
        logger.info("No bamCoverage found, skipping bamCoverage.")
        return None
    # NOTE(review): `method` is unused below — confirm it can be removed.
    method = dd.get_chip_method(data)
    cmd += "--normalizeUsing CPM "
    # Mitochondrial contigs would skew CPM normalization; exclude them.
    toignore = get_mitochondrial_chroms(data)
    if toignore:
        ignorenormflag = f"--ignoreForNormalization {' '.join(toignore)} "
        cmd += ignorenormflag
    # Append any user-configured extra options for bamCoverage.
    resources = config_utils.get_resources("bamCoverage", data["config"])
    if resources:
        options = resources.get("options")
        if options:
            cmd += " %s" % " ".join([str(x) for x in options])
    bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name)
    if utils.file_exists(bw_output):
        # Re-use existing output: makes re-runs idempotent.
        return bw_output
    with file_transaction(bw_output) as out_tx:
        do.run(cmd.format(**locals()), "Run bamCoverage in %s" % name)
    return bw_output
e282a1333d30656eb35157bb78f7bd429d6c5358
23,552
def simple_histogram(queryset, column, bins):
    """
    Return a histogram from data in queryset.

    :param queryset: A Queryet, Model, or Manager
    :param column: The column we are aggregating into a histogram
    :param bins: An ordered iterable of left endpoints of the bins. Must have at
        least two elements. The endpoints must be a convertible to strings by
        force_text
    :return: A dictionary with bin endpoints converted to strings as keys and
        counts as values (as produced by multi_histogram).
    """
    queryset = _get_queryset(queryset)
    # Tag every row with the column name so multi_histogram can slice on it
    # as if it were a real (single-choice) field.
    queryset = queryset.annotate(column_name=Value(column, output_field=CharField()))
    return multi_histogram(queryset, column, bins, slice_on='column_name',
                           choices=((column, column),))
b6f4f2738cdf5e3e610e830886e2c6639aae309e
23,553
import os


def ProbeDebuggerDir():
    """Probes the debugger installed path and returns the path."""
    program_files = os.environ.get('ProgramFiles')
    if not program_files:
        return None

    # Candidate install locations under %ProgramFiles%, probed in the same
    # order as before: classic Debugging Tools (plain, x86, x64), then the
    # Windows 8 / 8.1 kits.
    candidates = [
        '%s\\Debugging Tools For Windows' % program_files,
        '%s\\Debugging Tools For Windows (x86)' % program_files,
        '%s\\Debugging Tools For Windows (x64)' % program_files,
        '%s\\Windows Kits\\8.0\\Debuggers\\x86' % program_files,
        '%s\\Windows Kits\\8.1\\Debuggers\\x64' % program_files,
    ]
    for debugger_dir in candidates:
        if CdbExistsAtLocation(debugger_dir):
            return debugger_dir

    # Finally, the 64 bit debugger under the 64 bit Program Files root.
    program_files = os.environ.get('PROGRAMW6432')
    if not program_files:
        return None
    debugger_dir = '%s\\Debugging Tools For Windows (x64)' % program_files
    if CdbExistsAtLocation(debugger_dir):
        return debugger_dir
    return None
56b6d18782d0557fd8f4236fd0cdfcbbc98faf90
23,554
def bounding_box(points):
    """Bounding box

    Args:
        points: Array of shape (amount_of_points, dimensions)

    Returns:
        numpy.ndarray: Array of shape (dimensions, 2) holding
        [[min, max], [min, max], ...] along the dimensions of points.
    """
    points = np.asarray(points)
    # Bug fix: the number of coordinate axes is points.shape[1], not
    # points.ndim (which is always 2 for an (n, d) array, so any d != 2
    # input was handled incorrectly).
    n_dims = points.shape[1]
    out = np.empty((n_dims, 2))
    out[:, 0] = points.min(axis=0)
    out[:, 1] = points.max(axis=0)
    return out
44871a584f3592296c982c82a798c05ee8b166f7
23,555
def get_ts_WFI(self):
    """ Get kinetic energy density

    Returns an array of shape (grid.Nelem, n_columns) built from the
    per-solver ked_WFI arrays.
    """
    # One row per grid element, one column per solver column.
    ts = np.zeros((self.grid.Nelem, len(self.solver[0, :])))

    if self.optInv.ens_spin_sym is not True:
        # Compute the KED on every solver entry first.
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                self.solver[i, j].calc_ked_WFI()
                # ts[i,j] = self.solver[i,j].get_ked_WFI()
                # get_ked_WFI cannot be defined as a solver's method

        # Get Kinetic Energy Density
        # NOTE(review): ts[:, j] is overwritten for every i, so only the
        # last row index contributes per column — confirm whether a sum
        # over i was intended.
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i, j].ked_WFI is not None:
                    ts[:, j] = np.sum(self.solver[i, j].ked_WFI, axis=1)

    else:
        # Spin-symmetric ensemble: only the first column is computed.
        for i in range(self.solver.shape[0]):
            self.solver[i, 0].calc_ked_WFI()

        # Get Kinetic Energy Density
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i, j].ked_WFI is not None:
                    ts[:, j] = np.sum(self.solver[i, j].ked_WFI, axis=1)

    return ts
8384a5c3d1e2cdb5551ecb783101961f73a2d523
23,556
def _correct_outlier_correlation(rpeaks: pd.DataFrame, bool_mask: np.array, corr_thres: float, **kwargs) -> np.array:
    """Apply outlier correction method 'correlation'.

    This function compute the cross-correlation coefficient between every single beat and the average of all
    detected beats. It marks beats as outlier if the cross-correlation coefficient is below a certain threshold.

    Parameters
    ----------
    rpeaks : :class:`~pandas.DataFrame`
        dataframe with detected R peaks. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`
    bool_mask : :class:`numpy.array`
        boolean array with beats marked as outlier.
        Results of this outlier correction method will be combined with the array using a logical 'or'
    corr_thres : float
        threshold for cross-correlation coefficient. Beats below that threshold will be marked as outlier
    **kwargs : additional parameters required for this outlier function, such as:

        * ecg_signal :class:`~pandas.DataFrame`
          dataframe with processed ECG signal. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`
        * sampling_rate : float
          sampling rate of recorded data in Hz

    Returns
    -------
    :class:`numpy.array`
        boolean array with beats marked as outlier. Logical 'or' combination of ``bool_mask`` and results from
        this algorithm

    Raises
    ------
    ValueError
        if ``ecg_signal`` or ``sampling_rate`` is not supplied via kwargs
    """
    ecg_signal = kwargs.get("ecg_signal", None)
    sampling_rate = kwargs.get("sampling_rate", None)
    if any(v is None for v in [ecg_signal, sampling_rate]):
        raise ValueError(
            "Cannot apply outlier correction method 'correlation' because not all additionally required arguments "
            "were provided! Make sure you pass the following arguments: 'ecg_signal', 'sampling_rate'."
        )
    # signal outlier
    # segment individual heart beats
    heartbeats = nk.ecg_segment(ecg_signal["ECG_Clean"], rpeaks["R_Peak_Idx"], int(sampling_rate))
    heartbeats = nk.epochs_to_df(heartbeats)
    # one column per beat, rows indexed by time within the beat window
    heartbeats_pivoted = heartbeats.pivot(index="Time", columns="Label", values="Signal")
    heartbeats = heartbeats.set_index("Index")
    # keep only beats whose segment index matches a detected R peak
    heartbeats = heartbeats.loc[heartbeats.index.intersection(rpeaks["R_Peak_Idx"])].sort_values(by="Label")
    heartbeats = heartbeats[~heartbeats.index.duplicated()]
    # relabel pivot columns by R-peak sample index so they align with rpeaks
    heartbeats_pivoted.columns = heartbeats.index

    # compute the average over all heart beats and compute the correlation coefficient between all beats and
    # the average
    mean_beat = heartbeats_pivoted.mean(axis=1)
    heartbeats_pivoted["mean"] = mean_beat
    corr_coeff = heartbeats_pivoted.corr()["mean"].abs().sort_values(ascending=True)
    corr_coeff = corr_coeff.drop("mean")
    # compute RR intervals (in seconds) from R Peak Locations
    rpeaks["RR_Interval"] = np.ediff1d(rpeaks["R_Peak_Idx"], to_end=0) / sampling_rate

    # signal outlier: drop all beats that are below a correlation coefficient threshold
    return np.logical_or(bool_mask, rpeaks["R_Peak_Idx"].isin(corr_coeff[corr_coeff < corr_thres].index))
7216b1c8e2c3352d14273aa058e5c9fd4398044b
23,557
import time def _time_from_timestamp(timestamp: int) -> time: """ Casts a timestamp representing the number of seconds from the midnigh to a time object Parameters ---------- timestamp : int The number of seconds since midnight Returns ------- time The associated time object """ SECONDS_IN_MINUTE = 60 SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE remaining_time = timestamp hour, remaining_time = divmod(remaining_time, SECONDS_IN_HOUR) minute, second = divmod(remaining_time, SECONDS_IN_MINUTE) return time(hour, minute, second)
552f2b3b6841d48f3340ecdd94edb03f791a84c9
23,558
def get_marginal_frequencies_of_spikes_in_bins(symbol_counts, number_of_bins_d):
    """
    Compute for each past bin 1...d the sum of spikes found in that bin across
    all observed symbols.

    Each symbol encodes spike/no-spike per past bin; it is expanded to a 0/1
    array, weighted by the symbol's observed count, and the weighted arrays
    are accumulated element-wise.

    NOTE(review): assumes emb.symbol_binary_to_array returns an integer array
    of length number_of_bins_d -- confirm against the emb module.
    """
    return np.array(sum((emb.symbol_binary_to_array(symbol, number_of_bins_d)
                         * symbol_counts[symbol]
                         for symbol in symbol_counts)), dtype=int)
c1ace43f040715c87a3a137bebf64d862060a590
23,559
def member_stand(v, m):
    """ returns member m stand on vote v

    Result is a dict {'stand', 'class', 'name'}; 'stand' is the translated
    display string and 'class' the raw action type (or 'absent' when no
    vote action exists).  Python 2 code (comma except syntax).
    """
    # All recorded actions of member m on vote v (typically 0 or 1 rows).
    va = VoteAction.objects.filter(member = m, vote = v)
    if va:
        # Map the stored action type to its translated display string.
        # NOTE(review): if va[0].type matches no choice, `stand`/`cls` are
        # unbound and the return raises NameError -- confirm intended.
        for (name,string) in VOTE_ACTION_TYPE_CHOICES:
            if va[0].type==name:
                stand = _(string)
                cls = name
        return {'stand':stand, 'class':cls, 'name':va[0].member.name}
    else:
        # No recorded action: report the member as absent.
        stand=_('Absent')
        cls = 'absent'
    try:
        return {'stand':stand, 'class':cls, 'name':m.name}
    except Exception, e:
        # Best effort: log and fall through to an implicit None.
        logging.debug(e,exc_info=True)
        return
2a038c907046fad81784c442fe67f0b65902df85
23,560
def pagination(cl):
    """
    Generate the series of links to the pages in a paginated list.

    Returns a template context dict containing the computed ``page_range``
    (with DOT marking elided runs of pages), plus the flags and URLs the
    pagination template needs.
    """
    paginator, page_num = cl.paginator, cl.page_num

    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 2
        ON_ENDS = 1

        # If there are 8 or fewer pages, display links to every page.
        # Otherwise, do some fancy elision around the current page.
        if paginator.num_pages <= 8:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range += [
                    *range(0, ON_ENDS),
                    DOT,
                    *range(page_num - ON_EACH_SIDE, page_num + 1),
                ]
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                page_range += [
                    # Bug fix: the upper bound was `page_num + ON_EACH_SIDE - 1`,
                    # which with ON_EACH_SIDE == 2 produces an *empty* range and
                    # drops the pages after the current one. Upstream Django
                    # uses `page_num + ON_EACH_SIDE + 1`.
                    *range(page_num + 1, page_num + ON_EACH_SIDE + 1),
                    DOT,
                    *range(paginator.num_pages - ON_ENDS, paginator.num_pages),
                ]
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        "cl": cl,
        "pagination_required": pagination_required,
        "show_all_url": need_show_all_link and cl.get_query_string({ALL_VAR: ""}),
        "page_range": page_range,
        "ALL_VAR": ALL_VAR,
        "1": 1,
    }
cca1b80f1bc2c60c8f4af44f138b5433023298f7
23,561
def ants_apply_inverse_warps_template_to_func(
        workflow, strat, num_strat, num_ants_cores, input_node,
        input_outfile, ref_node, ref_outfile, func_name, interp,
        input_image_type
):
    """Apply the functional-to-structural and structural-to-template warps
    inversely to functional time-series in template space to warp it back
    to native functional space.

    Parameters
    ----------
    workflow: Nipype workflow object
        the workflow containing the resources involved
    strat: C-PAC Strategy object
        a strategy with one or more resource pools
    num_strat: int
        the number of strategy objects
    num_ants_cores: int
        the number of CPU cores dedicated to ANTS
        anatomical-to-standard registration
    input_node: Nipype pointer
        pointer to the node containing the 4D functional time-series
        (often the leaf node)
    input_outfile: Nipype pointer
        pointer to the output of the node, i.e. the 4D functional
        time-series itself
    ref_node: Nipype pointer
        pointer to the node containing the reference volume for the
        C3D FSL-to-ITK affine conversion (often the mean of the
        functional time-series, which is a single volume)
    ref_outfile: Nipype pointer
        pointer to the output of ref_node, i.e. the reference volume itself
    func_name: str
        what the name of the warped functional should be when written to the
        resource pool
    interp: str
        which interpolation to use when applying the warps
    input_image_type: int
        argument taken by the ANTs apply warp tool; in this case, should be
        3 for 4D functional time-series

    Returns
    -------
    Nipype workflow node
        the apply-warp node whose output image is registered in the
        resource pool under ``func_name``
    """

    # converts FSL-format .mat affine xfm into ANTS-format
    # .txt; .mat affine comes from Func->Anat registration
    fsl_to_itk_mni_func = create_wf_c3d_fsl_to_itk(
        name='fsl_to_itk_%s_%d' % (func_name, num_strat)
    )

    # collects series of warps to be applied
    collect_transforms_mni_func = \
        create_wf_collect_transforms(
            inverse=True,
            name='collect_transforms_%s_%d' % (func_name, num_strat)
        )

    # apply ants warps
    apply_ants_warp_mni_func = \
        create_wf_apply_ants_warp(
            inverse=True,
            name='apply_ants_warp_%s_%d' % (func_name, num_strat),
            ants_threads=int(num_ants_cores))

    apply_ants_warp_mni_func.inputs.inputspec.dimension = 3
    apply_ants_warp_mni_func.inputs.inputspec.interpolation = interp

    # input_image_type:
    # (0 or 1 or 2 or 3)
    # Option specifying the input image type of scalar
    # (default), vector, tensor, or time series.
    apply_ants_warp_mni_func.inputs.inputspec.\
        input_image_type = input_image_type

    # convert the .mat from linear Func->Anat to
    # ANTS format
    node, out_file = strat['functional_to_anat_linear_xfm']
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.affine_file')

    node, out_file = strat["anatomical_brain"]
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.reference_file')

    workflow.connect(ref_node, ref_outfile,
                     fsl_to_itk_mni_func,
                     'inputspec.source_file')

    workflow.connect(ref_node, ref_outfile,
                     apply_ants_warp_mni_func, 'inputspec.reference_image')

    # Field file from anatomical nonlinear registration
    node, out_file = strat['mni_to_anatomical_nonlinear_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.warp_file')

    # initial transformation from anatomical registration
    node, out_file = strat['ants_initial_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_initial')

    # affine transformation from anatomical registration
    node, out_file = strat['ants_affine_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_affine')

    # rigid transformation from anatomical registration
    node, out_file = strat['ants_rigid_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_rigid')

    # Premat from Func->Anat linear reg and bbreg
    # (if bbreg is enabled)
    workflow.connect(fsl_to_itk_mni_func, 'outputspec.itk_transform',
                     collect_transforms_mni_func,
                     'inputspec.fsl_to_itk_affine')

    # this <node, out_file> pulls in directly because
    # it pulls in the leaf in some instances
    workflow.connect(input_node, input_outfile,
                     apply_ants_warp_mni_func, 'inputspec.input_image')

    workflow.connect(collect_transforms_mni_func,
                     'outputspec.transformation_series',
                     apply_ants_warp_mni_func, 'inputspec.transforms')

    strat.update_resource_pool({
        func_name: (apply_ants_warp_mni_func, 'outputspec.output_image')
    })

    strat.append_name(apply_ants_warp_mni_func.name)

    return apply_ants_warp_mni_func
23,562
def get_metrics_influx(query, query_index):
    """Run *query* against the InfluxDB host selected by *query_index*.

    Host and database are looked up in ``defs.INFLUX_DETAILS``; results are
    returned with timestamps in epoch seconds.
    """
    host = defs.INFLUX_DETAILS[query_index][0]
    database = defs.INFLUX_DETAILS[query_index][1]
    client = InfluxDBClient(
        host=host,
        database=database,
        port=8086,
        timeout=5,
        retries=5)
    return client.query(query, epoch='s')
3f5d7c147553d3b16cfb3d18a1b86805f879fda7
23,563
def find_buckets(pc, target_centres, N, bucket_height=.38, bucket_radius=.15):
    """Locate the N buckets in the point cloud and label their points.

    NOTE(review): assumes pc has columns x, y, z, refl, is_branch and that
    target_centres has x, y, aruco columns -- confirm against callers.

    Returns:
        pc, bucket_centres
    """
    ### find buckets and remove ###
    print ('finding buckets')
    # candidate bucket points: horizontal slab between 0.1 m and 0.4 m height
    buckets = pc[pc.z.between(.1, .4)]
    # voxelise to speed-up dbscan
    buckets.loc[:, 'xx'] = (buckets.x // .005) * .005
    buckets.loc[:, 'yy'] = (buckets.y // .005) * .005
    buckets.loc[:, 'zz'] = (buckets.z // .005) * .005
    buckets.sort_values(['xx', 'yy', 'zz', 'refl'], inplace=True)
    # one representative point per 5 mm voxel
    bucket_voxels = buckets[~buckets[['xx', 'yy', 'zz']].duplicated()]
    # print(buckets)
    dbscan = DBSCAN(min_samples=20, eps=.05).fit(bucket_voxels[['xx', 'yy', 'zz']])
    bucket_voxels.loc[:, 'labels_'] = dbscan.labels_
    # merge results back
    buckets = pd.merge(buckets, bucket_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx', 'yy', 'zz'])
    # find three largest targets (assumed buckets)
    labels = buckets.labels_.value_counts().index[:N]
    buckets = buckets[buckets.labels_.isin(labels)]
    bucket_centres = buckets.groupby('labels_')[['x', 'y']].mean().reset_index()
    bucket_centres.loc[:, 'aruco'] = -1

    try:
        # pair up aruco and buckets , identify and label bucket points
        for i, lbl in enumerate(buckets.labels_.unique()):
            bucket = buckets[buckets.labels_ == lbl]
            # nearest aruco target to this bucket's centroid
            X, Y = bucket[['x', 'y']].mean(), target_centres[['x', 'y']].astype(float)
            dist2bucket = np.linalg.norm(X - Y, axis=1)
            aruco = target_centres.loc[np.where(dist2bucket == dist2bucket.min())].aruco.values[0]
            print ('bucket {} associated with aruco {}'.format(lbl, aruco))
            bucket_centres.loc[bucket_centres.labels_ == lbl, 'aruco'] = aruco

            # identify buckets points
            x_shift = bucket_centres[bucket_centres.aruco == aruco].x.values
            y_shift = bucket_centres[bucket_centres.aruco == aruco].y.values
            pc.dist = np.sqrt((pc.x - x_shift)**2 + (pc.y - y_shift)**2)
            # un-flag bucket points that were marked as branch
            idx = pc[(pc.z < bucket_height) & (pc.dist < bucket_radius) & (pc.is_branch)].index
            pc.loc[idx, 'is_branch'] = False

            # label branch base with aruco
            idx = pc[(pc.z < bucket_height + .5) & (pc.dist < bucket_radius)].index
            pc.loc[idx, 'aruco'] = aruco
    except Exception as err:
        # visual debugging aid: plot the clusters and targets before failing
        plt.scatter(buckets.x.loc[::100], buckets.y.loc[::100], c=buckets.labels_.loc[::100])
        plt.scatter(target_centres.x, target_centres.y)
        [plt.text(r.x, r.y, r.aruco) for ix, r in target_centres.iterrows()]
        # NOTE(review): `raise Exception` discards `err` and the original
        # traceback; a bare `raise` would preserve it -- confirm intended.
        raise Exception

    return pc, bucket_centres
e7e5480783235e6e9ad48cbfc4d006e9a0a7b61e
23,564
import torch


def fit_model(model, state_train, action_train, num_epochs,
              learning_rate = 1e-2, batch_size=32, shuffle=True):
    """
    Trains a pytorch module model to predict actions from states for num_epochs passes
    through the dataset. This is used to do a (relatively naive) version of behavior cloning

    pretty naive (but fully functional) training loop right now, will want to keep adding to this
    and will want to eventually make it more customizable. The hope is that this will eventually
    serve as a keras model.fit funtion, but custimized to our needs.

    Attributes:
        model: pytorch module implementing your controller
        states_train: numpy array (or pytorch tensor) of states (inputs to your network)
            you want to train over
        action_train: numpy array (or pytorch tensor) of actions (outputs of the network)
        num_epochs: how many passes through the dataset to make
        learning_rate: initial learning rate for the adam optimizer

    Returns:
        Returns a list of average losses per epoch
        but note that the model is trained in place!!

    Example:
        model = nn.Sequential(
            nn.Linear(4,12),
            nn.ReLU(),
            nn.Linear(12,12),
            nn.ReLU(),
            nn.Linear(12,1)
            )

        states = np.random.randn(100,4)
        actions = np.random.randn(100,1)

        loss_hist = fit_model(model,states, actions, 200)
    """
    # Check if GPU is available , else fall back to CPU
    # TODO this might belong in module body
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    # NOTE(review): the model itself is never moved to `device`; with CUDA
    # available this would error on the forward pass -- confirm.

    # Normalize training data set
    state_train_norm, state_train_mean, state_train_std = normalize_data(state_train)
    action_train_norm, action_train_mean, action_train_std = normalize_data(action_train)

    state_tensor = torch.as_tensor(state_train_norm, dtype = torch.float32) # make sure that our input is a tensor
    action_tensor = torch.as_tensor(action_train_norm, dtype = torch.float32)

    # `data` here is torch.utils.data (imported elsewhere in the module)
    training_data = data.TensorDataset(state_tensor, action_tensor)
    training_generator = data.DataLoader(training_data, batch_size=batch_size, shuffle=shuffle)

    # action_size = action_train.size()[1]
    loss_hist = []
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    for epoch in range(num_epochs):
        epoch_loss = 0
        for local_states, local_actions in training_generator:
            # Transfer to GPU (if GPU is enabled, else this does nothing)
            local_states, local_actions = local_states.to(device), local_actions.to(device)

            # predict and calculate loss for the batch
            action_preds = model(local_states)
            loss = loss_fn(local_actions, action_preds)
            epoch_loss += loss # only used for metrics

            # do the normal pytorch update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # after each epoch append the average loss
        # NOTE(review): epoch_loss is a tensor that keeps the autograd graph
        # alive; detaching (.item()) before accumulating would be cheaper.
        loss_hist.append(epoch_loss / len(state_train))

    return loss_hist
f68573326a218c3da47bb016f02318412ddf9d1d
23,565
from typing import Any
from typing import Dict
import typing


def _deserialize_dict(
    class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
    """Deserialize a dictionary to a Python object.

    Two modes: when ``class_reference`` is a plain ``dict`` type the entries
    are deserialized element-wise; otherwise ``data`` is treated as the
    attribute payload for an instance of ``class_reference`` (honouring
    downcast fields, key mappings, per-key parsers, auto-snake-casing and
    defaults).  With ``throw_on_unhandled`` set, any key in ``data`` that is
    not consumed raises ``UnhandledFieldException``.
    """

    # Check if we are doing a straightforward dictionary parse first, or if it
    # has to be deserialized

    # NOTE(review): .keys() is read before the isinstance check below, so a
    # non-dict raises AttributeError instead of DeserializeException -- confirm.
    remaining_properties = set(data.keys())

    if not isinstance(data, dict):
        raise DeserializeException(
            f"Data was not dict for instance: {class_reference} for {debug_name}"
        )

    if is_dict(class_reference):
        if class_reference is dict:
            # If types of dictionary entries are not defined, do not deserialize
            return data

        key_type, value_type = dict_content_types(class_reference, debug_name)
        result = {}

        for dict_key, dict_value in data.items():
            # validate the key type; value is deserialized recursively
            if key_type != Any and not isinstance(dict_key, key_type):
                raise DeserializeException(
                    f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
                )

            result[dict_key] = _deserialize(
                value_type,
                dict_value,
                f"{debug_name}.{dict_key}",
                throw_on_unhandled=throw_on_unhandled,
                raw_storage_mode=raw_storage_mode.child_mode(),
            )

            remaining_properties.remove(dict_key)

        if throw_on_unhandled and len(remaining_properties) > 0:
            raise UnhandledFieldException(
                f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
            )

        return result

    # It wasn't a straight forward dictionary, so we are in deserialize mode

    class_instance = None
    class_reference_downcast_field = _get_downcast_field(class_reference)
    if class_reference_downcast_field:
        # resolve the concrete subclass from the downcast identifier field
        downcast_value = data[class_reference_downcast_field]
        new_reference = _get_downcast_class(class_reference, downcast_value)
        if new_reference is None:
            if _allows_downcast_fallback(class_reference):
                # unknown subclass but fallback allowed: keep as raw dict
                return _deserialize(
                    Dict[Any, Any],
                    data,
                    debug_name,
                    throw_on_unhandled=throw_on_unhandled,
                    raw_storage_mode=raw_storage_mode.child_mode(),
                )
            raise UndefinedDowncastException(
                f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
            )
        class_reference = new_reference

    # __new__ avoids running __init__; attributes are set directly below
    class_instance = class_reference.__new__(class_reference)

    handled_fields = set()

    hints = typing.get_type_hints(class_reference)

    if len(hints) == 0:
        raise DeserializeException(
            f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
        )

    for attribute_name, attribute_type in hints.items():
        if _should_ignore(class_reference, attribute_name):
            continue

        property_key = _get_key(class_reference, attribute_name)
        parser_function = _get_parser(class_reference, property_key)

        if is_classvar(attribute_type):
            if property_key in data:
                raise DeserializeException(
                    f"ClassVars cannot be set: {debug_name}.{attribute_name}"
                )
            continue

        if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
            raise DeserializeException(
                f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
            )

        using_default = False
        # Look up the raw value under the mapped key, then (with auto_snake)
        # the camelCase and PascalCase variants, before falling back to a
        # declared default or an Optional None.
        if property_key in data:
            value = data[property_key]
            handled_fields.add(property_key)
            property_value = parser_function(value)
        elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
            value = data[camel_case(property_key)]
            handled_fields.add(camel_case(property_key))
            property_value = parser_function(value)
        elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
            value = data[pascal_case(property_key)]
            handled_fields.add(pascal_case(property_key))
            property_value = parser_function(value)
        else:
            if _has_default(class_reference, attribute_name):
                deserialized_value = _get_default(class_reference, attribute_name)
                using_default = True
            else:
                if not is_union(attribute_type) or type(None) not in union_types(
                    attribute_type, debug_name
                ):
                    raise DeserializeException(
                        f"Unexpected missing value for: {debug_name}.{attribute_name}"
                    )
                property_value = parser_function(None)

        if not using_default:
            deserialized_value = _deserialize(
                attribute_type,
                property_value,
                f"{debug_name}.{attribute_name}",
                throw_on_unhandled=throw_on_unhandled,
                raw_storage_mode=raw_storage_mode.child_mode(),
            )

        setattr(class_instance, attribute_name, deserialized_value)

    unhandled = set(data.keys()) - handled_fields

    if throw_on_unhandled and len(unhandled) > 0:
        filtered_unhandled = [
            key for key in unhandled if not _should_allow_unhandled(class_reference, key)
        ]
        if len(filtered_unhandled) > 0:
            raise UnhandledFieldException(
                f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
            )

    # run any post-construction hook defined on the class
    _call_constructed(class_reference, class_instance)

    return class_instance
d1b81247d47958b05bc4ff116a2a0df9ee6eaeb6
23,566
def red_bg(text):
    """Render *text* with a red background.

    Returns the colourised string produced by the shared colour helper.
    """
    # ANSI background colour index 1 == red.
    return _create_color_func(text, bgcolor=1)
c867f7415230b6f5c179a4369bf4751f9ee2a442
23,567
def does_block_type_support_children(block_type):
    """
    Does the specified block type (e.g. "html", "vertical") support child blocks?
    """
    try:
        block_class = XBlock.load_class(block_type)
    except PluginMissingError:
        # We don't know if this now-uninstalled block type had children
        # but to be conservative, assume it may have.
        return True
    return block_class.has_children
f1e86e6b378ef3e134106653e012c8a06cebf821
23,568
def jsonDateTimeHandler(obj):
    """JSON serialization fallback for date/time-like objects.

    Objects exposing ``strftime`` are rendered in a js-friendly format;
    otherwise ``isoformat`` is used if available.  Anything else raises
    TypeError, mirroring json's default behaviour.
    """
    formatter = getattr(obj, "strftime", None)
    if formatter is not None:
        # To avoid problems with the js date-time format
        return formatter("%a %b %d, %Y %I:%M %p")
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    # elif isinstance(obj, ...):
    #    return ...
    raise TypeError(
        'Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj)))
605f8a379575d185bc2a8b16810252511eec52af
23,569
def truncate_desired(cluster, desired, min_size, max_size):
    """Do truncation of desired capacity for non-strict cases.

    :param cluster: The target cluster.
    :param desired: The expected capacity of the cluster.
    :param min_size: The NEW minimum capacity set for the cluster.
    :param max_size: The NEW maximum capacity set for the cluster.
    :returns: the desired capacity clamped into the effective [min, max]
              bounds (new bounds take precedence over the cluster's own).
    """
    # Clamp from below: the new min_size wins over the cluster's current one.
    if min_size is None:
        if desired < cluster.min_size:
            desired = cluster.min_size
            LOG.debug("Truncating shrinkage to cluster's min_size (%s).",
                      desired)
    else:
        if desired < min_size:
            desired = min_size
            LOG.debug("Truncating shrinkage to specified min_size (%s).",
                      desired)

    # Clamp from above; a non-positive max means "unbounded".
    if max_size is None:
        if cluster.max_size > 0 and desired > cluster.max_size:
            desired = cluster.max_size
            LOG.debug("Truncating growth to cluster's max_size (%s).",
                      desired)
    else:
        if max_size > 0 and desired > max_size:
            desired = max_size
            LOG.debug("Truncating growth to specified max_size (%s).",
                      desired)

    return desired
c282a7faece40f78bb9baf58b702b942603cc793
23,570
def get_crypto_price(crypto, fiat):
    """Helper function to convert any cryptocurrency to fiat"""

    def _extract_amount(quote):
        # Quotes look like "<asset> = <amount> <unit>"; pull out the number.
        return float(quote.split('=')[1].strip().split()[0])

    # Value of `crypto` expressed in BTC ...
    value_in_btc = _extract_amount(binance_convert_crypto(crypto, "BTC"))
    # ... times the latest bitcoin price in the requested fiat currency.
    btc_in_fiat = _extract_amount(get_price("btc", fiat))
    return value_in_btc * btc_in_fiat
f91e56c74b3422d3ab272c029352ae94521033b0
23,571
def boxblur(stream: Stream, *args, **kwargs) -> FilterableStream:
    """Apply the ffmpeg ``boxblur`` video filter to *stream*.

    Extra positional/keyword arguments are forwarded to the filter.
    See https://ffmpeg.org/ffmpeg-filters.html#boxblur
    """
    return filter(stream, boxblur.__name__, *args, **kwargs)
5f29981abaf050b43207452649f4ad9e3fafc05c
23,572
import os


def create_netcdf_dataset(
    location,
    name,
    start_time,
    end_time,
    sweep,
    inpath=None,
    outpath="",
    # NOTE(review): mutable default `{}` is shared across calls -- safe only
    # as long as nothing mutates it; confirm.
    chunks={},
    engine="h5netcdf",
):
    """Create NetCDF file from radar data

    Collects the GAMIC/ODIM files for *location* between *start_time* and
    *end_time*, extracts the requested *sweep*, tags it with the location,
    and writes it out with the given engine.

    Returns the absolute path of the written NetCDF file.
    """
    radar_path = get_xpol_path(inpath=inpath, start_time=start_time, loc=location)
    file_path = os.path.join(radar_path, name)
    # all files in the time window for this radar/name
    file_obj = list(create_filelist(os.path.join(file_path, "*"), start_time, end_time))
    vol = wrl.io.open_odim(file_obj, loader="h5py", flavour="GAMIC", chunks=chunks)
    ds = vol[sweep].data
    ds = ds.assign_coords({"location": location})
    # ds = ds.chunk({"time": 24})
    ofname = save_netcdf_dataset(ds, outpath, engine=engine)
    # release file handles held by the volume before returning
    del vol
    return os.path.abspath(ofname)
062b79957862995cd59403afb6343b793727fb77
23,573
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened.

    Args:
        lis: A (possibly nested) list.

    Returns:
        A new flat list with the leaf items in depth-first order.
    """
    new_lis = []
    for item in lis:
        # isinstance is the idiomatic, subclass-aware check, replacing the
        # old `type(item) == type([])` comparison.
        if isinstance(item, list):
            new_lis.extend(flatten(item))
        else:
            new_lis.append(item)
    return new_lis
7e4e00af9f20f58dc0798a0731c352949dd71cf5
23,574
def name(ndims=2, ndepth=2):
    """Build the standardized model name string.

    Args:
        ndims: spatial dimensionality of the model (e.g. 2 or 3).
        ndepth: depth of the network.

    Returns:
        A name of the form ``care_denoise_<ndims>Ddepth<ndepth>``.
    """
    # Model name, depth and version
    return 'care_denoise_%dDdepth%d' % (ndims, ndepth)
1933ac0454eac4c860d70683e58c922074498b63
23,575
import string
import random


def _generate_url_slug(size=10, chars=string.ascii_lowercase + string.digits):
    """
    Generate a random URL slug that does not already exist in redis.

    This is for a Django project and it assumes your instance has
    a model with a slug field and a title character (char) field.

    Parameters
    ----------
    size: <Int> Size of the slug.
    chars: <string.class> Character class to be included in the slug.

    Returns
    -------
    A unique slug string, or None if the recursion limit is hit before a
    free slug is found.
    """
    slug = ''.join(random.choice(chars) for _ in range(size))
    if redis.exists(slug):
        try:
            # Bug fix: propagate size/chars on retry -- the original
            # recursive call silently fell back to the default arguments.
            return _generate_url_slug(size, chars)
        except RecursionError:
            return None
    return slug
1ebe945730e7e5f977c80666db92cfefb9a1a1a7
23,576
def mc_compute_stationary(P):
    """
    Computes the stationary distribution of Markov matrix P.

    Parameters
    ----------
    P : array_like(float, ndim=2)
        A discrete Markov transition matrix

    Returns
    -------
    array_like(float, ndim=1)
        The stationary distribution for P

    Note: Currently only supports transition matrices with a unique
    invariant distribution.  See issue 19.
    """
    n = len(P)  # P is n x n
    # Solve (I - P + B)' x = 1 where B is the all-ones matrix; the ones
    # term enforces sum(x) == 1, so x is the stationary distribution.
    coeff = np.transpose(np.identity(n) - P + np.ones((n, n)))
    return np.linalg.solve(coeff, np.ones(n))
dc971399bc7b8626347ba9a20a6c8f449870b606
23,577
def isight_prepare_data_request(a_url, a_query, a_pub_key, a_prv_key):
    """Sign a query and fetch it from the FireEye iSight API.

    :param a_url: base API URL
    :param a_query: API endpoint/query to request
    :param a_pub_key: public API key (placed in the request header)
    :param a_prv_key: private API key (used to sign the request)
    :return: the API result, or False when nothing was retrieved
    """
    header = set_header(a_prv_key, a_pub_key, a_query)
    result = isight_load_data(a_url, a_query, header)
    if result:
        return result
    PySight_settings.logger.error('Something went wrong when retrieving indicators from the FireEye iSight API')
    return False
8854013b509fa52a35bd1957f6680ce4e4c17dc4
23,578
def norm_fisher_vector(v, method=('power', 'l2')):
    """
    Normalize a set of fisher vectors.

    :param v: numpy.array
        A matrix with Fisher vectors as rows (each row corresponding to an
        image). May be modified in place by the 'l2' step.
    :param method: sequence of str
        Normalization methods to apply, in order. Choices: 'power', 'l2'.
        (Default is now an immutable tuple -- a mutable default list is a
        classic Python pitfall; callers passing lists still work.)
    :return: numpy.array
        The set of normalized vectors (as a matrix).
    """
    if 'power' in method:
        # Signed square root ("power normalization").
        v = np.sign(v) * np.abs(v)**0.5
    if 'l2' in method:
        nrm = np.sqrt(np.sum(v**2, axis=1))
        v /= nrm.reshape(-1, 1)
    # Zero-norm rows yield NaNs; replace them with a large sentinel.
    v[np.isnan(v)] = 100000.0  # some large value
    return v
06592b42914902f183d085f584b00cd9a1f057ce
23,579
def get_top_funnels_df(funurl: str, funlen: int, useResolvedUrls: bool, events: DataFrame,
                       limit_rows: int = 0) -> dict:
    """Get top funnels of specified length which contain the specified URL

    :param funurl: URL that should be contained in the funnel
    :param funlen: funnel length
    :param useResolvedUrls: indicates whether original or resolved URLs should be used
    :param events: events DataFrame
    :param limit_rows: number of rows of events DataFrame to use (use all rows if 0)
    :return: dictionary of funnels and their frequencies
    """
    # Pick which URL column funnels are built over.
    if useResolvedUrls:
        columnToUse = analyze_traffic.RESOLVEDURL
    else:
        columnToUse = analyze_traffic.PAGEURL
    if limit_rows != 0:
        events = events.head(limit_rows)
    if useResolvedUrls:
        # Populate the resolved-URL column in place before indexing.
        url_regex_resolver.resolve_urls(events, manage_resolutions.get_regex_dict(),
                                        analyze_traffic.PAGEURL, analyze_traffic.RESOLVEDURL)
    si = analyze_traffic.build_session_index(events, columnToUse)
    funnelCounts = get_funnel_lists(events, si, funurl, funlen, columnToUse)
    return funnelCounts
1fe29668e98076bbb39023e04fc1a5845788d9ef
23,580
import os
import string


def count_alphabet():
    """ Return dict which contains rating of alplabet

    Scans every .txt file in the local ``data`` directory, slugifies the
    content to strip accents/punctuation, and returns the percentage
    frequency of each lowercase ASCII letter across all files.
    """
    # Get all txt file in folder data
    list_file = []
    for file in os.listdir("data"):
        if file.endswith(".txt"):
            list_file.append(os.path.join("data", file))

    # Int result
    result = {}
    for i in string.ascii_lowercase:
        result[i] = 0

    # Counting
    for file in list_file:
        f = open(file, 'r')
        content = f.read()
        # slugify normalizes to ascii words joined by '-'; drop the dashes
        content = slugify(content)
        content = content.replace("-", '')
        for char in content:
            if char in result.keys():
                result[char] = result[char] + 1
        f.close()

    # Compute total
    total = 0
    for k in result.keys():
        total = total + result[k]

    # Compute
    # NOTE(review): division by zero if no .txt files / no letters -- confirm.
    for k in result.keys():
        result[k] = 100.0 * result[k] / total

    return result
241af2b66abd46afc72259615ad216b96bef6ea5
23,581
from typing import Dict
from typing import Any


def graph_to_json(obj: Graph) -> Dict[str, Any]:
    """Serialize a graph, omitting the "operator" field.

    Uses the regular serializer and then removes the back-reference to
    avoid circular references in the resulting JSON structure.
    """
    serialized_obj = dict(any_to_json(obj))
    # to prevent circular reference
    serialized_obj.pop('operator', None)
    return serialized_obj
922d53d5fb9b23773cdea13e94930985785f6c77
23,582
def log_ttest_vs_basal(df, basal_key):
    """Do t-tests in log space to see if sequences has the same activity as basal.

    Parameters
    ----------
    df : pd.DataFrame
        Index is sequence ID, columns are average RNA/DNA barcode counts for each replicate.
    basal_key : str
        Index value for basal.

    Returns
    -------
    pvals : pd.Series
        p-value for t-test of the null hypothesis that the log activity of a sequence is the same as that of
        basal. Does not include a p-value for basal.
    """
    # Per-sequence lognormal summary stats (mean, std, n) in log space.
    log_params = df.apply(_get_lognormal_params, axis=1)

    # Pull out basal params
    basal_mean, basal_std, basal_n = log_params.loc[basal_key]

    # Drop basal from the df
    log_params = log_params.drop(index=basal_key)

    # Do t-tests on each row (Welch's t-test: equal_var=False); keep only
    # the p-value ([1]) from each (statistic, pvalue) pair.
    pvals = log_params.apply(lambda x: stats.ttest_ind_from_stats(basal_mean, basal_std, basal_n,
                                                                  x["mean"], x["std"], x["n"],
                                                                  equal_var=False)[1],
                             axis=1)
    return pvals
b41029c7c61b3b365bf71e2aaba8a81aecf5533a
23,583
def spleen_lymph_cite_seq(
    save_path: str = "data/",
    protein_join: str = "inner",
    remove_outliers: bool = True,
    run_setup_anndata: bool = True,
) -> anndata.AnnData:
    """
    Immune cells from the murine spleen and lymph nodes [GayosoSteier21]_.

    This dataset was used throughout the totalVI manuscript, and named SLN-all.

    Parameters
    ----------
    save_path
        Location to use when saving/loading the data.
    protein_join
        Whether to take an inner join or outer join of proteins
    remove_outliers
        Whether to remove clusters annotated as doublet or low quality
    run_setup_anndata
        If true, runs setup_anndata() on dataset before returning

    Returns
    -------
    AnnData with batch info (``.obs['batch']``), label info (``.obs['cell_types']``),
    protein expression (``.obsm["protein_expression"]``), and tissue (``.obs['tissue']``).

    Missing protein values are zero, when ``protein_join == "outer`` and are identified
    during ``AnnData`` setup.

    Examples
    --------
    >>> import scvi
    >>> adata = scvi.data.spleen_lymph_cite_seq()
    """
    # Thin public wrapper: all work happens in the private loader.
    return _load_spleen_lymph_cite_seq(
        save_path=save_path,
        protein_join=protein_join,
        remove_outliers=remove_outliers,
        run_setup_anndata=run_setup_anndata,
    )
7eeead5e6f69b7f3b9dff85b33cae675ea0a47ec
23,584
import os


def getInputs(path, sequenceNames):
    """Return dataset sequence file paths and the newick tree string.

    Requires setting SON_TRACE_DATASETS variable and having access to
    datasets.
    """
    dataset_dir = os.path.join(TestStatus.getPathToDataSets(), path)
    sequences = []
    for sequence in sequenceNames:  # Same order as tree
        sequences.append(os.path.join(dataset_dir, sequence))
    # NOTE(review): the tree is read relative to `path`, not dataset_dir --
    # confirm this is intentional.
    newickTreeString = parseNewickTreeFile(os.path.join(path, "tree.newick"))
    return sequences, newickTreeString
fc5bb96daf0309e7ca4bd65b1217b6a333c0e8a2
23,585
def strftime_local(aware_time, fmt="%Y-%m-%d %H:%M:%S"):
    """Format a datetime as a string in the local timezone.

    Returns None when the time field is empty/NULL; timezone-aware values
    are converted to the local timezone first, naive values are formatted
    as-is.
    """
    if not aware_time:
        # The field may be NULL in the database; pass the emptiness through.
        return None
    local_time = aware_time
    if timezone.is_aware(local_time):
        # translate to time in local timezone
        local_time = timezone.localtime(local_time)
    return local_time.strftime(fmt)
1294795d793c22e7639fb88ca02e34bb6b764892
23,586
import re


def filter_issues_fixed_by_prs(issues, prs, show_related_prs, show_related_issues):
    """
    Find related issues to prs and prs to issues that are fixed.

    This adds extra information to the issues and prs listings:
    ``loghub_related_pulls`` on issues and ``loghub_related_issues`` on PRs,
    both sorted by URL in descending order.

    :param issues: iterable of issue objects (attribute access for GitHub
        fields, dict access for the loghub_* annotations)
    :param prs: iterable of issue/PR objects; only those with a
        ``pull_request`` entry are treated as PRs
    :param show_related_prs: annotate issues with the PRs that fix them
    :param show_related_issues: annotate PRs with the issues they fix
    :return: the (mutated) issues and prs
    """
    words = [
        'close', 'closes', 'fix', 'fixes', 'fixed', 'resolve', 'resolves',
        'resolved'
    ]
    # Matches e.g. "Fixes #5", "Closes owner/repo#5", "Fixes .../issues/5".
    pattern = re.compile(
        r'(?P<word>' + r'|'.join(words) + r') '
        r'((?P<repo>.*?)#(?P<number>\d*)|(?P<full_repo>.*)/(?P<number_2>\d*))',
        re.IGNORECASE,
    )
    issue_pr_map = {}
    pr_issue_map = {}
    for pr in prs:
        is_pr = bool(pr.get('pull_request'))
        if is_pr:
            pr_url = pr.html_url
            pr_number = pr.number
            user = pr.user
            repo_url = pr_url.split('/pull/')[0] + '/issues/'
            pr_issue_map[pr_url] = []
            body = pr.body or ''
            # Remove blanks and markdown comments
            if body:
                lines = body.splitlines()
                no_comments = [l for l in lines
                               if (l and not l.startswith("<!---"))]
                body = '\n'.join(no_comments)
            for matches in pattern.finditer(body):
                dic = matches.groupdict()
                issue_number = dic['number'] or dic['number_2'] or ''
                repo = dic['full_repo'] or dic['repo'] or repo_url
                # Repo name can't have spaces.
                if ' ' not in repo:
                    # In case spyder-ide/loghub#45 was for example used
                    if 'http' not in repo:
                        repo = 'https://github.com/' + repo
                    if '/issues' not in repo:
                        issue_url = repo + '/issues/' + issue_number
                    elif repo.endswith('/') and issue_number:
                        issue_url = repo + issue_number
                    elif issue_number:
                        issue_url = repo + '/' + issue_number
                    else:
                        issue_url = None
                else:
                    issue_url = None
                # Set the issue data
                issue_data = {'url': pr_url, 'text': pr_number, 'user': user}
                if issue_url is not None:
                    # Bug fix: membership must be tested against the map key
                    # (issue_url), not the bare issue number. The old check
                    # was always False, so several PRs fixing the same issue
                    # overwrote each other's entries instead of accumulating.
                    if issue_url in issue_pr_map:
                        issue_pr_map[issue_url].append(issue_data)
                    else:
                        issue_pr_map[issue_url] = [issue_data]
                    pr_data = {'url': issue_url, 'text': issue_number}
                    pr_issue_map[pr_url].append(pr_data)
            if show_related_issues:
                pr['loghub_related_issues'] = pr_issue_map[pr_url]
    for issue in issues:
        issue_url = issue.html_url
        if issue_url in issue_pr_map and show_related_prs:
            issue['loghub_related_pulls'] = issue_pr_map[issue_url]
    # Now sort the numbers in descending order
    for issue in issues:
        related_pulls = issue.get('loghub_related_pulls', [])
        related_pulls = sorted(
            related_pulls, key=lambda p: p['url'], reverse=True)
        issue['loghub_related_pulls'] = related_pulls
    for pr in prs:
        related_issues = pr.get('loghub_related_issues', [])
        related_issues = sorted(
            related_issues, key=lambda i: i['url'], reverse=True)
        pr['loghub_related_issues'] = related_issues
    return issues, prs
6e63dc9988c9343b4f9d2baae2d995b26b666ed3
23,587
import re


def run_job(answer: str, job: dict, grade: float, feedback: str):
    """
    Match answer to regex inside job dictionary. Add weight to grade if
    successful, else add comment to feedback.

    :param answer: Answer.
    :param job: Dictionary with regex, weight, and comment.
    :param grade: Current grade for the answer.
    :param feedback: Current feedback for the answer.
    :return: Modified answer, grade, and feedback.
    """
    found = re.search(job["regex"], answer)
    if found is None:
        # No credit: record the job's comment for the student.
        feedback += job["comment"] + "\n"
    else:
        grade += job["weight"]
        # Consume the matched text so later jobs don't re-match it.
        answer = answer.replace(found[0], "", 1)
    return answer, grade, feedback
487916da129b8958f8427b11f0118135268f9245
23,588
def __build_data__(feature, qars):
    """Assemble the template/popup payload for one Benin republic department.

    :param feature: GeoJSON feature for a single department, with GADM-style
        properties (``NAME_0`` country, ``NAME_1`` department).
    :param qars: passed through unchanged under the ``qars`` key.
    :return: dict of raw and significant-figure-rounded department statistics.
    """
    department_name = feature["properties"]["NAME_1"]
    country_name = feature["properties"]["NAME_0"]
    data = {
        'qars': qars,
        'department': department_name,
        'predictions': data_dictionary[country_name][department_name]["properties"],
    }

    # Rank every department by its summed cashew tree cover (descending).
    cover_by_department = []
    for dept in DeptSatellite.objects.all():
        dept_name = dept.department
        cover = CommuneSatellite.objects.filter(
            department=dept_name).aggregate(Sum('cashew_tree_cover'))
        cover_by_department.append((dept_name, cover['cashew_tree_cover__sum']))
    ranked = sorted(cover_by_department, reverse=True, key=lambda pair: pair[1])
    ranked_names, _ = zip(*ranked)
    # French accented department names break the ranking lookup when served
    # from a local host, so the real lookup only runs on the heroku deploy.
    data["position"] = ranked_names.index(department_name) if heroku else 1

    # Ordinal labels used by the template to phrase the ranking.
    data["my_dict"] = {
        '0': "highest", '1': "2nd", '2': "3rd", '3': "4th", '4': "5th",
        '5': "6th", '6': "7th", '7': "8th", '8': "9th", '9': "10th",
        '10': "11th", '11': "lowest",
    }

    # Per-commune prediction table (hectares), with ground data mirrored.
    pred_dept_data = []
    pred_ground_dept_data = [
        ['Communes', 'Satellite Prediction', 'Ground Data Estimate']]
    for commune in CommuneSatellite.objects.filter(department=department_name):
        cover_ha = round(commune.cashew_tree_cover / 10000, 2)
        pred_dept_data.append([commune.commune, cover_ha])
        pred_ground_dept_data.append([commune.commune, cover_ha, cover_ha])
    data["pred_dept_data"] = pred_dept_data
    data["pred_ground_dept_data"] = pred_ground_dept_data

    dept_satellite = CommuneSatellite.objects.filter(department=department_name)
    dept_yield = BeninYield.objects.filter(department=department_name)

    def _stat(queryset, aggregation, key, post):
        # Aggregate and post-process; null sums/averages fall back to 0
        # (mirrors the original try/except around each round/int call).
        aggregated = queryset.aggregate(aggregation)
        try:
            return post(aggregated[key])
        except Exception:
            return 0

    tree_ha_pred_dept = _stat(
        dept_satellite, Sum('cashew_tree_cover'), 'cashew_tree_cover__sum',
        lambda v: int(round(v / 10000, 2)))
    data["tree_ha_pred_dept"] = tree_ha_pred_dept

    surface_area_d = _stat(
        dept_yield, Sum('surface_area'), 'surface_area__sum',
        lambda v: int(round(v, 2)))
    data["surface_area_d"] = surface_area_d

    total_yield_d = _stat(
        dept_yield, Sum('total_yield_kg'), 'total_yield_kg__sum',
        lambda v: int(round(v, 2)))
    data["total_yield_d"] = total_yield_d

    yield_ha_d = _stat(
        dept_yield, Avg('total_yield_per_ha_kg'), 'total_yield_per_ha_kg__avg',
        lambda v: int(round(v, 2)))
    data["yield_ha_d"] = yield_ha_d

    # Used only as a fallback when r_yield_tree_d below cannot be computed.
    yield_tree_d = _stat(
        dept_yield, Avg('total_yield_per_tree_kg'),
        'total_yield_per_tree_kg__avg', lambda v: int(round(v, 2)))
    data["yield_tree_d"] = yield_tree_d

    num_tree_d = _stat(
        dept_yield, Sum('total_number_trees'), 'total_number_trees__sum', int)
    data["num_tree_d"] = num_tree_d

    sick_tree_d = _stat(
        dept_yield, Sum('total_sick_trees'), 'total_sick_trees__sum', int)
    data["sick_tree_d"] = sick_tree_d

    out_prod_tree_d = _stat(
        dept_yield, Sum('total_trees_out_of_prod'),
        'total_trees_out_of_prod__sum', int)
    data["out_prod_tree_d"] = out_prod_tree_d

    dead_tree_d = _stat(
        dept_yield, Sum('total_dead_trees'), 'total_dead_trees__sum',
        lambda v: int(round(v, 2)))
    data["dead_tree_d"] = dead_tree_d

    region_size_d = area(feature['geometry']) / 10000
    try:
        active_trees_d = num_tree_d - sick_tree_d - out_prod_tree_d - dead_tree_d
    except Exception:
        active_trees_d = 0
    data["active_trees_d"] = active_trees_d

    def _sig_round(value):
        # Round to 2 significant figures (3 once the value reaches 90000);
        # return the value unchanged when rounding fails (zero, None, ...).
        try:
            sig = 1 if value < 90000 else 2
            return round(value, sig - int(floor(log10(abs(value)))))
        except Exception:
            return value

    r_tree_ha_pred_dept = _sig_round(tree_ha_pred_dept)
    data["r_tree_ha_pred_dept"] = r_tree_ha_pred_dept

    r_surface_area_d = _sig_round(surface_area_d)
    data["r_surface_area_d"] = r_surface_area_d

    r_total_yield_d = _sig_round(total_yield_d)
    data["r_total_yield_d"] = r_total_yield_d

    r_yield_ha_d = _sig_round(yield_ha_d)
    data["r_yield_ha_d"] = r_yield_ha_d

    try:
        yield_pred_dept = int(r_yield_ha_d * tree_ha_pred_dept)
    except Exception:
        yield_pred_dept = 0
    data["yield_pred_dept"] = yield_pred_dept

    data["r_yield_pred_dept"] = _sig_round(yield_pred_dept)

    try:
        r_yield_tree_d = round(r_total_yield_d / active_trees_d)
    except Exception:
        r_yield_tree_d = yield_tree_d
    data["r_yield_tree_d"] = r_yield_tree_d

    data["r_num_tree_d"] = _sig_round(num_tree_d)
    data["r_region_size_d"] = _sig_round(region_size_d)

    return data
e1982a1f610ea724ca8cf06f6641a4bc3428fa47
23,589
def hook(callback):
    """Install a global listener on all available mouses.

    `callback` is invoked with a mouse event — of type `mouse.ButtonEvent`,
    `mouse.WheelEvent` or `mouse.MoveEvent` — each time the mouse is moved,
    a key status changes, or the wheel is spun.

    Returns `callback` itself, so this can be used as a decorator.
    """
    handler = callback
    _listener.add_handler(handler)
    return handler
4bf0884de591fc4f0b30bee42b6e36b06c526134
23,590
def notification_list(request):
    """Build the template context holding the current user's notifications."""
    return {
        "notifications": Notification.get_notifications(user=request.user),
    }
c8e967fa8cef0dfd5cc9673c99289e17813f2e75
23,591
from typing import Iterator


def filter_samples_by_detected_language_via_langid(
    samples_iterator: Iterator[Sample],
    lang_code: str,
) -> Iterator[Sample]:
    """Return sample documents whose language detected by langid matches the expected language.

    Documents are converted to a simple text via the method
    `slub_docsa.data.preprocess.document.document_as_concatenated_string`.
    Samples whose text is None, or whose detected language differs from the
    expected one, are dropped.

    Parameters
    ----------
    samples_iterator: Iterator[Sample]
        an iterator over samples that is being filtered
    lang_code: str
        the expected language

    Returns
    -------
    Iterator[Sample]
        an iterator over samples only including samples that match the
        expected language
    """

    def _matches_expected_language(sample: Sample) -> bool:
        text = document_as_concatenated_string(sample.document)
        if text is None:
            return False
        detected_lang_code = detect_language_from_text_via_langid(text)
        if detected_lang_code == lang_code:
            return True
        logger.debug(
            "document '%s' with unexpected detected language of '%s'",
            sample.document.uri, detected_lang_code
        )
        logger.debug("document text begins with: %s", text[:100])
        return False

    return filter_samples_by_condition(samples_iterator, _matches_expected_language)
c6c952f46903fc491f88ba32dddb93398420c9d8
23,592
def predictClass(x, mus, sigmas, X_train, number_of_classes, class_probabilities):
    """Predict the class of `x` with every model, parametric and non-parametric.

    For each model it evaluates the Bayes-rule likelihood of every class and
    picks the class with maximum likelihood.

    :param x: The datapoint we want to derive the class for.
    :param mus: A list with the mean vector for each method. First three are
        for first class, next three for second class, etc.
    :param sigmas: A list with the covariance matrix for each method, ordered
        like `mus`.
    :param X_train: The train set - needed for the Parzen Windows method.
    :param number_of_classes: The number of different classes in the dataset.
    :param class_probabilities: An array with the probability of each class.
    :return: A vector with the predicted classes by each model.
    """
    num_models = int(len(mus) / 2)
    predictions = []

    # Parametric (Gaussian) methods: Bayes rule, one likelihood per class.
    # Index "jumps" over the other methods: model i for class c is at
    # position i + c * num_models in mus/sigmas.
    for model_idx in range(num_models):
        likelihoods = [
            gaussian(x,
                     mus[model_idx + cls * num_models],
                     sigmas[model_idx + cls * num_models])
            * class_probabilities[cls]
            for cls in range(number_of_classes)
        ]
        predictions.append(np.argmax(likelihoods))

    # Non-parametric method (Parzen windows).
    parzen_likelihoods = [
        question_d(X_train, x) * class_probabilities[cls]
        for cls in range(number_of_classes)
    ]
    predictions.append(np.argmax(parzen_likelihoods))

    return predictions
dbd9d6227c8877862d74d4bf1932f3f1acd37a2f
23,593
def create_secret_id(vault, name, version=None):
    """Build the KeyVaultId identifying a secret.

    :param vault: The vault uri.
    :type vault: str
    :param name: The secret name.
    :type name: str
    :param version: The secret version.
    :type version: str
    :rtype: KeyVaultId
    """
    collection = 'secrets'
    return create_object_id(collection, vault, name, version)
65c918a8f9c1f5c087835ff36a9eb13233bada2d
23,594
def config_output_page():
    """Configuration landing page for outputs.

    :return: rendered config.html
    """
    config_type = "output"

    # Load the provided configuration file, if it exists, and point the
    # config database at the "output" section.
    config = ConfigFile()
    config.load_from_file(DEFAULT_CONFIG_FILE)
    cdb = config.get_cdb()
    cdb.update_path(config_type)

    outputs = [config.get_output_from_data(doc) for doc in cdb.get_all()]
    output_types = config.get_outputs_available()
    config_descr = """ Outputs act as stores - seperate from the local database - for host information """
    return render_template('config.html',
                           items=outputs,
                           config_type=config_type,
                           config_descr=config_descr,
                           item_types=output_types)
346ae058db6e0081a37a5ebedd6d231f0a3204da
23,595
from typing import Optional


def compute_all_aggregator_metrics(
    per_plan_confidences: np.ndarray,
    predictions: np.ndarray,
    ground_truth: np.ndarray,
    metric_name: Optional[str] = None
):
    """Batch size B, we assume consistent number of predictions D per scene.

    per_plan_confidences: np.ndarray, shape (B, D), we assume that all
        prediction requests have the same number of proposed plans here.
    predictions: np.ndarray, shape (B, D, T, 2)
    ground_truth: np.ndarray, shape (B, T, 2), there is only one ground_truth
        trajectory for each prediction request.
    metric_name: Optional[str], if specified, compute a particular metric only.
    """

    def _select(candidates, is_mentioned):
        # With no metric_name every candidate is used; otherwise keep only
        # the candidates mentioned in metric_name.
        if metric_name is None:
            return list(candidates)
        chosen = [c for c in candidates if is_mentioned(c)]
        if not chosen:
            raise ValueError(f'Invalid metric name {metric_name} specified.')
        return chosen

    base_metrics = _select(VALID_BASE_METRICS,
                           lambda m: m.upper() in metric_name)
    aggregators = _select(VALID_AGGREGATORS, lambda a: a in metric_name)

    metrics_dict = defaultdict(list)
    for base_metric_name in base_metrics:
        base_metric = {
            'ade': average_displacement_error,
            'fde': final_displacement_error,
        }.get(base_metric_name)
        if base_metric is None:
            raise NotImplementedError

        # For each prediction request:
        for req_preds, req_gt, req_plan_confs in zip(
                predictions, ground_truth, per_plan_confidences):
            req_plan_losses = base_metric(
                predicted=req_preds, ground_truth=req_gt)
            req_plan_weights = _softmax_normalize(req_plan_confs)
            for aggregator in aggregators:
                metric_key = f'{aggregator}{base_metric_name.upper()}'
                metrics_dict[metric_key].append(
                    aggregate_prediction_request_losses(
                        aggregator=aggregator,
                        per_plan_losses=req_plan_losses,
                        per_plan_weights=req_plan_weights))

    return {key: np.stack(values) for key, values in metrics_dict.items()}
cd17c4c1273ec2a23aa16efebd7a4437dc4e16f7
23,596
import requests
import re


def query_url_base(_url, _proxy=True, _isPC=True, _isPhone=False):
    """Fetch a URL with `requests` and extract basic page data.

    Request-based module; it cannot collect data from dynamically rendered
    pages.

    :param _url: URL to request <str>
    :param _proxy: route the request through the local SOCKS5 proxy <bool>
    :param _isPC: send a desktop Chrome User-Agent <bool>
    :param _isPhone: send a mobile Safari User-Agent (used only when _isPC
        is False) <bool>
    :return: dict with extracted fields ('title' when found, plus 'text') <dict>
    """
    _result = {}
    # Bug fix: header value was misspelled 'kepp-alive'.
    _headers = {'Connection': 'keep-alive'}
    if _isPC:
        _headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36'
    elif _isPhone:
        _headers['User-Agent'] = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    # NOTE(review): this URL embeds an API key in source; move it to config.
    _ip_url = 'https://restapi.amap.com/v3/ip?output=json&key=880b9655c8c084258bfbedf98145a936'
    _proxy = {
        'http': 'socks5://127.0.0.1:1080',
        'https': 'socks5://127.0.0.1:1080',
    } if _proxy else None
    _pattern_dict = {
        'title': r"<(title|TITLE)>(?P<title>[^<>]+)</(title|TITLE)>"}
    # print(requests.get(_ip_url, proxies=_proxy).json())
    # NOTE(review): verify=False disables TLS certificate validation.
    response = requests.post(_url, proxies=_proxy, headers=_headers,
                             verify=False, timeout=30)
    content = response.text
    for k, v in _pattern_dict.items():
        _match = re.search(v, content)
        # Bug fix: the old code tested `re.match` (the function object, always
        # truthy) instead of the search result, so a page without a match
        # crashed with AttributeError on `_match.groupdict()`.
        if not _match:
            continue
        _result[k] = _match.groupdict()[k]
    _result['text'] = html2text(content)
    return _result
f916b974a472f6a3d079911461902da1ae7cb18d
23,597
def timefstring(dtobj, tz_name=True):
    """Standardize the format used for timestamp strings.

    When `tz_name` is True the timezone abbreviation (``%Z``) is appended
    if the datetime carries one; otherwise the literal suffix ``NTZ``
    ("no time zone") marks the value as naive.
    """
    if tz_name:
        fmt, suffix = "%Y-%m-%d_%H:%M:%S%Z", ""
    else:
        fmt, suffix = "%Y-%m-%d_%H:%M:%S", "NTZ"
    return dtobj.strftime(fmt) + suffix
5bbf0454a76ed1418cbc9c44de909940065fb51f
23,598
def is_block(modules):
    """Check if `modules` is a network building block.

    Despite the historical mention of ResNet, the check is against
    `ShuffleUnit` (ShuffleNet's building block).
    """
    return isinstance(modules, (ShuffleUnit, ))
ac6f059b763f25d81508826a3a8c8db5beb769b0
23,599