content: string (lengths 35 – 762k) | sha1: string (length 40) | id: int64 (0 – 3.66M)
def list_volumes(vg):
    """List logical volume paths for a given volume group.

    :param vg: volume group name
    :returns: a list of logical volume names for the given volume group
              Data format example: ['volume-aaa', 'volume-bbb', 'volume-ccc']
    """
    out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                             run_as_root=True)
    return [line.strip() for line in out.splitlines()]
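# Hypothetical usage sketch (assumes the `utils.execute` helper above and an
# existing volume group; the name 'cinder-volumes' is illustrative only):
#
#     lvs = list_volumes('cinder-volumes')
#     # -> e.g. ['volume-aaa', 'volume-bbb', 'volume-ccc']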
4cd613c8c10aaec443dce31cef8b132e3b2c65da
3,640,900
def question_aligned_passage_embedding(question_lstm_outs, document_embeddings,
                                       passage_aligned_embedding_dim):
    """Create the question-aligned passage embedding.

    Arguments:
        - question_lstm_outs: The output of the LSTM that processes the
          question word embeddings.
        - document_embeddings: The document embeddings.
        - passage_aligned_embedding_dim: The dimension of the passage aligned
          embedding.
    """

    def outer_sentence_step(document_embeddings, question_lstm_outs,
                            passage_aligned_embedding_dim):
        """Step function for PaddlePaddle's recurrent_group.

        In this function, the original input document_embeddings are scattered
        from a nested sequence into a sequence by recurrent_group in
        PaddlePaddle. The step function iterates over each sentence in the
        document.

        Arguments:
            - document_embeddings: The word embeddings of the document.
            - question_lstm_outs: The output of the LSTM that processes the
              question word embeddings.
            - passage_aligned_embedding_dim: The dimension of the passage
              aligned embedding.
        """

        def inner_word_step(word_embedding, question_lstm_outs,
                            question_outs_proj, passage_aligned_embedding_dim):
            """In this recurrent_group, the sentence embedding has been
            scattered into word embeddings. The step function iterates over
            each word in one sentence of the document.

            Arguments:
                - word_embedding: The word embeddings of the document.
                - question_lstm_outs: The output of the LSTM that processes
                  the question word embeddings.
                - question_outs_proj: The projection of question_lstm_outs
                  into a new hidden space.
                - passage_aligned_embedding_dim: The dimension of the passage
                  aligned embedding.
            """
            doc_word_expand = paddle.layer.expand(
                input=word_embedding,
                expand_as=question_lstm_outs,
                expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)

            weights = paddle.layer.fc(
                input=[question_lstm_outs, doc_word_expand],
                size=1,
                bias_attr=False,
                act=paddle.activation.SequenceSoftmax())

            weighted_candidates = paddle.layer.scaling(
                input=question_outs_proj, weight=weights)
            return paddle.layer.pooling(
                input=weighted_candidates, pooling_type=paddle.pooling.Sum())

        question_outs_proj = paddle.layer.fc(
            input=question_lstm_outs,
            bias_attr=False,
            size=passage_aligned_embedding_dim)
        return paddle.layer.recurrent_group(
            input=[
                paddle.layer.SubsequenceInput(document_embeddings),
                paddle.layer.StaticInput(question_lstm_outs),
                paddle.layer.StaticInput(question_outs_proj),
                passage_aligned_embedding_dim,
            ],
            step=inner_word_step,
            name="iter_over_word")

    return paddle.layer.recurrent_group(
        input=[
            paddle.layer.SubsequenceInput(document_embeddings),
            paddle.layer.StaticInput(question_lstm_outs),
            passage_aligned_embedding_dim
        ],
        step=outer_sentence_step,
        name="iter_over_sen")
8dbcb298a24ec18da4904a8f48a7c63331b27c91
3,640,901
def lm_loss_fn(forward_fn, vocab_size, params, rng, data, is_training=True):
    """Compute the loss on data wrt params."""
    logits = forward_fn(params, rng, data, is_training)
    targets = hk.one_hot(data['target'], vocab_size)
    assert logits.shape == targets.shape

    mask = jnp.greater(data['obs'], 0)
    loss = -jnp.sum(targets * jax.nn.log_softmax(logits), axis=-1)
    loss = jnp.sum(loss * mask) / jnp.sum(mask)

    return loss
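# Minimal sketch of how the padding mask behaves, assuming observation id 0 is
# the pad token (as implied by `jnp.greater(data['obs'], 0)` above); purely
# illustrative, not part of the original module:
#
#     import jax.numpy as jnp
#     obs = jnp.array([5, 3, 0, 0])   # two real tokens, two pads
#     mask = jnp.greater(obs, 0)      # [True, True, False, False]
#     # per-token losses at padded positions are zeroed and excluded from the mean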
44188d717759a82d80079b5e4f7309b3cf7b5cb0
3,640,902
import argparse
import logging
import os


def ParseArguments():
  """Parse command line arguments, validate them, and return them.

  Returns:
    A dict of:
      {'args': argparse arguments, see below,
       'cur_ip': the ip address of the target, as packed binary,
       'cur_node_index': the current node index of the target,
       'cur_node_name': the current node name of the target,
       'new_ip': the new ip address of the target, as packed binary,
       'new_node_index': the new node index of the target,
       'new_node_name': the new node name of the target,
       'update_type': the update type, a shortname of update_type_helper}

  Raises:
    RuntimeError: if run from outside the Makani workspace without specifying
        --tms570_bin.
    ValueError: if the binary the user supplied doesn't match the target type.
    ValueError: if user passes --dump_image without a .elf file.
    ValueError: if update is a param type, but the file doesn't end in '.bin'.
    ValueError: if the update type in the filename isn't recognized.
    ValueError: if the update type is 'CalibParams' but we don't see --calib.
    ValueError: if the update type is not 'CalibParams' but we see --calib.
    ValueError: if the update type is 'SerialParams' but we don't see --serial.
    ValueError: if the update type is not 'SerialParams' but we see --serial.
    ValueError: if the update type is 'CarrierSerialParams' but we don't see
        --serial.
    ValueError: if the update type is not 'CarrierSerialParams' but we see
        --serial.
  """
  parser = argparse.ArgumentParser(
      description='Burn an application or parameter set to a board.')
  parser.add_argument('--target', required=True,
                      help='board to burn, e.g. MOTOR_PBO or FC_A.')
  parser.add_argument('file', help='binary to burn, e.g motor_application.elf '
                      'or servo_config_params.bin')
  parser.add_argument('--dump_image', action='store_true',
                      help='Output intermediate .bin file instead of'
                      ' sending it to the device.')
  parser.add_argument('--calib', action='store_true',
                      help='Add this flag to burn calibration parameters.')
  parser.add_argument('--serial', action='store_true',
                      help='Add this flag to burn serial parameters.')
  parser.add_argument('--carrier_serial', action='store_true',
                      help='Add this flag to burn carrier serial'
                      ' parameters.')
  parser.add_argument('--config', action='store_true',
                      help='Add this flag to burn config parameters.')
  parser.add_argument('--bootloader', action='store_true',
                      help='Add this flag to burn a bootloader.')
  parser.add_argument('--override_target',
                      help='Override target identity in bootloader image.')
  parser.add_argument('--force_hardware',
                      help='Burn e.g. an Fc board, rather than an Aio board.\n'
                      'use with argument "new" or "old".')
  parser.add_argument('--ignore_mismatch', action='store_true',
                      help='Ignore mismatch between binary and board app '
                      'type, ip address, etc.')
  # TODO: Allow override of IP address.

  args = parser.parse_args()
  args.application = not (args.calib or args.serial or args.carrier_serial
                          or args.config or args.bootloader)
  if (args.calib + args.serial + args.carrier_serial + args.config
      + args.bootloader + args.application) != 1:
    raise ValueError('Cannot specify more than one update type (calib, '
                     'serial, carrier_serial, config, or bootloader).')
  if args.force_hardware and not ParseHardwareType(args.force_hardware):
    raise ValueError('Unknown hardware type "%s"; please specify a valid '
                     'HardwareType.' % args.force_hardware)

  target_info = GetTargetInfo(args.target)
  file_info = GetInfoFromFileName(os.path.basename(args.file))

  if args.dump_image and not args.file.endswith('.elf'):
    raise ValueError('--dump_image requires an .elf file.')
  if args.calib and file_info['update_type'] != 'CalibParams':
    raise ValueError('That does not look like a calib param file to me.')
  if file_info['update_type'] == 'CalibParams' and not args.calib:
    raise ValueError('If you really want to burn calib params, pass --calib.')
  if args.serial and file_info['update_type'] != 'SerialParams':
    raise ValueError('That does not look like a serial param file to me.')
  if file_info['update_type'] == 'SerialParams' and not args.serial:
    raise ValueError('If you really want to burn serial params, '
                     'pass --serial.')
  if args.carrier_serial and file_info['update_type'] != 'CarrierSerialParams':
    raise ValueError('That does not look like a carrier serial param'
                     ' file to me.')
  if (file_info['update_type'] == 'CarrierSerialParams'
      and not args.carrier_serial):
    raise ValueError('If you really want to burn carrier serial params,'
                     ' pass --carrier_serial.')
  if args.bootloader and file_info['update_type'] != 'Bootloader':
    raise ValueError('That does not look like a bootloader file to me.')
  if file_info['update_type'] == 'Bootloader' and not args.bootloader:
    raise ValueError(
        'If you really want to burn a bootloader, pass --bootloader.')
  if args.override_target and file_info['update_type'] != 'Bootloader':
    raise ValueError('--override_target only supported with --bootloader.')

  if args.override_target:
    new_target_info = GetTargetInfo(args.override_target)
  else:
    new_target_info = target_info

  logging.info('Attempting to flash %s segment on target %s [%s, index %d].',
               file_info['update_type'], target_info['node_name'],
               target_info['ip_address'], target_info['node_index'])
  logging.info('Flashing file %s.', args.file)

  return {'args': args,
          'cur_ip': target_info['ip_address'],
          'cur_node_index': target_info['node_index'],
          'cur_node_name': target_info['node_name'],
          'new_ip': new_target_info['ip_address'],
          'new_node_index': new_target_info['node_index'],
          'new_node_name': new_target_info['node_name'],
          'file': args.file,
          'update_type': file_info['update_type']}
1fd47dafe3fcc30b6bf18174fe3da9f10e3e6c2b
3,640,903
def chroms_from_build(build):
    """
    Get list of chromosomes from a particular genome build

    Args:
        build str

    Returns:
        chrom_list list
    """
    chroms = {'grch37': [str(i) for i in range(1, 23)],
              'hg19': ['chr{}'.format(i) for i in range(1, 23)]
              # chroms = {'grch37': [i for i in range(1, 23)] + ['X', 'Y'],
              }
    try:
        return chroms[build]
    except KeyError:
        raise ValueError("Oops, I don't recognize the build {}".format(build))
c87431911c07c00aaa63357771258394cfff859e
3,640,904
def get_ready_count_string(room: str) -> str:
    """Returns a string representing how many players in a room are ready.

    Args:
        room (str): The room code of the players.

    Returns:
        str: A string representing how many players in a room are ready, in
        the format '[ready count]/[alive player count]'.
    """
    player_count = 0
    ready_count = 0
    players = get_players(room)
    for player in players:
        if player.is_alive:
            player_count += 1
            if player.ready:
                ready_count += 1
    return f'{ready_count}/{player_count}'
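# Illustrative example, assuming a hypothetical room whose `get_players`
# returns three alive players of which two are ready:
#
#     get_ready_count_string('ABCD')  # -> '2/3'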
eb8ae2a308ccd58355de5a8a15629bfccd1fcc2c
3,640,905
from typing import List


def switches(topology: 'Topology') -> List['Node']:
    """
    @param topology: the topology whose nodes should be searched
    @return: all nodes of type ``DeviceType.SWITCH``
    """
    return filter_nodes(topology, type=DeviceType.SWITCH)
e489740b29f8aff7368147274d020cb467422669
3,640,906
def geometric_progression(init, ratio):
    """
    Generate a geometric progression starting from 'init' and repeatedly
    multiplying by 'ratio'.
    """
    return _iterate(lambda x: x * ratio, init)
6b2626bc9d4016518b1cc7e41b63d34924c1ee30
3,640,907
import urllib.parse


def resolve(marathon_lb_url):
    """Return the individual URLs for all available Marathon-LB instances
    given a single URL to a DNS-balanced Marathon-LB cluster.

    Marathon-LB typically uses DNS for load balancing between instances and
    so the address provided by the user may actually be multiple
    load-balanced instances. This function uses DNS to look up the hostnames
    (IPv4 A-records) of each instance, returning them all to the caller for
    use as required.
    """
    url = urllib.parse.urlparse(marathon_lb_url)
    all_hosts = _get_alias_records(url.hostname)
    resolved_urls = _reassemble_urls(url, all_hosts)
    return resolved_urls
f192d66a8a12d772ad33b2b8030796af2393ec16
3,640,908
def _parse_bluetooth_info(data):
    """Parse a raw byte sequence into a BluetoothInfo tuple."""
    # Combine the bytes as a char string and then strip off extra bytes.
    name = ''.join(chr(i) for i in data[:16]).partition('\0')[0]
    return BluetoothInfo(name,
                         ''.join(chr(i) for i in data[16:28]),
                         ''.join(chr(i) for i in data[29:]))
ef46576102cfb5d1df0b40e84529a89e2ed6bfa8
3,640,909
async def get_reverse_objects_topranked_for_lst(entities):
    """
    Get pairs that point to the given entity as the primary property.
    Primary properties are those with the highest rank per property.
    """
    # run the query
    res = await runQuerySingleKey(cacheReverseObjectTop, entities, """
    SELECT ?base ?prop ?parent
    WHERE {
        VALUES ?base { %s }
        ?parent ?prop ?base .
        FILTER( ?prop NOT IN (""" + ex_cls + """) )  # exclude wikilinks and redirects
    }
    LIMIT """ + str(config.RESULTS_LIMIT) + """
    """)
    return res
d975ba3ac3a0983d3a08057c91cd96ca466708df
3,640,910
def LU_razcep(A):
    """Return the decomposition of A as ``[L\\U]``."""
    # elimination
    for p, pivot_vrsta in enumerate(A[:-1]):
        for i, vrsta in enumerate(A[p + 1:]):
            if pivot_vrsta[p]:
                m = vrsta[p] / pivot_vrsta[p]
                vrsta[p:] = vrsta[p:] - pivot_vrsta[p:] * m
                vrsta[p] = m
    return A
79d6a00b4e16254739b987228fd506cae133907b
3,640,911
def jni_request_identifiers_for_type(field_type, field_reference_name,
                                     field_name, object_name="request"):
    """
    Generates jni code that defines a C variable corresponding to a field of
    a java object (dto or custom type). To be used in request message
    handlers.

    :param field_type: type of the field to be initialized (as defined in vpe.api)
    :param field_reference_name: name of the field reference in generated code
    :param field_name: name of the field (camelcase)
    :param object_name: name of the object to be initialized
    """
    # field identifiers
    jni_type = util.vpp_2_jni_type_mapping[field_type]
    jni_signature = util.jni_2_signature_mapping[field_type]
    jni_getter = util.jni_field_accessors[field_type]

    # field identifier
    return request_field_identifier_template.substitute(
        jni_type=jni_type,
        field_reference_name=field_reference_name,
        field_name=field_name,
        jni_signature=jni_signature,
        jni_getter=jni_getter,
        object_name=object_name)
4f23ba559124b938fa82a044ae1adc0f16f4a7ad
3,640,912
def _ValidateDuration(arg_internal_name, arg_value):
  """Validates an argument which should have a Duration value."""
  try:
    if isinstance(arg_value, basestring):
      return TIMEOUT_PARSER(arg_value)
    elif isinstance(arg_value, int):
      return TIMEOUT_PARSER(str(arg_value))
  except arg_parsers.ArgumentTypeError as e:
    raise InvalidArgException(arg_internal_name, e.message)
  raise InvalidArgException(arg_internal_name, arg_value)
b08b65831e04ece410be7f0a490cd6ebf7bcaa6f
3,640,913
def get_jaccard_dist1(y_true, y_pred, smooth=default_smooth):
    """Helper to get Jaccard distance (for loss functions).

    Note: This mirrors what others in the ML community have been using even
    for non-binary vectors.
    """
    return 1 - get_jaccard_index1(y_true, y_pred, smooth)
c64ba7fd81c3697bc472d372afeb940e19d35e3c
3,640,914
from pathlib import Path
from typing import Dict
import json
import warnings

# Assumed source of `parse`: jsonpath-ng's extended parser, which supports the
# filter syntax used in num_obi_codes below.
from jsonpath_ng.ext import parse


def deduplicate_obi_codes(fname: Path) -> None:
    """
    Remove duplicate http://terminology.hl7.org/CodeSystem/v2-0203#OBI codes
    from an instance.

    When using the Medizininformatik Initiative Profile LabObservation,
    SUSHI v2.1.1 inserts the identifier.type code for
    http://terminology.hl7.org/CodeSystem/v2-0203#OBI twice, but it has a
    cardinality of 1, resulting in an error by the FHIR validator. This
    workaround function actively removes the duplicates.

    MII Profile:
    https://www.medizininformatik-initiative.de/fhir/core/modul-labor/StructureDefinition/ObservationLab

    :param fname: Filename of instance to remove duplicates from
    :return: None
    """

    def num_obi_codes(json_data: Dict):
        jp = parse(
            "$.type.coding[?code = 'OBI' & system='http://terminology.hl7.org/CodeSystem/v2-0203']"
        )
        return len(jp.find(json_data))

    def del_obi_codes(identifier: Dict):
        codings = identifier["type"]["coding"]
        for i, coding in enumerate(codings):
            if (
                coding["system"] == "http://terminology.hl7.org/CodeSystem/v2-0203"
                and coding["code"] == "OBI"
            ):
                del codings[i]
                break

    json_data = json.load(open(fname))

    if "identifier" not in json_data:
        return

    for identifier in json_data["identifier"]:
        if num_obi_codes(identifier) > 1:
            warnings.warn(f"Found multiple OBI codes in {fname}, removing")
            del_obi_codes(identifier)

    json.dump(json_data, open(fname, "w"), indent=2)
336a143e30224b64c39358137bab26e4013c5049
3,640,915
def fold_conv_bns(onnx_file: str) -> onnx.ModelProto:
    """
    When a batch norm op is the only child operator of a conv op, this
    function will fold the batch norm into the conv and return the processed
    graph.

    :param onnx_file: file path to ONNX model to process
    :return: A loaded ONNX model with BatchNormalization ops folded into Conv
        ops where possible
    """
    model = onnx.load(onnx_file)
    conv_nodes = [n for n in model.graph.node if n.op_type == "Conv"]

    graph_modified = False
    for conv_node in conv_nodes:
        conv_output = conv_node.output[0]
        child_nodes = [n for n in model.graph.node if conv_output in n.input]

        # Check if the only child of the conv output is a batch norm op
        if len(child_nodes) == 1 and child_nodes[0].op_type == "BatchNormalization":
            bn_node = child_nodes[0]
            fold_performed = _fold_conv_bn(model, conv_node, bn_node)
            graph_modified = fold_performed or graph_modified

    return model if graph_modified else None
25c2748b0e964310cc9909b60e68a9740e3e0df1
3,640,916
def numdays(year, month):
    """
    numdays returns the number of days in the given month of the given year.

    Args:
        year
        month

    Returns:
        ndays: number of days in month
    """
    NDAYS = list([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])

    assert(year >= 0)
    assert(1 <= month and month <= 12)

    ndays = NDAYS[month - 1]
    # Check for leap year for February
    if ((month == 2) and leapyear(year)):
        ndays += 1
    return ndays
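# Example calls (these assume the `leapyear(year)` helper referenced above,
# returning True for leap years):
#
#     numdays(2020, 2)  # -> 29  (2020 is a leap year)
#     numdays(2021, 2)  # -> 28
#     numdays(2021, 4)  # -> 30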
159a41f3706b087194e0ba5d107a1ceb88583c21
3,640,917
def normalise_diversity_year_df(y_div_df):
    """Normalises a dataframe with diversity information by year and
    parametre set."""
    yearly_results_norm = []

    # For each possible diversity metric it pivots over parametre sets
    # and calculates the zscore for the series
    for x in set(y_div_df["diversity_metric"]):
        yearly_long = y_div_df.query(f"diversity_metric == '{x}'").pivot_table(
            index=["year", "diversity_metric"],
            columns="parametre_set",
            values="score",
        )
        yearly_long_norm = yearly_long.apply(zscore)
        yearly_results_norm.append(yearly_long_norm)

    # Concatenate and melt so they can be visualised with altair
    y_div_df_norm = (
        pd.concat(yearly_results_norm)
        .reset_index(drop=False)
        .melt(
            id_vars=["year", "diversity_metric"],
            var_name="parametre_set",
            value_name="score",
        )
    )
    return y_div_df_norm
83e12072e65a707dd61b98383ce295fac8e9f2f7
3,640,918
def allowed_file(filename):
    """Does filename have the right extension?"""
    return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
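# Illustrative check, assuming ALLOWED_EXTENSIONS = {'png', 'jpg'}:
#
#     allowed_file('photo.png')    # -> True
#     allowed_file('archive.tar')  # -> False
#     allowed_file('README')       # -> False (no extension)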
f42ac5ef5470515258715b4552945206a440effb
3,640,919
from typing import Any
from typing import Callable as CallableType  # assumed alias; `CallableType` is used in the signature below
from typing import Dict
from typing import List
from typing import Optional

import tokenize  # NOTE: the project's own tokenizer (callable as tokenize(...)) is assumed here, not the stdlib module


def render(
    template: str,
    context: Dict,
    serializer: Optional[CallableType[[Any], str]] = None,
    partials: Optional[Dict] = None,
    missing_variable_handler: Optional[CallableType[[str, str], str]] = None,
    missing_partial_handler: Optional[CallableType[[str, str], str]] = None,
    cache_tokens: bool = False,
) -> str:
    """Render a mustache template"""
    serializer = serializer or default_serializer
    missing_variable_handler = missing_variable_handler or missing_variable_default
    missing_partial_handler = missing_partial_handler or missing_partial_default
    partials = partials or {}
    output: str = ''
    context_stack: List = [context]
    env_stack: List = []
    left_delimiter: str = '{{'
    right_delimiter: str = '}}'
    pointer: int = 0
    tokens = []
    if cache_tokens:
        tokens = list(tokenize(template, 0, left_delimiter, right_delimiter))
    while True:
        if cache_tokens:
            try:
                (token, value, indentation), position_pointer = tokens[pointer]
                pointer += 1
            except IndexError:
                break
        else:
            try:
                (token, value, indentation), pointer = next(
                    tokenize(template, pointer, left_delimiter, right_delimiter)
                )
                position_pointer = pointer
            except StopIteration:
                break

        current_context = context_stack[-1]

        if token is Token.SET_DELIMITER:
            new_delimiters = value.strip().split(' ')
            left_delimiter = new_delimiters[0]
            right_delimiter = new_delimiters[-1]

        if token is Token.END:
            current_env = env_stack[-1]
            context_stack.pop()
            env_name, env_pointer, [env_var, _] = current_env
            if should_iterate(env_var):
                current_env[2][1] += 1
                try:
                    next_item = env_var[current_env[2][1]]
                    context_stack.append(next_item)
                    pointer = env_pointer
                    continue
                except IndexError:
                    pass
            if env_name != value:
                raise MustacheSyntaxError.from_template_pointer(
                    f'Unexpected section end tag on line {{line_number}}. '
                    f'Expected "{env_name}" got "{value}"',
                    template,
                    position_pointer,
                )
            env_stack.pop()

        if not current_context and len(context_stack) != 1:
            if token in [Token.SECTION, Token.INVERTED]:
                context_stack.append(False)
                env_stack.append([value, pointer, [False, 0]])
            continue

        if token in [Token.NO_ESCAPE, Token.VARIABLE, Token.SECTION, Token.INVERTED]:
            try:
                variable = get_from_context(context_stack, value)
            except MissingVariable:
                variable = missing_variable_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
        else:
            variable = None

        if token is Token.LITERAL:
            output += value
        elif token is Token.NO_ESCAPE:
            output += serializer(variable)
        elif token is Token.VARIABLE:
            output += escape(serializer(variable))
        elif token in [Token.SECTION, Token.INVERTED]:
            if token is Token.INVERTED:
                variable = not variable
            if should_iterate(variable):
                try:
                    context_item = variable[0]
                    context_stack.append(context_item)
                except IndexError:
                    context_stack.append(False)
            else:
                context_stack.append(variable)
            env_stack.append([value, pointer, [variable, 0]])
        elif token is Token.PARTIAL:
            partial_template = partials.get(value)
            # potentially raise error here
            if partial_template is None:
                partial_template = missing_partial_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
            if partial_template != '':
                remove_trailing_indentation = False
                if partial_template.endswith('\n'):
                    remove_trailing_indentation = True
                partial_template = indentation + f'\n{indentation}'.join(
                    partial_template.split('\n')
                )
                if remove_trailing_indentation:
                    partial_template = partial_template[: -len(indentation)]
                partial_output = render(
                    partial_template, current_context,
                    serializer=serializer, partials=partials
                )
                output += partial_output

    return output
b660c0ac97915121d061fd5c7dde8cccea42f03f
3,640,920
def preprocess_observations(input_observation, prev_processed_observation,
                            input_dimensions):
    """Convert the 210x160x3 uint8 frame into a 6400 float vector."""
    processed_observation = input_observation[35:195]  # crop
    processed_observation = downsample(processed_observation)
    processed_observation = remove_color(processed_observation)
    processed_observation = remove_background(processed_observation)
    # everything else (paddles, ball) just set to 1
    processed_observation[processed_observation != 0] = 1

    # Convert from 80 x 80 matrix to 6400 x 1 matrix
    processed_observation = processed_observation.astype(np.float).ravel()

    # subtract the previous frame from the current one so we are only
    # processing on changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        input_observation = np.zeros(input_dimensions)

    # store the previous frame so we can subtract from it next time
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
885fbb2a1f81200843bb15d37f3c13726c23ea90
3,640,921
def expand_configuration(configuration):
    """Fill up backups with defaults."""
    for backup in configuration['backups']:
        for field in _FIELDS:
            if field not in backup or backup[field] is None:
                if field not in configuration:
                    backup[field] = None
                else:
                    backup[field] = configuration[field]
    return configuration['backups']
218f5c5cb67d3fa0f52b453d3cd00cde40835025
3,640,922
def create_feature_extractor(input_shape: tuple, dropout: float = 0.3,
                             kernel_size: tuple = (3, 3, 3)) -> tf.keras.Sequential:
    """
    Create feature extracting model

    :param input_shape: shape of input Z, X, Y, channels
    :param dropout: dropout rate applied after the convolutional stack
    :param kernel_size: size of the 3D convolution kernels
    :return: feature extracting model
    """
    model = Sequential()
    model.add(Conv3D(filters=4, kernel_size=kernel_size, padding='same',
                     activation='relu', strides=(2, 2, 2),
                     input_shape=input_shape))
    model.add(Conv3D(filters=8, kernel_size=kernel_size, padding='same',
                     activation='relu', strides=(2, 2, 2)))
    model.add(Conv3D(filters=16, kernel_size=kernel_size, padding='same',
                     activation='relu', strides=(2, 2, 2)))
    model.add(Dropout(dropout))
    return model
47f52bab452e6bf7c9875a3c9c85bed02b79fcdc
3,640,923
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up the component."""
    hass.data.setdefault(DOMAIN, {})

    async_add_defaults(hass, config_entry)

    router = KeeneticRouter(hass, config_entry)
    await router.async_setup()

    undo_listener = config_entry.add_update_listener(update_listener)

    hass.data[DOMAIN][config_entry.entry_id] = {
        ROUTER: router,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )

    return True
beac0da52a530aa63495003b78a87638b869779c
3,640,924
def log_set_level(client, level):
    """Set log level.

    Args:
        level: log level we want to set. (for example "DEBUG")
    """
    params = {'level': level}
    return client.call('log_set_level', params)
d9f0c7cda877497acbe832bbc6c767bf331d0dc5
3,640,925
import logging
import subprocess

LOG = logging.getLogger(__name__)  # module-level logger (assumed by the code below)


def exec_local_command(cmd):
    """
    Execute a command in the local shell and return stdout as a string.
    Raises CalledProcessError in case of a non-zero return code.

    Args:
        cmd: command as a string

    Return:
        STDOUT
    """
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, error = proc.communicate()
    retcode = proc.poll()
    if retcode:
        LOG.error("{0} returned status {1}: {2}".format(cmd, retcode, error))
        # CalledProcessError requires the return code and the command
        raise subprocess.CalledProcessError(retcode, cmd)
    else:
        return output
be0912712f4f5987b848cb08422fc6601be62653
3,640,926
import numpy as np
import matplotlib.pyplot as plt


def OGH(p0, p1, v0, v1, t0, t1, t):
    """Optimized geometric Hermite curve."""
    s = (t - t0) / (t1 - t0)
    a0 = (6 * np.dot((p1 - p0).T, v0) * np.dot(v1.T, v1)
          - 3 * np.dot((p1 - p0).T, v1) * np.dot(v0.T, v1)) \
        / ((4 * np.dot(v0.T, v0) * np.dot(v1.T, v1)
            - np.dot(v0.T, v1) * np.dot(v0.T, v1)) * (t1 - t0))
    a1 = (3 * np.dot((p1 - p0).T, v0) * np.dot(v0.T, v1)
          - 6 * np.dot((p1 - p0).T, v1) * np.dot(v0.T, v0)) \
        / ((np.dot(v0.T, v1) * np.dot(v0.T, v1)
            - 4 * np.dot(v0.T, v0) * np.dot(v1.T, v1)) * (t1 - t0))

    # Cubic Hermite basis functions evaluated at s
    h0 = (2 * s + 1) * (s - 1) * (s - 1)
    h1 = (-2 * s + 3) * s * s
    h2 = (1 - s) * (1 - s) * s
    h3 = (s - 1) * s * s

    plt.plot([p0[0], p1[0]], [p0[1], p1[1]], ':c')
    plt.plot([p0[0], (p0 + v0)[0]], [p0[1], (p0 + v0)[1]], '-g')
    plt.plot([p1[0], (p1 + v1)[0]], [p1[1], (p1 + v1)[1]], '-g')

    return h0 * p0 + h1 * p1 + h2 * v0 * a0 + h3 * v1 * a1
8bf86bbb2105ec26586a3568bb1a6448b284fbec
3,640,927
def permutation_test(v1, v2, iter=1000):
    """
    Conduct Permutation test

    Parameters
    ----------
    v1 : array
        Vector 1.
    v2 : array
        Vector 2.
    iter : int. Default is 1000.
        The times for iteration.

    Returns
    -------
    p : float
        The permutation test result, p-value.
    """
    if len(v1) != len(v2):
        return "Invalid input"

    # permutation test
    diff = abs(np.average(v1) - np.average(v2))
    v = np.hstack((v1, v2))
    nv = v.shape[0]
    ni = 0
    for i in range(iter):
        vshuffle = np.random.permutation(v)
        vshuffle1 = vshuffle[:int(nv / 2)]
        vshuffle2 = vshuffle[int(nv / 2):]
        diff_i = np.average(vshuffle1) - np.average(vshuffle2)
        if diff_i >= diff:
            ni = ni + 1

    # permutation test p-value
    p = np.float64(ni / iter)
    return p
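# Self-contained usage sketch (synthetic data, illustrative only):
#
#     import numpy as np
#     v1 = np.random.normal(0.0, 1.0, 20)
#     v2 = np.random.normal(0.5, 1.0, 20)
#     p = permutation_test(v1, v2, iter=1000)  # small p suggests the means differ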
3b618069b610d0ee37e8bcb32f814e34efaeebab
3,640,928
def registered_paths():
    """Return paths added via registration

    .. note:: This returns a copy of the registered paths and can
              therefore not be modified directly.
    """
    return list(_registered_paths)
4bd8471fc2bff1e09a84b1ae8878c0db5f7afd65
3,640,929
import torch


def nms_dynamic(ctx, g, boxes: Tensor, scores: Tensor,
                max_output_boxes_per_class: int, iou_threshold: float,
                score_threshold: float):
    """Rewrite symbolic function for default backend.

    Support max_output_boxes_per_class, iou_threshold, score_threshold of
    constant Tensor, which is aligned with ONNX's nms op.

    Args:
        ctx (ContextCaller): The context with additional information.
        g (Graph): The traced onnx graph.
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output boxes per
            class of nms.
        iou_threshold (float): IOU threshold of nms.
        score_threshold (float): score threshold of nms.

    Returns:
        NonMaxSuppression op for onnx.
    """
    if not sym_help._is_value(max_output_boxes_per_class):
        max_output_boxes_per_class = g.op(
            'Constant',
            value_t=torch.tensor(max_output_boxes_per_class, dtype=torch.long))

    if not sym_help._is_value(iou_threshold):
        iou_threshold = g.op(
            'Constant',
            value_t=torch.tensor([iou_threshold], dtype=torch.float))

    if not sym_help._is_value(score_threshold):
        score_threshold = g.op(
            'Constant',
            value_t=torch.tensor([score_threshold], dtype=torch.float))

    return g.op('NonMaxSuppression', boxes, scores,
                max_output_boxes_per_class, iou_threshold, score_threshold)
6b6eea9ce2f2fe84cabb85ddbb069732fa78cca9
3,640,930
from typing import Union
from datetime import datetime
import pytz


def api_timestamp_to_datetime(api_dt: Union[str, dict]):
    """Converts the datetime string returned by the API to a python datetime
    object."""
    # Somehow this string is formatted with 7 digits for 'microsecond'
    # resolution, so crop the last digit (and trailing Z). The cropped string
    # will be written into api_dt_str_mod.
    api_dt_str_mod = None
    if isinstance(api_dt, str):
        api_dt_str_mod = api_dt[:-2]
    elif isinstance(api_dt, dict):
        api_dt_str_mod = api_dt["dateTime"][:-2]
    else:
        raise TypeError("api_dt must be a str or a dict")

    dt = datetime.strptime(api_dt_str_mod, "%Y-%m-%dT%H:%M:%S.%f")
    dt = pytz.utc.localize(dt)
    return dt
26f4828a19d17c883a8658eb594853158d70fbcf
3,640,931
from typing import List


def calc_mutation(offsprings: List[List[List[int]]], mut_rate: float,
                  genes_num: int) -> List[List[List[int]]]:
    """
    Optional hook: when provided and it returns a value other than None, the
    simulator will use it instead of the default mutation operator. If you do
    not intend to use it, leave it as `return None`.
    """
    return None
d665b7c2ff8ddfa2c4b905c3d6bab02028e30ec2
3,640,932
def compute_targets(ex_rois, gt_rois, weights=(1.0, 1.0, 1.0, 1.0)):
    """Compute bounding-box regression targets for an image."""
    return box_utils.bbox_transform_inv(ex_rois, gt_rois, weights).astype(
        np.float32, copy=False
    )
de2a65b5c3c44bbd4bffcd0d99143982ed4c031c
3,640,933
from types import GeneratorType


def _args_filter(args):
    """
    The zenith db api only accepts a list of tuple arguments for bind
    execute; that is ungainly, so we normalise all kinds of arguments into a
    list of tuples.
    """
    if isinstance(args, (GeneratorType, )):
        args = list(args)
    if len(args) <= 0:
        return []
    if isinstance(args[0], (tuple, list,)):
        return [tuple(v) for v in args]
    else:
        return [tuple(args), ]
af9a836c1389acc4e0faf0d08e47ef8a39e57345
3,640,934
def getAreaDF(spark):
    """
    Returns a Spark DF containing the BLOCK geocodes and the Land and Water
    area columns

    Parameters
    ==========
    spark : SparkSession

    Returns
    =======
    a Spark DF

    Notes
    =====
    - Converts the AREALAND and AREAWATER columns from square meters to
      square miles
    - Used primarily for calculating Population Density
    """
    area_cols = ['AREALAND', 'AREAWATER']
    area = getGRFC(spark, columns=area_cols)
    for area_col in area_cols:
        area = area.withColumn(area_col, sf.col(area_col).cast("long")).persist()
        # calculation for converting square meters (current units for AREALAND
        # from the GRFC) to square miles:
        # square miles = square meters / 2,589,988
        # https://www.census.gov/quickfacts/fact/note/US/LND110210
        area = area.withColumn(area_col, sf.col(area_col) / sf.lit(2589988)).persist()
    area = area.withColumn("AREA_SQUARE_MILES", sf.expr(" + ".join(area_cols))).persist()
    return area
181e84e98ca2cf83be0cf5dbf41a8dbc46b88ad4
3,640,935
import os
import subprocess


def run_command(command, filename=None, repeat=1, silent=False):
    """
    Run `command` with `filename` positional argument in the directory of
    the `filename`. If `filename` is not given, run only the command.
    """
    if filename is not None:
        fdir = os.path.dirname(os.path.abspath(filename))
        fname = os.path.basename(filename)
        cmd = command + ' ' + fname
    else:
        fdir = None
        cmd = command

    status = 0
    for ii in range(repeat):
        if silent:
            with open(os.devnull, 'w') as devnull:
                st = subprocess.call(cmd.split(), cwd=fdir,
                                     stdout=devnull, stderr=devnull)
        else:
            st = subprocess.call(cmd.split(), cwd=fdir)
        status = status or st

    return status
0f24c79f9d557198645de75fe7160af41638ecb6
3,640,936
def how_many():
    """Check current number of issues waiting in SQS."""
    if not is_request_valid(request):
        abort(400)
    lapdog_instance = Lapdog()
    lapdog_instance.how_many()
    return jsonify(
        response_type="in_channel",
        text="There are 4 issues waiting to be handled",
    )
db132bed6c957ad1f922776165ccb999bfcedb32
3,640,937
import struct


def read_sbd(filepath):
    """Reads an .sbd file containing spectra in either profile or centroid
    mode.

    Returns:
        list: List of spectra
    """
    with open(filepath, 'rb') as in_file:
        header = struct.unpack("<BQB", in_file.read(10))
        meta_size = header[1] * 20  # sizeof(QLfHH)
        meta = [meta_item for meta_item in
                struct.iter_unpack("<QLfHH", in_file.read(meta_size))]
        num_points = [meta_item[1] for meta_item in meta]
        spectra = [read_spectrum(in_file, n) for n in num_points]
    return (header, meta, spectra)
364499580d5531d7361b87d3f575bf006fc79791
3,640,938
def dct2(X, blksize):
    """Calculate DCT transform of a 2D array, X.

    In order for this to work, we have to split X into blksize chunks."""
    dctm = dct_mat(blksize)
    #try:
    #    blks = [sp.vsplit(x, X.shape[1]/blksize) for x in sp.hsplit(X, X.shape[0]/blksize)]
    #except:
    #    print "Some error occurred"
    output = sp.zeros(X.shape)
    if output.ndim == 3:
        for i in range(blksize, X.shape[0], blksize):
            for j in range(blksize, X.shape[1], blksize):
                for c in range(X.shape[2]):
                    b = X[i-blksize:i, j-blksize:j, c]
                    output[i-blksize:i, j-blksize:j, c] = sp.dot(sp.dot(dctm, b), dctm.T)
    elif output.ndim == 2:
        for i in range(blksize, X.shape[0], blksize):
            for j in range(blksize, X.shape[1], blksize):
                b = X[i-blksize:i, j-blksize:j]
                output[i-blksize:i, j-blksize:j] = sp.dot(sp.dot(dctm, b), dctm.T)
    #blks = [sp.dot(sp.dot(dctm, b), dctm.T) for b in blks]
    #return sp.concatenate([blk for blk in blks]).reshape(X.shape)
    return output
79aa158f4fd05ac35bad2d16c14b3b8cbd8351af
3,640,939
def print_filtering(dataset, filter_vec, threshold, meta_name):
    """Function to select the filtering_names (names of those batches or cell
    types with a smaller proportion of cells than threshold), and print an
    informative table with: batches/cell types, absolute_n_cells,
    relative_n_cells, Excluded or not.
    """
    cell_count = filter_vec.value_counts(ascending=False)
    print("**", meta_name, "containing less than:", str(threshold),
          "of total cells are removed" + "\n" +
          "**", meta_name, "filtered based on our threshold")

    # dataframe informing about the filtering about to be done
    exclude_df = pd.DataFrame({
        meta_name: cell_count.index.to_list(),
        'n_cells': cell_count.values,
        '%_cells': cell_count.values / dataset.n_obs,
        'Excluded_?': cell_count.values / dataset.n_obs < threshold,
    })
    print(exclude_df)

    removal_names = exclude_df[meta_name][exclude_df["Excluded_?"] == True].tolist()
    return removal_names
c637a9d219443de730156e546d52461b9bcdfc84
3,640,940
import os


def attempt_input_load(input_path):
    """Attempts to load the file at the provided path and return it as an
    array of lines. If the file does not exist we will exit the program since
    nothing useful can be done."""
    if not os.path.isfile(input_path):
        print("Input file does not exist: %s" % input_path)
        exit()

    print("Loading input from file: %s" % input_path)
    with open(input_path, "r", encoding='utf-8') as f:
        lines = f.readlines()
    return lines
f75e95258c803175d1f13d82c6479987cfdecbf9
3,640,941
from typing import Dict
from urllib.parse import ParseResult, urlparse

from django.conf import settings
from django.templatetags.static import static


def get_chunk_tags(chunks: Dict, attrs: str):
    """
    Build HTML script/link tags for the given chunks.

    :param chunks: chunk records, each with a 'resource_type' and 'url'
    :param attrs: extra attributes to add to each generated tag
    :return: list of HTML tag strings
    """
    tags = []
    for chunk in chunks:
        resource_type = chunk['resource_type']
        original_url = chunk['url']
        parse_result = urlparse(original_url)
        path = parse_result.path
        # If under STATIC_URL rewrite using static tag so that we respect
        # static file storage options, eg. ManifestStaticFileStorage
        if settings.STATIC_URL and path.startswith(settings.STATIC_URL):
            try:
                path = static(path[len(settings.STATIC_URL):])
            except ValueError:
                # Allow url's that aren't managed by static files - eg. this
                # will happen for ManifestStaticFileStorage if file is not in
                # the manifest
                pass
        url = ParseResult(**dict(parse_result._asdict(), path=path)).geturl()
        if resource_type == 'js':
            tags.append(f'<script type="text/javascript" src="{url}" {attrs}></script>')
        if resource_type == 'css':
            tags.append(f'<link type="text/css" href="{url}" rel="stylesheet" {attrs}/>')
    return tags
e7076b345bcca4e7fe8ac96002aad7499cf0b0f3
3,640,942
def __discount_PF(i, n):
    """
    Present worth factor
    Factor: (P/F, i, N)
    Formula: P = F(1+i)^-N

    :param i: interest rate per period
    :param n: number of periods
    :return: the single-payment present worth factor

    Cash Flow:
            F
            |
            |
    --------------
    |
    P
    """
    return (1 + i) ** (-n)
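# Worked example: the present worth of F = 1000 due in N = 5 periods at
# i = 8% per period is P = F(1+i)^-N = 1000 * 1.08**-5 ≈ 680.58:
#
#     1000 * __discount_PF(0.08, 5)  # -> ~680.58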
b6e7424647921b945a524a22d844925573b6490a
3,640,943
def pw2dense(pw, maxd):
    """Make a pairwise distance matrix dense, assuming -1 is used to encode
    D = 0."""
    pw = np.asarray(pw.todense())
    pw[pw == 0] = maxd + 1
    # pw[np.diag_indices_from(pw)] = 0
    pw[pw == -1] = 0
    return pw
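# Minimal sketch with a toy sparse matrix, following the -1-encodes-zero
# convention described above (scipy is assumed for the sparse input):
#
#     from scipy import sparse
#     import numpy as np
#     pw = sparse.csr_matrix(np.array([[0, 2], [-1, 0]]))
#     pw2dense(pw, maxd=5)
#     # unstored zeros become maxd + 1 = 6, and -1 entries become true zeros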
68bbf753d80032a0e697b161c8836283a030a54a
3,640,944
from typing import Awaitable


def run_simulation(sim: td.Simulation) -> Awaitable[td.Simulation]:
    """Returns a simulation with simulation results.

    Only submits the simulation if results are not found locally or remotely.
    First tries to load simulation results from disk. Then it tries to load
    them from the server storage. Finally, it only submits the simulation if
    not found.

    .. code::

        import gtidy3d as gm

        component = gf.components.straight(length=3)
        sim = gm.get_simulation(component=component)
        sim = run_simulation(sim).result()
    """
    td.logging_level("error")
    sim_hash = get_sim_hash(sim)
    sim_path = PATH.results / f"{sim_hash}.hdf5"
    logger.info(f"running simulation {sim_hash}")

    hash_to_id = {d["task_name"][:32]: d["task_id"] for d in web.get_last_projects()}
    target = PATH.results / f"{sim_hash}.hdf5"

    # Try from local storage
    if sim_path.exists():
        logger.info(f"{sim_path} found in local storage")
        sim = _executor.submit(load_results, sim, target)

    # Try from server storage
    elif sim_hash in hash_to_id:
        task_id = hash_to_id[sim_hash]
        sim = _executor.submit(load_results, sim, target, task_id)

    # Only submit if simulation not found
    else:
        task_id = _export_simulation(sim=sim, task_name=sim_hash)
        sim = _executor.submit(load_results, sim, target, task_id)
    return sim
23524bff78ac326bbf74e2389180d924849e57f4
3,640,945
def get_cursor_position(fd=1):
    """Gets the current cursor position as an (x, y) tuple."""
    csbi = get_console_screen_buffer_info(fd=fd)
    coord = csbi.dwCursorPosition
    return (coord.X, coord.Y)
b99cf19081af7e0d68523d1efdfc80c89cfe64cc
3,640,946
from itertools import combinations
from typing import Tuple

import numpy as np


def _held_karp(dists: np.ndarray) -> Tuple[float, np.ndarray]:
    """
    Held-Karp algorithm solves the Traveling Salesman Problem.

    This algorithm uses dynamic programming with memoization.

    Parameters
    ----------
    dists
        Distance matrix.

    Returns
    -------
    The cost and the path.
    """
    n = len(dists)

    # Maps each subset of the nodes to the cost to reach that subset, as well
    # as what node it passed before reaching this subset.
    # Node subsets are represented as set bits.
    C = {}

    # Set transition cost from initial state
    for k in range(1, n):
        C[1 << k, k] = (dists[0][k], 0)

    # Iterate subsets of increasing length and store intermediate results
    # in classic dynamic programming manner
    for subset_size in range(2, n):
        for subset in combinations(range(1, n), subset_size):
            # Set bits for all nodes in this subset
            bits = 0
            for bit in subset:
                bits |= 1 << bit

            # Find the lowest cost to get to this subset
            for k in subset:
                prev = bits & ~(1 << k)

                res = []
                for m in subset:
                    if m == 0 or m == k:
                        continue
                    res.append((C[prev, m][0] + dists[m][k], m))
                C[bits, k] = min(res)

    # We're interested in all bits but the least significant (the start state)
    bits = (2 ** n - 1) - 1

    # Calculate optimal cost
    res = []
    for k in range(1, n):
        res.append((C[bits, k][0] + dists[k][0], k))
    opt, parent = min(res)

    # Backtrack to find full path
    path = []
    for _ in range(n - 1):
        path.append(parent)
        new_bits = bits & ~(1 << parent)
        _, parent = C[bits, parent]
        bits = new_bits

    # Add implicit start state
    path.append(0)

    return opt, np.array(path)[::-1]
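# Toy usage on a 4-city asymmetric instance (illustrative only):
#
#     import numpy as np
#     dists = np.array([[0, 2, 9, 10],
#                       [1, 0, 6, 4],
#                       [15, 7, 0, 8],
#                       [6, 3, 12, 0]])
#     cost, path = _held_karp(dists)  # cost = 21, path starts at node 0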
982d771c1fef5e4f6311fd1b36216c95db7f1343
3,640,947
import os
from itertools import islice


def dl_files(go_directory):
    """Function to download the latest ontologies and associations files from
    geneontology.org. Specify the directory to download the files to."""
    # change to go directory
    os.chdir(go_directory)
    # Get http://geneontology.org/ontology/go-basic.obo
    obo_fname = download_go_basic_obo()
    # print go file version:
    with open(obo_fname) as fin:
        for line in islice(fin, 1, 2):
            print(line)
    # download gene2go annotation file
    fin_gene2go = download_ncbi_associations()
    return obo_fname, fin_gene2go
898597925655c31dc3a6eb84c6d99b5bc3c3a5db
3,640,948
def NS(s, o):
    """
    Nash Sutcliffe efficiency coefficient

    Adapted for use in alarconpy by Albenis Pérez Alarcón
    contact: apalarcon1991@gmail.com

    Parameters
    --------------------------
    input:
        s: simulated
        o: observed

    output:
        ns: Nash Sutcliffe efficiency coefficient
    """
    s, o = filter_nan(s, o)
    return 1 - sum((s - o) ** 2) / sum((o - np.mean(o)) ** 2)
10c14022ae634a74f0a417454ddfa0fa52d89c8a
3,640,949
def UTArgs(v):
    """
    tag UTArgs
    """
    tag = SyntaxTag.TagUTArgs()
    tag.AddV(v)
    return tag
8d9ff601a5a2bf65e68e074dad1894342881950f
3,640,950
from src.praxxis.sqlite import sqlite_rulesengine
from src.praxxis.notebook.notebook import get_output_from_filename


def rules_check(rulesengine_db, filename, output_path, query_start, query_end):
    """check if any rules match"""
    rulesets = sqlite_rulesengine.get_active_rulesets(rulesengine_db,
                                                      query_start, query_end)
    rulesmatch = []
    hit = set()
    predictions = []
    for ruleset in rulesets:
        filenames = sqlite_rulesengine.get_filenames_by_rule(ruleset[2])
        for fmatch in filenames:
            if fmatch[0] in filename:
                rulesmatch.append(fmatch[1])
        if rulesmatch != []:
            # get output
            output = get_output_from_filename(output_path)
            outputs = sqlite_rulesengine.get_outputs_for_rules(ruleset[2], rulesmatch)
            for omatch in outputs:
                if omatch[0] in output:
                    hit.add(omatch[1])
            predictions.extend(sqlite_rulesengine.get_predictions(ruleset[2], hit))
    return predictions
a81d29a8a9d61ba6a577fbe9899967b81a25ff7f
3,640,951
def shortstr(s, max_len=144, replace={'\n': ';'}):
    """
    Obtain a shorter string
    """
    s = str(s)
    for k, v in replace.items():
        s = s.replace(k, v)
    if max_len > 0 and len(s) > max_len:
        s = s[:max_len - 4] + ' ...'
    return s
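# Example (illustrative):
#
#     shortstr("line one\nline two")    # -> 'line one;line two'
#     shortstr("x" * 200, max_len=10)   # -> 'xxxxxx ...'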
396794506583dcf39e74941a20f27ac63de325ec
3,640,952
def update_gms_stats_collection(
    self,
    application: bool = None,
    dns: bool = None,
    drc: bool = None,
    drops: bool = None,
    dscp: bool = None,
    flow: bool = None,
    interface: bool = None,
    jitter: bool = None,
    port: bool = None,
    shaper: bool = None,
    top_talkers: bool = None,
    tunnel: bool = None,
) -> bool:
    """Enable/disable stats collection by orchestrator. All parameters
    optional.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - gmsStatsCollection
          - POST
          - /gms/statsCollection

    :param application: Description missing in Swagger, defaults to None
    :type application: bool, optional
    :param dns: Description missing in Swagger, defaults to None
    :type dns: bool, optional
    :param drc: Description missing in Swagger, defaults to None
    :type drc: bool, optional
    :param drops: Description missing in Swagger, defaults to None
    :type drops: bool, optional
    :param dscp: Description missing in Swagger, defaults to None
    :type dscp: bool, optional
    :param flow: Description missing in Swagger, defaults to None
    :type flow: bool, optional
    :param interface: Description missing in Swagger, defaults to None
    :type interface: bool, optional
    :param jitter: Description missing in Swagger, defaults to None
    :type jitter: bool, optional
    :param port: Description missing in Swagger, defaults to None
    :type port: bool, optional
    :param shaper: Description missing in Swagger, defaults to None
    :type shaper: bool, optional
    :param top_talkers: Description missing in Swagger, defaults to None
    :type top_talkers: bool, optional
    :param tunnel: Description missing in Swagger, defaults to None
    :type tunnel: bool, optional
    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    data = {}
    if application is not None:
        data["Application"] = application
    if dns is not None:
        data["Dns"] = dns
    if drc is not None:
        data["Drc"] = drc
    if drops is not None:
        data["Drops"] = drops
    if dscp is not None:
        data["Dscp"] = dscp
    if flow is not None:
        data["Flow"] = flow
    if interface is not None:
        data["Interface"] = interface
    if jitter is not None:
        data["Jitter"] = jitter
    if port is not None:
        data["Port"] = port
    if shaper is not None:
        data["Shaper"] = shaper
    if top_talkers is not None:
        data["TopTalkers"] = top_talkers
    if tunnel is not None:
        data["Tunnel"] = tunnel

    return self._post(
        "/gms/statsCollection",
        data=data,
        return_type="bool",
    )
d6dce80a8543cae16eebf076eeaa3e1428831df5
3,640,953
def _get_nearby_factories(latitude, longitude, radius):
    """Return nearby factories based on position and search range."""
    # ref: https://stackoverflow.com/questions/574691/mysql-great-circle-distance-haversine-formula
    distance = 6371 * ACos(
        Cos(Radians(latitude))
        * Cos(Radians("lat"))
        * Cos(Radians("lng") - Radians(longitude))
        + Sin(Radians(latitude)) * Sin(Radians("lat"))
    )
    radius_km = radius
    ids = (
        Factory.objects.annotate(distance=distance)
        .only("id")
        .filter(distance__lt=radius_km)
        .order_by("id")
    )
    if len(ids) > settings.MAX_FACTORY_PER_GET:
        ids = _sample(ids, settings.MAX_FACTORY_PER_GET)
    return (
        Factory.objects.filter(id__in=[obj.id for obj in ids])
        .prefetch_related(Prefetch('report_records', queryset=ReportRecord.objects.only("created_at").all()))
        .prefetch_related(Prefetch('images', queryset=Image.objects.only("id").all()))
        .prefetch_related(Prefetch('documents', queryset=Document.objects.only('created_at', 'display_status').all()))
        .all()
    )
b94c879d93a486b4ac0dd77bee6fb9d79395dc23
3,640,954
def add_register(request):
    """
    Handle the submitted registration data and save it to the database.
    :param request:
    :return:
    """
    form = forms.RegisterForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data  # clean the data
        data.pop("re_password")
        data['password'] = hash_pwd.has_password(data.get('password'))
        # add required data
        data['is_active'] = 1
        # store in formatted form
        models.UserInfo.objects.create(
            **data
        )
        return redirect('mysite:login')
    else:
        # return the form object (with its error messages) back to the
        # front-end page
        return render(request, 'login/register.html', {"form": form})
acaf3886773b599df2853a5e73ef504af27f1c53
3,640,955
from math import sqrt

import numpy
import pandas
from scipy import stats as scipy_stats


def confidence_interval(data, alpha=0.1):
    """
    Calculate the confidence interval for each column in a pandas dataframe.

    @param data: A pandas dataframe with one or several columns.
    @param alpha: The confidence level; by default the 90% confidence
        interval is calculated.
    @return: A series where each entry contains the confidence interval for
        the corresponding column.
    """
    t = lambda column: scipy_stats.t.isf(alpha / 2.0, len(column) - 1)
    width = lambda column: t(column) * numpy.std(column.values, ddof=1) / sqrt(len(column))
    formatted_interval = lambda column: "%.2f +/- %.4f" % (column.mean(), width(column))
    return pandas.Series([formatted_interval(data[c]) for c in data.columns],
                         index=data.columns)
f9c31549287723f7f75c265485b7cd9911f68168
3,640,956
def RunInTransactionOptions(options, function, *args, **kwargs):
    """Runs a function inside a datastore transaction.

    Runs the user-provided function inside a full-featured, ACID datastore
    transaction. Every Put, Get, and Delete call in the function is made
    within the transaction. All entities involved in these calls must belong
    to the same entity group. Queries are supported as long as they specify
    an ancestor belonging to the same entity group.

    The trailing arguments are passed to the function as positional
    arguments. If the function returns a value, that value will be returned
    by RunInTransaction. Otherwise, it will return None.

    The function may raise any exception to roll back the transaction instead
    of committing it. If this happens, the transaction will be rolled back
    and the exception will be re-raised up to RunInTransaction's caller.

    If you want to roll back intentionally, but don't have an appropriate
    exception to raise, you can raise an instance of
    datastore_errors.Rollback. It will cause a rollback, but will *not* be
    re-raised up to the caller.

    The function may be run more than once, so it should be idempotent. It
    should avoid side effects, and it shouldn't have *any* side effects that
    aren't safe to occur multiple times. This includes modifying the
    arguments, since they persist across invocations of the function.
    However, this doesn't include Put, Get, and Delete calls, of course.

    Example usage:

    > def decrement(key, amount=1):
    >   counter = datastore.Get(key)
    >   counter['count'] -= amount
    >   if counter['count'] < 0:    # don't let the counter go negative
    >     raise datastore_errors.Rollback()
    >   datastore.Put(counter)
    >
    > counter = datastore.Query('Counter', {'name': 'foo'})
    > datastore.RunInTransaction(decrement, counter.key(), amount=5)

    Transactions satisfy the traditional ACID properties. They are:

    - Atomic. All of a transaction's operations are executed or none of them
      are.

    - Consistent. The datastore's state is consistent before and after a
      transaction, whether it committed or rolled back. Invariants such as
      "every entity has a primary key" are preserved.

    - Isolated. Transactions operate on a snapshot of the datastore. Other
      datastore operations do not see intermediate effects of the
      transaction; they only see its effects after it has committed.

    - Durable. On commit, all writes are persisted to the datastore.

    Nested transactions are not supported.

    Args:
      options: TransactionOptions specifying options (number of retries,
        etc.) for this transaction
      function: a function to be run inside the transaction on all remaining
        arguments
      *args: positional arguments for function.
      **kwargs: keyword arguments for function.

    Returns:
      the function's return value, if any

    Raises:
      TransactionFailedError, if the transaction could not be committed.
    """
    return _RunInTransactionInternal(options,
                                     datastore_rpc.TransactionMode.READ_WRITE,
                                     function, *args, **kwargs)
9236024d034f193919e976a04eec9105ee899d48
3,640,957
def notify(message, key, target_object=None, url=None, filter_exclude={}):
    """
    Notify subscribing users of a new event. Key can be any kind of string,
    just make sure to reuse it where applicable!

    Object_id is some identifier of an object, for instance if a user
    subscribes to a specific comment thread, you could write:

        notify("there was a response to your comment", "comment_response",
               target_object=PostersObject,
               url=reverse('comments:view', args=(PostersObject.id,)))

    The below example notifies everyone subscribing to the "new_comments" key
    with the message "New comment posted".

        notify("New comment posted", "new_comments")

    filter_exclude: a dictionary to exclude special elements of subscriptions
    in the queryset, for instance filter_exclude={''}
    """
    if _disable_notifications:
        return 0

    if target_object:
        if not isinstance(target_object, Model):
            raise TypeError(_("You supplied a target_object that's not an "
                              "instance of a django Model."))
        object_id = target_object.id
    else:
        object_id = None

    objects = models.Notification.create_notifications(
        key,
        object_id=object_id,
        message=message,
        url=url,
        filter_exclude=filter_exclude,
    )
    return len(objects)
9da7f8a498a3fad1f1acbb9e35e798083d6a25c5
3,640,958
from pathlib import Path


def get_project_root() -> Path:
    """Return the path of the project root folder.

    Returns:
        Path: Path to project root
    """
    return Path(__file__).parent
0122844ae89a53b0cd28659be21fb932164719cd
3,640,959
def FTCS(Uo, diffX, diffY=None):
    """Return the numerical solution of dependent variable in the model eq.

    This routine uses the explicit Forward Time/Central Space method to
    obtain the solution of the 1D or 2D diffusion equation.

    Call signature:
        FTCS(Uo, diffX, diffY)

    Parameters
    ----------
    Uo : ndarray[float], =1d, 2d
        The dependent variable at time level, n within the entire domain.
    diffX : float
        Diffusion number for x-component of the parabolic/diffusion equation.
    diffY : float, Default=None for 1-D applications
        Diffusion number for y-component of the parabolic/diffusion equation.

    Returns
    -------
    U : ndarray[float], =1d, 2d
        The dependent variable at time level, n+1 within the entire domain.
    """
    shapeU = Uo.shape  # Obtain Dimension
    U = Uo.copy()  # Initialize U

    if len(shapeU) == 1:
        U[1:-1] = (
            Uo[1:-1]
            + diffX*(Uo[2:] - 2.0*Uo[1:-1] + Uo[0:-2])
        )
    elif len(shapeU) == 2:
        U[1:-1, 1:-1] = (
            Uo[1:-1, 1:-1]
            + diffX*(Uo[2:, 1:-1] - 2.0*Uo[1:-1, 1:-1] + Uo[0:-2, 1:-1])
            + diffY*(Uo[1:-1, 2:] - 2.0*Uo[1:-1, 1:-1] + Uo[1:-1, 0:-2])
        )
    return U
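# 1D usage sketch (synthetic initial condition, illustrative only). Explicit
# FTCS is stable for a diffusion number diffX = alpha*dt/dx**2 <= 0.5:
#
#     import numpy as np
#     Uo = np.zeros(11)
#     Uo[5] = 1.0               # initial spike
#     U = FTCS(Uo, diffX=0.25)  # one explicit time step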
4b02749f3f50a2cff74abb75146159289d42b99e
3,640,960
def epicyclic_frequency(prof) -> Quantity:
    """Epicyclic frequency."""
    Omega = prof['keplerian_frequency']
    R = prof['radius']
    return np.sqrt(2 * Omega / R * np.gradient(R ** 2 * Omega, R))
917fc1e094719f0dbb6a3ac7ca0396601060bf1c
3,640,961
import os
import re


def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
    """Filter sentences with a given origlang (or subset) according to the
    raw SGM files."""
    if origlang is None and subset is None:
        return systems
    if test_sets is None or langpair is None:
        raise ValueError('Filtering for --origlang or --subset needs a test '
                         '(-t) and a language pair (-l).')

    indices_to_keep = []
    for test_set in test_sets.split(','):
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw',
                               DATASETS[test_set][langpair][0])
        if not rawfile.endswith('.sgm'):
            raise Exception('--origlang and --subset supports only *.sgm '
                            'files, not %s' % rawfile)
        if subset is not None:
            if test_set not in SUBSETS:
                raise Exception('No subset annotation available for test set '
                                + test_set)
            doc_to_tags = SUBSETS[test_set]
        number_sentences_included = 0
        with smart_open(rawfile) as fin:
            include_doc = False
            for line in fin:
                if line.startswith('<doc '):
                    if origlang is None:
                        include_doc = True
                    else:
                        doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
                        if origlang.startswith('non-'):
                            include_doc = doc_origlang != origlang[4:]
                        else:
                            include_doc = doc_origlang == origlang
                    if subset is not None:
                        doc_id = re.sub(r'.* docid="([^"]+)".*\n', '\\1', line)
                        if not re.search(subset, doc_to_tags.get(doc_id, '')):
                            include_doc = False
                if line.startswith('<seg '):
                    indices_to_keep.append(include_doc)
                    number_sentences_included += 1 if include_doc else 0
    return [[sentence for sentence, keep in zip(sys, indices_to_keep) if keep]
            for sys in systems]
0a15d2290d32b5556e18e2324b80e49300a0acca
3,640,962
def get_groups(a_graph, method='component_infomap', return_form='membership'):
    """
    Return the grouping of the provided graph object using the specified
    method. The grouping is returned as a list of sets each holding all
    members of a group.

    Parameters
    ==========
    a_graph: :class:`igraph.Graph`
        The graph to partition
    method: str (default='component_infomap')
        String specifying which method to use. If two methods should be used
        one after the other they should be separated by `_`.
        Default: 'component_infomap' which will first consider all
        disconnected components as groups then apply infomap on all of those
        groups to optionally further split.
    return_form: str (default='membership')
        Determines the format of how the social group structure should be
        returned. Options are:

        * ``'membership'``: A list returning for each `index` node the group
          it belongs to.
        * ``'memberlists'``: Dictionary with a list of members `value` for
          each group `key`.

    Returns
    =======
    dict
        Depending on what was chosen for the `return_form` attribute, either
        the membership dict, i.e.::

            {node_id: group_id, ...}

        or the memberlist dict, i.e.::

            {group_id: [node1_id, node2_id, ...], ...}

        is returned.
    """
    # methods = method.split('_')
    # For now only 'component_infomap' is allowed as procedure
    if method == 'component_infomap':
        # first the connected components
        a_graph.vs['component'] = a_graph.clusters().membership
        components = set(a_graph.vs['component'])
        # create for each component a graph and apply infomap to it
        node_membership = {}
        # print(
        #     'INFO: Found {0} disconnected components'.format(len(components))
        # )
        if components:
            # do the community detection on each component and create a
            # compound group id: component_group
            for component in components:
                _comp_graph = a_graph.subgraph(
                    [
                        node['name'] for node in a_graph.vs
                        if node['component'] == component
                    ]
                )
                _infompa_comp_graph = _comp_graph.community_infomap('weight')
                _comp_graph.vs['_group'] = _infompa_comp_graph.membership
                for node in _comp_graph.vs:
                    node_membership[node['name']] = '{0}_{1}'.format(
                        node['component'], node['_group']
                    )
                del _infompa_comp_graph
        else:
            _infompa_comp_graph = a_graph.community_infomap('weight')
            a_graph.vs['group'] = _infompa_comp_graph.membership
            node_membership = {
                node['name']: node['group'] for node in a_graph.vs
            }
        group_membership = {}
        for node in node_membership:
            try:
                group_membership[node_membership[node]].append(node)
            except KeyError:
                group_membership[node_membership[node]] = [node]
        if return_form == 'membership':
            # nbr_nodes = len(a_graph.vs['name'])
            # membership = [None]*nbr_nodes
            # for g, members in group_membership.items():
            #     for member in members:
            #         membership[member] = g
            # return membership
            return node_membership
        elif return_form == 'memberlists':
            # return [_group for _group in group_membership.values()]
            return group_membership
        else:
            return None
110dd9dc470d9426b388e0db1289ff0b23c4a963
3,640,963
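A minimal usage sketch for `get_groups`, assuming python-igraph is installed; the vertex `name` and edge `weight` attributes are filled in explicitly because the function reads both.

# Hedged usage sketch: python-igraph is assumed to be installed, and the
# 'name'/'weight' attributes are set up because get_groups reads both.
import igraph

g = igraph.Graph.Famous('Zachary')
g.vs['name'] = [str(i) for i in range(g.vcount())]
g.es['weight'] = [1.0] * g.ecount()

membership = get_groups(g, return_form='membership')    # {node: 'component_group', ...}
memberlists = get_groups(g, return_form='memberlists')  # {'component_group': [nodes], ...}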
def positional_rank_queues (service_platform, api_key): """ Get the queues that have positional ranks enabled. References: https://developer.riotgames.com/regional-endpoints.html https://developer.riotgames.com/api-methods/#league-v4/GET_getQueuesWithPositionRanks Arguments: service_platform (str): The service platform that the request should be issued to. api_key (str): The client's api key. Returns: dict: the details of the response to the issued http request. """ header_parameters = { "X-Riot-Token": api_key } url = endpoints.v4["host"]["endpoint"].format(service_platform) path = endpoints.v4["positional-rank-queues"]["endpoint"] return _request_executor.get("".join([url, path]), header_parameters=header_parameters)
f48f9a445aac9611d4892e1aab5e7699a4c3ec1f
3,640,964
def maplist(f, xs): """Implement `maplist` in pure Python.""" return list(map(f, xs))
894a58f9e2cd66fe9c327ea65433b8210051ed60
3,640,965
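A one-line check of the behaviour, which mirrors the eager `list(map(...))`:

# Example: maplist eagerly applies f, like list(map(f, xs)).
assert maplist(lambda x: x * 2, [1, 2, 3]) == [2, 4, 6]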
import string import re def pull_urls_excel_sheets(workbook): """ Pull URLs from cells in a given ExcelBook object. """ # Got an Excel workbook? if (workbook is None): return [] # Look through each cell. all_cells = excel.pull_cells_workbook(workbook) r = set() for cell in all_cells: # Skip empty cells. value = None try: value = str(cell["value"]).strip() except UnicodeEncodeError: value = ''.join(filter(lambda x:x in string.printable, cell["value"])).strip() if (len(value) == 0): continue # Add http:// for cells that look like they might be URLs # missing the http part. pat = r"[A-Za-z0-9_]{3,50}\.[A-Za-z]{2,10}/(?:[A-Za-z0-9_]{1,50}/)*[A-Za-z0-9_\.]{3,50}" if (re.search(pat, value) is not None): value = "http://" + value # Look for URLs in the cell value. for url in re.findall(read_ole_fields.URL_REGEX, value): r.add(url.strip()) # Return any URLs found in cells. return r
0359fb8e1fd552749e15cce631f756130c5199cf
3,640,966
import hashlib

import click
import requests


def do_request(base_url, api_path, key, session_id, extra_params=''):
    """
    Perform a request against the KNVB API, for example /teams; this
    retrieves all team data.
    """
    hashStr = hashlib.md5('{0}#{1}#{2}'.format(key, api_path, session_id).encode('utf-8')).hexdigest()
    url = '{0}{1}?PHPSESSID={2}&hash={3}&{4}'.format(base_url, api_path, session_id, hashStr, extra_params)
    headers = {
        'HTTP_X_APIKEY': key,
        'Content-Type': 'application/json'
    }
    click.echo('URL: {0}'.format(url))
    r = requests.get(url, headers=headers)
    json_data = r.json()
    return json_data
44217caa2c2cdf7543597405836cf0bb1ac650cd
3,640,967
def write_code():
    """
    Generate the Inno Setup [Code] section that checks for the VC90 CRT
    and whether SASVIEWPATH is already present in the PATH environment
    variable.
    """
    msg = """\n\n[Code]\n"""
    msg += """function InstallVC90CRT(): Boolean;\n"""
    msg += """begin\n"""
    msg += """    Result := not DirExists('C:\\WINDOWS\\WinSxS\\x86_Microsoft.VC90."""
    msg += """CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375');\n"""
    msg += """end;\n\n"""
    msg += """function NeedsAddPath(): boolean;\n"""
    msg += """var\n"""
    msg += """  oldpath: string;\n"""
    msg += """  newpath: string;\n"""
    msg += """  pathArr: TArrayOfString;\n"""
    msg += """  i: Integer;\n"""
    msg += """begin\n"""
    msg += """  RegQueryStringValue(HKEY_CURRENT_USER, 'Environment',"""
    msg += """ 'PATH', oldpath);\n"""
    msg += """  oldpath := oldpath + ';';\n"""
    msg += """  newpath := '%SASVIEWPATH%';\n"""
    msg += """  i := 0;\n"""
    msg += """  while (Pos(';', oldpath) > 0) do begin\n"""
    msg += """    SetArrayLength(pathArr, i+1);\n"""
    msg += """    pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);\n"""
    msg += """    oldpath := Copy(oldpath, Pos(';', oldpath)+1,"""
    msg += """ Length(oldpath));\n"""
    msg += """    i := i + 1;\n"""
    msg += """    // Check if current directory matches app dir\n"""
    msg += """    if newpath = pathArr[i-1]\n"""
    msg += """    then begin\n"""
    msg += """      Result := False;\n"""
    msg += """      exit;\n"""
    msg += """    end;\n"""
    msg += """  end;\n"""
    msg += """  Result := True;\n"""
    msg += """end;\n"""
    msg += """\n"""
    return msg
429eb64485a4fe240c1bebbfd2a2a89613b4fddd
3,640,968
import re


def get_filenames(filename):
    """
    Return list of unique file references within a passed file.
    """
    try:
        with open(filename, 'r', encoding='utf8') as file:
            words = re.split(r"[\n\\, \-!?;'/]", file.read())
            files = set(filter(lambda s: s.endswith(('.csv', '.zip', '.pdf', '.txt', '.tsv', '.cfg', '.ini')), words))
            return list(files)
    except Exception as e:
        print(e)
        return []
a1d8c396245cfc682ecc37edb3e673f87939b6fa
3,640,969
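A small round-trip example; `notes.txt` is a throwaway file created just for the demonstration.

# Write a file containing some references, then pull them back out.
with open('notes.txt', 'w', encoding='utf8') as f:
    f.write('see data.csv and backup.zip, plus config.ini')

print(sorted(get_filenames('notes.txt')))  # ['backup.zip', 'config.ini', 'data.csv']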
def format_filename_gen(prefix, seq_len, tgt_len, bi_data, suffix,
                        src_lang, tgt_lang, uncased=False):
    """Format the filename of a generation data file from its components."""
    if not uncased:
        uncased_str = ""
    else:
        uncased_str = "uncased."

    if bi_data:
        bi_data_str = "bi"
    else:
        bi_data_str = "uni"

    file_name = "{}-{}_{}.seqlen-{}.tgtlen-{}.{}{}.gen.{}".format(
        src_lang[:2], tgt_lang[:2], prefix, seq_len, tgt_len,
        uncased_str, bi_data_str, suffix)

    return file_name
4a54c1fbfe371d628c1d7019c131b8fa6755f900
3,640,970
def is_holiday(date) -> bool: """ Return True or False for whether a date is a holiday """ name = penn_holidays.get(date) if not name: return False name = name.replace(' (Observed)', '') return name in holiday_names
edb68fa552f0f772b29b5d8a414758e63c252045
3,640,971
import re def tokenize_text(text): """ Tokenizes a string. :param text: String :return: Tokens """ token = [] running_word = "" for c in text: if re.match(alphanumeric, c): running_word += c else: if running_word != "": token.append(running_word) if c not in filter_character: token.append(c) running_word = "" if running_word != "": token.append(running_word) return token
b7f420d081d9cd658435ef623142a9d8ecf7b99b
3,640,972
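`alphanumeric` and `filter_character` are module-level globals not shown in the snippet; the sketch below assumes plausible stand-in values so the tokenizer can run.

# Hedged sketch: `alphanumeric` and `filter_character` are module globals
# in the original; the values below are stand-in assumptions.
alphanumeric = r'[A-Za-z0-9]'
filter_character = {' ', '\n', '\t'}

print(tokenize_text('Hello, world!'))  # ['Hello', ',', 'world', '!']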
def generate_dummy_probe(elec_shapes='circle'):
    """
    Generate a 3-column, 32-channel dummy electrode probe.
    Mainly used for testing and examples.
    """
    if elec_shapes == 'circle':
        electrode_shape_params = {'radius': 6}
    elif elec_shapes == 'square':
        electrode_shape_params = {'width': 7}
    elif elec_shapes == 'rect':
        electrode_shape_params = {'width': 6, 'height': 4.5}
    else:
        raise ValueError("elec_shapes must be 'circle', 'square' or 'rect'")

    probe = generate_multi_columns_probe(num_columns=3,
                                         num_elec_per_column=[10, 12, 10],
                                         xpitch=25, ypitch=25,
                                         y_shift_per_column=[0, -12.5, 0],
                                         electrode_shapes=elec_shapes,
                                         electrode_shape_params=electrode_shape_params)
    return probe
ea0f900390cf808cd8df3a38df9c47b99b77167b
3,640,973
def try_decode(message): """Try to decode the message with each known message class; return the first successful decode, or None.""" for c in MESSAGE_CLASSES: try: return c.decode(message) except ValueError: pass # The message was probably of a different type. return None
1dbbe5a6426b67690834673cd049535b018c0097
3,640,974
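A hypothetical sketch of the dispatch contract `try_decode` relies on: every class in `MESSAGE_CLASSES` exposes a `decode` classmethod that raises `ValueError` on foreign payloads. The classes below are invented purely for illustration; the real message classes live elsewhere in the module.

# Hypothetical message classes illustrating the decode contract.
class Ping:
    @classmethod
    def decode(cls, message):
        if message != b'PING':
            raise ValueError('not a Ping')
        return cls()

class Pong:
    @classmethod
    def decode(cls, message):
        if message != b'PONG':
            raise ValueError('not a Pong')
        return cls()

MESSAGE_CLASSES = (Ping, Pong)

assert isinstance(try_decode(b'PONG'), Pong)
assert try_decode(b'???') is None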
def build_where_clause(args: dict) -> str:
    """
    This function transforms the relevant entries of dict into the where part of a SQL query

    Args:
        args: The arguments dict

    Returns:
        A string representing the where part of a SQL query
    """
    args_dict = {
        'source_ip': 'source_ip.value',
        'dest_ip': 'dest_ip.value',
        'rule_matched': 'rule_matched',
        'from_zone': 'from_zone',
        'to_zone': 'to_zone',
        'source_port': 'source_port',
        'dest_port': 'dest_port',
        'action': 'action.value',
        'file_sha_256': 'file_sha_256',
        'file_name': 'file_name',
        'app': 'app',
        'app_category': 'app_category',
        'dest_device_port': 'dest_device_port',
        'dest_edl': 'dest_edl',
        'dest_dynamic_address_group': 'dest_dynamic_address_group',
        'dest_location': 'dest_location',
        'dest_user': 'dest_user',
        'file_type': 'file_type',
        'is_server_to_client': 'is_server_to_client',
        'is_url_denied': 'is_url_denied',
        'log_type': 'log_type',
        'nat_dest': 'nat_dest',
        'nat_dest_port': 'nat_dest_port',
        'nat_source': 'nat_source',
        'nat_source_port': 'nat_source_port',
        'rule_matched_uuid': 'rule_matched_uuid',
        'severity': 'severity',
        'source_device_host': 'source_device_host',
        'source_edl': 'source_edl',
        'source_dynamic_address_group': 'source_dynamic_address_group',
        'source_location': 'source_location',
        'source_user': 'source_user',
        'sub_type': 'sub_type.value',
        'time_generated': 'time_generated',
        'url_category': 'url_category',
        'url_domain': 'url_domain'
    }
    if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):
        raise DemistoException('Error: "ip" argument cannot appear with either "source_ip" or "dest_ip"')

    if args.get('port') and (args.get('source_port') or args.get('dest_port')):
        raise DemistoException('Error: "port" argument cannot appear with either "source_port" or "dest_port"')

    non_string_keys = {'dest_port', 'source_port'}
    if 'query' in args:
        # if query arg is supplied then we just need to parse it and only it
        return args['query'].strip()

    where_clause = ''
    if args.get('ip'):
        ips = argToList(args.pop('ip'))
        # Creating a query for ip argument using source ip and dest ip
        where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
        if any(args.get(key) for key in args_dict) or args.get('port') or args.get('url'):
            where_clause += ' AND '
    if args.get('port'):
        ports = argToList(args.pop('port'))
        # Creating a query for port argument using source port and dest port
        where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '
    if args.get('url'):
        urls = argToList(args.pop('url'))
        # Creating a query for url argument using uri and referer
        where_clause += '(' + ' OR '.join(f'uri LIKE "%{url}%" OR referer LIKE "%{url}%"' for url in urls) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '

    # We want to add only keys that are part of the query
    string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
    or_statements = []
    for key, values in string_query_fields.items():
        string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
    # ports are digested as ints and cannot be sent as strings
    non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
    for key, values in non_string_query_fields.items():
        non_string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
    where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
    return where_clause
3b85c92346be254646dd5208259cee317f6f9741
3,640,975
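A hedged usage sketch; `DemistoException` and `argToList` come from the XSOAR common server code and are assumed to be in scope here.

# Hedged usage sketch; argToList/DemistoException are assumed importable
# from the XSOAR common server code.
args = {'ip': '1.2.3.4', 'action': 'allow,deny', 'dest_port': '443'}
print(build_where_clause(args))
# (source_ip.value = "1.2.3.4" OR dest_ip.value = "1.2.3.4")
#     AND (action.value = "allow" OR action.value = "deny") AND (dest_port = 443)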
import numpy as np


def matrix_scale(s):
    """Produce scaling transform matrix with uniform scale s in all 3 dimensions."""
    M = matrix_ident()
    M[0:3, 0:3] = np.diag([s, s, s]).astype(np.float64)
    return M
22949a406865c18fe8200e43ea046ca6f16bdd6f
3,640,976
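`matrix_ident` is not part of the snippet; the sketch below assumes it returns a 4x4 float identity, which is consistent with the 3x3 diagonal block assignment above.

# Minimal sketch: matrix_ident is assumed to return a 4x4 float identity.
import numpy as np

def matrix_ident():
    return np.eye(4, dtype=np.float64)

M = matrix_scale(2.0)
print(np.diag(M))  # [2. 2. 2. 1.]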
from typing import List

import numpy as np
from numpy.linalg import norm


def magnitude_datapoints(data: List["DataPoint"]) -> List:
    """
    Compute the magnitude (Euclidean norm) of each sample in a list of
    DataPoint objects.

    :param data: list of DataPoint objects whose `sample` attribute holds a vector
    :return: list of magnitudes, one per data point
    """
    if data is None or len(data) == 0:
        return []

    input_data = np.array([i.sample for i in data])
    data = norm(input_data, axis=1).tolist()

    return data
b6c505f02042cfc34183a19cc0843b28e25dd6b2
3,640,977
import logging
from typing import Mapping

import PyOpenColorIO as ocio


def generate_config(data, config_name=None, validate=True, base_config=None):
    """
    Generates the *OpenColorIO* config from given data.

    Parameters
    ----------
    data : ConfigData
        *OpenColorIO* config data.
    config_name : unicode, optional
        *OpenColorIO* config file name, if given the config will be written to
        disk.
    validate : bool, optional
        Whether to validate the config.
    base_config : Config, optional
        *OpenColorIO* base config inherited for initial data.

    Returns
    -------
    Config
        *OpenColorIO* config.
    """
    if base_config is not None:
        config = base_config
    else:
        config = ocio.Config()
        config.setMajorVersion(data.profile_version)

    if data.description is not None:
        config.setDescription(data.description)

    for search_path in data.search_path:
        logging.debug(f'Adding "{search_path}".')
        config.addSearchPath(search_path)

    for role, colorspace in data.roles.items():
        logging.debug(f'Adding "{colorspace}" colorspace as "{role}" role.')
        config.setRole(role, colorspace)

    for colorspace in data.colorspaces:
        if isinstance(colorspace, Mapping):
            colorspace = colorspace_factory(**colorspace)

        logging.debug(f'Adding "{colorspace.getName()}" colorspace.')
        config.addColorSpace(colorspace)

    for named_transform in data.named_transforms:
        if isinstance(named_transform, Mapping):
            named_transform = named_transform_factory(**named_transform)

        logging.debug(f'Adding "{named_transform.getName()}" named transform.')
        config.addNamedTransform(named_transform)

    for view_transform in data.view_transforms:
        if isinstance(view_transform, Mapping):
            view_transform = view_transform_factory(**view_transform)

        logging.debug(f'Adding "{view_transform.getName()}" view transform.')
        config.addViewTransform(view_transform)

    for look in data.looks:
        if isinstance(look, Mapping):
            look = look_factory(**look)

        logging.debug(f'Adding "{look.getName()}" look.')
        config.addLook(look)

    if data.profile_version >= 2:
        logging.debug(f'Disabling "{data.inactive_colorspaces}" colorspaces.')
        config.setInactiveColorSpaces(','.join(data.inactive_colorspaces))

    for shared_view in data.shared_views:
        display_colorspace = shared_view.get('display_colorspace',
                                             '<USE_DISPLAY_NAME>')
        looks = shared_view.get('looks')
        view_transform = shared_view.get('view_transform')
        rule = shared_view.get('rule')
        description = shared_view.get('description')
        view = shared_view['view']
        logging.debug(
            f'Adding "{view}" shared view using "{view_transform}" '
            f'view transform, "{display_colorspace}" display colorspace, '
            f'"{looks}" looks, "{rule}" rule and "{description}" '
            f'description.')
        config.addSharedView(view, view_transform, display_colorspace, looks,
                             rule, description)

    for view in data.views:
        display = view['display']
        colorspace = view.get('colorspace')
        looks = view.get('looks')
        view_transform = view.get('view_transform')
        display_colorspace = view.get('display_colorspace')
        rule = view.get('rule')
        description = view.get('description')
        view = view['view']
        if colorspace is not None:
            logging.debug(f'Adding "{view}" view to "{display}" display '
                          f'using "{colorspace}" colorspace.')
            config.addDisplayView(display, view, colorspace, looks)
        elif view_transform is not None and display_colorspace is not None:
            logging.debug(f'Adding "{view}" view to "{display}" display '
                          f'using "{view_transform}" view transform, '
                          f'"{display_colorspace}" display colorspace, '
                          f'"{rule}" rule and "{description}" description.')
            config.addDisplayView(display, view, view_transform,
                                  display_colorspace, looks, rule, description)
        else:
            logging.debug(f'Adding "{view}" view to "{display}" display.')
            config.addDisplaySharedView(display, view)

    if data.active_displays:
        logging.debug(f'Activating "{data.active_displays}" displays.')
        config.setActiveDisplays(','.join(data.active_displays))

    if data.active_views:
        logging.debug(f'Activating "{data.active_views}" views.')
        config.setActiveViews(','.join(data.active_views))

    if data.file_rules:
        file_rules = ocio.FileRules()
        rule_index = 0
        for file_rule in reversed(data.file_rules):
            name = file_rule['name']
            colorspace = file_rule['colorspace']
            regex = file_rule.get('regex')
            pattern = file_rule.get('pattern')
            extension = file_rule.get('extension')
            if name == 'Default':
                logging.debug(f'Setting "{name}" file rule with '
                              f'"{colorspace}" colorspace.')
                file_rules.setDefaultRuleColorSpace(colorspace)
            elif regex:
                logging.debug(f'Adding "{name}" file rule with '
                              f'"{regex}" regex pattern for '
                              f'"{colorspace}" colorspace.')
                file_rules.insertRule(rule_index, name, colorspace, regex)
                rule_index += 1
            else:
                logging.debug(
                    f'Adding "{name}" file rule with '
                    f'"{pattern}" pattern and "{extension}" extension '
                    f'for "{colorspace}" colorspace.')
                file_rules.insertRule(rule_index, name, colorspace, pattern,
                                      extension)
                rule_index += 1
        config.setFileRules(file_rules)

    if data.viewing_rules:
        viewing_rules = ocio.ViewingRules()
        for i, viewing_rule in enumerate(reversed(data.viewing_rules)):
            logging.warning('Inserting a viewing rule is not supported yet!')
            # viewing_rules.insertRule()
        config.setViewingRules(viewing_rules)

    if data.default_view_transform is not None:
        config.setDefaultViewTransformName(data.default_view_transform)

    if validate:
        validate_config(config)

    if config_name is not None:
        with open(config_name, 'w') as file:
            file.write(config.serialize())

    return config
7385989fd0afedd9bc5e74de7e48b2c8f27f4b85
3,640,978
def svn_stringbuf_from_aprfile(*args):
    """svn_stringbuf_from_aprfile(svn_stringbuf_t result, apr_file_t file, apr_pool_t pool) -> svn_error_t"""
    # apply() was removed in Python 3; argument unpacking is the equivalent.
    return _core.svn_stringbuf_from_aprfile(*args)
d9faccd861d5382593988c1e2585207e0b5fa89f
3,640,979
from pathlib import Path def Arrow_Head_A (cls, elid = "SVG:Arrow_Head_A", design_size = 12, ref_x = None, stroke = "black", marker_height = 6, marker_width = 6, fill = "white", fill_opacity = 1, ** kw) : """Return a marker that is an arrow head with an A-Shape. >>> mrk = Marker.Arrow_Head_A () >>> svg = Document (Root (view_box="0 0 1000 500")) >>> svg.add (Defs (mrk)) >>> svg.add (Rect (x = 5, y = 5, width = 990, height = 490, fill = "none", stroke = "orange", stroke_width = 5)) >>> svg.add (Path (fill = "none", stroke = "red", stroke_width = 25, marker_end = "url(#SVG:Arrow_Head_A)", d = "M 100 200 L 500 200 900 400")) >>> svg.add (Path (fill = "none", stroke = "blue", stroke_width =10, marker_start = "url(#SVG:Arrow_Head_A)", d = "M 100 100 L 500 100 900 50")) >>> svg.write_to_xml_stream () <?xml version="1.0" encoding="utf-8" standalone="yes"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg version="1.1" viewBox="0 0 1000 500" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" > <defs> <marker id="SVG:Arrow_Head_A" fill="none" markerHeight="6" markerUnits="strokeWidth" markerWidth="6" orient="auto" refX="0" refY="6" stroke="black" viewBox="0 0 12 12" > <path d="m 0,9.0 0,-6.0 6.0,3.0 -6.0,3.0 z" fill="white" fill-opacity="1" stroke="none" > </path> <path d="m 0,9.0 5.0,-3.0 -5.0,-3.0"> </path> <path d="m 2.0,4.0 0,4.0"> </path> </marker> </defs> <rect fill="none" height="490" stroke="orange" stroke-width="5" width="990" x="5" y="5" /> <path d="M 100 200 L 500 200 900 400" fill="none" marker-end="url(#SVG:Arrow_Head_A)" stroke="red" stroke-width="25" > </path> <path d="M 100 100 L 500 100 900 50" fill="none" marker-start="url(#SVG:Arrow_Head_A)" stroke="blue" stroke-width="10" > </path> </svg> """ # modifying design size will draw with different line-strength # compared to the shape size = design_size size_2 = size // 2 scope = Scope () if ref_x is None : ref_x = 0 result = cls \ ( Path ( d = "m %s,%s 0,%s %s,%s %s,%s z" % ( 0 , size * 3 / 4. , -(size / 2.) , size / 2. , size / 4. , -(size / 2.) , size / 4. ) , fill = fill , fill_opacity = fill_opacity , stroke = "none" ) , Path ( d = "m %s,%s %s,%s %s,%s" % ( 0 , size * 3 / 4. , size * 5 / 12. , -(size / 4.) , -(size * 5 / 12.) , -(size / 4.) ) ) , Path ( d = "m %s,%s 0,%s" % ( size / 6. , size / 3. , size / 3. ) ) , elid = elid , fill = "none" , marker_units = "strokeWidth" , marker_height = marker_height , marker_width = marker_width , orient = "auto" , ref_x = ref_x , ref_y = size_2 , stroke = stroke , view_box = (0, 0, size, size) , ** kw ) return result
661409c1ed37e33e9aea306b1c5b8d2a369bbaf2
3,640,980
def signup() -> Response | str | tuple[dict[str, str | int], int]: """Sign up""" # Bypass if user is logged in if current_user.is_authenticated: return redirect(url_for("home")) # Process user data try: # Return template if request.method is GET assert request.method != "GET" # Process form username, password, remember = _get_auth_form() assert username and password # Check if user with given username already exists if database.get_instance(models.User, username=username): flash( f"User with {username} username already exists.<br>" f'Go to <a href="{url_for("auth.login")}">login page</a>.' ) assert False except AssertionError: return render_template("signup.html") # Add user to database and login user = database.add_instance( models.User, lambda i: i.set_password(password), username=username, ) login_user(user, remember=remember) # Return json response or redirect to home if request.form.get("raw"): response = { "info": f"Successfully signed up as {username}.", "status": 200, } return response, 200 return redirect(url_for("home"))
9496c11a9015b69c7cf2f89d19a45f93405f5dfe
3,640,981
import copy def evaluate_all_configs(hparams, agent_model_dir): """Evaluate the agent with multiple eval configurations.""" def make_eval_hparams(hparams, policy_to_action, max_num_noops): hparams = copy.copy(hparams) hparams.add_hparam("num_agents", hparams.eval_num_agents) hparams.add_hparam("policy_to_actions_lambda", { "sample": lambda policy: policy.sample(), "mode": lambda policy: policy.mode() }[policy_to_action]) hparams.max_num_noops = max_num_noops return hparams metrics = {} # Iterate over all combinations of picking actions by sampling/mode and # whether to do initial no-ops. for policy_to_action in ("mode", "sample"): for max_num_noops in (hparams.eval_max_num_noops, 0): eval_hparams = make_eval_hparams(hparams, policy_to_action, max_num_noops) scores = evaluate_single_config(eval_hparams, agent_model_dir) for (score, clipped) in zip(scores, (True, False)): metric_name = "mean_reward/eval/{}_{}_max_noops_{}".format( policy_to_action, max_num_noops, "clipped" if clipped else "unclipped" ) metrics[metric_name] = score return metrics
107fb692adec1ce7dbb580c30e3e7b0402874054
3,640,982
from typing import List, Tuple

# Type alias assumed from the surrounding module: a matrix is a list of rows.
Matrix = List[List[float]]


def shape(a: Matrix) -> Tuple[int, int]:
    """ Returns the number of rows and columns of A """
    num_rows = len(a)
    num_cols = len(a[0]) if a else 0  # number of elements in the first row (i.e. columns), if it exists
    return num_rows, num_cols
24ef9045e86b6027f76ddf57bf5eba44553798c5
3,640,983
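A tiny example of the row/column convention:

# Rows come from the outer list, columns from the first row.
assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3)
assert shape([]) == (0, 0)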
def solve(A, b):
    """
    :param A: Matrix R x C
    :param b: Vector R
    :return: Vector C 'x' solving Ax=b

    >>> M = Mat(({'a', 'b', 'c', 'd'}, {'A', 'B', 'C', 'D'}), { \
        ('a', 'A'): one, ('a', 'B'): one, ('a', 'D'): one, \
        ('b', 'A'): one, ('b', 'D'): one, \
        ('c', 'A'): one, ('c', 'B'): one, ('c', 'C'): one, ('c', 'D'): one, \
        ('d', 'C'): one, ('d', 'D'): one \
        })
    >>> v = Vec(M.D[0], {'a': one, 'c': one})
    >>> solve(M, v)
    """
    M = transformation(A)
    U = M * A
    col_label_list = sorted(A.D[1])
    U_rows_dict = mat2rowdict(U)
    row_list = [U_rows_dict[i] for i in sorted(U_rows_dict)]
    return echelon_solve(row_list, col_label_list, M * b)
b25a762ee8f8229d1bc573c828a7151770d3240c
3,640,984
def token_groups(self): """The groups the Token owner is a member of.""" return self.created_by.groups
9db411660db1def09b8dc52db800ca4c09a38cce
3,640,985
import requests def get_html_content_in_text(url): """ Grab all the content in webpage url and return it's content in text. Arguments: url -- a webpage url string. Returns: r.text -- the content of webpage in text. """ r = requests.get(url) return r.text
fd8ddc992f34c186051ca8985ffb110c50004970
3,640,986
def subscribe(): """Subscribe new message""" webhook_url = request.form.get("webhook_url") header_key = request.form.get("header_key") header_value = request.form.get("header_value") g.driver.subscribe_new_messages(NewMessageObserver(webhook_url, header_key, header_value)) return jsonify({"success": True})
97a47fb298bbc0bac3e333037210556525dd837f
3,640,987
def SegAlign(ea, alignment): """ Change alignment of the segment @param ea: any address in the segment @param alignment: new alignment of the segment (one of the sa... constants) @return: success (boolean) """ return SetSegmentAttr(ea, SEGATTR_ALIGN, alignment)
c0c380e194fbed43b87be81108eecf864809c447
3,640,988
import requests


def create_bitlink(logger, headers='', long_url='google.com'):
    """
    Create a short link from a long URL via the Bitly API.

    :param logger: logger object
    :param headers: headers carrying the Generic Access Token generated on the Bitly site
    :param long_url: the link to shorten
    :return: the created short link
    """
    url_template = 'https://api-ssl.bitly.com/v4/{}'
    user, bit = ['user', 'bitlinks']
    with requests.Session() as s:
        bitl_user_info = s.get(url_template.format(user), headers=headers)
        logger.info(f'Fetching the group for the user, response: {bitl_user_info.json()}')
        group_guid = bitl_user_info.json()['default_group_guid']
        payload = {'group_guid': group_guid, 'title': 'shortlink', 'long_url': long_url}
        response = s.post(url_template.format(bit), json=payload, headers=headers)
        bitlink = response.json()['id']
    return bitlink
be5c7882a8577c8d406412c790f3d5fbcbd11019
3,640,989
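A hedged call sketch; the bearer token below is a placeholder and must be replaced with a real Bitly Generic Access Token.

# Hedged call sketch; the token is a placeholder.
import logging

logging.basicConfig(level=logging.INFO)
headers = {'Authorization': 'Bearer <GENERIC_ACCESS_TOKEN>'}

short = create_bitlink(logging.getLogger(__name__), headers=headers,
                       long_url='https://example.com/some/long/path')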
import numpy def compute_neq(count_mat): """ Compute the Neq for each residue from an occurence matrix. Parameters ---------- count_mat : numpy array an occurence matrix returned by `count_matrix`. Returns ------- neq_array : numpy array a 1D array containing the neq values """ # get the frequency matrix freq = utils.compute_freq_matrix(count_mat) # Compute neq neq_array = numpy.apply_along_axis(_neq_per_residue, 1, freq) return neq_array
e3d738eb1c8ed58a3c4d4a4efc5323930d03be1f
3,640,990
import optparse def _GetOptionsParser(): """Get the options parser.""" parser = optparse.OptionParser(__doc__) parser.add_option('-i', '--input', dest='inputs', action='append', default=[], help='One or more input files to calculate dependencies ' 'for. The namespaces in this file will be combined with ' 'those given with the -n flag to form the set of ' 'namespaces to find dependencies for.') parser.add_option('-n', '--namespace', dest='namespaces', action='append', default=[], help='One or more namespaces to calculate dependencies ' 'for. These namespaces will be combined with those given ' 'with the -i flag to form the set of namespaces to find ' 'dependencies for. A Closure namespace is a ' 'dot-delimited path expression declared with a call to ' 'goog.provide() (e.g. "goog.array" or "foo.bar").') parser.add_option('--root', dest='roots', action='append', default=[], help='The paths that should be traversed to build the ' 'dependencies.') parser.add_option('-o', '--output_mode', dest='output_mode', type='choice', action='store', choices=['list', 'script', 'compiled'], default='list', help='The type of output to generate from this script. ' 'Options are "list" for a list of filenames, "script" ' 'for a single script containing the contents of all the ' 'files, or "compiled" to produce compiled output with ' 'the Closure Compiler. Default is "list".') parser.add_option('-c', '--compiler_jar', dest='compiler_jar', action='store', help='The location of the Closure compiler .jar file.') parser.add_option('-f', '--compiler_flags', dest='compiler_flags', default=[], action='append', help='Additional flags to pass to the Closure compiler. ' 'To pass multiple flags, --compiler_flags has to be ' 'specified multiple times.') parser.add_option('--output_file', dest='output_file', action='store', help=('If specified, write output to this path instead of ' 'writing to standard output.')) return parser
e1ec0530357ad3bebbac80c86b9d9b1010e6688c
3,640,991
import numpy as np
import tensorflow as tf


def spikalize_img(experiment, image, label):
    """
    Transform an image to spikes. Each pixel spikes with a Poisson-style
    rate proportional to its brightness.

    :param experiment: object carrying `timesteps` and `max_rate` settings
    :param image: input image array with pixel values in [0, 255]
    :param label: the image label, passed through unchanged
    :return: (spiked image of shape (timesteps, *image.shape), label)
    """
    image_shape = np.append(np.array(experiment.timesteps), np.array(image.shape))
    rand = tf.random.uniform(shape=image_shape)
    spiked_img = tf.cast(image / 255 * experiment.max_rate > rand, tf.float32)

    return spiked_img, label
7159334b40c3841977a0772ac25c71a934d268ac
3,640,992
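A minimal rate-coding sketch; the function only reads `timesteps` and `max_rate` from the experiment object, so a SimpleNamespace stands in for the real one.

# Minimal sketch: SimpleNamespace stands in for the real experiment object.
import numpy as np
from types import SimpleNamespace

experiment = SimpleNamespace(timesteps=10, max_rate=0.5)
image = np.random.randint(0, 256, size=(28, 28)).astype(np.float32)

spikes, label = spikalize_img(experiment, image, label=3)
print(spikes.shape)  # (10, 28, 28)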
def update_security_schemes(spec, security, login_headers, security_schemes,
                            unauthorized_schema):
    """Patch OpenAPI spec to include security schemes.

    Args:
        spec: OpenAPI spec dictionary
        security: security requirement list applied to the whole API
        login_headers: response headers returned by the login endpoint
        security_schemes: securitySchemes mapping for the components section
        unauthorized_schema: schema describing the unauthorized error response
            (currently unused by this function)

    Returns:
        Patched spec
    """
    # login_headers = {'Set-Cookie':
    #                  {'schema':
    #                   {'type': 'string',
    #                    'example': 'session=abcde12345; Path=/; HttpOnly'}}}
    # security_schemes = {'cookieAuth': {'description': 'Session Cookie',
    #                                    'type': 'apiKey',
    #                                    'in': 'cookie',
    #                                    'name': 'session'}}
    # unauthorized_schema = {'UnauthorizedError':
    #                        {'description': "The auth cookie isn't present",
    #                         'properties':
    #                         {'schema': {'type': 'string', 'example': 'Unauthorized'}}}}
    spec["components"]["securitySchemes"] = security_schemes
    spec["security"] = security
    spec["paths"]["/login"]["post"]["responses"][200]["headers"] = login_headers.copy()
    return spec
1ecb5cc3a121fc151794e4e24cd4aca4bc07ce46
3,640,993
def get_geckodriver_url(version): """ Generates the download URL for current platform , architecture and the given version. Supports Linux, MacOS and Windows. :param version: the version of geckodriver :return: Download URL for geckodriver """ platform, architecture = get_platform_architecture() return f'https://github.com/mozilla/geckodriver/releases/download/{version}' \ f'/geckodriver-{version}-{platform}{architecture}.tar.gz'
9d71728c551c67e86a61c3b870728bc70cad48ba
3,640,994
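On 64-bit Linux, where `get_platform_architecture()` is assumed to return `('linux', '64')`, the generated URL looks like this:

# Assuming get_platform_architecture() returns ('linux', '64'):
url = get_geckodriver_url('v0.29.0')
# https://github.com/mozilla/geckodriver/releases/download/v0.29.0/geckodriver-v0.29.0-linux64.tar.gz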
def get_graph_size(depth: int):
    """Return how many nodes are in a graph of the given depth in which every
    node is fully expanded."""
    size = 1
    cur_size = 1
    ln = len(expand_sizes)
    for i in range(min(ln, depth)):
        cur_size *= expand_sizes[i]
        size += cur_size
    if ln < depth:
        size += cur_size * later_expand_size * (depth - ln)
    return size
00f2a2ce714550785e99c0d193f47df05cc30b68
3,640,995
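`expand_sizes` and `later_expand_size` are module-level globals not shown in the snippet; the sketch below assumes stand-in values to show the counting.

# Hedged sketch: expand_sizes and later_expand_size are module globals
# in the original; stand-in values are assumed here.
expand_sizes = [3, 2]
later_expand_size = 2

# depth 3: 1 + 3 + 3*2 + (3*2)*2 = 22 nodes
assert get_graph_size(3) == 22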
def inherits_from(obj, parent): """ Takes an object and tries to determine if it inherits at *any* distance from parent. Args: obj (any): Object to analyze. This may be either an instance or a class. parent (any): Can be either instance, class or python path to class. Returns: inherits_from (bool): If `parent` is a parent to `obj` or not. Notes: What differs this function from e.g. `isinstance()` is that `obj` may be both an instance and a class, and parent may be an instance, a class, or the python path to a class (counting from the evennia root directory). """ if callable(obj): # this is a class obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.mro()] else: obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.__class__.mro()] if isinstance(parent, str): # a given string path, for direct matching parent_path = parent elif callable(parent): # this is a class parent_path = "%s.%s" % (parent.__module__, parent.__name__) else: parent_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__) return any(1 for obj_path in obj_paths if obj_path == parent_path)
9d7e0665b4e4fe2a3f7c136436a2502c8b72527c
3,640,996
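A quick example of the flexibility described in the docstring; note that path matching includes the defining module, so classes defined in a script live under `__main__`.

# obj may be an instance or a class; parent may be instance, class, or path.
class Base:
    pass

class Child(Base):
    pass

assert inherits_from(Child, Base)
assert inherits_from(Child(), Base)
assert not inherits_from(Base, Child)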
from typing import Optional
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

import pandas as pd


def load_df_from_googlesheet(
    url_string: str,
    skiprows: Optional[int] = 0,
    skipfooter: Optional[int] = 0,
) -> pd.DataFrame:
    """Load a Pandas DataFrame from a google sheet.

    Given a file object, try to read the content as a CSV file and transform
    into a data frame. The skiprows and skipfooter are number of lines to skip
    from the top and bottom of the file (see read_csv in pandas).

    It also tries to convert as many columns as possible to date/time format
    (testing the conversion on every string column).

    :param url_string: URL where the file is available
    :param skiprows: Number of lines to skip at the top of the document
    :param skipfooter: Number of lines to skip at the bottom of the document
    :return: Resulting data frame, or an Exception.
    """
    # Process the URL provided by google. If the URL is obtained using the
    # GUI, it has as suffix /edit?[parameters]. This part needs to be
    # replaced by the suffix /export?format=csv
    # For example from:
    # https://docs.google.com/spreadsheets/d/DOCID/edit?usp=sharing
    # to
    # https://docs.google.com/spreadsheets/d/DOCID/export?format=csv&gid=0
    parse_res = urlparse(url_string)
    if parse_res.path.endswith('/edit'):
        qs_dict = parse_qs(parse_res.query)
        qs_dict['format'] = 'csv'

        new_fragment = parse_res.fragment
        if 'gid=' in parse_res.fragment:
            qs_dict['gid'] = parse_res.fragment.split('=')[1]
            new_fragment = ''

        url_string = urlunparse([
            parse_res.scheme,
            parse_res.netloc,
            parse_res.path.replace('/edit', '/export'),
            parse_res.params,
            urlencode(qs_dict, doseq=True),
            new_fragment,
        ])

    # Process the link using pandas read_csv
    return load_df_from_csvfile(url_string, skiprows, skipfooter)
adfcff1968eccfa44640b5e9a7e3143703284dfb
3,640,997
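A sketch of the URL rewrite this function performs; `DOCID` is a placeholder and `load_df_from_csvfile` is assumed to be defined alongside in the same module.

# Sketch of the URL rewrite; DOCID is a placeholder document id.
url = 'https://docs.google.com/spreadsheets/d/DOCID/edit?usp=sharing#gid=0'
df = load_df_from_googlesheet(url)
# Internally the URL is rewritten to:
# https://docs.google.com/spreadsheets/d/DOCID/export?usp=sharing&format=csv&gid=0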
def decomp(bits, dummies=default_dummies, width=default_width): """Translate 0s and 1s to dummies[0] and dummies[1].""" words = (dummies[i] for i in bits) unwrapped = ' '.join(words) return wrap_source(unwrapped, width=width)
a6540cc90412b9e72b62c57fe6828b45ad5df593
3,640,998
def get_word_node_attrs(word: Word) -> WordNodeAttrs: """Create the graph's node attribute for a `Word`. Build an attribute dict with the word's features. Note that we're using the term `Word` instead of `Token` to be closer to the implementation of these data structures in stanza. From stanza's documentation, a `Token` might hold more the a single word in the case of multi-word tokens. For more information please refer to 'https://stanfordnlp.github.io/stanza/data_objects.html#token'. Arguments: word: Word A stanza-annotated word. Return: WordNodeAttrs A dictionary containing the word's features to be used by networkx's feature graph. """ # Changing the color for the sentence's head token if word.head == 0: color = GraphNodeColor.HEAD.value else: color = GraphNodeColor.TOKEN.value return { 'fname': word.sent.doc.fname, 'start_idx': word.parent.start_char, 'end_idx': word.parent.end_char, 'text': word.text, 'upos': word.upos, 'lemma': word.lemma, 'label': word.text, # for PyVis 'color': color # for pyvis }
143a00206cfa8d98419f2d0e21e1013ea6dd02ab
3,640,999