content
stringlengths
22
815k
id
int64
0
4.91M
def parse_mint_studies_response(xml_raw) -> List[MintStudy]:
    """Parse the xml response to a MINT find DICOM studies call

    Raises
    ------
    DICOMTrolleyError
        If parsing fails
    """
    try:
        root = ElementTree.fromstring(xml_raw)
    except ParseError as e:
        raise DICOMTrolleyError(
            f"Could not parse server response as MINT "
            f"studies. Response was: {xml_raw}"
        ) from e
    # One MintStudy per matching element in the parsed document.
    return [
        MintStudy.init_from_element(element)
        for element in root.findall(MintStudy.xml_element)
    ]
25,900
def lookup_no_interp(x, dx, xi, y, dy, yi):
    """
    Return the indices for the closest values for a look-up table

    Choose the closest point in the grid

    x  ... range of x values
    xi ... interpolation value on x-axis
    dx ... grid width of x ( dx = x[1]-x[0])
    (same for y)

    return:  idxX and idxY
    """
    # BUG FIX 1: the "round to nearest" test previously compared the
    # fractional index against dx/2, which is only correct when dx == 1;
    # the index xid is already in units of grid cells, so the correct
    # threshold is 0.5.
    # BUG FIX 2: xi == x[0] previously fell through to the far-end branch
    # (returning len(x) - 1); the left boundary is now handled inclusively.
    if x[0] <= xi < x[-1]:
        xid = (xi - x[0]) / dx
        xid_floor = np.floor(xid)
        if xid - xid_floor < 0.5:
            idxX = xid_floor
        else:
            idxX = xid_floor + 1
    elif xi < x[0]:
        idxX = 0
    else:
        idxX = len(x) - 1

    if y[0] <= yi < y[-1]:
        yid = (yi - y[0]) / dy
        yid_floor = np.floor(yid)
        if yid - yid_floor < 0.5:
            idxY = yid_floor
        else:
            idxY = yid_floor + 1
    elif yi < y[0]:
        idxY = 0
    else:
        idxY = len(y) - 1

    return idxX, idxY
25,901
def index_xml(directory, db):
    """
    Index a list of XML files of publication information.

    :param directory: path of the directory containing the XML files
    :param db: database handle that the indexed records are written to
    """
    # Thin wrapper: all traversal/parsing happens in the project's `xml`
    # helper module; the side effects land in `db`.
    xml.index_directory(directory, db)
25,902
def advertisement_data_complete_builder(list_of_ad_entries):
    """ Generate a finalized advertisement data value from a list of AD
    entries that can be passed to the BLEConnectionManager to
    set the advertisement data that is sent during advertising.

    :param list_of_ad_entries: List of AD entries (can be built using
        blesuite.utils.gap_utils.advertisement_data_entry_builder)
    :type list_of_ad_entries: [str,]
    :return: Finalized AD data
    :rtype: str
    """
    # Each AD entry is serialized as a one-byte length prefix followed by
    # the entry payload; concatenate all of them into one string.
    return "".join(chr(len(entry)) + entry for entry in list_of_ad_entries)
25,903
def process_vocab_table(vocab, vocab_size, vocab_threshold, vocab_lookup, unk, pad):
    """process vocab table"""
    # The special tokens always head the vocabulary; drop them from the
    # counted entries so they are not filtered or sorted with real words.
    # NOTE: this mutates the caller's dict, matching the original behavior.
    for special in (unk, pad):
        if special in vocab:
            del vocab[special]

    # Keep only words that occur at least `vocab_threshold` times.
    vocab = {word: count for word, count in vocab.items()
             if count >= vocab_threshold}
    # Optionally intersect with an external lookup vocabulary.
    if vocab_lookup is not None:
        vocab = {word: count for word, count in vocab.items()
                 if word in vocab_lookup}

    # Most frequent first, special tokens prepended, truncated to size.
    sorted_words = sorted(vocab, key=vocab.get, reverse=True)
    vocab_table = ([unk, pad] + sorted_words)[:vocab_size]
    vocab_size = len(vocab_table)

    # Build the forward (word -> id) and inverse (id -> word) TF tables;
    # id 0 (== unk) is the default for out-of-vocabulary lookups.
    vocab_index = tf.contrib.lookup.index_table_from_tensor(
        mapping=tf.constant(vocab_table), default_value=0)
    vocab_inverted_index = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=tf.constant(vocab_table), default_value=unk)

    return vocab_table, vocab_size, vocab_index, vocab_inverted_index
25,904
def get_mag_msg(stamp, mag):
    """
    Get magnetometer measurement as ROS sensor_msgs::MagneticField
    """
    mag_msg = MagneticField()
    # Header: timestamp from the caller, frame fixed to the IMU link.
    mag_msg.header.stamp = stamp
    mag_msg.header.frame_id = '/imu_link'
    # Unpack the (x, y, z) field components from `mag`.
    mag_msg.magnetic_field.x, mag_msg.magnetic_field.y, mag_msg.magnetic_field.z = mag
    return mag_msg
25,905
async def UserMeAPI( current_user: User = Depends(User.getCurrentUser), ): """ 現在ログイン中のユーザーアカウントの情報を取得する。<br> JWT エンコードされたアクセストークンがリクエストの Authorization: Bearer に設定されていないとアクセスできない。 """ # 一番よく使う API なので、リクエスト時に twitter_accounts テーブルに仮のアカウントデータが残っていたらすべて消しておく ## Twitter 連携では途中で連携をキャンセルした場合に仮のアカウントデータが残置されてしまうので、それを取り除く if await TwitterAccount.filter(icon_url='Temporary').count() > 0: await TwitterAccount.filter(icon_url='Temporary').delete() current_user = await User.filter(id=current_user.id).get() # current_user のデータを更新 await current_user.fetch_related('twitter_accounts') return current_user
25,906
def process_resolvers(context, template=templates.DEFAULT_TEMPLATE):
    """
    Perform second stage path resolution based on template rules

    Args:
        context (dict): The context to perform path resolution on
        template (Template): The template
    """
    namespace = template.namespace
    container = context[context['container_type']]

    # Bail out unless the container was tagged with a template name under
    # this template's namespace (EAFP: any missing key means "not tagged").
    try:
        template_name = container['info'][namespace]['template']
    except KeyError:
        return

    # Run every resolver registered for that template name.
    for resolver in template.resolver_map.get(template_name, []):
        resolver.resolve(context)
25,907
def test_unsupported_version_with_number():
    """
    Test the decorator when supplying a non existing version with a number.
    """
    # v3 was never registered for `your_function_nr`, so resolving it must
    # raise InvalidVersionException.
    with raises(InvalidVersionException):
        your_function_nr.v3()
25,908
def midpt(pt1, pt2):
    """
    Get the midpoint for two arbitrary points in space.
    """
    # Average each coordinate pair, then build a Rhino 3D point.
    mid_x = (pt1[0] + pt2[0])/2
    mid_y = (pt1[1] + pt2[1])/2
    mid_z = (pt1[2] + pt2[2])/2
    return rg.Point3d(mid_x, mid_y, mid_z)
25,909
def dark_current_jh_task(det_name):
    """JH version of single sensor execution of the dark current task."""
    from collections import defaultdict
    from astropy.io import fits
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, dark_current_task,\
        plot_ccd_total_noise, get_mask_files
    from bot_data_handling import most_common_dark_files
    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')
    # Locate the dark-current frames produced by the acquisition job.
    dark_files \
        = siteUtils.dependency_glob(glob_pattern('dark_current', det_name),
                                    acq_jobname=acq_jobname,
                                    description="Dark current frames:")
    if not dark_files:
        print("dark_current_task: No dark files found for detector", det_name)
        return None
    # Keep the full file list for the integration-time linear fit, but run
    # the main task on the most common exposure only.
    dark_files_linear_fit = list(dark_files)
    dark_files = most_common_dark_files(dark_files)
    if len(dark_files_linear_fit) == len(dark_files):
        # These data only have one integration time, so skip linear
        # fit of dark current signal vs integration time.
        dark_files_linear_fit = None
    mask_files = get_mask_files(det_name)
    # Reuse the eotest results file produced by the read-noise job.
    eotest_results_file \
        = siteUtils.dependency_glob('{}_eotest_results.fits'.format(file_prefix),
                                    jobname='read_noise_BOT')[0]
    gains = get_amplifier_gains('{}_eotest_results.fits'.format(file_prefix))
    bias_frame = bias_filename(run, det_name)
    dark_curr_pixels, dark95s \
        = dark_current_task(run, det_name, dark_files, gains,
                            mask_files=mask_files, bias_frame=bias_frame,
                            dark_files_linear_fit=dark_files_linear_fit)
    plot_ccd_total_noise(run, det_name, dark_curr_pixels, dark95s,
                         eotest_results_file)
    return dark_curr_pixels, dark95s
25,910
def change_title(mri):
    """
    Change Title

    Change title of current MRI window.
    """
    ui = mri.ui
    # Prompt with the current title pre-filled; `state` is falsy when the
    # user cancels the dialog.
    txt, state = ui.dlgs.dialog_input("Input new title name.", "", ui.frame.getTitle())
    if state:
        ui.frame.setTitle(txt)
25,911
def get_sid_list(video_source_filename):
    """This returns a list of subtitle ids in the source video file.

    TODO: Also extract ID_SID_nnn_LANG to associate language.
    Not all DVDs include this.

    :param video_source_filename: path to the video file to probe
    :return: list of subtitle id strings, sorted in ascending numeric order
    """
    # SECURITY NOTE: the filename is interpolated into a shell command
    # string; a name containing a single quote could inject shell commands.
    # Consider shlex.quote() if untrusted filenames are possible.
    cmd = "mplayer '%s' -vo null -ao null -frames 0 -identify" % video_source_filename
    (command_output, exitstatus) = run(cmd)
    idl = re.findall("ID_SUBTITLE_ID=([0-9]+)", command_output)
    # BUG FIX: the ids are digit strings; a plain lexicographic sort orders
    # "10" before "2". Sort numerically instead.
    idl.sort(key=int)
    return idl
25,912
def test_default_not_required():
    """Values with a default are not required"""
    # A Value constructed with an explicit default must report
    # required == False, since the default can always be used.
    v = Value(KEY, default='s')
    assert v.required is False
25,913
def load_dir(path):
    """Get an iterable of PySource from the given directory."""
    # Walk the whole tree and yield a parsed PySource for every *.py file.
    for root, subdirs, files in os.walk(abspath(path)):
        for fname in files:
            if not fname.endswith('.py'):
                continue
            file_name = abspath(os.path.join(root, fname))
            with open(file_name, 'r') as file_handle:
                source = file_handle.read()
            yield PySource(parse.parse(source), file_name)
25,914
def test_backup(tmpdir, monkeypatch, name, existing_files, expected):
    """
    Ensure the DeferredFileWriter backs up existing files correctly, and at
    the correct moment
    """
    monkeypatch.chdir(tmpdir)
    # Seed the working directory: each pre-existing file holds its index so
    # backups can later be matched to their original content.
    for idx, file in enumerate(existing_files):
        with open(file, 'w') as handle:
            handle.write(str(idx))
    writer = DeferredFileWriter()
    with writer.open(name, 'w') as handle:
        handle.write("new {}".format(name))
    # Commit the deferred write; presumably the backup happens here — the
    # assertions below verify the on-disk result.
    writer.write()
    # The target file must now contain the new content.
    assert Path(name).is_file()
    with open(name) as file:
        assert file.read() == "new {}".format(name)
    # Each expected backup file must exist and still carry the content of
    # the original it replaced.
    for idx, name in enumerate(expected):
        assert Path(name).is_file()
        with open(name) as file:
            assert file.read() == str(idx)
25,915
def test_1_10_15_distribution():
    """
    Test connector using Apache Airflow 1.10.15 built distribution.

    NOTE(review): this downloads the wheel from AIRFLOW_1_10_15_PKG_URL, so
    it requires network access.
    """
    apc = AirflowPackageCatalogConnector(AIRFLOW_SUPPORTED_FILE_TYPES)
    # get catalog entries for the specified distribution
    ces = apc.get_catalog_entries({"airflow_package_download_url": AIRFLOW_1_10_15_PKG_URL})
    # this distribution should contain 37 Python scripts with operator definitions
    assert len(ces) == 37
    # each entry must contain two keys
    for entry in ces:
        # built distribution package file name
        assert entry.get("airflow_package") == "apache_airflow-1.10.15-py2.py3-none-any.whl"
        # a Python script
        assert entry.get("file", "").endswith(".py")
    # fetch and validate the first entry
    ce = apc.get_entry_data({"file": ces[0]["file"]}, {})
    assert ce is not None
    assert isinstance(ce, AirflowEntryData)
    assert ce.definition is not None
    assert ce.package_name.startswith("airflow.operators.")
25,916
def main() -> None:
    """impost0r.py main function

    Parses CLI options, clones the user's repository into a temp dir,
    computes the commit-count difference against the donor account, then
    creates and pushes backdated commits to mirror the donor's activity.
    """
    parser = argparse.ArgumentParser(prog=PROGNAME)
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--version', action='version',
                        version='%(prog)s v' + VERSION)
    args = parser.parse_args()
    # -v / -vv / -vvv raise the log level step by step.
    if args.verbose == 1:
        logger.setLevel(logging.WARNING)
    elif args.verbose == 2:
        logger.setLevel(logging.INFO)
    elif args.verbose > 2:
        logger.setLevel(logging.DEBUG)

    config: Dict[str, str] = cli_get_configuration()

    tempdir = tempfile.TemporaryDirectory()
    logger.info('Using tempdir=%s', tempdir.name)
    repo_url = 'https://github.com/' + config['username'] + '/' + config['repo']
    repo_tmpdir = tempdir.name + '/' + config['repo']
    repo_data_file = repo_tmpdir + '/' + config['data_file']
    # Push URL embeds the credentials for authenticated pushes.
    push_url = 'https://'\
        + config['username']\
        + ':'\
        + config['password']\
        + '@github.com/'\
        + config['username']\
        + '/'\
        + config['repo']
    logger.info('Cloning %s to %s', repo_url, repo_tmpdir)

    active_years: List[bytes] = get_years_of_activity(config['donor'])
    # COMMENT THIS OUT IF You want full commit history
    active_years_user: List[bytes] = get_years_of_activity(config['username'])
    non_active_years = list(set(active_years) ^ set(active_years_user))
    # BUG FIX: the filtered years were previously wrapped in an extra pair
    # of brackets (`[[year for ...]]`), producing a nested single-element
    # list. That made the `if not active_years` guard below unreachable and
    # passed a list-of-lists to get_contribution_data.
    active_years = [year for year in active_years
                    if year not in non_active_years]
    if not active_years:
        logger.error('No yearly data found for %s.', config['donor'])
        sys.exit(0)
    # NOTE: There seems to be a weird caching issue here.
    #       Sometimes newly created commits in the user
    #       repo are not visible for days in the list
    #       of years of activity on the overview page
    #       which leads to unnecessary commits being
    #       created on consecutive runs.
    #       Therefore we only get the years of activity
    #       of the donor user and use those also to
    #       get activity data for the target user.
    #       Otherwise running impost0r
    #       in quick succession with the same
    #       configuration can have unintended
    #       consequences and create freakishly
    #       large repositories.
    print('Getting activity for {}...'.format(config['username']))
    data_user: Dict[str, int] = get_contribution_data(config['username'],
                                                      active_years)
    print('Getting activity for {}...'.format(config['donor']))
    data_donor: Dict[str, int] = get_contribution_data(config['donor'],
                                                       active_years)
    if not data_donor:
        print('No activity found for {}.'.format(config['donor']))
        sys.exit(0)

    print('Calculating commit data...')
    # Commits still needed per day = donor activity minus user activity.
    data_repo = diff_contribution_data(data_user, data_donor)
    if not data_repo:
        print('{} does not seem to have more contributions than {}.'.format(
            config['donor'], config['username']
        ))
        print('Nothing to do; exiting.')
        sys.exit(0)

    author_data = config['username'].encode()\
        + ' <'.encode()\
        + config['email'].encode()\
        + '>'.encode()
    logger.info('Using author data: \'%s\'', author_data.decode())

    print('Cloning {}...'.format(repo_url))
    devnull = open(os.devnull, 'w')
    err_stream = getattr(devnull, 'buffer', None)
    repo = porcelain.clone(repo_url, repo_tmpdir, errstream=err_stream)

    print('Creating and pushing new commits ...')
    # NOTE: GitHub will not correctly update the
    #       calendar if a repository with more than 1000
    #       new commits is pushed. Only the most recent
    #       1000 will be displayed in the calendar.
    #       Workaround: create and push commits in
    #       several turns. And wait a couple of seconds
    #       between pushes to let GitHub do its magic.
    total_commit_count = sum(data_repo.values())
    commits_generated = 0
    for (commit_date, commit_count) in data_repo.items():
        for commit_num in range(0, commit_count):
            # Spread same-day commits by one second each, anchored at 9 AM.
            commit_stamp = calendar.timegm(
                time.strptime(commit_date, '%Y-%m-%d')) + commit_num + SECONDS_9AM
            # Touch the data file so every commit has a unique change.
            with open(repo_data_file, 'w') as data_file:
                data_file.write(commit_date + str(commit_num))
            porcelain.add(repo, repo_data_file)
            commits_generated += 1
            repo.do_commit(
                message=commit_date.encode(),
                committer=author_data,
                author=author_data,
                commit_timestamp=commit_stamp)
            if not commits_generated % COMMITS_PER_PROGRESS_BAR_UPDATE:
                progress(commits_generated, total_commit_count)
            if not commits_generated % COMMITS_PER_PUSH:
                logger.info('pushing...')
                do_push(repo_tmpdir, push_url, err_stream)
                # NOTE: A certain minimum wait between pushes seems
                #       to be necessary. Otherwise the activity data
                #       in the calendar will be displayed correctly
                #       but the list of years of activity will not
                #       be updated!
                time.sleep(SLEEP_BETWEEN_PUSHES)
    # Push whatever remains after the last full batch.
    if commits_generated % COMMITS_PER_PUSH:
        logger.info('final push')
        progress(commits_generated, total_commit_count)
        do_push(repo_tmpdir, push_url, err_stream)
    progress(commits_generated, total_commit_count)
    print('\nFinished')
    repo.close()
    tempdir.cleanup()
25,917
def tweetnacl_crypto_box_open(max_messagelength=256):
    """
    Build a symbolic-execution project/state for NaCl crypto_box_open.

    max_messagelength: maximum length of the message, in bytes. i.e., the
    symbolic execution will not consider messages longer than
    max_messagelength

    :return: (project, entry state) tuple
    """
    proj = tweetnaclProject()
    # Entry state for the box-open (decrypt + verify) function; buffer sizes
    # follow the crypto_box constants noted on each argument.
    state = funcEntryState(proj, "crypto_box_curve25519xsalsa20poly1305_tweet_open", [
        ("m", pointerToUnconstrainedPublic()),  # Output parameter, will hold plaintext, length 'clen'
        ("c", pointerToUnconstrainedPublic()),  # ciphertext: length 'clen'
        ("clen", publicValue()),  # length of ciphertext. Not a pointer
        ("n", pointerTo(secretArray(24), 24)),  # nonce, size crypto_box_NONCEBYTES
        ("pk", pointerTo(publicArray(32), 32)),  # public key, size crypto_box_PUBLICKEYBYTES
        ("sk", pointerTo(secretArray(32), 32))  # secret key, size crypto_box_SECRETKEYBYTES
    ])
    # Bound the symbolic ciphertext length to keep exploration tractable.
    state.add_constraints(getArgBVS(state, 'clen') <= max_messagelength)
    addDevURandom(state)
    return (proj, state)
25,918
def balanced_parentheses_checker(symbol_string):
    """Verify that a set of parentheses is balanced."""
    opening_symbols = '{[('
    closing_symbols = '}])'
    opening_symbols_stack = data_structures.Stack()
    balanced = True
    index = 0
    # Scan left to right, stopping early at the first mismatch.
    while index < len(symbol_string) and balanced:
        symbol = symbol_string[index]
        if symbol in '{[(':
            opening_symbols_stack.push(symbol)
        elif (not opening_symbols_stack.is_empty()
                and opening_symbols.index(opening_symbols_stack.peek())
                == closing_symbols.index(symbol)):
            # Closer matches the most recent opener: consume it.
            opening_symbols_stack.pop()
        else:
            # Closer with no matching opener (or empty stack).
            balanced = False
        index += 1
    # Balanced only if no mismatch occurred and every opener was closed.
    return balanced and opening_symbols_stack.is_empty()
25,919
def _wrap_outcoming(
    store_cls: type, wrapped_method: str, trans_func: Optional[callable] = None
):
    """Output-transforming wrapping of the wrapped_method of store_cls.
    The transformation is given by trans_func, which could be a one
    (trans_func(x) or two (trans_func(self, x)) argument function.

    Args:
        store_cls: The class that will be transformed
        wrapped_method: The method (name) that will be transformed.
        trans_func: The transformation function.

    Returns: Nothing. It transforms the class in-place

    >>> from on.trans import store_wrap
    >>> S = store_wrap(dict)
    >>> _wrap_outcoming(S, '_key_of_id', lambda x: f'wrapped_{x}')
    >>> s = S({'a': 1, 'b': 2})
    >>> list(s)
    ['wrapped_a', 'wrapped_b']
    >>> _wrap_outcoming(S, '_key_of_id', lambda self, x: f'wrapped_{x}')
    >>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b']
    >>> class A:
    ...     def __init__(self, prefix='wrapped_'):
    ...         self.prefix = prefix
    ...     def _key_of_id(self, x):
    ...         return self.prefix + x
    >>> _wrap_outcoming(S, '_key_of_id', A(prefix='wrapped_')._key_of_id)
    >>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b']
    >>>
    >>> S = store_wrap(dict)
    >>> _wrap_outcoming(S, '_obj_of_data', lambda x: x * 7)
    >>> s = S({'a': 1, 'b': 2})
    >>> list(s.values())
    [7, 14]
    """
    # If no transformation is given, leave the class untouched.
    if trans_func is not None:
        wrapped_func = getattr(store_cls, wrapped_method)
        # _has_unbound_self distinguishes a two-argument (self, x) callable
        # from a plain one-argument one — presumably by signature inspection;
        # see its definition.
        if not _has_unbound_self(trans_func):
            @wraps(wrapped_func)
            def new_method(self, x):
                # # Long form (for explanation)
                # super_method = getattr(super(store_cls, self), wrapped_method)
                # output_of_super_method = super_method(x)
                # transformed_output_of_super_method = trans_func(output_of_super_method)
                # return transformed_output_of_super_method
                return trans_func(
                    getattr(super(store_cls, self), wrapped_method)(x)
                )

        else:
            @wraps(wrapped_func)
            def new_method(self, x):
                # # Long form (for explanation)
                # super_method = getattr(super(store_cls, self), wrapped_method)
                # output_of_super_method = super_method(x)
                # transformed_output_of_super_method = trans_func(self, output_of_super_method)
                # return transformed_output_of_super_method
                return trans_func(
                    self, getattr(super(store_cls, self), wrapped_method)(x)
                )

        # Replace the method on the class in-place.
        setattr(store_cls, wrapped_method, new_method)
25,920
def quantize(x):
    """convert a float in [0,1] to an int in [0,255]"""
    # Scale to the byte range and clamp the top so inputs at (or slightly
    # above) 1.0 cannot exceed 255.
    scaled = math.floor(x * 255)
    return min(scaled, 255)
25,921
def initialize_logger(prefix):
    """
    Initialization of logging subsystem. Two logging handlers are brought up:
    'fh' which logs to a log file and 'ch' which logs to standard output.

    :param prefix: prefix that is added to the filename
    :return logger: return a logger instance
    """
    logger = logging.getLogger('charm-cli')
    logger.setLevel(logging.INFO)

    # Console handler: always attached.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)

    # File handler: best effort — fall back to console-only logging if the
    # working directory is not writable.
    try:
        if prefix:
            log_filename = '{}_charm-cli.log'.format(prefix)
        else:
            log_filename = 'charm-cli.log'
        fh = logging.FileHandler(log_filename, 'w')
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
    except IOError as error:
        logger.warning('WARNING: Cannot create log file! Run charm-cli from a directory to '
                       'which you have write access.')
        # BUG FIX: IOError has no `.msg` attribute, so the old
        # `logger.warning(error.msg)` raised AttributeError and masked the
        # real problem. Log the exception itself instead.
        logger.warning(str(error))

    return logger
25,922
def plex_test_cmd(bot, trigger):
    """Test if your configured options can reach Plex."""
    # plex_test returns None on failure (presumably reporting the error
    # itself — verify against its definition); only announce success here.
    plex = plex_test(bot, trigger)
    if plex is None:
        return
    else:
        bot.reply("Plex connection test successful.")
25,923
def interpolate(x, x_data, y_data, left_slope=0.0, right_slope=0.0, dtype=None, name=None):
  """Performs linear interpolation for supplied points.

  Given a set of knots whose x- and y- coordinates are in `x_data` and
  `y_data`, this function returns y-values for x-coordinates in `x` via
  piecewise linear interpolation.

  `x_data` must be strictly increasing but `y_data` don't need to be because
  we don't require the function approximated by these knots to be monotonic.

  #### Examples

  ```python
  x = [-10, -1, 1, 3, 6, 7, 8, 15, 18, 25, 30, 35]
  # `x_data` must be increasing, but `y_data` don't need to be.
  x_data = [-1, 2, 6, 8, 18, 30.0]
  y_data = [10, -1, -5, 7, 9, 20]

  result = interpolate(x, x_data, y_data)
  with tf.Session() as sess:
    print(sess.run(result))
    # [ 10, 10, 2.66666667, -2, -5, 1, 7, 8.4, 9, 15.41666667, 20, 20]
  ```

  Args:
    x: x-coordinates for which we need to get interpolation. A 1-D `Tensor`
      of real dtype.
    x_data: x coordinates. A 1-D `Tensor` of real dtype. Should be sorted in
      increasing order.
    y_data: y coordinates. A 1-D `Tensor` of real dtype. Should have the
      compatible shape as `x_data`.
    left_slope: The slope to use for extrapolation with x-coordinate smaller
      than the min `x_data`. It's a 0-D `Tensor`. If not supplied, the
      default will be 0, meaning constant extrapolation, i.e. extrapolated
      value will be the leftmost `y_data`.
    right_slope: The slope to use for extrapolation with x-coordinate greater
      than the max `x_data`. It's a 0-D `Tensor`. If not supplied, the
      default will be 0, meaning constant extrapolation, i.e. extrapolated
      value will be the rightmost `y_data`.
    dtype: Optional tf.dtype for `x`, x_data`, `y_data`, `left_slope` and
      `right_slope`. If not specified, the dtype of the inputs will be used.
    name: Python str. The name prefixed to the ops created by this function.
      If not supplied, the default name 'linear_interpolation' is used.

  Returns:
    A 1-D `Tensor` of real dtype corresponding to the x-values in `x`.
  """
  # TF1-style name scope wrapping all created ops.
  with tf.name_scope(
      name,
      default_name='linear_interpolation',
      values=[x, x_data, y_data, left_slope, right_slope]):
    x = tf.convert_to_tensor(x, dtype=dtype)
    x_data = tf.convert_to_tensor(x_data, dtype=dtype)
    # Broadcast y_data to x_data's shape so scalar/compatible inputs work.
    y_data = tf.broadcast_to(
        tf.convert_to_tensor(y_data, dtype=dtype), shape=tf.shape(x_data))
    left_slope = tf.convert_to_tensor(left_slope, dtype=dtype)
    right_slope = tf.convert_to_tensor(right_slope, dtype=dtype)

    # TODO(b/130141692): add batching support.
    x_data_is_rank_1 = tf.assert_rank(x_data, 1)
    with tf.compat.v1.control_dependencies([x_data_is_rank_1]):
      # Get upper bound indices for `x`.
      upper_indices = tf.searchsorted(x_data, x, side='left', out_type=tf.int32)
      x_data_size = tf.shape(x_data)[-1]
      # Flags marking points left of (or at) the first knot / right of the last.
      at_min = tf.equal(upper_indices, 0)
      at_max = tf.equal(upper_indices, x_data_size)

      # Create tensors in order to be used by `tf.where`.
      # `values_min` are extrapolated values for x-coordinates less than or
      # equal to `x_data[0]`.
      # `values_max` are extrapolated values for x-coordinates greater than
      # `x_data[-1]`.
      values_min = y_data[0] + left_slope * (
          x - tf.broadcast_to(x_data[0], shape=tf.shape(x)))
      values_max = y_data[-1] + right_slope * (
          x - tf.broadcast_to(x_data[-1], shape=tf.shape(x)))

      # `tf.where` evaluates all branches, need to cap indices to ensure it
      # won't go out of bounds.
      capped_lower_indices = tf.math.maximum(upper_indices - 1, 0)
      capped_upper_indices = tf.math.minimum(upper_indices, x_data_size - 1)
      x_data_lower = tf.gather(x_data, capped_lower_indices)
      x_data_upper = tf.gather(x_data, capped_upper_indices)
      y_data_lower = tf.gather(y_data, capped_lower_indices)
      y_data_upper = tf.gather(y_data, capped_upper_indices)

      # Nan in unselected branches could propagate through gradient
      # calculation, hence we need to clip the values to ensure no nan would
      # occur. In this case we need to ensure there is no division by zero.
      x_data_diff = x_data_upper - x_data_lower
      floor_x_diff = tf.where(at_min | at_max, x_data_diff + 1, x_data_diff)
      interpolated = y_data_lower + (x - x_data_lower) * (
          y_data_upper - y_data_lower) / floor_x_diff

      # Overwrite the out-of-range positions with the extrapolated values.
      interpolated = tf.where(at_min, values_min, interpolated)
      interpolated = tf.where(at_max, values_max, interpolated)
      return interpolated
25,924
def login(token_path: str) -> Optional[Credentials]:
    """
    Trigger the authentication so that we can store a new token.pickle.

    :param token_path: path where the pickled credentials are written
    :return: the freshly obtained credentials
    """
    # Launch the OAuth installed-app flow; port=0 lets the local redirect
    # server pick any free port.
    flow = InstalledAppFlow.from_client_secrets_file(
        'gcal/credentials.json', SCOPES)
    creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open(token_path, 'wb') as token:
        pickle.dump(creds, token)
    return creds
25,925
def get_auth_token(context, scope):
    """
    Get a token from the auth service to allow access to a service

    :param context: context of the test
    :param scope: OAuth scope requested for the token
    :return: the token
    """
    secret = get_client_secret(context)
    # OAuth2 client-credentials grant.
    data = {
        'grant_type': 'client_credentials',
        'scope': scope
    }
    response = requests.post(
        '{}/token'.format(context.services['auth']),
        data=data,
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        timeout=REQUEST_TIMEOUT,
        # TLS verification pinned to the test CA certificate.
        verify=context.keychain['CA_CRT'],
        auth=(context.client_id, secret)
    )
    return response.json()['access_token']
25,926
def getDefaultFontFamily():
    """Returns the default font family of the application"""
    # Query the font of the running QApplication singleton.
    return qt.QApplication.instance().font().family()
25,927
def result(a, b, operator):
    """This function return result"""
    # Map each supported operator token to its binary operation.
    operations = {
        "+": (lambda x, y: x + y),
        "-": (lambda x, y: x - y),
        "*": (lambda x, y: x * y),
        "/": (lambda x, y: x / y),
        "//": (lambda x, y: x // y),
        "%": (lambda x, y: x % y),
    }
    value = False
    error = ''
    if operator not in operations:
        error = "Use either + - * / or % next time"
    elif operator in ("/", "//", "%") and b == 0:
        # Guard all division-like operations against a zero denominator.
        error = "Oops, division or modulo by zero"
    else:
        value = operations[operator](a, b)
    return value, error
25,928
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
    """Performs non-maximum suppression in a batched fashion.

    Modified from https://github.com/pytorch/vision/blob
    /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
    In order to perform NMS independently per class, we add an offset to all
    the boxes. The offset is dependent only on the class idx, and is large
    enough so that boxes from different classes do not overlap.

    Arguments:
        boxes (torch.Tensor): boxes in shape (N, 4).
        scores (torch.Tensor): scores in shape (N, ).
        idxs (torch.Tensor): each index value correspond to a bbox cluster,
            and NMS will not be applied between elements of different idxs,
            shape (N, ).
        nms_cfg (dict): specify nms type and other parameters like iou_thr.
            Possible keys includes the following.

            - iou_thr (float): IoU threshold used for NMS.
            - split_thr (float): threshold number of boxes. In some cases the
                number of boxes is large (e.g., 200k). To avoid OOM during
                training, the users could set `split_thr` to a small value.
                If the number of boxes is greater than the threshold, it will
                perform NMS on each group of boxes separately and sequentially.
                Defaults to 10000.
        class_agnostic (bool): if true, nms is class agnostic,
            i.e. IoU thresholding happens over all boxes,
            regardless of the predicted class.

    Returns:
        tuple: kept dets and indice.
    """
    # Work on a copy so the caller's config dict is not mutated.
    nms_cfg_ = nms_cfg.copy()
    class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
    if class_agnostic:
        boxes_for_nms = boxes
    else:
        # Per-class offset: shifts each class's boxes into a disjoint
        # coordinate range so a single NMS pass keeps classes separate.
        max_coordinate = boxes.max()
        offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
        with no_nncf_trace():
            # NB: this trick is required to make class-separate NMS using ONNX NMS operation;
            # the ONNX NMS operation supports another way of class separation (class-separate scores), but this is not used here.
            # Note that `not_nncf_trace` is required here, since this trick causes accuracy degradation in case of int8 quantization:
            # if the output of the addition below is quantized, the maximal output value is about
            # ~ max_value_in_inds * max_coordinate,
            # usually this value is big, so after int8-quantization different small bounding
            # boxes may be squashed into the same bounding box, this may cause accuracy degradation.
            # TODO: check if it is possible in this architecture use class-separate scores that are supported in ONNX NMS.
            boxes_for_nms = boxes + offsets[:, None]
    nms_type = nms_cfg_.pop('type', 'nms')
    nms_op = get_nms_from_type(nms_type)

    split_thr = nms_cfg_.pop('split_thr', 10000)
    # Won't split to multiple nms nodes when exporting to onnx
    if boxes_for_nms.shape[0] < split_thr or (torch.onnx.is_in_onnx_export() or is_in_nncf_tracing):
        dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
        boxes = boxes[keep]
        scores = dets[:, 4]
    else:
        # Large input: run NMS per class id sequentially to bound peak memory.
        total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
        for id in torch.unique(idxs):
            mask = (idxs == id).nonzero(as_tuple=False).view(-1)
            dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
            total_mask[mask[keep]] = True

        keep = total_mask.nonzero(as_tuple=False).view(-1)
        # Order surviving boxes by descending score, as a single-pass NMS would.
        keep = keep[scores[keep].argsort(descending=True)]
        boxes = boxes[keep]
        scores = scores[keep]

    return torch.cat([boxes, scores[:, None]], -1), keep
25,929
def get_deconv_filter(f_shape):
    """
    Build bilinear-upsampling weights for a deconvolution filter.

    reference: https://github.com/MarvinTeichmann/tensorflow-fcn
    """
    width = f_shape[0]
    heigh = f_shape[0]
    f = ceil(width/2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    # 2-D bilinear kernel: weight falls off linearly with distance from the
    # kernel center `c`.
    bilinear = np.zeros([f_shape[0], f_shape[1]])
    for x in range(width):
        for y in range(heigh):
            bilinear[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    # Replicate the kernel on the diagonal so each channel is upsampled
    # independently (no cross-channel mixing).
    weights = np.zeros(f_shape, dtype=np.float32)
    for channel in range(f_shape[2]):
        weights[:, :, channel, channel] = bilinear
    return weights
25,930
def twitter_split_handle_from_txt(tweet):
    """
    Looks for RT @twitterhandle: or just @twitterhandle in the beginning of
    the tweet. The handle is split off and returned as two separate strings.

    :param tweet: (str) The tweet text to split.
    :return: (str, str) twitter_handle, rest_of_tweet
    """
    match = TWEET_TARGET_RE.search(tweet)
    if match is None:
        # No handle present: return None with the untouched tweet text.
        return match, tweet
    handle = match.group()
    return handle, tweet.replace(handle, '')
25,931
def points(piece_list):
    """Calculating point differential for the given board state"""
    # Args: (1) piece list (first 16 entries white, next 16 black)
    # Returns: differential (white points - black points)
    # The points are calculated via the standard chess value system:
    # Pawn = 1, Knight = 3, Bishop = 3, Rook = 5, Queen = 9,
    # King = 100 (arbitrarily large)
    # Only pieces still on the board (is_active) count toward the total.
    white_total = sum(piece.value for piece in piece_list[0:16] if piece.is_active)
    black_total = sum(piece.value for piece in piece_list[16:32] if piece.is_active)
    return white_total - black_total
25,932
def grid_sampler(x, grid, name=None):
    """
    :alias_main: paddle.nn.functional.grid_sampler
    :alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler
    :old_api: paddle.fluid.layers.grid_sampler

    This operation samples input X by using bilinear interpolation based on
    flow field grid, which is usually generated by :code:`affine_grid` . The
    grid of shape [N, H, W, 2] is the concatenation of (x, y) coordinates
    with shape [N, H, W] each, where x is indexing the 4th dimension (in
    width dimension) of input data x and y is indexing the 3rd dimension
    (in height dimension), finally results is the bilinear interpolation
    value of 4 nearest corner points. The output tensor shape will be
    [N, C, H, W].

    Step 1: Get (x, y) grid coordinates and scale to [0, H-1/W-1]:

        grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
        grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)

    Step 2: Index input data X with grid (x, y) in each [H, W] area, and
    bilinearly interpolate the point value from its 4 nearest corner points
    (weights are the distances to the west/east/north/south neighbours).

    Args:
        x(Variable): The input tensor, which is a 4-D tensor with shape
                     [N, C, H, W], N is the batch size, C is the channel
                     number, H and W is the feature height and width.
                     The data type is float32 or float64.
        grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
                        data type is float32 or float64.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: Output of shape [N, C, H, W] data samples input X
                  using bilnear interpolation based on input grid.
                  The data type is same as input tensor.

    Raises:
        ValueError: If `x` or `grid` is not a Variable.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            # use with affine_grid
            x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
            theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
            grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
            out = fluid.layers.grid_sampler(x=x, grid=grid)
    """
    helper = LayerHelper("grid_sampler", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
                             'grid_sampler')

    # BUG FIX: these guards previously *returned* a ValueError instance
    # instead of raising it, so invalid inputs silently handed the caller
    # an exception object as the "output". Raise instead.
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")
    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x, 'Grid': grid}

    helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
    return out
25,933
def _write_results(proto, xid=None):
  """Writes the prediction, ground truth, and representation to disk.

  Args:
    proto: Tuple (key, serialized) where `serialized` is a serialized
      results_pb2.Results message and `key` identifies the example.
    xid: Optional experiment id; when given, output is grouped under an
      'XID<xid>' subdirectory.
  """
  key, s = proto
  p = results_pb2.Results.FromString(s)
  if xid is None:
    dir_out = FLAGS.input_dir + '/extracted/' + key + '/'
  else:
    dir_out = FLAGS.input_dir + '/extracted/XID%i/%s/' % (xid, key)
  file_util.makedirs(dir_out)
  # Raw (unnormalized) outputs.
  file_util.write_mesh(f'{dir_out}/gt_mesh.ply', p.gt_mesh)
  file_util.write_mesh(f'{dir_out}/pred_mesh.ply', p.mesh)
  file_util.writetxt(f'{dir_out}/sif.txt', p.representation)
  # TODO(ldif-user) Set up the unnormalized2normalized path.
  path_to_tx = '/ROOT_DIR/%s/occnet_to_gaps.txt' % key
  # 4x4 homogeneous transform taking OccNet space to GAPS space.
  occnet_to_gaps = file_util.read_txt_to_np(path_to_tx).reshape([4, 4])
  pm = mesh_util.deserialize(p.mesh)
  pm.apply_transform(occnet_to_gaps)
  file_util.write_mesh(f'{dir_out}/nrm_pred_mesh.ply', pm)
  gtm = mesh_util.deserialize(p.gt_mesh)
  gtm.apply_transform(occnet_to_gaps)
  file_util.write_mesh(f'{dir_out}/nrm_gt_mesh.ply', gtm)
25,934
def fpack (filename):

    """fpack fits images; skip fits tables.

    Runs the external ``fpack`` tool on *filename* when it is a plain
    2-D FITS image (not an LDAC table), choosing lossless compression for
    integer images and quantized compression for float images. Returns
    the (possibly new ``.fz``) filename; on any error the original
    filename is returned unchanged.
    """

    try:

        # fits check if extension is .fits and not an LDAC fits file
        if filename.split('.')[-1] == 'fits' and '_ldac.fits' not in filename:

            header = read_hdulist(filename, get_data=False, get_header=True,
                                  ext_name_indices=0)

            # check if it is an image
            if int(header['NAXIS'])==2:

                # determine if integer or float image
                if int(header['BITPIX']) > 0:
                    # integer data: -D delete original, -Y suppress prompts
                    cmd = ['fpack', '-D', '-Y', '-v', filename]
                else:
                    # float data: pick quantization level; Scorr/limmag
                    # images need finer quantization to stay usable
                    if 'Scorr' in filename or 'limmag' in filename:
                        quant = 1
                    else:
                        quant = 16
                    cmd = ['fpack', '-q', str(quant), '-D', '-Y', '-v', filename]

                # if output fpacked file already exists, delete it
                filename_packed = '{}.fz'.format(filename)
                if os.path.exists(filename_packed):
                    os.remove(filename_packed)
                    log.warning ('fpacking over already existing file {}'
                                 .format(filename_packed))

                subprocess.run(cmd)
                filename = filename_packed

    except Exception as e:
        #log.exception (traceback.format_exc())
        log.exception ('exception was raised in fpacking of image {}: {}'
                       .format(filename,e))

    return filename
25,935
def SpecSwitch(spec_id):
    """
    Create hotkey function that switches hotkey spec.

    :param spec_id: Hotkey spec ID or index.

    :return: Hotkey function.
    """
    # Bind the target spec into a switcher callable, then tag it so it is
    # executed in the main thread when the hotkey fires.
    return tag_call_in_main_thread(partial(spec_switch, spec_id))
25,936
async def eliminate(ctx):
    """GM Only: Eliminate a country.

    Expects the message content to be '<command> <country>'; marks the
    matching Movelist row as eliminated. Only the GM (module-level ``gm``)
    may invoke this.
    """
    if ctx.message.author == gm:
        with session_scope() as session:
            # Message format: "<command> <country>" (single space separator).
            command, country = ctx.message.content.split(" ")
            row = session.query(Movelist).filter(Movelist.country == country).one_or_none()
            if row is None:
                await bot.say("Invalid Country")
            else:
                row.eliminated = True
                await bot.say('Country Eliminated')
    else:
        await bot.say('Only the GM can eliminate players!')
25,937
def get_package_version() -> str:
    """Returns the package version."""
    # Read the 'Version' field straight out of the installed metadata.
    return importlib_metadata.metadata(PACKAGE_NAME)["Version"]  # type: ignore
25,938
def safe_name(dbname):
    """Returns a database name with non letter, digit, _ characters removed."""
    # Keep only alphanumerics and underscores; drop everything else.
    return "".join(ch for ch in dbname if ch.isalnum() or ch == '_')
25,939
def _parse_mro(mro_file_name): """Parse an MRO file into python objects.""" # A few helpful pyparsing constants EQUALS, SEMI, LBRACE, RBRACE, LPAREN, RPAREN = map(pp.Suppress, '=;{}()') mro_label = pp.Word(pp.alphanums + '_') mro_modifier = pp.oneOf(["in", "out", "src"]) mro_type = pp.oneOf([ "bool", "bool[]", "int", "int[]", "float", "float[]", "map", "map[]", "string", "string[]", "string[][]", "path", "path[]", "py"] + \ utils.MARTIAN_FILETYPES + [x +'[]' for x in utils.MARTIAN_FILETYPES]) # First parse includes include = pp.Literal("@include").suppress() + pp.quotedString includes = pp.ZeroOrMore(include).setResultsName("includes") includes.addParseAction(pp.removeQuotes) # Then parse filetypes filetype = pp.Literal("filetype").suppress() + pp.oneOf(utils.MARTIAN_FILETYPES) + SEMI filetypes = pp.ZeroOrMore(filetype).setResultsName("filetypes") ##################################################### # Stage ##################################################### # Now define the parts of a stage # First we have a "stage entry", which is a line in the stage body, it looks like "in int lane" stage_entry = pp.Group(mro_modifier + mro_type + pp.Optional(pp.Word(pp.printables, excludeChars=',')) + pp.Optional(pp.QuotedString('"'))) # Note that stage entries a comma-delimited, but there's a trailing comma so we need the # pp.Empty option for matching stage_entries = pp.delimitedList(pp.Or([stage_entry, pp.Empty()])) # Each stage can have two parts, the main part and a "split using" part split = (pp.Literal("split using").suppress() + LPAREN + pp.Optional(pp.Group(stage_entries).setResultsName("split")) + RPAREN) stage = pp.Group(pp.Literal("stage").suppress() + mro_label + LPAREN + pp.Group(stage_entries).setResultsName("stage_entries") + RPAREN + pp.Optional(split)) # Now create a dict of the stages, with the MRO labels for keys stages = pp.Dict(pp.ZeroOrMore(stage)).setResultsName("stages") ##################################################### # Pipeline 
##################################################### ## Calls call_entry = pp.Group(pp.Word(pp.printables, excludeChars="=") + EQUALS + pp.Word(pp.printables, excludeChars=',')) call_entries = pp.delimitedList(pp.Or([call_entry, pp.Empty()])) call_modifier = pp.oneOf(["local", "preflight"]) call = pp.Group(pp.Literal("call").suppress() + pp.ZeroOrMore(call_modifier).suppress() + mro_label + LPAREN + pp.Group(call_entries).setResultsName("call_entries") + RPAREN) calls = pp.Dict(pp.ZeroOrMore(call)).setResultsName("pipeline_calls") ## Return return_entry = call_entry return_entries = pp.delimitedList(pp.Or([return_entry, pp.Empty()])) return_ = (pp.Literal("return").suppress() + LPAREN + pp.Group(return_entries).setResultsName("pipeline_return") + RPAREN) ## Pipeline header pipeline_header_entry = pp.Group(mro_modifier + mro_type + pp.Word(pp.printables, excludeChars=",") + pp.Optional(pp.quotedString)) pipeline_header_entries = pp.delimitedList(pp.Or([pipeline_header_entry, pp.Empty()])) pipeline = (pp.Literal("pipeline").suppress() + mro_label.setResultsName("pipeline_name") + LPAREN + pp.Group(pipeline_header_entries).setResultsName("pipeline_header") + RPAREN + LBRACE + calls + return_ + RBRACE) mro_file = pp.Each([pp.Optional(includes), filetypes, stages, pp.Optional(pipeline)]) mro_file.ignore(pp.pythonStyleComment) result = mro_file.parseFile(mro_file_name) return result
25,940
def remove_privilege(self, server, timeout=20):
    """Check that we can remove privilege from a role used in the external user directory configuration.

    Scenario: grant SELECT on a table to one of two LDAP-mapped roles,
    verify an LDAP user can read, then revoke the privilege and verify
    both cached and non-cached LDAP users lose access.
    """
    node = self.context.node
    uid = getuid()
    # Expected server-side rejection once the privilege is revoked.
    message = "DB::Exception: {user}: Not enough privileges."
    exitcode = 241

    self.context.ldap_node = self.context.cluster.node(server)

    users = [
        {"username": f"user0_{uid}", "password": "user0_password"},
        {"username": f"user1_{uid}", "password": "user1_password"}
    ]

    with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles:
        with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name:

            with When(f"I grant select privilege to one of the two roles assigned to LDAP users"):
                node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}")

            with ldap_external_user_directory(server=server, roles=roles, restart=True):
                with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]):

                    with When(f"I login then LDAP user should be able to read from the table"):
                        node.query(f"SELECT * FROM {table_name} LIMIT 1",
                            settings=[("user", users[0]["username"]), ("password", users[0]["password"])])

                    with When(f"I revoke select privilege from all the roles assigned to LDAP users"):
                        node.query(f"REVOKE SELECT ON {table_name} FROM {roles[0]}")

                    # The first user is already cached by the external user
                    # directory; the second has never logged in. Both must
                    # now be denied.
                    with When(f"I login again then cached LDAP user should not be able to read from the table"):
                        node.query(f"SELECT * FROM {table_name} LIMIT 1",
                            settings=[("user", users[0]["username"]), ("password", users[0]["password"])],
                            exitcode=exitcode, message=message.format(user=users[0]["username"]))

                    with When(f"I login with non-cached LDAP user then the user should also not be able to read from the table"):
                        node.query(f"SELECT * FROM {table_name} LIMIT 1",
                            settings=[("user", users[1]["username"]), ("password", users[1]["password"])],
                            exitcode=exitcode, message=message.format(user=users[1]["username"]))
25,941
def tabulate_stats(stats: rl_common.Stats) -> str:
    """Pretty-prints the statistics in `stats` in a table."""
    table_rows = []
    for (env_name, (reward_type, reward_path)), runs in stats.items():
        for seed, (metrics, _log_dir) in enumerate(runs):
            combined = {
                "env_name": env_name,
                "reward_type": reward_type,
                "reward_path": reward_path,
                "seed": seed,
            }
            combined.update(metrics)

            kept = {}
            for key, value in combined.items():
                if key.endswith("_std"):
                    # Convert standard deviation to standard error.
                    key = key[:-4] + "_se"
                    value = value / math.sqrt(combined["n_traj"])
                mapped = _filter_key(key)
                if mapped is not None:
                    kept[mapped] = value
            table_rows.append(kept)

    return tabulate.tabulate(table_rows, headers="keys")
25,942
def test_html_blacklist_dialog():
    """The dialog element is a user-interactive area for performing."""
    # Verify that a <dialog> element in the input HTML is stripped from
    # the rendered output (it is on the tag blacklist).
    check_html_output_does_not_contain_tag("""
        <p><b>Note:</b> The dialog tag is not supported in Edge.</p>
        <p>January <dialog open>This is an open dialog window</dialog></p>
        <p>February</p>
    """, "dialog")
25,943
def LLR_binom(k, n, p0, EPS=1E-15):
    """
    Log likelihood ratio test statistic for the single binomial pdf.

    Args:
        k   : number of counts (numpy array or scalar)
        n   : number of trials
        p0  : null hypothesis parameter value
        EPS : numerical floor keeping the MLE away from 0 and 1

    Returns:
        individual log-likelihood ratio values
    """
    phat = np.asarray(k) / n  # maximum likelihood estimate

    # Clamp away from BOTH 0 and 1 so the log terms stay finite.
    # (Previously only the lower end was clamped, so k == n produced
    # (n-k)*log(1-phat) = 0*log(0) = nan.)
    phat = np.clip(phat, 2 * EPS, 1 - 2 * EPS)

    # Log-likelihood (density) ratios
    LLR = 2 * ((k * np.log(phat) + (n - k) * np.log(1 - phat))
               - (k * np.log(p0) + (n - k) * np.log(1 - p0)))

    return LLR
25,944
def concatenate_shifts(shifts):
    """ Take the shifts, which are relative to the previous shift,
        and sum them up so that all of them are relative to the first."""
    # the first shift is 0,0,0
    # Walk adjacent pairs starting at the third element; because each
    # 'prev' object has already been accumulated, the running sum
    # propagates forward exactly as in a classic prefix sum.
    for prev, cur in zip(shifts[1:], shifts[2:]):
        cur.x += prev.x
        cur.y += prev.y
        cur.z += prev.z

    return shifts
25,945
def users(user_id=None, serialize=True):
    """
    The method returns users in a json responses. The json is hashed to
    increase security.

    :param serialize: Serialize helps indicate the format of the response
    :param user_id: user id intended to be searched
    :return: Json format or plain text depending in the serialize parameter
    """
    users = DATA_CONTROLLER.get_user_by_id(user_id=user_id, serialize=True)
    page = request.args.get("limit")
    number_of_pages = None
    pages = []
    if page:
        number_of_pages = int(ceil(float(len(users)) / PAGE_SIZE))
        converted_page = int(page)
        # Reject out-of-range page requests.
        if converted_page > number_of_pages or converted_page < 0:
            return make_response("", 404)
        from_index = (converted_page - 1) * PAGE_SIZE
        to_index = from_index + PAGE_SIZE
        users = users[from_index:to_index]

    if number_of_pages:
        # list() so the page numbers are JSON-serializable
        # (a bare range object breaks json.dumps on Python 3).
        pages = list(range(1, number_of_pages + 1))

    if serialize:
        data = {
            "users": users,
            "total": len(users),
            "pages": pages
        }
        json_data = json.dumps(data)
        response = make_response(jsonify(data), 200)

        # Caching: sha256 requires bytes on Python 3, so encode the JSON
        # payload before hashing.
        response.headers["ETag"] = str(hashlib.sha256(json_data.encode("utf-8")).hexdigest())  # Entity tag uniquely identifies request
        response.headers["Cache-Control"] = "private, max-age=300"

        return response
    # NOTE(review): when serialize is falsy this falls through and returns
    # None, despite the docstring promising plain text -- confirm intent.
25,946
def course_units_my(user):
    """ Get all course units assign to a teacher available persisted in DB

    :param user: the teacher whose course units are fetched
    :return: tuple with
        - Course units data (set of course units)
        - list of success messages
        - list of error messages
    """
    success = []
    # Distinct course units across all attendances created by this user.
    data = set([attendance.course_unit for attendance in
                Attendance.objects.filter(creator=user).order_by('course_unit__name')])
    success.append("Cadeiras do docente obtidas com sucesso")
    # Log under this function's own name (previously logged as
    # course_units.__name__, which attributed these entries to the wrong
    # function).
    general_log.debug(f"{__name__}->{course_units_my.__name__} {success[-1]}")
    return data, success, []
25,947
def warning(msg: str, logger: Union[logging.Logger, str] = None, *args, **kwargs):
    """
    Log to a given or default logger (if available) with 'WARNING' level.

    Args:
        msg: message to log.
        logger: logger instance or logger name. If not set, default logger
            is used (as set by :func:`createLogger()`).
    """
    # Bug fix: the supplied ``logger`` argument was previously ignored --
    # it was unconditionally overwritten by _determineLogger().
    if logger is None:
        logger = _determineLogger()
    elif isinstance(logger, str):
        logger = logging.getLogger(logger)
    logger.warning(msg, *args, **kwargs)
25,948
def get_world_size():
    """Return the distributed world size, or 1 when the distributed
    backend is unavailable or uninitialized."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
25,949
def get_namenode_setting(namenode):
    """Function for getting the namenode in input as parameter setting from the
    configuration file.

    Parameters
    ----------
    namenode --> str, the namenode for which you want to get the setting info

    Returns
    -------
    dict, the namenode setting info
    """
    all_settings = conf['namenodes_setting']
    return all_settings[namenode]
25,950
def cds(identity: str, sequence: str, **kwargs) -> Tuple[sbol3.Component, sbol3.Sequence]:
    """Creates a Coding Sequence (CDS) Component and its Sequence.

    :param identity: The identity of the Component. The identity of Sequence
        is also identity with the suffix '_seq'.
    :param sequence: The DNA sequence of the Component encoded in IUPAC.
    :param kwargs: Keyword arguments of any other Component attribute.
    :return: A tuple of Component and Sequence.
    """
    component, seq = dna_component_with_sequence(identity, sequence, **kwargs)
    # Mark the component as a CDS via its Sequence Ontology role.
    component.roles.append(sbol3.SO_CDS)
    return component, seq
25,951
def all_scenes():
    """List all scenes

    Loads 'scenes.json' (located next to this module) on first use and
    caches the parsed JSON in the module-global _scene_json.

    Returns: list of Scene objects
    """
    global _scene_json
    if _scene_json is None:
        # Lazy-load and cache the scene definitions.
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "scenes.json")) as fh:
            _scene_json = json.loads(fh.read())
    ret = []
    for s in _scene_json:
        # Each entry carries: name, room, building and room_type keys.
        ret.append(Scene(s["name"], Room(s["room"], Building(s["building"]), RoomType(s["room_type"]))))
    return ret
25,952
def calc_innovation(xEst, PEst, y, LMid): """ Compute innovation and Kalman gain elements """ # Compute predicted observation from state lm = get_landmark_position_from_state(xEst, LMid) delta = lm - xEst[0:2] q = (delta.T @ delta)[0, 0] y_angle = math.atan2(delta[1, 0], delta[0, 0]) - xEst[2, 0] yp = np.array([[math.sqrt(q), pi_2_pi(y_angle)]]) # compute innovation, i.e. diff with real observation innov = (y - yp).T # Yt-Yt* innov[1] = pi_2_pi(innov[1]) # compute matrixes for Kalman Gain H = jacob_h(q, delta, xEst, LMid) S = H @ PEst @ H.T + Py return innov, S, H
25,953
def next_key(basekey: str, keys: dict[str, Any]) -> str:
    """Returns the next unused key for basekey in the supplied dictionary.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
    until a free one is found.
    """
    if basekey not in keys:
        return basekey
    for suffix in count(2):
        candidate = f"{basekey}-{suffix}"
        if candidate not in keys:
            return candidate
25,954
def load_mmio_overhead_elimination_map(yaml_path):
    """ Load a previously dumped mmio overhead elimination map """
    with open(yaml_path, "r") as yaml_file:
        entries = yaml.safe_load(yaml_file.read())

    # The dump is a list: [overall, per_model, (optional) per_access_context].
    result = {
        'overall': entries[0]['overall'],
        'per_model': entries[1]['per_model'],
    }
    if len(entries) > 2:
        result['per_access_context'] = entries[2]['per_access_context']

    return result
25,955
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters

        NOTE: Python 2 code (uses ``long``/``StandardError``); renders the
        MoinMoin page-diff view, including revision navigation and an
        optional revert form.
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    # Normalize so the older revision is always on the left.
    if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()
    if not oldlog or not newlog:
        # We use "No log entries found." msg because we already have i18n
        # for that. Better would "At least one log entry was not found.".
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        # HTML 'disabled' attribute for buttons that must be inactive.
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
 <form action="%s" method="get">
  <div style="text-align:center">
   <input name="action" value="revert" type="hidden">
   <input name="rev" value="%d" type="hidden">
   <input value="%s" type="submit"%s>
  </div>
 </form>
 """ % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
 """

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {'button': other_diff_button_html}

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (title,
                                         page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
                                         revert_html,
                                         page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev),
                                        )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title):
        # Render either an active diff-navigation link or a disabled span.
        if enabled:
            return currentpage.link_to(request, on=1, querystr={
                'action': 'diff',
                'rev1': old_rev,
                'rev2': new_rev,
            }, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
            }

    # Two-pass template: %(...)s keys are filled now, %%(...)s keys survive
    # to be filled per-revision below.
    rev_info_html = """
 <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
 <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
 <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
 <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
        'rev_header': _('Revision %(rev)d as of %(date)s'),
        'rev_size_caption': _('Size'),
        'rev_author_caption': _('Editor'),
        'rev_ts_caption': _('Date'),
        'rev_comment_caption': _('Comment'),
    }

    rev_info_old_html = rev_info_html % {
        'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4',
                                       'diff-first-link diff-old-rev',
                                       _('Diff with oldest revision in left pane'),
                                       _("No older revision available for diff")),
        'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190',
                                      'diff-prev-link diff-old-rev',
                                      _('Diff with older revision in left pane'),
                                      _("No older revision available for diff")),
        'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192',
                                      'diff-next-link diff-old-rev',
                                      _('Diff with newer revision in left pane'),
                                      _("Can't change to revision newer than in right pane")),
        'rev_last_link': '',
        'rev': oldrev,
        'rev_size': oldpage.size(),
        'rev_author': oldlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(oldlog.comment) or '',
    }
    rev_info_new_html = rev_info_html % {
        'rev_first_link': '',
        'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190',
                                      'diff-prev-link diff-new-rev',
                                      _('Diff with older revision in right pane'),
                                      _("Can't change to revision older than revision in left pane")),
        'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192',
                                      'diff-next-link diff-new-rev',
                                      _('Diff with newer revision in right pane'),
                                      _("No newer revision available for diff")),
        'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5',
                                      'diff-last-link diff-old-rev',
                                      _('Diff with newest revision in right pane'),
                                      _("No newer revision available for diff")),
        'rev': newrev,
        'rev_size': newpage.size(),
        'rev_author': newlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info")))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html)))
        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                # Offer a link to re-run the diff ignoring whitespace.
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
25,956
def SetBmaskName(enum_id, bmask, name):
    """
    Set bitmask name (only for bitfields)

    @param enum_id: id of enum
    @param bmask: bitmask of the constant
    @param name: name of bitmask
    @return: 1-ok, 0-failed
    """
    # Thin compatibility wrapper around the IDA Pro SDK call.
    return idaapi.set_bmask_name(enum_id, bmask, name)
25,957
def angle_to(x: int, y: int) -> float:
    """Return the angle (radians) of the vector from the origin (0, 0) to
    (x, y), adjusted so that north (+y) is 0 and angles grow clockwise.

    The result is normalized to [0, 2*pi).  (The previous ``-> int``
    annotation was wrong: ``math.atan2`` returns a float.)
    """
    # atan2(x, y) yields values in (-pi, pi]; the modulo maps negatives
    # into [pi, 2*pi), matching the original pi + (pi + rad) shift.
    return math.atan2(x, y) % (2.0 * math.pi)
25,958
def sp_args():
    """Apply quirks for `subprocess.Popen` to have standard behavior in
    PyInstaller-frozen windows binary.

    Returns
    -------
    dict[str, str or bool or None]
        The additional arguments for `subprocess` calls.
    """
    if sys.platform.startswith('win32'):
        # Prevent Windows from popping up a command window on subprocess calls
        win_startup = sp.STARTUPINFO()
        win_startup.dwFlags |= sp.STARTF_USESHOWWINDOW
        # Make Windows search the ``PATH``
        env = os.environ
    else:
        win_startup = None
        env = None

    # Avoid ``OSError`` exception by redirecting all standard handles
    return {
        'stdout': sp.PIPE,
        'stdin': sp.PIPE,
        'stderr': sp.PIPE,
        'startupinfo': win_startup,
        'env': env,
        'close_fds': True,
    }
25,959
def adj(triples, num_nodes, num_rels, cuda=False, vertical=True):
    """
    Computes a sparse adjacency matrix for the given graph (the adjacency
    matrices of all relations are stacked vertically).

    :param triples: iterable of (from, relation, to) tensor triples
    :param num_nodes: number of nodes n
    :param num_rels: number of relations r
    :param cuda: whether to place the index tensor on the GPU
    :param vertical: stack per-relation blocks vertically (r*n, n) when
        True, horizontally (n, r*n) otherwise
    :return: (indices, size) -- transposed index tensor of shape
        (len(triples), 2) and the matrix size tuple
    """
    r, n = num_rels, num_nodes
    size = (r * n, n) if vertical else (n, r * n)

    from_indices = []
    upto_indices = []

    for fr, rel, to in triples:
        # Relation rel occupies the row (or column) band [rel*n, (rel+1)*n).
        offset = rel.item() * n

        if vertical:
            fr = offset + fr.item()
        else:
            to = offset + to.item()

        from_indices.append(fr)
        upto_indices.append(to)

    # d(cuda) presumably resolves the torch device -- confirm with its definition.
    indices = torch.tensor([from_indices, upto_indices], dtype=torch.long, device=d(cuda))

    assert indices.size(1) == len(triples)
    assert indices[0, :].max() < size[0], f'{indices[0, :].max()}, {size}, {r}'
    assert indices[1, :].max() < size[1], f'{indices[1, :].max()}, {size}, {r}'

    return indices.t(), size
25,960
def join_b2_path(b2_dir, b2_name):
    """
    Like os.path.join, but for B2 file names where the root directory is
    called ''.

    :param b2_dir: a directory path
    :type b2_dir: str
    :param b2_name: a file name
    :type b2_name: str
    """
    # The B2 root is the empty string: joining with it must not add a slash.
    return b2_name if b2_dir == '' else '{}/{}'.format(b2_dir, b2_name)
25,961
def actual_line_flux(wavelength,flux, center=None,pass_it=True):
    """Measure actual line flux:

    parameters
    ----------
    wavelength: float array
    flux: float array
    center: float
       wavelength to center plot on
    pass_it: bool
       when True, accept the first pair of clicked limits without
       prompting for a retry

    output parameters
    -----------------
    flux in line integrated over region
    flux in background over same region

    Notes
    -----
    In novae the line profile is composed of a superposition of
    emission from different regions, sometimes optically thick,
    sometimes thin, but not gaussian in shape.
    Here we plot the profile provide endpoints (w1,f1), (w2,f2) at the
    flux level of the background

    Interactive: blocks on matplotlib ginput() and stdin input().
    """
    import numpy as np
    from pylab import plot,xlim, ylim, title, xlabel, ylabel, ginput, figure, subplot
    from scipy.interpolate import interp1d
    # find plot center and range
    if type(center) == type(None):
        center=wavelength.mean()
    # Window of roughly +/- 7/300 (~2.3%) around the center wavelength.
    x1 = center - 7./300*center
    x2 = center + 7./300*center
    q = (wavelength > x1) & (wavelength < x2)
    w = wavelength[q]
    flx = flux[q]
    y2 = flx.max()
    f = figure()
    ax = subplot(111)
    getit = True
    while getit:
        ax.plot(w,flx,ls='steps',color='darkblue')
        print ("please click the desired limits of the profile at the background level")
        print ("no timeout")
        aa = ginput(n=2,timeout=0)
        x1,y1 = aa[0]
        x2,y2 = aa[1]
        x1 = float(x1)
        x2 = float(x2)
        y1 = float(y1)
        y2 = float(y2)
        q = (w >= x1) & (w <= x2)
        # Linear background between the two clicked endpoints.
        bg = interp1d([x1,x2],[y1,y2],)
        ax.fill_between(w[q], bg(w[q]), flx[q], color='c')
        ans = input("Do you want to continue ?")
        if (ans.upper()[0] != 'Y') & (pass_it == False) :
            print ("Answer is not yes\n TRY AGAIN ")
            ax.cla()
        else:
            getit = False
    # not compute the fluxes
    w = w[q]
    flx = flx[q]
    tot_flx = []
    # Trapezoidal background area between the clicked endpoints.
    tot_bkg = (y2+y1)*(x2-x1)*0.5
    for k in range(1,len(w)):
        # NOTE(review): a trapezoid area is normally
        # 0.5*(f[k-1]+f[k])*(w[k]-w[k-1]); the (w[k-1]+w[k]) *sum* here
        # looks suspicious -- confirm against the intended integral.
        tot_flx.append(0.25*(flx[k-1]+flx[k])*(w[k-1]+w[k]))
    line_flx = np.asarray(tot_flx).sum() - tot_bkg
    print (type(line_flx), line_flx)
    print (type(tot_bkg), tot_bkg)
    print (type( 0.5*(x2+x1) ), 0.5*(x2+x1) )
    print ( (" wavelength = %10.2f\n line flux = %10.3e\n"+
             " background flux = %10.2e\n FWZI = %10.2f\n")%(
             (x2+x1)*0.5, line_flx, tot_bkg, (x2-x1) ) )
    return {'wavelength':(x2+x1)*0.5,
            'line_flux':line_flx,
            'integrated_background_flux':tot_bkg,
            "FWZI":(x2-x1)}
25,962
def _too_many_contigs(ref_file):
    """Check for more contigs than the maximum samblaster deduplication supports.
    """
    max_contigs = 32768
    # Count contigs lazily instead of materializing the full list.
    n_contigs = sum(1 for _ in ref.file_contigs(ref_file))
    return n_contigs >= max_contigs
25,963
def read(ctx, file):
    """
    Display records in human-readable form

    :param ctx: shared context
    :param file: record to be displayed

    NOTE: Python 2 syntax (print statement). Reads a pcap capture with
    scapy's rdpcap and dumps each frame's hex plus its Z-Wave layer.
    """
    frames = rdpcap(file)
    for frame in frames:
        show_hex(frame)
        # Raises if a frame lacks a ZWaveReq layer -- presumably all
        # captured frames are Z-Wave requests; confirm with the capture tool.
        frame[ZWaveReq].show()
        print '\n'
25,964
def survival_regression_metric(metric, outcomes_train, outcomes_test,
                               predictions, times):
  """Compute metrics to assess survival model performance.

  Parameters
  -----------
  metric: string
    Measure used to assess the survival regression model performance.
    Options include:
      - `brs` : brier score
      - `ibs` : integrated brier score
      - `auc`: cumulative dynamic area under the curve
      - `ctd` : concordance index inverse probability of censoring
      weights (ipcw)
  predictions: np.array
    A numpy array of survival time predictions for the samples.
  outcomes_train : pd.DataFrame
    A pandas dataframe with rows corresponding to individual samples and
    columns 'time' and 'event' for test data.
  outcomes_test : pd.DataFrame
    A pandas dataframe with rows corresponding to individual samples and
    columns 'time' and 'event' for training data.
  times: np.array
    The time points at which to compute metric value(s).

  Returns
  -----------
  float: The metric value for the specified metric.

  Raises
  -----------
  NotImplementedError: if `metric` is not one of the supported names.
  """

  # Build structured survival arrays (event indicator + time) for the
  # scikit-survival style metric functions.
  survival_train = util.Surv.from_dataframe('event', 'time', outcomes_train)
  survival_test = util.Surv.from_dataframe('event', 'time', outcomes_test)
  predictions_test = predictions

  if metric == 'brs':
    return metrics.brier_score(survival_train, survival_test,
                               predictions_test, times)[-1]
  elif metric == 'ibs':
    return metrics.integrated_brier_score(survival_train, survival_test,
                                          predictions_test, times)
  elif metric == 'auc':
    # AUC and concordance expect risk scores, hence 1 - survival prob.
    return metrics.cumulative_dynamic_auc(survival_train, survival_test,
                                          1-predictions_test, times)[0]
  elif metric == 'ctd':
    vals = []
    for i in range(len(times)):
      # Concordance is computed separately at each truncation time tau.
      vals.append(metrics.concordance_index_ipcw(survival_train, survival_test,
                                                 1-predictions_test[:,i],
                                                 tau=times[i])[0])
    return vals
  else:
    raise NotImplementedError()
25,965
def check_line_length(fn, lines):
    """Check for line length; this checker is not run by default."""
    directive_prefixes = ('.. function', '.. method', '.. cfunction')
    for lineno, raw in enumerate(lines, start=1):
        if len(raw) <= 81:
            continue
        stripped = raw.lstrip()
        # don't complain about tables, links and function signatures
        if stripped[0] in '+|':
            continue
        if 'http://' in raw or stripped.startswith(directive_prefixes):
            continue
        yield lineno, "line too long"
25,966
def _get_all_valid_corners(img_arr, crop_size, l_thresh, corner_thresh): """Get all valid corners for random cropping""" valid_pix = img_arr >= l_thresh kernel = np.ones((crop_size, crop_size)) conv = signal.correlate2d(valid_pix, kernel, mode='valid') return conv > (corner_thresh * crop_size ** 2)
25,967
async def test_get_method_raises_an_error_without_lti13_private_key(
    make_mock_request_handler,
):
    """
    Is an environment error raised if the LTI13_PRIVATE_KEY env var is not set
    after calling the handler's method?
    """
    # Build a minimal RequestHandler via the fixture, then wrap it in the
    # JWKS config handler under test.
    handler = make_mock_request_handler(RequestHandler)
    config_handler = LTI13JWKSHandler(handler.application, handler.request)
    # Without LTI13_PRIVATE_KEY in the environment, get() must fail.
    with pytest.raises(EnvironmentError):
        await config_handler.get()
25,968
def remove_dup(a):
    """Remove consecutive duplicates from `a` using an extra array.

    Prints the count of unique elements and returns a new list keeping
    one element from each run of equal adjacent values. (Only adjacent
    duplicates are collapsed, so sorted input yields fully unique output.)

    Bug fix: previously raised IndexError on an empty input; now returns [].
    """
    if not a:
        print('Total count of unique elements: 0')
        return []
    # Keep every element that differs from its successor...
    res = [cur for cur, nxt in zip(a, a[1:]) if cur != nxt]
    # ...plus the final element, which closes the last run.
    res.append(a[-1])
    print('Total count of unique elements: {}'.format(len(res)))
    return res
25,969
def mjd2crnum(mjd):
    """Convert Modified Julian Date to Carrington Rotation number.

    Mathew Owens, 16/10/20
    """
    reference_mjd = 45871.41   # MJD corresponding to Carrington rotation 1750
    rotation_days = 27.2753    # mean Carrington rotation period in days
    return 1750 + (mjd - reference_mjd) / rotation_days
25,970
def poly_print_simple(poly,pretty=False): """Show the polynomial in descending form as it would be written""" # Get the degree of the polynomial in case it is in non-normal form d = poly.degree() if d == -1: return f"0" out = "" # Step through the ascending list of coefficients backward # We do this because polynomials are usually written in descending order for pwr in range(d,-1,-1): # Skip the zero coefficients entirely if poly[pwr] == 0: continue coe = poly[pwr] val = abs(coe) sgn = "-" if coe//val == -1 else "+" # When the coefficient is 1 or -1 don't print it unless it is the # coefficient for x^0 if val == 1 and pwr != 0: val = "" # If it is the first term include the sign of the coefficient if pwr == d: if sgn == "+": sgn = "" # Handle powers of 1 or 0 that appear as the first term if pwr == 1: s = f"{sgn}{val}x" elif pwr == 0: s = f"{sgn}{val}" else: if pretty == False: s = f"{sgn}{val}x^{pwr}" else: s = f"{sgn}{val}x$^{{{pwr}}}$" # If the power is 1 just show x rather than x^1 elif pwr == 1: s = f" {sgn} {val}x" # If the power is 0 only show the sign and value elif pwr == 0: s = f" {sgn} {val}" # Otherwise show everything else: if pretty == False: s = f" {sgn} {val}x^{pwr}" else: s = f" {sgn} {val}x$^{{{pwr}}}$" out += s return out
25,971
def download_junit(db, threads, client_class):
    """Download junit results for builds without them.

    Args:
        db: database handle exposing get_builds_missing_junit(),
            insert_build_junits() and commit().
        threads: number of download threads; values > 1 enable a thread pool.
        client_class: factory for the storage client used to fetch artifacts.
    """
    logging.info('Downloading JUnit artifacts.')
    sys.stdout.flush()
    builds_to_grab = db.get_builds_missing_junit()
    pool = None
    if threads > 1:
        # Each pool worker gets its own client instance via mp_init_worker.
        pool = multiprocessing.pool.ThreadPool(
            threads, mp_init_worker, ('', {}, client_class, False))
        test_iterator = pool.imap_unordered(
            get_junits, builds_to_grab)
    else:
        # Single-threaded path: share one module-global client.
        global WORKER_CLIENT  # pylint: disable=global-statement
        WORKER_CLIENT = client_class('', {})
        test_iterator = (
            get_junits(build_path) for build_path in builds_to_grab)
    for n, (build_id, build_path, junits) in enumerate(test_iterator, 1):
        logging.info('%d/%d %s %d %d', n, len(builds_to_grab), build_path,
                     len(junits), len(''.join(junits.values())))
        # Strip noisy <system-out> sections before storing.
        junits = {k: remove_system_out(v) for k, v in junits.items()}
        db.insert_build_junits(build_id, junits)
        if n % 100 == 0:
            # Commit periodically so a crash doesn't lose all progress.
            db.commit()
    db.commit()
    if pool:
        pool.close()
        pool.join()
25,972
def parse_file_():
    """
    Retrieves the parsed information by specifying the file, the timestamp
    and latitude + longitude.

    Don't forget to encode the plus sign '+' = %2B! Example:
    GET /parse/data/ecmwf/an-2017-09-14.grib?timestamp=2017-09-16T15:21:20%2B00:00&lat=48.398400&lon=9.591550
    :param fileName: path to a retrieved ecmwf grib file.
    :return: OK including json content or empty not found
    """
    try:
        [point, date] = validate_request_parameters()
    except ValueError, e:  # Python 2 exception syntax; this module targets py2
        return misc.create_response(jsonify(message=e.message), 400)
    file_name = misc.build_file_name(date)
    path_to_file = file_directory + os.sep + file_name
    files = file_status.get_available_files()
    if file_name not in files or not os.path.isfile(path_to_file):
        # NOTE(review): the format string has one '{}' placeholder but two
        # arguments (file_name, files); 'files' is silently dropped by
        # str.format -- confirm whether a second placeholder was intended.
        msg = {'message': 'Given filename={} could not be found in the available files are attached.'.format(file_name, files),
               'data': {'files': files}}
        return misc.create_response(jsonify(transform_message(msg).data), 404)
    result = cache.cache.get(request.url)  # check cache
    if not result:
        # Cache miss: parse the grib file for the requested point and date.
        result = parse_action.parse(path_to_file, point, date)
    return Response(json.dumps(transform(result), default=CopernicusData.json_serial, indent=2),
                    mimetype="text/json",
                    status=200)
25,973
def make_model(drc_csv: str, sat_tables: list, sector_info_csv: str,
               ia_tables=None, units_csv='', compartments_csv='',
               locations_csv='') -> model.Model:
    """
    Creates a full EE-IO model with all information required for calculations,
    JSON-LD export, validation, etc.

    :param drc_csv: CSV file with the direct requirements matrix A
    :param sat_tables: a list of CSV files with satellite tables
    :param sector_info_csv: CSV file with sector metadata
    :param ia_tables: an optional list of CSV files with impact assessment
           factors.
    :param units_csv: optional file with unit metadata
    :param compartments_csv: optional file with compartment metadata
    :param locations_csv: optional file with location metadata
    :return: the assembled model.Model instance
    """
    drc = read_csv_data_frame(drc_csv)
    sat_table = make_sat_table(*sat_tables)
    sectors = ref.SectorMap.read(sector_info_csv)
    # Impact assessment is optional; only build the table when files are given.
    ia_table = None
    if ia_tables is not None and len(ia_tables) > 0:
        ia_table = ia.Table()
        for iat in ia_tables:
            ia_table.add_file(iat)

    def read_map(name, clazz):
        # Fall back to the class's built-in defaults when no file is given.
        if name is None or name == '':
            return clazz.create_default()
        else:
            return clazz.read(name)

    units = read_map(units_csv, ref.UnitMap)
    compartments = read_map(compartments_csv, ref.CompartmentMap)
    locations = read_map(locations_csv, ref.LocationMap)
    return model.Model(drc, sat_table, sectors, ia_table, units,
                       compartments, locations)
25,974
def newNXentry(parent, name):
    """Create new NXentry group.

    Args:
        parent (h5py.File or h5py.Group): hdf5 file handle or group
        name (str): group name without extension

    Returns:
        h5py.Group: new NXentry group (tagged NXsubentry when nested
        inside an existing NXentry)
    """
    group = parent.create_group(name)
    group.attrs["NX_class"] = "NXentry"
    # An entry created inside another NXentry must be an NXsubentry.
    nested_in_entry = ("NX_class" in parent.attrs
                       and parent.attrs["NX_class"] == "NXentry")
    if nested_in_entry:
        group.attrs["NX_class"] = "NXsubentry"
    return group
25,975
def list_files(path: str, excluded: List[str]) -> Iterable[str]:
    """List relative file paths inside the given path."""
    skip = {os.path.abspath(entry) for entry in excluded}
    for root, dirs, files in os.walk(path):
        abs_root = os.path.abspath(root)
        # Prune excluded and hidden directories in place so os.walk
        # never descends into them.
        dirs[:] = [d for d in dirs
                   if not d.startswith('.')
                   and os.path.join(abs_root, d) not in skip]
        for fname in files:
            if fname.startswith('.'):
                continue
            if os.path.join(abs_root, fname) in skip:
                continue
            yield os.path.relpath(os.path.join(root, fname), path)
25,976
def _build_europe_gas_day_tzinfo():
    """
    Build the Europe/Gas_Day timezone based on the CET time.

    The European gas day starts 6 hours after CET midnight, so each CET/CEST
    UTC offset is shifted back by 6 hours while keeping the CET transition
    dates.

    :returns: a cached DstTzInfo subclass instance for 'Europe/Gas_Day'
    :raises ValueError: When something is wrong with the CET/CEST definition
    """
    zone = 'Europe/Gas_Day'
    transitions = _get_transitions()
    transition_info_cet = _get_transition_info_cet()
    # Gas day offset: 6 hours behind the CET wall clock.
    difference_sec = 3600 * 6

    transition_info_gas_day = []
    for dt1, dt2, name in transition_info_cet:
        # Shift both the UTC offset (dt1) and the DST offset pair (dt2).
        sec1 = dt1.seconds - difference_sec
        hours1 = sec1 / (60 * 60)
        gas_dt1 = datetime.timedelta(hours=hours1)
        sec2 = dt2.seconds - difference_sec
        hours2 = sec2 / (60 * 60)
        gas_dt2 = datetime.timedelta(hours=hours2)
        # Validate the zone names; the reassignment is an identity mapping
        # kept purely as a sanity check on the CET data.
        if name == 'CET':
            name = 'CET'
        elif name == 'CEST':
            name = 'CEST'
        else:
            raise ValueError("tz name not CET or CEST")
        transition_info_gas_day.append((gas_dt1, gas_dt2, name))

    # Dynamically create a DstTzInfo subclass carrying the shifted offsets.
    gas_day_cls = type('Europe/Gas_Day', (DstTzInfo,), dict(
        zone=zone,
        _utc_transition_times=transitions,
        _transition_info=transition_info_gas_day
    ))

    _tzinfo_cache[zone] = gas_day_cls()
    return _tzinfo_cache[zone]
25,977
def prepare_variants_relations_data(
    queryset: "QuerySet",
    fields: Set[str],
    attribute_ids: Optional[List[int]],
    warehouse_ids: Optional[List[int]],
) -> Dict[int, Dict[str, str]]:
    """Prepare data about variants relation fields for given queryset.

    It return dict where key is a product pk, value is a dict with
    relation fields data.
    """
    warehouse_fields = ProductExportFields.WAREHOUSE_FIELDS
    attribute_fields = ProductExportFields.VARIANT_ATTRIBUTE_FIELDS

    result_data: Dict[int, dict] = defaultdict(dict)
    # Always fetch the variant pk; it keys the result dict.
    fields.add("variants__pk")

    if attribute_ids:
        fields.update(ProductExportFields.VARIANT_ATTRIBUTE_FIELDS.values())
    if warehouse_ids:
        fields.update(ProductExportFields.WAREHOUSE_FIELDS.values())

    relations_data = queryset.values(*fields)

    for data in relations_data.iterator():
        pk = data.get("variants__pk")
        image = data.pop("variants__images__image", None)
        result_data = add_image_uris_to_data(
            pk, image, "variants__images__image", result_data
        )

        # handle attribute and warehouse data
        attribute_data: dict = {}
        warehouse_data: dict = {}

        attribute_pk = str(data.pop(attribute_fields["attribute_pk"], ""))
        attribute_data = {
            "slug": data.pop(attribute_fields["slug"], None),
            "value": data.pop(attribute_fields["value"], None),
        }
        warehouse_pk = str(data.pop(warehouse_fields["warehouse_pk"], ""))
        warehouse_data = {
            "slug": data.pop(warehouse_fields["slug"], None),
            "qty": data.pop(warehouse_fields["quantity"], None),
        }

        # NOTE(review): attribute_pk / warehouse_pk are stringified here but
        # attribute_ids / warehouse_ids are annotated as List[int]; the `in`
        # checks below only match if the callers actually pass string ids --
        # confirm against the call sites.
        if attribute_ids and attribute_pk in attribute_ids:
            result_data = add_attribute_info_to_data(
                pk, attribute_data, "variant attribute", result_data
            )

        if warehouse_ids and warehouse_pk in warehouse_ids:
            result_data = add_warehouse_info_to_data(pk, warehouse_data, result_data)

    # Join set-valued entries (collected across rows) into sorted CSV strings.
    result: Dict[int, Dict[str, str]] = {
        pk: {
            header: ", ".join(sorted(values)) if isinstance(values, set) else values
            for header, values in data.items()
        }
        for pk, data in result_data.items()
    }
    return result
25,978
def vnorm(velocity, window_size):
    """
    Normalize velocity with latest window data.
    - Note that std is not divided. Only subtract mean
    - data should have dimension 3.

    Args:
        velocity: tensor of shape (batch, N, 3).
        window_size: number of trailing samples in the moving-average window.

    Returns:
        Tensor of the same shape with the trailing windowed mean subtracted;
        returned on CPU if the input was on CPU.
    """
    v = velocity
    N = v.shape[1]
    if v.dim() != 3:
        print("velocity's dim must be 3 for batch operation")
        exit(-1)

    # Run the accumulation on GPU when available, then move back at the end.
    on_gpu = v.is_cuda
    if not on_gpu and torch.cuda.is_available():
        v = v.cuda()

    padding_size = window_size - 1
    batch_size = v.shape[0]
    # Left-pad by replicating the first sample so early timesteps have a
    # full (edge-padded) window.
    pad = v[:, 0, :].reshape(batch_size, 1, 3).expand(batch_size, padding_size, 3)
    v_normed = torch.cat([pad, v], dim=1)
    # Accumulate shifted copies in place; after the loop, position t holds
    # the sum of the current sample and the window_size-1 preceding ones.
    for i in range(window_size-1, 0, -1):
        v_normed[:, i-1:i-1+N, :] += v
    # Trailing-window mean, then subtract it from the original signal.
    v_normed = v_normed[:, :N, :] / float(window_size)
    v_normed = v - v_normed

    if not on_gpu:
        v_normed = v_normed.cpu()
    return v_normed
25,979
def valid_collate_fn(data): """Build mini-batch tensors from a list of (image, caption) tuples. Args: data: list of (image, caption) tuple. - image: torch tensor of shape (3, 256, 256). - caption: torch tensor of shape (?); variable length. Returns: images: torch tensor of shape (batch_size, 3, 256, 256). targets: torch tensor of shape (batch_size, padded_length). lengths: list; valid length for each padded caption. """ # Sort a data list by caption length data.sort(key=lambda x: len(x[1]), reverse=True) images, captions, product_ids, query_ids, boxes = zip(*data) # Merge images (convert tuple of 3D tensor to 4D tensor) images_lengths = [image.shape[0] for image in images] images_tensor = torch.zeros(len(images), max(images_lengths), 2048) images_masks = torch.zeros(len(images), max(images_lengths)) boxes_tensor = torch.zeros(len(images), max(images_lengths), 4) for i, image in enumerate(images): end = images_lengths[i] images_tensor[i, :end,:] = image[:,:] images_masks[i, :end] = 1 boxes_tensor[i, :end-1,:] = boxes[i][:,:] #images_tensor[i, :end,:] = image[:end,:] #images = torch.stack(images, 0) # Merget captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] targets = torch.zeros(len(captions), max(lengths)).long() txt_masks = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] targets[i, :end] = cap[:end] txt_masks[i, :end] = 1 return images_tensor, targets, images_lengths, lengths, images_masks, txt_masks, product_ids, query_ids, boxes_tensor
25,980
def parse_statement(tokens):
    """
    statement:
        | 'while' statement_list 'do' statement_list 'end'
        | 'while' statement_list 'do' 'end'
        | 'if' if_body
        | num_literal
        | string_literal
        | builtin
        | identifier
    """
    # 'while' <condition> 'do' [<body>] 'end'
    if tokens.consume_maybe("while"):
        loop = WhileStatement(parse_statement_list(tokens))
        tokens.consume_only("do")
        # An immediate 'end' means the loop has an empty body.
        if not tokens.consume_maybe("end"):
            loop.body = parse_statement_list(tokens)
            tokens.consume_only("end")
        return loop
    # 'if' is parsed by its own dedicated helper.
    if tokens.consume_maybe("if"):
        return parse_if_body(tokens)
    # Anything else is a single-token statement (literal, builtin, identifier).
    return Statement(tokens.consume())
25,981
def test_positive_enrich_observe_observables_relationships(
        module_headers, observable, observable_type):
    """ Perform testing for enrich observe observables endpoint to get
    relationships for observable Qualys module

    ID: CCTRI-798-bcb33509-c153-4436-93c3-7345e7704b9d

    Steps:
        1. Send request to enrich observe observable endpoint

    Expectedresults:
        1. Check that data in response body contains expected information
           in relationships from Qualys module

    Importance: Critical
    """
    observables = [{"value": observable, "type": observable_type}]
    response_from_all_modules = enrich_observe_observables(
        payload=observables,
        **{'headers': module_headers}
    )
    # Narrow the aggregated response down to the Qualys module entry.
    response_from_qualys_ioc = get_observables(
        response_from_all_modules, MODULE_NAME)
    assert response_from_qualys_ioc['module'] == MODULE_NAME
    assert response_from_qualys_ioc['module_instance_id']
    assert response_from_qualys_ioc['module_type_id']

    relationships = response_from_qualys_ioc['data']['relationships']
    sightings = response_from_qualys_ioc['data']['sightings']
    indicators = response_from_qualys_ioc['data']['indicators']
    judgements = response_from_qualys_ioc['data']['judgements']
    # Collect referenced entity ids so relationship endpoints can be validated.
    indicators_ids = frozenset(
        indicator['id'] for indicator in indicators['docs'])
    judgements_ids = frozenset(
        judgement['id'] for judgement in judgements['docs'])
    sightings_ids = frozenset(
        sighting['id'] for sighting in sightings['docs'])
    assert len(relationships['docs']) > 0
    for relationship in relationships['docs']:
        assert relationship['schema_version']
        assert relationship['type'] == 'relationship'
        assert relationship['source'] == MODULE_NAME
        assert relationship['id'].startswith('transient:relationship-')
        assert 'external_ids' in relationship
        assert 'source_uri' in relationship
        # 'based-on' links judgement->indicator or sighting->judgement;
        # 'sighting-of' links sighting->indicator.
        if relationship['relationship_type'] == 'based-on':
            if relationship['target_ref'].startswith('transient:indicator-'):
                assert relationship['target_ref'] in indicators_ids
                assert relationship['source_ref'] in judgements_ids
            elif relationship['target_ref'].startswith('transient:judgement-'):
                assert relationship['target_ref'] in judgements_ids
                assert relationship['source_ref'] in sightings_ids
        elif relationship['relationship_type'] == 'sighting-of':
            assert relationship['target_ref'] in indicators_ids
            assert relationship['source_ref'] in sightings_ids
        else:
            raise AssertionError('Unsupported relationship type')

    assert relationships['count'] == len(relationships['docs']) <= (
        CTR_ENTITIES_LIMIT)
25,982
def vector_to_pytree_fun(func):
    """Make a pytree -> pytree function from a vector -> vector function."""
    def pytree_func(state):
        # Wrap the pytree, apply the vector function, unwrap the result.
        as_vector = Vector(state)
        return func(as_vector).pytree
    return pytree_func
25,983
def _write_exif_data(
    self,
    filepath,
    use_albums_as_keywords=False,
    use_persons_as_keywords=False,
    keyword_template=None,
    description_template=None,
    ignore_date_modified=False,
):
    """ write exif data to image file at filepath

    Args:
        filepath: full path to the image file
        use_albums_as_keywords: treat album names as keywords
        use_persons_as_keywords: treat person names as keywords
        keyword_template: (list of strings); list of template strings to
            render as keywords
        description_template: template string to render as the image
            description
        ignore_date_modified: if True, sets EXIF:ModifyDate to
            EXIF:DateTimeOriginal even if date_modified is set

    Raises:
        FileNotFoundError: if filepath does not exist
    """
    if not os.path.exists(filepath):
        raise FileNotFoundError(f"Could not find file {filepath}")
    # Assemble the tag->value mapping first, then apply it in one ExifTool
    # session so the file is rewritten only once.
    exif_info = self._exiftool_dict(
        use_albums_as_keywords=use_albums_as_keywords,
        use_persons_as_keywords=use_persons_as_keywords,
        keyword_template=keyword_template,
        description_template=description_template,
        ignore_date_modified=ignore_date_modified,
    )

    with ExifTool(filepath) as exiftool:
        for exiftag, val in exif_info.items():
            if exiftag == "_CreatedBy":
                # Internal bookkeeping key, not a real EXIF tag.
                continue
            elif type(val) == list:
                # Multi-valued tags (e.g. keywords) are set one value at a time.
                for v in val:
                    exiftool.setvalue(exiftag, v)
            else:
                exiftool.setvalue(exiftag, val)
25,984
def test_convertOrbitalElements_hyperbolic():
    """
    Read the test dataset for cartesian and keplerian states for each
    hyperbolic orbit target at each T0. Using THOR convert the cartesian
    states to keplerian states, and convert the keplerian states to cartesian
    states. Then compare how well the converted states agree to the ones
    pulled from Horizons.
    """
    # Ephemeris kernels are required for the frame/epoch conversions.
    getSPICEKernels(KERNELS_DE440)
    setupSPICE(KERNELS_DE440, force=True)

    # Read vectors and elements from test data set
    vectors_df = pd.read_csv(
        os.path.join(DATA_DIR, "vectors.csv")
    )
    elements_df = pd.read_csv(
        os.path.join(DATA_DIR, "elements.csv")
    )

    # Limit vectors and elements to hyperbolic orbits only
    hyperbolic_vectors = vectors_df["orbit_class"].str.contains("Hyperbolic")
    hyperbolic_elements = elements_df["orbit_class"].str.contains("Hyperbolic")
    vectors_df = vectors_df[hyperbolic_vectors]
    elements_df = elements_df[hyperbolic_elements]

    # Pull state vectors and elements
    vectors = vectors_df[["x", "y", "z", "vx", "vy", "vz"]].values
    elements = elements_df[["a", "e", "incl", "Omega", "w", "M"]].values

    # Convert the keplerian states to cartesian states using THOR
    orbits_cartesian_converted = convertOrbitalElements(
        elements,
        "keplerian",
        "cartesian",
        mu=MU
    )

    # Convert the cartesian states to keplerian states using THOR
    orbits_keplerian_converted = convertOrbitalElements(
        vectors,
        "cartesian",
        "keplerian",
        mu=MU
    )

    # Conversion of cartesian orbits to keplerian orbits
    # is within this tolerance of Horizons
    testOrbits(
       orbits_keplerian_converted,
       elements,
       orbit_type="keplerian",
       position_tol=(10*u.cm),
       angle_tol=(1*u.microarcsecond),
       unitless_tol=(1e-10*u.dimensionless_unscaled)
    )

    # Conversion of keplerian orbits to cartesian orbits
    # is within this tolerance of Horizons
    testOrbits(
       orbits_cartesian_converted,
       vectors,
       orbit_type="cartesian",
       position_tol=(10*u.cm),
       velocity_tol=(1*u.mm/u.s),
       magnitude=True
    )
    return
25,985
def _filter_contacts(people_filter, maillist_filter, qs, values):
    """Helper for filtering based on subclassed contacts.

    Runs the filter separately on each subclass (field defined by argument,
    the same values are used), then filters the queryset to only keep items
    that have a matching contact.

    :param people_filter: lookup path on Person (without the ``__in`` suffix)
    :param maillist_filter: lookup path on Maillist (without ``__in``)
    :param qs: queryset whose ``contact`` relation is being filtered
    :param values: values matched against both lookups
    :return: filtered queryset
    """
    people = Person.objects.filter(**{people_filter + '__in': values})
    mailing_lists = Maillist.objects.filter(**{maillist_filter + '__in': values})
    # A contact matches if it is either a matching Person or Maillist.
    return qs.filter(Q(contact__in=people) | Q(contact__in=mailing_lists))
25,986
def build_uvprojx_element_various_controls(xml_sub_element, xml_tag):
    """ Modify and update UVPROJX various control element.

    Rewrites the text of every ``xml_tag`` child found under
    ``xml_sub_element`` in the Keil uVision project file
    (DLG_UVPROJX_NAME) with a semicolon-separated include-path list,
    then writes the tree back to disk.

    Relies on module-level configuration globals:
    DLG_UVPROJX_NAME, CLEAN_PROJ_ENV, DLG_DEFAULT_INCLUDE_PATHS,
    DLG_SDK_ROOT_DIRECTORY_TO_WRITE, DLG_FIND_STR_PATTERN.
    """
    tree = ET.parse(DLG_UVPROJX_NAME)
    root = tree.getroot()

    updated_data = ""
    if (CLEAN_PROJ_ENV == True):
        # Clean environment: fall back to the default include path list.
        updated_data = DLG_DEFAULT_INCLUDE_PATHS
    else:
        # Build the include list by walking the project source, SDK and
        # third-party directory trees and joining every directory with ';'.
        DLG_WORKING_PROJECT_PARENT_DIRECTORY = "..\\"
        DLG_SDK_SUBFOLDER_DIRECTORY = DLG_SDK_ROOT_DIRECTORY_TO_WRITE + DLG_FIND_STR_PATTERN[0][:-1]
        DLG_THIRD_PARTY_SUBFOLDER_DIRECTORY = DLG_SDK_ROOT_DIRECTORY_TO_WRITE + DLG_FIND_STR_PATTERN[1][:-1]
        DLG_PROJECT_SOURCE_SUBFOLDER_DIRECTORY = os.path.join(DLG_WORKING_PROJECT_PARENT_DIRECTORY, 'src')
        end_ele_char = ";"
        updated_data = ""
        for dirpath, dirname, filename in os.walk(DLG_PROJECT_SOURCE_SUBFOLDER_DIRECTORY):
            # if x = os.path.basename(dirpath)
            updated_data = updated_data + dirpath + end_ele_char
        for dirpath, dirname, filename in os.walk(DLG_SDK_SUBFOLDER_DIRECTORY):
            # if x = os.path.basename(dirpath)
            updated_data = updated_data + dirpath + end_ele_char
        for dirpath, dirname, filename in os.walk(DLG_THIRD_PARTY_SUBFOLDER_DIRECTORY):
            # if x = os.path.basename(dirpath)
            updated_data = updated_data + dirpath + end_ele_char

    for t_sub_element in root.findall(xml_sub_element):
        # print(t_sub_element.tag)
        for temp_element in t_sub_element:
            temp_tag = temp_element.tag
            if (temp_tag == xml_tag):
                t_sub_element.find(temp_tag).text = updated_data
                # Print updated_data in a readable format.
                # NOTE(review): this replace() mutates updated_data, so if a
                # second matching tag exists it receives the reformatted
                # (newline-containing) text -- confirm this is intended.
                updated_data = updated_data.replace(";", ";\r\n")
                print("IncludePath ELEMENT TEXT:\r\n" + updated_data + "END OF IncludePath ELEMENT TEXT.\r\n")
                # print(temp_text)
                # print(updated_data[:-1])
                # if(temp_text == updated_data[:-1]):
                #     print("OK")

    # my_file = open(DLG_UVPROJX_NAME,"w")
    # x = '''<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>\r\n'''
    # my_file.write(x)
    # my_file.write((ET.tostring(root, encoding='UTF-8').decode('utf8')))
    # my_file.close()
    write_xml_file(tree, DLG_UVPROJX_NAME)
25,987
def viz_property(statement, properties):
    """Create properties for graphviz element"""
    # No properties: terminate the bare statement.
    if not properties:
        return statement + ";"
    joined = " ".join(properties)
    return "{}[{}];".format(statement, joined)
25,988
def create_fsl_fnirt_nonlinear_reg(name='fsl_fnirt_nonlinear_reg'):
    """
    Performs non-linear registration of an input file to a reference file
    using FSL FNIRT.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    nonlinear_register : nipype.pipeline.engine.Workflow

    Notes
    -----
    Workflow Inputs::

        inputspec.input_skull : string (nifti file)
            File of input brain with skull
        inputspec.reference_skull : string (nifti file)
            Target brain with skull to normalize to
        inputspec.fnirt_config : string (fsl fnirt config file)
            Configuration file containing parameters that can be
            specified in fnirt

    Workflow Outputs::

        outputspec.output_brain : string (nifti file)
            Normalizion of input brain file
        outputspec.nonlinear_xfm : string
            Nonlinear field coefficients file of nonlinear transformation

    Registration Procedure:

    1. Perform a nonlinear registration on an input file to the
       reference file utilizing affine transformation from the previous
       step as a starting point.
    2. Invert the affine transformation to provide the user a
       transformation (affine only) from the space of the reference
       file to the input file.

    Workflow Graph:

    .. image:: ../images/nonlinear_register.dot.png
        :width: 500

    Detailed Workflow Graph:

    .. image:: ../images/nonlinear_register_detailed.dot.png
        :width: 500
    """
    nonlinear_register = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=['input_brain',
                                                       'input_skull',
                                                       'reference_brain',
                                                       'reference_skull',
                                                       'interp',
                                                       'ref_mask',
                                                       'linear_aff',
                                                       'fnirt_config']),
                        name='inputspec')

    outputspec = pe.Node(util.IdentityInterface(fields=['output_brain',
                                                        'nonlinear_xfm']),
                         name='outputspec')

    # FNIRT computes the warp field from the skull-on images.
    nonlinear_reg = pe.Node(interface=fsl.FNIRT(),
                            name='nonlinear_reg_1')

    nonlinear_reg.inputs.fieldcoeff_file = True
    nonlinear_reg.inputs.jacobian_file = True

    # ApplyWarp resamples the brain-only image with the computed field.
    brain_warp = pe.Node(interface=fsl.ApplyWarp(),
                         name='brain_warp')

    nonlinear_register.connect(inputspec, 'input_skull',
                               nonlinear_reg, 'in_file')

    nonlinear_register.connect(inputspec, 'reference_skull',
                               nonlinear_reg, 'ref_file')

    nonlinear_register.connect(inputspec, 'interp',
                               brain_warp, 'interp')

    nonlinear_register.connect(inputspec, 'ref_mask',
                               nonlinear_reg, 'refmask_file')

    # FNIRT parameters are specified by FSL config file
    # ${FSLDIR}/etc/flirtsch/TI_2_MNI152_2mm.cnf (or user-specified)
    nonlinear_register.connect(inputspec, 'fnirt_config',
                               nonlinear_reg, 'config_file')

    # The affine from the preceding linear step seeds the nonlinear fit.
    nonlinear_register.connect(inputspec, 'linear_aff',
                               nonlinear_reg, 'affine_file')

    nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
                               outputspec, 'nonlinear_xfm')

    nonlinear_register.connect(inputspec, 'input_brain',
                               brain_warp, 'in_file')

    nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
                               brain_warp, 'field_file')

    nonlinear_register.connect(inputspec, 'reference_brain',
                               brain_warp, 'ref_file')

    nonlinear_register.connect(brain_warp, 'out_file',
                               outputspec, 'output_brain')

    return nonlinear_register
25,989
def _subspace_plot(
    inputs, output, *, input_names, output_name, scatter_args=None,
    histogram_args=None, min_output=None, max_output=None
):
    """
    Do actual plotting.

    Builds a lower-triangular grid of pairwise scatter plots (coloured by
    ``output``) with per-input histograms on the diagonal and a shared
    horizontal colorbar.

    :param inputs: 2D array, one column per input variable
    :param output: 1D array of output values used for the colour scale
    :param input_names: axis labels, one per input column
    :param output_name: figure title (skipped when None)
    :param scatter_args: extra kwargs forwarded to the scatter helper
    :param histogram_args: extra kwargs forwarded to the histogram helper
    :param min_output, max_output: colour-scale bounds; default to the
        observed output range
    :return: the matplotlib figure
    """
    if scatter_args is None:
        scatter_args = {}
    if histogram_args is None:
        histogram_args = {}
    if min_output is None:
        min_output = min(output)
    if max_output is None:
        max_output = max(output)

    # see https://matplotlib.org/examples/pylab_examples/multi_image.html
    _, num_inputs = inputs.shape
    fig, axes, grid = _setup_axes(input_names=input_names)
    if output_name is not None:
        fig.suptitle(output_name)

    # Shared normalisation so every scatter uses the same colour scale.
    norm = _Normalize(min_output, max_output)

    # Diagonal: one histogram per input variable.
    hist_plots = []
    for i in range(num_inputs):
        hist_plots.append(_plot_hist(
            inputs[:, i], axis=axes[i][i], **histogram_args
        ))

    # Lower triangle: pairwise scatter plots coloured by output.
    scatter_plots = []
    scatter_plots_grid = []
    for y_index in range(num_inputs):
        scatter_plots_grid.append([])
        for x_index in range(y_index):
            sc_plot = _plot_scatter(
                x=inputs[:, x_index], y=inputs[:, y_index], z=output,
                axis=axes[y_index][x_index],  # check order
                norm=norm, **scatter_args
            )
            scatter_plots.append(sc_plot)
            scatter_plots_grid[y_index].append(sc_plot)

    # One colorbar for all scatter plots, placed along the top row.
    cbar_ax = fig.add_subplot(grid[0, 1:])
    fig.colorbar(
        scatter_plots[0], cax=cbar_ax, orientation='horizontal',
    )
    cbar_ax.set_aspect(1/20)

    return fig
25,990
def preload_template(fname, comment='#'):
    """Preloads a template from a file relative to the calling Python file.

    :param fname: template path; relative paths are resolved against the
        directory of the *caller's* source file, not the CWD.
    :param comment: comment leader passed through to annotate_block.
    :return: the annotated template text.
    """
    comment = comment.strip()
    if not os.path.isabs(fname):
        # Walk one frame up the stack to find the file that called us.
        # NOTE: this relies on being called directly (no wrapper in between).
        previous_frame = inspect.currentframe().f_back
        caller_fname, _, _, _, _ = inspect.getframeinfo(previous_frame)
        fname = os.path.dirname(caller_fname) + os.sep + fname
    with open(fname, 'r') as fil:
        template = fil.read()
    return annotate_block(template, fname, comment)
25,991
def detect_windows_needs_driver(sd, print_reason=False):
    """detect if Windows user needs to install driver for a supported device

    :param sd: device descriptor exposing ``usb_vendor_id_in_hex``; falsy
        values skip the check entirely.
    :param print_reason: when True, print the PowerShell output that
        triggered the positive detection.
    :return: True when a matching PnP device reports CM_PROB_FAILED_INSTALL.
    """
    need_to_install_driver = False
    if sd:
        system = platform.system()
        #print(f'in detect_windows_needs_driver system:{system}')
        if system == "Windows":
            # if windows, see if we can find a DeviceId with the vendor id
            # Get-PnpDevice | Where-Object{ ($_.DeviceId -like '*10C4*')} | Format-List
            command = 'powershell.exe "[Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8; Get-PnpDevice | Where-Object{ ($_.DeviceId -like '
            command += f"'*{sd.usb_vendor_id_in_hex.upper()}*'"
            command += ')} | Format-List"'
            #print(f'command:{command}')
            _, sp_output = subprocess.getstatusoutput(command)
            #print(f'sp_output:{sp_output}')
            # CM_PROB_FAILED_INSTALL in the device status means the driver
            # failed to install for the matching device.
            search = f'CM_PROB_FAILED_INSTALL'
            #print(f'search:"{search}"')
            if re.search(search, sp_output, re.MULTILINE):
                need_to_install_driver = True
                # if the want to see the reason
                if print_reason:
                    print(sp_output)
    return need_to_install_driver
25,992
async def test_non_cachable_zero_ttl(cache: Cache) -> None:
    """
    We shouldn't bother caching if the cache TTL is zero.
    """
    cache.ttl = 0
    # Minimal ASGI scope for a plain GET request.
    scope: Scope = {
        "type": "http",
        "method": "GET",
        "path": "/path",
        "headers": [],
    }
    request = Request(scope)
    response = PlainTextResponse("Hello, world!")
    # store_in_cache must refuse the response rather than cache it.
    with pytest.raises(ResponseNotCachable):
        await store_in_cache(response, request=request, cache=cache)
25,993
def compute_footprint(sequence, utilization):
    """ compute the cache footprint """
    # Emit one footprint lower bound per stencil, plus a pairwise term
    # for every earlier stencil in the sequence.
    print(r"\ compute the cache footprint of the individual stencils")
    for high, stencil in enumerate(sequence):
        print(f"f%{high} >= {utilization[stencil][high]}")
        for low in range(high):
            reuse = utilization[stencil][low]
            print(f"f%{high} + {reuse} g%{high} - {reuse} g%{low} >= {reuse}")
25,994
def unload_plugin(name, category=None):
    """ remove single plugin

    Parameters
    ----------
    name : str
        plugin name
    category : str
        plugin category; when None, the plugin is removed from every
        category it appears in

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin],category='decoders')

    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}

    >>> unload_plugin('example','decoders')

    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    """
    if category is not None:
        # Targeted removal; raises KeyError if the plugin is absent.
        _all_plugins[category].pop(name)
        return
    # No category given: purge the name from every category that has it.
    for plugins in _all_plugins.values():
        if name in plugins:
            plugins.pop(name)
25,995
def install_microcode_filter(*args):
    """
    install_microcode_filter(filter, install=True)

    register/unregister non-standard microcode generator

    @param filter: - microcode generator object (C++: microcode_filter_t *)
    @param install: - TRUE - register the object, FALSE - unregister
    (C++: bool)
    """
    # SWIG-generated thin wrapper; all work happens in the native module.
    return _ida_hexrays.install_microcode_filter(*args)
25,996
def get_marvel_character_embed(attribution_text, result):
    """Parses a given JSON object that contains a result of a Marvel
    character and turns it into an Embed

    :param attribution_text: The attributions to give to Marvel for using
        the API (shown in the embed footer)
    :param result: A JSON object of a Marvel API call result; must contain
        'name', 'description', 'series', 'comics' and 'thumbnail' keys
    :returns: A nice-looking Embed for discord users to look at
    """
    return Embed(
        title=result["name"],
        description=result["description"],
        colour=PRIMARY_EMBED_COLOR
    ).add_field(
        # Bulleted list of series the character appears in.
        name="Series",
        value="\n".join([
            " * `{}`".format(series["name"])
            for series in result["series"]["items"]
        ])
    ).add_field(
        name="Comics",
        value="\n".join([
            " * `{}`".format(comic["name"])
            for comic in result["comics"]["items"]
        ])
    ).set_image(
        # Marvel thumbnails are split into a path and an extension.
        url="{}.{}".format(
            result["thumbnail"]["path"],
            result["thumbnail"]["extension"]
        )
    ).set_footer(
        text=attribution_text
    )
25,997
def get_or_create_dfp_targeting_key(name, key_type='FREEFORM'):
    """
    Get or create a custom targeting key by name.

    Args:
        name (str)
    Returns:
        an integer: the ID of the targeting key
    """
    existing_id = dfp.get_custom_targeting.get_key_id_by_name(name)
    if existing_id is not None:
        return existing_id
    # Not found: create the key with the requested type.
    return dfp.create_custom_targeting.create_targeting_key(
        name, key_type=key_type)
25,998
def positive(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.

    See its docstring for more information.
    """
    # Unary plus is only defined for numeric dtypes in the Array API spec.
    if x.dtype in _numeric_dtypes:
        return Array._new(np.positive(x._array))
    raise TypeError("Only numeric dtypes are allowed in positive")
25,999