Dataset columns: content (string, lengths 22 to 815k characters) and id (int64, range 0 to 4.91M).
def test_getDisabledChannel_where_all_channels_are_used():
    """Get the first disabled channel."""
    anode = Node('foo', 'bar')

    channel1 = Channel(index=1, role=1)
    channel1.settings.modem_config = 3
    channel1.settings.psk = b'\x01'
    channel2 = Channel(index=2, role=2)
    channel3 = Channel(index=3, role=2)
    channel4 = Channel(index=4, role=2)
    channel5 = Channel(index=5, role=2)
    channel6 = Channel(index=6, role=2)
    channel7 = Channel(index=7, role=2)
    channel8 = Channel(index=8, role=2)

    channels = [channel1, channel2, channel3, channel4,
                channel5, channel6, channel7, channel8]

    anode.channels = channels
    ch = anode.getDisabledChannel()
    assert ch is None
5,343,200
def complexDivision(a, b):
    """
    Complex division.
    :param a: complex operand a
    :param b: complex operand b
    :return: the division result
    """
    res = np.zeros(a.shape, a.dtype)
    divisor = 1. / (b[:, :, 0] ** 2 + b[:, :, 1] ** 2)

    res[:, :, 0] = (a[:, :, 0] * b[:, :, 0] + a[:, :, 1] * b[:, :, 1]) * divisor
    res[:, :, 1] = (a[:, :, 1] * b[:, :, 0] + a[:, :, 0] * b[:, :, 1]) * divisor
    return res
5,343,201
def test_int_n():
    """Test method"""
    message = "n must be integer"
    with raises(AssertionError):
        _ = BlobFactory(n=5.0)
        assert False, message
5,343,202
def validate_activation_time(time):
    """
    This function checks that the activation time is not earlier than the current moment.
    :param time: activation time
    :raise ValueError
    :return: None
    """
    if get_date(time) < dt.now():
        raise ValueError('calistra: activation time cannot be earlier '
                         'than current moment.\n')
5,343,203
def issue(license_file, license_description, issuer_certificate, issuer_key, license_file_password, **args): """issue [license file] [license description] Issues a new license and shows information about it. You must specify the issuer certificate and key as --issuer-certificate/key on the command line, and the license file password as --license-file-password. [license description] must be one command line argument on the form not_before=2014-01-01T00:00:00,not_after=2016-01-01T00:00:00,... containing license data fields. """ try: license_data_parameters = dict( (p.strip() for p in i.split('=', 1)) for i in license_description.split(',')) except Exception as e: raise RuntimeError( 'Invalid license data description (%s): %s', license_description, e) try: license_data = LicenseData(**license_data_parameters) except TypeError as e: raise RuntimeError( 'Incomplete license data description (%s): %s', license_description, e) license = License.issue(issuer_certificate, issuer_key, license_data=license_data) with open(license_file, 'wb') as f: license.store(f, license_file_password) show(license_file, issuer_certificate, license_file_password)
5,343,204
def dynamics_RK4(OdeFun, tspan, x, u, v):
    """
    RK4 integrator for a time-invariant dynamical system under a control, u,
    and disturbance, v.
    See https://lpsa.swarthmore.edu/NumInt/NumIntFourth.html

    This implementation is adapted from unstable-zeros's learning-cbfs example
    for two airplanes:
    https://github.com/unstable-zeros/learning-cbfs/blob/master/airplane_example/learning_cbfs_airplane.ipynb

    This function must be called within a loop for a total of N steps of
    integration. Obviously, the smaller the value of T, the better.

    Inputs:
        OdeFun: right-hand side of the ODE to be integrated
        tspan:  a list [start, end] that specifies over what time horizon to
                integrate the dynamics
        x:      state, must be a list, initial condition
        u:      control, must be a list
        v:      disturbance, must be a list

    Author: Lekan Molu, August 09, 2021
    """
    M = 4    # RK4 steps per interval
    h = 0.2  # time step
    if onp.any(tspan):
        hh = (tspan[1] - tspan[0]) / 10 / M

    X = onp.array(x)
    U = onp.array(u)
    V = onp.array(v)

    for j in range(M):
        if onp.any(tspan):
            # integrate over this time horizon
            for h in np.arange(tspan[0], tspan[1], hh):
                k1 = OdeFun(X, U, V)
                k2 = OdeFun(X + h/2 * k1, U, V)
                k3 = OdeFun(X + h/2 * k2, U, V)
                k4 = OdeFun(X + h * k3, U, V)

                X = X + (h/6) * (k1 + 2*k2 + 2*k3 + k4)
        else:
            k1 = OdeFun(X, U, V)
            k2 = OdeFun(X + h/2 * k1, U, V)
            k3 = OdeFun(X + h/2 * k2, U, V)
            k4 = OdeFun(X + h * k3, U, V)

            X = X + (h/6) * (k1 + 2*k2 + 2*k3 + k4)

    return list(X)
5,343,205
def random_decay(num_actions=None, decay_type='polynomial_decay', start_decay_at=0, stop_decay_at=1e9, decay_rate=0., staircase=False, decay_steps=10000, min_exploration_rate=0): """Builds a random decaying exploration. Decay a random value based on number of states and the decay_type. Args: num_actions: `int` or None. If discrete num_action must be None. decay_type: A decay function name defined in `exploration_decay` possible Values: exponential_decay, inverse_time_decay, natural_exp_decay, piecewise_constant, polynomial_decay. start_decay_at: `int`. When to start the decay. stop_decay_at: `int`. When to stop the decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. decay_steps: How often to apply decay. min_exploration_rate: `float`. Don't decay below this number. Returns: `function` the exploration logic operation. """ if num_actions is None: exploration_rate = partial(np.random.randn, 1) else: exploration_rate = partial(np.random.randn, num_actions) exploration_rate = _decay_fn(timestep=get_global_timestep(), exploration_rate=exploration_rate, decay_type=decay_type, start_decay_at=start_decay_at, stop_decay_at=stop_decay_at, decay_rate=decay_rate, staircase=staircase, decay_steps=decay_steps, min_exploration_rate=min_exploration_rate) track(exploration_rate, tf.GraphKeys.EXPLORATION_RATE) return exploration_rate
5,343,206
def not_found_handler(not_found):
    """Basic not-found request handler."""
    return render_template('except.html', http_excep=not_found,
                           message='No resource found at this URL.',
                           http_code=404,
                           http_error="Not Found")
5,343,207
def remove_minor_regions(labeled_img, biggest_reg_lab):
    """
    Set all the minor regions to background and the biggest to 1.

    Returns:
        A numpy array with the new segmentation.
    """
    f = np.vectorize(lambda x: 1 if x == biggest_reg_lab else 0)
    return f(labeled_img)
5,343,208
def test_raise_10(): """ Feature: graph raise by JIT Fallback. Description: Test raise(string % var). Expectation: No exception. """ class RaiseNet(nn.Cell): def construct(self, x): raise ValueError(f"The input can not be %s." % x) with pytest.raises(ValueError) as raise_info_10: net = RaiseNet() res = net(11) print("res:", res) assert "The input can not be 11." in str(raise_info_10.value)
5,343,209
def get_right(html):
    """
    Get the common parsed part (right-hand page, detailed user profile section).
    """
    soup = BeautifulSoup(html, "html.parser")
    scripts = soup.find_all('script')
    pattern = re.compile(r'FM.view\((.*)\)')
    cont = ''
    # First determine the identifiers of the right-hand side;
    # enterprise users may have two r_ids.
    rids = []
    for script in scripts:
        m = pattern.search(script.string)
        if m and 'WB_frame_c' in script.string:
            all_info = m.group(1)
            cont = json.loads(all_info).get('html', '')
            if not cont:
                return ''
            rsoup = BeautifulSoup(cont, 'html.parser')
            r_ids = rsoup.find(attrs={'class': 'WB_frame_c'}).find_all('div')
            for r in r_ids:
                rids.append(r['id'])

    for script in scripts:
        for r_id in rids:
            m = pattern.search(script.string)
            if m and r_id in script.string:
                all_info = m.group(1)
                cont += json.loads(all_info).get('html', '')
    return cont
5,343,210
def test(): """ Verify that the renderer can trim long file names correctly """ # get the renderer from journal.Alert import Alert as alert # the color spaces from journal.ANSI import ANSI # and a channel from journal.Warning import Warning as warning # make a channel channel = warning(name="tests.journal.debug") # add a fake stack trace channel.notes["filename"] = "a_" + ("very_" * 60) + "long_filename" channel.notes["line"] = "30" channel.notes["function"] = "test" # inject channel.line("warning channel:") channel.line(" hello from a very long file name") # make a palette palette = { "reset": ANSI.x11("normal"), "channel": ANSI.x11("light slate gray"), "warning": ANSI.x11("orange"), "body": "", } # instantiate the renderer renderer = alert() # ask it to do its thing page = '\n'.join(renderer.render(palette=palette, entry=channel.entry)) # show me # print(page) # all done return
5,343,211
def marginal_ln_likelihood_worker(task): """ Compute the marginal log-likelihood, i.e. the likelihood integrated over the linear parameters. This is meant to be ``map``ped using a processing pool` within the functions below and is not supposed to be in the public API. Parameters ---------- task : iterable An array containing the indices of samples to be operated on, the filename containing the prior samples, and the data. Returns ------- ll : `numpy.ndarray` Array of log-likelihood values. """ slice_or_idx, task_id, prior_samples_file, joker_helper = task # Read the batch of prior samples batch = read_batch(prior_samples_file, joker_helper.packed_order, slice_or_idx, units=joker_helper.internal_units) if batch.dtype != np.float64: batch = batch.astype(np.float64) # memoryview is returned ll = joker_helper.batch_marginal_ln_likelihood(batch) return np.array(ll)
5,343,212
def coverage(session: Session) -> None:
    """Upload coverage data."""
    _install_with_constraints(session, "coverage[toml]", "codecov")
    session.run("coverage", "xml", "--fail-under=0")
    session.run("codecov", *session.posargs)
5,343,213
def chunks(l, n):
    """ Yield n successive chunks from l.
    """
    newn = int(1.0 * len(l) / n + 0.5)
    for i in xrange(0, n - 1):
        yield l[i*newn:i*newn + newn]
    yield l[n*newn - newn:]
5,343,214
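A small usage sketch for the chunks() helper above. The original uses Python 2's xrange, so this sketch (an assumption, not part of the source) aliases it to range for Python 3 and assumes chunks() is in scope.

# Hypothetical usage of chunks(); alias xrange for Python 3.
xrange = range

data = list(range(10))
parts = list(chunks(data, 3))
# newn = int(1.0 * 10 / 3 + 0.5) = 3, so the first n-1 chunks take 3 items each
# and the final chunk takes everything from index n*newn - newn = 6 onward.
print(parts)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]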
def test_write_csv():
    """Test write_csv."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        csv_path = Path(tmp_dir) / 'tmp.json'
        utils_data.write_csv(csv_path, [['header 1', 'headder 2'],
                                        ['row 1', 'a'],
                                        ['row 2', 'b']])
        result = csv_path.read_text()
        assert result == 'header 1,headder 2\nrow 1,a\nrow 2,b\n'
5,343,215
def from_matvec(matrix, vector=None):
    """ Combine a matrix and vector into a homogeneous affine

    Combine a rotation / scaling / shearing matrix and translation vector into
    a transform in homogeneous coordinates.

    Parameters
    ----------
    matrix : array-like
        An NxM array representing the linear part of the transform.
        A transform from an M-dimensional space to an N-dimensional space.
    vector : None or array-like, optional
        None or an (N,) array representing the translation. None corresponds
        to an (N,) array of zeros.

    Returns
    -------
    xform : array
        An (N+1, M+1) homogeneous transform matrix.

    See Also
    --------
    to_matvec

    Examples
    --------
    >>> from_matvec(np.diag([2, 3, 4]), [9, 10, 11])
    array([[ 2,  0,  0,  9],
           [ 0,  3,  0, 10],
           [ 0,  0,  4, 11],
           [ 0,  0,  0,  1]])

    The `vector` argument is optional:

    >>> from_matvec(np.diag([2, 3, 4]))
    array([[2, 0, 0, 0],
           [0, 3, 0, 0],
           [0, 0, 4, 0],
           [0, 0, 0, 1]])
    """
    matrix = np.asarray(matrix)
    nin, nout = matrix.shape
    t = np.zeros((nin + 1, nout + 1), matrix.dtype)
    t[0:nin, 0:nout] = matrix
    t[nin, nout] = 1.
    if vector is not None:
        t[0:nin, nout] = vector
    return t
5,343,216
def create_parser(): """ Creates the argparse parser with all the arguments. """ parser = argparse.ArgumentParser( description='Management CLI for Enodebd', formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Add subcommands subparsers = parser.add_subparsers(title='subcommands', dest='cmd') parser_get_parameter = subparsers.add_parser( 'get_parameter', help='Send GetParameterValues message') parser_get_parameter.add_argument( 'device_serial', help='eNodeB Serial ID') parser_get_parameter.add_argument( 'parameter_name', help='Parameter Name') parser_set_parameter = subparsers.add_parser( 'set_parameter', help='Send SetParameterValues message') parser_set_parameter.add_argument( 'device_serial', help='eNodeB Serial ID') parser_set_parameter.add_argument( 'parameter_name', help='Parameter Name') parser_set_parameter.add_argument( 'value', help='Parameter Value') parser_set_parameter.add_argument( 'data_type', help='Parameter Data Type', choices=['int', 'bool', 'string']) parser_set_parameter.add_argument( '--parameter_key', default='', help='Parameter Key') parser_config_enodeb = subparsers.add_parser( 'config_enodeb', help='Configure eNodeB') parser_config_enodeb.add_argument( 'device_serial', help='eNodeB Serial ID') parser_reboot_enodeb = subparsers.add_parser( 'reboot_enodeb', help='Reboot eNodeB') parser_reboot_enodeb.add_argument( 'device_serial', help='eNodeB Serial ID') parser_reboot_all_enodeb = subparsers.add_parser( 'reboot_all_enodeb', help='Reboot all eNodeB') parser_get_status = subparsers.add_parser( 'get_status', help='Get enodebd status') parser_get_all_status = subparsers.add_parser( 'get_all_status', help='Get all attached eNodeB status') parser_get_enb_status = subparsers.add_parser( 'get_enb_status', help='Get eNodeB status') parser_get_enb_status.add_argument( 'device_serial', help='eNodeB Serial ID') # Add function callbacks parser_get_parameter.set_defaults(func=get_parameter) parser_set_parameter.set_defaults(func=set_parameter) parser_config_enodeb.set_defaults(func=configure_enodeb) parser_reboot_enodeb.set_defaults(func=reboot_enodeb) parser_reboot_all_enodeb.set_defaults(func=reboot_all_enodeb) parser_get_status.set_defaults(func=get_status) parser_get_all_status.set_defaults(func=get_all_status) parser_get_enb_status.set_defaults(func=get_enb_status) return parser
5,343,217
def read_file(fp, limit=DEFAULT_FILE_READ_SIZE):
    """
    Return output of fp.read() limited to `limit` bytes of output from the
    end of file.
    """
    fp.seek(0, 2)  # Go to EOF
    total = fp.tell()
    if total > limit:
        fp.seek(total - limit)
    else:
        fp.seek(0)
    return fp.read()
5,343,218
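A quick usage sketch for read_file() above, using an in-memory binary buffer so it stays self-contained; the limit is passed explicitly because DEFAULT_FILE_READ_SIZE is defined elsewhere in the original code, and read_file() is assumed to be in scope.

import io

buf = io.BytesIO(b"0123456789" * 10)   # 100 bytes in total
tail = read_file(buf, limit=25)
print(len(tail), tail[:5])             # 25 b'56789' (the last 25 bytes)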
def sample_quaternions(shape=None):
    """
    Effective Sampling and Distance Metrics for 3D Rigid Body Path Planning,
    James J. Kuffner (2004)
    https://ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf
    """
    s = np.random.random(shape)
    sigma1 = np.sqrt(1 - s)
    sigma2 = np.sqrt(s)
    theta1 = np.random.uniform(0, 2 * np.pi, shape)
    theta2 = np.random.uniform(0, 2 * np.pi, shape)
    w = np.cos(theta2) * sigma2
    x = np.sin(theta1) * sigma1
    y = np.cos(theta1) * sigma1
    z = np.sin(theta2) * sigma2
    return np.stack([w, x, y, z], axis=-1)
5,343,219
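A brief check for sample_quaternions() above (assumed in scope): the construction guarantees unit-norm quaternions, since w^2 + z^2 = s and x^2 + y^2 = 1 - s.

import numpy as np

np.random.seed(0)
q = sample_quaternions(shape=(5,))       # shape (5, 4)
norms = np.linalg.norm(q, axis=-1)
print(q.shape, np.allclose(norms, 1.0))  # (5, 4) True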
def _select_free_device(existing): """ Given a list of allocated devices, return an available device name. According to http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html all AWS Linux instances have ``/dev/sd[a-z]`` available. However: - ``sda`` is reserved for the root device (technically only ``sda1``); - Amazon "strongly recommend that you don't" use instance store names (usually ``/dev/sd[b-e]``) "because the behavior can be unpredictable"; - some "custom kernels might have restrictions that limit use to ``/dev/sd[f-p]``". ``sd[f-p]`` only allows 11 devices, so to increase this, ignore the least stringent statement above, and allow ``sd[f-z]`` (21 devices). To reduce the risk of failing on custom AMIs, select from ``[f-p]`` first. Any further increase will need to start mining the ``hd[a-z][1-15]`` and ``xvd[b-c][a-z]`` namespaces, but which to use depends on whether the AMI uses paravirtualization or HVM. :param Sequence[bytes]: List of allocated device basenames (e.g. ``[b'sda']``). :return unicode file_name: available device name for attaching EBS volume. """ local_devices = frozenset(existing) sorted_devices = sorted(existing) IN_USE_DEVICES(devices=sorted_devices).write() for suffix in b"fghijklmonpqrstuvwxyz": next_local_device = b'xvd' + suffix next_local_sd_device = b'sd' + suffix file_name = u'/dev/sd' + unicode(suffix) possible_devices = [ next_local_device, next_local_sd_device ] if not local_devices.intersection(possible_devices): return file_name # Could not find any suitable device that is available # for attachment. Log to Eliot before giving up. NO_AVAILABLE_DEVICE(devices=sorted_devices).write() raise NoAvailableDevice()
5,343,220
def create_target_delivery_request(get_offers_opts): """Converts dict representation of get_offers options to TargetDeliveryRequest object""" return TargetDeliveryRequest(request=create_delivery_request(get_offers_opts.get("request")), target_cookie=get_offers_opts.get("targetCookie"), target_location_hint=get_offers_opts.get("targetLocationHint"), consumer_id=get_offers_opts.get("consumerId"), customer_ids=get_offers_opts.get("customerIds"), session_id=get_offers_opts.get("sessionId"), visitor=get_offers_opts.get("visitor"))
5,343,221
def wintype_to_cdata(wintype): """ Returns the underlying CFFI cdata object or ffi.NULL if wintype is None. Used internally in API wrappers to "convert" pywincffi's Python types to the required CFFI cdata objects when calling CFFI functions. Example: >>> from pywincffi.core import dist >>> from pywincffi.kernel32 import CreateEvent >>> from pywincffi.wintypes import wintype_to_cdata >>> ffi, lib = dist.load() >>> # Get an event HANDLE, using the wrapper: it's a Python HANDLE object. >>> hEvent = CreateEvent(bManualReset=False, bInitialState=False) >>> # Call ResetEvent directly without going through the wrapper: >>> hEvent_cdata = wintype_to_cdata(hEvent) >>> result = lib.ResetEvent(hEvent_cdata) :param wintype: A type derived from :class:`pywincffi.core.typesbase.CFFICDataWrapper` :return: The underlying CFFI <cdata> object, or ffi.NULL if wintype is None. """ ffi, _ = dist.load() if wintype is None: return ffi.NULL if isinstance(wintype, (SOCKET, HANDLE, WSAEVENT)): return wintype._cdata[0] return wintype._cdata
5,343,222
def GenerateOutput(target_list, target_dicts, data, params): """ Generates all the output files for the specified targets. """ options = params['options'] if options.generator_output: def output_path(filename): return filename.replace(params['cwd'], options.generator_output) else: def output_path(filename): return filename default_configuration = None for qualified_target in target_list: spec = target_dicts[qualified_target] if spec['toolset'] != 'target': raise Exception( 'Multiple toolsets not supported in scons build (target %s)' % qualified_target) scons_target = SCons.Target(spec) if scons_target.is_ignored: continue # TODO: assumes the default_configuration of the first target # non-Default target is the correct default for all targets. # Need a better model for handle variation between targets. if (not default_configuration and spec['default_configuration'] != 'Default'): default_configuration = spec['default_configuration'] build_file, target = gyp.common.ParseQualifiedTarget(qualified_target)[:2] output_file = TargetFilename(target, build_file, options.suffix) if options.generator_output: output_file = output_path(output_file) if not spec.has_key('libraries'): spec['libraries'] = [] # Add dependent static library targets to the 'libraries' value. deps = spec.get('dependencies', []) spec['scons_dependencies'] = [] for d in deps: td = target_dicts[d] target_name = td['target_name'] spec['scons_dependencies'].append("Alias('%s')" % target_name) if td['type'] in ('static_library', 'shared_library'): libname = td.get('product_name', target_name) spec['libraries'].append('lib' + libname) if td['type'] == 'loadable_module': prereqs = spec.get('scons_prerequisites', []) # TODO: parameterize with <(SHARED_LIBRARY_*) variables? td_target = SCons.Target(td) td_target.target_prefix = '${SHLIBPREFIX}' td_target.target_suffix = '${SHLIBSUFFIX}' GenerateSConscript(output_file, spec, build_file, data[build_file]) if not default_configuration: default_configuration = 'Default' for build_file in sorted(data.keys()): path, ext = os.path.splitext(build_file) if ext != '.gyp': continue output_dir, basename = os.path.split(path) output_filename = path + '_main' + options.suffix + '.scons' all_targets = gyp.common.AllTargets(target_list, target_dicts, build_file) sconscript_files = {} for t in all_targets: scons_target = SCons.Target(target_dicts[t]) if scons_target.is_ignored: continue bf, target = gyp.common.ParseQualifiedTarget(t)[:2] target_filename = TargetFilename(target, bf, options.suffix) tpath = gyp.common.RelativePath(target_filename, output_dir) sconscript_files[target] = tpath output_filename = output_path(output_filename) if sconscript_files: GenerateSConscriptWrapper(build_file, data[build_file], basename, output_filename, sconscript_files, default_configuration)
5,343,223
def check_keypoint(kp, rows, cols):
    """Check that keypoint coordinates lie inside the image, i.e. x in [0, cols) and y in [0, rows)."""
    for name, value, size in zip(['x', 'y'], kp[:2], [cols, rows]):
        if not 0 <= value < size:
            raise ValueError(
                'Expected {name} for keypoint {kp} '
                'to be in the range [0.0, {size}], got {value}.'.format(
                    kp=kp, name=name, value=value, size=size
                )
            )
5,343,224
def __add_contrast_tokens(mr_list, utt, slot_alignment, alt_mode=False): """Augments an MR with an auxiliary token indicating a pair of slots that should be contrasted in the corresponding generated utterance. """ contrast_connectors = ['but', 'however', 'yet'] scalar_slots = get_scalar_slots() for contrast_conn in contrast_connectors: contrast_pos = utt.find(contrast_conn) if contrast_pos >= 0: slot_before = None value_before = None slot_after = None value_after = None for pos, slot, value in slot_alignment: if pos > contrast_pos: if slot_before is None: # DEBUG PRINT print('Unrecognized type of contrast/concession:') print(utt) print() break if slot in scalar_slots: slot_after = slot value_after = value break else: if slot in scalar_slots: slot_before = slot value_before = value if slot_before is not None and slot_after is not None: if slot_before in scalar_slots and slot_after in scalar_slots: if scalar_slots[slot_before][value_before] - scalar_slots[slot_after][value_after] == 0: if alt_mode: mr_list.insert([s for s, v in mr_list].index(slot_before), (config.CONCESSION_TOKEN, '')) mr_list.insert([s for s, v in mr_list].index(slot_after), (config.CONCESSION_TOKEN, '')) else: mr_list.append((config.CONCESSION_TOKEN, slot_before + ' ' + slot_after)) # mr_list.append((config.CONCESSION_TOKEN, '')) else: if alt_mode: mr_list.insert([s for s, v in mr_list].index(slot_before), (config.CONTRAST_TOKEN, '')) mr_list.insert([s for s, v in mr_list].index(slot_after), (config.CONTRAST_TOKEN, '')) else: mr_list.append((config.CONTRAST_TOKEN, slot_before + ' ' + slot_after)) # mr_list.append((config.CONTRAST_TOKEN, '')) break
5,343,225
def driver(request_id): """Parquet doesn't need any special setup in the driver.""" pass
5,343,226
def test_get_probable_delete_index_id_failure(): """ Test if probable delete index id is not specified then it should throw KeyError. """ with pytest.raises(KeyError): config = CORTXS3Config() del config._config['indexid']['probable_delete_index_id'] assert config.get_probable_delete_index_id()
5,343,227
def get_imports(module):
    """
    Return a generator yielding all the imports of a particular .py file as strings.
    """
    source = inspect.getsource(module)
    path = inspect.getsourcefile(module)

    root = ast.parse(source, path)

    for node in ast.iter_child_nodes(root):
        if isinstance(node, ast.Import):
            for n in node.names:
                yield f'import {n.name}'
        elif isinstance(node, ast.ImportFrom):
            module = node.module.split('.')
            # Skip old Python 2.x __future__ imports
            if '__future__' not in node.module:
                yield f"from {node.module} import {', '.join([x.name for x in node.names])}"
        else:
            continue
5,343,228
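A hedged usage sketch for get_imports() above (assumed in scope): it can be pointed at any pure-Python module whose source inspect can retrieve, for example the standard-library json package.

import json

for line in get_imports(json):
    print(line)
# prints one line per top-level import statement in json/__init__.py,
# e.g. plain "import ..." lines and "from ... import ..." lines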
def rating_feedback_view(incident: Incident, channel_id: str): """Builds all blocks required to rate and provide feedback about an incident.""" modal_template = { "type": "modal", "title": {"type": "plain_text", "text": "Incident Feedback"}, "blocks": [ { "type": "context", "elements": [ { "type": "plain_text", "text": "Use this form to rate your experience and provide feedback about the incident.", } ], }, ], "close": {"type": "plain_text", "text": "Cancel"}, "submit": {"type": "plain_text", "text": "Submit"}, "callback_id": RatingFeedbackCallbackId.submit_form, "private_metadata": json.dumps({"incident_id": str(incident.id), "channel_id": channel_id}), } rating_picker_options = [] for rating in FeedbackRating: rating_picker_options.append( {"text": {"type": "plain_text", "text": rating}, "value": rating} ) rating_picker_block = { "type": "input", "block_id": RatingFeedbackBlockId.rating, "label": {"type": "plain_text", "text": "Rate your experience"}, "element": { "type": "static_select", "placeholder": {"type": "plain_text", "text": "Select a rating"}, "options": rating_picker_options, }, "optional": False, } modal_template["blocks"].append(rating_picker_block) feedback_block = { "type": "input", "block_id": RatingFeedbackBlockId.feedback, "label": {"type": "plain_text", "text": "Give us feedback"}, "element": { "type": "plain_text_input", "action_id": RatingFeedbackBlockId.feedback, "placeholder": { "type": "plain_text", "text": "How would you describe your experience?", }, "multiline": True, }, "optional": False, } modal_template["blocks"].append(feedback_block) anonymous_checkbox_block = { "type": "input", "block_id": RatingFeedbackBlockId.anonymous, "label": { "type": "plain_text", "text": "Check the box if you wish to provide your feedback anonymously", }, "element": { "type": "checkboxes", "action_id": RatingFeedbackBlockId.anonymous, "options": [ { "value": "anonymous", "text": {"type": "plain_text", "text": "Anonymize my feedback"}, }, ], }, "optional": True, } modal_template["blocks"].append(anonymous_checkbox_block) return modal_template
5,343,229
def _GetTargetOS(): """Returns the target os specified in args.gn file. Returns an empty string is target_os is not specified. """ build_args = _GetBuildArgs() return build_args['target_os'] if 'target_os' in build_args else ''
5,343,230
def generalise_sent_pos(s): """ generalise sentence pattern by POS tags only :param s: :return: """ rets = [] for token in s['sent']: e = token.idx + len(token.text) is_matched = False for ann in s['anns']: if token.idx >= ann['s'] and e <= ann['e']: rets.append((token.text, token.pos_, True, ann['signed_label'], ann['gt_label'])) is_matched = True break # print '%s-%s, %s: [%s]' % (token.idx, e, token.idx, token.text) if not is_matched: rets.append((token.text, token.pos_)) return {"sent": s['sent'].text, 'pattern': rets}
5,343,231
def add_batting_metrics(df): """ Adds the following columns to a given DataFrame: PA 1B OBP BA SLG OPS ISO PA/HR K BB BABIP wOBA wRAA wRC Args: df (DataFrame): the DataFrame to append additional stats to Returns: DataFrame of stats with additional columns """ df.loc[:, 'PA'] = df.apply(_calculate_pa, axis=1) df = df.loc[df.PA > 0] try: df.loc[:, '1B'] = df.apply(_calculate_singles, axis=1) df.loc[:, 'OBP'] = round((df['H'] + df['BB'] + df['IBB'] + df['HBP']) \ /df['PA'], ROUND_TO) df.loc[:, 'BA'] = round(df['H'] / df['AB'], ROUND_TO) df.loc[:, 'SLG'] = round((1 *df['1B'] + 2 * df['2B']+ 3 * df['3B'] \ + 4 * df['HR']) /df['AB'], ROUND_TO) df.loc[:, 'OPS'] = round(df['OBP'] + df['SLG'], ROUND_TO) df.loc[:, 'ISO'] = round(df['SLG'] - df['BA'], ROUND_TO) df.loc[:, 'HR%'] = round(df['HR'] / df['PA'], ROUND_TO) df.loc[:, 'K%'] = round(df['K'] / df['PA'], ROUND_TO) # df.loc[:, 'K%'] = round(df['K'] / df['PA'], ROUND_TO)*100 df.loc[:, 'BB%'] = round(df['BB'] / df['PA'], ROUND_TO) # df.loc[:, 'BB%'] = round(df['BB'] / df['PA'], ROUND_TO)*100 df.loc[:, 'BABIP'] = round((df['H'] - df['HR']) \ / (df['AB'] - df['K'] - df['HR'] \ + df['SF']), ROUND_TO) df.loc[:, 'wOBA'] = df.apply(_calculate_woba, axis=1) df.loc[:, 'wRAA'] = df.apply(_calculate_wraa, axis=1) df.loc[:, 'wRC'] = df.apply(_calculate_wrc, axis=1) return df.sort_values(by='wOBA', ascending=False) except: print('no records found') return pd.DataFrame()
5,343,232
def line_break(text, line_len=79, indent=1):
    """
    Split some text into an array of lines.

    Enter:
        text: the text to split.
        line_len: the maximum length of a line.
        indent: how much to indent all but the first line.
    Exit:
        lines: an array of lines.
    """
    lines = [text.rstrip()]
    while len(lines[-1]) > line_len:
        pos = lines[-1].rfind(' ', 0, line_len)
        if pos < 0:
            pos = line_len
        lines[-1:] = [lines[-1][:pos].rstrip(),
                      ' ' * indent + lines[-1][pos:].strip()]
    return lines
5,343,233
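A short usage sketch for line_break() above (pure standard Python; the function is assumed to be in scope).

text = "word " * 30                      # roughly 150 characters
for ln in line_break(text, line_len=40, indent=2):
    print(repr(ln))
# every emitted line fits within 40 characters; continuation lines
# start with two spaces because indent=2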
async def test_delete_cluster( mock_client, mock_cluster_status, mock_cluster_identifier, cluster_state, expected_result ): """Test delete cluster async hook function by mocking return value of delete_cluster""" mock_client.return_value.__aenter__.return_value.delete_cluster.return_value = expected_result mock_cluster_status.return_value = cluster_state hook = RedshiftHookAsync(aws_conn_id="test_aws_connection_id") task = await hook.delete_cluster(cluster_identifier=mock_cluster_identifier) assert task == cluster_state
5,343,234
def init_workflow(bids_path, output_path, participant_label, workdir=None): """Create the preprocessing workflow.""" from nipype.pipeline import engine as pe from nibabies.workflows.anatomical.preproc import init_anat_average_wf from nibabies.workflows.anatomical.registration import init_coregistration_wf from nibabies.workflows.anatomical.brain_extraction import ( init_infant_brain_extraction_wf, ) from nibabies.workflows.anatomical.outputs import init_coreg_report_wf wf = pe.Workflow(name="nibabies_anat") for subid in participant_label: sub_wf = pe.Workflow(name=f"nibabies_anat_{subid}") t1w_files = list( (bids_path / f"sub-{subid}" / "anat").glob(f"sub-{subid}*_T1w.nii.gz") ) t2w_files = list( (bids_path / f"sub-{subid}" / "anat").glob(f"sub-{subid}*_T2w.nii.gz") ) t1w_ref = init_anat_average_wf( num_maps=len(t1w_files), name="t1w_ref", omp_nthreads=8 ) t2w_ref = init_anat_average_wf( num_maps=len(t2w_files), name="t2w_ref", omp_nthreads=8 ) t1w_ref.inputs.inputnode.in_files = [str(f) for f in t1w_files] t2w_ref.inputs.inputnode.in_files = [str(f) for f in t2w_files] be = init_infant_brain_extraction_wf(omp_nthreads=8, age_months=2) cr = init_coregistration_wf(omp_nthreads=8, sloppy=True) rpt = init_coreg_report_wf(output_dir=str(output_path.absolute())) rpt.inputs.inputnode.source_file = [str(f) for f in t1w_files] # fmt:off sub_wf.connect([ (t2w_ref, be, [("outputnode.out_file", "inputnode.in_t2w")]), (t1w_ref, cr, [("outputnode.out_file", "inputnode.in_t1w")]), (be, cr, [ ("outputnode.t2w_preproc", "inputnode.in_t2w_preproc"), ("outputnode.out_mask", "inputnode.in_mask"), ("outputnode.out_probmap", "inputnode.in_probmap"), ]), (cr, rpt, [ ("outputnode.t1w_preproc", "inputnode.t1w_preproc"), ("outputnode.t2w_preproc", "inputnode.t2w_preproc"), ("outputnode.t1w_mask", "inputnode.in_mask"), ]), ]) # fmt:on wf.add_nodes([sub_wf]) if workdir: wf.base_dir = workdir return wf
5,343,235
def visualize_spectrum(y): """Effect that maps the Mel filterbank frequencies onto the LED strip""" global _prev_spectrum y = np.copy(interpolate(y, config.N_PIXELS // 2)) common_mode.update(y) diff = y - _prev_spectrum _prev_spectrum = np.copy(y) # Color channel mappings r = r_filt.update(y - common_mode.value) g = np.abs(diff) b = b_filt.update(np.copy(y)) # Mirror the color channels for symmetric output r = np.concatenate((r[::-1], r)) g = np.concatenate((g[::-1], g)) b = np.concatenate((b[::-1], b)) output = np.array([r, g,b]) * 255 return output
5,343,236
def testDuplicateContours(glyph): """ Contours shouldn't be duplicated on each other. """ contours = {} for index, contour in enumerate(glyph): contour = contour.copy() contour.autoStartSegment() pen = DigestPointPen() contour.drawPoints(pen) digest = pen.getDigest() if digest not in contours: contours[digest] = [] contours[digest].append(index) duplicateContours = [] for digest, indexes in contours.items(): if len(indexes) > 1: duplicateContours.append(indexes[0]) return duplicateContours
5,343,237
def setupGroups(root, VariableGroups): """ Set variable groups. Args: ----- VariableGroups (dict) : each entry must have the form '<Rogue device or Variable>' : {'groups' : [<list of groups>], 'pollInterval': <poll interval> } The 'groups' entry provides a list of groups to add the Device/Variable to. If the path points to a device, the group will be added recursively to all devices and variables deeper in the path. The 'pollInterval' entry provides an optional value which will be used to update the polling interface if the path points to a variable. The poll interval value is in seconds. Use None to leave interval unchanged, 0 to disable polling. If this argument is 'None' then nothing will be done. Ret: ----- None """ if VariableGroups: for k,v in VariableGroups.items(): # Get node n = root.getNode(k) # Did we find the node? if n is not None: # Add to each group for grp in v['groups']: n.addToGroup(grp) # Update poll interval if provided. if v['pollInterval'] is not None and n.isinstance(pyrogue.BaseVariable): n.pollInterval = v['pollInterval'] else: print(f"setupGroups: Warning: {k} not found!")
5,343,238
def expand_strength(row): """Extracts information from Strength column and cleans remaining strengths. Gets Additional Info and Final Volume Quantity columns from Strength. Reformats any malformed strengths and removes commas from within numbers. """ strengths = row['Strength'] # search for additional info marked by double asterisks if '*' in strengths: additional_info = '' for tag in ADDITIONAL_INFO_TAGS: if tag in strengths: additional_info = additional_info + tag + '. ' strengths = strengths.replace(tag, '') row['AdditionalInfo'] = additional_info # search for final final Reconstituted Solution Volume quantity if re.match(r"(.*)?\(\d*[.,]?\d+\s*ML\)", strengths): paren = re.search(r"\(\d*[.,]?\d+\s*ML\)", strengths) strengths = re.sub(r"\(\d*[.,]?\d+\s*ML\)", '', strengths).strip() row['FinalVolQty'] = get_qty_format(paren[0].strip('()')) # replace malformed strings for better formatting for bad_format, improved_format in ILL_FORMATTED_STRENGTHS.items(): strengths = strengths.replace(bad_format, improved_format) # determine if there is a semi colon anywhere between two parentheses paren = re.findall(r'[\(][^)]*;.*?[\)]', strengths) if paren: strengths = reformat_paren_with_semi(strengths) # remove comma from numbers strengths = re.sub(r'(\d),(\d)', r'\1\2', strengths) row['CleanStrength'] = strengths.strip() return row
5,343,239
def dem_autoload(geometries, demType, vrt=None, buffer=None, username=None, password=None, product='dem', nodata=None, hide_nodata=False): """ obtain all relevant DEM tiles for selected geometries Parameters ---------- geometries: list[spatialist.vector.Vector] a list of :class:`spatialist.vector.Vector` geometries to obtain DEM data for; CRS must be WGS84 LatLon (EPSG 4326) demType: str the type of DEM to be used; current options: - 'AW3D30' (ALOS Global Digital Surface Model "ALOS World 3D - 30m") * info: https://www.eorc.jaxa.jp/ALOS/en/aw3d30/index.htm * url: ftp://ftp.eorc.jaxa.jp/pub/ALOS/ext1/AW3D30/release_v1804 * height reference: EGM96 - 'Copernicus 10m EEA DEM' (Copernicus 10 m DEM available over EEA-39 countries) * registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration * url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_EEA-10-DGED/2021_1 * height reference: EGM2008 - 'Copernicus 30m Global DEM' * info: https://copernicus-dem-30m.s3.amazonaws.com/readme.html * url: https://copernicus-dem-30m.s3.eu-central-1.amazonaws.com/ * height reference: EGM2008 - 'Copernicus 30m Global DEM II' * registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration * url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_GLO-30-DGED/2021_1 * height reference: EGM2008 - 'Copernicus 90m Global DEM' * info: https://copernicus-dem-90m.s3.amazonaws.com/readme.html * url: https://copernicus-dem-90m.s3.eu-central-1.amazonaws.com/ * height reference: EGM2008 - 'Copernicus 90m Global DEM II' * registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration * url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_GLO-90-DGED/2021_1 * height reference: EGM2008 - 'GETASSE30' * info: https://seadas.gsfc.nasa.gov/help-8.1.0/desktop/GETASSE30ElevationModel.html * url: https://step.esa.int/auxdata/dem/GETASSE30 * height reference: WGS84 - 'SRTM 1Sec HGT' * url: https://step.esa.int/auxdata/dem/SRTMGL1 * height reference: EGM96 - 'SRTM 3Sec' * url: https://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/TIFF * height reference: EGM96 - 'TDX90m' * registration: https://geoservice.dlr.de/web/dataguide/tdm90 * url: ftpes://tandemx-90m.dlr.de * height reference: WGS84 vrt: str or None an optional GDAL VRT file created from the obtained DEM tiles buffer: int, float, None a buffer in degrees to add around the individual geometries username: str or None (optional) the user name for services requiring registration password: str or None (optional) the password for the registration account product: str the sub-product to extract from the DEM product. 
The following options are available for the respective DEM types: - 'AW3D30' * 'dem': the actual Digital Elevation Model * 'msk': mask information for each pixel (Cloud/Snow Mask, Land water and low correlation mask, Sea mask, Information of elevation dataset used for the void-filling processing) * 'stk': number of DSM-scene files which were used to produce the 5 m resolution DSM - 'Copernicus 10m EEA DEM' * 'dem': the actual Digital Elevation Model * 'edm': editing mask * 'flm': filling mask * 'hem': height error mask * 'wbm': water body mask - 'Copernicus 30m Global DEM' * 'dem': the actual Digital Elevation Model - 'Copernicus 30m Global DEM II' * 'dem': the actual Digital Elevation Model * 'edm': editing mask * 'flm': filling mask * 'hem': height error mask * 'wbm': water body mask - 'Copernicus 90m Global DEM' * 'dem': the actual Digital Elevation Model - 'Copernicus 90m Global DEM II' * 'dem': the actual Digital Elevation Model * 'edm': editing mask * 'flm': filling mask * 'hem': height error mask * 'wbm': water body mask - 'GETASSE30' * 'dem': the actual Digital Elevation Model - 'SRTM 1Sec HGT' * 'dem': the actual Digital Elevation Model - 'SRTM 3Sec' * 'dem': the actual Digital Elevation Model - 'TDX90m' * 'dem': the actual Digital Elevation Model * 'am2': Amplitude Mosaic representing the minimum value * 'amp': Amplitude Mosaic representing the mean value * 'com': Consistency Mask * 'cov': Coverage Map * 'hem': Height Error Map * 'lsm': Layover and Shadow Mask, based on SRTM C-band and Globe DEM data * 'wam': Water Indication Mask Returns ------- list or None the names of the obtained files or None if a VRT file was defined Examples -------- download all SRTM 1 arcsec DEMs overlapping with a Sentinel-1 scene and mosaic them to a single GeoTIFF file .. code-block:: python from pyroSAR import identify from pyroSAR.auxdata import dem_autoload from spatialist import gdalwarp # identify the SAR scene filename = 'S1A_IW_SLC__1SDV_20150330T170734_20150330T170801_005264_006A6C_DA69.zip' scene = identify(filename) # extract the bounding box as spatialist.Vector object bbox = scene.bbox() # download the tiles and virtually combine them in an in-memory # VRT file subsetted to the extent of the SAR scene plus a buffer of 0.01 degrees vrt = '/vsimem/srtm1.vrt' dem_autoload(geometries=[bbox], demType='SRTM 1Sec HGT', vrt=vrt, buffer=0.01) # write the final GeoTIFF file outname = scene.outname_base() + 'srtm1.tif' gdalwarp(src=vrt, dst=outname, options={'format': 'GTiff'}) # alternatively use function dem_create and warp the DEM to UTM # including conversion from geoid to ellipsoid heights from pyroSAR.auxdata import dem_create outname = scene.outname_base() + 'srtm1_ellp.tif' dem_create(src=vrt, dst=outname, t_srs=32632, tr=(30, 30), geoid_convert=True, geoid='EGM96') """ with DEMHandler(geometries) as handler: return handler.load(demType=demType, username=username, password=password, vrt=vrt, buffer=buffer, product=product, nodata=nodata, hide_nodata=hide_nodata)
5,343,240
def randomString(stringLength=6):
    """Generate a random string of fixed length """
    letters = string.ascii_uppercase
    return ''.join(random.choice(letters) for i in range(stringLength))
5,343,241
def create_new_record(account,userName,password): """ Function that creates new records for a given user account """ new_record = Records(account,userName,password) return new_record
5,343,242
def is_bow(vec): """ Checks if a vector is in the sparse Gensim BoW format """ return matutils.isbow(vec)
5,343,243
def timeout(seconds, loop=None):
    """
    Returns a channel that closes itself after `seconds`.

    :param seconds: time before the channel is closed
    :param loop: you can optionally specify the loop on which the returned
                 channel is intended to be used.
    :return: the timeout channel
    """
    c = Chan(loop=loop or asyncio.get_event_loop())
    c.loop.call_later(seconds, c.close)
    return c
5,343,244
def analyse_df(df, options): """ Analyses a dataframe, creating a metadata object based on the passed options. Metadata objects can be used to apply dataset preparations across multiple platforms. """ _validate_options(options) metadata = _create_metadata(options) for proc in options["procs"]: if proc == "FillMissing": _analyse_fill_missing(df, options, metadata) elif proc == "Normalize": _analyse_standardization(df, options, metadata) elif proc == "Categorify": _analyse_categorization(df, options, metadata) else: raise ValueError("Unsupported proc type in options " + proc) metadata["columns"][options["dep_var"]] = {"type": COL_TYPES.DEPVAR.value} return metadata
5,343,245
def populate_db(session, num_participants=100, num_tips=50): """ Populate DB with fake data """ #Make the participants participants = [] for i in xrange(num_participants): p = fake_participant() session.add(p) participants.append(p) #Make the "Elsewhere's" for p in participants: #All participants get between 1 and 3 elsewheres num_elsewheres = random.randint(1, 3) for platform_name in platforms[:num_elsewheres]: e = fake_elsewhere(p, platform_name) session.add(e) #Make the tips tips = [] for i in xrange(num_tips): tipper, tippee = random.sample(participants, 2) t = fake_tip(tipper, tippee) tips.append(t) session.add(t) session.commit()
5,343,246
def base_route(): """ Base route to any page. Currently, aboutme. """ return redirect(url_for("pages.links"))
5,343,247
def blend_normal(source, source_mask, target): """Blend source on top of target image using weighted alpha blending. Args: source (np.ndarray): Array of shape (H, W, C) which contains the source image (dtype np.uint8). source_mask (np.ndarray): Array of shape (H, W) which contains the source foreground mask (dtype np.uint8). Background pixels should be assigned 0 and foreground 255. Values inbetween are used to interpolate between source and target. target (np.ndarray): Array of shape (H, W, C) which contains the target image. Returns: output (np.ndarray): Array of the same shape as target containing the blended image. """ return paste_to(source, source_mask, target, (0, 0))
5,343,248
def load_dataset(name): """ Load a dataset. """ try: func = loaders[name] except KeyError: print(f"Dataset '{name}' is not in available list: {list_datasets()}") else: return func()
5,343,249
def display_data(params): """ display data with the parsed arguments """ # evaluation data test_ds = datautils.get_dataflow(params.testfiles, params.batch_size, is_training=False) itr = test_ds.as_numpy_iterator() raw_record = next(itr) data_sample = datautils.transform_raw_record(raw_record, params) true_states = data_sample['true_states'] global_map = data_sample['global_map'] init_particles = data_sample['init_particles'] plt.imshow(global_map[0, :, :, 0]) plt.scatter(true_states[0, 0, 0], true_states[0, 0, 1], s=20, c='#7B241C', alpha=.75) plt.scatter(init_particles[0, :, 0], init_particles[0, :, 1], s=10, c='#515A5A', alpha=.25) plt.show() print('display done')
5,343,250
def func1(): """ # FaaS Interactive Tutorial – Using Python & Dataloop SDK ## Concept Dataloop Function-as-a-Service (FaaS) is a compute service that automatically runs your code based on time patterns or in response to trigger events. You can use Dataloop FaaS to extend other Dataloop services with custom logic. Altogether, FaaS serves as a super flexible unit that provides you with increased capabilities in the Dataloop platform and allows achieving any need while automating processes. With Dataloop FaaS, you simply upload your code and create your functions. Following that, you can define a time interval or specify a resource event for triggering the function. When a trigger event occurs, the FaaS platform launches and manages the compute resources, and executes the function. You can configure the compute settings according to your preferences (machine types, concurrency, timeout, etc.) or use the default settings. # Use Cases **Pre annotation processing**: Resize, video assembler, video dissembler **Post annotation processing**: Augmentation, crop box-annotations, auto-parenting **ML models**: Auto-detection **QA models**: Auto QA, consensus model, majority vote model """
5,343,251
def split_symbols_implicit_precedence(tokens, local_dict, global_dict): # pragma: no cover """Replace the sympy builtin split_symbols with a version respecting implicit multiplcation. By replacing this we can better cope with expressions like 1/xyz being equivalent to 1/(x*y*z) rather than (y*z)/x as is the default. However it cannot address issues like 1/2x becoming (1/2)*x rather than 1/(2*x), because Python's tokeniser does not respect whitespace and so cannot distinguish between '1/2 x' and '1/2x'. This transformation is unlikely to be used, but is provided as proof of concept. """ result = [] split = False split_previous = False for tok in tokens: if split_previous: # throw out closing parenthesis of Symbol that was split split_previous = False continue split_previous = False if tok[0] == tokenize.NAME and tok[1] == 'Symbol': split = True elif split and tok[0] == tokenize.NAME: symbol = tok[1][1:-1] if sympy_parser._token_splittable(symbol): # If we're splitting this symbol, wrap it in brackets by adding # them before the call to Symbol: result = result[:-2] + [(tokenize.OP, '(')] + result[-2:] for char in symbol: if char in local_dict or char in global_dict: # Get rid of the call to Symbol del result[-2:] result.extend([(tokenize.NAME, "{}".format(char)), (tokenize.NAME, 'Symbol'), (tokenize.OP, '(')]) else: result.extend([(tokenize.NAME, "'{}'".format(char)), (tokenize.OP, ')'), (tokenize.NAME, 'Symbol'), (tokenize.OP, '(')]) # Delete the last two tokens: get rid of the extraneous # Symbol( we just added # Also, set split_previous=True so will skip # the closing parenthesis of the original Symbol del result[-2:] split = False split_previous = True # Then close the extra brackets we added: result.append((tokenize.OP, ')')) continue else: split = False result.append(tok) return result
5,343,252
def altsumma(f, k, p):
    """Return the sum of f(i) for i = k, k+1, ... for as long as p(i) holds true,
    or 0 if p(k) is false.

    This is an implementation of the summation formula from Kahan; see
    Theorem 8 in Goldberg, David, 'What Every Computer Scientist Should Know
    About Floating-Point Arithmetic', ACM Computing Surveys, Vol. 23, No. 1,
    March 1991."""
    if not p(k):
        return 0
    else:
        S = f(k)
        C = 0
        j = k + 1
        while p(j):
            Y = f(j) - C
            T = S + Y
            C = (T - S) - Y
            S = T
            j += 1
        return S
5,343,253
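A worked example of the compensated summation helper above (assumed in scope): sum the alternating series f(i) = (-1)^i / i while i stays below a bound. With this sign convention the series converges to -ln(2).

import math

f = lambda i: (-1.0) ** i / i
p = lambda i: i <= 1_000_000

s = altsumma(f, 1, p)
print(s, -math.log(2))   # both approximately -0.693147...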
def action_logging(f: T) -> T: """ Decorates function to execute function at the same time submitting action_logging but in CLI context. It will call action logger callbacks twice, one for pre-execution and the other one for post-execution. Action logger will be called with below keyword parameters: sub_command : name of sub-command start_datetime : start datetime instance by utc end_datetime : end datetime instance by utc full_command : full command line arguments user : current user log : airflow.models.log.Log ORM instance dag_id : dag id (optional) task_id : task_id (optional) execution_date : execution date (optional) error : exception instance if there's an exception :param f: function instance :return: wrapped function """ @functools.wraps(f) def wrapper(*args, **kwargs): """ An wrapper for cli functions. It assumes to have Namespace instance at 1st positional argument :param args: Positional argument. It assumes to have Namespace instance at 1st positional argument :param kwargs: A passthrough keyword argument """ if not args: raise ValueError("Args should be set") if not isinstance(args[0], Namespace): raise ValueError("1st positional argument should be argparse.Namespace instance," f"but is {type(args[0])}") metrics = _build_metrics(f.__name__, args[0]) cli_action_loggers.on_pre_execution(**metrics) try: return f(*args, **kwargs) except Exception as e: metrics['error'] = e raise finally: metrics['end_datetime'] = datetime.utcnow() cli_action_loggers.on_post_execution(**metrics) return cast(T, wrapper)
5,343,254
def decode_public_id(str_id):
    """ make numeric ID from 4-letter ID

    Args:
        str_id (str): ID consisting of a number and 3 alphabets

    Return:
        num_id (int): numeric ID
    """
    def alpha2num(c):
        return encoder.find(c)

    def num2num(c):
        return 5 if c == '9' else int(c) - 3

    alphas = [alpha2num(c) for c in str_id[1:]]
    alphas.insert(0, num2num(str_id[0]))
    return sum(alphas[i] * 18**(3-i) for i in range(4))
5,343,255
def get_sentence_idcs_in_split(datasplit: DataFrame, split_id: int): """Given a dataset split is (1 for train, 2 for test, 3 for dev), returns the set of corresponding sentence indices in sentences_df.""" return set(datasplit[datasplit["splitset_label"] == split_id]["sentence_index"])
5,343,256
def valid_scope_list(): """List all the oscilloscope types.""" s = "\nValid types are:\n" s += ", ".join(DS1000C_scopes) + "\n" s += ", ".join(DS1000E_scopes) + "\n" s += ", ".join(DS1000Z_scopes) + "\n" s += ", ".join(DS4000_scopes) + "\n" s += ", ".join(DS6000_scopes) + "\n" return s
5,343,257
def process_input_file(remote_file): """Process the input file. If its a GCS file we download it to a temporary local file. We do this because Keras text preprocessing doesn't work with GCS. If its a zip file we unpack it. Args: remote_file: The input Returns: csv_file: The local csv file to process """ if is_gcs_path(remote_file): # Download the input to a local with tempfile.NamedTemporaryFile() as hf: input_data = hf.name logging.info("Copying %s to %s", remote_file, input_data) input_data_gcs_bucket, input_data_gcs_path = split_gcs_uri( remote_file) logging.info("Download bucket %s object %s.", input_data_gcs_bucket, input_data_gcs_path) bucket = storage.Bucket(storage.Client(), input_data_gcs_bucket) storage.Blob(input_data_gcs_path, bucket).download_to_filename( input_data) else: input_data = remote_file ext = os.path.splitext(input_data)[-1] if ext.lower() == '.zip': zip_ref = zipfile.ZipFile(input_data, 'r') zip_ref.extractall('.') zip_ref.close() # TODO(jlewi): Hardcoding the file in the Archive to use is brittle. # We should probably just require the input to be a CSV file.: csv_file = 'github_issues.csv' else: csv_file = input_data return csv_file
5,343,258
def load(): """Returns an instance of the plugin""" return SyslogOutOutputPlugin
5,343,259
def test_regenerate_uuids_catalog() -> None: """Test regeneration of uuids with updated refs in catalog.""" orig_cat = catalog.Catalog.oscal_read(catalog_path) new_cat, uuid_lut, n_refs_updated = validator_helper.regenerate_uuids(orig_cat) assert len(uuid_lut.items()) == 2 assert n_refs_updated == 2
5,343,260
def word_sorter(x):
    """
    Function to sort the word/frequency pairs by frequency.
    Lowest-frequency collocates come first - highest-frequency collocates last.
    """
    # getting length of list of word/frequency pairs
    lst = len(x)
    # sort by frequency (simple in-place bubble sort)
    for i in range(0, lst):
        for j in range(0, lst - i - 1):
            if (x[j][1] > x[j + 1][1]):
                temp = x[j]
                x[j] = x[j + 1]
                x[j + 1] = temp
    return(x)
5,343,261
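A tiny usage sketch for word_sorter() above (assumed in scope): it sorts (word, frequency) pairs in place, lowest frequency first.

pairs = [("the", 42), ("zebra", 1), ("code", 7)]
print(word_sorter(pairs))
# [('zebra', 1), ('code', 7), ('the', 42)]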
def le_assinatura():
    """[Reads the values of the model's linguistic traits and returns a
    signature to be compared with the provided texts.]

    Returns:
        [list] -- [description]
    """
    print("Bem-vindo ao detector automático de COH-PIAH.")
    print("Informe a assinatura típica de um aluno infectado:")

    wal = float(input("Entre o tamanho médio de palavra:"))
    ttr = float(input("Entre a relação Type-Token:"))
    hlr = float(input("Entre a Razão Hapax Legomana:"))
    sal = float(input("Entre o tamanho médio de sentença:"))
    sac = float(input("Entre a complexidade média da sentença:"))
    pal = float(input("Entre o tamanho medio de frase:"))

    return [wal, ttr, hlr, sal, sac, pal]
5,343,262
def incMagLock(): """ rotate the text display """ pass
5,343,263
def category_task_delete(request, structure_slug, category_slug, task_id, structure): """ Deletes task from a category :type structure_slug: String :type category_slug: String :type task_id: String :type structure: OrganizationalStructure (from @is_manager) :param structure_slug: structure slug :param category_slug: category slug :param task_id: task code :param structure: structure object (from @is_manager) :return: render """ category = get_object_or_404(TicketCategory, organizational_structure=structure, slug=category_slug) task = get_object_or_404(TicketCategoryTask, code=task_id, category=category) messages.add_message(request, messages.SUCCESS, _("Attività {} eliminata correttamente").format(task)) # log action logger.info('[{}] manager of structure {}' ' {} deleted a task' ' for category {}'.format(timezone.localtime(), structure, request.user, category)) delete_directory(task.get_folder()) task.delete() return redirect('uni_ticket:manager_category_detail', structure_slug=structure_slug, category_slug=category_slug)
5,343,264
def lnposterior_selection(lnprobability, sig_fact=3., quantile=75,
                          quantile_walker=50, verbose=1):
    """Return selected walkers based on their lnposterior values.

    :param np.array lnprobability: Values of the lnprobability taken by each
        walker at each iteration
    :param float sig_fact: walkers whose lnposterior falls below
        quantile - sig_fact * MAD will be rejected
    :param float quantile: Quantile to use as reference lnprobability value.
    :param float quantile_walker: Quantile used to assess the lnprobability of
        each walker. 50 is the median, 100 is the highest lnprobability.
    :param int verbose: if 1 speaks otherwise not
    :return list_of_int l_selected_walker: list of selected walkers
    :return int nb_rejected: number of rejected walkers
    """
    walkers_percentile_lnposterior = percentile(
        lnprobability, quantile_walker, axis=1)
    percentile_lnposterior = percentile(
        walkers_percentile_lnposterior, quantile)
    mad_lnposterior = mad(walkers_percentile_lnposterior)

    if verbose == 1:
        logger.info("lnposterior of the walkers: {}\nquantile {}%: {}, MAD:{}"
                    "".format(walkers_percentile_lnposterior, quantile,
                              percentile_lnposterior, mad_lnposterior))

    l_selected_walker = where(walkers_percentile_lnposterior > (
        percentile_lnposterior - (sig_fact * mad_lnposterior)))[0]

    nb_rejected = lnprobability.shape[0] - len(l_selected_walker)
    if verbose == 1:
        logger.info(
            "Number of rejected walkers: {}/{}".format(
                nb_rejected, lnprobability.shape[0]))

    return l_selected_walker, nb_rejected
5,343,265
def space_check(board, position):
    """Returns boolean value whether the cell is free or not."""
    return board[position] not in PLAYERS_MARKS
5,343,266
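A hedged usage sketch for space_check() above. PLAYERS_MARKS is not shown in the snippet, so a hypothetical value is assumed here purely for illustration.

# Hypothetical marks; the real constant lives elsewhere in the original project.
PLAYERS_MARKS = ("X", "O")

board = [" ", "X", "O", " "]
print(space_check(board, 0), space_check(board, 1))   # True False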
def find_repo_root_by_path(path): """Given a path to an item in a git repository, find the root of the repository.""" repo = git.Repo(path, search_parent_directories=True) repo_path = repo.git.rev_parse("--show-toplevel") logging.debug("Repository: {}".format(repo_path)) return repo_path
5,343,267
def validate_params(serializer_cls):
    """Validate request parameters with the given Serializer."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request: Request):
            data = request.query_params if request.method == "GET" else request.data
            serializer = serializer_cls(data=data)
            serializer.is_valid(raise_exception=True)
            setattr(request, "validated_data", serializer.validated_data)
            return func(request)
        return wrapper
    return decorator
5,343,268
def get_pairs_observations(kdata: kapture.Kapture, kdata_query: Optional[kapture.Kapture], keypoints_type: str, max_number_of_threads: Optional[int], iou: bool, topk: int): """ get observations pairs as list """ if iou: individual_observations = get_observation_images(keypoints_type, kdata, kdata_query, max_number_of_threads) gc.collect() else: individual_observations = None all_pairs = get_observation_image_pairs(keypoints_type, kdata, kdata_query, max_number_of_threads) if iou: assert individual_observations is not None final_pairs = {} for img1 in all_pairs.keys(): for img2 in all_pairs[img1].keys(): if img1 not in final_pairs: final_pairs[img1] = {} union = individual_observations[img1] + individual_observations[img2] - all_pairs[img1][img2] if union == 0: final_pairs[img1][img2] = 0 else: final_pairs[img1][img2] = all_pairs[img1][img2] / union all_pairs = final_pairs getLogger().info('ranking co-observation pairs...') assert kdata.records_camera is not None image_pairs = get_topk_observation_pairs(all_pairs, kdata.records_camera, topk) return image_pairs
5,343,269
def col2im_conv(col, input, layer, h_out, w_out):
    """Convert columns back into an image.

    Args:
        col: shape = (k*k, c, h_out*w_out)
        input: a dictionary containing input data and shape information
        layer: one cnn layer, defined in testLeNet.py
        h_out: output height
        w_out: output width

    Returns:
        im: shape = (h_in, w_in, c)
    """
    h_in = input['height']
    w_in = input['width']
    c = input['channel']
    k = layer['k']
    stride = layer['stride']

    im = np.zeros((h_in, w_in, c))
    col = np.reshape(col, (k*k*c, h_out*w_out))
    for h in range(h_out):
        for w in range(w_out):
            im[h*stride: h*stride+k, w*stride: w*stride+k, :] = \
                im[h*stride: h*stride+k, w*stride: w*stride+k, :] + \
                np.reshape(col[:, h*w_out + w], (k, k, c))

    return im
5,343,270
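# Shape sanity check (not from the original tests); the layer/input dictionaries
# follow the conventions used in testLeNet.py, and the values here are illustrative.
import numpy as np

layer = {'k': 3, 'stride': 1}
inp = {'height': 5, 'width': 5, 'channel': 2}
h_out = (inp['height'] - layer['k']) // layer['stride'] + 1
w_out = (inp['width'] - layer['k']) // layer['stride'] + 1

col = np.ones((layer['k'] * layer['k'] * inp['channel'], h_out * w_out))
im = col2im_conv(col, inp, layer, h_out, w_out)
print(im.shape)  # (5, 5, 2); overlapping patches are summed back into the image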
def parse_arguments(argv): """ Parse command line arguments """ parser = create_parser() args = parser.parse_args(argv) if args.n is None and not args.tch: parser.error('-n is required') if args.m is None and args.grnm: parser.error('-m is required') if args.d is None and args.grnd: parser.error('-d is required') if args.p is None and args.grnp: parser.error('-p is required') if args.h is None and args.tch: parser.error('-h is required') if args.c is None and (args.tch or args.tcn): parser.error('-c is required') if args.directed and (args.grnd or args.trn): parser.error('--directed is not supported for the graph type') if args.output is not None and os.path.exists(args.output): parser.error('file %s already exists' % args.output) if args.wmin > args.wmax: parser.error('min weight is greater than max weight') return args
5,343,271
def parse_args() -> argparse.Namespace:
    """
    Parses the command-line arguments for training

    Returns:
        The parsed arguments namespace
    """
    parse = argparse.ArgumentParser()

    parse.add_argument('--local_rank', dest='local_rank', type=int, default=0)
    parse.add_argument('--port', dest='port', type=int, default=44554)
    parse.add_argument('--model', dest='model', type=str, default='bisenetv2')
    parse.add_argument('--finetune-from', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/models/5/best_model.pth')
    parse.add_argument('--im_root', type=str, default='/home/bina/PycharmProjects/fast-segmentation/data')
    parse.add_argument('--train_im_anns', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/train.txt')
    parse.add_argument('--val_im_anns', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/val.txt')
    parse.add_argument('--log_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/logs/regular_logs')
    parse.add_argument('--false_analysis_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/false_analysis')
    parse.add_argument('--tensorboard_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/logs/tensorboard_logs')
    parse.add_argument('--models_path', type=str, default='/home/bina/PycharmProjects/fast-segmentation/models')
    parse.add_argument('--config_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/configs/main_cfg.yaml')
    parse.add_argument('--amp', type=bool, default=True)

    return parse.parse_args()
5,343,272
def fix_missing_period(line): """Adds a period to a line that is missing a period""" if line == "": return line if line[-1] in END_TOKENS: return line return line + " ."
5,343,273
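# Usage sketch: END_TOKENS is defined elsewhere in the source module; the set used
# by the CNN/DailyMail preprocessing scripts is shown here as an assumption.
END_TOKENS = ['.', '!', '?', '...', "'", '`', '"', ')']

print(fix_missing_period("the quick brown fox"))  # -> "the quick brown fox ."
print(fix_missing_period("already terminated."))  # unchanged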
async def main() -> int: """main async portion of command-line runner. returns status code 0-255""" try: await tome.database.connect() return await migrations.main( whither=args.whither, conn=tome.database.connection(), dry_run=args.dry_run ) except KeyboardInterrupt: logger.critical("keyboard interrupt") try: await tome.database.disconnect() except Exception as e: logger.error("failed to cleanly disconnect database", exc_info=e) logger.info("rolled back") return 130 except Exception as e: logger.critical("a fatal error occurred!", exc_info=e) await tome.database.disconnect() logger.info("rolled back") return 3 finally: await tome.database.disconnect()
5,343,274
def getTaskStatus( task_id ) : """Get tuple of Instance status and corresponding status string. 'task_id' is the DB record ID.""" _inst = Instance.objects.get( id = task_id ) return ( _inst.status , STATUS2TEXT[_inst.status] )
5,343,275
def fit_affine_matrix(p1, p2):
    """ Fit affine matrix such that p2 * H = p1

    Hint:
        You can use np.linalg.lstsq function to solve the problem.

    Args:
        p1: an array of shape (M, P)
        p2: an array of shape (M, P)

    Return:
        H: a matrix of shape (P, P) that transforms p2 to p1.
    """

    assert (p1.shape[0] == p2.shape[0]),\
        'Different number of points in p1 and p2'

    p1 = pad(p1)
    p2 = pad(p2)

    ### YOUR CODE HERE
    H = np.linalg.lstsq(p2, p1, rcond=None)[0]
    ### END YOUR CODE

    # Sometimes numerical issues cause least-squares to produce a last
    # column which is not exactly [0, 0, 1]
    H[:,2] = np.array([0, 0, 1])
    return H
5,343,276
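# Sanity check with synthetic correspondences; `pad` (appending a column of ones)
# is assumed to be defined alongside fit_affine_matrix, as in the original homework.
import numpy as np

rng = np.random.default_rng(1)
p2 = rng.uniform(0, 100, size=(20, 2))
A = np.array([[1.1, 0.2], [-0.1, 0.9]])
t = np.array([5.0, -3.0])
p1 = p2 @ A + t  # exact affine map from p2 to p1

H = fit_affine_matrix(p1, p2)
print(np.allclose(pad(p2) @ H, pad(p1)))  # True -- H maps p2 onto p1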
def cnn_lstm_nd(pfac, max_features=NUM_WORDS, maxlen=SEQUENCE_LENGTH, lstm_cell_size=CNNLSTM_CELL_SIZE, embedding_size=EMBEDDING_SIZE): """CNN-LSTM model, modified from Keras example.""" # From github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py filters = 64 kernel_size = 5 pool_size = 4 model = Sequential() model.add(pfac(Embedding(max_features, embedding_size, input_length=maxlen, name='embedding'))) model.add(pfac(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1, name='conv'))) model.add(MaxPooling1D(pool_size=pool_size)) model.add(pfac(LSTM(lstm_cell_size, name='lstm'))) model.add(pfac(Dense(2, name='dense'))) return model
5,343,277
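# Minimal build sketch, assuming the Keras imports and the NUM_WORDS / SEQUENCE_LENGTH /
# CNNLSTM_CELL_SIZE / EMBEDDING_SIZE constants from the module are available.
# `pfac` is the per-layer wrapper used by the original code; an identity wrapper
# yields the plain deterministic network.
model = cnn_lstm_nd(pfac=lambda layer: layer)
model.summary()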
def updateUserPassword(userName, newPassword):
    """Update the stored password hash for the given user."""
    cursor = conn('open')
    if cursor:
        try:
            cursor.execute("UPDATE user SET u_Pass= %s where u_name= %s ",
                           (generate_password_hash(newPassword), userName))
            conn("commit")
            conn('close')
        except Exception:
            conn('close')
            return False
    else:
        conn('close')
        return False
    return True
5,343,278
def mit_b1(**kwargs):
    """ Constructs a mit_b1 model. Any keyword arguments are forwarded to
    MixVisionTransformer.
    """
    model = MixVisionTransformer(embed_dims=64, num_layers=[2, 2, 2, 2], num_heads=[1, 2, 5, 8], **kwargs)
    return model
5,343,279
def test_beams_to_bintable(): """ Check that NPOL is set """ beamlist = [Beam(1*u.arcsec)]*2 beamhdu = beams_to_bintable(beamlist) assert beamhdu.header['NPOL'] == 0
5,343,280
def draw_img(frame, x, y, img):
    """Draw an image onto a frame (in place)

    Args:
        frame: Frame to modify
        x: Centered x coordinate on frame
        y: Centered y coordinate on frame
        img: Image to draw (zero pixels are treated as transparent)

    Returns:
        ndarray: Modified frame
    """
    hw = img.shape[1] // 2
    hh = img.shape[0] // 2
    snip = frame[y - hh:y + hh, x - hw:x + hw]  # view into frame, so writes land in frame
    snip[img != 0] = img[img != 0]
    return frame
5,343,281
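# Usage sketch: stamp a small white square onto a black frame; coordinates are the
# centre of the stamped image, and zero pixels in `img` are left untouched.
import numpy as np

frame = np.zeros((100, 100), dtype=np.uint8)
img = np.full((10, 10), 255, dtype=np.uint8)

draw_img(frame, 50, 50, img)
print(frame[45:55, 45:55].min())  # 255 -- the patch was written into the frame view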
def GenerateConfig(_): """Returns empty string.""" return ''
5,343,282
def calc_procrustes(points1, points2, return_tform=False): """ Align the predicted entity in some optimality sense with the ground truth. Does NOT align scale https://github.com/shreyashampali/ho3d/blob/master/eval.py """ t1 = points1.mean(0) # Find centroid t2 = points2.mean(0) points1_t = points1 - t1 # Zero mean points2_t = points2 - t2 R, s = orthogonal_procrustes(points1_t, points2_t) # Run procrustes alignment, returns rotation matrix and scale points2_t = np.dot(points2_t, R.T) # Apply tform to second pointcloud points2_t = points2_t + t1 if return_tform: return R, t1 - t2 else: return points2_t
5,343,283
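# Sanity check (illustrative): rotate and translate a point cloud, then recover the
# alignment. Assumes scipy's `orthogonal_procrustes` is imported as in the code above.
import numpy as np

rng = np.random.default_rng(2)
points1 = rng.normal(size=(50, 3))
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
points2 = points1 @ R_true.T + np.array([1.0, -2.0, 0.5])

aligned = calc_procrustes(points1, points2)
print(np.abs(aligned - points1).max())  # ~0 -- points2 is mapped back onto points1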
def _check_name_func(name_func): """ Make sure ``name_func`` is ``None`` or a callable that takes ``func_name``, ``kwargs`` arguments. """ if name_func is None: return if not callable(name_func): raise ParametrizationError("name_func must be a callable or `None`") try: interface.check_signature(name_func, ["func_name", "kwargs"]) except interface.MethodSignatureMismatch: raise ParametrizationError( '"name_func" must be a callable that takes 2 arguments' ' named "func_name" and "kwargs"' " (e.g. def custom_name_func(func_name, kwargs): ..." )
5,343,284
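# Illustrative calls (the custom name function is hypothetical): the first two pass
# silently, the last raises ParametrizationError because the signature does not match.
def custom_name_func(func_name, kwargs):
    return "{}__{}".format(func_name, "_".join(str(v) for v in kwargs.values()))

_check_name_func(custom_name_func)  # OK
_check_name_func(None)              # OK -- None is explicitly allowed
try:
    _check_name_func(lambda a: a)   # wrong argument names
except ParametrizationError as exc:
    print(exc)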
def sync_trusted_origins(neo4j_session, okta_org_id, okta_update_tag, okta_api_key): """ Sync trusted origins :param neo4j_session: session with the Neo4j server :param okta_org_id: okta organization id :param okta_update_tag: The timestamp value to set our new Neo4j resources with :param okta_api_key: okta api key :return: Nothing """ logger.debug("Syncing Okta Trusted Origins") api_client = create_api_client(okta_org_id, "/api/v1/trustedOrigins", okta_api_key) trusted_data = _get_trusted_origins(api_client) trusted_list = transform_trusted_origins(trusted_data) _load_trusted_origins(neo4j_session, okta_org_id, trusted_list, okta_update_tag)
5,343,285
def read_image_pillow(input_filename: Path) -> torch.Tensor: """ Read an image file with pillow and return a torch.Tensor. :param input_filename: Source image file path. :return: torch.Tensor of shape (C, H, W). """ pil_image = Image.open(input_filename) torch_tensor = TF.to_tensor(pil_image) return torch_tensor
5,343,286
def compute_solar_angle(balloon_state: balloon.BalloonState) -> float: """Computes the solar angle relative to the balloon's position. Args: balloon_state: current state of the balloon. Returns: Solar angle at the balloon's position. """ el_degree, _, _ = solar.solar_calculator( balloon_state.latlng, balloon_state.date_time) return el_degree
5,343,287
def print_dictionary(employee_as_dict): """Print a dictionary representation of the employee.""" print(json.dumps(employee_as_dict, indent=2))
5,343,288
def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]: """parse a list of URLs with their metadata from an RSS feed, bookmarks export, or text file """ check_url_parsing_invariants() timer = TimedProgress(TIMEOUT * 4) with open(source_file, 'r', encoding='utf-8') as file: links, parser = run_parser_functions(file, timer, root_url=root_url) timer.end() if parser is None: return [], 'Failed to parse' return links, parser
5,343,289
def executemany(c: CursorType, sql: str, args: Iterable[Iterable[Any]]) -> OtherResult:
    """
    Call c.executemany with the given SQL statement and arguments, and return c.

    The mysql-type-plugin for mysql will special type this function such that the number
    and types of args match what is expected for the query by analyzing the
    mysql-schema.sql file in the project root.
    """
    c.executemany(sql, args)
    return cast(OtherResult, c)
5,343,290
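# Stand-alone sketch of a bulk insert. The project targets MySQL; sqlite3 is used
# here only so the example runs without a server, and the table is illustrative.
import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE points (x INTEGER, y INTEGER)")

executemany(cur, "INSERT INTO points (x, y) VALUES (?, ?)", [(1, 2), (3, 4), (5, 6)])
db.commit()
print(cur.execute("SELECT COUNT(*) FROM points").fetchone())  # (3,)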
def test_get_warship_directory(config, wowsdir): """Test the get_warship_directory method.""" assert get_warship_directory(config) == wowsdir
5,343,291
def do_add_student_bad(first_name, last_name, card_info): """Insert a student into the DB. VULNERABLE TO SQL INJECTION""" # Note that we are building our own query via an f-string: this is what makes # us vulnerable to SQL injection query = ( "INSERT INTO students (card_info, first_name, last_name) VALUES " f"('{card_info}', '{first_name}', '{last_name}')" ) cursor.execute(query)
5,343,292
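# The parameterized counterpart (a sketch, not from the original source): bound
# parameters let the driver escape the values, closing the injection hole shown
# above. Placeholder syntax (%s here) depends on the DB driver in use.
def do_add_student_safe(first_name, last_name, card_info):
    """Insert a student into the DB using bound parameters."""
    query = (
        "INSERT INTO students (card_info, first_name, last_name) "
        "VALUES (%s, %s, %s)"
    )
    cursor.execute(query, (card_info, first_name, last_name))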
def get_default_directory(): """Return the default directory in a string .. note:: Not tested on unix! Returns: (str): Default directory: Windows: %AppData%/deharm Unix: '~' expanded """ if kivy.utils.platform == 'win': # In case this is running in Windows use a folder pointed to by APPDATA folder = os.path.join(os.getenv('APPDATA'), "deharm") if not os.path.isdir(folder): # If folder doesn't exist yet then create it. os.mkdir(folder) return folder else: return str(os.path.expanduser('~'))
5,343,293
def validate_host(): """Ensure that the script is being run on a supported platform.""" supported_opsys = ["darwin", "linux"] supported_machine = ["amd64"] opsys, machine = get_platform() if opsys not in supported_opsys: click.secho( f"this application is currently not known to support {opsys}", fg="red", ) raise SystemExit(2) if machine not in supported_machine: click.secho( f"this application is currently not known to support running on {machine} machines", fg="red", ) if struct.calcsize("P") * 8 != 64: click.secho( "this application can only be run on 64 bit hosts, in 64 bit mode", fg="red" ) raise SystemExit(2) return True
5,343,294
def test_version():
    """First unit test: check that __version__ is a string."""
    from hogwarts_apitest import __version__
    assert isinstance(__version__, str)


# import requests
#
# class BaseApi(object):
#
#     method = "GET"
#     url = ""
#     parmas = {}
#     headers = {}
#     data = {}
#     json = {}
#
#     # The shared set_parmas and validate logic is the same across APIs, so it can be abstracted out here
#     def set_parmas(self, **parmas):
#         self.parmas = parmas
#         return self
#
#     def set_data(self, data):
#         self.data = data
#         return self
#
#     def set_json(self, json_data):
#         self.json = json_data
#         return self
#
#     # All of the requests helper methods are built on top of requests.request()
#     def run(self):
#         self.response = requests.request(
#             method = self.method,
#             url = self.url,
#             params = self.parmas,
#             headers = self.headers,
#             data = self.data,
#             json = self.json
#         )
#         return self
#
#     def validate(self, key, expected_value):
#         actual_result = getattr(self.response, key)
#         assert actual_result == expected_value
#         return self
#
# class ApiHttpbinGet(BaseApi):
    """Definition of the API under test"""
5,343,295
def EAD_asset(x,RPs,curves,positions): """ Calculates the expected annual damage for one road segment (i.e. a row of the DataFrame containing the results) based on the damage per return period and the flood protection level. WARNING: THIS FUNCTION PROBABLY REALLY SLOWS DOWN THE OVERALL POST-PROCESSING SCRIPT (COMPARE ITS PERFORMANCE WITH THE CLIMATE CHANGE SCRIPT) Most likely, the cause is the use of a loop and an if-statement for the manipulation; this is not smart for a function that is applied on a DataFrame!!! Arguments: *x* (Geopandas Series) - A row of the GeoPandas DataFrame containing all road segments, should have dam cols, rps, flood protection level *RPs* (List) - return periods of the damage data, in descending order e.g. [500,200,100,50,20,10] *curves* (List) - short names of damage curves for which to calculate the EAD e.g. ["C1","C2","C3","C4","C5","C6","HZ"] *positions* (List) - tuple positions of available max damage estimates e.g. [0,1,2,3,4] Returns: *x* (Geopandas Series) - with added the new columns containing EADs """ PL = x["Jongman_FP"] RPs_copy = [y for y in RPs] #make a new list, or it will be altered in the function and mess up everything!!! for curve in curves: damcols = ["dam_{}_rp{}".format(curve,rp) for rp in RPs] #this was RPs; but somehow this did not work EAD = [0,0,0,0,0] #initialize empty lists for pos in positions: #iterate over all the max damage estimates dam = list(x[damcols].apply(lambda y: pick_tuple(y,pos)).values) #creates a numpy array with the damage values of the desired max_dam EAD[pos] = risk_FP(dam,RPs_copy,PL) if not curve == "HZ": #save results to the series, which will be returned as a row in the df x["EAD_{}".format(curve)] = tuple(EAD) else: x["EAD_HZ"] = EAD[0] return x
5,343,296
def procrustes_alignment(data, reference=None, n_iter=10, tol=1e-5,
                         return_reference=False, verbose=False):
    """Iterative alignment using generalized procrustes analysis.

    Parameters
    ----------
    data : list of ndarrays, shape = (n_samples, n_feat)
        List of datasets to align.
    reference : ndarray, shape = (n_samples, n_feat), optional
        Dataset to use as reference in the first iteration. If None, the first
        dataset in `data` is used as reference. Default is None.
    n_iter : int, optional
        Number of iterations. Default is 10.
    tol : float, optional
        Tolerance for stopping criteria. Default is 1e-5.
    return_reference : bool, optional
        Whether to return the reference dataset built in the last iteration.
        Default is False.
    verbose : bool, optional
        Verbosity. Default is False.

    Returns
    -------
    aligned : list of ndarray, shape = (n_samples, n_feat)
        Aligned datasets.
    mean_dataset : ndarray, shape = (n_samples, n_feat)
        Reference dataset built in the last iteration. Only if
        ``return_reference == True``.
    """
    if n_iter <= 0:
        raise ValueError('A positive number of iterations is required.')

    if reference is None:
        # Use the first item to build the initial reference
        aligned = [data[0]] + [procrustes(d, data[0]) for d in data[1:]]
        reference = np.mean(aligned, axis=0)
    else:
        aligned = [None] * len(data)
        reference = reference.copy()

    dist = np.inf
    for i in range(n_iter):
        # Align to reference
        aligned = [procrustes(d, reference) for d in data]

        # Compute new mean
        new_reference = np.mean(aligned, axis=0)

        # Compute distance
        reference -= new_reference
        reference **= 2
        new_dist = reference.sum()

        # Update reference
        reference = new_reference

        if verbose:
            print('Iteration {0:>3}: {1:.6f}'.format(i, new_dist))

        if dist != np.inf and np.abs(new_dist - dist) < tol:
            break

        dist = new_dist

    return (aligned, reference) if return_reference else aligned
5,343,297
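# Usage sketch: align three noisy copies of the same embedding. `procrustes`
# (the single-pair alignment called inside the loop) is assumed to be importable
# from the same module.
import numpy as np

rng = np.random.default_rng(3)
base = rng.normal(size=(100, 5))
datasets = [base + 0.01 * rng.normal(size=base.shape) for _ in range(3)]

aligned, ref = procrustes_alignment(datasets, n_iter=20, tol=1e-8,
                                    return_reference=True)
print(len(aligned), ref.shape)  # 3 (100, 5)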
def test_directory_origin_configuration_ignore_control_characters_xml(sdc_builder, sdc_executor, ignore_control_characters, shell_executor, file_writer):
    """The Directory origin is not able to read XML data with control characters.
    Filed bug: https://issues.streamsets.com/browse/SDC-11604.
    """
    pass
5,343,298
def get_paramsets(args, nuisance_paramset):
    """Make the paramsets for generating the Asimov MC sample and also running
    the MCMC.
    """
    asimov_paramset = []
    llh_paramset = []

    gf_nuisance = [x for x in nuisance_paramset.from_tag(ParamTag.NUISANCE)]

    llh_paramset.extend(
        [x for x in nuisance_paramset.from_tag(ParamTag.SM_ANGLES)]
    )
    llh_paramset.extend(gf_nuisance)

    for parm in llh_paramset:
        parm.value = args.__getattribute__(parm.name)

    boundaries = fr_utils.SCALE_BOUNDARIES[args.dimension]
    tag = ParamTag.SCALE
    llh_paramset.append(
        Param(
            name='logLam', value=np.mean(boundaries), ranges=boundaries, std=3,
            tex=r'{\rm log}_{10}\left (\Lambda^{-1}' + \
                misc_utils.get_units(args.dimension)+r'\right )',
            tag=tag
        )
    )
    llh_paramset = ParamSet(llh_paramset)

    tag = ParamTag.BESTFIT
    if args.data is not DataType.REAL:
        flavor_angles = fr_utils.fr_to_angles(args.injected_ratio)
    else:
        flavor_angles = fr_utils.fr_to_angles([1, 1, 1])

    asimov_paramset.extend(gf_nuisance)
    asimov_paramset.extend([
        Param(name='astroFlavorAngle1', value=flavor_angles[0], ranges=[ 0., 1.], std=0.2, tag=tag),
        Param(name='astroFlavorAngle2', value=flavor_angles[1], ranges=[-1., 1.], std=0.2, tag=tag),
    ])
    asimov_paramset = ParamSet(asimov_paramset)

    return asimov_paramset, llh_paramset
5,343,299