Dataset columns:
  query      string  (length 9 to 9.05k)
  document   string  (length 10 to 222k)
  negatives  list    (19 to 20 items)
  metadata   dict
Parses a simple float coordinate and returns hours, minutes and seconds and vice versa. Input
def parse_ra (ra,string=False): from scipy import array if type(ra) != type(''): a = ra/15 hours = int(a) b = (a-hours)*60 minutes = int(b) seconds = (b-minutes)*60 if string: return '{0:0>2}:{1:2}:{2:0>4.2f}'.format(hours,minutes,seconds) retu...
[ "def convert_HourPoint_to_HourAndMin(f_num):\n decimal, integer = modf(f_num)\n hour = int(integer)\n minu = int(decimal * 60)\n return hour, minu", "def test_get_time_float(self):\n self.dbgfunc()\n obj = CrawlConfig.CrawlConfig.dictor({'crawler': {'dsec': '10s',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the indices of all the elements between vals[0] and vals[1]. Alternatively also between vals[2] and vals[3] if they are given.
def get_indices (arr,vals,disp=False): from scipy import concatenate, where, array, diff dx = abs(.5*diff(arr)[0]) if len(vals)==4: v1,v2,v3,v4 = vals + array([-1,1,-1,1])*dx # if the user wants two velocity areas to calculate noise low = where((arr>=v1)*(arr<=v2))[0] high =...
[ "def _data_interval_indices(self):\n tmp = np.insert(np.cumsum(self.lengths),0,0)\n indices = np.vstack((tmp[:-1], tmp[1:])).T\n return indices", "def indices(self) -> list[int]:\n return list(range(self.lower, self.upper + 1))", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
X the coordinates of the x-axis (array) Y the data to be fitted (array) params are the initial guesses of the parameters; the parameters are grouped in threes, in the order amplitude, peak position and distribution FWHM [[AMPL, POS, FWHM]] or ((AMPL, POS, FWHM)) err error of the Y data fixlist a list of which parameters to...
def fit_gauss1d((X,Y), params, err = None, fixlist = None, minbool = None, minpar = None, maxbool = None, maxpar = None, tie = None, verbose = 1, full_output=0)...
[ "def fit_fano(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False,\n verbose=True, **kwarg):\n if domain is not None:\n fitdatax, fitdatay = selectdomain(xdata, ydata, domain)\n else:\n fitdatax = xdata\n fitdatay = ydata\n if fitparams is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deconvolves one gaussian with parameters bmaj1, bmin1, theta1 (major, minor, PA) with another (bmaj2,bmin2, theta2) all in FWHM and radians (if deg is wanted, set ang='deg')
def gauss2d_decon ((bmaj1, bmin1, theta1, bmaj2, bmin2, theta2), ang='rad'): from scipy import pi, cos, sin, arctan2, sqrt # # check the ang keyword, if deg, go over to radians from deg if ang=='deg': theta1 *= pi/180 theta2 *= pi/180 # # define some calculations alpha = (bm...
[ "def gauss2d_convolve ((bmaj1, bmin1, theta1, bmaj2, bmin2, theta2), ang='deg'):\n from scipy import pi, cos, sin, arctan2, sqrt, log\n #\n # check the ang keyword, if deg, go over to radians from deg\n if ang=='deg':\n theta1 *= pi/180\n theta2 *= pi/180\n else:\n pass\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convolves one gaussian with parameters bmaj1, bmin1, theta1 (major, minor, PA) with another (bmaj2,bmin2, theta2) all in FWHM and radians (if deg is wanted, set ang='deg', if radians ang='rad')
def gauss2d_convolve ((bmaj1, bmin1, theta1, bmaj2, bmin2, theta2), ang='deg'): from scipy import pi, cos, sin, arctan2, sqrt, log # # check the ang keyword, if deg, go over to radians from deg if ang=='deg': theta1 *= pi/180 theta2 *= pi/180 else: pass cospa1 = cos(...
[ "def add_gaussian(grid, amp, x, y, sig_x, sig_y, rot):\n cent = np.array([len(grid[0]) // 2 + x, len(grid[0]) // 2 + y])\n X = grid[1]\n Y = grid[2]\n gaussian = grid[0]\n gaussian += gaussian_component(\n X,\n Y,\n amp,\n sig_x,\n sig_y,\n rot,\n cent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to ensure that all ablated_features and featureset values are correct for each row in the results summary file.
def check_ablation_rows(reader): row_num = 0 for row_num, row in enumerate(reader, 1): if row['ablated_features']: fs_str, ablated_str = row['featureset_name'].split('_minus_') actual_ablated = json.loads(row['ablated_features']) else: fs_str, ablated_str = ro...
[ "def test_correct_feature_dataframe(features):\n # This should not raise anything\n check_feature_specification(features)", "def _check_features(self, features: FeatureDataset):\n common_logger.info(f\"Check features compatibility with existing feature metas\")\n for _, feature_meta in self.fe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation + cross-validation
def test_ablation_cv(): config_template_path = join(config_dir, 'test_ablation.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=1) # read in the summary file and make sure it has # 7 ablated featuresets * (10 folds + 1 ave...
[ "def test_cross_validation():\n features=[]\n param=[1,2,3]\n assert CovEstHard._cross_validation(features, param, 5) == None", "def test_ablation_cv_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_sampler.template.cfg')\n config_path = fill_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation allcombos + cross-validation
def test_ablation_cv_all_combos(): config_template_path = join(config_dir, 'test_ablation_all_combos.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=None) # read in the summary file and make su...
[ "def test_ablation_cv_all_combos_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_sampler_all_combos.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, ablation=None)\n\n # read in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation + cross-validation + feature hashing
def test_ablation_cv_feature_hasher(): config_template_path = join(config_dir, 'test_ablation_feature_hasher.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=1) # read in the summary file and ma...
[ "def test_ablation_cv_feature_hasher_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_feature_hasher_sampler.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, ablation=1)\n\n # re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation allcombos + cross-validation + feature hashing
def test_ablation_cv_feature_hasher_all_combos(): config_template_path = join(config_dir, 'test_ablation_feature_hasher_all_combos.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=None) # read i...
[ "def test_ablation_cv_feature_hasher_all_combos_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_feature_hasher_sampler_all_combos.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation + cross-validation + samplers
def test_ablation_cv_sampler(): config_template_path = join(config_dir, 'test_ablation_sampler.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=1) # read in the summary file and make sure it has...
[ "def train_and_test():\n\ttrain_data, test_data, test_users, test_movies = get_train_data()\n\tprint \"loaded train & test data\"\n\tcf = collaborative_filtering(train_data)\n\t# evaluate the collaborative filtering model by printing the rmse value for the test data\n\tprint cf.score(test_data)", "def test_train(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation allcombos + cross-validation + samplers
def test_ablation_cv_all_combos_sampler(): config_template_path = join(config_dir, 'test_ablation_sampler_all_combos.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=None) # read in the summary ...
[ "def test_ablation_cv_all_combos():\n\n config_template_path = join(config_dir,\n 'test_ablation_all_combos.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, ablation=None)\n\n # read in the summary fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation + cross-validation + feature hashing + samplers
def test_ablation_cv_feature_hasher_sampler(): config_template_path = join(config_dir, 'test_ablation_feature_hasher_sampler.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=1) # read in the sum...
[ "def test_ablation_cv_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_sampler.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, ablation=1)\n\n # read in the summary file and mak...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ablation allcombos + cross-validation + feature hashing + samplers
def test_ablation_cv_feature_hasher_all_combos_sampler(): config_template_path = join(config_dir, 'test_ablation_feature_hasher_sampler_all_combos.template.cfg') config_path = fill_in_config_paths(config_template_path) run_configuration(config_path, quiet=True, ablation=Non...
[ "def test_ablation_cv_all_combos_sampler():\n\n config_template_path = join(config_dir,\n 'test_ablation_sampler_all_combos.template.cfg')\n config_path = fill_in_config_paths(config_template_path)\n\n run_configuration(config_path, quiet=True, ablation=None)\n\n # read in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wavelet background subtraction for STORM.
def background_subtraction_wavelet_hazen(img, holder, THRES=100, ITER=5, WLEVEL=6, OFFSET=50): back = wavelet_subtraction_hazen(img, ITER=ITER, THRES=THRES, WLEVEL=WLEVEL) img = img - back return convert_positive(img, OFFSET)
[ "def subtract_background(self):\n n = 20\n back = (np.average(self.init_image[:n])+np.average(self.init_image[-n:]))/2\n self.image = np.subtract(self.image,back)", "def finalize_background_subtract(self,im,sky):\n\n sky.quantize() # Quantize sky\n sky = self.finalize_sky_im(sky...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assure we fail when run without subcommands and show some info
def test_exits_without_subcommand(self): runner = CliRunner() result = runner.invoke(cli.cli) self.assertEqual(1, result.exit_code) self.assertNotEqual('', result.output)
[ "def test_subcommands_root_failure(self):\n root = AlwaysFails()\n root.add_subcommand(AlwaysSucceeds, name='never executed')\n root.do()\n self.assertTrue(root.failed)\n self.assertFalse(root['never executed'].done)", "def test_subcommands_failure(self):\n s = list()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assure we have a plot command and it calls reckoner install
def test_plot_exists(self, reckoner_mock, validation_mock): reckoner_instance = reckoner_mock() reckoner_instance.results = mock.MagicMock(has_errors=False) runner = CliRunner() with runner.isolated_filesystem(): with open('nonexistent.file', 'wb') as fake_file: ...
[ "def _prepare_plot_package(self):\n pass", "def plot(self) -> None:\n try:\n import matplotlib.pyplot as plt\n from PIL import Image\n except ModuleNotFoundError:\n LOGGER.warning(\"You need to install matplotlib to plot the product.\")\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""""The multiplication `u @ diag(w) @ v.T`. The name stands for matrix/vector/matrixtransposed.
def mvmt(u, w, v): return torch.einsum("...ij,...j,...kj->...ik", u, w, v)
[ "def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl", "def outerprod(u,v)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""SVD wrapper for CPU offloading.
def svd(x, cpu_offload=True, *args, **kwargs): return cpuoffload_op_( torch.svd, x, SvdOut, cpu_offload=cpu_offload, *args, **kwargs)
[ "def performSegmentCpu(self, src, ifDraw=...) -> retval:\n ...", "def main():\n\n @dppy.kernel\n def atomic_add(a):\n dppy.atomic.add(a, 0, 1)\n\n global_size = 100\n a = np.array([0])\n\n try:\n d = dpctl.select_gpu_device()\n with dpctl.device_context(d):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""QR wrapper for CPU offloading.
def qr(x, cpu_offload=True, *args, **kwargs): return cpuoffload_op_( torch.qr, x, QrOut, cpu_offload=cpu_offload, *args, **kwargs)
[ "def execute_jr(s, inst):\n if is16bit:\n inst.bits &= 0xffff\n if save_lr:\n s.rf[epiphany.isa.reg_map['LR']] = trim_32(s.pc + (2 if is16bit else 4))\n s.pc = s.rf[inst.rn]", "def preloadWorkload(x):\n def wrapper(self):\n self.getWMWorkload()\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try asking the commandline predictor (e.g. netMHCpan) which alleles it supports.
def _determine_supported_alleles(command, supported_allele_flag): try: # convert to str since Python3 returns a `bytes` object supported_alleles_output = check_output([ command, supported_allele_flag ]) supported_alleles_str = supported_alleles_out...
[ "def _determine_supported_alleles(command, supported_allele_flag):\n try:\n # convert to str since Python3 returns a `bytes` object\n supported_alleles_output = check_output([\n command, supported_allele_flag\n ])\n supported_alleles_str = supported_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Install included modules and subworkflows
def install_included_components(self, subworkflow_dir): modules_to_install, subworkflows_to_install = get_components_to_install(subworkflow_dir) for s_install in subworkflows_to_install: original_installed = self.installed_by self.installed_by = Path(subworkflow_dir).parts[-1] ...
[ "def install(ctx, subworkflow, dir, prompt, force, sha):\n from nf_core.subworkflows import SubworkflowInstall\n\n try:\n subworkflow_install = SubworkflowInstall(\n dir,\n force,\n prompt,\n sha,\n ctx.obj[\"modules_repo_url\"],\n ctx.o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect component name. Check that the supplied name is an available module/subworkflow.
def collect_and_verify_name(self, component, modules_repo): if component is None: component = questionary.autocomplete( f"{'Tool' if self.component_type == 'modules' else 'Subworkflow'} name:", choices=sorted(modules_repo.get_avail_components(self.component_type, comm...
[ "def _missing_component(self, name):\n\n self.error(\"the sysroot specification must contain an entry for '{0}' before anything that depends on it\".format(name))", "def _merge_module_name(self, package_name, component_name):\n\n parts = component_name.split('.')\n return self._get_module_nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether there are previously installed components with the same org_path but different remote URLs. Log an error if multiple remotes exist.
def check_alternate_remotes(self, modules_json): modules_json.load() for repo_url, repo_content in modules_json.modules_json.get("repos", dict()).items(): for component_type in repo_content: for dir in repo_content.get(component_type, dict()).keys(): if di...
[ "def check_remote():\n\n if \"gerrit\" in commands.getoutput(\"git remote\").split(\"\\n\"):\n\n for remote in commands.getoutput(\"git branch -a\").split(\"\\n\"):\n if remote.strip() == \"remotes/gerrit/master\" and not UPDATE:\n return\n # We have the remote, but aren't...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the reward. The purpose is to keep the buffer in the safe zone (from lo_threshold to hi_threshold). The reward value is in [-1, 1]: it is -1, 0, 1, 0, -1 at the max, hi, mid, low and min thresholds respectively. result = w1*BU(t) + w2*(VM(t)/VM_max)
def _buffer_reward(self, ht, nb_instance, action): max_threshold = 100. hi_threshold = 70. mid_threshold = 50. lo_threshold = 30. safe_zone = (hi_threshold - lo_threshold)/2 w1, w2 = 0.8, 0.2 if ht > hi_threshold: reward = -(ht-hi_threshold)/(max_thres...
[ "def reward(self):\n\n w_temp = self.config['temperature_w_in_reward']\n w_light = self.config['light_w_in_reward']\n w_cost = self.config['cost_w_in_reward']\n\n cost = self._calculate_cost_and_update_energy_source()\n temp, light = (self.inside_sensors['first']['temperature'],\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable the PWM functionality of this pin.
def enable_pwm(self): self.enabled = True
[ "def pwmEnable(pwm_pin):\r\n global PWM_PINS_ENABLED\r\n if PWM_PINS_ENABLED.get(pwm_pin): return\r\n pin_config = PWM_PINS[pwm_pin]\r\n assert (pin_config), \"*Invalid PWM pin: '%s'\" % pwm_pin\r\n\r\n for overlay in pin_config[2]:\r\n cape_manager.load(overlay, auto_unload=False)\r\n delay(250) # Give ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets or sets the pulse width, in ms, of the pin.
def pulse_width(self): return self._pulse_width
[ "def set_pulse_width(self, width):\n\n if type(width) != int and type(width) != float or width <= 0:\n raise ValueError(\"Pulse width must be a positive, non-zero number value (no strings)!\")\n\n width = float(width)\n\n response = self._send_command(\"DW \" + str(width))\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the microseconds per PWM tick.
def microseconds_per_tick(self) -> float: return 1000000.0 / (self.frequency_hz * 65536)
[ "def period_microseconds(self) -> float:\n return 1000000 / self.frequency_hz", "def get_ticks() -> int:\n return time.perf_counter_ns()", "def ms(self, t):\n return t // 1000000", "def micros():\r\n return time.time()*1000000 - START_TIME_MS*1000", "def duration(self):\n\t\treturn int(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the period, in microseconds, the PWM module is operating at.
def period_microseconds(self) -> float: return 1000000 / self.frequency_hz
[ "def period(self):\n if self._period is None:\n self._period = math.sqrt((4*(math.pi**2)*(self.a**3))/self.gm)\n return self._period", "def get_pulse_period(self):\n self.pulse_period = self._telnet_query(\"PULM:PER?\") # Gets the pulse period of the device\n # Returns the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete all events and all CreatorOrganizer objects.
def delete_all_events_and_creator_organizer(self): Event.objects.all().delete() print('Events delete success') CreatorOrganizer.objects.all().delete() print('Creator and Organizer delete success')
[ "def event_delete_all():\n # Create a cursor object\n cur = conn.cursor()\n\n # Add new entry into \"events\" table in the \"Prototype_Events\" database\n cur.execute(\"TRUNCATE TABLE events\")\n cur.execute(\"ALTER SEQUENCE events_id_seq RESTART WITH 1\")\n cur.execute(\"UPDATE events SET id=next...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the TimeFreqSupport with its TimeFreqSupport message (if possible).
def __init__(self, time_freq_support=None, server=None): if server is None: server = dpf.core._global_server() self._server = server self._stub = self._connect() if isinstance(time_freq_support, time_freq_support_pb2.TimeFreqSupport): self._message = time_freq_su...
[ "def _set_time_frequencies(self, frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_real.CopyFrom(frequencies._message)\n self._stub.Update(request)", "def initialize(fmp_file, lo_freq, lo_mu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the time frequencies of the time_freq_support. The frequencies field can have one value per set.
def _set_time_frequencies(self, frequencies): request = time_freq_support_pb2.TimeFreqSupportUpdateRequest() request.time_freq_support.CopyFrom(self._message) request.freq_real.CopyFrom(frequencies._message) self._stub.Update(request)
[ "def _set_complex_frequencies(self, complex_frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_complex.CopyFrom(complex_frequencies._message)\n self._stub.Update(request)", "def setFreq(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field of complex frequencies for the active result. The complex frequencies field can have one value per set. Examples >>> freq = time_freq_support.complex_frequencies
def complex_frequencies(self): return self._get_frequencies(cplx=True)
[ "def complex_frequencies(self, value):\n return self._set_complex_frequencies(value)", "def _set_complex_frequencies(self, complex_frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_complex.C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the complex frequencies of the time_freq_support. The complex frequencies field can have one value per set.
def _set_complex_frequencies(self, complex_frequencies): request = time_freq_support_pb2.TimeFreqSupportUpdateRequest() request.time_freq_support.CopyFrom(self._message) request.freq_complex.CopyFrom(complex_frequencies._message) self._stub.Update(request)
[ "def _set_time_frequencies(self, frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_real.CopyFrom(frequencies._message)\n self._stub.Update(request)", "def setFreq(self, freq, target_value = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Complex frequencies that define the time_freq_support of the analysis. The complex frequencies field can have one value per set.
def complex_frequencies(self, value): return self._set_complex_frequencies(value)
[ "def _set_complex_frequencies(self, complex_frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_complex.CopyFrom(complex_frequencies._message)\n self._stub.Update(request)", "def complex_frequ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the RPM values of the time_freq_support. The RPMs field has one value per load step.
def _set_rpms(self, rpms): request = time_freq_support_pb2.TimeFreqSupportUpdateRequest() request.time_freq_support.CopyFrom(self._message) request.rpm.CopyFrom(rpms._message) self._stub.Update(request)
[ "def _set_time_frequencies(self, frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_real.CopyFrom(frequencies._message)\n self._stub.Update(request)", "def set_frequency(self):\n if se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the field of harmonic indices for the active result. Returns
def get_harmonic_indices(self, stage_num=0): return self._get_harmonic_indices(stage_num)
[ "def _get_harmonic_indices(self, stage_num=0):\n request = time_freq_support_pb2.ListRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.cyclic_stage_num = stage_num\n\n list_response = self._stub.List(request)\n if list_response.cyc_harmonic_index.id != 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the harmonic indices values of the time frequency support.
def set_harmonic_indices(self, harmonic_indices, stage_num=0): request = time_freq_support_pb2.TimeFreqSupportUpdateRequest() cyclic_data = time_freq_support_pb2.CyclicHarmonicData() request.time_freq_support.CopyFrom(self._message) cyclic_data.cyc_harmonic_index.CopyFrom(harmonic_indice...
[ "def _set_harmonic_indices_at_stage(self, stage_num, step_harmonic_indices, step_id):\n harmonic_indices = self.get_harmonic_indices(stage_num)\n if harmonic_indices is None:\n harmonic_indices = core.Field(\n server=self._server,\n nature=core.natures.scalar,\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of result sets. Examples >>> time_freq_support.n_sets 35
def n_sets(self): return self._sets_count()
[ "def numSets(self):\n return self.sets", "def number_of_record_sets(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"number_of_record_sets\")", "def max_number_of_record_sets(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"max_number_of_record_sets\")", "def result_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the frequency corresponding to a requested step/substep or cumulative index.
def get_frequency(self, step=0, substep=0, cumulative_index=None, cplx=False): return self._get_frequency(step, substep, cumulative_index, cplx)
[ "def _get_cumulative_index(self, step, substep, freq, cplx):\n request = time_freq_support_pb2.GetRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.bool_cumulative_index = True\n request.complex = cplx\n if freq is not None:\n request.frequency = f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the cumulative index corresponding to the requested step/substep or frequency.
def get_cumulative_index(self, step=0, substep=0, freq=None, cplx=False): return self._get_cumulative_index(step, substep, freq, cplx)
[ "def _get_cumulative_index(self, step, substep, freq, cplx):\n request = time_freq_support_pb2.GetRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.bool_cumulative_index = True\n request.complex = cplx\n if freq is not None:\n request.frequency = f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the cumulative index corresponding to the requested step/substep or frequency.
def _get_cumulative_index(self, step, substep, freq, cplx): request = time_freq_support_pb2.GetRequest() request.time_freq_support.CopyFrom(self._message) request.bool_cumulative_index = True request.complex = cplx if freq is not None: request.frequency = freq ...
[ "def get_cumulative_index(self, step=0, substep=0, freq=None, cplx=False):\n return self._get_cumulative_index(step, substep, freq, cplx)", "def get_frequency(self, step=0, substep=0, cumulative_index=None, cplx=False):\n return self._get_frequency(step, substep, cumulative_index, cplx)", "def get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a field of all the RPMs in the model. Returns
def _get_rpms(self): request = time_freq_support_pb2.ListRequest() request.time_freq_support.CopyFrom(self._message) list_response = self._stub.List(request) if list_response.rpm.id != 0: return dpf.core.Field(server=self._server, field=list_response.rpm) return None
[ "def _fields(self):\n return getattr(self, self._fields_attribute)", "def rpm_params(self):\n RPM = []\n if self.GENERAL_SNAP:\n rpmlist = self.__snap_stanza_read(self.GENERAL_SNAP, 'rpm -qa')\n if rpmlist:\n for rpm in rpmlist:\n RPM.ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a field of all the harmonic indices in the model. Returns
def _get_harmonic_indices(self, stage_num=0): request = time_freq_support_pb2.ListRequest() request.time_freq_support.CopyFrom(self._message) request.cyclic_stage_num = stage_num list_response = self._stub.List(request) if list_response.cyc_harmonic_index.id != 0: re...
[ "def get_harmonic_indices(self, stage_num=0):\n return self._get_harmonic_indices(stage_num)", "def harmonic_field(L, k, i):\n\n return poisson_equation(add_constraints(L, i), k, eps=0)", "def measure_fld_index(self):\n return self.container['measure_fld_index']", "def get_spectral_index(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append a step with all its field values in the time frequencies support. The RPM value is a step-based (or load-step-based) value. The values for time frequencies, complex frequencies, and harmonic indices are set-based. There is one set value for each step/substep combination. It is necessary that each call of my_time_freq_sup...
def append_step( self, step_id, step_time_frequencies, step_complex_frequencies=None, rpm_value=None, step_harmonic_indices=None, ): # noqa: E501 time_frequencies = self.time_frequencies if time_frequencies is None: time_frequencies = cor...
[ "def addSpeedOnStep(self, timeStep, speed):\n self._speed[timeStep] = speed", "def _set_time_frequencies(self, frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_real.CopyFrom(frequencies._me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a deep copy of the data for a time frequency support on a given server. This method is useful for passing data from one server instance to another.
def deep_copy(self, server=None): tf = TimeFreqSupport(server=server) tf.time_frequencies = self.time_frequencies.deep_copy(server=server) if self.complex_frequencies: tf.complex_frequencies = self.complex_frequencies.deep_copy(server=server) if self.rpms: tf.rpms...
[ "def __init__(self, time_freq_support=None, server=None):\n if server is None:\n server = dpf.core._global_server()\n\n self._server = server\n self._stub = self._connect()\n if isinstance(time_freq_support, time_freq_support_pb2.TimeFreqSupport):\n self._message = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set values for harmonic indices for a specific stage number.
def _set_harmonic_indices_at_stage(self, stage_num, step_harmonic_indices, step_id): harmonic_indices = self.get_harmonic_indices(stage_num) if harmonic_indices is None: harmonic_indices = core.Field( server=self._server, nature=core.natures.scalar, ...
[ "def get_harmonic_indices(self, stage_num=0):\n return self._get_harmonic_indices(stage_num)", "def set_harmonic_indices(self, harmonic_indices, stage_num=0):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n cyclic_data = time_freq_support_pb2.CyclicHarmonicData()\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the entities to a .entities file for the specified pmids. Writes all entities of the dataset if no pmids are given.
def write_entities(pmids=None): if pmids is None: # If no pmids are given as argument, downloads entities for every PMID in the dataset. training_set = pd.read_csv(PATH_TO_TRAINING_SET, sep=";", encoding='latin').\ dropna(subset=['PMID1', 'PMID2', 'Authorship']).values testing_s...
[ "def write(pmids):\n\n # Writing once per each file because bugs happens otherwise (inefficient, might be changed)\n for pmid in pmids:\n filename = str(pmid)+\".entities\"\n\n if not os.path.isfile(filename):\n file = open(filename, 'w')\n entities = UTILITY.get_entities_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the entities to files for the specified pmids.
def write(pmids): # Writing once per each file because bugs happens otherwise (inefficient, might be changed) for pmid in pmids: filename = str(pmid)+".entities" if not os.path.isfile(filename): file = open(filename, 'w') entities = UTILITY.get_entities_by_pmids([pmid])...
[ "def write_entities(pmids=None):\n\n if pmids is None:\n # If no pmids are given as argument, downloads entities for every PMID in the dataset.\n training_set = pd.read_csv(PATH_TO_TRAINING_SET, sep=\";\", encoding='latin').\\\n dropna(subset=['PMID1', 'PMID2', 'Authorship']).values\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a Mediasite module using the provided module name and module id
def create_module(self, module_name, module_id): logging.info("Creating module '"+module_name+"' with module id "+module_id) post_data = {"Name":module_name, "ModuleId":module_id } result = self.controller.api_client.request("post", "Modules...
[ "def new(id):\n return Module(_core.LLVMModuleCreateWithName(id))", "def _createModule(module_name, source):\n # Create a module-like object object and return it.\n class ModuleWrapper:\n pass\n module = ModuleWrapper()\n module.__dict__ = {}\n return module", "def create(module):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether the provided module id already exists
def module_moduleid_already_exists(self, module_id): result = self.controller.api_client.request("get", "Modules", "$filter=ModuleId eq '"+module_id+"'", "").json() if self.controller.experienced_request_errors(result): return result else: if "odata.error" in resul...
[ "def _has_module(self, path, module_name):\n\n return os.path.isfile(os.path.join(path, '{module}.py'.format(module=module_name)))", "def is_name_updated(self, can_id):\n mod = self._modules.get(can_id)\n if mod:\n while not mod.is_name_complete:\n yield\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The number of seconds since this package was started, multiplied by config.hardwareSpeedup. This can effectively simulate time speedups for testing recipes.
def secondSinceStart(): elapsed = time.time() - timer if hasattr(config,'hardwareSpeedup'): speed = config.hardwareSpeedup if not (speed == None): return elapsed * speed return elapsed
[ "def time_per_demand_unit(self):\n return 1", "def run_time(self):\n return self._elapsed", "def warmup(self):\n\t\treturn int(self._warmup/self.tick_period) * self.tick_period", "def _calculate_runtime(self):\n\n _time = 0\n for _, _passes, _captures in self._batches:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sleep for a number of seconds, or, if config.hardwareSpeedup is configured, for seconds/config.hardwareSpeedup. The point of this method is to allow speeding up time without modifying the recipes. This is especially useful for testing.
def sleep(seconds): if hasattr(config,'hardwareSpeedup'): speed = config.hardwareSpeedup if not (speed == None): time.sleep(seconds/speed) return time.sleep(seconds)
[ "def doSleep(self):\n if os.environ.get(\"TRAVIS\"):\n time.sleep(10)\n else:\n time.sleep(20)\n return", "def sleepDelay(ms):\r\n time.sleep(ms/1000.0)", "def delay():\n latency = 0.49\n sleep(latency)", "def rand_sleep():\n time.sleep(random.uniform(0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dispense a number of ml from a particular pump.
def pumpDispense(pumpId, volume): return reagentDispenser.dispense(pumpId, volume)
[ "def retract_pump(self):\n # TODO TODO what does \"remember to stop manually\" mean? need to\n # instruct the pump to stop if it detects motor stall or will it do that\n # automatically? implement s.t. software limits are maintained and\n # respected?\n self.set_direction(\"WDR\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to get a sample ddo for testing.
def get_ddo_sample(datatoken_address): did = f"did:op:{remove_0x_prefix(datatoken_address)}" sample_ddo_path = get_resource_path("ddo", "ddo_sa_sample.json") assert sample_ddo_path.exists(), "{} does not exist!".format(sample_ddo_path) asset = DDO(json_filename=sample_ddo_path) asset.metadata["main...
[ "def sample(cls):\n return Department.list()[0]", "def get_sample(self, _id):\n\n sample = self.collection.find_one({'_id': ObjectId(_id)})\n\n return sample", "def _get_primary_dna_test_sample(self):\n sample = next(self._iter_all_test_samples(EXTRACTION_TYPE_DNA, True), None)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests chain operations on a DDO.
def test_ddo_on_chain(): config = ConfigProvider.get_config() ddo_address = get_contracts_addresses("ganache", config)[ MetadataContract.CONTRACT_NAME ] dtfactory_address = get_contracts_addresses("ganache", config)[ DTFactory.CONTRACT_NAME ] ddo_registry = MetadataContract(ddo_a...
[ "def test_ProcessChain0300(self):\n self.assertTrue(True)", "def test_nochain(self):\n self.__assert_empty_builder()\n self.__builder.nochain()\n self.assertEqual('path - -nochain ', str(self.__builder))\n\n self.__builder.nochain(False)\n self.__assert_empty_builder()", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FTP files to the endpoint as specified by the file_type to use in the glob, e.g. "/*.zip"
def ftp_to_endpoint(self, from_dir, file_type="/*.zip", passive=True): try: ftp_provider = FTP(self.logger) ftp_instance = ftp_provider.ftp_connect( uri=self.settings.PMC_FTP_URI, username=self.settings.PMC_FTP_USERNAME, password=self.setti...
[ "def ftp_to_endpoint(self, from_dir, file_type=\"/*.zip\", passive=True):\n try:\n ftp_provider = FTP(self.logger)\n ftp_instance = ftp_provider.ftp_connect(\n uri=self.settings.GLENCOE_FTP_URI,\n username=self.settings.GLENCOE_FTP_USERNAME,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload PMC zip file to S3
def upload_article_zip_to_s3(self): bucket_name = self.publish_bucket storage = storage_context(self.settings) storage_provider = self.settings.storage_provider + "://" for file_name in article_processing.file_list(self.directories.get("ZIP_DIR")): resource_dest = ( ...
[ "def upload_article_zip_to_s3(self):\n bucket_name = self.publish_bucket\n\n storage = storage_context(self.settings)\n storage_provider = self.settings.storage_provider + \"://\"\n\n for file_name in article_processing.file_list(self.directories.get(\"ZIP_DIR\")):\n resource_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an article id and existing zip file names, return the next revision number
def next_revision_number(fid, s3_key_names): revision = None s3_key_name = s3lib.latest_pmc_zip_revision(fid, s3_key_names) if s3_key_name: # Found an existing PMC zip file, look for a revision number revision_match = re.match(r'.*r(.*)\.zip$', s3_key_name) if revision_match is None...
[ "def next_revision_number(fid, s3_key_names):\n revision = None\n s3_key_name = s3lib.latest_pmc_zip_revision(fid, s3_key_names)\n\n if s3_key_name:\n # Found an existing PMC zip file, look for a revision number\n revision_match = re.match(r\".*r(.*)\\.zip$\", s3_key_name)\n if revisio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return dataset from filename. Loads the file filename and returns all the tables contained in the group 'group_name' in form of a unified dataset. Prior to returning it, the dataset is grouped by pose, to
def prepare_dataset(filename, group_name): with PoseDatasetIO(dataset=filename, columns=COLUMNS, mode='r') as dataset: dataset = {node._v_name: dataset.store.select(node._v_pathname). groupby('pose').mean(). rename(_rm_stand_pref) for node in dataset....
[ "def getDataset(filename, group, dataset):\n\n FILE = h5py.File(filename, \"r\")\n\n GROUP = FILE[group]\n\n try:\n data = np.array(GROUP[dataset])\n except:\n print(\"[ERROR]: <{:s}> dataset in <{:s}> group does not exist, returning None.\".format(dataset, group))\n return None\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drop the entered dataset columns.
def drop_columns(dataset, cols=COLS_TO_CLEAN): return dataset.drop(cols, axis=1)
[ "def clean(self):\n for column in self.columns:\n column.change_misc_values()\n column.drop_greater_than()", "def drop_columns(df_data, del_col):\n for i in del_col:\n df_data.drop(i, axis=1, inplace=True)", "def delete_columns(self):\n self.focus()\n self.di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the label vector y to numeric indices.
def numerize_y(y): labels = sorted(set(y)) return np.array(map(labels.index, y))
[ "def y(self):\n return np.array(self.y_list, dtype=float)", "def y_to_class_id(y_data):\n return np.asarray([constants.class_ids[str(y)] for y in y_data])", "def reverse_ordinal(y_ordinal):\n n_lignes = y_ordinal.shape[0]\n li = [] # la liste qu'on va ensuite transformer en vecteur\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a dataframe to scikit-learn's compatible X and y format.
def df_to_Xy(dataframe): y = zip(*dataframe.index)[1] y_num = numerize_y(y) return (dataframe.values, y_num)
[ "def _df_to_xy(df):\n \n y = df[['sentence_idx', 'tag']].copy()\n y = y.groupby('sentence_idx').apply(lambda d: d['tag'].values.tolist()).values\n \n df.drop(columns='tag', inplace=True)\n X = df.groupby('sentence_idx').apply(lambda d: d.to_dict('records')).values\n if len(X) != len(y):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate exact N-octave-space center frequencies. In practice, what is often desired is the "simplified" center frequencies, so this function is not of much use.
def noctave_center_freq(lowf, highf, width=3): n_centers = np.log2(highf / lowf) * width + 1 n_octave = np.log2(highf / lowf) return lowf * np.logspace(0, n_octave, num=n_centers, base=2)
[ "def _auditory_filters_centre_freq():\n\n band_number = arange(53)\n z_step_size = 0.50\n af_f0 = 81.9289 # ECMA-418-2\n c = 0.1618 # ECMA-418-2\n\n # Critical band rate scale\n z = (band_number + 1) * z_step_size\n var = c * z\n\n # Central frequency\n centre_freq = (af_f0 / c) * sinh(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a wavefile as a float.
def read_wav_as_float(path): _, signal = scipy.io.wavfile.read(path) if np.issubdtype(signal.dtype, np.integer): return signal.T / np.abs(np.iinfo(signal.dtype).min) return signal.T
[ "def read_wav(filename):\n s,fs = load(filename) # scipy reads int\n s = np.array(s)/float(max(abs(s)))\n s = add_wgn(s) # Add jitter for numerical stability\n return fs,s", "def read_float(stream):\n return struct.unpack('f', stream.read(4))[0]", "def float_read( self, mem_addr ):\n\t\treturn st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the News object for the specified site.
def getNewsForSite(self, siteid): return self.session.executeRequest(self.session.baseurl, 'GET', '/news/site/{0}.json'.format(siteid))
[ "def get(cls, site_id):\n site_attributes = Yola().get_site(site_id)\n return cls(**site_attributes)", "def get_news(self, **kwargs):\n return self._get_endpoint(\"news\", fmt_p=no_pandas, params=kwargs)", "def get(self, id):\n result = NewsService.get_by_id(id)\n if not resul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that raw set returns Stat
async def test_raw_set(full_zk, path): # type: (aiozk.ZKClient, str) -> None stat = await full_zk.set(path, 'asdf', -1) assert stat.data_length == 4 assert stat.version == 1
[ "def test_get_set(set_values):\n packet = PacketBE()\n for key in packet.keys():\n packet[key] = set_values[key]\n\n for key in packet.keys():\n assert packet[key] == set_values[key]", "def test_read_set(self):\n load_known_modules()\n dosi = DetectedObjectSetInput.create('sim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the hexadecimal IP for that IP.
def hexip(self, irc, msg, args, ip): quads = ip.split('.') ret = "" for quad in quads: i = int(quad) ret += '%02x' % i irc.reply(ret.upper())
[ "def get_ipaddr():\n return get('https://api.ipify.org').text", "def getip():\n\tsi='Address: '\n\tr=urlopen('http://checkip.dyndns.org').read()\n\ti=r.find(si)+len(si)\n\te=r.find('<',i)\n\treturn r[i:e]", "def getIpAddress():\n # type: () -> String\n return socket.gethostbyname(str(getHostName()))", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the CPDS from a pair of input light curve files.
def calc_cpds(lcfile1, lcfile2, fftlen, save_dyn=False, bintime=1, pdsrebin=1, outname='cpds' + HEN_FILE_EXTENSION, normalization='leahy', back_ctrate=0., noclobber=False): if noclobber and os.path.exists(outname): ...
[ "def calculate_dtc_vals(filename, detName=None) :\n numChannels = detSettingsBean.getDetectorList().size()\n if detName == None : \n detName=detSettingsBean.getDetectorName()\n\n raw_scaler_data = load_raw_scaler_data(filename, detName)\n raw_scalers_total = raw_scaler_data[0]\n tfg_resets = r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function called by the `HENdumpdyn` command line script.
def dumpdyn_main(args=None): import argparse description = ('Dump dynamical (cross) power spectra') parser = argparse.ArgumentParser(description=description) parser.add_argument("files", help=("List of files in any valid HENDRICS " "format for PDS or CPDS"), narg...
[ "def main():\n # set up the program to take in arguments from the command line", "def main(args):\n global disas_cmd\n\n # argument parser\n parser = argparse.ArgumentParser(description='Enables the user to manually identify the versions of located but unknown libraries, later to be used by %s\\'s Mat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Are the input and output indexes within range of the number of inputs and outputs?
def _indexes_valid(self): return self.input_index in range(self.num_inputs) and self.output_index in range(self.num_outputs)
[ "def validate_in_out(inputs, outputs):\n for port in outputs:\n port_num = port.split('-')[0]\n if port_num in inputs:\n raise ValueError(f\"Port {port_num} is both an input and output\")", "def _output_offsets(self):\n if (self.l_pad != 0 or self.r_pad != 0 or s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the block to the new output index if the index changed.
def set_output_index(self, output_index): if self.output_index != output_index: self.lock() self._disconnect_current() self.output_index = output_index self._connect_current() self.unlock()
[ "def update(self, block, idx):\n self.file.seek(idx * self.block_size)\n self.file.write(block)", "def _update_index(self):\n self.current_index = (self.current_index + 1) % self.nb_intervals", "def __rewrite_block_index_to(self, minor_block, add_tx_back_to_queue=True):\n new_chain =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identifier of the CloudFormation resource type version.
def type_version_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type_version_id")
[ "def type_version_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_version_id\")", "def type_version(self) -> str:\n return self._type_version", "def get_genomic_resource_id_version(self) -> str:\r\n return f\"{self.resource_id}{version_tuple_to_suffix(self.version...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing Resource resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, desired_state: Optional[pulumi.Input[str]] = None, properties: Optional[pulumi.Input[str]] = None, role_arn: Optional[pulumi.Input[str]] = None, schem...
[ "def _get_resource(\n resource,\n name=None,\n resource_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not _exactly_one((name, resource_id)):\n raise SaltInvocationError(\"One (but not both) of name or id must be provided.\")\n\n conn = _get_conn(regio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Amazon Resource Name (ARN) of the IAM Role to assume for operations.
def role_arn(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "role_arn")
[ "def role_arn_for_logging(self) -> str:\n return pulumi.get(self, \"role_arn_for_logging\")", "def _get_role_arn(self):\n if self.stack.cloudformation_service_role:\n return {\"RoleARN\": self.stack.cloudformation_service_role}\n else:\n return {}", "def role_name(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identifier of the CloudFormation resource type version.
def type_version_id(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "type_version_id")
[ "def type_version_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_version_id\")", "def type_version(self) -> str:\n return self._type_version", "def get_genomic_resource_id_version(self) -> str:\r\n return f\"{self.resource_id}{version_tuple_to_suffix(self.version)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
self.symptoms is a set of all symptoms. self.urls is a list of all urls relating to those symptoms. self.website_content is the website content of the first self.url. self.disease_name is pretty much the entirety of the disease; it was miscalled "disease name".
def __init__(self, symptoms): self.symptoms = symptoms self.urls = list(self.search(self.symptoms)) self.website_content = self.parse_websites(self.urls) #self.disease_name = self.get_info(self.website_content) # self.disease name # self.disease_symptoms # self.description
[ "def setSite(self, stringUrl, content):\r\n # remove trailing / characters from the base ur\r\n self.stringUrl = stringUrl #.rstrip('/ ')\r\n preDomain = urlparse.urlparse(self.stringUrl)\r\n self.domain = urlparse.urlunparse((preDomain[0], preDomain[1],'', '', '', ''))\r\n #self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the aux_input1 of this InlineResponse2005VehicleStats.
def aux_input1(self) -> AuxInputSeries: return self._aux_input1
[ "def aux_input2(self) -> AuxInputSeries:\n return self._aux_input2", "def aux_input1(self, aux_input1: AuxInputSeries):\n\n self._aux_input1 = aux_input1", "def get_aux_vector(self, cur_idx):\n return self.data_values[cur_idx, self.aux_col_ids]", "def get_input_vec(self):\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the aux_input1 of this InlineResponse2005VehicleStats.
def aux_input1(self, aux_input1: AuxInputSeries): self._aux_input1 = aux_input1
[ "def aux_input1(self) -> AuxInputSeries:\n return self._aux_input1", "def aux_input2(self, aux_input2: AuxInputSeries):\n\n self._aux_input2 = aux_input2", "def aux_input2(self) -> AuxInputSeries:\n return self._aux_input2", "def set1Value(self, *args):\n return _coin.SoMFVec4i32_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the aux_input2 of this InlineResponse2005VehicleStats.
def aux_input2(self) -> AuxInputSeries: return self._aux_input2
[ "def aux_input1(self) -> AuxInputSeries:\n return self._aux_input1", "def aux_input2(self, aux_input2: AuxInputSeries):\n\n self._aux_input2 = aux_input2", "def get_aux_vector(self, cur_idx):\n return self.data_values[cur_idx, self.aux_col_ids]", "def i2(self):\n return self.input_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the aux_input2 of this InlineResponse2005VehicleStats.
def aux_input2(self, aux_input2: AuxInputSeries): self._aux_input2 = aux_input2
[ "def aux_input2(self) -> AuxInputSeries:\n return self._aux_input2", "def aux_input1(self, aux_input1: AuxInputSeries):\n\n self._aux_input1 = aux_input1", "def SetInput2(self, input: 'itkImageSS2') -> \"void\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the engine_state of this InlineResponse2005VehicleStats.
def engine_state(self) -> List[EngineState]: return self._engine_state
[ "def sslenginestatus(self) :\n try :\n return self._sslenginestatus\n except Exception as e:\n raise e", "def get_state(self):\n return self.StateEngine(self.symbols)", "def state(self):\n\n try:\n out = self.__get_facts()\n except VsanNotPrese...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the engine_state of this InlineResponse2005VehicleStats.
def engine_state(self, engine_state: List[EngineState]): self._engine_state = engine_state
[ "def set_ets(self, mode, ets_cycles, ets_interleave):\n return self.SetEts(self._handle, mode, ets_cycles, ets_interleave)", "def set_engine(self, engine, meta):\n self.engine = engine\n self.meta = meta", "def set_state(self, gameState) :\n self.__gameState = gameState", "def engc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the vehicle_id of this InlineResponse2005VehicleStats.
def vehicle_id(self) -> int: return self._vehicle_id
[ "def getVehicleId(self) -> int:\n return self.vehicleId", "def vios_id(self):\n return self._get_val_int(_VADPT_LOCAL_ID)", "def getVeh_id(self):\n return \"VEHICLE IDENTIFICATION NO. %s\"%self.veh_Iden_No", "def get_vehicle_index(self, vehicle):\n index = None\n for i,v in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the vehicle_id of this InlineResponse2005VehicleStats.
def vehicle_id(self, vehicle_id: int): if vehicle_id is None: raise ValueError("Invalid value for `vehicle_id`, must not be `None`") # noqa: E501 self._vehicle_id = vehicle_id
[ "def id(self, vehicleId):\n self._id = vehicleId", "def virtual_router_id(self, virtual_router_id: str):\n\n self._virtual_router_id = virtual_router_id", "def getVehicleId(self) -> int:\n return self.vehicleId", "def _set_virtual_router_id(self, v, load=False):\n parent = getattr(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Watch for SSDP NOTIFY messages. The handler shall be called on any service coming online. byehandler is called whenever a system advertises that it is departing. If no byehandler is specified, byebye messages are ignored. The handler is given (where possible) the mac address, a list of viable sockaddrs to reference the pe...
def snoop(handler, byehandler=None, protocol=None, uuidlookup=None): # Normally, I like using v6/v4 agnostic socket. However, since we are # dabbling in multicast wizardry here, such sockets can cause big problems, # so we will have two distinct sockets tracelog = log.Logger('trace') try: ac...
[ "def demo_service(service_name, respond_to_wildcard=True, process_func=print_all, host_ip=SSDP_MULTICAST_ADDR, host_port=SSDP_PORT):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.IPPROTO_IP...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Page where we ask the user to pay with PayPal.
def paypal_pay(request):
    paypal_dict = {
        "business": "ruslan_cimbalyuk-facilitator@mail.ru",
        "amount": "1000.00",
        "currency_code": "RUB",
        "item_name": "T-Shirt",
        "invoice": "INV-00312",
        "notify_url": reverse('paypal-ipn'),
        "return_url": "http:/...
[ "def redirect_to_express(self):\n wpp = PayPalWPP(self.request)\n try:\n nvp_obj = wpp.setExpressCheckout(self.item)\n except PayPalFailure:\n self.context['errors'] = self.errors['paypal']\n return self.render_payment_form()\n else:\n pp_param...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the level of indirection.
def indirection_level(self):
    return self.ty.count("*") + self.ty.count("[")
[ "def get_level():\n return LEVEL", "def get_level(self):\r\n return self.__level", "def getLogLevel(self) -> \"int\":\n return _coin.ScXMLStateMachine_getLogLevel(self)", "def get_depth(self):\n return self.level_type[1]", "def indent_level(self):\n return self.container['inde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a named declaration in C, with vulkan.h formatting.
def c_pretty_decl(self, name, attr=""):
    plist = []
    for param in self.params:
        idx = param.ty.find("[")
        if idx < 0:
            idx = len(param.ty)
        pad = 44 - idx
        if pad <= 0:
            pad = 1
        plist.append(" %s%s%s%s" % (param.ty[...
[ "def c_declaration(self):\n # Write the function return type\n c_declaration = \"YEP_PUBLIC_SYMBOL enum YepStatus YEPABI \"\n # Parse the declaration passed in for its parameter names and types\n c_declaration += self.name + \"(\"\n # Parse and write the declaration for the input ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the typedef for the prototype in C.
def c_typedef(self, suffix="", attr=""):
    return self.c_decl(self.name + suffix, attr=attr, typed=True)
[ "def getctype(c):\n if isinstance(c, int):\n if c in BUILTIN_CTYPES:\n return BUILTIN_CTYPES[c]\n else:\n raise CrumbError('Invalid payload field size \"%d\"; must be the size of a built-in C integer type' % c)\n elif isinstance(c, float):\n if c in BUILTIN_CTYPES:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a call to the prototype in C.
def c_call(self):
    return "%s(%s)" % (self.name, self.c_params(need_type=False))
[ "def guess_prototype(args, prototype=None):\n if type(prototype) is str:\n prototype = parse_signature(prototype)\n elif prototype is None:\n l.warning(\"Guessing call prototype. Please specify prototype.\")\n\n charp = SimTypePointer(SimTypeChar())\n result = proto...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an image by sampling a latent from the prior distribution.
def sample_image(self):
    z = torch.randn(1, self.latent_size)
    r_t = self.decoder(z)
    return r_t
[ "def sample(self):\n return np.random.dirichlet(self.alpha)", "def generate_noise(samples):\n return np.random.normal(0, 1, (samples, LATENT_DIM))", "def generate_init_samples(self, im: np.ndarray) -> TensorList:\r\n\r\n # Compute augmentation size\r\n aug_expansion_factor = getattr(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary whose keys are (x, y) coordinate tuples and whose values count how many Rects cover that coordinate.
def count_overlaps(rects):
    coords = defaultdict(int)
    for r in rects:
        for x in range(r.x, r.x + r.w):
            for y in range(r.y, r.y + r.h):
                coords[(x, y)] += 1
    return coords
[ "def get_scores(self):\n scores = {0:0, 1:0, 2:0}\n for i in range(self.height-1):\n for j in range(self.width-1):\n owner = self.boxes[i][j].owner\n scores[owner] += 1\n return scores", "def count_overlaps(data: Counter) -> int:\n # Any point occup...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a string and tags all named entities, geofeatures, place names and spatial grammar
def process_all_text(text_string, quick=False, use_placenames=False):
    # print("Preliminary tagging...")
    token_list = core.tgc(text_string)
    # print("Name Entity chunking...")
    token_list = core.ne_group_extended(token_list)
    # for x in token_list:
    #     print(type(x), x)
    if use_placename...
[ "def tag_ingredient(txt):\n\ttext = nltk.word_tokenize(txt)\n\tposTagged = pos_tag(text)\n\tsimplifiedTags = [(word, map_tag('en-ptb', 'universal', tag)) for word, tag in posTagged]\n\tprint(simplifiedTags)", "def test_sentence_with_loc():\n tagged = tools.tag('To drink alcohol is very good for you in Berlin',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if a sentence contains the components to be identified as geographic text
def is_geotext(token_list, gnn_requirement, use_placenames):
    num_of_placenames = len(
        [x for x in token_list if isinstance(x, pn.PlacenameTree)])
    num_of_geonouns = len(
        [x for x in token_list if isinstance(x, gn.GeonounTree)])
    num_of_spatial_grammar = len(
        [x for x in token_lis...
[ "def is_urban(text, geo_location_dict=None):\n if geo_location_dict is not None:\n return geo_location_dict['road_no'] == ''\n road_examples = ['כביש ' + str(digit) for digit in range(10)]\n return not any(road_example in text for road_example in road_examples)", "def is_location(word):\n patte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator which yields the image to be downloaded. This function parses the specimen page html, finds all <img> tags, fetches the image via requests
def get_specimen_images(html):
    try:
        soup = BeautifulSoup(html, 'html.parser')
        imgs = soup.find_all("img")
        for img in imgs:
            yield requests.get(img.get("src"), stream=True, allow_redirects=True)
    except:
        raise
[ "def macys_make_selenium_search(url):\n\n #CHROME DRIVER\n #chromedriver = \"/Users/tomlarge/Desktop/FashionSense/nordstrom_scraper/chromedriver\"\n #os.environ[\"webdriver.chrome.driver\"] = chromedriver\n #browser = webdriver.Chrome()\n\n #PHANTOM JS\n webdriver.DesiredCapabilities.PHANTOMJS['ph...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the specimen image of the font variant using the url and save it as a png
def fetch_font_variants(url, font_name, output_dir):
    res = requests.get(url)
    if res.status_code == 200:
        font_output_dir = os.path.join(output_dir, font_name)
        if not os.path.exists(font_output_dir):
            os.makedirs(font_output_dir)
        for image in get_specimen_images(res.text):
            ...
[ "def get_png(url,filename = \"test.png\"):\n import requests\n r = requests.get(url)\n with open(filename,\"wb\") as png:\n png.write(r.content)", "def _font(size):\n path = os.path.join(os.path.dirname(__file__), 'data',\n \"Inconsolata.otf\")\n return Ima...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
load the main page for a font on Font Squirrel
def load_font_main(font_name, font_main_base_url, font_variant_base_url, output_dir):
    try:
        LOGGER.info("start {}".format(font_name))
        request_url = font_main_base_url + font_name
        LOGGER.info("Fetching data for font {} from url : {}".format(
            font_name, font_main_base_url))
        ...
[ "def load_fonts(self):\n self.levelfont = pygame.font.Font(data.filepath('chalkdust.ttf'), 20)\n self.destfont = pygame.font.Font(data.filepath('chalkdust.ttf'), 20)\n self.barfont = pygame.font.Font(data.filepath('chalkdust.ttf'), 16)\n self.menufont = pygame.font.Font(data.filepath('ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
load_image (...) -> Surface. Loads and returns an image. Tries to load an image from the passed 'filename' argument and automatically converts it to the display pixel format for faster blit operations. The 'alpha' argument will enforce alpha transparency on the image by invoking Surface.convert_alpha(). If the surface co...
def load_image (filename, alpha=False, colorkey=None):
    surface = image.load (filename)
    if colorkey:
        surface.set_colorkey (colorkey)
    if alpha or surface.get_alpha ():
        return surface.convert_alpha ()
    return surface.convert ()
[ "def load_image(name, alpha_value=None):\n fullname = join(dirname(realpath(__file__)), 'data', name)\n image = pygame.image.load(fullname).convert()\n if alpha_value:\n image.set_alpha(alpha_value)\n return image", "def load_image(self, fullname, colorkey=None):\n image = pygame.image.l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting list of changed files in git
def test_get_git_changed_files(self, repo):
    repo.return_value.merge_base.return_value[0].diff.return_value = [
        Change("/foo", "/foo", False, False),
        Change(None, "/bar", True, False),
        Change("/baz", None, False, True),
    ]
    actual = get_git_changed_files(os.getcw...
[ "def test_list_staged_files(repo: Repo):\n driver = GitDriver.from_repo(repo)\n file = driver.stage()\n assert file.where_change_type(GitChange.Add) in list_staged_files(repo)", "def test_repo_list_git_hooks(self):\n pass", "def _files_changed_in_git_diff(self, hash_1, hash_2):\n\n os.chd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }