Dataset schema:
  query      string  (lengths 9 to 9.05k)
  document   string  (lengths 10 to 222k)
  negatives  list    (lengths 19 to 20)
  metadata   dict
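Each row below pairs a natural-language query with a matching code document, a list of hard negatives, and a metadata dict describing the training objective. A minimal sketch of how such a dataset is typically loaded and indexed; the `datasets` library and the dataset path are my assumptions, not given here:

import datasets

# Placeholder path -- substitute the actual dataset identifier.
ds = datasets.load_dataset("some/code-retrieval-dataset", split="train")
row = ds[0]
print(row["query"])           # docstring-style description
print(row["document"])        # the matching code snippet
print(len(row["negatives"]))  # 19-20 non-matching snippets
print(row["metadata"])        # the triplet objective dict shown in each row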
Computes the number of frames for a given signal length.
def number_frames(signal_len, frame_len, frame_step):
    frames = 1
    if signal_len > frame_len:
        temp = (1.0 * signal_len - frame_len) / frame_step
        frames += int(np.floor(temp))
    return frames
[ "def get_num_frames(signal_length_samples, window_size_samples, hop_size_samples):\r\n o = window_size_samples - hop_size_samples\r\n \r\n return math.ceil((signal_length_samples - o)/(window_size_samples - o))", "def get_nframes(self):\n return len(self._frames)/self._sampwidth", "def get_num_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes a grid of indices for possibly overlapping frames.
def indices_grid(frame_len, frame_step, num_frames):
    indices = np.tile(np.arange(0, frame_len), (num_frames, 1)) + \
        np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_len, 1)).T
    indices = np.array(indices, dtype=np.int32)
    return indices
[ "def compute_index_grid(self):\n vecs = [np.arange(n) for n in self.shape]\n grid_locs = np.stack([v.flatten() for v in np.meshgrid(*vecs, indexing='ij')], axis=0).reshape(\n (-1,) + self.shape)\n return grid_locs.astype(int)", "def _getLocalOverlappingCellIDs(self):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes either the Hamming window or its inverse and applies it to a sequence of frames.
def apply_hamming(frames, inv=False):
    M = frames.shape[1]
    win = np.hamming(M)**(-1) if inv else np.hamming(M)
    return frames * win
[ "def windowing(input_):\n frame_size = input_.shape[1] # We apply the hamming window to each frame\n return input_ * scipy.signal.hamming(frame_size, sym=False)", "def windowing(input):\n w = hamming(input.shape[1], sym=False)\n\n # window shape (for explanation)\n # plt.figure()\n # plt.plot(w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy the basic env file and config file to a tmp_path.
def copy_basic_fixtures(cfngin_fixtures: Path, tmp_path: Path) -> None:
    copy_fixture(
        src=cfngin_fixtures / "envs" / "basic.env", dest=tmp_path / "test-us-east-1.env"
    )
    copy_fixture(
        src=cfngin_fixtures / "configs" / "basic.yml", dest=tmp_path / "basic.yml"
    )
[ "def createCfg( self ):\n copyfile(self.home + self.conf + \".template\", self.tempDirectory + self.conf)", "def logging_conf_tmp_file_path(tmp_path_factory: pytest.TempPathFactory) -> Path:\n tmp_dir = tmp_path_factory.mktemp(\"tmp_log\")\n shutil.copy(Path(logging_conf_module.__file__), Path(f\"{tm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure a mock action.
def configure_mock_action_instance(mock_action: Mock) -> Mock:
    mock_instance = Mock(return_value=None)
    mock_action.return_value = mock_instance
    mock_instance.execute = Mock()
    return mock_instance
[ "def test_action_mocked(self):\n with self.mock_global_connection:\n self.assertEqual(0, Action.count())\n\n manager = Manager(self.connection)\n self.assertFalse(manager.is_populated())\n manager.populate()\n self.assertTrue(manager.is_populated())\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test loading a CFN template.
def test_load_cfn_template(self, caplog: LogCaptureFixture, tmp_path: Path) -> None:
    cfn_template = tmp_path / "template.yml"
    cfn_template.write_text("test_key: !Ref something")
    cfngin = CFNgin(ctx=self.get_context(), sys_path=tmp_path)
    caplog.set_level("ERROR", logger="runway.cfngin")
    ...
[ "def test_load(self):\n template = config.load_template('tmux')\n self.assertTrue(isinstance(template, Template))", "def test_read_namespaced_template(self):\n pass", "def load_cfn_template(template_str):\n\n # cfn_flip.load() raises a JSONDecodeError even when the content was YAML (but ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input: an iperf client log; output: the final bandwidth value in kbps.
def get_iperf_bw(self, filename):
    # last line has avg values
    for line in open(filename, 'r'):
        pass
    bw = line.split(',')[-1].strip()
    return int(bw)/1000  # bw in kbps
[ "def iperf3_bandwidth(self, client, port):\n\n if not client:\n return\n\n iperf_res = None\n\n if self.nma.conf['databases']['tinydb_enable']:\n speed = self.speed_db.all()\n\n measured_bw = {'upload': 0, 'download': 0}\n measured_jitter = {'upload': 0, 'dow...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes visibilities from the last result, if there is one, and associates them with galaxies in this search where full-path galaxy names match. If the galaxy collection has a different name, then an association is not made. e.g.
def associate_hyper_visibilities(
    self, instance: af.ModelInstance
) -> af.ModelInstance:
    if self.hyper_galaxy_visibilities_path_dict is not None:
        for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
            ag.Galaxy
        ):
            if galaxy...
[ "def _update_galaxy_file_mapping(self):\n galaxy_to_refinery_mapping_list = []\n for node in self.tool._get_input_nodes():\n galaxy_to_refinery_mapping_list.append(\n {\n WorkflowTool.GALAXY_DATASET_HISTORY_ID:\n self.FAKE_DATASET_HIS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Amazon Resource Name (ARN) of the inference scheduler being created.
def inference_scheduler_arn(self) -> Optional[str]:
    return pulumi.get(self, "inference_scheduler_arn")
[ "def schedule_name(self) -> str:\n return pulumi.get(self, \"schedule_name\")", "def monitoring_schedule_arn(self) -> Optional[str]:\n return pulumi.get(self, \"monitoring_schedule_arn\")", "def task_definition_arn(self) -> str:\n return pulumi.get(self, \"task_definition_arn\")", "def ta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Any tags associated with the inference scheduler.
def tags(self) -> Optional[Sequence['outputs.InferenceSchedulerTag']]:
    return pulumi.get(self, "tags")
[ "def tag_specifications(self) -> pulumi.Output[Optional[Sequence['outputs.LaunchTemplateTagSpecification']]]:\n return pulumi.get(self, \"tag_specifications\")", "def get_tasks_tag(self, tag=None):\n cur = self.conn.cursor()\n if tag == None:\n return None\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resource schema for LookoutEquipment InferenceScheduler.
def get_inference_scheduler(inference_scheduler_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInferenceSchedulerResult:
    __args__ = dict()
    __args__['inferenceSchedulerName'] = inference_scheduler_name
    opts = pulumi.InvokeOptions.merge(_uti...
[ "def getIrriSchedule():", "def test_get_scheduling_v1_api_resources(self):\n pass", "def ilp_scheduler(time_slots: TimeSlots, observations: List[Observation]) -> Tuple[Schedule, Schedule]:\n\n # Note: Start slots run from 0 to time_slots.time_slots_per_site[Site.GS] +\n # time_slots.time_slots_per_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method(isInternal, docstring, name, args, isConst) -> Method. Creates a new Method description with the given docstring, name and args, for the language, with special consideration if the method was declared constant and/or internal.
def __init__ (self, isInternal, docstring, name, args, isConst):
    self.name = name
    self.isConst = isConst
    self.isInternal = isInternal
    if isInternal:
        if language == 'java':
            # We have a special Javadoc doclet that understands a non-standard
            # Javadoc tag, @internal. When ...
[ "def _make_method(name, doc):\r\n \r\n slicers = {\"__getslice__\" : \"__getitem__\", \"__delslice__\" : \"__delitem__\", \"__setslice__\" : \"__setitem__\"}\r\n \r\n name = str(name) # IronPython issue #10\r\n if name == \"__call__\":\r\n def __call__(_sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CClassDoc(docstring, name) -> CClassDoc. Creates a new CClassDoc with the given docstring and name.
def __init__ (self, docstring, name, isInternal):
    # Take out excess leading blank lines.
    docstring = re.sub('/\*\*(\s+\*)+', r'/** \n *', docstring)
    self.docstring = docstring
    self.name = name
    self.isInternal = isInternal
[ "def class_doc(self, classname: str, doc: str):\n self.class2doc[classname] = doc", "def member_doc(self, classname: str, cpp_name: str, doc: str):\n self.member2doc[(classname, cpp_name)] = doc", "def build_doc(cls, **kwargs):\n if datalad.in_librarymode():\n lgr.debug(\"Not assembling ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getHeadersFromSWIG (filename) -> (filename1, filename2, .., filenameN). Reads the list of %include directives from the given SWIG (.i). The list of C/C++ headers (.h) included is returned.
def getHeadersFromSWIG (filename):
    stream = open(filename)
    lines = stream.readlines()
    stream.close()
    lines = [line for line in lines if line.strip().startswith('%include')]
    lines = [line for line in lines if line.strip().endswith('.h')]
    return [line.replace('%include', '').strip() for line in lines]
[ "def headers(filepath: str) -> tuple[str, str]:\n filename = filepath.split(\"/\")[-1]\n namespaces = 'namespace chip {\\nnamespace TestCerts {\\n\\n'\n h_top = copyrightNotice + '\\n#pragma once\\n\\n#include <lib/support/Span.h>\\n\\n' + namespaces\n c_top = copyrightNotice + '\\n#include \"' + filena...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sanitizeForHTML (docstring) -> docstring. Performs HTML transformations on the C++/Doxygen docstring.
def sanitizeForHTML (docstring):
    # Remove @~, which we use as a hack in Doxygen 1.7-1.8
    docstring = docstring.replace(r'@~', '')
    # First do conditional section inclusion based on the current language.
    # Our possible conditional elements and their meanings are:
    #
    #   java:   only Java
    #   python: on...
[ "def to_html(docstring: str) -> str:\n # careful: markdown2 returns a subclass of str with an extra\n # .toc_html attribute. don't further process the result,\n # otherwise this attribute will be lost.\n return pdoc.markdown2.markdown( # type: ignore\n docstring,\n extras=markdown_extensi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rewriteDocstringForJava (docstring) -> docstring. Performs some minimal Javadoc-specific sanitizations on the C++/Doxygen docstring.
def rewriteDocstringForJava (docstring):
    # Preliminary: rewrite some of the data type references to equivalent
    # Java types. (Note: this rewriting affects only the documentation
    # comments inside classes & methods, not the method signatures.)
    docstring = docstring.replace(r'const char *', 'String ')
    docstr...
[ "def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Pyth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rewriteDocstringForCSharp (docstring) -> docstring. Performs some minimal C#-specific sanitizations on the C++/Doxygen docstring.
def rewriteDocstringForCSharp (docstring):
    # Preliminary: rewrite some of the data type references to equivalent
    # C# types. (Note: this rewriting affects only the documentation
    # comments inside classes & methods, not the actual method signatures.)
    docstring = docstring.replace(r'const char *', 'string ')
    ...
[ "def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Pyth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rewriteDocstringForPython (docstring) -> docstring. Performs some minimal Python-specific sanitizations on the C++/Doxygen docstring.
def rewriteDocstringForPython (docstring):
    # Take out the C++ comment start and end.
    docstring = docstring.replace('/**', '').replace('*/', '')
    p = re.compile('^(\s*)\*([ \t]*)', re.MULTILINE)
    docstring = p.sub(r'\2', docstring)
    # Rewrite some of the data type references to equivalent Python types.
    # (No...
[ "def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of parag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rewriteDocstringForPerl (docstring) -> docstring. Performs some minimal Perl-specific sanitizations on the C++/Doxygen docstring.
def rewriteDocstringForPerl (docstring):
    # Get rid of the /** ... */ and leading *'s.
    docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')
    # Get rid of indentation
    p = re.compile('^\s+(\S*\s*)', re.MULTILINE)
    docstring = p.sub(r'\1', docstring)
    # Get rid of paragraph indentation n...
[ "def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Pyth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
processFile (filename, ostream). Reads the given header file and writes to ostream the necessary SWIG incantation to annotate each method (or function) with a docstring appropriate for the given language.
def processFile (filename, ostream):
    istream = open(filename)
    header = CHeader(istream)
    istream.close()
    processClassDocs(ostream, header.classDocs)
    processClasses(ostream, header.classes)
    processFunctions(ostream, header.functions)
    ostream.flush()
[ "def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a file exists on FTP.
def file_exist() -> bool:
    pass
[ "def file_exists(host, fqpath):\n command = \"ls -ld %s\" % fqpath\n rcode, _, rerr = g.run(host, command)\n if rcode == 0:\n return True\n\n g.log.error('File does not exist: %s', rerr)\n return False", "def remote_file_exists(url):\n status = requests.head(url).status_code\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the background of the regime diagram following Fig. 3 of Belcher et al., 2012
def plot_regime_diagram_background_BG12(
        ax=None,
        ):
    if ax is None:
        ax = plt.gca()
    # range of power
    xpr = [-1, 1]
    ypr = [-3, 3]
    # range
    xlims = [10**i for i in xpr]
    ylims = [10**i for i in ypr]
    # size of x and y
    nx = 500
    ny = 500
    xx = np.logspace(xpr[...
[ "def plot_regime_diagram_background_L19(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # background following Fig. 3 of Belcher et al., 2012\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the background of the regime diagram in Li et al., 2019
def plot_regime_diagram_background_L19(
        ax=None,
        ):
    if ax is None:
        ax = plt.gca()
    # range of power
    xpr = [-1, 1]
    ypr = [-3, 3]
    # range
    xlims = [10**i for i in xpr]
    ylims = [10**i for i in ypr]
    # background following Fig. 3 of Belcher et al., 2012
    nx = 500
    ...
[ "def plot_regime_diagram_background_BG12(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # size of x and y\n nx = 500\n ny = 500\n x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use multiple colors in the ylabel
def set_ylabel_multicolor(
        ax,
        strings,
        colors,
        anchorpad=0.,
        **kwargs,
        ):
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker
    boxes = [TextArea(text, textprops=dict(color=color, ha='left', va='bottom', rotation=90, **kwargs))
             for text, co...
[ "def setAxisLabelColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'LABELS', axes)", "def yaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.ybox.set_text(r\"$%s$\" % (label))\r\n pass", "def setGraphYLabel(self, label, axis):\n pass", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Redirect mobile browsers to /mobile and others to /home.
def desktop_or_mobile(request):
    url_name = 'home.mobile' if request.MOBILE else 'home'
    return redirect_to(request, url_name, permanent=False)
[ "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print all datatypes in the model.
def print_datatypes(model: nn.Module, model_name: str, sep: str = "\n") -> None:
    log = model_name + "'s datatypes:" + sep
    log += sep.join(str(t) for t in model_utils.get_model_tensor_datatype(model))
    logger.info(log)
[ "def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)", "def show_features_datatypes(df):\n\tfor inum,icol in enumerate(df.columns):\n\t\tprint('Column id: {0:3d} \\tName: {1:12s} \\tDataType: {2}'.format(inum, icol, df[icol].dtypes))", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the trained model with the best accuracy.
def _load_best_model(self) -> None:
    self.trainer.resume()
[ "def load_model(self) -> None:\n log.info('Loading trained model')\n self.model = tf.keras.models.load_model('mnistnet_RNN')", "def load_best(self):\n try:\n best = pu.best_model_file_by_loss(self.get('model_dir'))\n self.load(best)\n except:\n pass #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the basic sleet calculation works.
def test_basic_calculation(self):
    expected_result = np.array(
        [
            [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
            [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
        ],
        dtype=np.float32,
    )
    result = calculate_sleet_probabilit...
[ "def test_get_solution(self):\n pass", "def test_sum():\n from simplecalc.calculator import sum_\n\n assert sum_([0, 1]) == 1\n assert sum_([0, 1, -0.1]) == 0.9", "def test_secant_system(testFunctions, tol, printFlag): \n pass", "def test_check_cost():", "def test_savings_calculator(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the basic sleet calculation works with int8 data.
def test_with_ints(self):
    rain_prob_cube = self.rain_prob_cube.copy(
        np.array(
            [[[1, 0, 0], [0, 1, 1], [0, 0, 1]], [[1, 0, 0], [0, 1, 1], [0, 0, 1]]],
            dtype=np.int8,
        )
    )
    snow_prob_cube = self.snow_prob_cube.copy(
        np.array(
            ...
[ "def test_int96():\n assert b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe7\\x03\\x00\\x00' == fastparquet.encoding.read_plain(\n struct.pack(b\"<qi\", 0, 999),\n parquet_thrift.Type.INT96, 1)", "def test_sizedIntegerTypes(self):\n baseIntIn = +2147483647\n baseNegIn = -21...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an exception is raised for negative values of probability_of_sleet in the cube.
def test_negative_values(self):
    rain = self.rain_prob_cube
    high_prob = self.high_prob_cube
    msg = "Negative values of sleet probability have been calculated."
    with self.assertRaisesRegex(ValueError, msg):
        calculate_sleet_probability(rain, high_prob)
[ "def test_raise_probabilities_negative(self):\n noise_ops = [([{\n \"name\": \"id\",\n \"qubits\": [0]\n }], 1.1), ([{\n \"name\": \"x\",\n \"qubits\": [0]\n }], -0.1)]\n self.assertRaises(NoiseError, lambda: QuantumError(noise_ops))", "def t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the name has been changed to sleet_probability
def test_name_of_cube(self):
    result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)
    name = "probability_of_sleet"
    self.assertEqual(result.long_name, name)
[ "def spellability(name, test=False):\n score = 1\n for metaphone in name.metaphones:\n for other in _metaphone_index[metaphone]:\n if other is name: continue\n pop_ratio = (other.get_popularity(emphasize_recent=True) /\n (name.get_popularity(emphasize_recen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert an appropriate exception is raised when UMAP is not installed
def test_umap_unavailable():
    from yellowbrick.text.umap_vis import UMAP
    assert UMAP is None
    with pytest.raises(
        YellowbrickValueError, match="umap package doesn't seem to be installed"
    ):
        UMAPVisualizer()
[ "def check_for_setup_error(self):", "def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")", "def testNetworkPlistIsAbsent(self):\n nw = '/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist'\n self.assertEqual(self.CheckForExistence(nw), Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the pipeline creation step for UMAP
def test_make_pipeline(self):
    umap = UMAPVisualizer()  # Should not cause an exception.
    assert umap.transformer_ is not None
    assert len(umap.transformer_.steps) == 1
[ "def test_valid(self):\n\n #No exception raised\n pipe = Pipeline([\n TestPipeline.MockComponentStart(),\n TestPipeline.MockComponentMid()\n ])\n self.assertFalse(pipe.validate())\n\n #Run the pipeline\n out = pipe.run()\n expected = {\"MCA_2\":...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check to make sure sklearn's UMAP doesn't use the size param
def test_sklearn_umap_size(self):
    # In UMAPVisualizer, the internal sklearn UMAP transform consumes
    # some but not all kwargs passed in by user. Those not in get_params(),
    # like size, are passed through to YB's finalize method. This test should
    # notify us if UMAP's params change on th...
[ "def test_custom_size_umap(self):\n umap = UMAPVisualizer(size=(100, 50))\n\n assert umap._size == (100, 50)", "def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check to make sure sklearn's UMAP doesn't use the title param
def test_sklearn_umap_title(self):
    # In TSNEVisualizer, the internal sklearn UMAP transform consumes
    # some but not all kwargs passed in by user. Those not in get_params(),
    # like title, are passed through to YB's finalize method. This test should
    # notify us if UMAP's params change on ...
[ "def test_custom_title_umap(self):\n umap = UMAPVisualizer(title=\"custom_title\")\n\n assert umap.title == \"custom_title\"", "def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check UMAP can accept a custom title (string) from the user
def test_custom_title_umap(self):
    umap = UMAPVisualizer(title="custom_title")
    assert umap.title == "custom_title"
[ "def verify_title(title,user_id):\n query =\"\"\"select entries.title from entries\n where user_id = {} and title = '{}'\n \"\"\".format(user_id, title)\n try:\n cur.execute(query)\n title = cur.fetchone()\n if title:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check UMAP can accept a custom size (tuple of pixels) from the user
def test_custom_size_umap(self):
    umap = UMAPVisualizer(size=(100, 50))
    assert umap._size == (100, 50)
[ "def test_sklearn_umap_size(self):\n # In UMAPVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like size, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params cha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check UMAP accepts and properly handles custom colors from user
def test_custom_colors_umap(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=5,
        random_state=42,
    )
    ## specify a list of custom colo...
[ "def _iscolorstring(我, 顏色):\n try:\n rgb = 我.cv.winfo_rgb(顏色)\n ok = 真\n except TK.TclError:\n ok = 假\n return ok", "def test_uda_value(self):\n code, out, err = self.t(('/uda_xxx_4/', 'rc.color.uda.xxx=', 'info'))\n self.assertIn('\\x1b[34m', ou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test UMAP integrated visualization on a sklearn classifier dataset
def test_make_classification_umap(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=3,
        random_state=42,
    )
    ## visualize data with UM...
[ "def visualize(model):\r\n\r\n if model.method == 'LDA':\r\n return\r\n reducer = umap.UMAP()\r\n print('Calculating UMAP projection ...')\r\n vec_umap = reducer.fit_transform(model.vec[model.method])\r\n plot_proj(vec_umap, model.cluster_model.labels_)", "def test_make_classification_umap_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test UMAP integrated visualization with class labels specified
def test_make_classification_umap_class_labels(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=3,
        random_state=42,
    )
    ## visualize...
[ "def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visua...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test UMAP when no target or classes are specified
def test_no_target_umap(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=3,
        random_state=6897,
    )
    ## visualize data with UMAP
    ...
[ "def test_make_classification_umap_class_labels(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the user can supply an alpha param on instantiation
def test_alpha_param(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=3,
        random_state=42,
    )
    ## Instantiate a UMAPVisualizer, provid...
[ "def test_valid_alpha(alpha: Any) -> None:\n check_alpha(alpha=alpha)", "def test_invalid_alpha(alpha: Any) -> None:\n with pytest.raises(ValueError, match=r\".*Invalid alpha.*\"):\n check_alpha(alpha=alpha)", "def test_Alpha_getter(self):\r\n self.assertEqual(self.mc.Alpha, 0.05)", "def t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for umap quick method with hobbies dataset
def test_quick_method(self):
    corpus = load_hobbies()
    tfidf = TfidfVectorizer()
    X = tfidf.fit_transform(corpus.data)
    y = corpus.target
    viz = umap(X, y, show=False)
    assert isinstance(viz, UMAPVisualizer)
    self.assert_images_similar(viz, tol=50)
[ "def test_get_boat(self):\n pass", "def test_hobbies(self):\n response = self.client.get('/hobbies/')\n self.assertEqual(response.status_code, 200)", "def test_many_bdd_labels_for_one_function():\n pass", "def test_get_learners(self):\n pass", "def test_amphibians_get(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A wrapper that does dot for a multidimensional image that is often used in the pipeline. The input image should be C-contiguous.
def dot_image(image, B):
    imshape = image.shape
    if not image.flags['C_CONTIGUOUS']:
        raise TypeError('Error: cannot deal with non-C-contiguous image')
    output = gemm(1.0, image.reshape((np.prod(imshape[:-1]), imshape[-1])), B)
    return output.reshape(imshape[:-1] + (B.shape[1],))
[ "def batched_dot(\n a: torch.FloatTensor,\n b: torch.FloatTensor,\n) -> torch.FloatTensor:\n return _batched_dot_manual(a, b)", "def dot_batch(x1, x2):\n\n batch = x1.shape[0]\n return torch.reshape(x1*x2, (batch, -1)).sum(1)", "def dot(inputs, axes, normalize=False, **kwargs):\n return Dot(axes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An MPI implementation of the mean over different nodes.
def mpi_mean(data):
    s_local = data.sum(0)
    m = np.empty_like(s_local)
    mpi.COMM.Allreduce(s_local, m)
    num_data = mpi.COMM.allreduce(data.shape[0])
    m /= float(num_data)
    return m
[ "def mpi_avg(x):\n return mpi_sum(x) / num_procs()", "def mpi_avg_grads(module):\n if num_procs()==1:\n return\n for p in module.parameters():\n p_grad = p.grad.cpu()\n p_grad_numpy = p_grad.numpy() # numpy view of tensor data\n avg_p_grad = mpi_avg(p_grad_numpy)\n p_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An MPI implementation of the std over different nodes.
def mpi_std(data):
    m = mpi_mean(data)
    data_centered = data - m
    data_centered **= 2
    std_local = data_centered.sum(0)
    std = np.empty_like(std_local)
    mpi.COMM.Allreduce(std_local, std)
    num_data = mpi.COMM.allreduce(data.shape[0])
    std /= float(num_data)
    return std
[ "def fun1():\n size = MPI.COMM_WORLD.Get_size()\n rank = MPI.COMM_WORLD.Get_rank()\n name = MPI.Get_processor_name()\n\n print \"Hello, World! I am process %d of %d on %s.\\n\" % (rank, size, name)", "def factor(A: core.DistributedMatrix, S: core.DistributedMatrix, n:int, m:int, overwrite_a=True, allo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An MPI implementation of the covariance matrix over different nodes.
def mpi_cov(data):
    m = mpi_mean(data)
    data_centered = data - m
    cov_local = dot(data_centered.T, data_centered)
    covmat = np.empty_like(cov_local)
    mpi.COMM.Allreduce(cov_local, covmat)
    num_data = mpi.COMM.allreduce(data.shape[0])
    covmat /= float(num_data)
    return covmat
[ "def covariance_matrix(self):\n covariance_matrix = np.zeros((self.N, self.N))\n for j in range(0, self.J):\n covariance_matrix += float(1)/self.J*(self.dataset[:, j].reshape(self.N, 1) - self.mean_data_vect)\\\n .dot((self.dataset[:, j].reshape(self.N, 1) - self.mean_data_ve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates Hypercube objects for every index in the multidimensional self.hypercubes list.
def create_grids_structure(self):
    for indices, hypercube in np.ndenumerate(self.hypercubes):
        self.hypercubes[indices] = Hypercube(coords=indices)
[ "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a LowerLevelGrid when possible; a level equal to one means that this is the coarsest level.
def create_lower_level_grid(self):
    if self.level == 1:
        return False
    else:
        return LowerLevelGrid(level=self.level - 1, parent_hypercubes_number=self.hypercubes_number,
                              parent_hypercubes=self.hypercubes, dims=self.dims)
[ "def min_level(self):\n return self.__min", "def create_lungcontour(self):\n \tself._view_frame.SetStatusText(\"Calculating lungcontour...\")\n \tcontourLung = vtk.vtkMarchingCubes()\n \tcontourLung.SetValue(0,1)\n \tcontourLung.SetInput(self.mask_data)\n\n \tsmoother = vtk.vtkWindowedSincPo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an Example, finds the class of its nearest nonempty Hypercube. First, it gathers the data (center coordinates and the class) of each parent Hypercube whose coordinates are listed in parents_indices. Then it returns the class of the nearest nonempty parent Hypercube.
def nearest_neighbours_class(self, example_coords, parents_indices):
    print("Computing the nearest neighbours class.")
    parents_data = [(self.hypercubes[parent].center, self.hypercubes[parent].hypercube_class)
                    for parent in parents_indices]
    distances = sorted([(distance.euc...
[ "def classify(self, example_coords, hypercubes_coords):\n print(\"Classifying an observation with coordinates; \" + str(example_coords))\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n if self.hypercubes[hypercubes_coords].hypercube_class == EMPTY_HYPERCUBE_INDICATOR:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the provided Example to the correct Hypercube in the BasicGrid.
def add_example_to_grid(self, example):
    indices = tuple([int(example.coords[x] / self.hypercube_measurements[x])
                     for x in range(self.dims - 1, -1, -1)])
    self.hypercubes[indices].add_example(example)
[ "def add_example(self, example):\n raise NotImplementedError", "def addExample(self, example):\n self.exampleList.append(example)", "def example(self, example):\n\n self._example = example", "def batch_update(self, examples):\n print(\"Updating the BaseGrid with a batch of examples\")\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets classes for all hypercubes in self and in its child (LowerLevelGrid).
def set_hypercubes_classes(self):
    print("Setting the BaseGrid hypercubes' classes.")
    list_of_all_hc = list(self.hypercubes.flatten())
    print("Number of hypercubes: " + str(len(list_of_all_hc)))
    for hypercube in list_of_all_hc:
        hypercube.set_hypercube_class()
    if self.c...
[ "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes every hypercube's center by taking the midpoint between the beginning and the end of hypercube in every dimension.
def compute_centers_of_hypercubes(self):
    for hc in self.hypercubes.flatten():
        for i in range(self.dims - 1, -1, -1):
            index = self.dims - (i + 1)
            hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]
[ "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predicts the class of an observation with given coordinates.
def test(self, example_coords):
    print("Predicting the class of an observation with coordinates: " + str(example_coords))
    hypercubes_coords = tuple(
        [int(example_coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
    if self.hypercubes[hypercubes_coords].h...
[ "def predict(self, obs):\n return self.model(obs)", "def predict(self, X):\n (t0, t1, t2) = self.theta\n g = lambda x: t0 + t1 * x[0] + t2 * x[1]\n return np.array([\n self.classes[1] if g(x) > 0 else self.classes[0]\n for x in X\n ])", "def predict(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the grid with a given Example. If the Grid has a child, it is also forced to update itself with the new observation.
def update(self, example, hypercubes_coords=None):
    hypercubes_coords = tuple(
        [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
    new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])
    print("Update. Changed cl...
[ "def batch_update(self, examples):\n print(\"Updating the BaseGrid with a batch of examples\")\n examples_grouping_dict = {}\n for example in examples:\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the grid, given a batch of Examples. It groups Examples by class_id and by the Hypercubes containing these Examples. If the Grid has a child, it is also forced to update itself.
def batch_update(self, examples):
    print("Updating the BaseGrid with a batch of examples")
    examples_grouping_dict = {}
    for example in examples:
        hypercubes_coords = tuple(
            [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
        ...
[ "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes coordinates of the 2^dims parent Hypercubes for every Hypercube in the grid. Each coarser Hypercube consists of 2^dims finer-level Hypercubes.
def set_hypercubes_parents_indices(self):
    for hypercube in self.hypercubes.flatten():
        coordinates = []
        for coord in hypercube.coords:
            coordinates.append([2 * coord, 2 * coord + 1])
        for indices in list(itertools.product(*coordinates)):
            hypercube...
[ "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets classes for all hypercubes in self and in its child (another LowerLevelGrid) if it exists.
def set_hypercubes_classes(self):
    print("Setting the Hypercubes' classes of grid at level: " + str(self.level))
    for hypercube in self.hypercubes.flatten():
        coordinates = []
        for coord in hypercube.coords:
            coordinates.append([2 * coord, 2 * coord + 1])
        p...
[ "def set_hypercubes_classes(self):\n print(\"Setting the BaseGrid hypercubes' classes.\")\n list_of_all_hc = list(self.hypercubes.flatten())\n print(\"Number of hypercubes: \" + str(len(list_of_all_hc)))\n for hypercube in list_of_all_hc:\n hypercube.set_hypercube_class()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes every hypercube's center by taking the midpoint between the beginning and the end of hypercube in every dimension.
def compute_centers_of_hypercubes(self):
    for hypercube in self.hypercubes.flatten():
        sums = np.zeros((len(hypercube.coords)))
        for coords in hypercube.parent_hypercubes_indices:
            for index, summ in enumerate(sums):
                sums[index] += self.parent_hy...
[ "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def find_center(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classifies an observation with the given coordinates.
def classify(self, example_coords, hypercubes_coords):
    print("Classifying an observation with coordinates; " + str(example_coords))
    hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])
    if self.hypercubes[hypercubes_coords].hypercube_class == EMPTY_HYPERCUBE_INDICATOR:
        r...
[ "def tag(self, coordinates, classification):\n for x, y in coordinates:\n self.classification[y][x] = classification", "def test(self, example_coords):\n print(\"Predicting the class of an observation with coordinates: \" + str(example_coords))\n hypercubes_coords = tuple(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding two users works
def testAdd2(self):
    self.assertEquals(models.SUCCESS, self.users.add("userC", "password"))
    self.assertEquals(models.SUCCESS, self.users.add("userD", "password"))
[ "def testAdd2(self):\n self.assertEquals(SUCCESS, UsersModel.UsersModelManager.add(\"user1\", \"password\"))\n self.assertEquals(SUCCESS, UsersModel.UsersModelManager.add(\"user2\", \"password\"))", "def test_add_user(self):\n pass", "def test_duplicate_addition(self):\n \n te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with an empty username fails
def testAddEmptyUsername(self):
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add("", "password"))
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddEmptyUsername(self):\n self.assertEquals(ERR_BAD_USERNAME, UsersModel.UsersModelManager.add(\"\", \"password\"))", "def testAddNoneUsername(self):\n self.assertE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with a None password fails
def testAddNonePassword(self):
    self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add("userF", None))
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddNoneUsername(self):\n self.assertEquals(mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with a None username fails
def testAddNoneUsername(self):
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, "password"))
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddEmptyUsername(self):\n self.assertEquals(mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with both username and password as None fails
def testAddNoneUsernameAndPassword(self):
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def testAddNonePassword(self):\n self.assertEquals(models...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with both a blank username and password fails
def testAddNoneUsernameAndPassword(self):
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add("", ""))
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def testAddEmptyUsername(self):\n self.assertEquals(ERR_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with a long username fails
def testAddLongUsername(self):
    original_username = "thiswillbelong"
    longer_username = original_username*10
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, "password"))
[ "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with a long password fails
def testAddLongPassword(self):
    original_password = "thiswillbelong"
    longer_password = original_password*10
    self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add("paulinarocks", longer_password))
[ "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that adding a user with both a long username and long password fails
def testAddLongUsernameAndPassword(self):
    original_username = "thisgonnabelong"
    longer_username = original_username*10
    original_password = "thisalsogonnabelong"
    longer_password = original_password*10
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longe...
[ "def testAddLongUsername(self):\n original_username = \"thiswillbelong\"\n longer_username = original_username*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, \"password\"))", "def testAddLongPassword(self):\n original_password = \"thiswillbelong\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that logging in with both an invalid username and invalid password fails
def testLoginBadUsernameAndPassword(self):
    self.assertEquals(models.SUCCESS, self.users.add("userJ", "password"))
    self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login("nobody_user", "nobody_password"))
[ "def test_incorrect_login(self):\n # test incorrect username\n self.incorrect_login(\"\", \"password\")\n # test incorrect password\n self.incorrect_login(\"admin\", \"\")", "def test_signin_invalid_username(self):\n with self.client:\n response = self.login('testuser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a master spec graph.
def master_spec_graph(master_spec):
    if not isinstance(master_spec, spec_pb2.MasterSpec):
        raise TypeError("master_spec_graph() expects a MasterSpec input.")
    graph = pygraphviz.AGraph(directed=True)
    graph.node_attr.update(shape="box",
                           style="filled",
                           fillcolor="white",
                           fontname="roboto, helvetica, arial",
                           fontsize=1...
[ "def construct_master(self, master_scenarios):\n lr_instance = self.tsdro.lr_instance\n\n self.master, self.stage1_vars = lr_instance.construct_stage1()\n if self.method == \"RO\":\n self.wass_mult = 0\n else:\n self.wass_mult = self.master.addVar(name=\"wass_multip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch child nodes for a given Zookeeper path.
def _get_zk_path_children(self, zk_conn, zk_path, name_for_error):
    children = []
    try:
        children = zk_conn.get_children(zk_path)
    except NoNodeError:
        self.log.info('No zookeeper node at %s', zk_path)
    except Exception:
        self.log.exception('Could not read %s...
[ "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def _get_recursively(zoo_client, path, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch Consumer Group offsets from Zookeeper. Also fetch consumer_groups, topics, and partitions if not already specified in consumer_groups.
def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''):
    zk_consumer_offsets = {}
    # Construct the Zookeeper path pattern
    # /consumers/[groupId]/offsets/[topic]/[partitionId]
    zk_path_consumer = zk_prefix + '/consumers/'
    zk_path_topic_tmpl = zk_path_...
[ "def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''):\n zk_consumer_offsets = {}\n\n # Construct the Zookeeper path pattern\n # /consumers/[groupId]/offsets/[topic]/[partitionId]\n zk_path_consumer = zk_prefix + '/consumers/'\n zk_path_topic_tmpl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Widget-specific CSS class
def css_class(self):
    css_type = self.widget_type
    css_title = normalizer.normalize(self.data.title)
    return ('faceted-checkboxtree-widget '
            'faceted-{0}-widget section-{1}').format(css_type, css_title)
[ "def get_widget_css_class(self, field_name, field):\n return self.widget_css_class or None", "def custom_widget_wrapper(cls):\n cls.__webwidget__ = True\n return cls", "def getWidget(self):", "def CSSClasses(self):", "def listing_style_class(self):", "def create_widgets(self):", "def set_st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if key in self.default
def selected(self, key):
    default = self.default
    if not default:
        return False
    for item in default:
        if compare(key, item) == 0:
            return True
    return False
[ "def __contains__(self, key):\n if super(Settings, self).__contains__(key):\n return True\n\n return key in self.extension.default_settings", "def __contains__(self, key):\n\t\treturn key in self.configuration", "def has(self, key):\n return False", "def __contains__(self, key)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get value from form and return a catalog dict query
def query(self, form):
    query = {}
    index = self.data.get('index', '')
    index = index.encode('utf-8', 'replace')
    if not self.operator_visible:
        operator = self.operator
    else:
        operator = form.get(self.data.getId() + '-operator', self.operator)
    operato...
[ "def query(self, form):\n query = {}\n index = self.data.get('index', '')\n index = index.encode('utf-8', 'replace')\n\n if not self.operator_visible:\n operator = self.operator\n else:\n operator = form.get(self.data.getId() + '-operator', self.operator)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predicts whether examples are anomalies.
def predict(X, epsilon, gaussian, **kwargs):
    p = gaussian(X=X, **kwargs)
    return is_anomaly(p, threshold=epsilon)
[ "def anomaly(self):\n return self._test(result_count=1, failure_amount=1)", "def predictFailures (self) :\n \n while self.traceData :\n\n if self.traceData [0] == self.traceType :\n\n self.totalEvents += 1\n\n if random.random () < self.recall :\n\n self.predictedEvent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register requirement syntax for later use; returns an ID for retrieving the syntax
def _register_requirement_syntax(self, syntax):
    syntaxId = self.nextSyntaxId
    self.nextSyntaxId += 1
    return syntaxId
[ "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def syntax_text():", "def get_syntax(self):\n # Implemented from template for osid.Metadata.get_element_id_template...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an expression, create an atomic proposition factory.
def _create_atomic_proposition_factory(self, node):
    lineNum = ast.Constant(node.lineno)
    ast.copy_location(lineNum, node)
    closure = ast.Lambda(noArgs, node)
    ast.copy_location(closure, node)
    syntaxId = self._register_requirement_syntax(node)
    syntaxIdConst = ast.Constant(s...
[ "def factory(expr):\n values = [c for c in expr if c != ' ']\n new_node = Node(values[0])\n return ParseTree(new_node)", "def new_expression_op(op_meta_info: OpMetaInfo, expression: str) -> Operation:\n\n if not op_meta_info:\n raise ValueError('op_meta_info must be given')\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a BoolOp node (`and`, `or`) to a corresponding proposition factory
def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:
    # 1. wrap each operand with a lambda function
    operands = []
    for operand in node.values:
        o = self.visit(operand)
        if self.is_proposition_factory(o):
            # if the operand is already a temporal requirement fact...
[ "def convert_broadcast_logical_or(node, **kwargs):\n return create_basic_op_node('Or', node, kwargs)", "def bool_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for left, right in combinations(ctx.expressions_by_type(bool), 2):\n yield AnnotatedExpression(\n ast.Boo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of statements that defines the precondition and invariant checkers
def makeGuardCheckers(
    self,
    args: ast.arguments,
    preconditions: List[s.Precondition],
    invariants: List[s.Invariant],
) -> List[ast.AST]:
    # Statements that check preconditions are satisfied
    preconditionChecks = []
    for precondition in preconditions:
        ...
[ "def _build_preconditions(self, raw_preconditions_str):\n preconditions = []\n # First, parse out any substrings enclosed in curly braces; we ignore all characters between\n # such elements, which allows an author to easily include code comments, indentation, etc.\n raw_preconditions = r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of preconditions and invariants, separate items into the list of preconditions and list of invariants
def separatePreconditionsAndInvariants(
    self, header: List[Union[s.Precondition, s.Invariant]]
) -> Tuple[List[s.Precondition], List[s.Invariant]]:
    preconditions: List[s.Precondition] = []
    invariants: List[s.Invariant] = []
    for n in header:
        if isinstance(n, s.Precondition...
[ "def split_clauses(clauses, model):\n satisfied, unsatisfied = [], []\n for c in clauses:\n if check_clause_true(c, model):\n satisfied.append(c)\n else:\n unsatisfied.append(c)\n return [satisfied, unsatisfied]", "def makeGuardCheckers(\n self,\n args: a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an invocation of an action, behavior, or scenario.
def generateInvocation(self, node: ast.AST, actionlike, invoker=ast.Yield):
    invokeAction = ast.Expr(invoker(actionlike))
    checker = ast.Attribute(
        ast.Name(behaviorArgName, loadCtx), checkInvariantsName, loadCtx
    )
    args = ast.Starred(
        ast.Attribute(ast.Name(behavior...
[ "def generate_actions(self, *args, deterministic=False):\n raise NotImplementedError", "def KB_AgentProgram(KB):\n steps = itertools.count()\n\n def program(percept):\n t = steps.next()\n KB.tell(make_percept_sentence(percept, t))\n action = KB.ask(make_action_query(t))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the meta-controller state. Concatenates the vector representation of the largest selected primitive action with the tried-constraints vector.
def get_meta_controller_state(self):
    state = np.zeros(self._num_primitive_actions)
    if len(self._selected_primitive_actions):
        selected_primitive_actions = np.array(self._selected_primitive_actions)
        max_primtive_action = np.max(selected_primitive_actions)
        state[max_pri...
[ "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def get_optimal_action(self, state):\n \n if self.limits_low != None and self.l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an array of controller environment states.
def get_controller_environment_states(self, env_state): controller_environment_states = np.split(env_state, self._num_controllers) return controller_environment_states
[ "def get_environment_state(self) -> np.array:\n raise NotImplementedError", "def list_environment() -> List[Environment]:\n _check_active_client()\n return _merlin_client.list_environment() # type: ignore", "def states(self):\n return array(self.state[:self.last_n])", "def get_list_of_sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the controller state containing the controller's environment state, constraint, ordering vector, and received communication vectors.
def get_controller_state(self, env_state, constraint, ordering, comm_turn, communication_vector=None): controller_state = np.zeros(self._controller_state_size) # Apply the constraint to the environment state. env_state_plus_constraint = np.logical_and(env_state, constraint).astype(int) ...
[ "def get_Controller_State(self):\n if self.observedRobotStatesList is None:\n len_observed = 0\n else:\n len_observed = len(self.observedRobotStatesList)\n if self.currentlyActiveTrajectoryNumber is None:\n len_currentTrajectory = 0\n else:\n l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Intrinsically rewards a subset of controllers using the provided critic function.
def intrinsic_reward(self, env_states, constraints, orderings, selected_actions): return self._critic_fn( env_states, constraints, orderings, selected_actions)
[ "def all_template_functions_run(self, critic_class, state):\n critic = critic_class(state)\n\n action = critic.get_action(state)\n\n self.assertIsInstance(action, int)\n\n action, q_value = critic.get_target_action_and_q_value(state)\n\n self.assertIsInstance(action, int)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns initializations for controller states, actions, communications, and outputs.
def controller_bookkeeping_vars(self): # Keeps track of all the controller states. controller_states = np.zeros(( self._num_communication_turns + 1, self._num_controllers, self._controller_state_size)) # Keeps track of all controllers' selected actions (communication + output). ...
[ "def _get_init_states(self):\n init1 = self.ext_builder.addInput(self.state_dims[:1], iclass=NormalInputNode)\n init2 = self.ext_builder.addInput(self.state_dims[1:], iclass=NormalInputNode)\n \n return [init1, init2]", "def completed_initializations(self) -> Dict[Instance, ActionResult]:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits the data frame into training, validation, and test data frames. This also calibrates the scaling object and transforms the data for each split.
def split_data(self, df, valid_boundary=2016, test_boundary=2018): stock_count = len(self.sl) test_ratio = 0.2 print('Stock count:%d'% stock_count) train_x = [] test_x = [] for label_, d_ in enumerate(self.sl): stock_train_len = int(len(d_.train_y) * (1 - tes...
[ "def scale_split(df, train_ratio=0.65):\r\n n_train = int(df.values.shape[0]*0.65)\r\n X = df.drop('y', axis=1).values\r\n y = df['y'].values.reshape(df.shape[0], 1)\r\n \r\n X_scaler = StandardScaler()\r\n X = X_scaler.fit_transform(X)\r\n \r\n y_scaler = StandardScaler()\r\n y = y_scale...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calibrates scalers using the data supplied.
def set_scalers(self, df): print('Setting scalers with training data...') column_definitions = self.get_column_definition() id_column = utils.get_single_col_by_input_type(InputTypes.ID, column_definitions) target_column = utils.get_...
[ "def Calibrate(datafile):\n\t\n\twls = pl.array([])\t\t\t\t\t\t\t\t\t\t\t\t\t# wavelength in nm\n\tbeampwr = pl.array([])\t\t\t\t\t\t\t\t\t\t\t\t# measured fianium beam power in nW\n\tdarkpwr = pl.array([])\t\t\t\t\t\t\t\t\t\t\t\t# measured ambient light power in nW\n\tinttime = pl.array([])\t\t\t\t\t\t\t\t\t\t\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns fixed model parameters for experiments.
def get_fixed_params(self): fixed_params = { 'total_time_steps': 40, 'num_encoder_steps': 39, 'num_epochs': 100, 'early_stopping_patience': 10, 'multiprocessing_workers': 2, } return fixed_params
[ "def get_fixed_params():\n fixed_params = {\n 'total_time_steps': 8 * 24,\n 'num_encoder_steps': 7 * 24,\n 'num_epochs': 100,\n 'early_stopping_patience': 5,\n 'multiprocessing_workers': 5\n }\n return fixed_params", "def get_fixed_params(self):\n raise NotImplem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns default optimised model parameters.
def get_default_model_params(self): model_params = { 'dropout_rate': 0.3, 'hidden_layer_size': 160, 'learning_rate': 0.01, 'minibatch_size': 64, 'max_gradient_norm': 0.01, 'num_heads': 1, 'stack_size': 1 } retu...
[ "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_pena...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sum of all the values from one prop of the fund daily report; of course, many of the props make no sense to sum
def tot(self, prop="基金现值", date=yesterdayobj()): res = 0 for fund in self.fundtradeobj: res += fund.dailyreport(date).iloc[0][prop] return res
[ "def sum_values(self):\n raise NotImplementedError", "def summation(self, children, prop):\n total = 0\n for child in children:\n try:\n total = total + self.get(child).get(prop)\n except:\n print 'Child of ', self.name, ',', child, \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
merge the cftables of the different funds into one table
def _mergecftb(self): dtlist = [] for fund in self.fundtradeobj: dtlist2 = [] for _, row in fund.cftable.iterrows(): dtlist2.append((row["date"], row["cash"])) dtlist.extend(dtlist2) nndtlist = set([item[0] for item in dtlist]) nndtlis...
[ "def _all_funds():\n return _df", "def combine_tables():\n ranked = unicef_data()\n cpi_table = cpi_data()\n cpi_and_cl = cpi_table.join(ranked, 'Country / Territory',\n 'Countries and areas', inner=True)\n return cpi_and_cl", "def transferfunds(self):", "def merg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a virtual status table with an mf(cash) column based on the given totmoney and cftable
def _vcash(totmoney, totcftable, cashobj): cashl = [] cashl.append(totmoney + totcftable.iloc[0].cash) for i in range(len(totcftable) - 1): date = totcftable.iloc[i + 1].date delta = totcftable.iloc[i + 1].cash if delta < 0: cashl.append( ...
[ "def get_cash(self):\n\n\t\tpass", "def cash_income(df):\n return (df.aftertax_income -\n (1 - tc.HOUSING_CASH_SHARE) * df.housing_ben -\n (1 - tc.MCAID_CASH_SHARE) * df.mcaid_ben -\n (1 - tc.MCARE_CASH_SHARE) * df.mcare_ben -\n (1 - tc.OTHER_CASH_SHARE) * df.other_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Revert the associated document instance back to this revision. Return the document instance.
def revert(self): self.instance.save() return self.instance
[ "def restore(self):\n documentUrl = self.metaData.graveyard[0].selfLink + \"/restore\"\n response = self._adapter.putRequest(documentUrl, self._baseHeader, \"{}\")\n self.metaData.graveyard.pop()\n\n return Document(self._client, response['Headers']['location'])", "def revert(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the DCT-II matrix of order n
def dctmtx(n): x,y = np.meshgrid(range(n), range(n)) D = np.sqrt(2.0/n) * np.cos(np.pi * (2*x+1) * y / (2*n)) D[0] /= np.sqrt(2) return D
[ "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
klUCB index computation for Poisson distributions.
def klucb_poisson(x, d, precision=1e-6): upperbound = x+d+sqrt(d*d+2*x*d) # looks safe, to check: left (Gaussian) tail of Poisson dev return klucb(x, d, kl_poisson, upperbound, precision)
[ "def Poisson_map(lamb,u=random(),epsilon=0):\r\n k=0\r\n probability=exp(-lamb)\r\n while 1:\r\n if u<probability+epsilon:\r\n return k\r\n else:\r\n u=u-probability\r\n k=k+1\r\n probability=probability*lamb/(k+1)", "def build_prior_KL(self):\n KL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform style transfer on a detected class in a frame
def create_stylized_detection(style_transfer_executor, style_transfer_class, frame: np.ndarray, detections: list, resize_factor, labels: dict): for detection in detections: class_idx, box, confidence = [d for d in detection] label = labels[class_idx][0] if label...
[ "def set_style(self):", "def CSSClasses(self):", "def update_style(self):\n pass", "def highlight(self):\n\n original_style = self.web_element.get_attribute('style')\n self.apply_style(\"background: yellow; border: 2px solid red;\")\n time.sleep(.3)\n self.apply_style(origin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an inference executor from a style prediction network, a style transfer network, a list of backends, and a style image.
def __init__(self, style_predict_model_path: str, style_transfer_model_path: str, style_image: np.ndarray, backends: list, delegate_path: str): self.style_predict_executor = network_executor_tflite.TFLiteNetworkExecutor(style_predict_model_path, backends, ...
[ "def construct_graph(network_class: Type[InferenceNetwork],\n config: Path, checkpoint_dir: str,\n batch_size: int,\n batches_per_step: int,\n image_filenames: Tuple[str],\n loop: bool,\n preprocess_fn:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }