query (string) | document (string) | negatives (list) | metadata (dict)
|---|---|---|---|
This function adds Raman experimental data to an existing hdf5 file. It uses the spectrafit.fit_data function to fit the data before saving the fit result and the raw data to the hdf5 file. The data_filename must be in a standardized format to interact properly with this function. It must take the form anyname_temp_tim... | def add_experiment(hdf5_filename, exp_filename):
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] =... | [
"def add_calibration(hdf5_filename, data_filename, label=None):\n # handling input errors\n if not isinstance(hdf5_filename, str):\n raise TypeError('Passed value of `cal_filename` is not a string! Instead, it is: '\n + str(type(hdf5_filename)))\n if not hdf5_filename.split('/... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that allows the user to manually add or remove peaks from the automatic spectra fitting by inputting an add_list and/or a drop_list. The function pulls some data from the existing fit and overwrites it with the new results. | def adjust_peaks(hdf5_file, key, add_list=None, drop_list=None, plot_fits=False):
# handling input errors
if not isinstance(hdf5_file, str):
raise TypeError('Passed value of `hdf5_file` is not a string! Instead, it is: '
+ str(type(hdf5_file)))
if not hdf5_file.split('/')[-1]... | [
"def addPeaks(self):\n if self.peaksButton.isChecked():\n self.peaksButton.setText(\"Done\")\n self.function = ['peaks', []]\n else:\n self.peaksButton.setText(\"Select Peaks\")\n peaks = self.function[1]\n op_peaks = [-x for x in self.function[1]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function prints out a display of the contents of any hdf5 file. It prints the filename followed by a list of the groups and datasets in a familiar directory/file format. Groups (folders) appear bold while datasets (files) appear in a standard font. | def view_hdf5(filename):
# handling input errors
if not isinstance(filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(filename)))
if not filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`file... | [
"def h5_info(args):\n try:\n import cPickle as pickle\n except:\n import pickle\n import h5py\n\n def print_info(name, obj):\n try:\n shape = obj.shape # 'Group' object has no attribute 'shape'\n print name, ' shape = ', shape\n except:\n pri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
cast sha256 to int | def sha256(cls, value):
assert type(value) is str
return int(sha256(value.encode()).hexdigest(), 16) | [
"def hashToInt(h):\n orderBits = Curve.N.bit_length()\n orderBytes = (orderBits + 7) // 8\n if len(h) > orderBytes:\n h = h[:orderBytes]\n\n ret = int.from_bytes(h, byteorder=\"big\")\n excess = len(h) * 8 - orderBits\n if excess > 0:\n ret = ret >> excess\n return ret",
"def ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process all examples in the input directory. Filenames should be of the form CLASSNAMEEXAMPLENAME.yaml, e.g. Person001.yaml | def process_examples(self):
input_dir = self.input_directory
counter_example_dir = self.counter_example_input_directory
if input_dir is None:
input_dir = Path.cwd() / "examples"
if counter_example_dir is None:
counter_example_dir = Path.cwd() / "counter_examples"
... | [
"def test_validating_all_examples(self):\n for example_name in EXAMPLE_FILES:\n with open(os.path.join(EXAMPLES_DIR, example_name)) as ff:\n ex = json.load(ff)\n\n self.validator.validate(instance=ex)",
"def iterate_test_yaml():\n for filename in os.listdir(TEST_DATA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the list of example source inputs. | def example_source_inputs(self, class_name: str = None) -> List[str]:
input_dir = self.input_directory
if input_dir is None:
return []
all_inputs = []
for fmt in self.input_formats:
glob_expr = f"*.{fmt}"
if class_name is not None:
glob... | [
"def get_inputs(self):\n return self.inputs",
"def getListOfInputs(self, *args):\n return _libsbml.Transition_getListOfInputs(self, *args)",
"def get_input_files(self):\n import jip.options\n for opt in self.configuration.get_by_type(jip.options.TYPE_INPUT):\n values = opt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load an object from a dict, using the target class to determine the type of object to create. | def _load_from_dict(self, dict_obj: Any, target_class: Union[str, ElementName] = None) -> Any:
if not self.use_type_designators:
return dict_obj
sv = self.schemaview
if target_class is None:
target_class_names = [c.name for c in sv.all_classes().values() if c.tree_root]
... | [
"def from_dict(data):\n datatype = data[TYPE_FLAG]\n if MODULE_FLAG in data and data[MODULE_FLAG] in sys.modules:\n objtype = getattr(sys.modules[data[MODULE_FLAG]], datatype)\n else:\n # Fallback to search for the type in the loaded modules.\n for module in sys.modules.itervalues():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds fused batch norm layers and folds them into preceding layers. | def _FoldFusedBatchNorms(graph):
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF ... | [
"def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clones layer_op with input_tensor and weight_tensor as new inputs. | def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
... | [
"def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)",
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds all ops and tensors related to found FusedBatchNorms. | def _FindFusedBatchNorms(graph):
input_pattern = graph_matcher.OpTypePattern('*')
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matche... | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Othe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets tensors needed for FusedBatchNormMatch from match_result. | def _GetCommonTensors(match_result, bn_op, bn_input_tensor):
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in tra... | [
"def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds unfused batch norm layers and folds them into preceding layers. | def _FoldUnfusedBatchNorms(graph):
input_to_ops_map = input_to_ops.InputToOps(graph)
for bn in common.BatchNormGroups(graph):
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(graph, bn,... | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Othe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if batch norm has scaling enabled. | def _HasScaling(graph, input_to_ops_map, bn):
rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1 | [
"def is_scale_enabled(self) -> bool:\r\n ...",
"def use_scale(self):\n return self.scale_type > 0",
"def scaling_enabled(self):\n return False",
"def is_scaled(self):\n return self._scaled",
"def isSetScale(self):\n return _libsbml.Unit_isSetScale(self)",
"def _no_batchn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clones a given op, replaces its name and some of its inputs. | def _CloneOp(op, new_name, new_inputs):
inputs = list(op.inputs)
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name) | [
"def clone(self) -> Op:",
"def clone_operations(self, ops, input_mapping):\n # pylint: disable=protected-access\n mapping = copy.copy(input_mapping)\n prev_num_operations = len(self.operations)\n for op in ops:\n if isinstance(op, Variable):\n continue\n if isinstance(op, RandomOperat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes sure that convolution inputs have compatible shapes. | def _AssertConvShapes(self, op_name, input_tensor, weights):
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s... | [
"def validate_input_layer(self, incoming):\n assert len(self.input_shape) == 4, (\n \"{} Input shape error: 2D convolution \"\n \"requires input shape: (batch_size, \"\n \"height, width, channels)\".format(self.type))\n return True",
"def test_convolve_input_dim_chec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes sure that FC layer inputs have compatible shapes. | def _AssertFCShapes(self, op_name, weights, input_tensor):
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' ... | [
"def validInputOutputShapes():\n global model\n model_inputDim = model.layers[0].input_shape[1] #number of inputs\n model_outputDim = model.layers[-1].units #number of outputs\n current_inputDim = inputCol_end - inputCol_start + 1\n current_outputDim = outputCol_end - outputCol_start + 1\n\n #Non ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes sure that shapes of input and output tensors are compatible. | def _AssertShapesMatch(op_name, in_tensor, out_tensor):
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape)) | [
"def validInputOutputShapes():\n global model\n model_inputDim = model.layers[0].input_shape[1] #number of inputs\n model_outputDim = model.layers[-1].units #number of outputs\n current_inputDim = inputCol_end - inputCol_start + 1\n current_outputDim = outputCol_end - outputCol_start + 1\n\n #Non ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the server_enabled of this FtsSftpSettings. | def server_enabled(self, server_enabled):
self._server_enabled = server_enabled | [
"def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def set_dhcpserver_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetDHCPServe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the authentication_method of this FtsSftpSettings. | def authentication_method(self, authentication_method):
self._authentication_method = authentication_method | [
"def setAuthMethod(self, auth_method):\n self.auth_method = auth_method\n if len(self.auth_credentials) == 2:\n username, password = self.auth_credentials\n if self.auth_method == \"basic\":\n from requests.auth import HTTPBasicAuth\n self.h.auth = H... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the keystore_file_path of this FtsSftpSettings. | def keystore_file_path(self, keystore_file_path):
self._keystore_file_path = keystore_file_path | [
"def keystore_file_password(self, keystore_file_password):\n\n self._keystore_file_password = keystore_file_password",
"def set_keyfile(self, keyfile):\n self._keyfile = keyfile",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the keystore_file_password of this FtsSftpSettings. | def keystore_file_password(self, keystore_file_password):
self._keystore_file_password = keystore_file_password | [
"def set_file(self, password_file):\n self.password_file = password_file\n return self",
"def org_apache_felix_https_keystore_key_password(self, org_apache_felix_https_keystore_key_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_key_password = org_apache_felix_h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the ciphers of this FtsSftpSettings. | def ciphers(self, ciphers):
self._ciphers = ciphers | [
"def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")",
"def ciphers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ciphers\")",
"def set_cipher(self, cipher):\r\n self.cipher = cipher",
"def set_ssl(self):\n for para... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the known_users_file_path of this FtsSftpSettings. | def known_users_file_path(self, known_users_file_path):
self._known_users_file_path = known_users_file_path | [
"def __parse_user_keyfiles(self):\n\n user_sshdir = os.path.expanduser('~/.ssh')\n if not os.path.isdir(user_sshdir):\n return\n\n paths = []\n for filename in os.listdir(user_sshdir):\n if filename in SSH_CONFIG_FILES or os.path.splitext(filename)[1] != '.pub':\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the overridden_users_home_directories of this FtsSftpSettings. | def overridden_users_home_directories(self, overridden_users_home_directories):
self._overridden_users_home_directories = overridden_users_home_directories | [
"def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)",
"def set_cifs_homedirs(self, homedirs):\n\n homedir_paths = NaElement('homedir-paths')\n\n for d in homedirs:\n homedir_paths.child_add(NaElement... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
article is initialized with xml text contained inside tags | def __init__(self, article_xml):
self.article_xml = article_xml
self.links = self.grab_links()
self.first_link = self.parse_first_link() | [
"def get_article_content(self):",
"def article(self):\n self.soup = BeautifulSoup(self.raw_text, self.html_parser)\n\n self._article()\n\n if len(self._title) == 0 or len(self._paragraphs) == 0:\n raise ArticleNotParsable()\n\n article = dict()\n\n article['title'] = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a list of the outermost links not in parentheses, a template, or a tag | def grab_links(self):
links = []
link_char = []
w_temp = [] #in template?
par = [] #in parentheses?
rtag = [] #in <ref> tag?
dtag = [] #in <div> tag?
skip_char = []
for i, c in enumerate(self.article_xml):
if i in skip_char: continue #elimina... | [
"def get_non_image_links(links: List[str]) -> List[str]:\n return [link for link in links if not is_image_link(link)]",
"def getOtherRecipeLinks(self):\n data = []\n for link in self.tree.xpath('//div[contains(@class,\"recipes\")]/ul[contains(@class,\"content\")]/*/a'):\n if 'href' in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
filters links to images, files, or other Wikimedia projects; returns false if it's an invalid link (including links with a colon) | def check_link(self, link):
false_links = ["wikipedia:", "w:", "wikitionary:", "wikt:", "wikinews:",
"n:", "wikibooks:", "b:", "wikiquote:", "q:", "wikisource:",
"s:", "wikispecies:", "species:", "wikiversity", "v:",
"wikivoyage:", "voy:",... | [
"def is_good_link(link):\n sad_endings = ['gif', 'pdf', 'jpg', 'gifv', 'png', 'mp3', 'mp4']\n for ending in sad_endings:\n if link.endswith(ending):\n return False\n\n\n bad_websites = ['youtube', 'google', 'reddit', 'amazon', 'wikipedia', 'facebook', 'twitter']\n for text in bad_websi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
strips brackets, returns link destination (not display name) | def clean_link(self, link):
link = link.strip("[]")
if "|" in link:
link = link.split("|",1)[0]
link = link.strip() #remove trailing white space
return link | [
"def extract_image_link(image_link):\n return image_link.strip(\"'[] \")",
"def get_name_from_link(link):\n name = link.split(\"/\")[-1]\n return name",
"def createCompleteLink(link, domain):\n if link is not None and len(link) > 0:\n if re.match('^http', link) is not None:\n return li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluate quality of the fit result. Subclasses can override this method to do post analysis. | def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:
return None | [
"def quality(self):\n return self._quality",
"def eval_fis(self,fis):\n #res = 0.0\n #for cl_state in self.classes:\n # res += cl_state.eval_fis(fis)\n #print \"=>\",res\n #return 1.0/res\n try:\n correct,count = self.quality_fis(fis)\n except ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract curve data from experiment data. This method internally populates two types of curve data. | def _extract_curves(
self, experiment_data: ExperimentData, data_processor: Union[Callable, DataProcessor]
):
self.__processed_data_set = list()
def _is_target_series(datum, **filters):
try:
return all(datum["metadata"][key] == val for key, val in filters.items()... | [
"def make_curve(self, data):\n # Simple case: we have a real number which represents a constant curve\n if isinstance(data, float):\n return parametric.ConstCurve(data)\n\n # Otherwise, \n curve_type = data[0]\n args = data[1:]\n if isinstance(curve_type, float):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return type of experiment. | def _experiment_type(self) -> str:
try:
return self.__experiment_metadata["experiment_type"]
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | [
"def experiment_type(self):\n return self._experiment_type",
"def type(self) -> pulumi.Output['InferenceExperimentType']:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Input['InferenceExperimentType']:\n return pulumi.get(self, \"type\")",
"def get_experiment_type(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter for physical qubit indices. | def _physical_qubits(self) -> List[int]:
try:
return list(self.__experiment_metadata["physical_qubits"])
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | [
"def get_local_qubit_indices(self, qubit_list: list):\n res = []\n for qubit in qubit_list:\n index = self.get_local_qubit_index(qubit)\n if index is None:\n raise Exception(\"Cannot retrieve qubit indices... a qubit does not belong to the current node\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter for backend object. | def _backend(self) -> Backend:
return self.__backend | [
"def backend(self):\n # This never changes (so no read locking needed).\n return self._backend",
"def get_backend(self):\n return self.BACKEND",
"def backend_model(self):\n return self._backend_model",
"def backend_object(self, id):\r\n return self.model.Element.everything.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the experiment options of given job index. | def _experiment_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["experiment_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
... | [
"def _run_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"run_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the analysis options of given job index. | def _analysis_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["analysis_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
retu... | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the run options of given job index. | def _run_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["run_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the transpile options of given job index. | def _transpile_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["transpile_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
re... | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse input kwargs with predicted input. Class attributes will be updated according to the ``options``. For example, if ``options`` has a key ``p0``, and the class has an attribute named ``__p0``, then the attribute ``__p0`` will be updated to ``options["p0"]``. Options that don't have matching attributes will be inclu... | def _arg_parse(self, **options) -> Dict[str, Any]:
extra_options = dict()
for key, value in options.items():
private_key = f"__{key}"
if hasattr(self, private_key):
setattr(self, private_key, value)
else:
extra_options[key] = value
... | [
"def _run_kwargs(cls, kwargs: Dict[str, Any]):\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)",
"def _parse_kwargs(kwargs):\n layout_kwargs = {}\n # For the layout object\n if \"dim\" in kwargs:\n layout_kwa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Key generator that allows switching between keys that are provided in the `secret_key.txt` file. | def switch_key():
with open("secret_key.txt", 'r') as key_file:
api_keys = key_file.read().splitlines()
for api_key in api_keys:
yield api_key | [
"def generate_keys(key_file):\n sk = SecretKey()\n sk.save(key_file)",
"def new_secret_key():\n # FYI: AES keys are just random bytes from a strong source of randomness.\n return os.urandom(SYMMETRIC_KEY_SIZE // 8)",
"def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in rang... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
High level hook called when a SIP has been deposited in a landing zone | def ingestPostProcSipDepositInLandingZone(dataObjectPath, user, zone):
logger.info("ingestPostProcSipDepositInLandingZone()")
logger.info("dataObjectPath: %s" % dataObjectPath)
logger.info("user:%s" % user)
logger.info("zone:%s" % zone) | [
"def place_call_offhold(self) -> None:",
"def place_call_onhold(self) -> None:",
"def _extract_kiss_destination(self):\n self.destination = aprs.Callsign(self.frame)",
"def after_attacked(self, action):",
"def test_contract_pre_exploit(self):\n print(\"PRE EXPLOIT TEST RUNNING...\")\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Do API calls, and save data in cache files. | def do_api_calls_update_cache(self):
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index) | [
"def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes a Linode API call to get the list of nodes. | def get_nodes(self):
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError, e:
print "Looks like Linode's API is down:"
print
print e
sys.exit(1) | [
"def test_list_nodes(self):\n clb = self.get_clb('get', 'clburl/loadbalancers/12345/nodes',\n ((), self.expected_kwargs),\n Response(200), '{\"nodes\": []}')\n d = clb.list_nodes(self.rcs)\n self.assertEqual({'nodes': []}, self.successResultOf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates self._datacenter_cache, containing all Datacenters indexed by ID. | def populate_datacenter_cache(self):
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc | [
"def Datacenters(self):\n if not self._datacenters:\n dcs = self._get_objects(vim.Datacenter)\n for dc in dcs:\n self._datacenters[dc.name] = dc\n return self._datacenters",
"def Datacenters(alias=None, session=None):\n\t\tif not alias: alias = clc.v2.Account.Ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the lowercase city name of the node's data center. | def get_datacenter_city(self, node):
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location | [
"def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")",
"def datacenter(self) -> str:\n return pulumi.get(self, \"datacenter\")",
"def data_center_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_center_name\")",
"def datacenter(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a node to the inventory and index. | def add_node(self, node):
public_ip = [addr.address for addr in node.ipaddresses if addr.is_public][0]
dest = public_ip
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.label] = [dest]
# Inve... | [
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node(self, node):",
"def add_node(self, node):\n self.nodes.add(node)",
"def add(self, node):\n pass",
"def add_node(self, node, player):\n self.nodes[node] = player",
"def add_node(self, inds, node=No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pushes an element onto an array that may not have been defined in the dict. | def push(self, my_dict, key, element):
if key in my_dict:
my_dict[key].append(element);
else:
my_dict[key] = [element] | [
"def push(self, my_dict, key, element):\n\n if key in my_dict:\n my_dict[key].append(element)\n else:\n my_dict[key] = [element]",
"def array_entry(self):\n self._vmcode += \"add\\npop pointer 1\\npush that 0\\n\"",
"def __setitem__(self, key, value):\n msg = f'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the inventory from the cache file and returns it as a JSON object. | def get_inventory_from_cache(self):
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory | [
"def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass",
"def load():\n inventory_path = os.path.join(get_kolla_cli_etc(), INVENTORY_PATH)\n data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the index from the cache file and sets self.index. | def load_index_from_cache(self):
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index) | [
"def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))",
"def _load_index(self):\n try:\n with open(self._index_path,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the regular expression pattern s in dictionary. | def findPattern(self,s):
# pat = re.compile('^'+s+'$')
pat = re.compile(s)
results = {}
for k in self.__clidRep.keys():
if pat.match(str(k)) or pat.match(self.__clidRep[k]):
results[k] = self.__clidRep[k]
return results | [
"def dict_regex_lookup(thedict, regexstr):\n # add anchor characters if they're not explicitly specified to prevent T1001 from matching T1001.001\n regexstr = regexstr.strip()\n if not regexstr.endswith(\"$\"):\n regexstr = regexstr + \"$\"\n if not regexstr.startswith(\"^\"):\n regexstr =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
converts the device list into a JSON string | def devicelist_to_json(self):
devices_json = json.dumps(self.device_list)
print(devices_json) | [
"def get_device(self):\r\n device_list = dict()\r\n # device_list['Cisco-IOS-XE-native:native'] = {'device':[]}\r\n api_data = self._execute_call('Cisco-IOS-XE-native:native')\r\n device = DictQuery(api_data.json).get(\r\n 'Cisco-IOS-XE-native:native')\r\n\r\n # print(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns an integer that represents base_depth for the specified date | def base_depth_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
new_date = str(date)
base_depth_to_return = None
query = "SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')" %(resort_table, date)
connection = get_connection()
if connection i... | [
"def base_depth_average_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns the average of base depth across all years on a specific date | def base_depth_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d... | [
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns int that is avg snowfall on this date over all years | def snowfall_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d" %(r... | [
"def avg(self) -> int:\n return self.sky[\"avg\"]",
"def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend",
"def forecast_for_year(self) -> int:\n return self._forecast_for_date.year",
"def get_daily_average(self):\n from politico.models import Story\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a date that had the highest snowfall during specified year | def highest_snowfall_for_year(resort_name, year):
resort_table = resort_table_dict[resort_name]
year = int(year)
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(YEAR FROM status_date) AS INTEGER) = %d" %(resort_table, year)
connection = get_connection()
snowfall_list = []
if connection is ... | [
"def maxyear():\n\n return datetime.MAXYEAR",
"def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_pe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns list of snowfall for each date in the period | def snowfall_for_period(resort_name, start_date, end_date):
#yyyymmdd
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8]... | [
"def scrape_snowfall():\n base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'\n\n snowfall = []\n for year in [2016, 2017, 2018]:\n for month in range(1, 13):\n for day in range(1, 32):\n try:\n date = '%s%02d%02d' % (year, month... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns list of base_depth for each date in the period | def base_depth_for_period(resort_name, start_date, end_date):
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8])
resor... | [
"def base_depth_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n base_depth_to_return = None\n query = \"SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, date)\n\n connection = get_connection()\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Downloads the olivetti faces dataset and saves it in the output_filepath directory. | def main(output_filepath):
logger = logging.getLogger(__name__)
logger.info('Downloading Olivetti faces...')
olivetti_faces = fetch_olivetti_faces()
data = pd.DataFrame(data=np.apply_along_axis(exposure.equalize_hist, 1, olivetti_faces.data))
labels = pd.DataFrame(data=olivetti_faces.target)
l... | [
"def download(self, path=None, mode=\"pretrained\"):\n valid_modes = ['pretrained', 'test_data']\n if mode not in valid_modes:\n raise UserWarning(\"mode parameter not valid:\", mode, \", should be one of:\", valid_modes)\n if mode == 'pretrained':\n\n if path is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform 1-2 OT for Bob and return Alice's input list m_c without revealing c. | def Bob_OT(c, l, n=100):
# Error handling.
if c != 0 and c != 1:
raise Exception("Input argument c must be either 0 or 1.")
if l > n:
raise Exception("Input argument l cannot be greater than n.")
# (Step 1)
# Bob runs 1-2 ROT.
s_c = Bob_ROT(c, l, n)
# (Step 3)
# Bob rec... | [
"def getMutation(AA,Codon):\r\n temp_mutationlist = []\r\n '''create a list of possible triplets within hamming distance 1 '''\r\n for item in INI.genetic_code.keys():\r\n isvalid = INI.isvalidtriplet(item,Codon)\r\n ''' Hamming distance 1, AA is not equal to the given AA,forbid mutation to s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start a daemon with given daemon class. | def run(self, name: str, daemon_class: object, **kwargs) -> None:
if name in self._running_daemons:
raise AlreadyRunningDaemon(
'Daemon with name "{0}" already running'.format(name)
)
logger.info(self, 'Starting daemon with name "{0}" and class "{1}" ...'
... | [
"def set_daemon_class(class_object):\n if not isinstance(class_object, type):\n raise Exception(\"Bad Daemon class\")\n global DAEMON_CLASS # pylint: disable=W0603\n DAEMON_CLASS = class_object",
"def daemon(self):\n obj = self.subparsers.add_parser(\"daemon\", help=\"Daemon scripts\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stop the daemon with the given name and wait for it, where name is the name given when the daemon was started with the run method. | def stop(self, name: str) -> None:
if name in self._running_daemons:
logger.info(self, 'Stopping daemon with name "{0}" ...'
.format(name))
self._running_daemons[name].stop()
self._running_daemons[name].join()
del self._running_daemon... | [
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def stop_endpoint(name: str = typer.Argument(\"default\", autocompletion=complete_endpoint_name)):\n\n endpoint_dir = os.path.join(State.FUNCX_DIR, name)\n pid_file = os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stop all started daemons and wait for them. | def stop_all(self) -> None:
logger.info(self, 'Stopping all daemons')
for name, daemon in self._running_daemons.items():
logger.info(self, 'Stopping daemon "{0}" ...'.format(name))
daemon.stop()
for name, daemon in self._running_daemons.items():
logger.info(
... | [
"def stop_daemons(self):\n self.stop_receiving_messages_daemon()\n self.stop_sending_messages_daemon()",
"def stopdaemons(self):\n # TODO: we may want to improve this if we had the PIDs from the\n # specific EMANE daemons that we\"ve started\n cmd = [\"killall\", \"-q\", \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add callback to self._daemon_execute_callbacks. See the service_actions function for their usage. | def append_thread_callback(self, callback: collections.Callable) -> None:
self._daemon_execute_callbacks.append(callback) | [
"def add_servicepoint(self, identifier, callback):\n self._services[identifier] = callback",
"def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)",
"def add_done_callback(self, fn):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Give the callback to the running server through tracim.lib.daemons.TracimSocketServerMixin.append_thread_callback | def append_thread_callback(self, callback: collections.Callable) -> None:
self._server.append_thread_callback(callback) | [
"def append_thread_callback(self, callback: collections.Callable) -> None:\n self._daemon_execute_callbacks.append(callback)",
"def listen(self, callback):\n\n self.listen_thread = Thread(target=self.__listen, args=(callback,), daemon=True)\n self.listen_thread.start()",
"def listen(self, c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate that the price amount does not have too many decimal places. The price amount can't have more decimal places than the currency allows. Works only with a Decimal created from a string. | def validate_price_precision(value: Optional["Decimal"], currency: str = None):
# check no needed when there is no value
if not value:
return
currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)
value = value.normalize()
if abs(value.as_tuple().exponent) > curre... | [
"def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price",
"def check_price(self, price):\n\t\tparts = price.partition('.')\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to handle the initialization of the class. Creates an [x,y] sample for each timestep (parameters: std, sequenceLength). | def __init__(self, std, sequenceLength, device):
#create data steps from 2 to 10 with the given sequence length
xTimeSteps = np.linspace(2, 10, sequenceLength + 1)
#create numpy array with sin(x) input
yNp = np.zeros((2, sequenceLength + 1))
... | [
"def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))",
"def sample(self, sequence_length):",
"def __init__(self, samples):\n self._samples = samples",
"def initializeSample(self, theta):",
"def __init__(self, L, T_range):\n self.L = L\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the matrices for the Elman model, in this case W1 and V (parameters: contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize). | def __init__(self, contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize, device):
super(ElmanNet, self).__init__()
self.hidden_layer_size = hiddenLayerSize
# Initializes the W1 matrix
W1 = torch.zeros((contextConcatInputLayerSize, hiddenLa... | [
"def create_model4():\n MAX_LEN = 26\n VOCAB_SIZE = 12602\n num_hidden_layers_lstm = 2\n word_vec_dim = 300\n drop = 0.7\n\n model_lang = Sequential()\n model_lang.add(InputLayer(input_shape=(MAX_LEN,))) ## placeholder for input layer\n model_lang.add(Embedding(VOCAB_SIZE+1, word_vec_dim)) ## ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that retrieves the size of the hidden layer | def get_hidden_layer_size(self):
return self.hidden_layer_size | [
"def hidden_size(self):\n return self._internal.get_hidden_size()",
"def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size",
"def layers_compressed_size(self):\n # don't have this information at this point\n return None",
"def hidden_d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Model forward pass. input: current input at t; contextState: previous output at (t-1). Returns the sequence of hidden states. | def forward(self, x, contextState):
#concatenate input and context state
#x = x.t()
xAndContext = torch.cat((x, contextState), 1)
#calculate next context state (hidden output for current t) with tanh(xAndContext * W1)
... | [
"def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r",
"def forward(ctx, input, ):\n ctx.save_for_back... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if two Elongation objects are equivalent. | def __eq__(self, other):
return isinstance(other, Elongation)\
and len(self.xs) == len(other.xs)\
and all(self.xs == other.xs) and all(self.ys == other.ys)\
and self.gauge_length == other.gauge_length\
and self.sample_width == other.sample_width\
and s... | [
"def areEquivalent(*args):\n return _libsbml.Unit_areEquivalent(*args)",
"def equivalent(self, other):\n return id(self) == id(other)",
"def is_equivalence(self) -> bool:",
"def areEquivalent(*args):\n return _libsbml.UnitDefinition_areEquivalent(*args)",
"def __eq__(self, other):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cross sectional area of the material. | def cross_section(self):
return self.sample_thickness*self.sample_width # m x m = m² | [
"def calc_outer_cross_sectional_area(self):\n return (pi / 4.0) * self.outer_dia ** 2",
"def calc_inner_cross_sectional_area(self):\n return (pi / 4.0) * self.inner_dia ** 2",
"def test_cross_section(self, energy, cross_section):\n assert (CC_NU.cross_section(energy) ==\n pyt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a smoothed version of the Elongation. | def smoothed(self, box_pts=True):
elong = self.copy()
elong.ys = smooth_curve(self.ys, box_pts)
return elong | [
"def ApplySmooth(self) -> None:\n ...",
"def get_smoothed_emission(self, tag, word):\n return 0",
"def setSmoothing(self, smooth=True):\n \n pass",
"def smooth(self) -> Tuple[float, ...]:\n return self._smooth",
"def rts_interval_smoothing(self):\n xN = self.filter_hist... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crop the Elongation by index. | def cropped_index(self, start_i=None, end_i=None, shifted=True):
xs = self.xs[start_i:end_i]
ys = self.ys[start_i:end_i]
if shifted:
xs = xs - xs[0]
return self.__class__(xs, ys, self.gauge_length, self.sample_width, self.sample_thickness, self.name) | [
"def crop(self, N):\n self.data = self.data[:,:N]",
"def crop_to_min_index(self, inplace=True):\n obs_set = super(ObservationROISet, self).crop_to_min_index(inplace)\n obs_set.valid_end = obs_set.length_vector\n return obs_set",
"def get_image_slice(image, index):\n window_size = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the strain index of break. Break is defined herein as the last peak in the stress/strain curve. | def break_index(self, **kwargs):
return self.peak_indices(**kwargs)[0][-1] | [
"def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write Elongation object to a csv file. | def write_csv(elongation, file_name):
e = elongation
with open(file_name, 'w') as f:
f.write(f"""\
Break Load, {e.break_load()}
Break Strength, {e.break_strength()}
Break Elongation, {e.break_elongation()}
Yield Load, {e.yield_load()}
Yield Strength, {e.yield_strength()}
Yield Elongation, {e.yield_elon... | [
"def write_ref_to_csv(self):\n with open (str(self.create_folder().parent.joinpath('REFERENCE.csv')),'a',newline='') as file:\n writer = csv.writer(file)\n writer.writerow(self.acceleration)",
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fna... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read an iterable of elongation files. | def read_elongations(file_names):
return list(itertools.chain(*(read_elongation(f) for f in file_names))) | [
"def readfiles(self, files):\n for filename in files:\n self.read(filename)",
"def ez_reader(*args):\n reader = corpus_reader(*args)\n for each in reader:\n yield unpacker(each)",
"def read_files(*args, **kwargs):\n return FEMData.read_files(*args, **kwargs)",
"def read_files... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Downloads all files from the SugarSync account to the provided output folder | def download_files(self, output, replace=False):
try:
# Create output directory
# self._output_path = os.path.join(output,
# "sugardl_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
# os.makedirs(self._output_path)... | [
"def download(urls, dest_folder):\n pass",
"def download_files(self):",
"def download_results(self, output_dir, progress=None):\n\n if self._uuid is not None:\n self.update()\n\n if not path.exists(output_dir):\n makedirs(output_dir)\n\n if self._dirty:\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves user information to include sync folders | def _get_user_info(self):
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_headers)
add_headers['Authorization'] = self._access_token
resp = requests.get(BASE_URL + "user/{}".fo... | [
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def get_user_info(self):\n pw_struct = pwd.getpwnam(self.user.name)\n return {\n 'home': pw_struct.pw_dir,\n 'uid': pw_struct.pw_uid,\n 'gid': pw_struct.pw_gid,\n }",
"def _get_lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves metadata on all sync folders | def _get_sync_folders(self):
if not self._user_sync_folders_url:
raise ValueError("User sync folders URL not retrieved")
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_hea... | [
"def _get_root_metadata(self):\n r, rx_dict = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'listfolder']),\n params={'folderid': 0})\n return rx_dict['metadata']",
"def list_metadata(context):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If we're unable to establish a connection to the Elasticsearch server, CannotLoadConfiguration (which the circulation manager can understand) is raised instead of an Elasticsearch-specific exception. | def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self):
# Unlike other tests in this module, this one runs even if no
# ElasticSearch server is running, since it's testing what
# happens if there's a problem communicating with that server.
class Mock(ExternalS... | [
"def es_connect(self):\n try:\n self.es = Elasticsearch([self.es_cluster], use_ssl=False, http_auth=(self.es_user_ingest,\n self.es_user_ingest_pwd))\n except Exception as e:\n logging.debug(\"Elastic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The name of the search index is the prefix (defined in ExternalSearchTest.setup) plus a version number associated with this version of the core code. | def test_works_index_name(self):
assert "test_index-v4" == self.search.works_index_name(self._db) | [
"def _index_name(self):\n return settings.SEARCH_INDEX",
"def test_index_name():\n return \"oracc_test\"",
"def base_index_name(self, index_or_alias):\n\n current_re = re.compile(self.CURRENT_ALIAS_SUFFIX+'$')\n base_works_index = re.sub(current_re, '', index_or_alias)\n base_work... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
When all the filters are applied to `start`, the result is `finish`. | def filters_to(start, finish):
for find, replace in filters:
start = find.sub(replace, start)
assert start == finish | [
"def after_all(self) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_all()",
"def active_in_range(self, start, finish):\n return (self.started_by(finish) & self.not_finished_by(start))",
"def after_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterate over a WorkList until it ends, and return all of the pages. | def pages(worklist):
pagination = SortKeyPagination(size=2)
facets = Facets(
self._default_library, None, None, order=Facets.ORDER_TITLE
)
pages = []
while pagination:
pages.append(worklist.works(
self._db, f... | [
"def itermwpages():\n query = {\"format\": \"xml\",\n \"action\": \"query\",\n \"list\": \"allpages\",\n \"aplimit\": 100}\n while True:\n resp = requests.get(API_URL, params=query)\n root = etree.fromstring(resp.content)\n for p in root.iterfind(\"quer... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that when the books created during test setup are ordered by the given `sort_field`, they show up in the given `order`. Also verify that when the search is ordered descending, the same books show up in the opposite order. This proves that `sort_field` isn't being ignored creating a test that only succeeds by cha... | def assert_order(sort_field, order, **filter_kwargs):
expect = self._expect_results
facets = Facets(
self._default_library, Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL, order=sort_field, order_ascending=True
)
expect(order, None, Filter(fa... | [
"def test_it_is_possible_to_sort_by_multiple_fields(self):\n url = f\"{reverse('book-list')}?ordering=-edition_year,-author,title\"\n response = self.client.post(url, data=self.valid_books, format='json')\n\n expected_result = [\n {\"title\": \"thinking, Fast and Slow\", \"author\": ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simulate the creation of an ElasticsearchDSL `Search` object from an ElasticsearchDSL `Query` object. | def query(self, query):
return MockSearch(
self, query, self.nested_filter_calls, self.order,
self._script_fields
) | [
"def _search(self, query):\n return self._request(query)",
"def create_from_search_query(search_query):\n if search_query.total_results_size == 0: # A search query with no results: build minimal details.\n return SearchResult.create_from_search_query_no_results(search_query)\n\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a Query object from a set of facets, then call build() on it. | def from_facets(*args, **kwargs):
facets = Facets(self._default_library, *args, **kwargs)
filter = Filter(facets=facets)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
# Return the rest to be verified in a test-specific way.
... | [
"def facets(self):\r\n \r\n return self.queries",
"def get_queryset(self):\n if self.searchqueryset is None:\n self.searchqueryset = SearchQuerySet()\n sqs = self.searchqueryset\n if self.model:\n sqs = sqs.models(self.model)\n for facet in self.face... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate the 'easy' part of the sort order -- the tiebreaker fields. Return the 'difficult' part. | def validate_sort_order(filter, main_field):
# The tiebreaker fields are always in the same order, but
# if the main sort field is one of the tiebreaker fields,
# it's removed from the list -- there's no need to sort on
# that field a second time.
default_sor... | [
"def validateTechOrder(self, techOrderDict):\n try:\n type = techOrderDict['type']\n value = int(techOrderDict['value'])\n techGid = techOrderDict['id']\n myTech = self.techTree[type]\n \n # have the prereqs been researched?\n num =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that `filter` is a boolean filter that matches one of a number of possibilities. Return those possibilities. | def dichotomy(filter):
assert "bool" == filter.name
assert 1 == filter.minimum_should_match
return filter.should | [
"def validate_filter(filter: Union[List[any], bool]) -> bool:\n if isinstance(filter, bool):\n return bool\n else:\n try:\n left, operator, right = filter\n except ValueError as ve:\n logger.info(ve)\n logger.info(f\"Filter is a {type(filter)}\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a filter only matches when there is no value for the given field. | def assert_matches_nonexistent_field(f, field):
assert (
f.to_dict() ==
{'bool': {'must_not': [{'exists': {'field': field}}]}}) | [
"def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True",
"def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A mock of _chain_filters so we don't have to check test results against super-complicated Elasticsearch filter objects. Instead, we'll get a list of smaller filter objects. | def _mock_chain(self, filters, new_filter):
if filters is None:
# There are no active filters.
filters = []
if isinstance(filters, elasticsearch_dsl_query):
# An initial filter was passed in. Convert it to a list.
filters = [filters]
filters.append... | [
"def test_apply_filter(mocker):\n list_of_filter_dict_keys = [\n 'EqualTo',\n 'Contains',\n 'ContainsAll',\n 'ContainsAny',\n 'ContainsIgnoreCase',\n 'DoesNotContain',\n 'GreaterThan',\n 'GreaterThanOrEqualTo',\n 'DoesNotContainIgnoreCase',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears the model directory and only maintains the latest `checkpoints` number of checkpoints. | def clear_model_dir(self, checkpoints, logger):
files = os.listdir(self.model_dir)
last_modification = [(os.path.getmtime(os.path.join(self.model_dir, f)), f) for f in files]
# Sort the list by last modified.
last_modification.sort(key=itemgetter(0))
# Delete everything but the... | [
"def clear_checkpoints(self):\n if tf.gfile.Exists(str(self.info.checkpoint_path)):\n tf.gfile.DeleteRecursively(str(self.info.checkpoint_path))",
"def clean_checkpoint_dir(self):\n import shutil\n shutil.rmtree(self.checkpointdir, ignore_errors=True)",
"def clear_checkpoint(chec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rebuilds the surfaces based on the original positions and alpha value. This can be used to reset the states of buttons after returning to a Menu a second time. | def reset(self):
self.x = self.x_original
self.alpha = self.alpha_original
# Button "background" - active
self.active_background_surface.set_alpha(self.alpha)
# Button "background" - inactive
self.inactive_background_surface.set_alpha(self.alpha)
# active
... | [
"def toggle_surface(self):",
"def _reset_surface(self):\n self.surface = cairocffi.RecordingSurface(\n cairocffi.CONTENT_COLOR_ALPHA,\n None,\n )\n self.ctx = self.new_ctx()",
"def _createSurface(self):\n # Create main surface\n self.surface = pygame.Surf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rendering the inactive button onto the screen surface. | def render_inactive(self):
# Rendering button "background"
self.screen.blit(self.inactive_background_surface, (self.x, self.y))
# Rendering button text
self.screen.blit(self.active_text_surface, self.active_textRect) | [
"def render_active(self):\n # Rendering button \"background\"\n if self.resize_right:\n self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))\n else:\n self.active_background_surface = pygame.Surface((self.w, self.h))\n self.active_background_surf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rendering the active button onto the screen surface. | def render_active(self):
# Rendering button "background"
if self.resize_right:
self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))
else:
self.active_background_surface = pygame.Surface((self.w, self.h))
self.active_background_surface.set_alpha... | [
"def render_inactive(self):\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y))\n # Rendering button text\n self.screen.blit(self.active_text_surface, self.active_textRect)",
"def draw_button(self):\n self.screen.fill(self.b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether the mouse is on the button and returns a boolean. | def mouse_on_button(self, mouse) -> bool:
return self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y | [
"def is_mouse_on_button(self):\n\t\tmouse = pg.mouse.get_pos()\n\t\tif(mouse[0] > self.x and self.y+100 > mouse[1] > self.y):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def is_mouse_clicked_on_button(self):\n\t\tmouse = pg.mouse.get_pos()\n\t\tclick = pg.mouse.get_pressed()\n\t\tif(self.is_mouse_on_butt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a correct description passes the check and that a dot is added. | def test_description(self):
self.assertEqual(
"Description.",
DescribedModel.parse_obj({"name": "Name", "description": "Description"}).description,
) | [
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at leas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a description with punctuation passes the check. | def test_description_with_punctuation(self):
self.assertEqual(
"Description?",
DescribedModel.parse_obj({"name": "Name", "description": "Description?"}).description,
) | [
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at leas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the description is mandatory. | def test_missing_description(self):
self.check_validation_error("description\n field required", name="Name") | [
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def test_no_description():\n assert (venue1.manageEvent(event1, \"I'm not cramming!\", valid_date_new, None) and\n event1.name == \"I'm no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the description has a nonzero length. | def test_empty_description(self):
self.check_validation_error('description\n string does not match regex ".+"', name="Name", description="") | [
"def testLenZero(self):\n self.assertEqual(0, len(MultilineString()))",
"def test_description_blank(self): # noqa\n field = Talk._meta.get_field('description')\n self.assertTrue(field.blank)",
"def test_same_length_empty_strings_1(self):\n self.assertTrue(hw4.same_length('','',''))"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wait for the clone process to finish | def wait_for_clone(repo, wait_for_ready, http_exc):
start_time = time.time()
while time.time() - start_time < wait_for_ready:
repo.wipe_data()
try:
if repo.is_cloned:
return
except HTTPRequestError:
_mod_log().debug('Failed to get status of the r... | [
"def clone(self):\n if not self.cloned:\n logger.info(\"Cloning the git repo from {}\".format(self.fork.url))\n os.system(\"mkdir {}\".format(self.path))\n result = subprocess.check_output([\"git\", \"clone\", self.fork.url, self.path])\n if not result:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks out the given branch in the given repository on the given system | def checkout(connection, branch, rid=None, repo=None):
if repo is None:
repo = Repository(connection, rid)
return repo.checkout(branch) | [
"def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()",
"def checkout_git_branch():\n branches = get_remote_branches()\n choice_index = -1\n while choice_index < 0 or choice_index > len(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |