| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
Function to remove a project entirely | def remove_single_project(project_name):
p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)
p.wait() | [
"def remove(project_id):\n remove_project(project_id)",
"def on_removeProject(self):\n self.log.detail(\">>> Launch 'remove Project' ...\")\n selItems = self.tw_myProjects.selectedItems() or []\n if selItems:\n #--- Check Project ---#\n if selItems[0].project not in s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
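The `remove_single_project` row above shells out to `rm -rf` through a subprocess, which is fragile when a project name contains spaces or shell metacharacters. A minimal sketch of the same removal using only the standard library, with a hypothetical `PROJECTS_PATH` standing in for `context.__PROJECTS_PATH__`:

```python
import os
import shutil

PROJECTS_PATH = "/var/lib/wcscanner/projects"  # hypothetical stand-in for context.__PROJECTS_PATH__

def remove_single_project(project_name: str) -> None:
    """Remove a project directory without invoking a shell."""
    path = os.path.join(PROJECTS_PATH, project_name)
    # ignore_errors mirrors rm -rf: a missing directory is not an error
    shutil.rmtree(path, ignore_errors=True)
```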
Dev function used to remove all projects | def __remove_all_projects__():
p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)
p.wait() | [
"def test_remove_project(self):\n pass",
"def prune_projects(cls, obj):\r\n\r\n dead = obj.projects - set([x.id() for x in sublime.windows()])\r\n for key in dead:\r\n obj.projects.remove(key)",
"def remove_project(project):\r\n run('rm -rf %s/user_builds/%s' % (env.code_dir, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dev method used to remove the base directory of the application | def __remove_base_directory__():
p = subprocess.Popen('rm -rf {}/.wcscanner'.format(context.__BASE_PATH__), shell=True)
p.wait() | [
"def cleanup_step(self):\n remove_dir(self.short_start_dir)",
"def remove_basepath(self, basepath):\n\n # remove leading '/' from basepath so it doesn't screw stuff up\n target = self.install_dir / basepath.lstrip('/')\n\n try:\n # move all contents to the mod-root\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new revision for each instance of the requested model | def create_revisions_for(model):
total = model.objects.count()
for idx, obj in enumerate(model.objects.iterator()):
with create_revision():
obj.save()
if idx % 100 == 0:
logger.info('Created revision for %s: %s / %s',
model._meta.verbose_name, idx ... | [
"def create_revision(self):\r\n pass",
"def create_initial_revisions(self, app, model_class, verbosity=2, **kwargs):\n # Import the relevant admin module.\n try:\n import_module(\"%s.admin\" % app.__name__.rsplit(\".\", 1)[0])\n except ImportError:\n pass\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a sequence of (app_label, model_name) pairs, determine which are still Django models and registered with reversions | def reversion_models(model_pairs):
for app_label, model_name in model_pairs:
try:
model = apps.get_model(app_label, model_name)
if reversion.is_registered(model):
yield model
else:
logger.warn("Model not registered with reversions %s %s",
... | [
"def register_models(self, app_label, *models):\n for model in models:\n # Store as 'name: model' pair in a dictionary\n # in the app_models dictionary\n model_name = model._meta.model_name\n model_dict = self.app_models.setdefault(app_label, SortedDict())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
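The two django-reversion rows above lean on `reversion.is_registered` and the `create_revision` context manager. A condensed sketch combining both, assuming django-reversion is installed and the target model is already registered:

```python
import reversion
from django.apps import apps

def create_revisions_for_registered(app_label: str, model_name: str) -> None:
    """Create an initial revision for every instance of a reversion-registered model."""
    model = apps.get_model(app_label, model_name)
    if not reversion.is_registered(model):
        return
    for obj in model.objects.iterator():
        # each save inside create_revision() produces one Version record
        with reversion.create_revision():
            obj.save()
```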
A post_migrate signal handler which creates revisions for models listed in appropriately annotated migrations. | def create_versions_after_migration(**kwargs):
migrations = [migration
for migration, rollback in kwargs.get('plan', [])
if not rollback]
models: Set[Any] = set()
for migration in migrations:
models.update(getattr(migration, 'REVISED_MODELS', []))
with transa... | [
"def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False",
"def post_migrations(self):",
"def model_pre_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = True",
"def handle_migrations():\n call_command(\n \"migrate\",\n )",
"def _post... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
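Django passes the migration `plan` into `post_migrate` receivers, which is exactly what the handler above filters on. A sketch of wiring such a handler into an `AppConfig`, with a hypothetical `myapp` label and the same `REVISED_MODELS` annotation convention:

```python
from django.apps import AppConfig
from django.db.models.signals import post_migrate

def create_versions_after_migration(**kwargs):
    # 'plan' is a list of (migration, rollback_flag) pairs supplied by Django
    migrations = [m for m, rollback in kwargs.get("plan", []) if not rollback]
    revised = set()
    for migration in migrations:
        # REVISED_MODELS is the annotation convention used by the row above
        revised.update(getattr(migration, "REVISED_MODELS", []))
    # ... create revisions for each (app_label, model_name) pair in `revised`

class MyAppConfig(AppConfig):
    name = "myapp"  # hypothetical app label

    def ready(self):
        post_migrate.connect(create_versions_after_migration, sender=self)
```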
bsort: simple sorting algorithm that uses any comparison function. seq: a list to be sorted. cmp: a function for comparing two elements of seq. | def bsort(seq, cmp):
sorted = False # assume the seq is not sorted to start with
while not sorted:
sorted = True # assume it's already sorted correctly
for index, value in enumerate(seq): # for every element in seq
if index > 0: # past the first..
... | [
"def cmp(a, b):\n return (a > b) - (a < b)",
"def merge_sort(lst):",
"def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
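The `bsort` document is truncated by the viewer. A complete bubble sort under the same signature might look like the following sketch (not the original body):

```python
def bsort(seq, cmp):
    """Bubble-sort `seq` in place using comparison function `cmp`.

    `cmp(a, b)` should return a positive number when `a` belongs after `b`.
    """
    swapped = True
    while swapped:
        swapped = False
        for i in range(1, len(seq)):
            if cmp(seq[i - 1], seq[i]) > 0:
                seq[i - 1], seq[i] = seq[i], seq[i - 1]
                swapped = True
    return seq

# usage: ascending order with the classic three-way comparison
print(bsort([3, 1, 2], lambda a, b: (a > b) - (a < b)))  # [1, 2, 3]
```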
Interprets the input line as command line arguments to the ``law`` executable and runs it in a subprocess using bash. Output and error streams are piped to the cell. | def law(self, line):
line = line.strip()
if not line:
logger.error(r"the command passed to %law must not be empty")
return
# build the full command
cmd = "law " + line
if line_cmd:
cmd = "{} && {}".format(line_c... | [
"def ilaw(self, line):\n line = line.strip()\n if not line:\n logger.error(r\"the command passed to %ilaw must not be empty\")\n return\n\n argv = shlex.split(line)\n prog = argv.pop(0)\n\n # prog must be a valid law cli prog\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Interprets the input line as command line arguments to the ``law`` executable, but rather than invoking it in a subprocess, it is evaluated interactively (or inline, thus the i) within the running process. This is especially useful for programmatically running tasks that were defined e.g. in the current notebook. | def ilaw(self, line):
line = line.strip()
if not line:
logger.error(r"the command passed to %ilaw must not be empty")
return
argv = shlex.split(line)
prog = argv.pop(0)
# prog must be a valid law cli prog
if prog n... | [
"def law(self, line):\n line = line.strip()\n if not line:\n logger.error(r\"the command passed to %law must not be empty\")\n return\n\n # build the full command\n cmd = \"law \" + line\n if line_cmd:\n cmd = \"{} &... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
register_magics(init_cmd=None, init_fn=None, line_cmd=None, line_fn=None, log_level=None) Registers the two IPython magic methods ``%law`` and ``%ilaw`` which execute law commands either via a subprocess in bash (``%law``) or interactively / inline within the running process (``%ilaw``). init_cmd can be a shell command... | def register_magics(*args, **kwargs):
ipy = None
magics = None
try:
ipy = get_ipython()
except NameError:
logger.error("no running notebook kernel found")
# create the magics
if ipy:
magics = create_magics(*args, **kwargs)
# register it
if ipy and magics:
... | [
"def _register_magics(ipython):\n ipython.register_magic_function(\n _start_magic,\n magic_kind=\"line\",\n magic_name=\"tensorboard\",\n )",
"def load_ipython_extension(ipython):\n\n for module in _MAGICS:\n ipython.register_magic_function(\n getattr(module, 'magic'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
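The negatives for this row show the underlying IPython call. A minimal sketch of registering a line magic with that API, guarding against running outside a kernel as the row does:

```python
from IPython import get_ipython

def hello(line):
    """A toy line magic: %hello <name>."""
    print(f"hello, {line.strip() or 'world'}")

ipy = get_ipython()  # returns None outside IPython/Jupyter
if ipy is not None:
    ipy.register_magic_function(hello, magic_kind="line", magic_name="hello")
```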
CSIIsilonSpec defines the desired state of CSIIsilon | def __init__(__self__, *,
driver: 'outputs.CSIIsilonSpecDriver'):
pulumi.set(__self__, "driver", driver) | [
"def driver(self) -> 'outputs.CSIIsilonSpecDriver':\n return pulumi.get(self, \"driver\")",
"def test_CSI(config, expected):\n assert strip_control_sequences(config) == expected",
"def get_lepsilon_incar(custom_parameters_dictionary=None):\n\n\t\tincar = IncarMaker.get_static_incar()\n\t\tincar['ediff... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Driver is the specification for the CSI Isilon Driver | def driver(self) -> 'outputs.CSIIsilonSpecDriver':
return pulumi.get(self, "driver") | [
"def __init__(__self__, *,\n driver: 'outputs.CSIIsilonSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def __init__(__self__, *,\n driver: 'outputs.CSIVXFlexOSSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def driver(self) -> 'outputs.CSIVXF... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
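The many Pulumi rows that follow all repeat one generated pattern: an `@pulumi.output_type` class whose constructor stores fields with `pulumi.set` and whose properties read them back with `pulumi.get`. A condensed sketch of the pattern with a hypothetical `ExampleSpec`, assuming the `pulumi` SDK is installed:

```python
import pulumi

@pulumi.output_type
class ExampleSpec(dict):
    """Hypothetical output type mirroring the generated CSI*Spec classes."""

    def __init__(__self__, *, driver: str):
        pulumi.set(__self__, "driver", driver)

    @property
    def driver(self) -> str:
        # every getter in these rows is a one-line pulumi.get lookup
        return pulumi.get(self, "driver")
```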
ConfigVersion is the configuration version of the driver | def config_version(self) -> str:
return pulumi.get(self, "config_version") | [
"def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()",
"def increment_config_version(self):\n self.config_version += 1\n if self.config_version > MAX_CONFIG_VERSION:\n self.config_version = 1",
"def get_config_version(self):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replicas is the count of controllers for Controller plugin | def replicas(self) -> int:
return pulumi.get(self, "replicas") | [
"def _num_replicas_for_platform_app(self):\n return max(1, self._num_provisioned_controllers())",
"def get_num_replicas():\n\n tf_replicator = get_tf_replicator()\n\n if tf_replicator:\n return tf_replicator.num_replicas_in_sync\n elif tf.distribute.has_strategy():\n return tf.distribute.get_strat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AuthSecret is the name of the credentials secret for the driver | def auth_secret(self) -> Optional[str]:
return pulumi.get(self, "auth_secret") | [
"def getAuthSecret(self, authKey):\r\n return None",
"def secret(self, secret):\n self._secret = secret",
"def client_secret(self) -> str:",
"def set_secret(self, secret):\r\n self._secret = secret",
"def secret(self, secret):\n\n self._secret = secret",
"def pull_secret(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SideCars is the specification for CSI sidecar containers | def side_cars(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverSideCars']]:
return pulumi.get(self, "side_cars") | [
"def side_cars(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverSideCars']]:\n return pulumi.get(self, \"side_cars\")",
"def get_side_car(self):\n return {\n 'name': 'openvpn',\n 'image': self.configs['OPENVPN_SIDECAR'],\n 'env': [\n {'name': 'OPE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TLSCertSecret is the name of the TLS Cert secret | def tls_cert_secret(self) -> Optional[str]:
return pulumi.get(self, "tls_cert_secret") | [
"def get_certificate_from_secret(secret_name, secret_ns):\n kube = kubernetes.KubeOperator()\n secret = kube.kube_get_secret(secret_name, secret_ns)\n\n if not hasattr(secret, 'data'):\n raise Exception('Invalid secret %s\\\\%s' % (secret_ns, secret_name))\n\n data = secret.data\n if 'tls.crt'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Path of the field to select in the specified API version. | def field_path(self) -> str:
return pulumi.get(self, "field_path") | [
"def from_field_path(self) -> str:\n return pulumi.get(self, \"from_field_path\")",
"def get_field_in_version_json(field_name):\n if not os.environ.get(\"create_version_request\"):\n return None\n request = json.loads(os.environ.get(\"create_version_request\"))\n if not request or not isinstance(requ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | def effect(self) -> Optional[str]:
return pulumi.get(self, "effect") | [
"def testEffects(self):\n \n action = Parser.parse_as(drive.split(\"\\n\"), Action, self.domain)\n self.assert_(isinstance(action.effect, SimpleEffect))",
"def test_effect(self):\n self.check_search(\n dict(similar_to=u'icy wind'),\n [ u'Bubble', u'BubbleBeam', u'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AllowVolumeExpansion is a boolean flag which indicates if volumes can be expanded | def allow_volume_expansion(self) -> Optional[bool]:
return pulumi.get(self, "allow_volume_expansion") | [
"def volume_enable_quotas(volume: str) -> Result:\n arg_list = [\"volume\", \"quota\", volume, \"enable\"]\n return run_command(\"gluster\", arg_list, True, False)",
"def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n siz... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReclaimPolicy is the reclaim policy for the storage class | def reclaim_policy(self) -> Optional[str]:
return pulumi.get(self, "reclaim_policy") | [
"def redrive_policy(self, redrive_policy):\n self._redrive_policy = redrive_policy",
"def rebalance_policy(self):\n return self._rebalance_policy",
"def redrive_policy(self):\n return self._redrive_policy",
"def rebalance_policy(self, rebalance_policy):\n allowed_values = [\"auto\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RevProxyConfig represents the reverse proxy configuration | def config(self) -> 'outputs.CSIPowerMaxRevProxySpecConfig':
return pulumi.get(self, "config") | [
"def configure_proxy(self, proxy):\n server_name = self.get_external_domain()\n tls_enabled = self.get_tls()\n ircd_enabled = self.charm_config.get(\"enable-ircd\")\n federation_enabled = self.get_federation()\n\n if tls_enabled:\n self.external_port = 443\n else... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
LinkConfig is one of the configuration modes for reverse proxy | def link_config(self) -> Optional['outputs.CSIPowerMaxRevProxySpecConfigLinkConfig']:
return pulumi.get(self, "link_config") | [
"def supports_link(self):\n return 'link' in self.config",
"def configuration_url(self):",
"def _setup_links(self):\n pass",
"def test_reverse_proxy_config():\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"1,2,3,4\"\n\n app = create_ctfd(config=ReverseProxyConfig)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
StandAloneConfig is one of the configuration modes for reverse proxy | def stand_alone_config(self) -> Optional['outputs.CSIPowerMaxRevProxySpecConfigStandAloneConfig']:
return pulumi.get(self, "stand_alone_config") | [
"def test_reverse_proxy_config():\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"1,2,3,4\"\n\n app = create_ctfd(config=ReverseProxyConfig)\n with app.app_context():\n assert app.wsgi_app.x_for == 1\n assert app.wsgi_app.x_proto == 2\n assert app.wsgi_app.x_hos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Driver is the specification for the CSI PowerMax Driver | def driver(self) -> 'outputs.CSIPowerMaxSpecDriver':
return pulumi.get(self, "driver") | [
"def __init__(__self__, *,\n driver: 'outputs.CSIPowerStoreSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def driver(self) -> 'outputs.CSIVXFlexOSSpecDriver':\n return pulumi.get(self, \"driver\")",
"def driver(self) -> 'outputs.CSIUnitySpecDriver':\n return pul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CSIPowerStoreSpec defines the desired state of CSIPowerStore | def __init__(__self__, *,
driver: 'outputs.CSIPowerStoreSpecDriver'):
pulumi.set(__self__, "driver", driver) | [
"def test_regulation_mode():\n with expected_protocol(\n DCXS,\n [\n (\"D0\", None),\n (\"c\", 0),\n ],\n ) as inst:\n inst.regulation_mode = \"power\"\n assert \"power\" == inst.regulation_mode",
"def power_configuration(self):\n\t\tPOWER_CONFIG = (I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SnapshotClass is the specification for Snapshot Classes | def snapshot_class(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverSnapshotClass']]:
return pulumi.get(self, "snapshot_class") | [
"def snapshot_class(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSnapshotClass']]:\n return pulumi.get(self, \"snapshot_class\")",
"def snapshot_type(self) -> str:\n return pulumi.get(self, \"snapshot_type\")",
"def __init__(self, snapshot_name):\n self._snapshot_name = snapshot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Restrict the node topologies where volumes can be dynamically provisioned. | def allowed_topologies(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverStorageClassAllowedTopologies']]:
return pulumi.get(self, "allowed_topologies") | [
"def allowed_topologies(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClassAllowedTopologies']]:\n return pulumi.get(self, \"allowed_topologies\")",
"def allowed_topology_access_create(user):\n return user.has_perm(\"vnswww.add_topology\")",
"def _get_allowable_node_list(si, vol_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CSIUnitySpec defines the desired state of CSIUnity | def __init__(__self__, *,
driver: 'outputs.CSIUnitySpecDriver'):
pulumi.set(__self__, "driver", driver) | [
"def driver(self) -> 'outputs.CSIUnitySpecDriver':\n return pulumi.get(self, \"driver\")",
"def test_CSI(config, expected):\n assert strip_control_sequences(config) == expected",
"def test_csi_io(client, core_api, csi_pv, pvc, pod_make): # NOQA\n csi_io_test(client, core_api, csi_pv, pvc, pod_make... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Driver is the specification for the CSI Unity Driver | def driver(self) -> 'outputs.CSIUnitySpecDriver':
return pulumi.get(self, "driver") | [
"def __init__(__self__, *,\n driver: 'outputs.CSIUnitySpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def driver(self) -> 'outputs.CSIVXFlexOSSpecDriver':\n return pulumi.get(self, \"driver\")",
"def __init__(__self__, *,\n driver: 'outputs.CSIVXFlexOSSp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ConfigVersion is the configuration version of the driver | def config_version(self) -> str:
return pulumi.get(self, "config_version") | [
"def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()",
"def increment_config_version(self):\n self.config_version += 1\n if self.config_version > MAX_CONFIG_VERSION:\n self.config_version = 1",
"def get_config_version(self):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replicas is the count of controllers for Controller plugin | def replicas(self) -> int:
return pulumi.get(self, "replicas") | [
"def _num_replicas_for_platform_app(self):\n return max(1, self._num_provisioned_controllers())",
"def get_num_replicas():\n\n tf_replicator = get_tf_replicator()\n\n if tf_replicator:\n return tf_replicator.num_replicas_in_sync\n elif tf.distribute.has_strategy():\n return tf.distribute.get_strat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AuthSecret is the name of the credentials secret for the driver | def auth_secret(self) -> Optional[str]:
return pulumi.get(self, "auth_secret") | [
"def getAuthSecret(self, authKey):\r\n return None",
"def secret(self, secret):\n self._secret = secret",
"def client_secret(self) -> str:",
"def set_secret(self, secret):\r\n self._secret = secret",
"def secret(self, secret):\n\n self._secret = secret",
"def pull_secret(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SideCars is the specification for CSI sidecar containers | def side_cars(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverSideCars']]:
return pulumi.get(self, "side_cars") | [
"def side_cars(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverSideCars']]:\n return pulumi.get(self, \"side_cars\")",
"def get_side_car(self):\n return {\n 'name': 'openvpn',\n 'image': self.configs['OPENVPN_SIDECAR'],\n 'env': [\n {'name': 'OP... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TLSCertSecret is the name of the TLS Cert secret | def tls_cert_secret(self) -> Optional[str]:
return pulumi.get(self, "tls_cert_secret") | [
"def get_certificate_from_secret(secret_name, secret_ns):\n kube = kubernetes.KubeOperator()\n secret = kube.kube_get_secret(secret_name, secret_ns)\n\n if not hasattr(secret, 'data'):\n raise Exception('Invalid secret %s\\\\%s' % (secret_ns, secret_name))\n\n data = secret.data\n if 'tls.crt'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ImagePullPolicy is the image pull policy for the image | def image_pull_policy(self) -> Optional[str]:
return pulumi.get(self, "image_pull_policy") | [
"def _determine_image_pull_policy(self) -> ImagePullPolicy:\n if not self.image_pull_policy:\n _, tag = self._get_image_and_tag()\n if tag == \"latest\" or not tag:\n return ImagePullPolicy.ALWAYS\n return ImagePullPolicy.IF_NOT_PRESENT\n return self.ima... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. | def node_selector(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "node_selector") | [
"def node_selector(self, node_selector: Dict[str, str]):\n\n self._node_selector = node_selector",
"def node_selector(self, node_selector):\n\n self._node_selector = node_selector",
"def list_of_pods_in_a_node(self, node_name, namespace: Optional[str] = None, label_selector: Optional[str] = None):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Path of the field to select in the specified API version. | def field_path(self) -> str:
return pulumi.get(self, "field_path") | [
"def from_field_path(self) -> str:\n return pulumi.get(self, \"from_field_path\")",
"def get_field_in_version_json(field_name):\n if not os.environ.get(\"create_version_request\"):\n return None\n request = json.loads(os.environ.get(\"create_version_request\"))\n if not request or not isinstance(requ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AllowVolumeExpansion is a boolean flag which indicates if volumes can be expanded | def allow_volume_expansion(self) -> Optional[bool]:
return pulumi.get(self, "allow_volume_expansion") | [
"def volume_enable_quotas(volume: str) -> Result:\n arg_list = [\"volume\", \"quota\", volume, \"enable\"]\n return run_command(\"gluster\", arg_list, True, False)",
"def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n siz... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReclaimPolicy is the reclaim policy for the storage class | def reclaim_policy(self) -> Optional[str]:
return pulumi.get(self, "reclaim_policy") | [
"def redrive_policy(self, redrive_policy):\n self._redrive_policy = redrive_policy",
"def rebalance_policy(self):\n return self._rebalance_policy",
"def redrive_policy(self):\n return self._redrive_policy",
"def rebalance_policy(self, rebalance_policy):\n allowed_values = [\"auto\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ControllerStatus is the status of Controller pods | def controller_status(self) -> Optional['outputs.CSIUnityStatusControllerStatus']:
return pulumi.get(self, "controller_status") | [
"def controller_status(self) -> Optional['outputs.CSIVXFlexOSStatusControllerStatus']:\n return pulumi.get(self, \"controller_status\")",
"def status(self) -> ControllerStatus:\n return self._status",
"def status(self) -> Optional['outputs.HorizontalPodAutoscalerStatus']:\n return pulumi.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DriverHash is a hash of the driver specification | def driver_hash(self) -> Optional[int]:
return pulumi.get(self, "driver_hash") | [
"def spec_hash(self, hash):\n # TODO: currently we strip build dependencies by default. Rethink\n # this when we move to using package hashing on all specs.\n if hash.override is not None:\n return hash.override(self)\n node_dict = self.to_node_dict(hash=hash)\n json_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NodeStatus is the status of Node pods | def node_status(self) -> Optional['outputs.CSIUnityStatusNodeStatus']:
return pulumi.get(self, "node_status") | [
"def node_status(self) -> Optional['outputs.CSIVXFlexOSStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def status(self):\n url = API_PATH[\"node_status\"].format(tuneUuid=self._parentTune.uuid())\n rsp_json = self._parse(self._get(url))\n\n for status_obj in rsp_json:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CSIVXFlexOSSpec defines the desired state of CSIVXFlexOS | def __init__(__self__, *,
driver: 'outputs.CSIVXFlexOSSpecDriver'):
pulumi.set(__self__, "driver", driver) | [
"def driver(self) -> 'outputs.CSIVXFlexOSSpecDriver':\n return pulumi.get(self, \"driver\")",
"def sample_control_devices():\n\treturn dsslib.SolutionI(ctypes.c_int32(34), ctypes.c_int32(0))",
"def test_set_st_to_vx(self, cpu):\n cpu.V_register = bytearray([1, 5, 8, 12, 15, 18, 29, 53,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Driver is the specification for the CSI VxFlexOS Driver | def driver(self) -> 'outputs.CSIVXFlexOSSpecDriver':
return pulumi.get(self, "driver") | [
"def __init__(__self__, *,\n driver: 'outputs.CSIVXFlexOSSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def driver(self) -> 'outputs.CSIUnitySpecDriver':\n return pulumi.get(self, \"driver\")",
"def __init__(__self__, *,\n driver: 'outputs.CSIUnitySpecD... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SnapshotClass is the specification for Snapshot Classes | def snapshot_class(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSnapshotClass']]:
return pulumi.get(self, "snapshot_class") | [
"def snapshot_class(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverSnapshotClass']]:\n return pulumi.get(self, \"snapshot_class\")",
"def snapshot_type(self) -> str:\n return pulumi.get(self, \"snapshot_type\")",
"def __init__(self, snapshot_name):\n self._snapshot_name = snapsh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tolerations is the list of tolerations for the driver pods | def tolerations(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverControllerTolerations']]:
return pulumi.get(self, "tolerations") | [
"def tolerations(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverNodeTolerations']]:\n return pulumi.get(self, \"tolerations\")",
"def tolerations(self, tolerations):\n\n self._tolerations = tolerations",
"def toleration(self) -> Dict[str, str]:\n return self._toleration",
"def t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tolerations is the list of tolerations for the driver pods | def tolerations(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverNodeTolerations']]:
return pulumi.get(self, "tolerations") | [
"def tolerations(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverControllerTolerations']]:\n return pulumi.get(self, \"tolerations\")",
"def tolerations(self, tolerations):\n\n self._tolerations = tolerations",
"def toleration(self) -> Dict[str, str]:\n return self._toleration",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Restrict the node topologies where volumes can be dynamically provisioned. | def allowed_topologies(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClassAllowedTopologies']]:
return pulumi.get(self, "allowed_topologies") | [
"def allowed_topologies(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverStorageClassAllowedTopologies']]:\n return pulumi.get(self, \"allowed_topologies\")",
"def allowed_topology_access_create(user):\n return user.has_perm(\"vnswww.add_topology\")",
"def _get_allowable_node_list(si, vol_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ControllerStatus is the status of Controller pods | def controller_status(self) -> Optional['outputs.CSIVXFlexOSStatusControllerStatus']:
return pulumi.get(self, "controller_status") | [
"def controller_status(self) -> Optional['outputs.CSIUnityStatusControllerStatus']:\n return pulumi.get(self, \"controller_status\")",
"def status(self) -> ControllerStatus:\n return self._status",
"def status(self) -> Optional['outputs.HorizontalPodAutoscalerStatus']:\n return pulumi.get(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DriverHash is a hash of the driver specification | def driver_hash(self) -> Optional[int]:
return pulumi.get(self, "driver_hash") | [
"def spec_hash(self, hash):\n # TODO: currently we strip build dependencies by default. Rethink\n # this when we move to using package hashing on all specs.\n if hash.override is not None:\n return hash.override(self)\n node_dict = self.to_node_dict(hash=hash)\n json_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NodeStatus is the status of Node pods | def node_status(self) -> Optional['outputs.CSIVXFlexOSStatusNodeStatus']:
return pulumi.get(self, "node_status") | [
"def node_status(self) -> Optional['outputs.CSIUnityStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def status(self):\n url = API_PATH[\"node_status\"].format(tuneUuid=self._parentTune.uuid())\n rsp_json = self._parse(self._get(url))\n\n for status_obj in rsp_json:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Store browser cookies in a pickle file | def save_cookies_in_pickle(self):
with open(self.path, "wb") as file:
pickle.dump(self.browser.get_cookies(), file)
print(f'Cookies saved to {self.service}.pickle') | [
"def save_cookies(self):\n\n with open(self.location_of_cookies, 'wb') as f:\n pickle.dump(self.get_cookies(), f)\n f.close()",
"def save_cookies(requests_cookiejar, filename):\n with open(filename, 'wb') as f:\n pickle.dump(requests_cookiejar, f)",
"def __save_cookie(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
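The pickle round-trip in this row has a natural load counterpart. A sketch of both halves against a Selenium driver, with `browser` and `path` passed explicitly rather than read from `self` as in the row:

```python
import pickle

def save_cookies(browser, path):
    """Persist a Selenium session's cookies to a pickle file."""
    with open(path, "wb") as f:
        pickle.dump(browser.get_cookies(), f)

def load_cookies(browser, path):
    """Restore cookies into an already-open Selenium session."""
    with open(path, "rb") as f:
        for cookie in pickle.load(f):
            # add_cookie requires the browser to already be on the cookie's domain
            browser.add_cookie(cookie)
```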
read_file: reads a UTF-8 text file and prints it to standard output | def read_file(filename=""):
with open(filename, encoding="utf-8") as n:
print(n.read(), end="") | [
"def read_file(filename=\"\"):\n with open(filename, encoding=\"UTF-8\") as fl:\n print(fl.read(), end=\"\")",
"def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print objects to stderr then exit | def printExit(*objects):
print(*objects, file=sys.stderr)
sys.exit(1) | [
"def finalize_error():\n print('')\n exit(-1)",
"def test_printing_works_stderr():\n old_stderr = sys.stderr\n try:\n sys.stderr = StringIO()\n quick_task(printing_error)\n assert_equals( 'PANIC!', sys.stderr.getvalue())\n finally:\n sys.stderr = old_stderr",
"def prin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot computed solutions to recurrences | def plot_example(length):
rec_plot = []
sol_plot = []
sol = SOL_DICT[INDEX]
for num in range(2, length):
rec_plot.append([num, recur(num)])
sol_plot.append([num, sol(num)])
simpleplot.plot_lines("Recurrence solutions", 600, 600, "number", "value",
[rec_plot,... | [
"def plot_solution(self):\n\n plt.plot(self.x_values, self.analytical(self.x_values, self.C,self.D), label = \"Analytical\")\n plt.plot(self.x_values, self.numerical, label = \"Numerical\")\n plt.title(\"Numerical vs. Analytical Solution\")\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Text definitions to format strings. | def formatter(text):
repl_map = {
"degC": "$^o$C",
"K": "$^o$C",
"month-1": "month$^{{-1}}$",
"day-1": "day$^{{-1}}$",
"d-1": "day$^{{-1}}$",
"decade-1": "decade$^{{-1}}$",
"year-1": "year$^{{-1}}$",
"rcp85": "RCP8.5... | [
"def format(self, text):\n return text.format(**self)",
"def syntax_text():",
"def textFormats():\n log = QtGui.QTextCharFormat()\n log.setFontFamily(\"monospace\")\n \n url = QtGui.QTextCharFormat(log)\n url.setForeground(QtGui.QBrush(QtGui.QColor(\"blue\")))\n url.setFontUnderline(Tru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
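The `formatter` document is truncated; one plausible completion, assuming the replacement table is applied with a simple `str.replace` loop:

```python
def formatter(text: str) -> str:
    """Apply a fixed table of unit/scenario replacements to a label string."""
    repl_map = {
        "degC": "$^o$C",
        "month-1": "month$^{{-1}}$",
        "rcp85": "RCP8.5",
        # ... remaining entries as in the truncated table above
    }
    for old, new in repl_map.items():
        text = text.replace(old, new)
    return text
```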
Sorts the longitudes of the cubes from 0/360 degrees to -180/180. | def regrid_longitude_coord(self, cube):
# make a list with the 'longitude' coord in the form: 0/180/-180/0
neg_lons = ((cube.coord("longitude").points + 180) % 360) - 180
# interpolates the cube data to the new 'longitude' dimensions
cube = cube.interpolate([("longitude", neg_lons)],
... | [
"def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
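The longitude wrap used above is worth seeing in isolation: `((lon + 180) % 360) - 180` maps 0/360 coordinates onto -180/180. A quick check with plain numpy:

```python
import numpy as np

lons = np.array([0.0, 90.0, 180.0, 270.0, 359.0])
wrapped = ((lons + 180) % 360) - 180
print(wrapped)  # [   0.   90. -180.  -90.   -1.]
```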
Create a provenance record describing the hotspot fields plots. | def get_hotspot_provenance(self, suptitle, scenario, ancestor_files):
caption = (f"{suptitle}. Calculated for seasons "
f"{self.seasons[0].upper()}, "
f"{self.seasons[1].upper()} and {self.seasons[2].upper()} "
f"in the future periods {self.cfg['future_pe... | [
"def _write_xy_provenance(cfg, cubes, plot_path, title, *attrs):\n cubes = cubes.copy()\n if isinstance(cubes, iris.cube.Cube):\n cubes = iris.cube.CubeList([cubes])\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a provenance record with the rolling mean diagnostic data. | def get_rolling_mean_provenance(self, suptitle, ancestor_files):
suptitle = suptitle.replace('\n', '')
caption = (f"{suptitle}. For CMIP5 ("
f"{self.formatter(f'cmip5-{self.scenarios[0]}')}, "
f"{self.formatter(f'cmip5-{self.scenarios[1]}')} and "
... | [
"def add_rolling_mean(self, rm):\n self.data['rolling_mean'] = rm",
"def instance(data):\n return FieldPACContentsMeanStd(data)",
"def mean_std_cal(self):\n log_return = np.log(1 + self.data_adj_close.pct_change())\n return (log_return.mean(), log_return.std())",
"def _create_repor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate Prophet parameters. This method validates some key parameters including growth rate and custom_seasonalities. | def validate_params(self) -> None:
# cap must be given when using logistic growth
if (self.growth == "logistic") and (self.cap is False):
msg = "Capacity must be provided for logistic growth"
logging.error(msg)
raise ValueError(msg)
# If custom_seasonalities ... | [
"def _validate_parameter_values(self):\n # pylint: disable=too-many-branches,too-many-locals\n # check for presence of required parameters\n expected_num_params = 5\n num_params = 0\n for param in self._vals:\n if param == '_BE_sub':\n num_params += 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate whether `data` contains specified regressors or not. | def _data_params_validation(self) -> None:
extra_regressor_names = set(self.params._reqd_regressor_names)
# univariate case
if self.data.is_univariate():
if len(extra_regressor_names) != 0:
msg = (
f"Missing data for extra regressors: {self.params.... | [
"def fitRegressor(self, data):\r\n if data.SETS == 2:\r\n self.regressor.fit(data.trainX, data.trainy)",
"def validate(self, data):\n return any(imap(lambda validator: validate_common(validator, data), self.validators))",
"def check_regressor(self):\n\n # Sklearn and Mlxtend stac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get default parameter search space for Prophet model | def get_parameter_search_space() -> List[Dict[str, object]]:
return get_default_prophet_parameter_search_space() | [
"def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n param... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sample draws of the future trend values. Vectorized version of sample_predictive_trend(). | def _sample_predictive_trend_vectorized(
prophet_model: Prophet, df: pd.DataFrame, n_samples: int, iteration: int = 0
) -> np.ndarray:
if prophet_model.growth == "linear":
return sample_linear_predictive_trend_vectorize(
prophet_model, df, n_samples, iteration
)
deltas = prophe... | [
"def get_trend_pred(united_samples, look_back):\n\n features = united_samples[:, :1].astype(str)\n labels = united_samples[:, -1:]\n\n # move all dates a day behind\n delta = -1\n generator = (change_date(date[0], delta_days=delta) for date in features)\n new_dates = np.fromiter(generator, feature... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a matrix of random trend shifts based on historical likelihood and size of shifts. Can be used for either linear or logistic trend shifts. Each row represents a different sample of a possible future, and each column is a time step into the future. | def _make_trend_shift_matrix(
mean_delta: float, likelihood: float, future_length: float, n_samples: int
) -> np.ndarray:
# create a bool matrix of where these trend shifts should go
bool_slope_change = np.random.uniform(size=(n_samples, future_length)) < likelihood
shift_values = np.random.laplace(0, m... | [
"def gen_all_arms_reward_shifts(self):\n reward_shifts = (\n torch.randn(self._num_arms_all) * self.sigma_shift + self.mu_shift\n )\n return reward_shifts",
"def random_shift_logs(self, max_shift, mnemonics=None):\n df = self.logs\n mnemonics = df.columns if mnemonics... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
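A dependency-free sketch of the trend-shift matrix described above: a Bernoulli mask decides where changepoints land, and Laplace-distributed draws decide their size.

```python
import numpy as np

def make_trend_shift_matrix(mean_delta, likelihood, future_length, n_samples):
    """Each row is one sampled future; each column is one future time step."""
    # where do changepoints occur? Bernoulli(likelihood) per cell
    mask = np.random.uniform(size=(n_samples, future_length)) < likelihood
    # how big are they? Laplace shifts scaled by the historical mean delta
    shifts = np.random.laplace(0.0, mean_delta, size=mask.shape)
    return mask * shifts

matrix = make_trend_shift_matrix(0.05, 0.1, future_length=30, n_samples=4)
print(matrix.shape)  # (4, 30)
```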
Vectorizes prophet's logistic uncertainty by creating a matrix of future possible trends. | def _logistic_uncertainty(
prophet_model: Prophet,
mat: np.ndarray,
deltas: np.ndarray,
k: float,
m: float,
cap: np.ndarray,
t_time: np.ndarray,
n_length: int,
single_diff: float,
) -> np.ndarray:
def _ffill(arr: np.ndarray) -> np.ndarray:
mask = arr == 0
idx = n... | [
"def get_emotion_statistics(self):\n statistics_matrix = np.empty((len(self.logits_emotion_major), 8, 3))\n for utterance_idx, utterance in enumerate(self.logits_emotion_major):\n for emotion_idx, emotion in enumerate(utterance):\n statistics_matrix[utterance_idx][emotion_idx... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the 1st version of the cookies checker. In this function, we check the website cookies | def cookies_check_v1():
cookies_check = driver.find_element_by_xpath("//button[@id='didomi-notice-agree-button']")
cookies_check.click() | [
"def test_cookie(self):\n url = '%s?a=b&c=d&e=f' % settings.MODERNIZR_SENTINEL_IMAGE_URL\n response = self.client.get(url)\n self.assertTrue(settings.MODERNIZR_COOKIE_NAME in response.cookies)",
"def cookies_check_v2():\n cookies_check = driver.find_element_by_xpath(\"//button[@class='jad_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the 2nd version of the cookies checker. In this function, we check the website cookies | def cookies_check_v2():
cookies_check = driver.find_element_by_xpath("//button[@class='jad_cmp_paywall_button jad_cmp_paywall_button-cookies jad_cmp_paywall_cookies didomi-components-button didomi-button didomi-dismiss-button didomi-components-button--color didomi-button-highlight highlight-button']")
cookies_c... | [
"def cookies_check_v1():\n cookies_check = driver.find_element_by_xpath(\"//button[@id='didomi-notice-agree-button']\")\n cookies_check.click()",
"def test_cookie(self):\n url = '%s?a=b&c=d&e=f' % settings.MODERNIZR_SENTINEL_IMAGE_URL\n response = self.client.get(url)\n self.assertTrue(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the 1st version of the movie reviews page browser. In this function, we navigate to the movie reviews page in order to get the top ten prolific reviews | def go_to_movie_reviews_page_v1(movie):
#get search bar input and send the movie name as key
search_bar = driver.find_element_by_xpath("//input[@id='header-search-input']")
search_bar.send_keys(movie)
sleep(5)
search_button = driver.find_element_by_xpath("//button[@class='header-search-submit icon i... | [
"def go_to_movie_reviews_page_V2(movie):\n #/film/fichefilm-249877/critiques/spectateurs/\n driver.get(search_url_base+movie)\n # driver.get(\"https://www.allocine.fr/rechercher/?q=yourname\")\n sleep(5)\n movie_link = driver.find_element_by_link_text(movie)\n movie_link.click()\n # sleep(5)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the 2nd version of the movie reviews page browser. In this function, we navigate to the movie reviews page in order to get the top ten prolific reviews | def go_to_movie_reviews_page_V2(movie):
#/film/fichefilm-249877/critiques/spectateurs/
driver.get(search_url_base+movie)
# driver.get("https://www.allocine.fr/rechercher/?q=yourname")
sleep(5)
movie_link = driver.find_element_by_link_text(movie)
movie_link.click()
# sleep(5)
# close_popu... | [
"def go_to_movie_reviews_page_v1(movie):\n #get search bar input and send the movie name as key\n search_bar = driver.find_element_by_xpath(\"//input[@id='header-search-input']\")\n search_bar.send_keys(movie)\n sleep(5)\n search_button = driver.find_element_by_xpath(\"//button[@class='header-search-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a keyboard event into a double2 movement code; if an unrecognized key is obtained, return None. ======KEYBOARD MANUAL====== W/up_arrow = forward S/down_arrow = backward A/left_arrow = turn left D/right_arrow = turn right P = parking V = stop ALL action I = pole up K = pole down ============================ | def db2_movement_convert(evtype, kname):
if evtype == 'down':
if kname == 'w' or kname == 'W' or kname == 'up':
return 'f'
elif kname == 's' or kname == 'S' or kname == 'down':
return 'b'
elif kname == 'a' or kname == 'A' or kname == 'left':
return 'l'
... | [
"def input_pg(self):\n move = None\n\n for event in pg.event.get():\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RIGHT:\n move = MOVE_RIGHT\n\n elif event.key == pg.K_LEFT:\n move = MOVE_LEFT\n\n eli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M6 remeshing formula in 2D, 2 kernel, single precision, o2 splitting. | def test_2D_m6_2k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L4_2,
Support: 'gpu_2k',
... | [
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M6 remeshing formula in 2D, 1 kernel, single precision, o2_FullHalf splitting. | def test_2D_m6_1k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L4_2,
Support: 'gpu_2k',
... | [
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M6 remeshing formula in 2D, 2 kernel, single precision, o2_FullHalf splitting. | def test_2D_m6_2k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L4_2,
Support: 'gpu_2k',
... | [
"def test_2D_m6_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M6 remeshing formula in 3D, 1 kernel, single precision, o2_FullHalf splitting. | def test_3D_m6_1k_sFH():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L4_2,
Support: 'gpu_1k',
... | [
"def test_build_submesh_3(self):\n\n nodes = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.0], \\\n [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]\n\n\n triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M6 remeshing formula in 3D, 2 kernel, single precision, o2_FullHalf splitting. | def test_3D_m6_2k_sFH():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L4_2,
Support: 'gpu_2k',
... | [
"def test_build_submesh_3(self):\n\n nodes = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.0], \\\n [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]\n\n\n triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M4 remeshing formula in 2D, 2 kernel, single precision, o2 splitting. | def test_2D_m4_2k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: 'gpu_2k',
... | [
"def test_pmesh_2(self):\n\n # FIXME: Need to update expected values on macos\n if sys.platform == 'darwin':\n return\n\n\n\n points, vertices, boundary = rectangular_cross(2,2)\n\n\n true_points = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M4 remeshing formula in 2D, 1 kernel, single precision, o2_FullHalf splitting. | def test_2D_m4_1k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: 'gpu_1k',
... | [
"def test_pmesh_2(self):\n\n # FIXME: Need to update expected values on macos\n if sys.platform == 'darwin':\n return\n\n\n\n points, vertices, boundary = rectangular_cross(2,2)\n\n\n true_points = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M4 remeshing formula in 2D, 2 kernel, single precision, o2_FullHalf splitting. | def test_2D_m4_2k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: 'gpu_2k',
... | [
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 2D, 1 kernel, single precision, o2 splitting. | def test_2D_m8_1k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
... | [
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 2D, 2 kernel, single precision, o2 splitting. | def test_2D_m8_2k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
... | [
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 2D, 1 kernel, single precision, o2_FullHalf splitting. | def test_2D_m8_1k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
... | [
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 2D, 2 kernel, single precision, o2_FullHalf splitting. | def test_2D_m8_2k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
... | [
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 3D, 2 kernel, single precision, o2 splitting. | def test_3D_m8_2k():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
... | [
"def test_pmesh_2(self):\n\n # FIXME: Need to update expected values on macos\n if sys.platform == 'darwin':\n return\n\n\n\n points, vertices, boundary = rectangular_cross(2,2)\n\n\n true_points = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 3D, 1 kernel, single precision, o2_FullHalf splitting. | def test_3D_m8_1k_sFH():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
... | [
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing M8 remeshing formula in 3D, 2 kernel, single precision, o2_FullHalf splitting. | def test_3D_m8_2k_sFH():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
... | [
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attach a model to our Buddy, and move it onto `buddy.device`. If a model isn't explicitly passed into the constructor's `model` field, `attach_model` should be called before any optimization, checkpointing, etc happens. | def attach_model(self, model: nn.Module) -> None:
assert isinstance(model, nn.Module)
# Move model to correct device
model.to(self._device)
# Attach model
self._model = model | [
"def _attach_to_model(self, model):\n self._model = model",
"def _addmodel(self, model: Model):\n model = copy.deepcopy(model)\n\n if self.domain is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(model, self.domain)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read-only interface for the active torch device. Autodetected in the constructor based on CUDA support. | def device(self) -> torch.device:
return self._device | [
"def device(self) -> torch.device:\n pass # pragma: no cover",
"def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"def _set_device(self):\n self.device = torch.device(self.args.device)",
"def gpu(self):\n self.__array = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
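The `device` property above is autodetected from CUDA support; a minimal sketch of the detect-then-attach pattern, assuming PyTorch:

```python
import torch
import torch.nn as nn

class Buddy:
    """Minimal sketch: autodetect a device, then move attached models onto it."""

    def __init__(self) -> None:
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._model = None

    def attach_model(self, model: nn.Module) -> None:
        assert isinstance(model, nn.Module)
        self._model = model.to(self._device)

    @property
    def device(self) -> torch.device:
        return self._device

buddy = Buddy()
buddy.attach_model(nn.Linear(4, 2))
print(buddy.device)
```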
Converts Latin characters to similar English characters | def latin_to_english(self, latin_string):
return ''.join(
c for c in unicodedata.normalize('NFD', latin_string)
if unicodedata.category(c) != 'Mn'
and c in self.all_chars) | [
"def convert_to_latin(input_text):\n # caps\n input_text = input_text.replace(\"А\", \"a\")\n input_text = input_text.replace(\"Б\", \"b\")\n input_text = input_text.replace(\"В\", \"v\")\n input_text = input_text.replace(\"Г\", \"g\")\n input_text = input_text.replace(\"Д\", \"d\")\n input_tex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
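The NFD-normalize-and-drop-combining-marks trick above works standalone; `unicodedata.category(c) == 'Mn'` picks out the combining accents:

```python
import unicodedata

def strip_accents(text: str) -> str:
    """Decompose accented characters, then drop the combining marks."""
    return "".join(
        c for c in unicodedata.normalize("NFD", text)
        if unicodedata.category(c) != "Mn"
    )

print(strip_accents("café naïve São"))  # cafe naive Sao
```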
Insert JSON lines from a file or stdin into a CrateDB cluster. If no hosts are specified the statements will be printed. | def insert_json(table=None,
bulk_size=1000,
concurrency=25,
hosts=None,
infile=None,
output_fmt=None):
if not hosts:
return print_only(infile, table)
queries = (to_insert(table, d) for d in dicts_from_lines(infile))
bul... | [
"def do_insert_json_rows(args):\n cc = Client(args.file, args.sheetname)\n row = args.row.replace(\" \", \"\").replace(\"{\", \"{\\\"\").replace(\"}\", \"\\\"}\").replace(\",\", \"\\\",\\\"\").replace(\":\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send a product to the styleboard on the homepage | def _set_send_to_styleboard_product_positions(request, obj, sessionid):
prod_id = obj.get('prod_id')
product = Product.objects.get(pk=int(prod_id))
alt_id = obj.get('alt_id', None)
if not alt_id:
original_image = product.original_image
no_background_image = product.no_background
... | [
"def buy_product():\n try:\n product = db(db.clsb_product.product_code.like(request.args[0]))(\n db.clsb_category.id == db.clsb_product.product_category).select().first()\n except:\n return dict(error=\"Mã sách không đúng\")\n token = request.args[2]\n user = db(db.clsb_user.use... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
generates a dictionary from the occupations CSV file | def gen_dict():
lines = [line for line in csv.reader(open(__ppath__ + "/data/occupations.csv"))] # uses a csv.reader to parse the file, converts the generic iterable to a list
lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and "Total" row, re-expresses as a list of tuples t... | [
"def read_occupations(filename: str) -> dict:\n\n\toccupations = {}\n\twith open(filename, newline=\"\") as csvfile:\n\t\treader = csv.reader(csvfile)\n\n\t\t# We ignore the first header line with the column titles.\n\t\tnext(reader)\n\n\t\tfor row in reader:\n\t\t\tjob_class = row[0]\n\t\t\tpercentage = row[1]\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
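The truncated `gen_dict` builds (occupation, percentage) pairs from a CSV. A complete sketch of the same idea, assuming a hypothetical `occupations.csv` with one header row and trailing total rows:

```python
import csv

def gen_dict(path="occupations.csv"):  # hypothetical file location
    """Map occupation names to float percentages from a CSV file."""
    with open(path, newline="") as f:
        rows = list(csv.reader(f))
    # skip the header row and the trailing "Total" rows; the original sliced [1:-2]
    return {row[0]: float(row[1]) for row in rows[1:-2]}
```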
Get cache path for given fuse_path. If it is a file and file is not in cache, return path to dummy file. If there is no dummy file either, then the file does not exist. In this case, return None | def _get_path_or_dummy(self, fuse_path):
cache_path = self.converter.to_cache_path(fuse_path)
dummy_cache_path = self.converter.add_dummy_ending(cache_path)
if os.path.exists(cache_path):
return cache_path
elif os.path.exists(dummy_cache_path):
return dummy_cache_... | [
"def get_path(self, url):\n cache_path = self._url_to_path(url)\n if os.path.exists(cache_path):\n return cache_path\n\n return None",
"def get_cached_path(self):\n if util.IS_CACHE_ENABLED and not self.physical_key.is_local():\n return ObjectPathCache.get(str(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a Vina docking output PDBQT file into an SDF file with pose id and Vina docking score. | def vina_pose_to_sdf(ifile,ofile):
output = Outputfile("sdf",ofile,overwrite=True)
poses = list(readfile("pdbqt",ifile))
for i in range(len(poses)):
poseid = poses[i].data['MODEL']
del poses[i].data['MODEL']
vinascore = poses[i].data['REMARK'].split()[2]
del poses[i].data['RE... | [
"def save_as_pdb(pose, pose_idx, sanitize=True, removeHs=True):\n name = \"_\".join(os.path.basename(pose.pdbqt).split(\"_\")[:2]) + \"_%02d\" % pose_idx\n pdbfile = name + \".pdb\"\n pdbqt_lines = read_pdbqt_lines (pose.pdbqt, pose.model)\n pdb_block, data = convert_to_pdb_block (pdbqt_lines,removeHs)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Guard against a single num: turn a single number into a constant DataFrame whose index is consistent with the others | def _to_constant_df(self, num):
if isinstance(num, pd.DataFrame):
# pdb.set_trace()
return num
else:
return self.data['ones'].copy() * num | [
"def convert_1_to_0(data_set,column_to_convert=0,index_base=0):",
"def fixture_int_dataframe() -> pd.DataFrame:\n return pd.DataFrame({\"a\": [-1, 0, 1]})",
"def _makeExplicitSampleDF(self, size):\n df_X = self.df_X.copy()\n isolates = df_X.index.tolist()\n isolate_indices = range(len(isolates))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
readFasta reads in the FASTA, calls parseFasta and then calls analyzeSequence to analyze the DNA | def readFasta(self, fp):
for head, seq in self.parseFasta(fp):
#analyzing the sequence
self.analyzeSequence(seq)
#saving the header
if head == '':
continue
else:
self.header.append(head) | [
"def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
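A self-contained sketch of the parse step that `readFasta` delegates to, yielding header/sequence pairs from an open FASTA handle:

```python
def parse_fasta(handle):
    """Yield (header, sequence) pairs from an open FASTA file handle."""
    header, chunks = None, []
    for line in handle:
        line = line.strip()
        if line.startswith(">"):
            if header is not None:
                yield header, "".join(chunks)
            header, chunks = line[1:], []
        elif line:
            chunks.append(line)
    if header is not None:
        yield header, "".join(chunks)

# usage:
# with open("example.fa") as fp:   # hypothetical file
#     for head, seq in parse_fasta(fp):
#         print(head, len(seq))
```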
Enable I2C device (device1). | def EnableI2c(self):
try:
if os.path.exists('/sys/bus/i2c/devices/i2c-0/0-0060'):
result = " - I2C device already enabled!"
else:
with open('/sys/bus/i2c/devices/i2c-0/new_device', 'a') as f:
# 'echo '+i2c_device.driver+' '+i2c_dev... | [
"def enable_i2s(self, enable):\n control = self.get_control()\n if enable:\n control = control | CONTROL_ENABLE\n else:\n control = control & (~CONTROL_ENABLE)\n\n self.set_control(control)",
"def use_i2c():\n _LIB.oled_click_use_i2c()",
"def enable_cl2_copy_ad9866(self):\n self.writ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read I2C raw value. | def i2cRead(self, sensor):
try:
with open(device1_path + "in_" + sensor + "_raw") as raw:
value = raw.read()
except Exception as err:
LOG.error("Error reading I2C device: " + str(err))
value = None
return value | [
"def _read(self):\n self.init()\n ret = self.i2c.writeto(self.address, b'\\x24\\x00')\n time.sleep_ms(50)\n raw_val = self.i2c.readfrom(self.address, 6)\n return raw_val",
"def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to set digital PIN value. | def setPIN(self, DPIN, value):
try:
with open('/sys/class/gpio/' + DPIN + '/value', 'a') as f:
f.write(value)
except Exception as err:
LOG.error("Error setting PIN value: " + str(err)) | [
"def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)",
"def gpio_set_pin(self, pin, value):\n if value:\n # set bit 4\n self._gp_config[pin] |= 1 << 4\n else:\n # clear bit 4\n self._gp_config[pin] &= ~(1 <<... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes the response from the proxy. If the tunnel is successfully created, notifies the client that we are ready to send requests. If not, raises a TunnelError. | def processProxyResponse(self, bytes):
self._protocol.dataReceived = self._protocolDataReceived
if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes):
# print 'test: requestTunnel successfully'
self._protocol.transport.startTLS(self._contextFactory,
self._protocolFactory)
self._tu... | [
"def _onresponse(self, msg):\n reqid = msg['tunnel']\n if not self._pending_requests.has_key(reqid):\n return\n event, handler = self._pending_requests[reqid]\n if event != None:\n # leave result to be picked up by blocked client\n self._pending_requests[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |