| query | document | negatives | metadata |
|---|---|---|---|
Hide the markers in the current model. | def hideModelMarkers(self):
# Handle to current model
cmcModel = getCurrentModel()
# Hide markers
markerSet = cmcModel.getMarkerSet()
for i in range(cmcModel.getNumMarkers()):
marker = markerSet.get(i)
toggleObjectDisplay(marker,False) | [
"def hide(self):\n self.visible = False",
"def hidePlot(self, index):\n self.pathItem_list[index].hide()",
"def videoHideMarkers_clicked(self):\n\t\tif self.videoHideMarkers.isChecked():\n\t\t\tself._draw_on_video = True\n\t\telse:\n\t\t\tself._draw_on_video = False\n\t\tself.refresh()\n\t\t# prin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads the CMC motion into the adjusted model. | def loadCMCMotion(self):
# Load motion file to current model
loadMotion(self.trcFilePath.replace('.trc','_CMC_states.sto')) | [
"def loadCTMC(file_path: str) -> CTMC:\n\tf = open(file_path,'r')\n\tl = f.readline()[:-1] \n\tif l != \"CTMC\":\n\t\tprint(\"ERROR: this file doesn't describe an CTMC: it describes a \"+l)\n\tlabelling = literal_eval(f.readline()[:-1])\n\tname = f.readline()[:-1]\n\tinitial_state = array(literal_eval(f.readline()[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dynamic Programming (Python) implementation of Matrix Chain Multiplication that builds the worst sequence of brackets (i.e., the parenthesization with the largest number of elementary operations). | def matrix_chain_dynamic(dimensions, n):
m = [[-1 for _ in range(n)] for _ in range(n)]
s = [[0 for _ in range(n)] for _ in range(n)]
# multiplying matrix by itself
for i in range(1, n):
m[i][i] = 0
for length in range(2, n):
for i in range(1, n - length + 1):
j = i + ... | [
"def matrix_chain_order(p: List[int]) -> List:\n # n is the number of matrices to multiply\n n = len(p) - 1\n # Initialize m, s with None\n m = []\n s = []\n for i in range(n):\n m += [[None] * n]\n s += [[None] * n]\n # For chains consisting of one matrix, m[i,i] = 0\n for i i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
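The document cell above is truncated mid-loop. As a minimal runnable sketch of the same dynamic-programming idea (written fresh from the textbook formulation, not recovered from the truncated cell), the classic matrix-chain recurrence over a dimension array `p` looks like this; swapping `min` for `max` yields the "worst" bracketing the query describes:

```python
def matrix_chain_order(p, worst=False):
    """Cost of the best (or worst) parenthesization of matrices with dims p[i-1] x p[i]."""
    pick = max if worst else min
    n = len(p) - 1  # number of matrices in the chain
    # m[i][j] = optimal scalar-multiplication count for multiplying matrices i..j (1-indexed)
    m = [[0] * (n + 1) for _ in range(n + 1)]
    for length in range(2, n + 1):              # chain length
        for i in range(1, n - length + 2):
            j = i + length - 1
            m[i][j] = pick(
                m[i][k] + m[k + 1][j] + p[i - 1] * p[k] * p[j]
                for k in range(i, j)
            )
    return m[1][n]

print(matrix_chain_order([10, 20, 30, 40]))              # 18000 (best)
print(matrix_chain_order([10, 20, 30, 40], worst=True))  # 32000 (worst)
```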
Function: reads the redis database and returns the main name of the path where the queried location lives. Parameter: test_name. Returns: | def search_path(self, test_name):
path_list = []
try:
if test_name in Loc_recognize_class.REDIS_MAP and len(Loc_recognize_class.REDIS_MAP[test_name]) > 0:
redis_list = Loc_recognize_class.REDIS_MAP[test_name]
if len(redis_list[0]) > 0:
... | [
"def search_path(self, test_name):\r\n path_list = []\r\n try:\r\n if test_name in Loc_recognize_class.ALL_PATH_MAP and len(Loc_recognize_class.ALL_PATH_MAP[test_name]) > 0:\r\n redis_list = Loc_recognize_class.ALL_PATH_MAP[test_name]\r\n if len(redis_list[0]) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model; only URLs ending with .bin are added. | def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
elif isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.isdir(cache_dir):
return []
cached_models = []
for file in os.listdir(ca... | [
"def get_all_cached_files(cache_dir=None):\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n else:\n cache_dir = str(cache_dir)\n if not os.path.isdir(cache_dir):\n return []\n\n cached_files = []\n for file in os.listdir(cache_dir):\n meta_path = os.path.join(cac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats a user-agent string with basic info about a request. | def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_tf_available():
ua += f"; tensorflow/{_tf_version}"
if DISA... | [
"def format_user_agent(name=None):\n parts = ['TronAPI/%s' % tronapi.__version__,\n '%s/%s' % (platform.python_implementation(),\n platform.python_version())]\n if name:\n parts.insert(0, name)\n return ' '.join(parts)",
"def format_user_agent(name=None):\n pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the commit hash from a filename resolved to a cache file. | def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]):
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file).as_posix())
search = re.search(r"snapshots/([^/]+)/", resolved_file)
if search is None:
retur... | [
"def _git_intern_file(self, file_contents, cwd, commit_hash):\n cmd = 'hash-object -t blob -w --stdin'.split(' ')\n stdin = self.api.m.raw_io.input(file_contents)\n stdout = self.api.m.raw_io.output()\n step_name = 'Hashing modified DEPS file with revision ' + commit_hash\n step_result = self.api.m.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders. This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. | def has_file(
path_or_repo: Union[str, os.PathLike],
filename: str,
revision: Optional[str] = None,
proxies: Optional[Dict[str, str]] = None,
token: Optional[Union[bool, str]] = None,
**deprecated_kwargs,
):
use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
if use_auth_token... | [
"def _candidate_exists(candidate, git_repo):\n # this is how git diffs represent a file that didn't exist in an\n # earlier revision\n if not candidate.fname or candidate.fname == '/dev/null':\n return False\n try:\n with open(os.path.join(git_repo, candidate.fname), 'rb'):\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the repo if needed, cleans up repo_id with the deprecated kwargs `repo_url` and `organization`, and retrieves the token. | def _create_repo(
self,
repo_id: str,
private: Optional[bool] = None,
token: Optional[Union[bool, str]] = None,
repo_url: Optional[str] = None,
organization: Optional[str] = None,
) -> str:
if repo_url is not None:
warnings.warn(
"T... | [
"def create_token():\n credentialManager.delete('token')\n Git()",
"def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the list of files with their last modification timestamp. | def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):
return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)} | [
"def _get_modified_files_list(repo_root):\n git_instance = _get_git_instance(repo_root)\n return git_instance.ls_files(\"-m\")",
"def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`. | def _upload_modified_files(
self,
working_dir: Union[str, os.PathLike],
repo_id: str,
files_timestamps: Dict[str, float],
commit_message: Optional[str] = None,
token: Optional[Union[bool, str]] = None,
create_pr: bool = False,
revision: str = None,
):
... | [
"def upload_release_files():\n version = get_release_version()\n target = sf_files + sourceforge_target_dir(version)\n\n print()\n print(\"Uploading release files...\")\n print(\" Source:\", release_path)\n print(\" Target: \" + target)\n print(\" Files: \" + ', '.join(glob.glob('*')))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sends telemetry that helps track example usage. | def send_example_telemetry(example_name, *example_args, framework="pytorch"):
if is_offline_mode():
return
data = {"example": example_name, "framework": framework}
for args in example_args:
args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None}
... | [
"def send_telemetry(x, y, z):",
"def send_framework_info(framework: str):\n t = tm.Telemetry()\n t.send_event('mo', 'framework', framework)",
"async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes). | def convert_file_size_to_int(size: Union[int, str]):
if isinstance(size, int):
return size
if size.upper().endswith("GIB"):
return int(size[:-3]) * (2**30)
if size.upper().endswith("MIB"):
return int(size[:-3]) * (2**20)
if size.upper().endswith("KIB"):
return int(size[:-... | [
"def _size_to_bytes(size):\n\tunits = 'KMGTPEZY' # note that position of letter is same as power - 1\n\tmatch = re.search(r'^\\s*([-+]?\\s*[0-9]*\\.?[0-9]*)\\s*([' + units + r']?\\s*B?\\s*S?)\\s*', size, re.IGNORECASE)\n\tif match is None or match.group(1) == '':\n\t\traise ValueError(\"size string not in proper f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
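The cell above cuts off inside the `KIB` branch. A self-contained sketch of the same conversion; the decimal branches below are my assumption about how the truncated part continues, inferred from the visible binary ones:

```python
def size_to_bytes(size):
    """Convert '5MB', '3GiB', or a plain int to a byte count."""
    if isinstance(size, int):
        return size
    s = size.upper()
    # Check binary (power-of-two) suffixes first, since 'GIB' also ends with 'B'.
    for suffix, power in (("GIB", 30), ("MIB", 20), ("KIB", 10)):
        if s.endswith(suffix):
            return int(s[: -len(suffix)]) * (2 ** power)
    for suffix, power in (("GB", 9), ("MB", 6), ("KB", 3)):
        if s.endswith(suffix):
            return int(s[: -len(suffix)]) * (10 ** power)
    raise ValueError(f"size {size!r} is not in a valid format")

assert size_to_bytes("5MB") == 5_000_000
assert size_to_bytes("3GIB") == 3 * 2 ** 30
```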
Returns a list of all cached files with appropriate metadata. | def get_all_cached_files(cache_dir=None):
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
else:
cache_dir = str(cache_dir)
if not os.path.isdir(cache_dir):
return []
cached_files = []
for file in os.listdir(cache_dir):
meta_path = os.path.join(cache_dir, f"{file... | [
"def _get_all_cache_files(self):\n files = set()\n dir_tree = os.walk(self.config.get('cachedir', self.CACHEDIR))\n for dirpath, _, filenames in dir_tree:\n for file_name in filenames:\n if 'cache' in file_name:\n files.add(os.path.join(dirpath, file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts repo_name, revision and filename from a URL. | def extract_info_from_url(url):
search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url)
if search is None:
return None
repo, revision, filename = search.groups()
cache_repo = "--".join(["models"] + repo.split("/"))
return {"repo": cache_repo, "revision": revision, "fi... | [
"def extract_name_from_git_url(url):\n if url.endswith(\".git\"):\n url = url[:-4]\n #print \"url:\", url\n splitted = url.rsplit(\":\", 1)\n if len(splitted) == 2:\n url = splitted[1]\n splitted = url.rsplit(\"/\", 1)\n if len(splitted) == 2:\n url = splitted[1]\n return u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove `file`, `file.json` and `file.lock` if they exist | def clean_files_for(file):
for f in [file, f"{file}.json", f"{file}.lock"]:
if os.path.isfile(f):
os.remove(f) | [
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def cleanUp(self, f):\n os.system('rm ' + f)",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move file to repo, following the new Hugging Face Hub cache organization. | def move_to_new_cache(file, repo, filename, revision, etag, commit_hash):
os.makedirs(repo, exist_ok=True)
# refs
os.makedirs(os.path.join(repo, "refs"), exist_ok=True)
if revision != commit_hash:
ref_path = os.path.join(repo, "refs", revision)
with open(ref_path, "w") as f:
... | [
"def move_to_processed(self, file): \n source = self._downloaded_folder / f'{file.name}'\n destination = self._processed_folder / f'{file.name}'\n source.replace(destination)",
"def MoveTo(self):\n print('move image file to...')",
"def _move(self):\n newpath = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Subtracts CLEAN model from data | def subtract_model(self, outfile, del_script=True):
os.system('cp -r {} {}'.format(self.ms, outfile))
ct.subtract_model(outfile, delete=del_script) | [
"def get_subtract_model() -> Tuple[bytes, str, str, str]:\n # Create a builder and construct a graph\n builder = popart.Builder()\n\n data_shape = popart.TensorInfo(\"FLOAT\", [3])\n\n lhs = builder.addInputTensor(data_shape)\n rhs = builder.addInputTensor(data_shape)\n\n output = builder.aiOnnx.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uncover blanks at this location | def uncover_blanks(self, row, col):
checked = {}
to_be_checked = []
to_be_checked.append((row, col))
while len(to_be_checked) > 0:
sq_row, sq_col = to_be_checked.pop()
            if (sq_row, sq_col) in checked:
continue
checked[(sq_row, sq_c... | [
"def removeBlankPoints(self):\n i = 0\n while i < len(self.items):\n if self.items[i].mark.strip() == \"\":\n self.removeItem(i)\n else:\n i+=1",
"def fill_blank_space(self):\n for y in range(self.height):\n for x in range(self.wi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
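The flood-fill loop above is truncated. A hedged, self-contained sketch of the same uncover-blanks pattern as an explicit BFS; the grid encoding (`0` for blank, other numbers for border counts, `9` for a mine) is illustrative and not taken from the row:

```python
from collections import deque

def uncover_blanks(grid, revealed, row, col):
    """BFS flood fill: reveal the blank region around (row, col) plus its numbered border."""
    height, width = len(grid), len(grid[0])
    queue = deque([(row, col)])
    while queue:
        r, c = queue.popleft()
        if (r, c) in revealed:
            continue
        revealed.add((r, c))
        if grid[r][c] != 0:        # numbered border square: reveal it, but don't expand
            continue
        for dr in (-1, 0, 1):      # enqueue all eight neighbors of a blank square
            for dc in (-1, 0, 1):
                nr, nc = r + dr, c + dc
                if 0 <= nr < height and 0 <= nc < width:
                    queue.append((nr, nc))

grid = [[0, 0, 1], [0, 1, 1], [1, 1, 9]]  # 9 marks a mine, purely illustrative
revealed = set()
uncover_blanks(grid, revealed, 0, 0)
print(sorted(revealed))  # everything except the mine square (2, 2)
```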
Returns load balancer name for the current environment. | def get_balancer_name(self):
return '{}-{}'.format(
self.config['namespace'],
self.get_current_env(),
) | [
"def load_balancer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_name\")",
"def load_balancing_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancing_name\")",
"def get_load_balancer(loadBalancerName=None):\n pass",
"def getDNSName(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns load balancer security group short name. | def get_security_group_short_name(self):
return self.config['security_group'] | [
"def group_short_name(self) -> str:\n return pulumi.get(self, \"group_short_name\")",
"def security_group_name(self):\n return self._security_group_name",
"def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")",
"def get_lb_secgrp_name ( base_name, app_name ) :\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the unique name of the target group for the current environment. The difference between this function and `get_target_group_fully_qualified_name` is that this one fits the name into 32 characters. | def get_target_group_name(self, short_name):
app_env = self.get_current_env()
full_name = self.get_target_group_fully_qualified_name(short_name)
namespace = self.config['namespace']
if len(full_name) <= 32:
return full_name
elif len(namespace) + 10 <= 32:
... | [
"def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")",
"def target_group_identifier(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group_identifier\")",
"def _group_name(cls, group=None):\n prefix = GraphqlWsConsume... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
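The cell truncates exactly where the name gets shortened. One plausible way to fit `namespace-env-short_name` into AWS's 32-character target-group limit is sketched below; the hash fallback is an assumption for illustration, not the row's actual scheme:

```python
import hashlib

def fit_name(namespace, env, short_name, limit=32):
    """Build '<namespace>-<env>-<short_name>', shortening deterministically if too long."""
    full_name = f"{namespace}-{env}-{short_name}"
    if len(full_name) <= limit:
        return full_name
    # Keep the namespace readable; replace the rest with a stable hash digest
    # so the same inputs always map to the same shortened name (hypothetical scheme).
    digest = hashlib.sha1(full_name.encode()).hexdigest()[:8]
    return f"{namespace}-{digest}"[:limit]

print(fit_name("myapp", "staging", "web"))  # myapp-staging-web
```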
Returns target groups configuration. | def get_target_groups_config(self):
return self.config['target_groups'] | [
"def get_target_groups_info(self):\n target_groups_config = self.get_target_groups_config()\n groups_info = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n data = self.get_target_group_info(short_name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns load balancer details for the current environment. | def get_balancer_info(self):
try:
response = self.client.describe_load_balancers(
Names=[self.get_balancer_name()],
)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
vpc_id = self.get_vpc_id()
balancers = [balancer for bal... | [
"def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns whether load balancer exists in the current environment. | def balancer_exists(self):
return self.get_balancer_info() is not None | [
"def is_load_balancer_enabled(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n return cluster[\"environment\"][\"sg_lb_enabled\"]",
"def validate_load_balancer(self, is_prod: bool = True) -> bool:\n if is_prod:\n env = \"prod\"\n else:\n env = \"dev... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns load balancer ARN for the current environment. | def get_balancer_arn(self):
return self.get_balancer_info()['LoadBalancerArn'] | [
"def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )",
"def load_balancer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_name\")",
"def network_load_balancer_arns(self) -> pulumi.Ou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns load balancer DNS name for the current environment. | def get_balancer_dns(self):
return self.get_balancer_info()['DNSName'] | [
"def getDNSName(self):\n response = self.client.describe_load_balancers(\n LoadBalancerNames=[self.LBName])\n try:\n self.DNSName=response[\"LoadBalancerDescriptions\"][0][\"DNSName\"]\n except KeyError:\n self.DNSName=\"\"\n return self.DNSName",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all target groups details for the current environment. | def get_target_groups_info(self):
target_groups_config = self.get_target_groups_config()
groups_info = {}
for short_name in target_groups_config.keys():
target_group_name = self.get_target_group_name(short_name)
data = self.get_target_group_info(short_name)
i... | [
"def get_target_groups_config(self):\n return self.config['target_groups']",
"def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns whether target group exists in the current environment. | def target_group_exists(self, short_name):
return self.get_target_group_info(short_name) is not None | [
"def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))",
"def has_group(self, group_name):\n # print self.groups\n return group_name in self.groups",
"def group_exists(self, group_name):",
"def groupExists(self, groupName):\n\t\treturn groupName in self.groups... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns target group ARN for the current environment. | def get_target_group_arn(self, short_name):
target_group_info = self.get_target_group_info(short_name)
return target_group_info['TargetGroupArn'] | [
"def log_group_arn(self) -> str:\n return jsii.get(self, \"logGroupArn\")",
"def group_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"group_arn\")",
"def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")",
"def cloud_w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates load balancer in the current environment. | def create_balancer(self):
app_env = self.get_current_env()
balancer_name = self.get_balancer_name()
subnet_ids = self.get_subnet_ids()
response = self.client.create_load_balancer(
Name=balancer_name,
Subnets=subnet_ids,
SecurityGroups=[self.get_secur... | [
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def create_load_balancer(loadBalancerName=None, instancePort=None, healthCheckPath=None, certificateName=None, certificateDoma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates target groups for the current environment. | def create_target_groups(self):
target_groups_config = self.get_target_groups_config()
vpc_id = self.get_vpc_id()
response_data = {}
for short_name in target_groups_config.keys():
target_group_name = self.get_target_group_name(short_name)
if self.target_group_ex... | [
"def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def create_groups ():\n group_list = ['Cores', 'Coords', 'Vols',]\n for group_name in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes target groups for the current environment. | def delete_target_groups(self):
target_groups_config = self.get_target_groups_config()
for short_name in target_groups_config.keys():
if not self.target_group_exists(short_name):
            self.logger.info('Target group {} does not exist, nothing to delete.'.format(
... | [
"def nuke_target_groups(self) -> None:\n for target_group in self.list_target_groups():\n try:\n self.elbv2.delete_target_group(TargetGroupArn=target_group)\n print(\"Nuke Target Group {0}\".format(target_group))\n except ClientError as exc:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates listeners for the default balancer of the current environment. | def create_listeners(self):
target_groups_config = self.get_target_groups_config()
balancer_arn = self.get_balancer_arn()
response_data = {}
for short_name in target_groups_config.keys():
target_group_name = self.get_target_group_name(short_name)
response = self... | [
"def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)",
"def _initialize_load_balancing(self):\r\n self.logger.info(\"Setting ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes listeners for the default balancer of the current environment. | def delete_listeners(self):
listeners_info = self.describe_listeners()
for listener in listeners_info:
response = self.client.delete_listener(
ListenerArn=listener['ListenerArn']
)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
... | [
"def delete_listeners(ctx):\n if self.balancer_exists():\n self.delete_listeners()\n ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, no listeners to remove... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes balancer for current environment. | def delete_balancer(ctx):
if self.balancer_exists():
self.delete_balancer()
ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))
else:
ctx.info('Load balancer {} does not exist, nothing to delete.'.format(
... | [
"def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')",
"def delete_load_balancer(loadBalancerName=None):\n pass",
"def delete(self):\r\n return self.connection.delete_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Describes balancer for current environment. | def describe_balancer(ctx):
data = self.get_balancer_info()
if data is not None:
ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))
ctx.pp.pprint(data)
else:
ctx.info('Load balancer {} does not exist.'.format(self.ge... | [
"def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )",
"def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates load balancer target groups for current environment. | def create_target_groups(ctx):
data = self.create_target_groups()
ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))
ctx.pp.pprint(data) | [
"def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')",
"def create_target_group(stack,\n name,\n port,\n protocol... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Describes target groups for current environment. | def describe_target_groups(ctx):
data = self.get_target_groups_info()
ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))
ctx.pp.pprint(data) | [
"def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def get_target_groups_config(self):\n return self.config['target_groups']",
"def li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates listeners between load balancer and target groups for current environment. | def create_listeners(ctx):
data = self.create_listeners()
ctx.info('Created listeners for load balancer {}:'.format(
self.get_balancer_name()
))
ctx.pp.pprint(data) | [
"def create_listeners(self):\n target_groups_config = self.get_target_groups_config()\n balancer_arn = self.get_balancer_arn()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes listeners for current environment. | def delete_listeners(ctx):
if self.balancer_exists():
self.delete_listeners()
ctx.info('Deleted all listeners for load balancer {}:'.format(self.get_balancer_name()))
else:
ctx.info('Load balancer {} does not exist, no listeners to remove.'.format(... | [
"def delete_listeners(self):\n listeners_info = self.describe_listeners()\n\n for listener in listeners_info:\n response = self.client.delete_listener(\n ListenerArn=listener['ListenerArn']\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Describes listeners for current environment. | def describe_listeners(ctx):
data = self.describe_listeners()
ctx.info('Listeners details for load balancer {}:'.format(self.get_balancer_name()))
ctx.pp.pprint(data) | [
"def _create_listener(self):",
"def get_all_lvs_listeners_status(self):",
"def _add_listeners(vehicle):\n @vehicle.on_attribute('mode')\n def mode_listener(self,name, msg):\n util.log_info(\"Mode switched to %s\" % msg.name)\n \n if msg.name != shared.status['manual_mode']: # manua... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates fully operational load balancer setup for the current environment. | def create(ctx):
create_target_groups(ctx)
create_balancer(ctx)
create_listeners(ctx)
ctx.info('Load balancers setup completed.') | [
"def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes load balancer for current environment and all related resources. | def delete(ctx):
delete_listeners(ctx)
delete_balancer(ctx)
delete_target_groups(ctx)
ctx.info('Load balancers deletion completed.') | [
"def delete(self):\r\n return self.connection.delete_load_balancer(self.name)",
"def delete_load_balancer(loadBalancerName=None):\n pass",
"def delete(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_load_balancer(loadbalancer_id,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resets load balancer setup for the current environment. | def reset(ctx):
delete(ctx)
create(ctx)
ctx.info('Load balancers reset completed.') | [
"def reset(self):\n return self.master_multi_agent_envs.reset()",
"def reset_env(self, train_env, test_env):\n self.train_env = train_env\n self.test_env = test_env",
"def reset(self):\n self._config = Config()\n self._router = Router(())\n self._middleware = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Momentum update in the paper: model_ema = m * model_ema + (1 - m) * model | def moment_update(model, model_ema, m):
for p1, p2 in zip(model.parameters(), model_ema.parameters()):
p2.data.mul_(m).add_(1-m, p1.detach().data) | [
"def momentum_update(model_q, model_k, m=0.999):\n for p1, p2 in zip(model_q.parameters(), model_k.parameters()):\n p2.data.mul_(m).add_(1 - m, p1.detach().data)",
"def momentum(E,m):\n\treturn math.sqrt(E*E - m*m)",
"def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_inde... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
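The in-place `add_(1 - m, ...)` call in the cell uses a long-deprecated PyTorch signature. A sketch of the same exponential-moving-average update with the current `alpha=` keyword, assuming PyTorch is installed:

```python
import torch

@torch.no_grad()
def moment_update(model, model_ema, m):
    """model_ema = m * model_ema + (1 - m) * model, parameter-wise and in place."""
    for p, p_ema in zip(model.parameters(), model_ema.parameters()):
        p_ema.mul_(m).add_(p.detach(), alpha=1 - m)

# Usage: keep a slowly-moving copy of a small network.
net, net_ema = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
net_ema.load_state_dict(net.state_dict())
moment_update(net, net_ema, m=0.999)
```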
Set an alternative user-agent header | def set_user_agent(self, user_agent: str) -> None:
self.headers['User-Agent'] = user_agent | [
"def _set_agent_header(self):\n self._api_client.set_default_header('User-Agent', self._api_client.user_agent)",
"def setUA(self, useragent):\n\t\tpass",
"def set_user_agent(self, user_agent):\n # set a default user_agent if not specified\n if user_agent is None:\n requests_ua = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract announcement data from the extracted html partial. The html partial should be a Tag object returned from BeautifulSoup4.find() | def parse_announcement_data(self) -> 'Scraper':
logger.info('Parsing extracted html partial')
for tag in self.html_partial: # there are 63 tags
if tag.name == 'h4':
announcement_data = self.get_data_from_tag(tag)
self.announcement_data_list.append(announcemen... | [
"def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check the title and url of the announcement | def check_announcement_content_validity(self, a: dict) -> None:
url_regex = r'[(http(s)?):\/\/(www\.)?a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)'
if a['title'] == '' or type(a['title']) is not NavigableString:
raise AnnouncementContentNotFound('Announcement title... | [
"def _is_news(self, url, year_from):\n pass",
"def check_title(self, title):\n self.logger.info('Check title is displayed correctly.')\n assert_true(self.mainview.item_metadata(title))",
"def test_announcement_view(self):\n response = self.client.get(url_for('main.announcements'))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract announcement data from a BeautifulSoup4 Tag object | def get_data_from_tag(self, tag: Tag) -> dict:
self.verify_tag_structure(tag)
title = tag.string
url = tag.contents[0]['href'] # tag.contents[0].name is 'a'
date_string = tag.next_sibling.next_sibling.contents[0]
published_date = (self.get_date_from_string(date_string))
... | [
"def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a collection of announcement objects in the form of an AnnouncementCollection object | def get_announcements(self, factory: 'AnnouncementFactory') -> 'AnnouncementCollection':
collection = factory.get_announcement_collection(self.get_announcement_data_list())
return collection | [
"def get(self):\n announcements = Announcement.query.all()\n announcements = announcements_schema.dump(announcements)\n\n if not announcements:\n return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served\n\n return {'status': 'success', 'annou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a date-only datetime object by extracting a date from a string. The date string should be in the format "May 11th, 2020 by "; otherwise the method raises a DateStringFormatMismatch exception. | def get_date_from_string(date_string: str) -> datetime:
regex = r'^(January|February|March?|April|May|June|July|August|September|October|November|December)' \
r' (\d{1,2})(st|nd|rd|th), (\d{4}) by $'
if re.match(regex, date_string) is None:
raise DateStringFormatMismatch('Sc... | [
"def get_date_from_str(date_str, dt_format):\n return datetime.strptime(date_str, dt_format)",
"def string_to_date(string):\n params = string.strip().split('-')\n year = int(params[0])\n month = int(params[1])\n day = int(params[2])\n d = date(year, month, day)\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
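The regex in the cell is cut off before the actual parsing step. A self-contained sketch of turning a string like "May 11th, 2020 by " into a date-only `datetime`; the month lookup table and the plain `ValueError` (standing in for the row's `DateStringFormatMismatch`) are my substitutions:

```python
import re
from datetime import datetime

MONTHS = ["January", "February", "March", "April", "May", "June", "July",
          "August", "September", "October", "November", "December"]

def date_from_string(date_string):
    """Parse 'May 11th, 2020 by ' into datetime(2020, 5, 11)."""
    pattern = r"^(" + "|".join(MONTHS) + r") (\d{1,2})(?:st|nd|rd|th), (\d{4}) by $"
    match = re.match(pattern, date_string)
    if match is None:
        raise ValueError(f"unexpected date format: {date_string!r}")
    month, day, year = match.group(1), int(match.group(2)), int(match.group(3))
    return datetime(year, MONTHS.index(month) + 1, day)

print(date_from_string("May 11th, 2020 by "))  # 2020-05-11 00:00:00
```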
Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. | def _interactive_input_fn(hparams, decode_hp):
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_i... | [
"def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self._input = raw_input.split('\\n')\n self._input = self._input[0:-1]\n\n for line in self._input:\n direction, steps = line.split()\n self._instructions.append((direction, int(steps)))",
"def gene... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shows an image using matplotlib and saves it. | def show_and_save_image(img, save_path):
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save... | [
"def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')",
"def show_plot(savefig=False, title='Title'):\n if savefig:\n # create dir if it doesn't exist\n if not os.path.is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run hooks after decodes have run. | def run_postdecode_hooks(decode_hook_args, dataset_split):
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet avail... | [
"def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turn LED on for the duration at the given intensity. | def blink(self, duration: int=1, intensity: int=0xff):
# Turn LED on
self.intensity(max(0, min(intensity, 0xff)))
# Turn LED off (after a delay)
upyt.sched.loop.call_later_ms(duration, self.off) | [
"def set_intensity(self, value):\n self._intensity = value\n self._iotool.execute(*self._iotool_lamp_commands(intensity=value))\n self._update_property('intensity', value)",
"def turn_on(color, duration=None):\n if has_gpio:\n pin = get_pin(color)\n gpio.output(pin, True)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Install a fresh Drupal site. Use Drush to set up the Drupal structure in the database | def site_install(path, db_user, db_pass, db_host, db_name):
db_url = 'mysql://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name)
warning = """
WARNING: This is an inherently insecure method for interacting with the
database since the database password will be written to the command line
and will be visible to ... | [
"def install(self):\n if not self.installed:\n self.site.rebuild_site_directory()\n self.link_settings_local()\n self.setup_settings()\n self.site.sync_database()\n self.site.migrate_database()\n self.site.create_admin_user()\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download the latest Drupal project | def download(parent, name=None):
with cd(parent):
if not name:
run("drush dl")
else:
run("drush dl --drupal-project-rename=%s" % name) | [
"def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()",
"def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a dev site by copying an existing live site | def dev_site(live_path, dev_parent, dev_name, dev_db_name='',
base_url='', rewrite_base=''):
with mute():
remote = git.get_remote_url(live_path)
dev_path = '%s/%s' % (dev_parent, dev_name)
if exists(dev_path):
warning = """
A folder already exists at your destination path.
Do y... | [
"def test_render_as_target_existing(self):\n\n # Create source site\n self.source_site = self._make_site(\n name=REMOTE_SITE_NAME,\n url=REMOTE_SITE_URL,\n mode=SITE_MODE_SOURCE,\n description='',\n secret=REMOTE_SITE_SECRET,\n )\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up a complete, vanilla Drupal install. Download Drupal, configure the settings.php database file, configure the .htaccess file, and then populate the database with the default Drupal structure. | def vanilla_site(parent, name, db_name, base_url=None, rewrite_base=None):
# TODO check for trailing slash
path = parent + '/' + name
    print(header("Checking dependencies"))
if exists(path):
warning = """
A folder already exists at your destination path.
Do you wish to overwrite?
"""
co... | [
"def install(self):\n super(WPWebsite, self).install()\n self.create_database()\n self.setup()",
"def install(self):\n if not self.installed:\n self.site.rebuild_site_directory()\n self.link_settings_local()\n self.setup_settings()\n self.sit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
an iterator over the keys of metakey and its dereferenced values. | def __getitem__(self, metakey):
for key in self.metadb[metakey]:
yield key, self.datadb[key] | [
"def items(self):\n for metakey in self:\n yield metakey, self[metakey]",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def __iter__(self):\n return self.meta.iterkeys()",
"def iterentries(self):\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
an iterator over the metakeys and their corresponding values. | def items(self):
for metakey in self:
yield metakey, self[metakey] | [
"def __getitem__(self, metakey):\n for key in self.metadb[metakey]:\n yield key, self.datadb[key]",
"def __iter__(self):\n return self.meta.iterkeys()",
"def __iter__(self):\n for metatag in self.meta.findall(CN('meta:user-defined')):\n yield (metatag.get(CN('meta:name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
an iterator over the unique keys of all metakeys and their dereferenced values. | def unique_values(self):
for key in self.metadb.unique_values():
yield key, self.datadb[key] | [
"def items(self):\n for metakey in self:\n yield metakey, self[metakey]",
"def __iter__(self):\n return self.meta.iterkeys()",
"def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())",
"def _key_index_iter(self: Any) -> Iterator[Tuple[st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
an iterator over the keys whose metakeys satisfy q and their dereferenced values. | def query(self, q):
for key in self.metadb.query(q):
yield key, self.datadb[key] | [
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def __iter__(self):\n\n # First, yield all our local keys\n keys = set()\n for key in self._values:\n keys.add(key)\n yield key\n\n # If we do... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overwrite default hyperparameters of a network, based on the flags | def overwrite_hyperparams(self):
try:
default_hyperparams = self.hyperparams
for key in default_hyperparams:
try:
flag = self.FLAGS[key]
param_value = flag.value
if param_value is not None:
... | [
"def set_default_hyperparameters(self):\n self.hyperparameter_space = {\n 'scale_X': hp.choice('scale_X', ['std', 'mm01', 'mm11', None]),\n 'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11', None]),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves the session (i.e., the weights) as a checkpoint | def save_session(self):
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(ch... | [
"def save_checkpoint(self):\n torch.save(self.model.state_dict(), self.args.checkpoint)",
"def _save_checkpoint(self):\n data = {\n \"policy_state\": self.policy.get_state(),\n \"train_state\": self._get_train_state()\n }\n self.logger.save_checkpoint(data, se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
converts pdf file to xml file | def pdftoxml(pdfdata):
pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
pdffout.write(pdfdata)
pdffout.flush()
xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
tmpxml = xmlin.name # "temph.xml"
cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes "%s" "%s"' % (pdf... | [
"def pdftoxml(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
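The row above shells out to `pdftohtml` through a formatted command string, which breaks if a temp path ever contains shell metacharacters. A hedged sketch of the same conversion using an argument list via `subprocess`, assuming `pdftohtml` is on `PATH`:

```python
import subprocess
import tempfile

def pdf_to_xml(pdfdata: bytes) -> str:
    """Convert PDF bytes to pdftohtml's XML output, avoiding shell interpolation."""
    with tempfile.NamedTemporaryFile(suffix=".pdf") as pdf_file, \
         tempfile.NamedTemporaryFile(mode="r", suffix=".xml") as xml_file:
        pdf_file.write(pdfdata)
        pdf_file.flush()
        # Same flags as the row's command string, passed as a list instead.
        subprocess.run(
            ["pdftohtml", "-xml", "-nodrm", "-zoom", "1.5", "-enc", "UTF-8",
             "-noframes", pdf_file.name, xml_file.name],
            check=True,
        )
        return xml_file.read()
```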
Train a weak classifier based on a given feature. | def trainWeakClassifier(trainingSamples, weights, feature):
#compute feature values
featureValues = []
positiveOrNegative = []
for sample in trainingSamples:
featureValues.append(feature.computeScore(sample[0], 0, 0))
positiveOrNegative.append(sample[1])
#zip with weights an... | [
"def _train(self, feature, label):\n self.mdl.fit(feature, label)",
"def train(self, feature, a_class):\n f = self._training_data\n if feature in f['feature'].keys():\n f['feature'][feature]+=1\n else:\n f['feature'][feature]=1\n if a_class in f['class'].ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a list of finished workers into a result. | def getResults(workers):
results = []
for worker in workers:
results += worker.getResults()
return results | [
"def contract(self, jobs, result):\n for j in jobs:\n WorkerPool.put(self, j)\n\n r = []\n for i in xrange(len(jobs)):\n r.append(result.get())\n\n return r",
"def finished_analyses(all_workers: Dict[WorkerName, List[Task]]=None) \\\n -> Dict[TaskType, List... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unpacks buffer contents into a dictionary | def read(self, buf):
contents = dict()
for element in self.elements:
if element.offset + element.size > len(buf):
logger.trace("cannot unpack {} for {}.{} buffer too small {}",
element.name, element.block_name, element.block_version, len(buf))
... | [
"def _unpack_dictionary(buffer, dictionary, rawdata=False):\n # get format and substructures of dictionary\n fmt, sub = _get_fmt_string(dictionary, retsub=True)\n\n # unpack into OrderedDict\n data = OrderedDict(zip(dictionary, struct.unpack(fmt, buffer)))\n\n # remove spares\n if not rawdata:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
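The cell iterates element descriptors carrying offsets and sizes but is truncated before the unpack itself. A generic, runnable sketch of the same pattern with `struct.unpack_from`; the `(name, offset, format)` descriptor shape is illustrative, not the row's `self.elements` layout:

```python
import struct

def read_elements(buf, elements):
    """Unpack named fields from buf; skip fields that run past the end."""
    contents = {}
    for name, offset, fmt in elements:  # fmt is a struct format string, e.g. '<I'
        size = struct.calcsize(fmt)
        if offset + size > len(buf):
            continue  # buffer too small for this field, mirroring the cell's guard
        (contents[name],) = struct.unpack_from(fmt, buf, offset)
    return contents

buf = struct.pack("<IhB", 42, -7, 255)
print(read_elements(buf, [("count", 0, "<I"), ("delta", 4, "<h"), ("flag", 6, "<B")]))
# {'count': 42, 'delta': -7, 'flag': 255}
```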
Translates a word into Pig Latin. The "word" parameter is assumed to be an English word, returned as a string. | def pig_latin(word):
if word[0] in 'aeiou':
return f'{word}way'
return f'{word[1:]}{word[0]}ay' | [
"def pig_latin(word):\n\n # Translate words starting with vowel.\n if word[0] in 'aeiou':\n return word + 'way'\n\n # Translate words starting with consonants.\n return word[1:] + word[0] + 'ay'",
"def get_pig_latin_word(word):\n #To check the words that start with this combination\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cleans the database before fully installing the module by uninstalling old Soupese modules and remapping data | def _clean_database(self):
# noinspection PyUnresolvedReferences
env = self.env
cr = env.cr
modules_to_resolve = [
'ch_vendor_info',
'API_PDA_receiver',
'delivery_report_custom',
'myevo_base',
'myevo_nobutton_sending_email',
... | [
"def clean_up():\n drop_all_tables()\n create_all()",
"def _purge_mlrun_db(self):\n self._delete_mlrun_db()\n self._scale_down_mlrun_deployments()",
"def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)",
"def clean():\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform a PropertiesList into a list of names | def property_list_to_str(properties: th.PropertiesList) -> List[str]:
return [name for (name, prop) in properties.items()] | [
"def getPropertyNames(self):\n return self._property_names",
"def PropertiesStrings(self):\n\n s = []\n\n # Add all properties.\n for p in self.Dict:\n v = self.Get(p)\n if not (type(v) is type(lambda x: x)):\n s.append(p + ' = ' + str(self.Get(p)))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge multiple PropertiesList objects into a single one | def merge_properties_lists(*properties_lists: th.PropertiesList) -> th.PropertiesList:
result = th.PropertiesList()
for properties_list in properties_lists:
for name, prop in properties_list.items():
result.append(prop)
return result | [
"def merge_list(metas: List[ProjectMeta]) -> ProjectMeta:\n res_meta = ProjectMeta()\n for meta in metas:\n res_meta = res_meta.merge(meta)\n return res_meta",
"def join_all(self, props: List[AbsProp]):\n nprops = len(props)\n assert nprops > 0\n\n # initialize... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r"""Downsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image... | def downsample_2d(x, k=None, factor=2, gain=1, data_format='NHWC'):
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x,
k,
down=factor,
pad0=(p + 1) // 2,
pad1=p // 2,... | [
"def downsample(self,image, kernel):\n blur_image = self.convolve(image, kernel)\n img_downsampled = blur_image[::2, ::2]\n return img_downsampled",
"def downsample(images, downsample_by=4):\n h = np.hamming(2 * downsample_by + 1)\n h /= h.sum()\n H = h[:, np.newaxis] * h[np.newaxis,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes an object to a text file, using JSON representation | def save_to_json_file(my_obj, filename):
import json
with open(filename, 'w', encoding='utf-8') as f:
obj = json.dumps(my_obj)
f.write(obj) | [
"def json_writer(obj, filename):\n with open(filename, 'w') as f:\n json.dump(obj, f)",
"def save(obj, filename, format = \"JSON\"):\n if format == \"Python\":\n s = str(obj)\n else:\n s = json.dumps(obj, indent=2)\n open(filename,'w').write(s)\n return",
"def write_json(path... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that converts a dataframe into a BedTool object and saves it to output_file | def get_Bedtool_from_dataframe(df: object, output_file: str):
pybedtools.BedTool.from_dataframe(df).saveas(
output_file
)
return | [
"def dfTObedtool(df):\n\n df=df.astype(str)\n df=df.drop_duplicates()\n df=df.values.tolist()\n df=[\"\\t\".join(s) for s in df ]\n df=\"\\n\".join(df)\n df=BedTool(df, from_string=True)\n return df",
"def dataset_to_bd(dataframe):\n\n tabla = dataframe.to_dict('index')\n creado = False... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that extracts the information for motifs functional in a specified tissue from a psql database | def get_BedTool_for_functional_motifs(funMotifs: dict, tissue: str, db_user_name: str, db_name: str, output_file: str):
# establish connection
conn = psycopg2.connect(
database=db_name, user=db_user_name)
# create list of motif ids that will be extracted from table
motifs = "("
for f... | [
"def extract_notes(db_config,\n note_id_column_name=constants.NOTE_ID_COLUMN_NAME,\n text_column_name=constants.TEXT_COLUMN_NAME,\n financial_flag_column_name=constants.OUTCOME_COLUMN_NAME):\n db_connection = pg.connect(db_config)\n\n # exclude any notes tagg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that extracts the essential information of a variant file and returns it as BedTool object | def get_BedTool_from_variant_file(variant_file: str):
# TODO: create checks for file format
os.system(f"grep -v '#' {variant_file} " + "| awk -F '\t' '{print substr($2, 4), $3, $4, $5, $6, $7, $8, $9}' " +
f" >{variant_file}_tmp")
pybedtools.BedTool(variant_file + "_tmp").saveas(variant_fi... | [
"def test_get_BedTool_from_variant_file(self):\r\n FindMotifMutations.get_BedTool_from_variant_file(\"InputTestFilesSection6/Variant_test_input_file.maf\")\r\n os.remove(\"InputTestFilesSection6/Variant_test_input_file.maf_tmp\")\r\n\r\n assert compare_files(\"InputTestFilesSection6/Variant_tes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that overlaps variants and functional motifs | def overlap_variants_and_motifs(motifs, variants, output_file: str):
# TODO: if necessary to make BedTool again, change architecture, figure out why one file okay and the other not
mot = pybedtools.BedTool(motifs)
mot.intersect(variants, wo=True, header=True).saveas(output_file)
return | [
"def _primitive_overlap(a1, a2, ax, ay, az, bx, by, bz, l1, m1, n1, l2, m2, n2):\n #N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz = \\\n p = _gaussian_product(a1, a2, ax, ay, az, bx, by, bz)\n return _primitive_overlap_product(l1, m1, n1, l2, m2, n2, *p)",
"def test_mtmconvolv_overlap_effect(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that returns the overlaps between functional motifs of a tissue and variants | def find_funMotif_variants_in_tissue(funMotifs: dict, tissue: str, variant_BedTool_file: str, db_name: str,
db_user_name: str, output_file: str, motif_BedTool_file: str):
get_BedTool_for_functional_motifs(funMotifs, tissue, db_user_name, db_name, motif_BedTool_file)
overl... | [
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect the WS to the DUT, then change security from WPA2 to WPA/WPA2 | def test_wpa2_to_wpa(self, setUp):
network = conn()
assertion = Assert()
# select wireless interface and enable wireless
radio_page = RadioPage(self.firefox)
radio_page.select_wifi_interface(iface="2.4GHZ")
radio_page.enable(radio_page.get_wireless())
radio_page... | [
"def test_owe_transition_mode_connect_cmd(dev, apdev):\n wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')\n wpas.interface_add(\"wlan5\", drv_params=\"force_connect_cmd=1\")\n run_owe_transition_mode([ wpas ], apdev)",
"def SetWPADriver(self, driver):\n print \"setting wpa driver\", str(driver... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a document. All values (eventHub, consumerGroup, partitionId and offset) need to be passed as strings; these match the requirements of the EventHub. This method does not allow any additional storage; the other option does | def insert_offset_document(self, eventHub, consumerGroup, partition_id, offset, messageType, removeExisting=True):
dictObject = self._getDictionaryObjectOffset(eventHub, consumerGroup,partition_id, offset, messageType)
return self.insert_offset_document_from_dict(dictObject, removeExisting) | [
"def insert(self, collection, document):\n res = self.db[collection].insert_one(document) # Insert using Mongo\n if not res.acknowledged: # Ensure successful insert\n raise InsertFailureException(\"Failed to insert!\")",
"def add_insert(self, document):\n ...",
"def insert(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to create a feedback matrix | def create_matrix(self):
self.matrix = np.zeros((len(self.users), len(self.items)))
for user in self.train_set['users']:
for item in self.train_set['feedback'][user]:
self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \
self.train_set[... | [
"def create_matrix(self):\n \n if self.test_set is None:\n self.matrix = self.train_set.pivot(index='user_id', columns='item_id', values='feedback').fillna(0)\n else:\n # Combining both training and test set\n df_temp = self.test_set.copy()\n df_temp[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
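A worked toy example of the loop above, assuming the truncated assignment reads the rating from `train_set['feedback'][user][item]`:

```python
import numpy as np

users, items = ["u1", "u2"], ["i1", "i2", "i3"]
user_to_user_id = {u: k for k, u in enumerate(users)}
item_to_item_id = {i: k for k, i in enumerate(items)}

train_set = {
    "users": ["u1", "u2"],
    "feedback": {"u1": {"i1": 5.0, "i3": 2.0}, "u2": {"i2": 4.0}},
}

matrix = np.zeros((len(users), len(items)))
for user in train_set["users"]:
    for item in train_set["feedback"][user]:
        matrix[user_to_user_id[user]][item_to_item_id[item]] = train_set["feedback"][user][item]

print(matrix)  # [[5. 0. 2.]
               #  [0. 4. 0.]]
```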
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921 | def test_break_security_group_usual_case_specify_sg(): | [
"def test_break_security_group_failed():",
"def test_aws_service_api_vm_security_group_delete(self):\n pass",
"def test_break_security_group_rollback_previous():",
"def lambda_handler(event, context):\n Stop_Instances()",
"def usage_demo():\n logging.basicConfig(level=logging.INFO, format='%(le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921 in rollback | def test_break_security_group_rollback_previous(): | [
"def rollback(self, stage, enodes, exception):",
"def test_break_security_group_failed():",
"def test_create_namespaced_deployment_rollback(self):\n pass",
"def test_cpu_stress_on_ec2_instance_with_rollback():",
"def rollback():\n\n rollback_release()",
"def test_create_namespaced_deployment_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute SSM automation document DigitoBreakLambdaSecurityGroupTest_20200921 to test the failure case | def test_break_security_group_failed(): | [
"def test_break_security_group_usual_case_specify_sg():",
"def test_break_security_group_rollback_previous():",
"def test_error_handling_stop_pipeline(sdc_builder, sdc_executor):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n raw_data = dict(a='a', b='b')\n\n dev_raw_data_source = pipeline... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
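All three stubs above drive the same SSM automation document; a bare-bones sketch of how such a test might start and poll an execution with boto3 (the parameter names are hypothetical, since the real document schema is not shown):

```python
import time
import boto3

def run_automation(document_name, parameters):
    """Start an SSM automation document and wait for a terminal status (sketch)."""
    ssm = boto3.client("ssm")
    execution_id = ssm.start_automation_execution(
        DocumentName=document_name,
        Parameters=parameters,  # e.g. {"SecurityGroupId": ["sg-0123abcd"]} (hypothetical)
    )["AutomationExecutionId"]

    while True:
        status = ssm.get_automation_execution(
            AutomationExecutionId=execution_id
        )["AutomationExecution"]["AutomationExecutionStatus"]
        if status not in ("Pending", "InProgress", "Waiting"):
            return status  # e.g. "Success", "Failed", "Cancelled"
        time.sleep(10)
```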
Return the smearing (in ms) in each channel at the specified DM | def chan_smear(self, DM):
try:
        DM = where(DM-self.obs.cDM==0.0, self.obs.cDM+self.dDM/2.0, DM)
    except TypeError:
        if (DM-self.obs.cDM==0.0): DM = self.obs.cDM+self.dDM/2.0
return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM) | [
"def total_smear(self, DM):\n return sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0 +\n self.chan_smear(DM)**2.0)",
"def DM_for_smearfact(self, smearf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
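`dm_smear` itself is not shown in this row. In PRESTO-style dedispersion planning it implements the standard cold-plasma smearing relation, Δt ≈ 8.3×10⁶ ms × ΔDM × BW / f³ with BW and f in MHz; a sketch consistent with the four-argument calls above, where the last argument is the coherently removed DM:

```python
from numpy import fabs

def dm_smear(DM, BW, center_freq, cohdm=0.0):
    """Smearing in ms from dispersion across BW MHz centered at center_freq MHz.

    Sketch of the helper assumed by chan_smear/total_smear above; cohdm is
    the DM already removed by coherent dedispersion, so only the residual
    DM contributes to the smearing.
    """
    return 1000.0 * fabs(DM - cohdm) * BW / (0.0001205 * center_freq**3.0)
```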
Return the total smearing in ms due to the sampling rate, the smearing over each channel, the smearing over each subband (if numsub > 0), and the smearing over the full BW assuming the worst-case DM error. | def total_smear(self, DM):
return sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0 +
self.chan_smear(DM)**2.0) | [
"def DM_for_smearfact(self, smearfact):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0)\n return smearfact*0.001*other_smear/se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the DM where the smearing in a single channel is a factor smearfact larger than all the other smearing causes combined. | def DM_for_smearfact(self, smearfact):
other_smear = sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0)
return smearfact*0.001*other_smear/self.obs.cha... | [
"def chan_smear(self, DM):\n try:\n DM = where(DM-cDM==0.0, cDM+self.dDM/2.0, DM)\n except TypeError:\n if (DM-cDM==0.0): DM = cDM+self.dDM/2.0\n return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM)",
"def calc_mmd_cond(net):\n\n true_ps, true_samples... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the DM where the smearing in a single channel causes the same smearing as the effects of the new downsampling rate and dDM. | def DM_for_newparams(self, dDM, downsamp):
other_smear = sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*downsamp)**2.0 +
BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +
self.sub_smearing**2.0)
return 0.001*other_sme... | [
"def DM_for_smearfact(self, smearfact):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0)\n return smearfact*0.001*other_smear/se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
cls holds db while self holds field_name and model | def full_init_self(self, db, field_name, model):
if not self.db:
self.__class__.db = db
self.field_name = field_name
self.model = model # property | [
"def __init__(self, table, field):\r\n self.table = table\r\n self.field = field",
"def db_for_read(self, model, **hints):\n return None",
"def _load_from_db(self, model_inst):\n keyFieldVal = None\n if model_inst._meta.has_auto_field:\n autoField = model_inst._meta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure we have a markdown folder to write to. | def directory(self) -> Path:
(directory := Path("markdown").resolve(strict=False)).mkdir(exist_ok=True, parents=True)
return directory | [
"def clean_directory(request_user):\n\n # get config model\n model = SystemExporterMarkdownConfigModel.objects.get(system_exporter_markdown_config_name = 'SystemExporterMarkdownConfig')\n\n # clean or create markdown directory\n if os.path.exists(model.markdown_path + \"/docs/systems/\"):\n # rem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
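Usage note: because the walrus expression both resolves and creates the folder before returning it, a caller can write into it in one step. A hypothetical caller:

```python
# `exporter` stands for whatever object exposes the directory property above
out_path = exporter.directory / "index.md"
out_path.write_text("# Generated docs\n", encoding="utf-8")
```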
Rasterize all the vectors in the given directory into a single image. | def vectors_to_raster(file_paths, rows, cols, geo_transform, projection):
labeled_pixels = np.zeros((rows, cols))
for i, path in enumerate(file_paths):
label = i+1
ds = create_mask_from_vector(path, cols, rows, geo_transform,
projection, target_value=label)
... | [
"def vectors_to_raster(file_paths, rows, cols, geo_transform, projection):\n labeled_pixels = np.zeros((rows, cols))\n for i, path in enumerate(file_paths):\n label = i + 1\n ds = create_mask_from_vector(path, cols, rows, geo_transform,\n projection, target_va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
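`create_mask_from_vector` is referenced but not defined in this row; a sketch consistent with its call signature, rasterizing one vector file into an in-memory GDAL band:

```python
from osgeo import gdal, ogr

def create_mask_from_vector(vector_path, cols, rows, geo_transform, projection,
                            target_value=1):
    """Burn the features of one vector file into an in-memory raster (sketch)."""
    data_source = ogr.Open(vector_path)
    layer = data_source.GetLayer(0)
    driver = gdal.GetDriverByName("MEM")  # memory driver: nothing written to disk
    target_ds = driver.Create("", cols, rows, 1, gdal.GDT_UInt16)
    target_ds.SetGeoTransform(geo_transform)
    target_ds.SetProjection(projection)
    gdal.RasterizeLayer(target_ds, [1], layer, burn_values=[target_value])
    return target_ds
```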
Construct an hparam dictionary from the flags. | def _build_flags_hparam_dict():
logging.info('Show FLAGS for debugging:')
for f in HPARAM_FLAGS:
logging.info('%s=%s', f, FLAGS[f].value)
hparam_dict = collections.OrderedDict([
(name, FLAGS[name].value) for name in HPARAM_FLAGS
])
return hparam_dict | [
"def _hparams_from_flags():\n keys = (\"\"\"\n dataset quantization_level num_instruments separate_instruments\n crop_piece_len architecture use_sep_conv num_initial_regular_conv_layers\n sep_conv_depth_multiplier num_dilation_blocks dilate_time_only\n repeat_last_dilation_level num_layers num_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
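`HPARAM_FLAGS` is presumably a list of absl flag names defined elsewhere in that codebase; a self-contained sketch of the same pattern (the two flags here are examples, not the real hyperparameters):

```python
import collections
from absl import flags

flags.DEFINE_integer("batch_size", 32, "Per-client batch size.")    # example flag
flags.DEFINE_float("learning_rate", 0.01, "Client learning rate.")  # example flag
FLAGS = flags.FLAGS

HPARAM_FLAGS = ["batch_size", "learning_rate"]

def build_flags_hparam_dict():
    # FLAGS[name] returns the Flag object and .value its parsed value;
    # call this only after flags are parsed (e.g., inside app.run's main).
    return collections.OrderedDict((name, FLAGS[name].value) for name in HPARAM_FLAGS)
```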
Use the state_map for this instance to map a state string into a ServerState constant | def _api_state_to_serverstate(self, api_state):
try:
return self.state_map[api_state]
except KeyError:
        self.logger.warning(
"Unmapped Server state '%s' received from system, mapped to '%s'",
api_state, ServerState.UNKNOWN
)
ret... | [
"def from_esi_name(cls, esi_state_name: str) -> \"StructureService.State\":\n STATES_ESI_MAP = {\"offline\": cls.OFFLINE, \"online\": cls.ONLINE}\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.OFFLINE\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
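`state_map` is instance data that is not shown; a hypothetical mapping that would satisfy the method above, with unmapped strings falling through to the KeyError branch:

```python
import enum

class ServerState(enum.Enum):
    RUNNING = "running"
    POWERING_ON = "powering_on"
    POWERING_OFF = "powering_off"
    STOPPED = "stopped"
    UNKNOWN = "unknown"

# Hypothetical raw-API-string -> constant table; keys depend on the real API
state_map = {
    "ACTIVE": ServerState.RUNNING,
    "BUILD": ServerState.POWERING_ON,
    "STOPPING": ServerState.POWERING_OFF,
    "SHUTOFF": ServerState.STOPPED,
}
```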
Return True if the server is in the process of being powered on. | def is_powering_on(self):
return self._get_state() == ServerState.POWERING_ON | [
"def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the server is in the process of being powered off. | def is_powering_off(self):
return self._get_state() == ServerState.POWERING_OFF | [
"def is_powered_off(self, instance_name):\n return self._smtclient.get_power_state(instance_name) == 'off'",
"def is_powering_on(self):\n return self._get_state() == ServerState.POWERING_ON",
"def shutting_down(self):\n return self._shutdown.is_set()",
"def is_in_shutdown(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Divides the signal into several, possibly overlapping frames. | def signal_to_frames(signal, frame_len, frame_step, win_func=None):
assert signal.ndim == 1
signal_len = len(signal)
frame_len = int(round(frame_len))
frame_step = int(round(frame_step))
num_frames = number_frames(signal_len, frame_len, frame_step)
indices = indices_grid(frame_len, frame_step,... | [
"def split_frames(self, signal):\n\t\t# Compute number of frames and padding\n\t\tframe_length = int(round(self.frame_length_ms / 1000.0 * self.fs_hz))\n\t\tframe_step = int(round(self.frame_step_ms / 1000.0 * self.fs_hz))\n\t\tnum_frames = int(np.ceil(float(self.signal_length - frame_length) / frame_step))\n\t\tpr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
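The `number_frames` and `indices_grid` helpers used by `signal_to_frames` are not shown; a NumPy sketch of the usual implementations they likely correspond to:

```python
import math
import numpy as np

def number_frames(signal_len, frame_len, frame_step):
    """How many hops of frame_step (with window frame_len) cover the signal."""
    if signal_len <= frame_len:
        return 1
    return 1 + int(math.ceil((signal_len - frame_len) / float(frame_step)))

def indices_grid(frame_len, frame_step, num_frames):
    """(num_frames, frame_len) matrix of sample indices, one row per frame."""
    return (np.tile(np.arange(frame_len), (num_frames, 1)) +
            np.tile(np.arange(num_frames) * frame_step, (frame_len, 1)).T)

# e.g. padded_signal[indices_grid(400, 160, n)] yields an (n, 400) frame matrix
```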