| query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list of 19-20 strings) | metadata (dict) |
|---|---|---|---|
Make a string from an int. Hexadecimal representation will be used if the input value is greater than 'max_dec'. | def int2str(val, max_dec=1024):
if val > max_dec:
return "0x%x" % val
else:
return "%d" % val | [
"def try_int_to_str(val, max_dec=1024):\n if isinstance(val, int):\n if val > max_dec:\n return \"0x%x\" % val\n else:\n return \"%d\" % val\n else:\n return val",
"def IntStr( num, dec=None ):\n num = int(num)\n if not dec: return str(num)\n if dec <= len(str(n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
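For reference, the expected behavior of the `int2str` document above, shown as a doctest-style illustration (not part of the dataset):

```python
>>> int2str(512), int2str(2048)   # 2048 > max_dec=1024, so hex is used
('512', '0x800')
```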
Check if value is a non-negative integer | def is_non_neg_int(val):
return isinstance(val, int) and val >= 0 | [
"def is_int_neg(x):\n return True if is_int(x) and x < 0 else False",
"def is_int_not_neg(x):\n return True if is_int(x) and x >= 0 else False",
"def is_Negative(x):\n if x < 0:\n return True\n return False",
"def _isNotNegative(self, value):\n\n isNotNegative = False\n if (va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if value is list | def is_list(val):
return isinstance(val, list) | [
"def _is_valid_value(value: List) -> bool:\n return isinstance(value, list)",
"def isListLike(value):\r\n\r\n return isinstance(value, (list, tuple, set))",
"def _is_list(e):\n return isinstance(e, LIST_TYPE)",
"def is_list(x):\n return type(x) == list",
"def _check_is_list(obj):\n return isi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if string starts with a letter | def is_first_letter(val):
return ord(val[0].lower()) in range(ord('a'), ord('z') + 1) | [
"def contains_at_least_one_letter(string):\n first_letter = string[0]\n for letter in string[1:]:\n if first_letter == letter:\n return True\n else:\n first_letter = letter\n return False",
"def IsNameStartChar(c):\n if c <= u\"z\":\n if c >= u\"a\":\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate simple register map template | def create_template_simple():
rmap = RegisterMap()
rmap.add_registers(Register('DATA', 'Data register', 0x0).add_bitfields(
BitField(width=32, access='rw', hardware='ioe')))
rmap.add_registers(Register('CTRL', 'Control register', 0x4).add_bitfields(
BitField(width=16, access='rw', reset=0x... | [
"def create_template():\n # register map\n rmap = RegisterMap()\n\n rmap.add_registers(Register('DATA', 'Data register', 0x4).add_bitfields([\n BitField(\"FIFO\", \"Write to push value to TX FIFO, read to get data from RX FIFO\",\n width=8, lsb=0, access='rw', hardware='q'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate register map template | def create_template():
# register map
rmap = RegisterMap()
rmap.add_registers(Register('DATA', 'Data register', 0x4).add_bitfields([
BitField("FIFO", "Write to push value to TX FIFO, read to get data from RX FIFO",
width=8, lsb=0, access='rw', hardware='q'),
BitField("FERR"... | [
"def create_template_simple():\n rmap = RegisterMap()\n\n rmap.add_registers(Register('DATA', 'Data register', 0x0).add_bitfields(\n BitField(width=32, access='rw', hardware='ioe')))\n\n rmap.add_registers(Register('CTRL', 'Control register', 0x4).add_bitfields(\n BitField(width=16, access='r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return CDD superfamilies (clans) | def get_clans(cddid: str, fam2supfam: str) -> list[Clan]:
superfamilies = {}
families = set()
if cddid.lower().endswith(".gz"):
fh = gzip.open(cddid, "rt")
else:
fh = open(cddid, "rt")
for line in fh:
fields = line.rstrip().split("\t")
accession = fields[1]
n... | [
"def super_categories(self):\n return [BasesOfQSymOrNCSF(self.base()).Commutative()]",
"def super_concepts_of(self, cnl, direct=False):\n return cognipy_call(self._uid, \"GetSuperConceptsOf\", cnl, direct)",
"def table_father_not_concordant():\n pass",
"def _direct_superclasses(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test display at origin | def test_display_at_origin(self):
Base._Base__nb_object = 0
r1 = Square(2)
my_stdout = StringIO()
sys.stdout = my_stdout
r1.display()
sys.stdout = sys.__stdout__
expected = "##\n##\n"
self.assertEqual(expected, my_stdout.getvalue()) | [
"def test_display_not_at_origin(self):\n Base._Base__nb_object = 0\n r1 = Square(2, 1, 1, 2)\n my_stdout = StringIO()\n sys.stdout = my_stdout\n r1.display()\n sys.stdout = sys.__stdout__\n expected = \"\\n ##\\n ##\\n\"\n self.assertEqual(expected, my_stdout.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test display not at origin | def test_display_not_at_origin(self):
Base._Base__nb_object = 0
r1 = Square(2, 1, 1, 2)
my_stdout = StringIO()
sys.stdout = my_stdout
r1.display()
sys.stdout = sys.__stdout__
expected = "\n ##\n ##\n"
self.assertEqual(expected, my_stdout.getvalue()) | [
"def test_display_at_origin(self):\n Base._Base__nb_object = 0\n r1 = Square(2)\n my_stdout = StringIO()\n sys.stdout = my_stdout\n r1.display()\n sys.stdout = sys.__stdout__\n expected = \"##\\n##\\n\"\n self.assertEqual(expected, my_stdout.getvalue())",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a .wit directory exists in any parent directory. | def is_wit_exists(abs_path):
parent_dir = os.path.dirname(abs_path)
drive = os.path.join(os.path.splitdrive(abs_path)[0], os.sep)
while parent_dir != drive:
wit_path = os.path.join(parent_dir, ".wit")
is_exists = os.path.exists(wit_path)
if is_exists:
return parent_dir
... | [
"def test_subdirs_exist(self):\n for dirname in template_directories:\n self.assertTrue(\n os.path.isdir(os.path.join(self.builtdir, dirname))\n )",
"def test_check_dir_existence_sub_dir_not_found(self):\n self.assertFalse(self.existing_dirs.append('unexpected_di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy a file or directory to the staging area. Copy all the parent directories of the path to the root directory (which contains the '.wit' dir). A directory is copied with all of its content. | def add(path):
abs_path = os.path.abspath(path)
root = is_wit_exists(abs_path)
staging_area = os.path.join(os.path.join(root, '.wit'), 'staging_area')
destination = os.path.join(staging_area, os.path.relpath(abs_path, start=root))
if os.path.isfile(abs_path):
if not os.path.exists(os.path.di... | [
"def copytree(src, dst):\n if os.path.isfile(src):\n shutil.copy2(src, dst)\n else:\n shutil.copytree(src, dst)",
"def copy(self, coderoot, testroot, case, compiler=None, stagedir=None):\n source = self.path(coderoot, case, compiler, stagedir)\n if source is not None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls a coroutine function with parameters, if it's defined. | async def try_call(self, func, *opts):
if inspect.iscoroutinefunction(func):
await func(*opts) | [
"async def _invoke(callback: Callable, *params: object) -> Any:\n _rich_traceback_guard = True\n parameter_count = count_parameters(callback)\n result = callback(*params[:parameter_count])\n if isawaitable(result):\n result = await result\n return result",
"async def call_with_args(self, fn,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Establishes connection to websocket endpoint and calls on_connected callback. | async def connect(self):
self.websocket = await websockets.connect(self.url, **self.kwargs)
await self.try_call(self.on_connected) | [
"async def websocket_connected(ws):\n await ws.send_str(json.dumps({\"subject\": Subject.websocket.value, \"event\": Event.connected.value}))\n logger.debug(\"websocket: new connection from user %s\", ws.cirrina.web_session.get(\"username\"))",
"def __connect(self):\n \n self.ws = websocket.We... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert this rotation string into a classification | def rotclass(val):
    # if we have seven years of either corn or soy
if val.count("B") + val.count("C") > 6:
return "Ag"
return "Non Ag" | [
"def rn_classification(array):\n _class = ['', '', '']\n\n # integer / fraction\n # den length == 1 and den = [1, 1, 1]\n if len(array[1]) == 1 and (array[1][0] == 1).all():\n _class[0] = 'integer'\n else:\n _class[0] = 'fraction'\n\n # rational / irrational for each linear\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print current ffmpeg status | def do_status(self):
return "Waiting for {0.prefill_in} frames; Streaming from ffmpeg: {0.ffmpeg_ready}".format(self) | [
"def commandline(self): \n return self._ffmpeg_commandline()",
"def status(dev):\n dev.print_status()",
"def ffmpeg_parse_infos(self,filename, print_infos=False, check_duration=True):\n\n\n # open the file in a pipe, provoke an error, read output\n is_GIF = filename.endswith('.gif... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the current time skew from the source video (dropped frames). | def do_skew(self):
for conn_id, conn in self.connections.items():
return "{} - Skew: {} frames".format(conn_id, conn["time_skew"] / 3600) | [
"def currentTime(snap):",
"def demostrating_video_stab(filename, new_size=(320, 240), tracking_mode=True):\n if tracking_mode:\n from .curve import tracking\n\n def decorator(func):\n funcs = {}\n for i in range(4):\n @tracking(track_len=20, detect_interval=5)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Will perform a search for a given word horizontally along a given direction from a starting point. If the word is found it will return true, otherwise it will return false | def look_horizontal(cls, word_search, word, x, y):
wrong_count = 0
found = True
found_direction = 0
# looking both directions
for direction in range(1, -2, -2):
found = True
wrong_count = 0
found_direction = direction
for i in ran... | [
"def _search_in_direction(self, word, options={'pos': 0, 'direction':0, 'x': 0, 'y': 0}):\n\t\tpos = options['pos']\n\t\tdirection = options['direction']\n\n\t\t# Comptes the next position\n\t\tx, y = self.get_next_pos(word, options)\n\n\t\t# Returns False if out of bounds\n\t\tif x < 0 or x >= self.wid or y<0 or ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Will perform a search for a given word vertically along a given direction from a starting point. If the word is found it will return true, otherwise it will return false | def look_vertical(cls, word_search, word, x, y):
wrong_count = 0
found = True
found_direction = 0
for direction in range(1, -2, -2):
found = True
wrong_count = 0
found_direction = direction
for i in range(1, len(word)):
if... | [
"def _search_in_direction(self, word, options={'pos': 0, 'direction':0, 'x': 0, 'y': 0}):\n\t\tpos = options['pos']\n\t\tdirection = options['direction']\n\n\t\t# Comptes the next position\n\t\tx, y = self.get_next_pos(word, options)\n\n\t\t# Returns False if out of bounds\n\t\tif x < 0 or x >= self.wid or y<0 or ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
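The `look_horizontal`/`look_vertical` documents above are truncated in this dump. A minimal, self-contained sketch of the directional scan their docstrings describe follows; the grid-as-list-of-strings representation and the `look` helper are illustrative assumptions, not the dataset's own code:

```python
def look(grid, word, x, y, dx, dy):
    """Return True if `word` reads from (x, y) along direction (dx, dy)."""
    for i, ch in enumerate(word):
        nx, ny = x + i * dx, y + i * dy
        if not (0 <= ny < len(grid) and 0 <= nx < len(grid[ny])):
            return False  # walked off the board
        if grid[ny][nx] != ch:
            return False  # letter mismatch
    return True

def look_horizontal(grid, word, x, y):
    # mirrors the range(1, -2, -2) loop above: try direction +1, then -1
    return look(grid, word, x, y, 1, 0) or look(grid, word, x, y, -1, 0)

def look_vertical(grid, word, x, y):
    return look(grid, word, x, y, 0, 1) or look(grid, word, x, y, 0, -1)
```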
Filter out the script tags so we can parse the XML. | def _filter_script_tags(input_xml):
output_lines = []
in_script = False
for line in input_xml.splitlines():
if "<script>" in line:
in_script = True
if not in_script:
output_lines.append(line)
if "</script>" in line:
in_script = False
return '\n... | [
"def script(self):\n if 'Suppress-Script' in self.data['record']:\n return Subtag(self.data['record']['Suppress-Script'], 'script')\n return None",
"def filterHtml(self, body):\n output = ''\n soup = BeautifulSoup(body, \"html.parser\")\n for script in soup([\"script\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save username and password to config file. Entering nothing keeps the current credentials. Returns whether or not the credentials changed. | def update_credentials():
# Read old credentials
config = read_config()
try:
old_email = config.get(ConfigParser.DEFAULTSECT, 'email')
except ConfigParser.NoOptionError:
old_email = ''
try:
old_password = config.get(ConfigParser.DEFAULTSECT, 'password')
except ConfigParse... | [
"def check_cred_login_and_save(self, server, user, pw):\n try:\n Api.login(server, user, pw)\n except LoginException as e:\n log.error(f\"Could not log in.\\nPW: {pw}\\nError: {e}\")\n answer = sg.popup_yes_no(\n f\"Login failed\\n\\nServer: {server}\\nU... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print each word from a text document at a URL. | def main(url):
words = fetch_words(url)
print_items(words) | [
"def main(url):\n words = fetch_words(url )\n print_items(words)",
"def fetch_words(url):\n # This is function docstring which is documentation for function, modules and scripts\n story= urlopen(url)\n story_words= []\n\n for line in story:\n line_words = line.decode('utf8').split()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Put a host/service in downtime. | def post(self, dt):
data = dt.as_dict()
data.update({'action': 'add'})
requests.post(
pecan.request.ws_arbiter_url + "/downtime",
data=data
)
return info.Info(message='Downtime received.') | [
"def delete(self, dt):\n\n data = dt.as_dict()\n data.update({'action': 'delete'})\n\n requests.post(\n pecan.request.ws_arbiter_url + \"/downtime\",\n data=data\n )\n\n return info.Info(message='Downtime received.')",
"def set_host_into_maintenance_mode(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a host/service downtime. | def delete(self, dt):
data = dt.as_dict()
data.update({'action': 'delete'})
requests.post(
pecan.request.ws_arbiter_url + "/downtime",
data=data
)
return info.Info(message='Downtime received.') | [
"def test_remove_scheduled_delete(self):\n cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')\n cli.remove_scheduled_delete(1)",
"def host_delete(context, host_name, session=None):\n if session is None:\n session = nova_db_sa_api.get_session()\n with session.begin(subtrans... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert zone climate state. | def _assert_zone_state(hass, mode, hvac, current_temp, target_temp, preset, action):
state = hass.states.get("climate.zone_1")
assert hass.states.is_state("climate.zone_1", hvac)
assert state.attributes["current_temperature"] == current_temp
assert state.attributes["max_temp"] == Zone.MAX_TARGET_TEMP
... | [
"def test_ZoneStats(self):\n zone_list = self.xml_obj.stats.zone_stats\n self.assertEqual(zone_list['dom1.example.org']['_default']['serial'], 266)\n self.assertEqual(zone_list['dom1.example.org']['_default']['qrysuccess']['value'], 11508)",
"def test_ExpectedZoneCount(self):\n self.as... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a Datastore key for a User entity. We use the user's email as the key. | def user_key(id):
return ndb.Key(User, id) | [
"def user_key(user_number=DEFAULT_USER_NUMBER):\n\treturn ndb.Key('User', user_number)",
"def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key",
"def load_user_key(client, user_id):\n key = None\n key = client.key(config.USER_ENTITY_TYPE, user_id)\n return key",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This test ensures a product is correctly updated when imported data differs from stored data | def test_pies_product_update(updated_test_brand_data, test_brand_record):
PiesDataStorage(updated_test_brand_data).store_brand_data()
compare_products_to_db(updated_test_brand_data, test_brand_record) | [
"def test_product_ca_import_update(self):\n self.import_file(\"custom_attribute_tests.csv\")\n self.import_file(\"custom_attribute_update_tests.csv\")\n prod_0 = Product.query.filter(Product.slug == \"prod0\").first()\n prod_0_expected = {\n u\"normal text\": u\"edited normal text\",\n u\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract host from endpoint. | def _get_host(endpoint):
if endpoint.startswith('http://'):
return endpoint[7:].strip()
if endpoint.startswith('https://'):
return endpoint[8:].strip()
return endpoint.strip() | [
"def getHostFrom(fromHost):",
"def get_host(email):\n host=email.split('@').__getitem__(1).split('.').__getitem__(0)\n return host",
"def get_hostname (url):\n reg = re.search('[^:]+:(/)*([^:/]+)(:[0-9]+)?(/)?.*', url)\n host = ''\n try:\n host = reg.group(2)\n except:\n pass\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
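An alternative sketch of the `_get_host` idea using the standard library; note it is not byte-for-byte equivalent to the prefix stripping above, since `urlparse(...).netloc` also drops any trailing path:

```python
from urllib.parse import urlparse

def get_host(endpoint):
    """Extract host (and port, if any) from an endpoint URL."""
    parsed = urlparse(endpoint.strip())
    # Without a scheme, urlparse puts everything in .path, so fall back to it.
    return parsed.netloc or parsed.path
```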
Delete the specified function. | def delete_function(self, serviceName, functionName, etag=None, traceId=None):
method = 'DELETE'
path = '/{0}/services/{1}/functions/{2}'.format(self.api_version, serviceName, functionName)
headers = self._build_common_headers()
if etag:
headers['if-match'] = etag
if ... | [
"def delete(self):\n self._transformation_function_engine.delete(self)",
"def delete(self, func_to_reset, *args, **kwargs):\n name = func_to_reset if isinstance(\n func_to_reset, str) else func_to_reset.__name__\n task = self.steps[name]()\n\n path = task._getpath([])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the functions of the specified service. | def list_functions(self, serviceName, limit=None, nextToken=None, prefix=None, startKey=None, traceId=None):
method = 'GET'
path = '/{0}/services/{1}/functions'.format(self.api_version, serviceName)
headers = self._build_common_headers()
if traceId:
headers['x-fc-trace-id'] =... | [
"def list_functions(self):\n request = self.functions.list(location=self.parent)\n return self.__execute_request(request)",
"def list_services(ctx):\n\n ctx.respond(ctx._(\"I am running: {services}\").format(\n services=\", \".join(ctx.bot.services))\n )",
"def list(service_template_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new site directory and init Mambo | def create_site(sitename):
title('Create new site')
mambo_conf = os.path.join(CWD, Mambo.config_yml)
if os.path.isfile(mambo_conf):
error_exit("Can't create new site in a directory that contain 'mambo.yml'")
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
erro... | [
"def init():\n title(\"Init Mambo...\")\n mambo_conf = os.path.join(CWD, Mambo.config_yml)\n if os.path.isfile(mambo_conf):\n error_exit(\"Mambo is already initialized in '%s'. Or delete 'mambo.yml' if it's a mistake \" % CWD)\n else:\n copy_resource(\"skel/\", CWD)\n stamp_mambo_cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize Mambo in the current directory | def init():
title("Init Mambo...")
mambo_conf = os.path.join(CWD, Mambo.config_yml)
if os.path.isfile(mambo_conf):
error_exit("Mambo is already initialized in '%s'. Or delete 'mambo.yml' if it's a mistake " % CWD)
else:
copy_resource("skel/", CWD)
stamp_mambo_current_version(CWD)... | [
"def init_manager(basedir, dbconnect):\n # Test if the base directory exists and is empty.\n basedir = basedir if basedir is not None else config.BASEDIR()\n if os.path.isdir(basedir):\n if os.listdir(basedir):\n click.echo('Not an empty directory {}.'.format(basedir))\n sys.ex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean the build dir | def clean():
title("Cleaning build dir...")
Mambo(CWD).clean_build_dir()
done() | [
"def clean():\n rm_rf(cwd/'_build')",
"def clean_build():\r\n env.clean_build = True",
"def clean_builds(self, _args):\n ctx = self.ctx\n rmdir(ctx.build_dir)\n rmdir(ctx.python_installs_dir)\n libs_dir = join(self.ctx.build_dir, 'libs_collections')\n rmdir(libs_dir)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Affinity is a group of affinity scheduling rules. | def affinity(self) -> Optional['outputs.InfinispanSpecAffinity']:
return pulumi.get(self, "affinity") | [
"def combine_affinity(affinities):\n affinity = {f: list() for f in affinities[0].keys()}\n for k in affinity.keys():\n if \"cpus\" in k:\n for aff in affinities:\n affinity[k].extend(aff[k])\n elif \"torch_threads\" in k:\n num = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
InfinispanContainerSpec specifies resource requirements per container | def container(self) -> Optional['outputs.InfinispanSpecContainer']:
return pulumi.get(self, "container") | [
"def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:\n return pulumi.get(self, \"container\")",
"def validate_required_for_container(data, c_req):\n c_req_set = set(c_req)\n result = True\n if (data['kind'] == \"Deployment\") or \\\n (data['kind'] == \"DaemonSet\") or ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ExposeSpec describes how Infinispan will be exposed externally | def expose(self) -> Optional['outputs.InfinispanSpecExpose']:
return pulumi.get(self, "expose") | [
"def expose(self) -> 'outputs.InfinispanSpecServiceSitesLocalExpose':\n return pulumi.get(self, \"expose\")",
"async def _expose_internal(self, exposure: Exposure, **kwargs) -> Exposure:\n\n raise NotImplementedError",
"def should_expose(self, state) -> bool:\n expose_by_default = self._con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
InfinispanSecurity info for the user application connection | def security(self) -> Optional['outputs.InfinispanSpecSecurity']:
return pulumi.get(self, "security") | [
"def security(self) -> 'outputs.InfinispanStatusSecurity':\n return pulumi.get(self, \"security\")",
"def getSecurity(self):\n return self._security",
"def get_security_config(app):\n items = app.config.items()\n prefix = 'SECURITY_'\n\n def strip_prefix(tup):\n return (tup[0].repl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
InfinispanServiceSpec specifies configuration for a specific service | def service(self) -> Optional['outputs.InfinispanSpecService']:
return pulumi.get(self, "service") | [
"def _configure_services(self):\n if self.series == 'trusty':\n keystone_config = {'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting',\n 'openstack-origin': 'cloud:trusty-mitaka'}\n designate_config = {'openstack-o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) | def __init__(__self__, *,
pod_affinity_term: 'outputs.InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
weight: int):
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight) | [
"def _assign_node_weights(self):\n _CONFIG_SERVER_SCORE = 11\n _QUORUM_MANAGER_SCORE = 8\n _QUORUM_SCORE = 5\n _MANAGER_SCORE = 3\n _CLIENT_SCORE = 1\n\n for node in self.state['nodes'].keys():\n\n fullname = self.state['nodes'][node]['admin_node_name']\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
matchExpressions is a list of label selector requirements. The requirements are ANDed. | def match_expressions(self) -> Optional[Sequence['outputs.InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
return pulumi.get(self, "match_expressions") | [
"def match_expressions(self) -> Optional[List[\"LabelSelectorRequirement\"]]:\n return self.__match_expressions",
"def _LabelMatched(obj, selector_map):\n if not obj:\n return False\n if not selector_map:\n return True\n labels = _GetPathValue(obj, ['metadata', 'labels'])\n if not labels:\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
InfinispanServiceContainerSpec specifies resource requirements specific to the service | def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:
return pulumi.get(self, "container") | [
"def service(self) -> Optional['outputs.InfinispanSpecService']:\n return pulumi.get(self, \"service\")",
"def container(self) -> Optional['outputs.InfinispanSpecContainer']:\n return pulumi.get(self, \"container\")",
"def _setup_container() -> svc_containers.Container:\n container = svc_contai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ExposeSpec describes how Infinispan will be exposed externally | def expose(self) -> 'outputs.InfinispanSpecServiceSitesLocalExpose':
return pulumi.get(self, "expose") | [
"def expose(self) -> Optional['outputs.InfinispanSpecExpose']:\n return pulumi.get(self, \"expose\")",
"async def _expose_internal(self, exposure: Exposure, **kwargs) -> Exposure:\n\n raise NotImplementedError",
"def should_expose(self, state) -> bool:\n expose_by_default = self._config.get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
InfinispanSecurity info for the user application connection | def security(self) -> 'outputs.InfinispanStatusSecurity':
return pulumi.get(self, "security") | [
"def security(self) -> Optional['outputs.InfinispanSpecSecurity']:\n return pulumi.get(self, \"security\")",
"def getSecurity(self):\n return self._security",
"def get_security_config(app):\n items = app.config.items()\n prefix = 'SECURITY_'\n\n def strip_prefix(tup):\n return (tup... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads a Varian .fid file and converts it into an NMR pipe file. | def read_varian_as_nmrpipe(fid_file):
dic, data = ng.varian.read(fid_file)
udic = ng.varian.guess_udic(dic, data)
C = ng.convert.converter()
C.from_varian(dic, data, udic)
dic, data = C.to_pipe()
return dic, data | [
"def write_varian_as_pipe(fid_file, output_folder):\n # Get the basename of the fid_file.\n # base_name = os.path.basename(fid_file)\n base_name = os.sep.join(os.path.normpath(fid_file).split(os.sep)[5:])\n\n dic, data = ng.varian.read(fid_file)\n udic = ng.varian.guess_udic(dic, data)\n convert =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads a Varian .fid file and writes it as an NMR pipe file in the output_folder directory. | def write_varian_as_pipe(fid_file, output_folder):
# Get the basename of the fid_file.
# base_name = os.path.basename(fid_file)
base_name = os.sep.join(os.path.normpath(fid_file).split(os.sep)[5:])
dic, data = ng.varian.read(fid_file)
udic = ng.varian.guess_udic(dic, data)
convert = ng.convert.... | [
"def nco_extract( var , infile , outfile ):\n command = \"ncks --overwrite --history\"+\\\n \" --variable \"+var+\\\n \" --output \"+outfile+\\\n \" \"+infile\n process_cmd(command)",
"def create_forna_file(output_folder, origin, name, seq, structure):\n if origin =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Integrates a spectrum based on the linewidth of its peaks. | def integrate_peak_by_linewidth(peaks, dic, data, uc):
integrals = dict()
# TODO: Create a unit conversion object.
for peak in peaks:
peak_center = peak[0]
peak_id = peak[1]
peak_width = peak[2]
int_start = (peak_center - peak_width) / 500
int_end = (peak_center + p... | [
"def integrateSpectra(spectra, dlambda):\n \n \"\"\"\n spectra = list of Nx2 arrays describing filter or dye spectra, or laser wavelength profile\n dlambda = wavelength difference betweeen adjacent values in the spectra\n \"\"\"\n\n lowerLimit = min( [min(spectrum[:,0]) for spectrum in spectra] )\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Two identical rings should produce identical .gz files on disk. | def test_deterministic_serialization(self):
os.mkdir(os.path.join(self.testdir, '1'))
os.mkdir(os.path.join(self.testdir, '2'))
# These have to have the same filename (not full path,
# obviously) since the filename gets encoded in the gzip data.
ring_fname1 = os.path.join(self.te... | [
"def gzip_cmp(self, first, second):\n ungz_first = ''.join(os.path.splitext(first)[:-1]) + '.ugz'\n ungz_second = ''.join(os.path.splitext(second)[:-1]) + '.ugz'\n # Unzip the first file.\n with gzip.open(first, 'rb') as gfp, open(ungz_first, 'wb') as ufp:\n ufp.write(gfp.read... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run command cmd in directory d. | def run(self, d, cmd, **kwargs):
print("running %s in %s ..." % (cmd, d))
os.chdir(os.path.join(self.rootdir, d))
r = subprocess.call([sys.executable] + cmd, **kwargs)
if r != 0:
self.failed.append((d, cmd, r))
else:
self.passed += 1
os.chdir(self.... | [
"def run(self, command, src_dir=False):\n dir = self.dep.src_dir if src_dir else self.dep.build_dir\n execute(f'cd {dir} && {command}', echo=True)",
"def run(cmd):\n # TODO: subprocess.run can take in lists, so could do a check with `isinstance` to allow running a command as a list\n print(f\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the FAQs page is accessible | def test_page_access(self):
response = self.client.get('/faqs/')
self.assertEqual(response.status_code, 200) | [
"def test_faq_template(self):\n res = self.testapp.get('/faq', status=200)\n self.failUnless('Why is it that C3S wants me to sign?' in res.body)\n self.failUnless(\n 'Copyright 2013, OpenMusicContest.org e.V.' in res.body)",
"def test_quest(self):\n response = self.client.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate the SearchSource (parameters etc.) | def __validate_source(self) -> None:
source = self.search_source
self.review_manager.logger.debug(f"Validate SearchSource {source.filename}")
# if "query" not in source.search_parameters:
# Note : for md-sources, there is no query parameter.
# raise colrev_exceptions.Invali... | [
"def validate(self, *args):\n pass",
"def validate(self):\r\n\t\tfrom ..nrml import NRMLError\r\n\r\n\t\tsource_ids = []\r\n\t\tfor source in self.sources:\r\n\t\t\tif not source.source_id in source_ids:\r\n\t\t\t\tsource_ids.append(source.source_id)\r\n\t\t\telse:\r\n\t\t\t\traise NRMLError(\"Duplicate so... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add SearchSource as an endpoint (based on the query provided to colrev search -a) | def add_endpoint(cls, operation: colrev.ops.search.Search, params: str) -> None:
filename = operation.get_unique_filename(
file_path_string=f"local_index_{params}".replace("%", "").replace("'", "")
)
add_source = colrev.settings.SearchSource(
endpoint="colrev.local_index... | [
"def set_search(self, search_term, filters={}):\n\n self.search_term = search_term\n self.search_url = 'https://www.petfinder.com/search/'+self.search_term+'-for-adoption'\n if 'state' in filters:\n append = '/us/' + filters['state'] + '/?distance=100'\n self.search_url = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve masterdata from LocalIndex based on similarity with the record provided | def get_masterdata(
self,
prep_operation: colrev.ops.prep.Prep,
record: colrev.record.Record,
save_feed: bool = True,
timeout: int = 10,
) -> colrev.record.Record:
if any(self.origin_prefix in o for o in record.data["colrev_origin"]):
# Already linked to ... | [
"def bfs_trrust_database_search_target(list_of_input_genes, trrust_filepath=\"../trrust_rawdata.human.tsv\", column_names=[\"Transcription factor\", \"Target gene\", \"Relationship\", \"PubMED identifier\"], return_all=False):\n\n\n\tdf = pd.read_csv(trrust_filepath, delimiter='\\t', header=None)\n\tdf.columns = co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Modified velstor.api call for metadata copies | def ns_copy(session, vtrqid, src, dest, overwrite):
result = ns.copy_vector(session,
vtrqid,
[{'src': src, 'dest': dest}],
overwrite)
# print('ns_copy:', result)
#
# This command always does a single operation, but it i... | [
"def smartCopy(*args, **kwargs):\n \n pass",
"def test_metadata(self):\n\n # Grab the first available part\n part = Part.list(self.api, limit=1)[0]\n\n part.setMetadata(\n {\n \"foo\": \"bar\",\n },\n overwrite=True,\n )\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write a segment and AnalogSignal to a text file. | def write_segment(self, segment,
delimiter = '\t',
skiprows =0,
writetimecolumn = True,
):
if skiprows:
raise NotImplementedError('skiprows values other than 0 are not ' ... | [
"def write_segment(self, segment,\n delimiter = '\\t',\n \n skiprows =0,\n \n timecolumn = None,\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new MaptilesDataset object with a subset of df_fns and optionally overwritten transform and target_transform. | def make_subset(self, inds: Iterable[int],
transform=None,
target_transform=None
):
df_fns = self.df_fns.iloc[inds].reset_index(drop=True)
return MaptilesDataset(
data_root=self.data_root,
cities=self.cities,
... | [
"def __init__(\n self,\n img_files,\n img_transform: Optional[Callable] = None,\n seg_files=None,\n seg_transform: Optional[Callable] = None,\n labels=None,\n label_transform: Optional[Callable] = None,\n ):\n items = [(img_files, img_transform), (seg_files... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect and count the maptiles from `cities`, for each style in `styles` and at each zoom level in `zooms` | def collect_fns(data_root: Path,
cities: Iterable[str] = None,
styles: Iterable[str] = None,
zooms: Iterable[str] = None,
verbose: bool = False,
) -> pd.DataFrame:
# Collect as a record/row = Tuple[str, str, str,... | [
"def add_cities_to_map(map, cities, city_number):\n counter = 0\n for city in cities:\n try:\n coordinates = find_coordinates(city[0])\n counter += 1\n map.add_child(folium.CircleMarker(location=coordinates,\n radius=10,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
In-place projection to the manifold. Returns the same tensor instance | def proj_(self) -> torch.Tensor:
return copy_or_set_(self, self.manifold.projx(self)) | [
"def projective_transform(self):\r\n return transform.ProjectiveTransform(self.persp_matrix)",
"def unproject_to_image_plane(self, Xi):\n Xi = np.array(Xi)\n u_hat = self.unproject(Xi)\n v = self.camera_location\n mag_v = np.linalg.norm(v)\n cos_th = np.dot(u_hat,v) / mag_v\n u = (mag_v /... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check whether the current slide is IHC stained | def check_ihc_slide(slide):
sample = slide.read_region((0, 0), slide.level_count - 1,
(slide.level_dimensions[slide.level_count - 1][0],
slide.level_dimensions[slide.level_count - 1][1]))
sample = sample.convert('RGB')
sample_hsv = color.rgb2hsv(n... | [
"def is_contageous(self):\n return (self.health == Health.contageous or self.health == Health.sick)",
"def HasSIC(self):\n return self.__has('SIC')",
"def isContinuous(self): # real signature unknown; restored from __doc__\n pass",
"def HasHIS(self):\n return self.__has('HIS')",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
generate binary mask for a given tile | def generate_binary_mask(tile):
tile_hsv = color.rgb2hsv(np.asarray(tile))
roi1 = (tile_hsv[:, :, 0] >= 0.33) & (tile_hsv[:, :, 0] <= 0.67)
roi1 = ~roi1
skmp.remove_small_holes(roi1, area_threshold=500, connectivity=20, in_place=True)
skmp.remove_small_objects(roi1, min_size=500, connectivity=20, i... | [
"def compute_mask(self, t, default_mask):\n pass",
"def _to_binary_mask(self, array):\n # check where the transparency is not zero\n return (array[..., -1] > 0).astype(self.raster_dtype) * self.raster_value",
"def action_mask(self, state: int):\n mask = np.zeros(6, dtype=np.int8)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
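The `generate_binary_mask` document is cut off mid-call above. A runnable sketch of the same HSV-threshold-plus-morphology idea, with the thresholds copied from the visible lines and the tail of the function assumed:

```python
import numpy as np
from skimage import color, morphology

def generate_binary_mask(tile):
    """Binary tissue mask for an RGB tile: drop green hues, clean up noise."""
    hsv = color.rgb2hsv(np.asarray(tile))
    roi = ~((hsv[:, :, 0] >= 0.33) & (hsv[:, :, 0] <= 0.67))  # keep non-green
    roi = morphology.remove_small_holes(roi, area_threshold=500)
    roi = morphology.remove_small_objects(roi, min_size=500)
    return roi
```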
Get config for frontend web map (e.g. access tokens) | def get_map_config():
settings = get_settings()
access_token = settings.w_mapbox_token if settings.w_mapbox_token else MAPBOX_ACCESS_TOKEN
style = settings.w_mapbox_style if settings.w_mapbox_style else MAPBOX_STYLE
return schema.MapConfig(mapbox_token=access_token, mapbox_style=style) | [
"def test_api_get_frontend_configuration_default(self):\n\n response = self.client.get(\"/api/config.json\")\n\n self.assertEqual(\n response.json(),\n {\n \"API_URL\": \"http://localhost:8070/api\",\n \"JITSI_DOMAIN\": \"meeting.education\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sanitize user input by escaping or removing potentially harmful input using a whitelist-based approach with bleach, as recommended by OWASP. | def sanitize_input(self, untrusted_text, strip_characters=False):
try:
# Test if the untrusted text is percent encoded
# before running bleech.
if unquote(untrusted_text) != untrusted_text:
use_percent_encoding = True
else:
use_perc... | [
"def sanitize(self, s):\n s = s.lower().replace(\" \", \"\").replace(\"-\", \"\").replace(\",\", \"\").replace(\":\", \"\").replace(\"&\",\"and\").replace(\"(\",\"\").replace(\")\",\"\").strip()\n # Additional sanitization rules\n s = s.replace(\"sulphate\",\"sulfate\")\n return s",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
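The `sanitize_input` document above is truncated. A minimal sketch of the whitelist approach its docstring describes, reusing the percent-encoding check from the visible lines; relying on bleach's default tag/attribute whitelist here is an assumption:

```python
from urllib.parse import quote, unquote
from bleach import clean

def sanitize_input(untrusted_text, strip_characters=False):
    """Whitelist-based sanitization; round-trips percent-encoded input."""
    if unquote(untrusted_text) != untrusted_text:
        # Input was percent encoded: decode, clean, then re-encode.
        return quote(clean(unquote(untrusted_text), strip=strip_characters))
    return clean(untrusted_text, strip=strip_characters)
```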
Wrapper for the clean function of bleach to be able to catch when illegal UTF-8 is processed. | def _safe_clean(self, untrusted_text, strip_characters=False):
try:
return clean(untrusted_text, strip=strip_characters)
except KeyError:
current_app.logger.warn('A malicious user tried to crash the application by '
'sending illegal UTF-8 in an... | [
"def _safe_clean(untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. | def to_dict(self, flat=True):
if flat:
d = {}
for k, v in iteritems(self):
v = self.sanitize_input(v)
d[k] = v
return d
return dict(self.lists()) | [
"def from_shodan_flattened(raw: Union[list, dict]) -> dict:\n return flatten(from_shodan(raw))",
"def flatten(data: Dict) -> Dict[str, Any]:\n return recursive_flatten(\"\", data)",
"def doc_from_flat(doc, flat):\n def doc_from_flat_inner(doc, pos):\n if isinstance(doc, (list, tuple)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sanitized, type conversion get. The value identified by `key` is sanitized, and if `type` is provided, the value is cast to it. | def get(self, key, default=None, type=None):
try:
val = self.sanitize_input(self[key])
if type is not None:
val = type(val)
except (KeyError, ValueError):
val = default
return val | [
"def get_generic(self, _key: str, _type):\n set_func = {\n \"bool\" : self.get_bool,\n \"float\" : self.get_float,\n \"int\" : self.get_int,\n \"point\" : self.get_point,\n \"points\": self.get_points,\n \"str\" : self.get_str\n }\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The headers from the WSGI environ as immutable and sanitized | def headers(self):
return SanitizedEnvironHeaders(self.environ) | [
"def headers(self) -> dict:\n return self._flask_request.headers",
"def get_wsgi_headers(self, environ):\n headers = Headers(self.headers)\n location = None\n content_location = None\n content_length = None\n status = self.status_code\n\n # iterate over the headers... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the average kernel | def _compute_average(kernel: np.array, reps_i: List[np.array], reps_j: List[np.array]):
# Count the number of atoms in the rows and columns
# Works by accessing where the atomic number is stored in the FCHL representation
natoms_i = np.array([np.greater(x[:][0][1], 0).sum() for x in reps_i])
natoms_j ... | [
"def _mean(self):\n mat = self._factorize(self.matrix, self.xdef)\n mat = self._rdc_x(mat, self.xdef)\n ysects = self._by_ysect(mat, self.ydef)\n return np.expand_dims([np.nansum(ymat[:, 0] /\n np.nansum(ymat[:, -1]))\n for ymat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the default output of the prediction, which equals the observables of the AMICI model. We need to check that the call to AMICI was successful (status == 0) before writing the output. | def _default_output(amici_outputs):
amici_nt = [
len(edata.getTimepoints())
for edata in self.amici_objective.edatas
]
amici_ny = len(self.output_ids)
amici_np = len(self.amici_objective.x_names)
outputs = []
output... | [
"def build_output(self, model): # pylint: disable=no-self-use\n if model.mode != utils.INFER:\n model.score = tf.nn.softmax(model.logits, name=\"score\")\n model.preds = tf.argmax(model.logits, axis=-1)\n model.output_dict = {\"score\": model.score, \"preds\": model.preds}\n else:\n model... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encapsulate the call to amici. This allows using variable scoping as a means to clean up memory after calling amici, which is beneficial when large models with large datasets are used. | def _wrap_call_to_amici(
self, amici_outputs, x, sensi_orders, mode, parameter_mapping, edatas
):
chunk = self.amici_objective(
x=x,
sensi_orders=sensi_orders,
mode=mode,
parameter_mapping=parameter_mapping,
edatas=edatas,
retur... | [
"def acontexmanager(func):\n func.__returns_acontextmanager__ = True\n return func",
"def post_compute(self):\n pass",
"def _apply_fit(self,raw_chop=None,chop=None,idx=None):\n ica_obj = None\n self._ics_found_svm = None\n\n fname_ica,fname = self._get_chop_name(raw_chop,chop=N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the $WIND_BASE environment variable. If $WIND_BASE isn't set, return a default of /usr/powerpc-wrs-vxworks/wind_base | def get_wind_base():
wind_base = os.getenv('WIND_BASE')
if wind_base:
return path.abspath(wind_base)
return path.abspath(path.join('/usr', 'powerpc-wrs-vxworks', 'wind_base')) | [
"def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path",
"def get_base_dir():\n global BASE_DIR\n return BASE_DIR",
"def default_base_dir():\n cwd = P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Both methods 'get_method_of_class_java' and 'get_method_of_class_java2' work correctly. | def get_method_of_class_java2(cls, db, class_name=None, class_entity=None):
if class_entity is None:
class_entity = cls.get_class_entity_by_name(db=db, class_name=class_name)
method_list = class_entity.ents('Define', 'Java Method ~Unknown ~Unresolved ~Jar ~Library')
# print('len meth... | [
"def Node2Method(self, node): \n ##TODO(GuoChenkai) Nodef to Encodedmethod\n ## convert through the method_name\n #res = [] \n #methods = self.d.get_method(gvm_node.method_name)\n #for i in methods:\n #if i.get_name() == gvm_node.method_name:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
upload each file to logmuncher | def upload(self, filenames):
print("I am going to upload the following files", filenames)
for f in filenames:
print("uploading", f)
self.filenames = args.filenames
payload = {
'email': self.email,
'title': os.path.basename(f)
... | [
"def test_upload_run_logs(self):\n pass",
"def log_record_upload(records: List[dict], endpoint: str) -> None:\n for record in records:\n log = \"Record: %s added to collection: %s on trial: %s on assay: %s\" % (\n record[\"file_name\"] if \"file_name\" in record else \" \",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Randomly generates a list of artists which the target_user has never heard. It will compare the artists via a randomly generated user | def recommend_random_artists_RB(UAM, u_idx, train_aidx):
all_idx = range(0, UAM.shape[0])
random_u_idx = random.sample(np.setdiff1d(all_idx, [u_idx]), 1)[0]
# cannot generate the own user
if random_u_idx == u_idx:
        return recommend_random_artists_RB(UAM, u_idx, train_aidx)
u_aidx = np.nonzero(UAM[u_idx,:])[0]... | [
"def recommend_CF_our(UAM, user_id, artists):\n\n\n users = helper.read_csv(USERS_FILE)\n artists_array = []\n neighbor_array = get_user_neighbors(UAM, user_id)['neighbor_array']\n sim_users = get_user_neighbors(UAM, user_id)['sim_users']\n artist_idx_u = art... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that implements a CF recommender. It takes as input the UAM, metadata (artists and users), the index of the seed user (to make predictions for) and the indices of the seed user's training artists. It returns a list of recommended artist indices | def recommend_CF_our(UAM, user_id, artists):
users = helper.read_csv(USERS_FILE)
artists_array = []
neighbor_array = get_user_neighbors(UAM, user_id)['neighbor_array']
sim_users = get_user_neighbors(UAM, user_id)['sim_users']
artist_idx_u = artists # indic... | [
"def recommend_random_artists_RB(UAM, u_idx, train_aidx):\n all_idx = range(0, UAM.shape[0])\n random_u_idx = random.sample(np.setdiff1d(all_idx, [u_idx]), 1)[0]\n\n # cannot generate the own user\n if random_u_idx == u_idx:\n recommend_random_artists_RB(UAM, u_idx)\n\n u_aidx = np.nonzero(UAM... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dropout for sparse tensors. | def sparse_dropout(x, keep_prob, noise_shape): # The dropout layer for sparse matrix
random_tensor = keep_prob
random_tensor += tf.random_uniform([noise_shape], dtype=tf.float64)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_ou... | [
"def dropout_sparse(x, keep_prob, num_nonzero_elems):\n\tnoise_shape = [num_nonzero_elems]\n\trandom_tensor = keep_prob\n\trandom_tensor += torch.rand(noise_shape)\n\tdropout_mask = torch.floor(random_tensor).bool()\n\t\"\"\" \"\"\"\n\tpre_out = x[dropout_mask]\n\t\"\"\" \"\"\"\n\treturn pre_out * (1./keep_prob)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
_track_ Track the jobs while in condor. This returns a three-way ntuple: first, the total number of jobs still running; second, the jobs that need to be changed; third, the jobs that need to be completed | def track(self, jobs, info = None):
# Create an object to store final info
trackList = []
changeList = []
completeList = []
runningList = []
noInfoFlag = False
# Get the job
jobInfo = self.getClassAds()
if jobInfo == None:
retu... | [
"def updateJobs(currentJobs, jobTrack):\n for i in range(len(currJobs)):\n try:\n jobTrack[i] = currJobs[i].is_alive()\n except:\n jobTrack[i] = 0\n return jobTrack",
"def number_of_pending_jobs():\n cmd = [\"squeue\", \"-u\", \"lstanalyzer\", \"-h\", \"-t\", \"pending... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kill a list of jobs based on the WMBS job names | def kill(self, jobs, info = None):
for job in jobs:
jobID = job['jobid']
# This is a very long and painful command to run
command = 'condor_rm -constraint \"WMAgent_JobID =?= %i\"' % (jobID)
proc = subprocess.Popen(command, stderr = subprocess.PIPE,
... | [
"def kill_all(self):\n for job_id in self.job_ids:\n cmd = \"kill %s\" % (job_id)\n os.system(cmd)",
"def aws_kill_jobs_command(\n self, args: Namespace, extra_args: List[str], argv: List[str]\n ) -> None:\n scheduler = self.get_scheduler(args)\n\n # Get AWS ex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
_makeSubmit_ For a given job/cache/spec make a JDL fragment to submit the job | def makeSubmit(self, jobList):
if len(jobList) < 1:
#I don't know how we got here, but we did
logging.error("No jobs passed to plugin")
return None
jdl = self.initSubmit()
# For each script we have to do queue a separate directory, etc.
for job in ... | [
"def generate_submit_job(self, submitoptions={}):\n\n # dictionary to contain specific submit options\n submit = {}\n\n submit.update(copy.deepcopy(self.submit_options))\n submit.update(copy.deepcopy(submitoptions))\n\n # add arguments\n submit[\"arguments\"] = \"$(ARGS)\"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
_getCEName_ This is how you get the name of a CE for a job | def getCEName(self, jobSite):
if not jobSite in self.locationDict.keys():
siteInfo = self.locationAction.execute(siteName = jobSite)
self.locationDict[jobSite] = siteInfo[0].get('ce_name', None)
return self.locationDict[jobSite] | [
"def getJobName(self):\n xpath = self.root_tag + \"/updateParameters\" + self.version_filter + \"/jobName\"\n self.debug(\"getDeveloperEmail(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = self.getData(xpath)\n value = \"\"\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
_getClassAds_ Grab classAds from condor_q using xml parsing | def getClassAds(self):
constraint = "\"WMAgent_JobID =!= UNDEFINED\""
jobInfo = {}
command = ['condor_q', '-constraint', 'WMAgent_JobID =!= UNDEFINED',
'-constraint', 'WMAgent_AgentName == \"%s\"' % (self.agent),
'-format', '(JobStatus:\%s) ', 'JobStatu... | [
"def get_attribute(self):\n shop_obj = self.env['sale.shop']\n connection_obj = self.env['ebayerp.osv']\n results = False\n attribute = False\n\n if self:\n print(\"-------self._ids----\", self._ids)\n if isinstance(self, int):\n ids = [self._i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the exception and details in human-readable form. You can specify an IO stream object in the `output` parameter. By default text is printed to standard error. | def print_exception(self, output=None):
if not output:
output = sys.stderr
text = u"stream failed. reason: %s\n" % self.message
text += u"exception: %s: \n" % self.exception.__class__.__name__
text += u"node: %s\n" % self.node
try:
text += unicode(self... | [
"def ShowException(self):\n (etype, value, tb) =sys.exc_info()\n # remove myself from traceback\n tblist =traceback.extract_tb(tb)[1:]\n msg = ' '.join(traceback.format_exception_only(etype, value)\n +traceback.format_list(tblist))\n self.output.write_exc(ms... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
mask all the pixels that either contain (bs[i], ls[i]) or whose center lies within dist of the point | def ps2maskpix(nside, bs, ls, dist, nest=False):
nestin = True
npix = healpy.nside2npix(nside)
mask = np.ones(npix)
pixel_size = pix_size(nside)
if not isinstance(dist, np.ndarray):
dists = np.ones(len(bs)) * dist
else:
dists = dist
depth_min = min(dists / pixel_size)
if... | [
"def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask",
"def to_apply_mask(img, bbox):\n x1, y1, x2, y2 = bbox\n img[:,y1:y2,x1:x2] = img[:,y1:y2,x1:x2].normal_(0.0, 0.1) \n return img",
"def la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
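The `ps2maskpix` document above is truncated. A self-contained sketch of the described point-source masking using healpy's `query_disc`; the coordinate conventions assumed here (galactic (b, l) in degrees, `dist` in radians) are not confirmed by the visible code:

```python
import numpy as np
import healpy

def ps2maskpix(nside, bs, ls, dist, nest=False):
    """Zero out pixels whose centers lie within `dist` of any (b, l) point."""
    mask = np.ones(healpy.nside2npix(nside))
    theta = np.radians(90.0 - np.asarray(bs, dtype=float))  # colatitude
    phi = np.radians(np.asarray(ls, dtype=float))
    dists = np.broadcast_to(np.asarray(dist, dtype=float), theta.shape)
    for t, p, r in zip(theta, phi, dists):
        vec = healpy.ang2vec(t, p)
        # inclusive=True also masks pixels that merely overlap the disc
        mask[healpy.query_disc(nside, vec, r, inclusive=True, nest=nest)] = 0
    return mask
```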
return the min index i such that number < array[i]; return len(array) if array[-1] < number; if nearest = True, then return the index of the array entry closest to the number | def findIndex(array, number, nearest=False):
if array[0] > number:
return 0
elif array[-1] < number:
if nearest:
return len(array) - 1
else:
return len(array)
else:
imin = 0
imax = len(array)
while imax > imin + 1:
imed = (i... | [
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n if array[idx] > value:\n return idx - 1\n elif array[idx] <= value:\n return idx",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
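The `findIndex` document above is cut off inside its bisection loop. The standard library's `bisect` implements the same search, so an equivalent sketch looks like this; the `find_index` name and the `nearest` handling below match the docstring but are assumptions, not the dataset's exact code:

```python
import bisect

def find_index(array, number, nearest=False):
    """Smallest i with number < array[i]; len(array) if no such i exists."""
    i = bisect.bisect_right(array, number)
    if not nearest:
        return i
    if i == 0:
        return 0
    if i == len(array):
        return len(array) - 1
    # Pick whichever neighbour is closer to `number`.
    return i if array[i] - number < number - array[i - 1] else i - 1
```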
Assert that makefile.yml will be searched in RKD_PATH | def test_loads_from_file_is_searching_in_rkd_path(self):
yaml_loader = YamlFileLoader([])
d = tempfile.TemporaryDirectory()
os.environ['RKD_PATH'] = d.name
with open(d.name + '/makefile.yml', 'w') as f:
f.write('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
:... | [
"def test_find_path_by_name_founds_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\n version: org.riotkit.rkd/yaml/v1\n impor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that lookup paths include RKD_PATH and internal RKD directories | def test_get_lookup_paths_includes_internal_path_as_well_as_rkd_path(self):
yaml_loader = YamlFileLoader([])
os.environ['RKD_PATH'] = 'SOME-PATH-THERE'
try:
paths = yaml_loader.get_lookup_paths('harbor-internal/')
finally:
os.environ['RKD_PATH'] = ''
de... | [
"def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))",
"def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that makefile.yml will be searched in RKD_PATH | def test_find_path_by_name_founds_path(self):
yaml_loader = YamlFileLoader([])
d = tempfile.TemporaryDirectory()
os.environ['RKD_PATH'] = d.name
with open(d.name + '/makefile.yml', 'w') as f:
f.write('''
version: org.riotkit.rkd/yaml/v1
imports: []
... | [
"def test_loads_from_file_is_searching_in_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\nversion: org.riotkit.rkd/yaml/v1\nimports: [... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that find_path_by_name() will not return anything if nothing searched was found | def test_find_path_by_name_does_not_found_anything(self):
yaml_loader = YamlFileLoader([])
self.assertEqual('', yaml_loader.find_path_by_name('some-file-that-does-not-exists', '')) | [
"def nonexistent_path_nonexistent_name(self):\n self.assertIsNone(get_folder_by_path(path=\"/home/not_a_user/\", name=\"not_a_test_folder\"))",
"def existing_path_nonexistent_name(self):\n self.assertIsNone(get_folder_by_path(path=self.r_path, name=\"not_a_test_folder\"))",
"def test_find_path_by_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes a dict from test ids and ranked training ids, labels, and scores. | def get_prediction_map(test_ids, train_ids_labels_and_scores, top_k):
prediction_map = dict()
for test_index, test_id in enumerate(test_ids):
hex_test_id = utils.to_hex(test_id)
aggregate_scores = {}
for _, label, score in train_ids_labels_and_scores[test_index][:top_k]:
if ... | [
"def calculate_metrics_dict(scores, y, lr_predicted, label):\n X1, X2 = Xy_to_Xn(lr_predicted, y)\n\n return {'cllr' + label: round(calculate_cllr(X1, X2).cllr, 4),\n 'auc' + label: roc_auc_score(y, scores),\n 'accuracy' + label: accuracy_score(y, scores > .5)}",
"def map(self, fingerp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
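The aggregation loop above is truncated right after the empty accumulator. A sketch of the usual score-pooling pattern this shape of function follows — summing scores per candidate label and keeping the best-scoring label — where the summing rule and the result layout are assumptions, and the `utils.to_hex` keying from the prefix is dropped to keep the sketch self-contained:

```python
def get_prediction_map(test_ids, train_ids_labels_and_scores, top_k):
    """Map each test id to the best-pooled label among its top_k neighbours."""
    prediction_map = {}
    for test_index, test_id in enumerate(test_ids):
        aggregate_scores = {}
        for _, label, score in train_ids_labels_and_scores[test_index][:top_k]:
            # Pool scores for candidates that share a label.
            aggregate_scores[label] = aggregate_scores.get(label, 0.0) + score
        label, score = max(aggregate_scores.items(), key=lambda kv: kv[1])
        prediction_map[test_id] = {'class': label, 'score': score}
    return prediction_map
```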
Gets predictions using embedding similarity and local feature reranking. | def get_predictions(model: AbstractEmbeddingModel,
rerank: AbstractRerankStrategy,
labelmap,
num_to_rerank,
top_k,
distance_func='cosine'):
train_image_paths = [
x for x in pathlib.Path(
c... | [
"def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)",
"def getPrediction(self):\r\n \treturn self.prediction",
"def predictions_relevance(self):\n raise NotImplementedError",
"def predict():\n if model:\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
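The retrieval pipeline above breaks off while listing training images. A rough sketch of the embed-retrieve-rerank flow the signature implies, with NumPy standing in for the abstract pieces — the `embed()` and `rerank()` callables are hypothetical stand-ins for `AbstractEmbeddingModel` and `AbstractRerankStrategy`, not the source's API:

```python
import numpy as np

def get_predictions(embed, rerank, train_paths, train_labels, test_paths,
                    num_to_rerank, top_k):
    """Rank train images per test image by cosine similarity of embeddings,
    then rerank the closest num_to_rerank candidates with a local scorer."""
    train_emb = np.stack([embed(p) for p in train_paths])        # (N, D)
    train_norms = np.linalg.norm(train_emb, axis=1)
    results = []
    for test_path in test_paths:
        q = embed(test_path)                                     # (D,)
        sims = train_emb @ q / (train_norms * np.linalg.norm(q) + 1e-12)
        shortlist = np.argsort(-sims)[:num_to_rerank]
        # Rerank the shortlist with the (assumed) local-feature scorer.
        rescored = sorted(((i, rerank(test_path, train_paths[i]))
                           for i in shortlist), key=lambda t: -t[1])
        results.append([(train_paths[i], train_labels[i], s)
                        for i, s in rescored[:top_k]])
    return results
```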
Test handling of incomplete pgs. Requires 4 osds. | def test_incomplete_pgs(ctx, config):
testdir = teuthology.get_testdir(ctx)
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.ke... | [
"def test_parse_phout_incomplete_fields_count(self, remove_data_file):\n\n filename = remove_data_file()\n data = self.set_phout_data()\n data.append(\"a\\tb\")\n self.set_phout_file(filename, data)\n\n # check exception text\n with pytest.raises(\n ValueErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the class initialised correctly. Mainly I want to check that when the class was initialised, it ran tally_hpo_terms() correctly. Check that the counts of the HPO terms used in the probands match what is expected. | def test_setup(self):
self.assertEqual(self.graph.total_freq, 3)
self.assertEqual(self.graph.get_ids_per_term("HP:0002011"),
{'person_02', 'person_03'} )
# check that a redundant term has been added, even though a more specific
# descendant term was included... | [
"def test_calculate_217_count(self):\r\n\r\n self.DUT.hazard_rate_type = 1\r\n self.DUT.operating_voltage = 1.25\r\n self.DUT.acvapplied = 0.025\r\n self.DUT.rated_voltage = 3.3\r\n\r\n self.assertFalse(self.DUT.calculate_part())\r\n self.assertEqual(self.DUT.hazard_rate_mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check that get_descendants works correctly | def test_get_descendants(self):
# check that a high-level node returns the expected set of nodes
self.assertEqual(self.graph.get_descendants("HP:0000118"), \
set(['HP:0000707', 'HP:0002011', 'HP:0000924']))
# check that a terminal node doesn't have any descendants
... | [
"def test_children_of(self):\n expected = [self.second_category, self.third_category, ]\n self.assertEqual(expected, models.Category.objects.children_of(self.root_category))\n\n expected = [self.third_category, ]\n self.assertEqual(expected, models.Category.objects.children_of(self.secon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
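The test's second assertion is truncated. For context, descendant queries over an ontology DAG like this are one-liners with networkx, assuming edges run parent → child; the toy HPO fragment below is illustrative, not the test fixture:

```python
import networkx as nx

# Toy HPO fragment consistent with the assertions above (parent -> child).
g = nx.DiGraph([("HP:0000001", "HP:0000118"),
                ("HP:0000118", "HP:0000707"),
                ("HP:0000118", "HP:0000924"),
                ("HP:0000707", "HP:0002011")])

# Every node reachable from a term is one of its descendants.
assert nx.descendants(g, "HP:0000118") == {"HP:0000707", "HP:0002011", "HP:0000924"}
assert nx.descendants(g, "HP:0002011") == set()  # terminal node: no descendants
```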
check that find_common_ancestors works correctly | def test_find_common_ancestors(self):
# check that two terms on different arms only return their common
# ancestors
self.assertEqual(self.graph.find_common_ancestors('HP:0000924', \
'HP:0000707'), set(["HP:0000001", "HP:0000118"]))
# check that two identical... | [
"def test_common_ancestors(self):\n tree = tree_from_tuples(\n (1,\n (3,\n (4, None, None),\n (5, None, None)\n ),\n (6,\n (15, None, None),\n (7,\n None,\n (16, None, None)\n )\n )\n )\n )\n node... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
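Continuing the toy graph above, common ancestors fall out just as directly: intersect the two ancestor sets, each widened with the term itself so that identical inputs share themselves as a common ancestor (whether the class under test includes the terms themselves is an assumption, though it is consistent with the truncated third assertion):

```python
def find_common_ancestors(g, term1, term2):
    # nx.ancestors returns every node with a directed path *to* the term.
    return (nx.ancestors(g, term1) | {term1}) & (nx.ancestors(g, term2) | {term2})

assert find_common_ancestors(g, "HP:0000924", "HP:0000707") == {"HP:0000001", "HP:0000118"}
```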
Initialize the OpenAQ instance. | def __init__(self, version='v1', **kwargs):
self._baseurl = 'https://api.openaq.org'
super(OpenAQ, self).__init__(version=version, baseurl=self._baseurl) | [
"def initialize(self):\n try:\n api_key = self._pomodoro_service.get_config(\"task.asana\", \"api_key\")\n self.asana_api = self._get_asana_api(api_key)\n except Exception as ex:\n logger.error(\"Error initializing plugin: {0}\".format(ex))",
"def __init__(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provides data about individual measurements | def measurements(self, **kwargs): | [
"def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])",
"def get_data(self,sensor):\n if sensor.id in self.measurements:\n return self.measurements[sensor.id]\n else: raise Exception(\"Sensor has no measurements available\")",
"def get_measur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds monthly accrual days to all users who have not yet accrued days in the current month | def accrue_days():
# Get the current month in ISO format
today = date.today()
current_month = today.strftime('%Y-%m-01T00:00:00.000Z')
# Get profiles that have not been updated yet this month
profiles = Profile.objects.filter(update_timestamp__lt=current_month)
for profile in profiles:
... | [
"async def count_monthly_users(self) -> int:\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return await self.db_pool.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )",
"def inactive_lost_accounts_last_month(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
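The per-profile body of the loop is elided. A sketch of the plausible remainder — bump an accrual counter and stamp the profile so the filter skips it next run — where `days_accrued` and `monthly_accrual_rate` are assumed field names (only `update_timestamp` appears in the visible filter) and `Profile` is the Django model from the snippet:

```python
from datetime import date

def accrue_days():
    """Add monthly accrual days to users not yet accrued this month."""
    today = date.today()
    current_month = today.strftime('%Y-%m-01T00:00:00.000Z')
    profiles = Profile.objects.filter(update_timestamp__lt=current_month)
    for profile in profiles:
        profile.days_accrued += profile.monthly_accrual_rate  # assumed fields
        profile.update_timestamp = current_month  # skip on the next run
        profile.save()
```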
A dictionary of cookie names and values. | def cookies(self):
# TODO:jek: pass this off to the driver? let it use a customized csv
# reader to split & unpack?
cookie_strings = self.selenium('getCookie').split('; ')
cookies = dict()
for cookie_string in cookie_strings:
if not cookie_string:
cont... | [
"def cookies(self) -> Dict[str, http.cookies.Morsel]:\n if not hasattr(self, \"_cookies\"):\n self._cookies = (\n http.cookies.SimpleCookie()\n ) # type: http.cookies.SimpleCookie\n if \"Cookie\" in self.headers:\n try:\n pars... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
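The parsing loop breaks off at the `continue` for empty chunks. The remainder is standard name=value splitting; a standalone sketch of the same logic, with the `selenium('getCookie')` call replaced by a plain argument so it runs by itself (splitting on the first '=' matches the cookie format but is an assumption about the original):

```python
def parse_cookies(raw):
    """Parse a '; '-joined cookie string into a name -> value dict."""
    cookies = {}
    for cookie_string in raw.split('; '):
        if not cookie_string:
            continue  # skip the empty chunk a trailing separator leaves behind
        name, _, value = cookie_string.partition('=')
        cookies[name] = value
    return cookies

assert parse_cookies('a=1; b=2; theme=dark') == {'a': '1', 'b': '2', 'theme': 'dark'}
```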
Fill all possible fields with key/[value] pairs from values. | def _fill_fields(fields, values):
unfilled = []
for name, field_values in values:
if len(field_values) == 1:
value = field_values[0]
else:
value = field_values
try:
fields[name] = value
except ValueError:
            unfilled.append((name, field_values))
    return unfilled | [
"def fill(fields, adapter):",
"def prepare_values(self, fields, values):\n\n return {\n field['name']: field['type'].prepare_value_for_db(\n field['field'],\n values[field_id] if field_id in values else values[field['name']]\n )\n for field_id,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fill form with values, retrying fields that fail with ValueErrors. If multiple passes are required to set all fields in values, the document will be resynchronized between attempts, with wait_for called between each attempt. | def _fill_form_async(form, values, wait_for=None, timeout=None):
browser = form.browser
unset_count = len(values)
while values:
values = _fill_fields(form.fields, values)
if len(values) == unset_count:
# nothing was able to be set
raise ValueError("Unable to set field... | [
"def fill(self, selector, values):\n if not self.exists(selector):\n raise Exception(\"Can't find form\")\n \n for field in values:\n self.set_field_value(\"%s [name=%s]\" % (selector, field),\n values[field])\n return True",
"def _fill_form_item(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
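The retry path after the failed-pass check is truncated. A sketch of the resynchronise-and-retry shape the docstring describes — on partial progress, refresh the form from the document and optionally wait before the next pass. It reuses `_fill_fields` from the previous record; `browser.sync_document()` and the `wait_for` call are assumptions drawn from the docstring, not visible code:

```python
def fill_form_async(form, values, wait_for=None, timeout=None):
    """Set form fields, retrying ValueError failures after a resync."""
    browser = form.browser
    unset_count = len(values)
    while values:
        values = _fill_fields(form.fields, values)
        if len(values) == unset_count:
            # No field accepted its value this pass: give up.
            raise ValueError("Unable to set fields: %r" % [n for n, _ in values])
        if values:
            browser.sync_document()  # assumed resync hook
            if wait_for:
                browser.wait_for(wait_for, timeout)  # assumed wait helper
        unset_count = len(values)
```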
The fastest Selenium locator expression for this element. | def _locator(self):
try:
return 'id=' + self.attrib['id']
except KeyError:
return 'xpath=' + self.fq_xpath | [
"def find_element_by_xpath(self, xpath):\n raise NotImplementedError",
"def byXpath(self, xpath):\r\n return self.find_element((By.XPATH, xpath))",
"def _change_to_selenium_elem(self, elem=None, **kwargs):\n if elem:\n soup_element = elem.soup_hoth\n else:\n sou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Placeholder function for a future one that will calculate the change mesh (if it doesn't exist). | def calculate_change_mesh(self):
"def UpdateMesh(self):\r\n pass",
"def mesher():\n return Mesher(func=sphere, delta=0.1)",
"def reload(self):\n with open(self.filename,'r') as meshfile:\n # scan file until we reach a mesh format declarator\n if not scan_for_keyword(meshfile, \"$meshformat\"):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lists objects related via given property | def list_related(self, prop="http://www.w3.org/2004/02/skos/core#related"):
return [e.object for e in Triple.objects.filter(subject=self,
predicate__uri=prop)
.order_by("predicate")] | [
"def get_related_properties(self):\n return []",
"def get_objects_list(self) -> list:\n user = self.request.user\n site_name = self.model_admin.admin_site.name\n\n objects = []\n for obj in self.queryset:\n # Investigate the field paths in display_fields:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |