| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Navigates to a search for posts with a specific tag on IG. | def search_tag(self, tag):
self.driver.get(self.get_tag_url.format(tag)) | [
"def findPosts(self, searchFilter):\n raise NotImplementedError",
"def by_tags(request, tags):\n if tags is None:\n return tag_list(request)\n\n tags_l = tags.split('+')\n\n posts = []\n\n for post in published_posts():\n if all([tag in post.taglist for tag in tags_l]):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Downloads all images from a user's profile. | def download_user_images(self, user):
self.nav_user(user)
img_srcs = []
finished = False
while not finished:
finished = self.infinite_scroll() # scroll down
img_srcs.extend([img.get_attribute('src') for img in self.driver.find_elements_by_class_n... | [
"def download_profile_picture(self, username: str) -> str:\n\n self.profile = Profile.from_username(self.loader.context, username)\n self.loader.download_profilepic(self.profile)\n img_file_name = os.listdir(username)[0]\n return img_file_name",
"def download_images(self):\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Likes a number of a user's latest posts, specified by n_posts. | def like_latest_posts(self, user, n_posts, like=True):
action = 'Like' if like else 'Unlike'
self.nav_user(user)
posts = self.driver.find_element_by_class_name('g47SY ').text
posts = int(posts.replace(',',''))
if n_posts > posts:
n_posts = posts
i... | [
"def count_likes(cls, post):\n count = 0\n likes = cls.all().filter(\"post = \", post)\n for like in likes:\n if like.do_like:\n count += 1\n return count",
"def like_recent_post(self): # have to wait for elements to load or obscure error will occur.\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a folder named after a user to store the image, then downloads the image to the folder. | def download_image(self, src, image_filename, folder):
folder_path = './userImages/{}'.format(folder)
os.makedirs(folder_path, exist_ok=True)
img_filename = 'image_{}.jpg'.format(image_filename)
folder = './userImages/' + folder
urllib.request.urlretrieve(src, '{}/{}'.form... | [
"def _download_image(self, image_url, game_name):\n directory_name = 'images/' + TwitchPreviewCrawler.slugify(game_name) # First get the directory name, e.g. DOTA_2\n file_name = directory_name + '/' + image_url.split('ttv/')[1].split(\".jpg\")[0] + \"_\" + str(time.mktime(time.gmtime())) + \".jpg\" #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract all sections for the given module from the current machine | def get_sections(module):
sections_path = "/sys/module/{module}/sections/.*".format(module=module)
output_file = "/tmp/{module}.sections".format(module=module)
with open(output_file, "wt") as out:
for filepath in glob.glob(sections_path):
filename = os.path.basename(filepath)
out.write("%s,%s\n"... | [
"def load_sections(self):\n pass",
"def analyse_modules(section_data_dict):\n modules = {}\n seen_sections = OrderedDict()\n for section in section_data_dict:\n for [sub_section, address, size, object_name] in section_data_dict[section][\"contents\"]:\n sw_module = classify_by_ob... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save the original user object when initializing the instance | def __init__(self, original_user):
super().__init__()
self.original_user = original_user | [
"def _persist_login(self, user, **options):\n pass",
"def save_user (self):\n User.user_list.append(self)",
"def initialize_user():\n flask.g.user = readit.User(flask.session.get('session_key', None))\n flask.g.user.user_id = flask.session.get('user_id', None)",
"def save_login(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save a backup copy of a file. The backup copy is saved as the name of the original with the value of suffix appended. If multi is True and a backup already exists, an additional backup is made with a numeric index value appended to the name. The backup copy file mode is set to read-only. (See the sketch after this row.) | def backup_file(path, suffix='.orig', multi=True):
log = logger.getlogger()
backup_path = path + suffix
version = 0
while os.path.exists(backup_path) and multi:
version += 1
backup_path += "." + str(version)
log.debug('Make backup copy of original file: \'%s\'' % backup_path)
copy... | [
"def backup(fname):\r\n shutil.copy(fname, fname + '-backup')",
"def backup(cls, filename, subdirectory=\"bak\", hide=True, quiet=False):\n import os\n import shutil\n from .path9 import Path\n from .dir9 import Dir\n from .time9 import Time\n filename = Path.full(file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
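A note on the numbered backups described in the row above: because the loop extends `backup_path` in place, a second collision would yield `file.orig.1.2` rather than `file.orig.2`. Below is a minimal sketch of the indexing the docstring describes, rebuilding the candidate name on every pass. Only `path`, `suffix`, and `multi` come from the row above; the function name and the use of `shutil.copy2`/`os.chmod` are assumptions.

```python
import os
import shutil
import stat

def backup_file_sketch(path, suffix='.orig', multi=True):
    """Copy `path` to a free backup name and make the copy read-only."""
    backup_path = path + suffix
    version = 0
    while os.path.exists(backup_path) and multi:
        version += 1
        # rebuild the candidate from scratch so indices do not accumulate
        backup_path = path + suffix + "." + str(version)
    shutil.copy2(path, backup_path)
    # set the backup copy file mode to read-only, as the docstring requires
    os.chmod(backup_path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    return backup_path
```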
Remove line(s) from a file containing a regex pattern. Any lines matching the regex pattern will be removed. | def remove_line(path, regex):
log = logger.getlogger()
log.debug('Remove lines containing regex \'%s\' from file \'%s\'' %
(regex, path))
for line in fileinput.input(path, inplace=1):
if not re.match(regex, line):
print(line, end='') | [
"def filter_lines(text: str, regex: str, flags=re.MULTILINE) -> str:\n return re.sub(regex, '', text, flags=flags)",
"def clean_nottrimmed_fastqs(filename_regex):\n for f in glob.glob(os.path.join(runs_scratch_dir,'*',filename_regex)):\n os.remove(f)",
"def grep(source_file, pattern, ignore_case=Fa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace line(s) in a file containing a regex pattern. Any lines matching the regex pattern will be removed and replaced with the 'replace' string. | def replace_regex(path, regex, replace):
log = logger.getlogger()
log.debug('Replace regex \'%s\' with \'%s\' in file \'%s\'' %
(regex, replace, path))
for line in fileinput.input(path, inplace=1):
print(re.sub(regex, replace, line), end='') | [
"def replace_pattern_line(filename, pattern, line2replace):\n\n tmpfile = filename+'.tmp2'\n os.system(' cp -f ' + filename + ' ' + tmpfile)\n tmp = open(tmpfile,'r')\n fil = open(filename,'w')\n for line in tmp:\n fil.write(line2replace if pattern in line else line)\n\n tmp.close()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Launch a subprocess and return the Popen process object. This is non-blocking, which is useful for long-running processes. | def sub_proc_launch(cmd, stdout=PIPE, stderr=PIPE):
proc = Popen(cmd.split(), stdout=stdout, stderr=stderr)
return proc | [
"def spawn_subprocess(cls, **Popen_args):\n args = [sys.executable, '-m', cls.__module__]\n conn, proc = ipc.spawn_subprocess(args, **Popen_args)\n return cls(conn), proc",
"def _spawn_subprocess(self, cmd, shell=False, **env):\n environ = os.environ.copy()\n environ.update(env)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Launch a subprocess and display a simple time counter while waiting. (See the sketch after this row.) | def sub_proc_wait(proc):
cnt = 0
rc = None
while rc is None:
rc = proc.poll()
print('\rwaiting for process to finish. Time elapsed: {:2}:{:2}:{:2}'.
format(cnt // 3600, cnt % 3600 // 60, cnt % 60), end="")
sys.stdout.flush()
cnt += 1
print('\n')
resp, er... | [
"def run_timed_subprocess(cmd, timeout):\n proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n preexec_fn=os.setsid, close_fds=True)\n for i in range(timeout):\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
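The counter in the row above formats elapsed time with `{:2}`, which space-pads single digits (` 1: 5: 3`), and the visible part of the loop never sleeps. A zero-padded variant is sketched below; the one-second sleep is an assumption about what the truncated tail of the original does.

```python
import sys
import time

def sub_proc_wait_sketch(proc):
    """Poll a Popen object once per second with an HH:MM:SS counter."""
    elapsed = 0
    while proc.poll() is None:
        hours, rem = divmod(elapsed, 3600)
        minutes, seconds = divmod(rem, 60)
        print('\rwaiting for process to finish. '
              'Time elapsed: {:02}:{:02}:{:02}'.format(hours, minutes, seconds),
              end='')
        sys.stdout.flush()
        time.sleep(1)  # assumed; the original's sleep is in the truncated part
        elapsed += 1
    print()
    return proc.returncode
```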
Return True if any/all of the fileglobs are present in the URL. | def files_present(url, fileglobs, _all=True):
any_present = False
all_present = True
fileglobsstr = ','.join(fileglobs)
if fileglobs:
cmd = (f'wget -r -l 10 -nd -np --spider --accept={fileglobsstr} {url}')
reply, err, rc = sub_proc_exec(cmd)
err = err.replace('%2B', '+')
... | [
"def match_globs(filename, globlist):\n for glob in globlist:\n if fnmatch(filename, glob):\n return True\n return False",
"def check_for_local_file(self, filename=None):\n files = glob.glob(filename)\n return bool(files)",
"def url_has_any_extension(url: UrlT, extensions: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Input a URL from user. The URL is checked for validity using curl and wget and the user can continue modifying it indefinitely until a response is obtained or he can enter 'sss' to skip (stop) entry. If a fileglob is specified, the specified url is searched recursively (crawled) up to 10 levels deep looking for matches... | def get_url(url='http://', fileglob='', prompt_name='', repo_chk='', contains=[],
excludes=[], filelist=[]):
print(f'Enter {prompt_name} URL. ("sss" at end of URL to skip)')
if fileglob:
print('Do not include filenames in the URL. A search of the URL')
print('will be made up to 10 le... | [
"def main(url_or_file):\n url_or_file = url_or_file or ih.user_input('url')\n ph.soup_explore(url_or_file)",
"def do_scan(self, args):\n for url_format in [url.strip() for url in open(args)]:\n logging.info(\"Trying %s\" % url_format)\n self.options.url_format = url_format\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prompt user to select a choice. Entered choice can be a member of choices or items, but a member of choices is always returned as choice. If choices is not specified a numeric list is generated. Note that if choices or items is a string it will be 'split' using sep. If you wish to include sep in the displayed choices o... | def get_selection(items, choices=None, prompt='Enter a selection: ', sep='\n',
allow_none=False, allow_retry=False):
if not items:
return None, None
if not isinstance(items, (list, tuple)):
items = items.rstrip(sep)
items = items.split(sep)
if not choices:
c... | [
"async def prompt_choices(msg_header: str, user: User, choices: list):\n if len(choices) == 1:\n selection = choices[0]\n await user.send(\"Only 1 choice. Selecting {}\".format(selection))\n return selection\n msg = msg_header + \"\\nPlease select a choice by replying with the number of y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search local disk for src_name and allow interactive selection if more than one match. Note that the user is not given the option to change the search criteria. Searching starts recursively in the /home directory and expands to the entire file system if no match is found in /home. | def get_src_path(src_name):
log = logger.getlogger()
while True:
cmd = (f'find /home -name {src_name}')
resp1, err, rc1 = sub_proc_exec(cmd)
if rc1 != 0:
log.error(f'Error searching for {src_name}')
cmd = (f'find /root -name {src_name}')
resp2, err, rc2 = sub... | [
"def select_source(args):\r\n if args.source is None:\r\n print('Choose a source path.')\r\n source = os.path.normpath(askdirectory())\r\n print(f'Source path: {source}')\r\n else:\r\n source = args.source\r\n if not os.path.exists(source):\r\n sys.exit('Error: So... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the indices for the column positions in a text table | def get_col_pos(tbl, hdrs, row_char='-'):
log = logger.getlogger()
tbl = tbl.splitlines()
hdr_span = {}
col_idx = {}
for row in tbl:
dashes_span = re.search(fr'{row_char}+\s+{row_char}+', row)
if dashes_span:
dashes_span = list(re.finditer(r'-+', row))
col_sp... | [
"def get_indexes(table, col, v):\n li = []\n start = 0\n for row in table[col]:\n if row == v:\n index = table[col].index(row, start)\n li.append(index)\n start = index + 1\n return li",
"def get_position(char, table):\n for row in xrange(5):\n for col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads a CSV file into data and target arrays. Last column == target | def load_csv(filename):
data = genfromtxt(filename, delimiter=',')[:, :-1]
target = genfromtxt(filename, delimiter=',', usecols=(-1), dtype=str)
uniqueItems = np.unique(target)
numericTarget = []
for item in target:
# Convert strings to numbers
numericTarget.append(np.where(uniqueIte... | [
"def load_from_csv(self, feature_path=None, target_path=None, nrows=None, ncols=None):\n\n if nrows is not None:\n print(\"Using {0} rows\").format(nrows)\n \n if feature_path is None:\n feature_path = pjoin(\"/data/ml2/vishakh/patient-similarity\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse file and return traj_q and traj_t. | def read_goal_traj(filename):
with open(filename) as csv_file:
header_log = has_header(csv_file)
csv_read = csv.reader(csv_file, delimiter=',')
if header_log:
next(csv_read)
traj_data = numpy.array(list(csv_read), dtype=float)
traj_t = traj_data[:,0]
traj_q... | [
"def parse_t1(path):\n if not os.path.isfile(path):\n raise InputError('Could not find file {0}'.format(path))\n t1 = None\n with open(path, 'rb') as f:\n for line in f:\n if 'T1 diagnostic:' in line:\n t1 = float(line.split()[-1])\n return t1",
"def load_traj_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a message of RobotTrajectory type using read_goal_traj and return it as "path". | def robot_traj_generate(filename):
[traj_t, traj_q] = read_goal_traj(filename)
path = RobotTrajectory()
path.joint_trajectory.header.frame_id = "/world"
path.joint_trajectory.joint_names = JOINT_NAMES
path.joint_trajectory.points = [JointTrajectoryPoint(positions=traj_q[1,:],
velocities=[0]... | [
"def get_goal_from_trajectory(self, trajectory):\n pass",
"def get_goal_from_trajectory(\n self, trajectory: ts.TimeStep\n ) -> types.NestedArray:\n pass",
"def get_trajectory_with_goal(self, trajectory, goal):\n pass",
"def send_trajectory(self):\n\n rospy.loginfo(\"Start going ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the given value is valid for the given connection kind. | def validate_value(cls, kind, value):
if kind == 'device' or kind == 'file' or kind == 'pipe':
# TODO: Validate that device path exists on target?
# TODO: Validate that datastore and file path exists on target?
# TODO: Validate that pipe path exists on target?
ret... | [
"def is_valid_input(kind):\n if kind not in [\"bit\", \"phase\", \"both\"]:\n message = f\"The kind argument must be one of bit, phase or both, received {kind} instead\"\n raise Exception(message)\n return True",
"def test_staleness_invalid_value(self):\n connection = self._make_connect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a SerialConnection object of the given kind and value. | def __init__(self, kind, value, options):
logger.debug("Creating SerialConnection: "
"kind: %s, value: %s, options: %s",
kind, value, options)
self.kind = self.validate_kind(kind)
"""Connection type string"""
self.value = self.validate_value(self... | [
"def __str__(self):\n return (\"<SerialConnection kind: {0} value: {1} options: {2}>\"\n .format(self.kind, self.value, self.options))",
"def __init__(self, name: str, value_type_info: TypeInformation):\n if not isinstance(value_type_info, PickledBytesTypeInfo):\n raise Val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Represent SerialConnection object as a string. | def __str__(self):
return ("<SerialConnection kind: {0} value: {1} options: {2}>"
.format(self.kind, self.value, self.options)) | [
"def GetSerialString(self):\n return self._send_string",
"def __str__(self):\n rep = \"<portSpec id=%s name=%s type=%s signature=%s />\"\n return rep % (str(self.id), str(self.name), \n str(self.type), str(self.sigstring))",
"def __str__(self) -> str:\n output = \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get/set mapping of network names to networks. | def network_map(self):
return self._network_map | [
"def network_pmap(self):\n\n if self._ports == None:\n return None\n\n out = {}\n\n for p in self._ports:\n for n in p.networks:\n if n.identifier in out:\n out[n.identifier].append(p.identifier)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create 'deploy' CLI subparser if it doesn't already exist. | def create_subparser(self):
import argparse
if self.ui.subparser_lookup.get('deploy', None) is None:
# Create 'cot deploy' parser
parser = self.ui.add_subparser(
'deploy',
usage=self.ui.fill_usage("deploy", [
"PACKAGE esxi ..."... | [
"def create_command(args):\n if args.subparser_name == \"analyze\":\n cmd = instarepo.commands.analyze.AnalyzeCommand(args)\n elif args.subparser_name == \"fix\":\n cmd = instarepo.commands.fix.FixCommand(args)\n elif args.subparser_name == \"list\":\n cmd = instarepo.commands.list.Lis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test invalid JSON on a TOML style. | def test_invalid_json(request):
# pylint: disable=line-too-long
ProjectMock(request).style(
'''
["another.json".contains_json]
some_field = """
{ "this": "is missing the end...
"""
["another.json".with]
extra = "key"
'''
).flake8().assert_... | [
"def test_13_invalid_json(self):\n data = '{\"foo\": {\"bar\": invalid}}'\n\n with self.app.test_client() as client:\n client.post(\"/error\", content_type=\"application/json\", data=data)\n data = json.loads(g.exceptional)\n request = data[\"request\"]\n pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test configuration for JSON files. | def test_json_configuration(request):
ProjectMock(request).style(
"""
["your.json".has]
an_extra = "key"
["their.json"]
x = 1
"""
).flake8().assert_errors_contain(
"""
NIP001 File nitpick-style.toml has an incorrect style. Invalid config:\x1b[32m
... | [
"def test_json_template(self):\n\n self.copy_files([\"template.json\"])\n\n path = os.path.join(self.working_dir, \"template.json\")\n\n print path\n self.render_config_template(\n elasticsearch={\"hosts\": self.get_host()},\n template_overwrite=\"true\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
multiprocessing sharing with 'device' and 'dtype' | def test_multiprocessing(self, device=None, dtype=None):
buf = io.BytesIO()
t = MetaTensor([0, 0] if dtype in (torch.int32, torch.int64) else [0.0, 0.0], device=device, dtype=dtype)
t.is_batch = True
if t.is_cuda:
with self.assertRaises(NotImplementedError):
F... | [
"def allocate_shared_mem(self):\n # Get array shape and data types\n if self.snapshot.snapshot_type == \"numpy\":\n self.input_shape, self.input_dtype = self.descriptor_calculator. \\\n read_dimensions_from_numpy_file(\n os.path.join(self.snapshot.input_npy_dir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the username of the user | def set_user_name(self, user_name):
self.user_name = user_name | [
"def setUserName(*args, **kwargs):\n \n pass",
"def _username(self, new_username):\n self.__username = new_username",
"def setUserName(self, userName):\n self[Header.PARAM_USERNAME] = userName",
"def _set_username(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utyp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print documents in a collection. (See the usage sketch after this row.) | def print_mdb_collection(collection_name):
for doc in collection_name.find():
print(doc) | [
"def print_doc_list(self):\n\t\t# function to print the documents and their document id\n\t\t#print docIdMap\n\t\tfor key in docIdMap:\n\t\t\tprint \"Doc ID: \" + str(key) + \" ==> \" + str(docIdMap[key])",
"def _show_contents(self):\n for database_name in self._mk.connection.database_names():\n print 'DA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
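A hypothetical usage sketch for `print_mdb_collection` above, assuming a local MongoDB instance reachable via pymongo; the database and collection names here are made up.

```python
from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017/')  # assumed local instance
media_db = client['media']         # hypothetical database name
cd_collection = media_db['cds']    # hypothetical collection name
print_mdb_collection(cd_collection)  # prints every document returned by find()
```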
Write content to the collection and then print the collection's documents | def write_and_print_content(media_db, collection, file_name):
media_collection = media_db[collection]
json_content = read_json_file(file_name)
media_collection.insert_many(json_content)
print_mdb_collection(media_collection)
return media_collection | [
"def _show_contents(self):\n for database_name in self._mk.connection.database_names():\n print 'DATABASE: %s' % database_name\n db = self._mk.connection[database_name]\n for collection_name in db.collection_names():\n print 'COLLECTION: %s' % collection_name\n collection = db[collec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Demonstrate how to relate data documents to each other by printing a collector's collection. | def relate_data(collector_collection, cd_collection):
for name in collector_collection.find():
print(f'List for {name["name"]}')
query = {"name": name["name"]}
for a_cd in cd_collection.find(query):
print(f'{name["name"]} has collected {a_cd}') | [
"def _show_contents(self):\n for database_name in self._mk.connection.database_names():\n print 'DATABASE: %s' % database_name\n db = self._mk.connection[database_name]\n for collection_name in db.collection_names():\n print 'COLLECTION: %s' % collection_name\n collection = db[collec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prompt whether or not to drop the collections and start fresh. If the user answers yes, both the collector_collection and cd_collection will be dropped. | def prompt_drop(collector_collection, cd_collection):
yorn = input("Drop data?")
if yorn.upper() == 'Y':
cd_collection.drop()
collector_collection.drop() | [
"def test_drop_collection(self):\n self.Person(name='Test').save()\n\n collection = self.Person._meta['collection']\n self.assertTrue(collection in self.db.collection_names())\n\n self.Person.drop_collection()\n self.assertFalse(collection in self.db.collection_names())",
"def r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve or create a SyncInfo entity from the given parameters. | def from_params(cls, remote_key, version, content_hash, target_key=None,
user=None):
entity = datastore.Entity(SYNC_INFO_KIND, name=remote_key)
entity.update({"version": version, "content_hash": content_hash})
if target_key:
entity.update({"target_key": target_key})
... | [
"def kind():\n\n return SYNC_INFO_KIND",
"async def async_get_or_create(hass, config_entry, entity):\n\n i2c_address = entity.address\n\n # DOMAIN data async mutex\n try:\n async with MCP23017_DATA_LOCK:\n if i2c_address in hass.data[DOMAIN]:\n component = hass.dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the entity version. | def version(self):
return self.__entity["version"] | [
"def test_get_entity_version(self):\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Chart, fqn=self.entity.fullyQualifiedName\n )\n res = self.metadata.get_entity_version(\n entity=Chart, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Increment the entity version. | def incr_version(self):
self.__entity["version"] += 1
return self.__entity["version"] | [
"def test_incremented(self):\n version = _incrementResourceVersion(_incrementResourceVersion(None))\n updated = _incrementResourceVersion(version)\n self.expectThat(updated, IsInstance(unicode))\n self.expectThat(updated, AfterPreprocessing(int, Equals(int(version) + 1)))",
"def bump_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the content hash. | def set_content_hash(self, content_hash):
self.__entity["content_hash"] = content_hash | [
"def setHash(self, mcanHash):\n self.setByte('hashH', mcanHash >> 8)\n self.setByte('hashL', mcanHash & 0xff)",
"def set_hashes(self, url, hashes):\n raise NotImplementedError",
"def hash(self):\n if not self.contenthash:\n self.getHash()\n\n return self.contenthash",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the sync target key. | def target_key(self):
return self.__entity.get("target_key") | [
"def id(self):\n\n from mbed_cloud.foundation._custom_methods import pre_shared_key_id_getter\n\n return pre_shared_key_id_getter(self=self)",
"def getmpk(self):\n return self.wallet.get_master_public_key()",
"def target(self):\n\n key = self.__entity.get(\"target_key\")\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the sync target entity. | def target(self):
key = self.__entity.get("target_key")
return datastore.Get(key) | [
"def target_key(self):\n\n return self.__entity.get(\"target_key\")",
"def _get_entity(self) -> \"adsk::core::Ptr< adsk::core::Base >\" :\n return _core.Selection__get_entity(self)",
"def get_id(self):\n return self.target_id",
"def get_target(self, project=None):\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get one or more synchronization info entities. | def get(cls, keys):
if isinstance(keys, datastore_types.Key):
keys_ = [keys]
elif isinstance(keys, list):
keys_ = keys
else:
raise TypeError("SyncInfo.get(keys) takes a key or list of keys")
results = []
for key in keys_:
try:
... | [
"def lock_info(self):\n infos = []\n\n locks = Lock.query.valid_locks(self.object_type, self.object_id)\n for lock in locks:\n infos.append({'creator': lock.creator,\n 'time': lock.time,\n 'token': lock.token,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the key for this synchronization info entity. | def key(self):
return self.__entity.key() | [
"def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)",
"def key(self):\n if self.dirty:\n self._update_hash_and_blob()\n return self._key",
"def entity_key(entity):\n key = entity.k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the entity kind for synchronization info entities. | def kind():
return SYNC_INFO_KIND | [
"def entity_protocol(self, name):\n\n return self._config.get_entity_def(name).protocol",
"def entity_type(self) -> pulumi.Input['EntityTypes']:\n return pulumi.get(self, \"entity_type\")",
"def entity_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entity_type\")",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
sources: list of encoder source addresses. plot_every_n: zero (don't plot until the end) or N (int, plot every N events). accumulate_n: accumulate all (0) or reset the array every n shots. fignum: matplotlib figure number. | def __init__ ( self,
sources = None,
plot_every_n = "0",
accumulate_n = "0",
fignum = "1" ) :
# initialize data
opt = PyanaOptions()
self.sources = opt.getOptStrings(sources)
print("pyana_encoder, %d sources:... | [
"def plot_txrss_live(self, numofplottedsamples=250):\n numoftx = self.__numoftx\n\n if numoftx > 7:\n print('Number of tracked tx needs to be <=7!') # see length of colorvec\n print('Terminate method!')\n return True\n\n rss = np.zeros((numoftx, 1))\n te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the type of keys that are allowed to be stored in the STDict object. Type can be either "S" = strings or "I" = integers. Raise an error if a wrong type is given or if the STDict object already contains keys of a specific type and the type is changed. By default, when a new dictionary is created, the keys are... | def change_type(self, type_):
if type_ != "S" and type_ != "I":
raise TypeError("Error: Type: str("+str(type_)+") not valid, str(S)=string and str(I)=integes.")
elif self._size == 0 or self._type == type_:
self._type = type_
else:
raise TypeError("Can't change... | [
"def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Help function to control errors for the STDict methods. key = key to check for correct type. | def _type_check(self, key):
if self._type == "I" and isinstance(key,str):
raise TypeError("STDict keys is set as type int()")
elif self._type == "S" and isinstance(key,int):
raise TypeError("STDict keys is set as type str()")
else:
return | [
"def test_key_init_unknown_type(self):\n with self.assertRaises(UtilsError) as context:\n key = Key(None)\n key.generate(key_type=0)\n self.assertEqual(u'1003', context.exception.event_id)",
"def _check_valid_key(self, key):\n if not isinstance(key, key_type):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Help function for the delete method. tree_root = root of tree (node), key = key to delete. | def _delete(self, tree_root, key):
if tree_root is None:
raise ValueError("key: " + str(key) + " not in STDict")
if key < tree_root._key:
tree_root._left = self._delete(tree_root._left, key)
elif key > tree_root._key:
tree_root._right = self._delete(tree_root.... | [
"def delete(self, key):\n # BEGIN SOLUTION\n\n # find path before the key.\n def find_path_before(node, key):\n # path until key\n path_before = []\n while node.val != key:\n path_before.append(node)\n if node.val > key:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Help function for the set and get methods. tree_find = root of tree to search in. lookup_key = key to search for. set_or_get decides whether the method it's used in is set or get. set_value = value to set; only used if set_or_get is not "get". | def _help_set_get(self, tree_find, lookup_key, set_or_get, set_value=None):
if tree_find is None:
if set_or_get == "get":
return tree_find
else:
raise ValueError("key: " + str(lookup_key) + " not in STDict")
if lookup_key == tree_find._key:
... | [
"def update(self, key, newValue):\n \n Node = self.searchNode(key,returnNode)\n if Node != None and Node.isValueSet():\n Node.value = newValue\n return Node\n \n raise pathNotExistsTriesException(\"key not found\")",
"def find(self, key):\n if ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test function for the script loudness_zwicker_stationary with a .wav file as input. The input file is provided by ISO 532-1 annex B3; compliance is assessed according to section 5.1 of the standard. One .png compliance plot is generated. | def validation_loudness_zwst():
#
# Test signal as input for stationary loudness
# # (from ISO 532-1 annex B3)
signals = [
{
"data_file": "input/ISO_532_1/Test signal 2 (250 Hz 80 dB).wav",
"N": 14.655,
"N_specif_file": "input/ISO_532_1/test_signal_2.csv",
... | [
"def test():\n # imports specific to this test\n import sys\n import warnings\n from scipy.io import wavfile\n import matplotlib.pyplot as plt\n from psola.experiment_config import ExperimentConfig\n\n # get the data and do the estimation\n filename = sys.argv[1] # filename is first comma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take in a string and return a dictionary of letter keys with values of how many times each letter appears in the string. (See the sketch after this row.) | def multiple_letter_count(string):
count = {}
for letter in string:
count[letter] = string.count(letter)
return count | [
"def get_occurrences(string):\n \n # Create an empty dictionary\n dic = {}\n \n # iterate though the characters of the string\n for char in string:\n \n char = char.lower()\n \n # if the chararacter is in the dictionary, add 1 to its value\n # if the character is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
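For comparison with the row above: the quadratic `str.count` loop can be replaced by a single pass with `collections.Counter`, which produces the same mapping (a sketch, not part of the dataset row):

```python
from collections import Counter

def multiple_letter_count_fast(string):
    # Counter tallies each character in one pass over the string,
    # avoiding the O(n^2) cost of calling str.count per character.
    return dict(Counter(string))

assert multiple_letter_count_fast("hello") == {'h': 1, 'e': 1, 'l': 2, 'o': 1}
```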
Returns the current screen as a numpy pixel array. | def _get_pixel_state(self):
screen_pixels = pygame.PixelArray(self.screen)
state = np.asarray(screen_pixels).T
screen_pixels.close()
# Reshape and convert to RGB
img = Image.fromarray(state)
state = np.array(img.convert('RGB').resize((84, 84)))
return state | [
"def frame(self):\n image = numpy.array(self.get_emulator_screen())\n return bgr_to_rgb(image)",
"def getScreenRGB(self):\n frame = pygame.surfarray.array3d(pygame.display.get_surface()).astype(np.uint8)\n frame = np.rot90(frame, 1, axes=(0, 1))\n frame = np.flipud(frame)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the state of the game as a numpy array of coords of the agent and the obstacles. [blue, red, top_1, bottom_1, right_1, left_1, top_2, bottom_2, right_2, left_2] Where (top_2, bottom_2, right_2, left_2) is (0, 0, 0, 0) if there is no second obstacle. | def _get_coord_state(self):
# Get the player coords
blue_x, blue_y = self.blue_ball.position()
red_x, red_y = self.red_ball.position()
coords = [blue_x, blue_y, red_x, red_y]
# Get the closest obstacle
current_obstacle_set = self.obstacle_manager.oldest_obstacle_set()
... | [
"def get_state(self) -> numpy.ndarray:\n env_data = [\n bool(self.gym_env.lander.awake),\n bool(self.gym_env.game_over),\n copy.copy(self.gym_env.prev_shaping),\n copy.copy(self.gym_env.prev_reward),\n bool(self.gym_env.legs[0].ground_contact),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the red and blue balls. | def _init_balls(self, draw_rects):
# Create blue ball
blue_x = BOARD_WIDTH//2 - CIRCLE_RADIUS
blue_y = BOARD_HEIGHT - DIST_TO_BOTTOM
blue_theta = np.pi
self.blue_ball = Ball(blue_x, blue_y, blue_theta,
CIRCLE_RADIUS, SPIN_STEP, draw_rects)
... | [
"def _draw_balls(self):\n self.blue_ball.draw(self.screen, BLUE)\n self.red_ball.draw(self.screen, RED)",
"def init_colors(self):\n\t\tcurses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n\t\tcurses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)\n\t\tcurses.init_pair(3, curses.COLOR_BLUE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws the gray circle. | def _draw_circle(self):
pygame.draw.circle(self.screen, GREY,
(BOARD_WIDTH//2, BOARD_HEIGHT - DIST_TO_BOTTOM),
CIRCLE_RADIUS, CIRCLE_WIDTH) | [
"def drawCircle(self, x, y, radius, color): \n dx = radius\n dy = 0\n xChange = 1 - 2 * radius\n yChange = 1\n radiusError = 0\n while (dx >= dy):\n self.drawPixel(x + dx, y + dy, color)\n self.drawPixel(x - dx, y + dy, color)\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws the player balls. | def _draw_balls(self):
self.blue_ball.draw(self.screen, BLUE)
self.red_ball.draw(self.screen, RED) | [
"def _draw_players(self):\n self.player1.draw()\n self.player2.draw()",
"def _draw_all(self) -> None:\n self._draw_player()\n self._draw_world()",
"def draw(self):\n for boid in self.boids:\n boid.draw()",
"def draw(self, view):\n for i in self._bricks:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws all the current obstacles. | def _draw_obstacles(self):
for obstacle_set in self.obstacle_manager:
for obstacle in obstacle_set:
pygame.draw.rect(self.screen, WHITE, obstacle.get_rect()) | [
"def _draw_map(self):\n\n for obstacle in self._obstacles:\n obstacle.draw(self._axes)",
"def redraw_obstacles(self):\n for i in self.blocked:\n pdraw.rect(self._display, COLOURS['black'], (i[0], i[1], 19, 19))",
"def _draw_all(self) -> None:\n self._draw_player()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves all obstacles one step. | def _move_obstacles(self):
for obstacle_set in self.obstacle_manager:
for obstacle in obstacle_set:
obstacle.move() | [
"def simulate_move(self):\n for atom in self.list_of_atoms:\n atom.move(self.grid)",
"def move(self):\r\n # move agents\r\n for agent in self.agents:\r\n agent.move(self.agents)",
"def move(self):\n for segment in range(len(self.snake)):\n if segment ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display Game Over message, and give choice to restart or exit. | def _game_over(self):
game_over_surface = self.game_over_font.render("Game Over", False, RED)
self.screen.blit(game_over_surface, (50, BOARD_HEIGHT//2))
restart_surface = self.restart_font.render("Press ESC to quit or RETURN to restart", False, RED)
self.screen.blit(restart_surface, (80... | [
"def restart_game(self):\n self.init_window()\n self.make_choice()\n self.display_updated_choice()\n self.another_choice_button.place(x=670, y=300)",
"def win_game(self):\n self.end_game(\"You have won!\")",
"def win(self):\n\n self.__default_text.set(\"Game won!\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for cases by number. Supports search by precise case number or wildcard case number searches. | def search(self, case_numbers=[], case_details=False, timeout=60):
portal_page = PortalPage(self.driver)
if portal_page.is_current_page:
portal_page.go_to_smart_search()
else:
self.go_to_home()
results = []
CaseInfoKls = self._get_case_info_mapped_class()
... | [
"def search(\n self,\n case_numbers=[],\n start_date=None,\n end_date=None,\n case_types=[],\n download_dir=None,\n headless=True\n ):\n if not case_numbers and not start_date:\n raise SearchConfigurationError(\"You must provide case numbers or a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
I'm using init_app to call the app.load_extension function because if I just call the setup function directly, the extension name would not be added to the __extensions attribute of the app. | def init_app(app):
app.load_extension(__name__) | [
"def register_extensions(app):\n bcrypt.init_app(app)\n ma.init_app(app)\n socketio.init_app(app)",
"def init_app(self, app):\r\n self.app = app\r\n app.extensions = getattr(app, 'extensions', {})\r\n app.extensions['oauthlib.provider.oauth1'] = self",
"def init_app(self, app):\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize Kinesis as the consumer input, SNS as the alert publishing system, and PostgreSQL as the output storage system. | def __init__(self,
docker_device_id,
country='mx',
stream_name='InputReadings',
start_date=datetime(2020, 1, 5, 4, 39, 0),
end_date=datetime(2020, 2, 5, 4, 40, 0),
kinesis_produce_many=False,
kinesis_b... | [
"def test_kinesis_consumer(sdc_builder, sdc_executor, aws):\n # build consumer pipeline\n application_name = get_random_string(string.ascii_letters, 10)\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send raw data for one time interval to Kinesis. | def _send_data_to_kinesis(self, accelerator_data: pd.DataFrame):
i = 0
records = []
if accelerator_data is not None and accelerator_data.size > 0:
num_readings = accelerator_data.size
# logging.info(f'Start_time: {str(start_date)}, Number of records: {str(num_readings)}'... | [
"def put_record(self):\n timestamp = datetime.datetime.utcnow()\n part_key = self.ipAddr\n data = random_alphanumeric(10)\n print( \"put {} to kinesisStrem {}\".format( data, self.streamName ) )\n self.kinesisClient.put_record(\n StreamName=self.streamName, \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the Markdown fenced block report and the skips report. | def print_report(args: Args, blocks: List[FencedBlock]) -> None:
report = []
filename = click.format_filename(args.markdown_file)
title1 = filename + " fenced blocks"
if blocks:
text1 = fenced_block_report(blocks, title=title1)
report.append(text1)
roles = [b.role.name for b in bloc... | [
"def test_indented_block_comment(self):\n \n inp = '2_4_block_comment.txt'\n self.run_single_file_case(inp)",
"def test_md027_good_indented_code_block_in_block_quote():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete volumes in accounts corresponding to a list of clients | def ClientDeleteVolumes(purge,
client_ips,
client_user,
client_pass,
mvip,
username,
password):
log = GetLogger()
# Run all of the client operations in parallel
al... | [
"def delete_volumes(client, volumes):\n failed_volumes = []\n for volume in volumes:\n try:\n client.delete_volume(VolumeId=volume)\n except ClientError as error:\n code = error.response['Error']['Code']\n if code == 'VolumeInUse':\n client.detach_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the volumes for a client; run as a thread. | def _ClientThread(client_ip, client_user, client_pass, mvip, username, password, purge):
log = GetLogger()
SetThreadLogPrefix(client_ip)
log.info("Connecting to client")
client = SFClient(client_ip, client_user, client_pass)
account_name = client.HostnameToAccountName()
cluster = SFCluster(mvi... | [
"def ClientDeleteVolumes(purge,\n client_ips,\n client_user,\n client_pass,\n mvip,\n username,\n password):\n log = GetLogger()\n\n # Run all of the client operations in p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a ``ray.air.lightning.LightningCheckpoint`` from a checkpoint path. | def from_path(
cls,
path: str,
*,
preprocessor: Optional["Preprocessor"] = None,
) -> "LightningCheckpoint":
assert os.path.exists(path), f"Lightning checkpoint {path} doesn't exists!"
cache_dir = tempfile.mkdtemp()
new_checkpoint_path = os.path.join(cache_d... | [
"def load_checkpoint(self, load_dir, epoch=None):\n if epoch:\n checkpoint_file_path = os.path.join(\n self.log_dir, '{}-{}.ckpt'.format(self.checkpoint_prefix, epoch))\n else:\n checkpoint_files = glob.glob(os.path.join(load_dir,\n self.checkp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Aligns the lemma and form to identify the prefix, root and suffix for each of them. | def align(lemma, form):
alemma, aform, _ = levenshtein(lemma, form)
lspace = max(len(alemma) - len(alemma.lstrip('_')),
len(aform) - len(aform.lstrip('_')))
tspace = max(len(alemma[::-1]) - len(alemma[::-1].lstrip('_')),
len(aform[::-1]) - len(aform[::-1].lstrip('_')))
... | [
"def doAlignment(self):\n\n seq1len = len(self.seq1)\n seq2len = len(self.seq2)\n\n # 1st subscript = sequence 1,\n # 2nd subscript = sequence 2\n scores = [ [0 for i in range(seq2len+1)] for j in range(seq1len+1) ]\n tracebk = [ [0 for i in range(seq2len+1)] for j in range... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the given string is prefixed with prefix | def is_prefixed_with(string, prefix):
return string.find(prefix) == 0 | [
"def ensure_starts_with(s: str, prefix: str) -> str:\n if not s.startswith(prefix):\n return prefix + s\n return s",
"def dotted_starts_with(string, prefix):\n return prefix == string or string.startswith('%s.' % prefix)",
"def prefix_match(uri: str, prefix: str) -> bool:\n if not uri.sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes suffix w from the input word v. If v = uw (u = prefix, w = suffix), then u = v w⁻¹. (See the note after the next row.) | def eliminate_suffix(v, w):
u = v.rstrip(w)
return(u) | [
"def eliminate_suffix(v, w):\n\n u = v.rstrip(w)\n return(u)",
"def eliminate_prefix(v, u):\n\n w = v.lstrip(u)\n return(w)",
"def removeIng(list):\n \n \"\"\" Tests: ['hovering', 'bring', 'handing']\n ['lovi.ng', 'bringing','string'] \"\"\"\n \n vowels = ['a','e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes prefix u from the given input word v. If v = uw (u = prefix, w = suffix), then w = u⁻¹ v. (See the note after this row.) | def eliminate_prefix(v, u):
w = v.lstrip(u)
return(w) | [
"def eliminate_suffix(v, w):\n\n u = v.rstrip(w)\n return(u)",
"def eliminate_suffix(v, w):\n\n u = v.rstrip(w)\n return(u)",
"def _strip_prefix(self, prefix, s):\n if s.startswith(prefix):\n return s[len(prefix):]\n else:\n return s",
"def compress_word(w):\n #w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
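A caveat on the two rows above: `str.rstrip`/`str.lstrip` strip a trailing/leading run of characters drawn from a *set*, not the literal affix, so `eliminate_suffix("banana", "na")` returns `"b"` rather than `"bana"`. A minimal sketch of literal affix removal (Python 3.9's `str.removesuffix`/`str.removeprefix` do the same):

```python
def remove_suffix(v, w):
    # drop the literal suffix w; str.rstrip(w) would instead strip any
    # trailing run of characters that appear anywhere in w
    return v[:-len(w)] if w and v.endswith(w) else v

def remove_prefix(v, u):
    return v[len(u):] if u and v.startswith(u) else v

assert "banana".rstrip("na") == "b"             # set-based stripping
assert remove_suffix("banana", "na") == "bana"  # literal suffix removal
```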
Inflects the given word by applying the operations to it. (See the usage sketch after this row.) | def inflect(word, operations):
for operation in sorted(operations):
method, chunk = operation.split('_')
if method == 'delete':
word = word.rstrip(chunk)
else:
word = word + chunk
return word | [
"def lookup(self, word):",
"def compile_word_ins(word):\n if word.isupper():\n terms = [('%s*%s' % (10**i, d)) for (i, d) in enumerate(word[::-1])]\n return '(' + '+'.join(terms) + ')'\n else:\n return word",
"def add_word(self,word,d):\n w=word.lower() \n # if w not in stop_w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
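A hypothetical usage sketch for `inflect` above. The operation strings follow the `method_chunk` convention the code splits on: a `delete` method strips via `str.rstrip`, any other method appends the chunk. The words and operation names here are made up.

```python
# any method other than 'delete' appends the chunk
assert inflect("talk", ["append_ed"]) == "talked"

# 'delete' uses str.rstrip, which strips a trailing run of characters
# from the set {'e', 'd'}; here that happens to remove the literal 'ed'
assert inflect("talked", ["delete_ed"]) == "talk"
```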
Make a deep copy of the profiles. | def copy(self):
return VerticalProfiles(
self.ratios.copy(),
self.height.copy(),
) | [
"def reinit_profiles():\n # Grab the remote profiles and merge it in\n try:\n server_url = config.get_server_url()\n new_profiles = util.fetch_file_from_web(server_url, \"/dcae-cli/profiles.json\")\n except:\n # Failing to pull seed profiles from remote server is not considered\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the weights matrix between two profiles. The weights matrix can then simply be applied (see the sketch after this row). The two given profiles must be sorted. It is assumed that they are vertical profiles following the emiproc convention. | def get_weights_profiles_interpolation(
from_p: np.ndarray, to_p: np.ndarray
) -> np.ndarray:
# Initialize parameters for the algorithm
i, j = 0, 0
last = 0.0
diff = np.zeros((len(to_p), len(from_p)))
while i < len(from_p) and j < len(to_p):
# Will check the distance with the last poi... | [
"def calc_dot_matrix_profile(seq, profile):\n\n aa_list = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',\n 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'] #, '-', 'B', 'Z', 'J']\n b62 = MatrixInfo.blosum62\n\n N = len(seq)\n alpha = 20\n W = 15\n \n score_vec = {}\n for aa_1 ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
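A usage sketch inferred from the shape of `diff`, which is `(len(to_p), len(from_p))`: under that assumption, ratios defined on the source levels map onto the target levels by a matrix product. The heights and ratios below are made up.

```python
import numpy as np

from_heights = np.array([10.0, 50.0, 100.0])      # source level tops (made up)
to_heights = np.array([10.0, 30.0, 50.0, 100.0])  # finer target levels (made up)

weights = get_weights_profiles_interpolation(from_heights, to_heights)
ratios_on_from = np.array([0.2, 0.5, 0.3])
# redistribute the profile mass onto the target levels
ratios_on_to = weights @ ratios_on_from
```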
Resample vertical profiles into one vertical profiles object. Allows profiles with different height levels to be grouped into one. Samples the profile on the given height levels. Uses a conservative interpolation method that ensures that even at a higher resolution the profile will be exactly the same. Note that this s... | def resample_vertical_profiles(
*profiles: VerticalProfile | VerticalProfiles,
specified_levels: np.ndarray | None = None,
) -> VerticalProfiles:
# Find the levels we want to use
if specified_levels is None:
levels = np.unique(np.concatenate([p.height for p in profiles]))
else:
leve... | [
"def copy(self):\n return VerticalProfiles(\n self.ratios.copy(),\n self.height.copy(),\n )",
"def interpProfiles(self,method='crtm-wrap'):\n for i in list( self.items.keys() ):\n for ii in list(range(self.nprof)):\n if(self.items[i].ndim>2): # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the vertical profile meets requirements: height must have positive values; height must have strictly increasing values; ratios must sum up to one; ratios must all be >= 0; ratios and height must have the same length; no nan values in any of the arrays. | def check_valid_vertical_profile(vertical_profile: VerticalProfile | VerticalProfiles):
assert isinstance(vertical_profile, (VerticalProfile, VerticalProfiles))
h = vertical_profile.height
r = vertical_profile.ratios
assert np.all(~np.isnan(r)) and np.all(~np.isnan(h)), "Cannot contain nan values"
... | [
"def detectMissingRatio(df):",
"def error_ratios_cross_val(output_folder):\n\n from parsers import CVOutputParser\n from utils import avg\n\n if not output_folder[-1] == '/':\n output_folder += '/'\n\n\n singleton_thresholds = [0, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 600, 700, 800, 900,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read a csv file containing vertical profiles. | def from_csv(file: PathLike) -> tuple[VerticalProfiles, list[str | tuple[str, str]]]:
# Read the file
df, cat_header, sub_header = read_profile_csv(file)
# Get the height levels
heights_mapping = {
float(col_name[:-1]): col_name
for col_name in df.columns
# Take only the column... | [
"def read_csv(csv_path=csv_path):\n movies = []\n file = open(csv_path, \"r\")\n lines = file.read().split('\\n')[1:-1]\n for line in lines:\n line = line.split(\"\\t\")\n movie = {}\n movie[\"name\"] = line[1]\n movie[\"year\"] = line[2]\n movies += [movie]\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test module cars93_summary.py by downloading cars93_summary.csv and testing that the shape of the extracted data is 6 rows and 4 columns. | def test_cars93_summary():
test_path = tempfile.mkdtemp()
x_train, metadata = cars93_summary(test_path)
try:
assert x_train.shape == (6, 4)
except:
shutil.rmtree(test_path)
raise
"def test_read_short_data() -> None:\n fo = open('data/short_data.csv', \"r\")\n e = Election(date(2000, 2, 8))\n e.read_results(fo)",
"def _csv_summary_info(self, rows, cols, line):\n print(self._just_str('CSV Name', self.csv_path))\n print(self._just_str('Total Rows with Errors', len(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For a new value, compute the new count, the new mean, and the new M2. mean accumulates the mean of the entire dataset, M2 aggregates the squared distance from the mean, and count aggregates the number of samples seen so far. (See the finalize sketch after this row.) | def update(existing_aggregate, new_value):
(count, mean, M2) = existing_aggregate
count = count + 1
delta = new_value - mean
mean = mean + delta / count
delta2 = new_value - mean
M2 = M2 + delta * delta2
return (count, mean, M2) | [
"def calculate_mean(self):\n\t\t\t\t\t\n avg = 1.0 * sum(self.data) / len(self.data)\n\t\t\n self.mean = avg\n \n return self.mean",
"def compute_means(self):\n del self.mean_vectors[:]\n for i in range(self.k):\n c=Counter()\n l=0\n for d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
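The row above is the update step of Welford's online algorithm. A finalize step (an assumption, not part of the row) recovers the variance from the running aggregate:

```python
def finalize(existing_aggregate):
    """Recover (mean, variance, sample_variance) from (count, mean, M2)."""
    (count, mean, M2) = existing_aggregate
    if count < 2:
        raise ValueError("need at least two samples")
    return (mean, M2 / count, M2 / (count - 1))

# fold values through update(), then finalize once at the end
agg = (0, 0.0, 0.0)
for x in [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]:
    agg = update(agg, x)
mean, variance, sample_variance = finalize(agg)  # 5.0, 4.0, ~4.571
```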
This function computes the transformation T that transforms one point in the camera coordinate system (x right, y up, z forward) to the point in the robot coordinate system (x forward, y left, z up). (The camera is fixed on the robot baselink.) P_r = P_c T, where P_c = [x, y, z, 1] is a row vector in the camera coordinate system (see the sketch after this row). Please note... | def compute_robot_T_camera(pose_rotation=[1, 0, 0, 0], pose_translation=[0.0, 0.0, 0.0]):
# compute the rotation matrix
# note that the scipy...Rotation accepts the quaternion in scalar-last format
pose_quat = [pose_rotation[1], pose_rotation[2], pose_rotation[3], pose_rotation[0]]
rot = Rotation.from_q... | [
"def compute_camera_T_world(pose_rotation, pose_translation):\n\n # build rotation matrix\n yaw_angle = pose_rotation[0] / 180.0 * np.pi\n pitch_angle = -1 * pose_rotation[1] / 180.0 * np.pi\n roll_angle = -1 * pose_rotation[2] / 180.0 * np.pi\n yaw = np.array([[np.cos(yaw_angle), -np.sin(yaw_angle),... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
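A minimal sketch of the quaternion handling described in the row above: reorder a scalar-first quaternion [w, x, y, z] into scipy's scalar-last order and assemble a 4×4 homogeneous transform. Note it builds the standard column-vector form (P' = T P); the docstring's row-vector convention (P_r = P_c T) is its transpose. The function name is an assumption.

```python
import numpy as np
from scipy.spatial.transform import Rotation

def homogeneous_transform(pose_rotation=(1, 0, 0, 0),
                          pose_translation=(0.0, 0.0, 0.0)):
    """Build a 4x4 transform from a scalar-first quaternion [w, x, y, z]."""
    w, x, y, z = pose_rotation
    rot = Rotation.from_quat([x, y, z, w])  # scipy expects scalar-last
    T = np.eye(4)
    T[:3, :3] = rot.as_matrix()
    T[:3, 3] = pose_translation
    return T  # transpose for the row-vector convention P_r = P_c T
```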
This function computes the transformation matrix T that transforms one point in the robot coordinate system to the world coordinate system. This function needs the scipy package scipy.spatial.transform.Rotation. P_w = P_r T, where P_r = [x, y, z, 1] is a row vector in the robot coordinate system (x forward, y left, z up). MUST NOTICE... | def compute_world_T_robot(pose_rotation, pose_translation):
# compute the rotation matrix
# note that the scipy...Rotation accepts the quaternion in scalar-last format
pose_quat = [pose_rotation[1], pose_rotation[2], pose_rotation[3], pose_rotation[0]]
rot = Rotation.from_quat(pose_quat)
rotation_ma... | [
"def compute_camera_T_world(pose_rotation, pose_translation):\n\n # build rotation matrix\n yaw_angle = pose_rotation[0] / 180.0 * np.pi\n pitch_angle = -1 * pose_rotation[1] / 180.0 * np.pi\n roll_angle = -1 * pose_rotation[2] / 180.0 * np.pi\n yaw = np.array([[np.cos(yaw_angle), -np.sin(yaw_angle),... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is written according to UpdateProjectMatrix() in IssacSimBoundingBox.cpp. It computes the transformation matrix T that transforms one point in the world coordinate system into the camera coordinate system (x right, y up, z forward). P_c = P_w T, where P_w = [x, y, z, 1] is a row vector denoting point location in the world coo... | def compute_camera_T_world(pose_rotation, pose_translation):
# build rotation matrix
yaw_angle = pose_rotation[0] / 180.0 * np.pi
pitch_angle = -1 * pose_rotation[1] / 180.0 * np.pi
roll_angle = -1 * pose_rotation[2] / 180.0 * np.pi
yaw = np.array([[np.cos(yaw_angle), -np.sin(yaw_angle), 0],
... | [
"def project_to_image_plane(self, point_in_world):\n fx = self.config.camera_info.focal_length_x\n fy = self.config.camera_info.focal_length_y\n\n image_width = self.config.camera_info.image_width\n image_height = self.config.camera_info.image_height\n\n # get transform between po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function generates the output text file for the Weight_unc_main program in case of geometry with fuselages. INPUT | def output_fuse_txt(f_nb, FLOORS_NB, ed, out, mw, adui, awg, afg, NAME):
out_name = 'ToolOutput/' + NAME + '/' + NAME\
+ '_Weight_unc_module.out'
OutputTextFile = open(out_name, 'w')
OutputTextFile.write('###############################################')
OutputTextFile.write('\n###### ... | [
"def output_txt(IS_DOUBLE_FLOOR, out, mw, ind, ui, NAME):\n out_name = 'ToolOutput/' + NAME + '/' + NAME\\\n + '_Weight_module.out'\n OutputTextFile = open(out_name, 'w')\n OutputTextFile.write('\\n###############################################')\n OutputTextFile.write('\\n###### AIRCRAFT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function decorator that creates a subclass of the Evaluator class out of a function that is capable of evaluating nodes. | def evaluator(*, requires):
def wrapper(function):
return EvaluatorMeta(function.__name__, (Evaluator,), {'evaluate': staticmethod(function), 'requires': requires})
return wrapper | [
"def accepts_evaluator(function):\n\n def decorated(*args, evaluator=None, **kwargs):\n if evaluator is None:\n return function(*args, **kwargs)\n else:\n return evaluator(function, args, kwargs)\n\n return decorated",
"def create_evaluator(self):\n pass",
"def N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set Segmentation array and `total_flux`. | def set_segmentation(self, seg_array):
self.seg = seg_array*1
self.seg_ids = list(np.unique(self.seg))
try:
self.total_flux = self.direct[self.seg == self.id].sum()
if self.total_flux == 0:
self.total_flux = 1
except:
self.total_flux = ... | [
"def set_diagnostics(self):\n # Cut on good pixels\n if self.sig_is_set:\n gdpx = self.sig > 0.\n else:\n gdpx = np.array([True] * self.wavelength.value.size)\n # Fill in attributes\n self._npix = len(self.data['flux'][self.select].compressed())\n if n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
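The heart of this row is summing the direct-image flux over pixels whose segmentation label matches the object id; a minimal numpy sketch (the arrays are made up):

```python
import numpy as np

seg = np.array([[0, 1, 1],
                [0, 1, 2],
                [2, 2, 2]])                        # segmentation labels
direct = np.arange(9, dtype=float).reshape(3, 3)   # direct-image flux

obj_id = 1
total_flux = direct[seg == obj_id].sum()
if total_flux == 0:          # same divide-by-zero guard as the record
    total_flux = 1
print(np.unique(seg).tolist(), total_flux)   # [0, 1, 2] 7.0
```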
Initialize Fitzpatrick 99 Galactic extinction | def init_galactic_extinction(self, MW_EBV=0., R_V=utils.MW_RV):
self.MW_F99 = None
if MW_EBV > 0:
self.MW_F99 = utils.MW_F99(MW_EBV*R_V, r_v=R_V) | [
"def __init__(self,ham,wfn,ci_basis_set): \n self.assign_hamiltonian(ham)\n self.assign_wavefunction(wfn)\n self.assign_ci_basis_set(ci_basis_set)\n self.assign_integral(ham,wfn)",
"def _init_latent_system(\n self,\n rng: jnp.ndarray,\n z: jnp.ndarray,\n **kw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an offset in Y to the spectral trace | def add_ytrace_offset(self, yoffset):
from .utils_c.interp import interp_conserve_c
self.ytrace_beam, self.lam_beam = self.conf.get_beam_trace(
x=(self.xc+self.xcenter-self.pad[1])/self.grow,
y=(self.yc+self.ycenter-self.pad[0])/self.grow,... | [
"def EffectivePlotOffsetY(self) -> float:",
"def EffectivePlotOffsetYDevice(self) -> int:",
"def y_offset(self):\n yoffs = float(self.query(\"WFMP:YOF?\"))\n return yoffs",
"def offset_stroke(self, offset=None):\r\n if offset is None:\r\n offset = self.channels['Stroke'][\"data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if subimage slice is fully contained within larger array | def contained_in_full_array(self, full_array):
sh = full_array.shape
if (self.sly_parent.start < 0) | (self.slx_parent.start < 0):
return False
if (self.sly_parent.stop >= sh[0]) | (self.slx_parent.stop >= sh[1]):
return False
return True | [
"def is_partly_within_image(self, image):\n return not self.is_out_of_image(image, fully=True, partly=False)",
"def _is_slice_valid(start_row, start_col, slice_width, slice_height, pizza_object, occupied_tiles):\n\n\ttomato_count = 0\n\tmushroom_count = 0\n\n\t# loop through the tiles of this slice and cou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
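A standalone version of the containment test. The original's `>=` comparison is kept, which conservatively rejects windows that merely touch the last row or column (a window with `stop == shape` would actually still slice safely):

```python
import numpy as np

def contained(sly, slx, shape):
    """True if the (sly, slx) window lies fully inside an array of this shape."""
    if (sly.start < 0) or (slx.start < 0):
        return False
    if (sly.stop >= shape[0]) or (slx.stop >= shape[1]):
        return False
    return True

full = np.zeros((1014, 1014))
print(contained(slice(480, 520), slice(480, 520), full.shape))    # True
print(contained(slice(1000, 1040), slice(480, 520), full.shape))  # False
```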
Add spectrum cutout back to the full array. `data` is added to `full_array` in place, so, for example, to subtract `self.model` from the full array, call the function with >>> self.add_to_full_image(self.model, full_array) | def add_to_full_image(self, data, full_array):
if self.contained_in_full_array(full_array):
full_array[self.sly_parent, self.slx_parent] += data
else:
sh = full_array.shape
xpix = np.arange(self.sh_beam[1])
xpix += self.origin[1] + self.dxfull[0] + self.... | [
"def truncate_to_mask(self):\n\n new_wavelengths = self.wavelengths[self.mask]\n new_values = self.values[self.mask]\n new_value_errors = self.value_errors[self.mask]\n\n output = Spectrum(wavelengths=new_wavelengths, values=new_values, value_errors=new_value_errors)\n return outp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
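The fast path of the row above is a plain in-place slice addition (the clipped fall-back for cutouts hanging off the edge is truncated in the document); a sketch:

```python
import numpy as np

def add_cutout(data, full, sly, slx):
    """Add a cutout into full[sly, slx] in place; pass -data to subtract."""
    full[sly, slx] += data

full = np.zeros((10, 10))
cutout = np.ones((4, 4))
add_cutout(cutout, full, slice(3, 7), slice(2, 6))
add_cutout(-cutout, full, slice(3, 7), slice(2, 6))   # undo: back to zeros
print(full.sum())   # 0.0
```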
Set 2D wavelength (x) axis labels based on spectral parameters | def twod_axis_labels(self, wscale=1.e4, limits=None, mpl_axis=None):
xarr = np.arange(len(self.lam))
if limits:
xlam = np.arange(limits[0], limits[1], limits[2])
xpix = np.interp(xlam, self.lam/wscale, xarr)
else:
xlam = np.unique(np.cast[int](self.lam / 1.e4*... | [
"def get_axes_labels(self) -> (str, str):\n units = self.units.get()\n\n # First, the x axes\n x_units = units.split('_')[-1]\n if x_units in ('nm', 'm'):\n x_label = 'Wavelength ({})'.format(x_units)\n elif x_units == 'hz':\n x_label = 'Frequency (hz)'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set wavelength (x) axis limits on a 2D spectrum | def twod_xlim(self, x0, x1=None, wscale=1.e4, mpl_axis=None):
if isinstance(x0, list) | isinstance(x0, tuple):
x0, x1 = x0[0], x0[1]
xarr = np.arange(len(self.lam))
xpix = np.interp([x0, x1], self.lam/wscale, xarr)
if mpl_axis:
mpl_axis.set_xlim(xpix)
el... | [
"def set_xlim(self, xlims, **kwargs):\n self.ax.set_xlim(xlims, **kwargs)",
"def xlim(self,min,max):\n self.ax.set_xlim(min,max)",
"def rescale(self):\n xr, yr = self.rewavefunctionlines.get_data()\n xi, yi = self.imwavefunctionlines.get_data()\n maxy=max(max(yi),max(yr))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
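Both this row and the axis-label row above rest on the same trick: map wavelength values onto pixel coordinates of the 2D spectrum with `np.interp`. A sketch with a synthetic linear dispersion:

```python
import numpy as np

lam = np.linspace(1.0e4, 1.7e4, 200)   # wavelength per x pixel, Angstroms
wscale = 1.0e4                          # label the axis in microns
xarr = np.arange(len(lam))

x0, x1 = 1.1, 1.6                       # desired limits, microns
xpix = np.interp([x0, x1], lam / wscale, xarr)
print(xpix)    # pixel coordinates to hand to ax.set_xlim(...)
```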
Integrate the sensitivity curve onto the wavelength grid of the PSF model | def get_psf_sensitivity(self, wave, sensitivity):
from .utils_c import interp
so = np.argsort(self.lam_psf)
s_i = interp.interp_conserve_c(self.lam_psf[so], wave, sensitivity, integrate=1)
psf_sensitivity = s_i*0.
psf_sensitivity[so] = s_i
return psf_sensitivity | [
"def sensitivity(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_sensitivity(self)",
"def eqWidthSynth(flux, linePoints): \r\n #//, fluxCont) {\r\n \r\n logE = math.log10(math.e) #// for debug output\r\n Wlambda = 0.0 #// Equivalent width in pm - picometers\r\n numPoints = len(lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
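The row resamples the sensitivity curve onto a possibly unsorted PSF wavelength grid with grizli's flux-conserving cython interpolator. Since `interp_conserve_c` isn't generally importable, this sketch swaps in plain `np.interp` (which is not flux-conserving) but keeps the sort/scatter-back pattern:

```python
import numpy as np

def psf_sensitivity_sketch(lam_psf, wave, sensitivity):
    so = np.argsort(lam_psf)             # interpolator needs ascending x
    s_i = np.interp(lam_psf[so], wave, sensitivity)  # stand-in for interp_conserve_c
    out = np.zeros_like(s_i)
    out[so] = s_i                        # scatter back to the original order
    return out

wave = np.linspace(9000.0, 18000.0, 50)
sens = np.ones(50)
lam_psf = np.array([1.5e4, 1.1e4, 1.3e4])   # deliberately unsorted
print(psf_sensitivity_sketch(lam_psf, wave, sens))   # [1. 1. 1.]
```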
Flip OK data quality bits using utils.unset_dq_bits. OK bits are defined as | def unset_dq(self):
okbits_instrument = {'WFC3': 32+64+512, # blob OK
'NIRISS': 1+2+4, #+4096+4100+18432+18436+1024+16384+1,
'NIRCAM': 1+2+4,
'WFIRST': 0,
'WFI': 0}
if self.instrument ... | [
"def clear_bits(*args) -> \"void\":\n return _ida_pro.clear_bits(*args)",
"def ClearBits(env, *args):\n _CheckDeclared(args)\n env['_BITS'] = env['_BITS'].difference(args)",
"def bad_data_mask(value, mask, bits):\n if 'unreliable' in bits.keys(): \n unreliable = (mask & 2**bits['unreliable'])... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
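Unsetting "OK" bits is a bitwise AND with the complement of the mask. A sketch of what a helper like `utils.unset_dq_bits` plausibly does, using the WFC3 value from the row (the helper body here is an assumption, not grizli's code):

```python
import numpy as np

def unset_dq_bits(dq, okbits):
    """Clear the given bits in a DQ array, leaving all others untouched."""
    return dq & ~np.int32(okbits)

okbits_wfc3 = 32 + 64 + 512        # the bits the row treats as OK for WFC3
dq = np.array([0, 32, 512 + 4, 16], dtype=np.int32)
print(unset_dq_bits(dq, okbits_wfc3))   # [ 0  0  4 16]
```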
Flag negative data values with dq=4 | def flag_negative(self, sigma=-3):
if self.data['ERR'].max() == 0:
return 0
bad = self.data['SCI'] < sigma*self.data['ERR']
self.data['DQ'][bad] |= 4
return bad.sum() | [
"def get_subtract_flag(self):\n return 0x40 & self.get_f()",
"def is_still_positive(self,i):\n return i < self.num_band",
"def __neg__(self):\n return Ad_Var(-self._val, -self._ders)",
"def computePositiveExceptional(data):\n mask = data[POSITIVE_EXCEPTIONAL_QUESTIONS] == 4\n data.loc[:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
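The whole operation in this row fits in a few numpy lines; a standalone sketch:

```python
import numpy as np

sci = np.array([0.5, -1.0, -0.05, 2.0])
err = np.array([0.1,  0.1,  0.1,  0.1])
dq = np.zeros(4, dtype=np.int32)

sigma = -3
bad = sci < sigma * err          # more than 3-sigma below zero
dq[bad] |= 4                     # set DQ bit 4, as in the record
print(bad.sum(), dq)             # 1 [0 4 0 0]
```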
Pad the appropriate WCS keywords | def add_padding_to_wcs(wcs_in, pad=(64,256)):
wcs = wcs_in.deepcopy()
is_new = True
for attr in ['naxis1', '_naxis1']:
if hasattr(wcs, attr):
is_new = False
value = wcs.__getattribute__(attr)
if value is not None:
w... | [
"def add_default_keywords(new_hdr):\n wcsaxes = new_hdr['WCSAXES']\n if wcsaxes == 3:\n default_pc = {\n 'PC1_1': 1,\n 'PC1_2': 0,\n 'PC1_3': 0,\n 'PC2_1': 0,\n 'PC2_2': 1,\n 'PC2_3': 0,\n 'PC3_1': 0,\n 'PC3_2': 0,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
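A hedged sketch of the padding idea using modern astropy attributes: shift CRPIX (and SIP CRPIX when present) and grow the stored axis sizes. The `pad=(y, x)` ordering follows the `self.pad[0]`/`self.pad[1]` usage in the `add_ytrace_offset` row above, but the exact keyword set the original touches is truncated, so treat this as illustrative:

```python
from astropy.wcs import WCS

def pad_wcs_sketch(w, pad=(64, 256)):
    """Pad CRPIX and the stored axis sizes so the image can grow by `pad` pixels."""
    w = w.deepcopy()
    w.wcs.crpix[0] += pad[1]          # x reference pixel
    w.wcs.crpix[1] += pad[0]          # y reference pixel
    if w.sip is not None:             # SIP keeps its own CRPIX
        w.sip.crpix[0] += pad[1]
        w.sip.crpix[1] += pad[0]
    if w.pixel_shape is not None:     # modern astropy stores sizes here
        w.pixel_shape = (w.pixel_shape[0] + 2 * pad[1],
                         w.pixel_shape[1] + 2 * pad[0])
    return w

w = WCS(naxis=2)
w.wcs.crpix = [507.0, 507.0]
w.pixel_shape = (1014, 1014)
wp = pad_wcs_sketch(w, pad=(64, 256))
print(wp.wcs.crpix, wp.pixel_shape)   # [763. 571.] (1526, 1142)
```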
Get slice of a WCS including higher orders like SIP and DET2IM. The normal `~astropy.wcs.wcs.WCS` `slice` method doesn't apply the slice to all of the necessary keywords. For example, SIP WCS also has a `CRPIX` reference pixel that needs to be offset along with the main `CRPIX`. | def get_slice_wcs(wcs, slx=slice(480, 520), sly=slice(480, 520)):
NX = slx.stop - slx.start
NY = sly.stop - sly.start
slice_wcs = wcs.slice((sly, slx))
if hasattr(slice_wcs, '_naxis1'):
slice_wcs.naxis1 = slice_wcs._naxis1 = NX
slice_wcs.naxis2 = slice_wcs._naxis... | [
"def slice(self, view, numpy_order=True):\n if hasattr(view, \"__len__\") and len(view) > self.wcs.naxis:\n raise ValueError(\"Must have # of slices <= # of WCS axes\")\n elif not hasattr(view, \"__len__\"): # view MUST be an iterable\n view = [view]\n\n if not all(isinst... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
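A minimal astropy sketch of the point the docstring makes: `WCS.slice` shifts the main CRPIX, and anything it leaves alone (such as SIP CRPIX) has to be shifted by hand:

```python
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [150.0, 2.0]
w.wcs.crpix = [500.5, 500.5]
w.wcs.cdelt = [-3.6e-5, 3.6e-5]

slx, sly = slice(480, 520), slice(480, 520)
sw = w.slice((sly, slx))        # numpy order: (y, x)
# keywords slice() leaves alone must be shifted by hand, e.g. SIP CRPIX:
if sw.sip is not None:
    sw.sip.crpix[0] -= slx.start
    sw.sip.crpix[1] -= sly.start
print(sw.wcs.crpix)             # [20.5 20.5]
```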
Convert attributes and data arrays to a `~astropy.io.fits.HDUList` | def get_HDUList(self, extver=1):
h = self.header.copy()
h['EXTVER'] = extver # self.filter #extver
h['FILTER'] = self.filter, 'element selected from filter wheel'
h['PUPIL'] = self.pupil, 'element selected from pupil wheel'
h['INSTRUME'] = (self.instrument,
... | [
"def get_HDUList(HDUList_im_or_fname):\n if isinstance(HDUList_im_or_fname, fits.HDUList):\n return HDUList_im_or_fname\n elif isinstance(HDUList_im_or_fname, str):\n return fits.open(HDUList_im_or_fname)\n elif isinstance(HDUList_im_or_fname, np.ndarray):\n hdu = fits.PrimaryHDU(HDULi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
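A minimal astropy sketch of packing header keywords plus a data array into an HDUList; the keyword values below are placeholders, not the original's:

```python
import numpy as np
from astropy.io import fits

sci = np.zeros((64, 64), dtype=np.float32)
h = fits.Header()
h['EXTVER'] = 1
h['FILTER'] = ('F140W', 'element selected from filter wheel')   # placeholder
h['INSTRUME'] = ('WFC3', 'instrument name')                      # placeholder

hdul = fits.HDUList([fits.PrimaryHDU(),
                     fits.ImageHDU(data=sci, header=h, name='SCI')])
print([hdu.name for hdu in hdul])   # ['PRIMARY', 'SCI']
```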
Compute exact PA of the dispersion axis, including tilt of the trace and the FLT WCS | def get_dispersion_PA(self, decimals=0):
from astropy.coordinates import Angle
import astropy.units as u
# extra tilt of the 1st order grism spectra
if 'BEAMA' in self.conf.conf_dict:
x0 = self.conf.conf_dict['BEAMA']
else:
x0 = np.array([10,30])
... | [
"def get_dispersion_PA(self, decimals=0, local=False):\n from astropy.coordinates import Angle\n import astropy.units as u\n\n # extra tilt of the 1st order grism spectra\n if 'BEAMA' in self.beam.conf.conf_dict:\n x0 = self.beam.conf.conf_dict['BEAMA']\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
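The original combines the trace tilt with the FLT WCS, and the WCS part is truncated here, so this sketch shows only the tilt term, computed from a BEAMA-like x range and made-up trace offsets:

```python
import numpy as np

# trace tilt over the first-order beam footprint (numbers are made up)
x0, x1 = 10.0, 30.0                      # BEAMA-like x range
y0, y1 = 0.0, 0.4                        # trace y at those x
tilt = np.degrees(np.arctan2(y1 - y0, x1 - x0))
print(round(tilt, 2))                    # ~1.15 deg of trace tilt
```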
Compute a mask where smoothed residuals are greater than some value. Perhaps useful for flagging contaminated pixels that aren't in the model, such as high orders dispersed from objects that fall off of the direct image, but this hasn't yet been extensively tested. | def smooth_mask(self, gaussian_width=4, threshold=2.5):
import scipy.ndimage as nd
mask = self.grism['SCI'] != 0
resid = (self.grism['SCI'] - self.model)*mask
sm = nd.gaussian_filter(np.abs(resid), gaussian_width)
resid_mask = (np.abs(sm) > threshold*self.grism['ERR'])
s... | [
"def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask",
"def test_im_with_mask_as_masked_array():\n\n im_with_mask = np.ma.masked_where(im_mask < 2, im)\n im_result = make_apply_mask(im_with_mask, ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
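A self-contained version of the smoothed-residual flag with synthetic data:

```python
import numpy as np
import scipy.ndimage as nd

rng = np.random.default_rng(1)
sci = rng.normal(0.0, 1.0, (128, 128)).astype(np.float32)
err = np.ones_like(sci)
model = np.zeros_like(sci)
sci[60:68, 60:68] += 50.0            # a contaminating blob not in the model

mask = sci != 0
resid = (sci - model) * mask
sm = nd.gaussian_filter(np.abs(resid), 4)
resid_mask = np.abs(sm) > 2.5 * err  # pixels to flag
print(resid_mask.sum() > 0)          # True: the blob region gets flagged
```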
Load segmentation image and catalog, either from photutils or SExtractor. If SExtractor, use `catalog_format='ascii.sextractor'`. | def load_photutils_detection(self, seg_file=None, seg_cat=None,
catalog_format='ascii.commented_header'):
root = self.direct_file.split('.fits')[0]
if seg_file is None:
seg_file = root + '.detect_seg.fits'
if not os.path.exists(seg_file):
... | [
"def initFromCatalogue(cls, catalogue, **kwargs):\n cls._log(\"info\", \"Initializing with catalogue {}\".format(catalogue))\n ins = cls(**kwargs)\n cls._log(\"info\", \"Converting catalogue to internal format\")\n cat = ins.convertCatalogue(catalogue)\n for detector in ins.detect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
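Reading such catalogs is a one-liner with astropy; a sketch using an in-memory table (for SExtractor output, swap in `format='ascii.sextractor'` as the docstring says):

```python
from astropy.table import Table

lines = ['# id x y', '1 10.0 20.0', '2 30.5 40.1']
cat = Table.read(lines, format='ascii.commented_header')
print(cat.colnames)    # ['id', 'x', 'y']
```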
Save entire `GrismFLT` object to a pickle | def save_full_pickle(self, verbose=True):
try:
import cPickle as pickle
except:
# Python 3
import pickle
root = self.grism_file.split('_flt.fits')[0].split('_cmb.fits')[0]
root = root.split('_flc.fits')[0].split('_rate.fits')[0]
root = root.sp... | [
"def save_pickle(self, path=None):\n\n path = path if path else ''\n name = self.name if self.name else 'unnamed'\n\n pickle.dump(self, open(path + 'scaling_state_' + name + '.pckl', 'wb'))",
"def saveData(fname, grating, params, lines, meta):\r\n pickle.dump((grating, params, lines, meta), ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load saved data from a FITS file | def load_from_fits(self, save_file):
fits = pyfits.open(save_file)
self.seg = fits['SEG'].data*1
self.model = fits['MODEL'].data*1
self.direct.data = OrderedDict()
self.grism.data = OrderedDict()
for ext in range(1, len(fits)):
key = fits[ext].header['EXTNAME... | [
"def load(self):\n\t\tif self.full_name.endswith('.fits'):\n\t\t\thdu_in = pyfits.open(self.Spath+self.full_name)\n\t\t\ttb = hdu_in[1].data\n\t\t\thdu_in.close()\n\t\t\tlam = tb.field('loglam')\n\t\t\tbase = np.full(len(lam),10)\n\t\t\tlam = np.power(base,lam)\n\t\t\tflux = tb.field(0)*1e-17\n\t\t\tflux_err = tb.f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
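Reloading works by walking FITS extensions and routing on EXTNAME; a sketch that round-trips arrays through an in-memory HDUList:

```python
import numpy as np
from collections import OrderedDict
from astropy.io import fits

# build a file-like structure with a couple of named extensions
hdul = fits.HDUList([fits.PrimaryHDU(),
                     fits.ImageHDU(data=np.ones((4, 4)), name='SEG'),
                     fits.ImageHDU(data=np.zeros((4, 4)), name='MODEL')])

data = OrderedDict()
for ext in range(1, len(hdul)):
    key = hdul[ext].header['EXTNAME']
    data[key] = hdul[ext].data * 1      # *1 forces a copy, as in the record
print(list(data))                        # ['SEG', 'MODEL']
```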
Table of trace parameters. Trace is unit-indexed. | def trace_table(self):
dtype = np.float32
tab = utils.GTable()
tab.meta['CONFFILE'] = os.path.basename(self.beam.conf.conf_file)
tab['wavelength'] = np.cast[dtype](self.beam.lam*u.Angstrom)
tab['trace'] = np.cast[dtype](self.beam.ytrace + self.beam.sh_beam[0]/2 - self.beam.ycen... | [
"def test_header_to_trace_set_params(self):\n trace_count = 100\n sample_count = 1000\n\n try:\n with trsfile.open(self.tmp_path, 'w', headers={\n Header.LABEL_X: \"s\",\n Header.LABEL_Y: \"V\",\n Header.OFFSET_X: 100,\n Hea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
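The `utils.GTable` in this row appears to be an astropy Table subclass, so a plain Table sketch shows the shape of what it builds; the column values below are synthetic:

```python
import os
import numpy as np
import astropy.units as u
from astropy.table import Table

tab = Table()
tab.meta['CONFFILE'] = os.path.basename('G141.test.conf')   # placeholder name
lam = np.linspace(1.1e4, 1.65e4, 10).astype(np.float32)
tab['wavelength'] = lam * u.Angstrom
tab['trace'] = np.linspace(33.0, 34.0, 10).astype(np.float32)  # unit-indexed y
print(tab.colnames, tab['wavelength'].unit)   # ['wavelength', 'trace'] Angstrom
```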