query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict) |
|---|---|---|---|
Test that choice list exists on initialization. | def test_initialization_has_choices(self):
forms = self.get_forms(self.data)
for dummy, form in forms.items():
for item in form.questionnaire:
if isinstance(item, OdkPrompt):
if item.odktype in item.select_types:
msg = 'No choices f... | [
"def test_creation_good():\n value = \"boo\"\n choices = [\"boo\", \"foo\"]\n choice = param.Choice(value=value, choices=choices)\n assert choice.value == value\n assert choice.choices == choices",
"def test_choice_validation_success(self):\n x = TestListFieldModel()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test expected results of converted questionnaire based on position. | def test_questionnaire(self):
forms = self.get_forms(self.data)
for datum in self.data:
expected_output = datum['outputs']
output = \
forms[datum['inputs']['file']].questionnaire[datum['position']]
# - Check Object Representation
got = st... | [
"def test_positions_my_position(self):\n pass",
"def test_correct_estimates(self):\n self.assertEqual(self.ajive.common.rank, 1)\n self.assertEqual(self.ajive.blocks['x'].individual.rank, 1)\n self.assertEqual(self.ajive.blocks['y'].individual.rank, 2)",
"def test_rank_translations(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
takes a msg object from Python's email parser and formats it into a dictionary (which then becomes JSON that we can put in Redis) | def parse_msg(msg):
subject = msg.get("Subject")
return {
"subject": subject,
"sender": msg.get("Sender"),
"date": msg.get("Date"),
"size": len(bytes(msg)),
} | [
"def parse_message(message):\n return {\n \"msg\": message.message,\n \"sender\": message.sender.name,\n \"sent_on\": message.sent_on.strftime(\"%b %d %y - %H:%M\"),\n }",
"def parse_email(message):\n\n pass",
"def json_dumps(msg):\n return json.dumps(msg)",
"def parse_ema... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
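A quick way to sanity-check the parse_msg row above is to feed it a message built with Python's stdlib email parser. A minimal sketch with hypothetical header values; `bytes(msg)` works because `email.message.Message` implements `__bytes__`:

```python
import email
import json

def parse_msg(msg):
    # Same shape as the document above; size is the raw serialized length.
    return {
        "subject": msg.get("Subject"),
        "sender": msg.get("Sender"),
        "date": msg.get("Date"),
        "size": len(bytes(msg)),
    }

raw = (
    "Subject: hello\r\n"
    "Sender: alice@example.com\r\n"
    "Date: Mon, 01 Jan 2024 00:00:00 +0000\r\n"
    "\r\n"
    "body\r\n"
)
msg = email.message_from_string(raw)
print(json.dumps(parse_msg(msg)))  # ready to put in Redis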
parse the given list of Models to Document instances | def parse_to_documents(self, models):
return map(self.parse_to_document, models) | [
"def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_type = path.split(\".\")[-1]\n raise Exception(f\"Documents of file type {file_type} cannot be ingested\")\n\n outputs = []\n document = docx.Document(path)\n\n for paragraph in do... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
given a model, a field name (can include lookups like 'client__name', 'client__goal__name', etc.), and the field_meta object for the immediate field related to the field_name (so for the simple case of 'name', this would be the 'name' field meta object, for the complex case of 'client__name', this would be the 'client' fie... | def parse_field(self, model, field_name, field_meta):
if field_meta.concrete and not (field_meta.is_relation or field_meta.one_to_one or field_meta.many_to_one or field_meta.one_to_many or field_meta.many_to_many):
# concrete field
return getattr(model, field_name)
elif field_met... | [
"def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]",
"def _get_field_from_name(model, field_name):\n try:\n return model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return getattr(model, fie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
search through the project's installed apps, for each looking for the presence of a jekyll.py file (or whatever the overridden name is in config.JEKYLL_COLLECTIONS_FILENAME) | def discover_collections():
collections = []
apps = config.JEKYLL_COLLECTIONS_INCLUDE_APPS or settings.INSTALLED_APPS
for app in apps:
try:
jekyll_collection_module = __import__('%s.%s' % (app, config.JEKYLL_COLLECTIONS_MODULE), fromlist=[app])
except ImportError:
co... | [
"def detect_flask_apps():\n\n matches = []\n for root, dirnames, filenames in os.walk(os.getcwd()):\n for filename in fnmatch.filter(filenames, \"*.py\"):\n full = os.path.join(root, filename)\n if \"site-packages\" in full:\n continue\n\n full = os.path.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
given a collection, atomically write the collection's data to location. Meaning, if any document in the collection fails to generate/write, the entire operation aborts | def atomic_write_collection(collection, build_dir):
counter = 0
collection_dir = os.path.join(build_dir, collection.location)
try:
for doc in collection.docs:
doc.write(collection_dir)
counter += 1
except (exceptions.DocGenerationFailure, exceptions.CollectionSizeExceede... | [
"def _sync_collection_mp(self, dbname, collname):\n self._logger.info('>>>> %s.%s' % (dbname, collname))\n doc_q = multiprocessing.Queue()\n ev = multiprocessing.Event()\n ev.clear()\n processes = []\n for i in range(0, 4):\n p = multiprocessing.Process(target=se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
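The atomic_write_collection row above is truncated before its rollback logic. One common way to get the all-or-nothing behavior the docstring promises is to render into a scratch directory and swap it in only on success. A minimal sketch, assuming `collection.location` and `doc.write(dir)` behave as in the row; only the final rename step itself is truly atomic:

```python
import os
import shutil
import tempfile

def atomic_write_collection(collection, build_dir):
    final_dir = os.path.join(build_dir, collection.location)
    # Render everything into a scratch directory first, so a failure
    # partway through never leaves a half-written collection behind.
    tmp_dir = tempfile.mkdtemp(dir=build_dir)
    try:
        for doc in collection.docs:
            doc.write(tmp_dir)
    except Exception:
        shutil.rmtree(tmp_dir, ignore_errors=True)
        raise
    if os.path.isdir(final_dir):
        shutil.rmtree(final_dir)
    os.replace(tmp_dir, final_dir)  # the actual atomic step
```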
Pic of day access. .pod recent gives you the most recent, .pod with no arguments gives you a random pod, .pod followed by either a date (MM/DD/YY format only) or a number will return either the closest POD to that date or the POD matching that number. .pod list will give you a link to a dump of all PODs. | def command_pod(bot, user, channel, args):
settings = _import_yaml_data()
user = settings['db']['user']
pw = settings['db']['pass']
conn = psycopg2.connect(host="localhost", database="quassel", user=user, password=pw)
cursor = conn.cursor()
## Once again, this query is specially craf... | [
"def pod_finder(pod_list):\n pod_scores = {}\n\n for pod in pod_list:\n # convert pod to dict\n pod = dict(pod)\n\n if pod.get('@title') in RESULT_PODS:\n return pod\n\n score = 0\n\n # meh pods\n if pod.get('@title') in NOT_PODS:\n score -= 100\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The most convoluted way to do this kind of thing ever invented, I think. The objective is to store the pastebin links for POD lists, so we don't waste time resending them every time somebody asks. Instead, we waste time hashing the returns every time. I'm honestly not sure which is better. Probably just resending them. | def _list(db_tuples):
ret_str = ""
hashy = hashlib.sha224() # sha224 hashes to ascii characters only, which makes them serializable.
## Builds the POD list up, along with the hash query.
for x in range(len(db_tuples)):
build_str = 'POD %s: "%s", posted on %s\n' % (x + 1, db_tuples[x][0], db... | [
"def get_links(subreddit_list: Dict[str, int]) -> List[str]:\n global driver\n\n assert driver is not None\n\n driver.get(\"https://old.reddit.com\")\n\n # prompt the user to log in\n print(\"Logged in accounts see 100 posts instead of 25\")\n input(\"Log into your reddit account in the chromedriv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Override to indicate that a test has finished (it may already have failed or errored) | def notifyTestFinished(self, test):
pass | [
"def end_test(self):",
"def wait_test_done(self):\n self.test_thread.join()\n self.logger.info('Test thread is done')",
"def testResultDone(self):\n ray.init(num_cpus=1, num_gpus=1)\n runner = TrialRunner(BasicVariantGenerator())\n kwargs = {\n \"stopping_criterion\": {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get contours of image X (2D matrix). v is the value of an intensity threshold. | def get_contours(X, v):
return measure.find_contours(X, v) | [
"def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):\n return skimage.measure.find_contours(\n x, level, fully_connected=fully_connected, positive_orientation=positive_orientation\n )",
"def find_contours(img):\n img_copy = img.copy()\n im2, contours, h = cv2.fin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a 2D array X, where each row represents a 2D vector, interpolate between these points using a B-spline. Return x and y coords with n points each. | def interpolate(X, n, s=2.0):
tck, u = splprep(X.T, u=None, s=s, per=0)
u_new = np.linspace(u.min(), u.max(), n)
x_new, y_new = splev(u_new, tck, der=0)
return x_new, y_new | [
"def bspline(x, n):\n ax = -abs(asarray(x))\n # number of pieces on the left-side is (n+1)/2\n funclist, condfuncs = _bspline_piecefunctions(n)\n condlist = [func(ax) for func in condfuncs]\n return piecewise(ax, condlist, funclist)",
"def interpolate_1d(array, x):\r\n array = np.asarray(array)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
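The interpolate row above is complete as shown; a short usage sketch, resampling a noisy half-circle of 20 points onto 100 evenly spaced spline points (higher `s` smooths the fitted curve further):

```python
import numpy as np
from scipy.interpolate import splev, splprep

def interpolate(X, n, s=2.0):
    tck, u = splprep(X.T, u=None, s=s, per=0)
    u_new = np.linspace(u.min(), u.max(), n)
    x_new, y_new = splev(u_new, tck, der=0)
    return x_new, y_new

t = np.linspace(0, np.pi, 20)
X = np.c_[np.cos(t), np.sin(t)] + np.random.normal(0, 0.01, (20, 2))
x_new, y_new = interpolate(X, n=100)
print(x_new.shape, y_new.shape)  # (100,) (100,)
```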
Given an image X (2D matrix), extract a contour consisting of n points. s controls the smoothness of the contour, where s=0 is a sharp interpolation and higher s makes it smoother. | def get_shape(X, n=50, s=5):
v = X.mean() # use mean value of all entries
cs = get_contours(X, v)
if len(cs) == 0:
raise ValueError('Unable to extract contour.')
# get only outside contour
c = sorted(cs, key=len, reverse=True)[0]
R = np.array([[0, -1], [1, 0]])
c = c.dot(R) + np.arra... | [
"def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):\n return skimage.measure.find_contours(\n x, level, fully_connected=fully_connected, positive_orientation=positive_orientation\n )",
"def point_contour(x, y, data):\n try:\n from scipy.ndimage import label,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace the existing cls.__init__() method with a new one which also initialises the field generators and similar bookkeeping. | def augment_init_method(cls):
orig_init = cls.__init__
def new_init(self, *args, **kwargs):
super(CustomGenerator, self).__init__() # TODO: does this behave correctly with longer inheritance chains?
orig_init(self, *args, **kwargs)
self.orig_args = args
self.orig_kwargs = kw... | [
"def _init_fields(self) -> None:\n ...",
"def __init__(self):\n for field in self.get_fields():\n setattr(self, field, None)",
"def _init_fields(self, schema_name, containing_cls):\n self._schema_name = schema_name\n self.containing_cls = containing_cls",
"def __init__(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
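The augment_init_method row is cut off mid-body; the core wrap-and-replace pattern it uses is worth seeing whole. A minimal sketch with the field-generator bookkeeping reduced to recording the constructor arguments (the CustomGenerator specifics are omitted):

```python
def augment_init_method(cls):
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        orig_init(self, *args, **kwargs)
        # Bookkeeping the original does after delegating: remember the
        # call so instances can later be re-initialised or re-spawned.
        self.orig_args = args
        self.orig_kwargs = kwargs

    cls.__init__ = new_init
    return cls

@augment_init_method
class Demo:
    def __init__(self, x):
        self.x = x

d = Demo(42)
print(d.x, d.orig_args, d.orig_kwargs)  # 42 (42,) {}
```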
Mark field generator templates as such so that an indication of this is included in the tohu_name. This is purely a convenience for easier debugging. | def _mark_field_generator_templates(self):
for g in self.ns_gen_templates.all_generators:
g.is_custom_generator_template = True | [
"def gen_fake(self, field_name, fake):\r\n ...",
"def custom_template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.custom_template_items)",
"def template_field_names(self, template_field_names):\n\n self._template_field... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build a trie from all patterns and append a "$" as a stop sign for each pattern | def build_trie(patterns):
tree = dict()
tree[0] = {}
idx = 1
for pattern in patterns:
cur = tree[0]
for char in pattern:
if char in cur:
cur = tree[cur[char]]
else:
cur[char] = idx
tree[idx] = {}
cur... | [
"def make_pattern(paths, _main=True):\n patterns = {}\n flag = False\n for path in paths:\n if path.startswith('/'):\n path = path[1:]\n splitted = path.split('/',1)\n if len(splitted)==1:\n if patterns:\n assert flag,`flag,paths,patterns, path,spli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
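The build_trie row is truncated before the "$" stop sign is appended; a minimal complete sketch of the same dict-of-dicts construction, assuming only the docstring's contract:

```python
def build_trie(patterns):
    # Nodes are integer ids; tree[node] maps a character to a child id.
    # Appending "$" marks where each whole pattern ends.
    tree = {0: {}}
    idx = 1
    for pattern in patterns:
        cur = tree[0]
        for char in pattern + "$":
            if char in cur:
                cur = tree[cur[char]]
            else:
                cur[char] = idx
                tree[idx] = {}
                cur = tree[idx]
                idx += 1
    return tree

print(build_trie(["ab", "ac"]))
# {0: {'a': 1}, 1: {'b': 2, 'c': 4}, 2: {'$': 3}, 3: {}, 4: {'$': 5}, 5: {}}
```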
Get the first entry of the table with its field equals to value | def get_entry(table, field, value):
return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value], one=True) | [
"def get_table_row(self, key_field, key_value):\n\n if self.table == []:\n self._get_table_list()\n for row in self.table:\n if row[key_field] == key_value:\n return row\n return None",
"def select_single(self, table, rownum):\n\n with self.connecti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all entries of the table with their field equals to value | def get_entries(table, field, value):
return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value]) | [
"def get_entry(table, field, value):\n\n return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value], one=True)",
"def find_by_fieldname(self, name, value):\n response = self.table.scan(\n FilterExpression=Attr(name).eq(value)\n )\n items = response.get(\"It... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a ballot from a user, or update it if it exists | def add_update_ballot(voter, poll, choices):
db = get_db()
with db:
poll = get_poll(poll)
if poll is None or poll['closed'] is True or len(poll['choices']) != len(choices):
return False
ballot = []
for choice, grade in choices.iteritems():
ballot.append... | [
"def add_points(user_id, points):\n db = sqlite3.connect('Ranking/Rankings.db')\n adder = db.cursor()\n if int(points) < 0:\n adder.execute('UPDATE ranks SET Points = {} WHERE UserID = {}'.format(0, user_id))\n else:\n cur_points = get_points(user_id)\n adder.execute('UPDATE ranks S... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a poll from the database | def get_poll(poll):
poll = get_entry('polls', 'uid', poll)
if poll is None:
return None
poll = dict(poll)
poll['choices'] = []
for choice in get_entries('choices', 'poll', poll['uid']):
poll['choices'].append(dict(choice))
poll['choices'].sort(key=lambda x: x['id'])
poll[... | [
"def get_single_poll(request_ctx, id, **request_kwargs):\n\n path = '/v1/polls/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response",
"def poll_by_uid(self, uid, context=None):\n if uid == 'latest':\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all polls owned by an user from the database | def get_own_polls(owner):
polls_db = get_entries('polls', 'owner', owner)
if polls_db is None:
return None
polls = []
for poll in polls_db:
poll = dict(poll)
poll['closed'] = poll['end_date'] < datetime.now()
polls.append(poll)
return polls | [
"def get_polls(self):\n docs = self.collection.find()\n polls = [_poll_from_doc(doc) for doc in docs]\n return polls",
"def get_all_by_user(username):\n id = username_to_id(username)\n\n if id == 6: #user is admin, return all jobs\n return {'jobs': [clean_job(job) for job in jobs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a poll from the database | def delete_poll(poll):
get_db().execute('DELETE FROM ballots WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM results WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM choices WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM polls WHERE uid = ?;', [poll])
get_db().commit() | [
"def test_delete_poll(self):\n response = self.client.delete(f\"/api/poll/{self.poll.pk}/delete/\", format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def cancel_poll_record(db_url, db_user, db_password, db_name, poll_id):\n db = connect(db_url, db_user, db_password,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a user's ballot from the database | def get_voter_ballot(voter, poll):
ballot = query_read("SELECT choices.id, ballots.grade FROM choices JOIN ballots ON ballots.poll = ? and choices.id = ballots.choice and ballots.voter = ? ORDER BY choices.id;", [poll, voter])
if not ballot:
return None
return dict(ballot) | [
"def get_user_bid(self):\n if self.good.deal == AUCTION:\n bid = AuctionBids.objects.filter(good=self.good, user=self.user).latest('updated')\n return bid.user_price",
"def get_points(user_id):\n db = sqlite3.connect('Ranking/Rankings.db')\n adder = db.cursor()\n adder.execut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the list of all the voters from a poll | def get_ballot_voters(poll):
voters = query_read("SELECT voter FROM ballots WHERE poll = ? ORDER BY voter;", [poll])
if not voters:
return None
return [voter[0] for voter in voters] | [
"def get_voters(self) -> List['Voter']:\n return self.voters",
"def candidate_votes():\n for name in votedCandidates: \n candidateVotes.append(votes(name))\n return candidateVotes",
"def get_sorted_votes(poll: Poll, votes: List[Vote]) -> InstrumentedList:\n\n def get_user_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get cached results from the poll or compute them. After computation, ballots are destroyed. | def get_results(poll):
assert poll is not None, "Invalid poll: None"
if not poll['closed']:
return None
results = {}
# Get cached results
results_db = get_entries('results', 'poll', poll['uid'])
# If no cache, compute the results and store them
if len(results_db) == 0:
b... | [
"def poll(self):\n data = self.get_data()\n if data:\n self.add_metrics(data)",
"def _result(self, res):\n worker_info = self.workers[res.source]\n worker_info.pending_results -= 1\n self.pending_results -= 1\n assert worker_info.pending_results >= 0\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses alternates, like 'a{b,c}d{e,f}' in a pkgdesc str. Returns ['a', ('b','c'), 'd', ('e','f')] for the given example. | def parse_alternates(pkgdesc):
assert(isinstance(pkgdesc, str))
parsed_pkgdesc = []
while len(pkgdesc) > 0:
i = pkgdesc.find('{')
if i == -1:
parsed_pkgdesc.append(pkgdesc)
break
parsed_pkgdesc.append(pkgdesc[:i])
pkgdesc = pkgdesc[i+1:]
i = pk... | [
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
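The parse_alternates row stops just after the opening brace is found; a sketch of the full loop consistent with the docstring's example (the closing-brace handling is assumed from context):

```python
def parse_alternates(pkgdesc):
    assert isinstance(pkgdesc, str)
    parsed = []
    while pkgdesc:
        i = pkgdesc.find('{')
        if i == -1:
            parsed.append(pkgdesc)
            break
        parsed.append(pkgdesc[:i])
        pkgdesc = pkgdesc[i + 1:]
        j = pkgdesc.find('}')
        # Each {a,b,...} group becomes a tuple of alternates.
        parsed.append(tuple(pkgdesc[:j].split(',')))
        pkgdesc = pkgdesc[j + 1:]
    return parsed

print(parse_alternates('a{b,c}d{e,f}'))  # ['a', ('b', 'c'), 'd', ('e', 'f')]
```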
Recurse through the already-parsed pkgdesc list, generating alternates. | def gen_alternates_recurse(pkgdesc):
assert(isinstance(pkgdesc, list))
if len(pkgdesc) <= 1:
yield ''.join(pkgdesc)
else:
prefix = pkgdesc[0]
alternates = pkgdesc[1]
pkgdesc = pkgdesc[2:]
for alt in alternates:
for x in gen_alternates_recurse(pkgdesc):
... | [
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def parse_alternates(pkgdesc):\n assert(isinstance(pkgdesc, str))\n parsed_pkgdesc = []\n while len(pkgdesc) > 0:\n i = pkgdesc.find('{')\n if i == -1:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yields all possible alternates from the pkgdesc str. | def gen_alternates(pkgdesc):
pkgdesc = parse_alternates(pkgdesc)
for x in gen_alternates_recurse(pkgdesc):
yield x | [
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same as vuln_iterator, but takes alternates into account. Yields (pkgdesc, original_pkgdesc, vulntype, vulnurl). | def vuln_alternate_iterator(filename):
for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):
for x in gen_alternates(pkgdesc):
yield (x, pkgdesc, vulntype, vulnurl) | [
"def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):\n assert(isinstance(pkg_list, list))\n for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):\n pkgdesc = parse_pkgdesc(pkgdesc)\n pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse pkgdesc, splitting the package name pattern and version constraints. Returns ('pkgname', '>ver1', '<ver2') for Dewey-style comparison. Returns ('pkgname', 'ver') for glob version matching. | def parse_pkgdesc(pkgdesc):
assert(isinstance(pkgdesc, str))
# Find version comparisions.
split_points = [pkgdesc.find(c) for c in '<>']
split_points = [i for i in split_points if i != -1]
split_points.sort()
# Split the str.
parsed_pkgdesc = []
j = 0
for i in split_points:
p... | [
"def getCurrentVerData(self, pkg):\n pkgpat = re.compile(r'%s\\s' % pkg)\n\n cururl = \"%s/current.list\" % os.environ[\"EUPS_PKGROOT\"]\n curlist = urllib2.urlopen(cururl)\n desc = filter(lambda x: pkgpat.match(x), curlist.readlines())\n curlist.close()\n \n if len(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filters pkgname before matching. | def pkgname_filter(pkgname):
    if re.search(r'^py\d{2}-', pkgname):
# Strip Python version from pkgname, as it's present in the binary package name,
# but is not present in the pkgsrc package name.
return 'py-' + pkgname[5:]
return pkgname | [
"def test_filter_app_by_name_pattern_no_pattern(self) -> None:\n self.assertEqual(filter_app_by_name_pattern(self.COMPATIBLE_APPS), self.COMPATIBLE_APPS)",
"def check_package_name(package_name):\n m = re.match('[a-z0-9_]{3,30}', package_name)\n return (m != None and m.group(0) == package_name)",
"def f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same as vuln_alternate_iterator, but matches pkgnames against a package list, and splits up the version patterns. Yields (pkgname, (version_pattern,), original_pkgdesc, vulntype, vulnurl). | def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):
assert(isinstance(pkg_list, list))
for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):
pkgdesc = parse_pkgdesc(pkgdesc)
pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc[0]))
... | [
"def vuln_alternate_iterator(filename):\n for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):\n for x in gen_alternates(pkgdesc):\n yield (x, pkgdesc, vulntype, vulnurl)",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
apply github matrix `include` and `exclude` transformations | def expand_gh_matrix(matrix):
raw = dict(matrix)
include = raw.pop("include", [])
exclude = raw.pop("exclude", [])
merged = [
dict(collections.ChainMap(*p))
for p in [*itertools.product(*[[{k: i} for i in raw[k]] for k in raw])]
]
for m in merged:
to_yield = dict(m)
... | [
"def clean_ids(include, exclude):\n\n # deal with nothing to exclude\n if (exclude is None) or (len(exclude) < 1):\n return include\n\n # check if nested\n if pyto.util.nested.is_nested(exclude):\n nested = True\n else:\n nested = False\n include = [include]\n exclu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
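The expand_gh_matrix row is truncated where include/exclude get applied; a simplified sketch of the whole transformation. GitHub's real `include` semantics (merging into matching combinations) are richer than this; here includes are just appended, which covers only one of the actual rule's cases:

```python
import collections
import itertools

def expand_gh_matrix(matrix):
    raw = dict(matrix)
    include = raw.pop("include", [])
    exclude = raw.pop("exclude", [])
    merged = [
        dict(collections.ChainMap(*p))
        for p in itertools.product(*[[{k: v} for v in raw[k]] for k in raw])
    ]
    for m in merged:
        # Drop any combination that contains every key/value of an exclude.
        if not any(all(m.get(k) == v for k, v in e.items()) for e in exclude):
            yield m
    for extra in include:
        yield dict(extra)

matrix = {
    "os": ["linux", "macos"],
    "py": ["3.10", "3.11"],
    "exclude": [{"os": "macos", "py": "3.10"}],
    "include": [{"os": "windows", "py": "3.11"}],
}
print(list(expand_gh_matrix(matrix)))  # 3 surviving combos + 1 include
```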
Generate a very explicit env from a lock. | def lock_to_env(lock: Path, env: Path):
env.write_text(
P.ENV_TMPL.render(
deps=lock.read_text(encoding="utf-8")
.split(EXPLICIT)[1]
.strip()
.splitlines()
)
) | [
"def fixture_env_object(env_manager):\n env = Environment(\n env_id=COMMIT_HASH,\n created=multiprocessing.Event(),\n creating=multiprocessing.Event(),\n location=os.path.join(env_manager.base_dir, COMMIT_HASH),\n site_packages=os.path.join(env_manager.base_dir, COMMIT_HASH, VE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to disable all the workers in the modjk load balancer | def test_worker_disabled(list_not_str):
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": list_not_str})
assert modjk.worker_disabled(name, "app1") == ret | [
"def test_get_all_workers(self):\n print(self.api.get_all_workers())\n pass",
"def test_disable_agent(self):\n pass",
"def testE_WhiteListBlackList(self):\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to recover all the workers in the modjk load balancer | def test_worker_recover(list_not_str):
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": list_not_str})
assert modjk.worker_recover(name, "app1") == ret | [
"def test_get_all_workers(self):\n print(self.api.get_all_workers())\n pass",
"def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The order will try to sell at the specified limit price until it is reached. If not successful, the order will be canceled. | def sell_limit(symbol, quantity, orderId, sell_price, last_price):
invalidAttempts = 0
while invalidAttempts < INVALID_ATTEMPTS_LIMIT:
order = client.sell_limit(symbol, quantity, sell_price)
if 'msg' in order:
message(order['msg'])
... | [
"def limit_order(self, instrument, action, qty, limit_price):\r\n # Verify action\r\n if action != 'BUY' and action != 'SELL':\r\n raise ValueError(\"Invalid action () for market order. Must be \"\r\n \"'BUY' or 'SELL'.\".format(action))\r\n \r\n li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function returns the value f[i,j] in case (i,j) is an index 'in the image'; otherwise it returns 0 | def value(i,j):
if i<0 or i>=M or j<0 or j>=N:
return 0
return f[i,j] | [
"def getValue(self, i, j):\n return self.A[i][j] if j in self.A[i] else 0",
"def non_zero_func(x):\n\n inds = x.nonzero()[0]\n\n return inds",
"def feature_index_for_cell_face(self, cell_kji0, axis, p01):\n\n self.cache_arrays()\n if self.feature_indices is None: return None\n cell =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given image NVR, wait for the build that produced it to show up in koji. If it doesn't within the timeout, raise an error. | def wait_for_parent_image_build(self, nvr):
self.log.info('Waiting for Koji build for parent image %s', nvr)
poll_start = time.time()
while time.time() - poll_start < self.poll_timeout:
build = self.koji_session.getBuild(nvr)
if build:
self.log.info('Pare... | [
"def testWaitForPushImageError(self):\n stage = self.ConstructStage()\n stage.board_runattrs.SetParallel(\n 'instruction_urls_per_channel', None)\n\n self.assertEqual(stage.WaitUntilReady(), False)",
"def _wait_for_image_to_become_active(\n self, image_id: \"std::uuid\", timeout: int = 60\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct the result dict to be preserved in the build metadata. | def make_result(self):
result = {}
if self._base_image_build:
result[BASE_IMAGE_KOJI_BUILD] = self._base_image_build
if self._parent_builds:
result[PARENT_IMAGES_KOJI_BUILDS] = self._parent_builds
return result if result else None | [
"def __call__(self, results):\n\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n \n for key in self.keys:\n data[key] = results[key] \n return data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rename an existing overlay instance | def rename_overlay(self, old_lbl, new_lbl):
# NOTE: the overlay will call _on_overlay_rename after updating
self.overlay.rename_choice(old_lbl, new_lbl) | [
"def test_instance_rename(self):\n # create the instance\n ret_val = self.run_cloud(\n \"-p ec2-test {} --no-deploy\".format(self.instance_name), timeout=TIMEOUT\n )\n # check if instance returned\n self.assertInstanceExists(ret_val)\n\n changed_name = self.insta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new overlay instance, and set it as selected. Once selected, the traitlets will then control the options of the new overlay. | def add_overlay(self, lbl):
# TODO: ability to pass options which would avoid updating the marks until all are set,
# probably by setattr(self.user_api, k, v) (and checks in advance that all are valid?)
self.overlay.add_choice(lbl, set_as_selected=True) | [
"def useOverlay(self, overlay: 'SbBool'=1) -> \"void\":\n return _coin.SoExtSelection_useOverlay(self, overlay)",
"def add_selection(self, pi):\n # pi: \"pick_info\", i.e. an incomplete selection.\n ann = pi.artist.axes.annotate(\n _pick_info.get_ann_text(*pi),\n xy=pi.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an overlay instance. If selected, the selected overlay will default to the first entry in the list. | def remove_overlay(self, lbl):
# NOTE: the overlay will call _on_overlay_remove after updating
self.overlay.remove_choice(lbl) | [
"def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)",
"def remove(self):\n self.layers.pop()",
"def remove_selected(self):\n idx = 0\n for i in list(self.selection):\n idx = self.index(i)\n self.remove(i)\n new = max(0, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Center the values of RA and DEC based on the current zoom limits of a viewer. | def center_on_viewer(self, viewer_ref=None):
if viewer_ref is None:
if not len(self.viewer.selected): # pragma: nocover
raise ValueError("no viewers selected, provide viewer reference")
viewer_ref = self.viewer.selected[0]
viewer = self.app.get_viewer(viewer... | [
"def reset_zoom_and_center(self):\n self._send_to_ztv('reset-zoom-and-center')",
"def setup_limits(self):\r\n #odmakni x granice za specificni interval ovisno o tipu\r\n tmin, tmax = self.prosiri_granice_grafa(self.pocetnoVrijeme, self.zavrsnoVrijeme, 4)\r\n #set granice za max zoom ou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a path to a pickle file that contains a set of pre-made cross-validation folds, this function will load the file and return the contained data. This function also performs simple checks on the object loaded to be sure it is a well-formed CV-folds object | def load_cv_folds(filepath):
folds = pickle.load(open(filepath, "rb"))
if not isinstance(folds, list):
raise RuntimeError("Loaded a non-list item as CV folds.")
if not isinstance(folds[0], tuple) or not len(folds[0]) == 3:
print(type(folds[0]))
print(len(folds))
raise Runti... | [
"def load_classifier():\n with open(\"classifier.pik\", 'rb') as f:\n return pickle.load(f)",
"def loadClassifier(filename):\n with open(filename, 'rb') as fid:\n return cPickle.load(fid)",
"def read_classifier(file_path):\n import cPickle\n if file_path.endswith('.classifier'):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the filename from an ablation file, this function parses out the identifier of the classifier used and then returns the print name which matches the identifier | def resolve_model_name(filename):
first_ = filename.find("_")
second_ = filename.find("_", first_ + 1)
model_name = filename[:second_]
return get_classifier_print_name(model_name) | [
"def get_classid_by_filename(filename):\n filename = os.path.basename(filename)\n return filename.split('_')[-1].split('.')[0]",
"def parse_id(filename):\n match = re.search('B[0-9]{2}-[0-9]{3}', filename) \n if match:\n return match.group()\n return None",
"def classifier_document_name(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of features, this function expands the list of features by creating a min, max, avg feature for each original feature. This is used to retain data from our features when creating a row grouping to represent a single collection of Event-Context pairs. | def expanded_features(feats):
results = list()
for feat in feats:
results.extend(["{}_min".format(feat),
"{}_avg".format(feat),
"{}_max".format(feat)])
return results | [
"def _aggregate_features(self, feature_dict: dict) -> tuple:\n aggregated_features = {}\n aggregation_functions = {'mean': np.mean, 'min': np.min, 'max': np.max, 'std': np.std}\n feature_names = []\n for feature, data in feature_dict.items():\n # Aggregate the feature data and... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of features, this function returns a list of all possible combinations of the features. This is akin to taking the power set of the original feature list; however, we make an exception to group all context-dependency-tail features into a single feature and all event-dependency-tail features into a single featu... | def feature_power_set(data_features):
# Find all context-dep-tail/event-dep-tail features
ctx_dep_cols = [c for c in data_features if "ctxDepTail" in c]
evt_dep_cols = [c for c in data_features if "evtDepTail" in c]
# Remove dep-tail features from overall list
reg_cols = list(set(data_features) - s... | [
"def get_original_features(features):\n # Remove _max, _min, _avg, etc. endings and remove duplicates. (Duplicates\n # are caused by the removal of the endings)\n names = list(set([feat[:feat.rfind(\"_\")] for feat in features]))\n\n # Group dep-tail features\n ctx_dep_cols = [c for c in names if \"c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
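The feature_power_set row shows only the column grouping before truncation; a sketch of how the grouped power set could then be enumerated, with hypothetical pseudo-feature names standing in for the dep-tail groups:

```python
import itertools

def feature_power_set(data_features):
    ctx_dep = [c for c in data_features if "ctxDepTail" in c]
    evt_dep = [c for c in data_features if "evtDepTail" in c]
    reg = sorted(set(data_features) - set(ctx_dep) - set(evt_dep))
    # Each dep-tail family counts as a single unit in the power set.
    units = reg + (["<ctxDepTail>"] if ctx_dep else []) \
                + (["<evtDepTail>"] if evt_dep else [])
    for r in range(1, len(units) + 1):
        for combo in itertools.combinations(units, r):
            feats = []
            for u in combo:
                if u == "<ctxDepTail>":
                    feats.extend(ctx_dep)
                elif u == "<evtDepTail>":
                    feats.extend(evt_dep)
                else:
                    feats.append(u)
            yield feats

cols = ["len", "ctxDepTail_a", "ctxDepTail_b", "evtDepTail_a"]
print(sum(1 for _ in feature_power_set(cols)))  # 7 = 2**3 - 1 subsets
```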
Given an expanded set of features, this function returns the list of features that are contained in the original pandas DataFrame, with the exception that dependency-tail features are represented as a single string. | def get_original_features(features):
# Remove _max, _min, _avg, etc. endings and remove duplicates. (Duplicates
# are caused by the removal of the endings)
names = list(set([feat[:feat.rfind("_")] for feat in features]))
# Group dep-tail features
ctx_dep_cols = [c for c in names if "ctxDepTail" in ... | [
"def list_present_features(self) -> List[str]:\n features = set(feature_path.name.replace('.encrypted', '')\n for feature_path in self.data_path.glob('features_*.parquet*'))\n features = [feature for feature in features if f'-{self.subset}' in feature]\n return sorted(feat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Two solution instances are equal if their x-vectors are roughly the same. There is logically no need for checking the y vectors as well, since there is a many-to-one mapping. "Roughly the same" is defined by class static attribute Solution.eps which defines the relative and absolute tolerance allowed between individual co... | def __eq__(self, other):
if isinstance(other, Solution):
equalities = np.isclose(self.x, other.x, rtol=Solution.eps, atol=Solution.eps)
return np.all(equalities)
else:
        raise InvalidComparison('Attempted to compare instance with non-Solution instance.')
"def are_points_equal(a, b, epsilon = 1e-9):\n try:\n x1, y1 = a.x, a.y\n x2, y2 = b.x, b.y\n except AttributeError:\n x1, y1 = a[0], a[1]\n x2, y2 = b[0], b[1]\n return (abs(x1-x2) < epsilon) and (abs(y1-y2) < epsilon)",
"def same_point(a, b):\n return math.fabs(b[0] - a[0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Logs the given message every n calls to a logger. | def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name
logger = logger or logging.getLogger()
def _gen(): # pylint: disable=missing-docstring
while True:
for _ in xrange(n):
yield False
logger.log(level, message, *args)
yield True
gen = _gen()
... | [
"def log_every_n(n, level, message, *args): # pylint: disable=invalid-name\n return _log_every_n_to_logger(n, None, level, message, *args)",
"def log(self, message: str) -> bool:\n self.count += 1\n if self.count == self.frequency:\n self.logger.info(message)\n self.count = 0\n return True\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
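The _log_every_n_to_logger row ends right after `gen = _gen()`; presumably the truncated tail returns a callable that advances the generator once per call. A Python 3 sketch of the same throttling idea (the row itself is Python 2 code, hence `xrange`):

```python
import logging

def _log_every_n_to_logger(n, logger, level, message, *args):
    logger = logger or logging.getLogger()

    def _gen():
        # Yield n "skipped" signals, then emit one log line, forever.
        while True:
            for _ in range(n):
                yield False
            logger.log(level, message, *args)
            yield True

    gen = _gen()
    # Assumed from context: return a closure that advances the generator.
    return lambda: next(gen)

logging.basicConfig(level=logging.INFO)
tick = _log_every_n_to_logger(2, None, logging.INFO, "heartbeat")
for _ in range(6):
    tick()  # logs on the 3rd and 6th calls
```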
Logs a message every n calls. See _log_every_n_to_logger. | def log_every_n(n, level, message, *args): # pylint: disable=invalid-name
return _log_every_n_to_logger(n, None, level, message, *args) | [
"def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name\n logger = logger or logging.getLogger()\n def _gen(): # pylint: disable=missing-docstring\n while True:\n for _ in xrange(n):\n yield False\n logger.log(level, message, *args)\n yield True\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the version string of the 'openhtf' package. | def get_version():
version = 'Unknown'
try:
version = get_distribution('openhtf')
except DistributionNotFound:
version = 'Unknown - Perhaps openhtf was not installed via setup.py or pip.'
return version | [
"def get_version() -> str:\n return VERSION",
"def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version",
"def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version",
"def get_version():\n return 'PyS2OPC... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a path to a file with a single word on each line. Returns a list of those words | def get_words(file_path):
words = []
with open(file_path) as f:
for line in f:
words.append(line.strip())
return words | [
"def get_wordle_list(filename: string) -> string:\n with open(filename, \"r\", encoding=\"utf-8\") as file_handler:\n return file_handler.read()",
"def get_wordlist():\n with open(WORDLIST_FILE) as english:\n wordlist = english.readlines()\n return [word.strip() for word in wordlist]",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of (word, count, percentage) tuples, return the top two word counts. | def top_two_word(counts):
limited_counts = counts[0:2]
count_data = [count for (_, count, _) in limited_counts]
return count_data | [
"def top_word(word_count):\n\n top_words = []\n last_count = 0\n for each in word_count:\n if len(each.split()) < 2:\n # we compress spaces in our word count but sometimes they still\n # show up as a count, we won't list them as a top word though.\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method to list the entity owners | def list_entity_owners(ipaddress, entity_owner_list):
entity = ":8181/restconf/operational/entity-owners:entity-owners"
url = "http://" + ipaddress + entity
resp = requests.get(url, headers=con_header, auth=authentication)
if resp.status_code != RESP_GET_SUCCESS:
print("controller is down, resp_... | [
"def get_owners(conn):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM owners;\"\"\"\n c.execute(sql)\n return c.fetchall()",
"def get_all_owners():\n owners = []\n for owner in query_db('SELECT * FROM owner'):\n owners.append({\n 'id': owner['id'],\n 'firstName': ow... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
compile .osl file with given filepath to temporary .oso file | def my_osl_compile(self, input_path):
output_file = tempfile.NamedTemporaryFile(mode='w', suffix=".oso", delete=False)
output_path = output_file.name
output_file.close()
ok = _cycles.osl_compile(input_path, output_path)
print("osl compile output = %s" % output_path)
if o... | [
"def make_source_fs():\n return open_fs('temp://')",
"def _compile_file(engine, path, e_file):\n # Change directory and execute engine\n os.chdir(path)\n os.system(engine + e_file)",
"def compile_assembly(filename: str, cmd: str, temp: str):\n assembly = os.path.basename(filename).partition(\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scrapes website for info on a specific company | def scrape_company_info(driver, site, company_name):
source = get_page_source(driver, site, company_name)
soup = BeautifulSoup(source, "html.parser")
company_curr = Company(soup)
name = company_curr.get_name()
desc = company_curr.get_desc()
location = company_curr.get_location()
size = co... | [
"def get_company_info(name, session):\n escaped_name = urllib.parse.quote_plus(name)\n\n response = session.get(('https://www.linkedin.com'\n '/voyager/api/organization/companies?'\n 'q=universalName&universalName=' + escaped_name))\n\n if response.stat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all wave files (recursively) from the provided directory in sorted order | def get_all_waves(directory: str) -> list:
    files = glob.glob(directory + '/**/*.wav', recursive=True)
    if not files:
        logging.warning('No WAVE files found in %s', directory)
else:
files.sort()
return files | [
"def _get_wav_files(dir_path):\n files = []\n for file in os.listdir(dir_path):\n if file.endswith(\".wav\"):\n files.append(file)\n return files",
"def collect_files_from_dir(directory, prefix=\"\", suffix=\"\", recursive=True):\n files = []\n _collect_files_from_dir(directory, p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the original_filename of this Job. | def original_filename(self, original_filename):
self._original_filename = original_filename | [
"def set_filename(self, filename):\n return",
"def setfilename(self, filename):\n if os.path.exists(filename):\n self._filename = filename\n self.backup_filename = filename + self.backup_ext\n else:\n raise ValueError(\"Invalid path '%s' passed to ScriptFile\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the source_url of this Job. | def source_url(self, source_url):
self._source_url = source_url | [
"def set_source_path(self, source_path):\n\n self.source_path = source_path",
"def campaign_source(self, campaign_source):\n \n self._campaign_source = campaign_source",
"def url(self, url):\n self._url = url",
"def set_server_url(self, url: str):\n self.url = url",
"async... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the progress of this Job. | def progress(self, progress):
self._progress = progress | [
"def update_progress(self, value):\n self.progress.setValue(value)",
"def setProgress(self, n, m):\n pass",
"def set_progress(self, value):\n\n if self.active_socket is not None:\n msg = 'PROGRESS %f\\n' % float(value)\n try:\n self.active_socket.send(ms... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the bitrate of this Job. | def bitrate(self, bitrate):
self._bitrate = bitrate | [
"def setBitrate(self, bitrate):\n try:\n # bypassed by request from Ivan\n if (pu.pxpconfig.IgnoreVideoSettings()):\n dbg.prn(dbg.TDK,\"td -- SetBitrate BYBASSED\")\n return\n \n url = \"http://\"+self.ip+\"/cgi-bin/api.cgi\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the sample_rate of this Job. | def sample_rate(self, sample_rate):
self._sample_rate = sample_rate | [
"def change_sampling_rate(self, sampling_rate):\n\n self.sampling_rate = sampling_rate",
"def _set_sample_rate(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the file_size of this Job. | def file_size(self, file_size):
self._file_size = file_size | [
"def set_file_size(self, file_size):\n self.file_size = file_size",
"def setFilmSize(self, size):\n self.filmSize = size",
"def set_cache_size(self, size):\n self.__cache_max_size = size",
"def set_cache_size(self, size):\n pass",
"def model_size(self, model_size):\n\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the custom_words of this Job. | def custom_words(self, custom_words):
self._custom_words = custom_words | [
"def setWords(self, words, style, substyle=-1):\n if substyle >= 0:\n # only supported for sub-styles\n self.__lex.setSubstyleWords(words, style, substyle)",
"def customs(self, customs):\n\n self._customs = customs",
"def setKeywords(self) -> None:\n # Add any new user... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Whether or not this provider supports a given URI. | def supports(uri: str) -> bool:
is_remote, is_git = check_url(uri)
return is_remote and is_git | [
"def supports(uri):\n is_doi_ = is_doi(uri)\n\n is_dataverse_uri = is_doi_ is None and check_dataverse_uri(uri)\n is_dataverse_doi = is_doi_ and check_dataverse_doi(is_doi_.group(0))\n\n return is_dataverse_uri or is_dataverse_doi",
"def get_supported_uri(self, uris):\n transpor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update dataset files from the remote provider. | def update_files(
self,
files: List[DynamicProxy],
dry_run: bool,
delete: bool,
context: Dict[str, Any],
ref: Optional[str] = None,
**kwargs,
) -> List["DatasetUpdateMetadata"]:
from renku.core.dataset.providers.models import DatasetUpdateAction, Datas... | [
"def pull_all_data_dependencies(self):\n self._overwrite_dvc_config()\n\n # checkout dvc pull files according to git checkout\n subprocess.check_call([\"dvc\", \"pull\", \"-r\", self.remote_repo])\n logging.getLogger(__name__).info(\"Pulling right data version from remote dvc storage... ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify if server contains the file | def verify_if_server_contains_file(self, section="cs_url", path=""):
server = TESTDATA[section][u'server_address']
command = 'ssh {} [ -f {} ] && echo "Found" || echo "Not found"'.format(server, path)
if self.run_bash_command(command, True).strip() != "Found":
self.fail("File not fo... | [
"def exist_remote_file(target, path):\n cmd = 'test -f %s' % path\n res = run_ssh(target, cmd)\n if res == 0:\n return True\n return False",
"def file_exists(self, resource: GenomicResource, filename: str) -> bool:",
"def file_exists(self, path):\n path = path.strip('/')\n file_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find exceptions from logs and save them | def find_exception_from_logs_and_save(self, start_time, stop_time, name_prefix="", copy_location=""):
self.run_folder = get_config_value("reporting_folder_run")
self.report_folder = get_config_value("reporting_folder")
error_log_file = open(self.report_folder + os.sep + "error_logs.txt", "w")
... | [
"def _extract_exceptions_from_logs(start_time, end_time, module_versions):\n if start_time and end_time and start_time >= end_time:\n raise webob.exc.HTTPBadRequest(\n 'Invalid range, start_time must be before end_time.')\n try:\n for entry in logservice.fetch(\n start_time=start_time or None,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify audit log file in server. | def verify_audit_log(self, section=u'ss1_url', event="Log in user"):
# Sleep waiting log
sleep(1)
server_log_address = TESTDATA[section][u'server_address']
user = TESTDATA[section][u'j_username']
log_output = self.read_server_file(server_log_address, strings.audit_log)
... | [
"def CheckLog(file): \n lines_list = open(file).read().splitlines()\n rejects = []\n\n for l in lines_list:\n record = LogRecord({ 'record' : l})\n if record.action == \"REJECT\":\n rejects.append(l)\n return rejects",
"def run_audit(audits=None, plateform=None):\n from ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate log probabilities of a batch of observations & actions. Calculates log probabilities using the previous step's model parameters and the new parameters being trained. | def _logprob(self):
logp = -0.5 * tf.reduce_sum(self.log_vars)
logp += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.means) /
tf.exp(self.log_vars), axis=-1)
self.logp = logp
logp_old = -0.5 * tf.reduce_sum(self.log_vars)
logp_old +... | [
"def logprob(self, action_sample, policy_params):\n return self.action_head.logprob(action_sample, policy_params)",
"def posteriorLikelihood(self, step):",
"def sample(self):\n observations, rewards, actions, logprobs, dones, values = [], [], [], [], [], []\n done = False\n for step ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
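The _logprob row computes a diagonal-Gaussian log density twice, once with the trainable parameters and once with the previous step's, so their difference can drive an importance ratio. A NumPy sketch of the same formula; the constant -k/2 * log(2*pi) is dropped here, as it cancels in logp - logp_old:

```python
import numpy as np

def diag_gaussian_logp(actions, means, log_vars):
    # log p(a) = -1/2 * sum(log sigma^2) - 1/2 * sum((a - mu)^2 / sigma^2)
    logp = -0.5 * np.sum(log_vars)
    logp += -0.5 * np.sum(
        np.square(actions - means) / np.exp(log_vars), axis=-1
    )
    return logp

actions = np.array([[0.1, -0.2]])
means = np.zeros((1, 2))
log_vars = np.zeros(2)  # unit variance
print(diag_gaussian_logp(actions, means, log_vars))  # [-0.025]
```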
Parses the itype argument and returns a set of strings with all the selected interaction types | def parse_itypes(itype_argument):
if "all" in itype_argument:
return ["sb", "pc", "ps", "ts", "vdw", "hb", "lhb", "hbbb", "hbsb",
"hbss", "wb", "wb2", "hls", "hlb", "lwb", "lwb2"]
return set(itype_argument.split(",")) | [
"def get_input_types():\n return [\"normal\", \"string_all\", \"string_upper\", \"string_lower\",\n \"specials\", \"integer\", \"float\", \"version\", \"nothing\"]",
"def getTypeInfo():",
"def get_result_type_ids():\n try:\n from pydoas import _LIBDIR\n except:\n raise\n wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a traceplot from the contact frames and writes a figure to an image file. | def write_trace(contact_frames, labels, output_file):
assert len(contact_frames) == len(labels)
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
num_interactions = len(contact_frames)
num_frames = max(map(max, contact_frames)) + 1
f, axs = plt.subplots(num_interactions... | [
"def save_frame(frame_num, frame_path, frame_plot):\n # frame plot\n frame_plot()\n plt.savefig(frame_path + str(frame_num) + '.png')\n plt.close()",
"def PlotGeometry(self,plot_file):\n #for each beam, get the (x,y) coordinates and plot\n max_x,max_y = np.max(self.joints_arr[:,0]),np.ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the feature set xFeat, predict what class the values will have. | def predict(self, xFeat):
yHat = []
for row in xFeat:
row_df = pd.DataFrame(row)
votes = []
for tree, features in zip(self.trees, self.features):
xtest = row_df.iloc[features]
votes.append(tree.predict(xtest.T)[0])
yHat.appe... | [
"def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction",
"def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array([self._classify(x) for x in X])",
"def predict(self, x_set):\n def classify(x):\n # Pick top-voted label among the k nea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read an input file and convert it to numpy | def file_to_numpy(filename):
df = pd.read_csv(filename)
return df.to_numpy() | [
"def _file_to_array(self, file, type=int):\n\n mlist = []\n for line in open(file):\n mlist.append(line)\n return np.asarray(mlist, dtype=type)",
"def txt_to_array(pathname, shape):\n import numpy as np\n f = open(pathname, 'r')\n data = np.array(\n [float(i) for i ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Distributes positions over a lattice of the given type | def position_lattice(type = 'cubic'):
x = np.zeros(system.N)
y = np.zeros(system.N)
z = np.zeros(system.N)
places_z = np.linspace(0, system.L[2], num=system.n[2], endpoint = False)
places_z += places_z[1]*0.5
n_part = 0
for i,j,k in itertools.product(list(np.arange(system.n[0])), list(np.... | [
"def position_generator(lattice_structure, nx = 20, ny = 20):\n\n a1 = None\n a2 = None\n phi = None\n\n if lattice_structure=='hexagonal':\n a1 = a2 = random.uniform(0.8, 2)\n phi = 2*(math.pi/3)\n \n elif lattice_structure=='square':\n a1 = a2 = random.uniform(0.8, 2)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Randomly generates velocities in the range (-1,1) | def velocity_random(type = 'uniform'):
if(type == 'uniform'):
system.vel = np.random.uniform(-1.0, 1.0, (system.N,system.dim))
#Velocities are shifted to avoid unwanted momenta
for dim in range(system.vel.shape[1]):
system.vel[:,dim] -= np.mean(system.vel[:,dim])
elif(type =... | [
"def generate_random_velocity():\n return random.randrange(5), random.randrange(5), random.randrange(5)",
"def __random_velocity():\n heading = random.randint(0, 360)\n magnitude = random.randint(ASTEROID_MIN_VEL, ASTEROID_MAX_VEL)\n return heading, magnitude",
"def _get_random_veloc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls Numba for rescaling the velocities | def velocity_rescale():
system.vel = v_res(system.vel, system.T, const.KB, system.mass) | [
"def updateVelocities(self) -> None:\r\n for idx1 in range(self.size() - 1):\r\n for idx2 in range(idx1 + 1, self.size()):\r\n self.updateVelocity(idx1, idx2)",
"def action_scaling_vecs(self):\n vel_vec = np.arange(1, self.specs['velocity_limits'][1] + 1, 1)\n\n acc_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the Nosé-Hoover energy contribution given by E = Q*xi^2/2 + 3*N*kb*T*ln(s) | def nose_hoover_energy(Q, xi, N, kb, T, lns):
energy = 0.5*Q*xi**2 + 3*N*kb*T*lns
return energy | [
"def sodiumHydrogenExchanger(Na_i, H_i, enable_I_NaH):\n if (enable_I_NaH == True):\n n_H = params_dict[\"n_H\"]; K_H_i_mod = params_dict[\"K_H_i_mod\"]; I_NaH_scale = params_dict[\"I_NaH_scale\"]\n k1_p = params_dict[\"k1_p\"]; k1_m = params_dict[\"k1_m\"]; k2_p = params_dict[\"k2_p\"]; k2_m = par... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the running average of a given observable at time t | def running_average(array, dt=1):
if not isinstance(array, np.ndarray):
array = np.asarray(array)
    # Divide each cumulative sum by the elapsed time dt*(j+1), vectorized.
    r_ave = np.cumsum(array*dt) / (dt*np.arange(1, len(array) + 1))
    return r_ave
"def get_avg(t):\n l = []\n for pl in range(n):\n l.append(markov[pl][t])\n expr = l[0]\n for i in range(1,n):\n expr = expr+l[i]\n return(1/n*expr)",
"def at(self, t):\n return \\\n self._avg + (self._lastValue - self._avg)*(1 - (1 - self.x.alpha)**( t - self._lastT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the block average of a potentially correlated timeseries "array" | def block_average(array, block_size):
if not isinstance(array, np.ndarray):
array = np.asarray(array)
n_blocks = int(len(array)/block_size)
blocks = np.array_split(array, n_blocks)
average = []
for block in blocks:
average.append(np.mean(block))
error = np.std(np.asarray(avera... | [
"def mean_per_block(array, axis=None, controller=None):\n if axis is None or axis == 0:\n return sum_per_block(array, axis, controller) / count_per_block(array, axis, controller)\n else:\n return sum(array, axis, controller)",
"def block_mean(ar, fact):\n\t\n\tassert isinstance(fact, int), typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
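The block_average row is cut off inside the error estimate; the usual convention is the standard deviation of block means over sqrt(n_blocks). A sketch under that assumption, exercised on a correlated AR(1) series:

```python
import numpy as np

def block_average(array, block_size):
    array = np.asarray(array)
    n_blocks = int(len(array) / block_size)
    blocks = np.array_split(array, n_blocks)
    means = np.array([b.mean() for b in blocks])
    # Blocks long enough to decorrelate make this a fair standard error.
    return means.mean(), means.std() / np.sqrt(n_blocks)

rng = np.random.default_rng(0)
x = np.zeros(10_000)
for i in range(1, len(x)):  # AR(1): strongly correlated samples
    x[i] = 0.9 * x[i - 1] + rng.normal()
print(block_average(x, block_size=500))
```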
Computes current pressure using the kinetic energy and the calculated internal virial | def current_pressure(virial):
pressure = (2*system.kinetic + system.virial)/3/system.V
system.pressure = pressure
return pressure | [
"def pressure(current_data):\n pressure = gamma1*(current_data.q[2,:]-\n 0.5*current_data.q[1,:]**2/current_data.q[0,:])\n return pressure",
"def velocity_pressure(self) -> qty.Pressure:\n rho = self._fluid.density()\n v = self._flow_rate / self._cross_section.area()\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the internal virial given by Theta = sum_i r_i dot f_i | def internal_virial(r,f):
virial = 0
for i in range(r.shape[0]):
for dim in range(r.shape[1]):
virial += r[i,dim]*f[i,dim]
return virial | [
"def integrate(self,f,use_dV=False):\n if use_dV:\n return ((f[0:self.N-1]+f[1:self.N])*self.dV).sum()*0.5\n else:\n return ((f[0:self.N-1]+f[1:self.N])*self.dr).sum()*0.5",
"def _refraction(self, F, i, forward=True):\n\n\n if forward:\n dx = self.calc_x_coord... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
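The two entries above combine through P = (2K + Θ)/(3V). The nested loops in internal_virial are just an element-wise sum, so a vectorized sketch of both pieces (standalone arguments in place of the module-level `system` object):

```python
import numpy as np

def internal_virial(r, f):
    """Theta = sum_i r_i . f_i over all particles and dimensions."""
    return float(np.sum(r * f))

def pressure(kinetic, virial, volume):
    """Instantaneous virial pressure P = (2K + Theta) / (3V)."""
    return (2.0 * kinetic + virial) / (3.0 * volume)
```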
Computes the radial distribution function of the system, along with the coordination number and the isothermal compressibility | def radial_distribution_function(nbins=50):
# Array of distances
dist = rdf_distances(system.pos/force.sigma, system.L/force.sigma, np.zeros(system.N*(system.N-1)))
max_dist = 0.5*system.L[0]/force.sigma
bins = np.linspace(0., max_dist, nbins)
rdf = nrdf(bins, np.zeros(len(bins)-1, dtype = np.floa... | [
"def radial_gaussian( rij, i_atom , width, rshift, Rc ):\n\n #print(\" symmetry function \", i_atom )\n\n Gi=0\n for j_atom in range( rij.shape[0] ):\n\n fc = cutoff_function( rij[i_atom][j_atom] , Rc )\n Gi = Gi + fc * np.exp(-width * (rij[i_atom][j_atom]-rshift)**2 )\n #print( j_atom... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
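The document is truncated before the normalization step; the standard recipe divides the pair-distance histogram by the expected ideal-gas count in each spherical shell. A sketch, with the pair-counting factor left as an explicit assumption:

```python
import numpy as np

def rdf(distances, rho, n_particles, nbins=50, r_max=None):
    """g(r) from a flat array of pair distances; rho is number density."""
    r_max = distances.max() if r_max is None else r_max
    hist, edges = np.histogram(distances, bins=nbins, range=(0.0, r_max))
    shell_vol = 4.0 / 3.0 * np.pi * (edges[1:]**3 - edges[:-1]**3)
    # Assumes `distances` holds all N*(N-1) ordered pairs, as suggested by
    # the original's np.zeros(system.N*(system.N-1)) buffer.
    ideal = rho * shell_vol * n_particles
    return hist / ideal, 0.5 * (edges[1:] + edges[:-1])
```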
Computes the density profile of the particles over a given axis | def density_profile(axis, nbins = 100):
bins = np.linspace(0., system.L[axis], num=nbins)
hist = np.histogram(system.pos[:,axis], bins=bins, density=True)
return hist[0], hist[1] | [
"def sweep_density(pos, box, window, pts, axis=0):\n if axis == 0:\n L = box.Lx\n h = box.Ly\n elif axis == 1:\n L = box.Ly\n h = box.Lx\n else:\n raise ValueError('axis must be 0 or 1')\n X = np.linspace(-L/2, L/2, pts)\n Y = np.zeros(pts)\n for i, x in enumerat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
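Usage sketch on toy data. Note that `density=True` makes the histogram integrate to 1 along the axis, so converting to an actual number density needs N and the cross-sectional area (an assumption about the intended units):

```python
import numpy as np

pos = np.random.uniform(0.0, 10.0, (1000, 3))   # toy positions, box L = 10
bins = np.linspace(0.0, 10.0, 101)
profile, edges = np.histogram(pos[:, 2], bins=bins, density=True)
print(profile.mean())  # ~0.1 == 1/L for a uniform fluid
```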
Return empty perms dict thus hiding the model from admin index. | def get_model_perms(self, request):
return {} | [
"def _permissions():\n return getattr(g, '_request_permissions', {})",
"def initial_permissions() -> [[str, str]]:\n return {'admin_all': ['user__Admin', 'resource__All'],\n 'guest_all': ['user__Guest', 'resource__All']}",
"def get_permissions_for_registration(self):\n from wagtail.snipp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
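This is the usual trick for keeping a ModelAdmin registered (so inlines, autocomplete, and direct URLs still work) while hiding the model from the index page. A sketch with a hypothetical model:

```python
from django.contrib import admin
from myapp.models import Secret  # hypothetical model

@admin.register(Secret)
class SecretAdmin(admin.ModelAdmin):
    def get_model_perms(self, request):
        # Empty perms: hidden from the admin index, but the change
        # views stay reachable by URL and usable by related widgets.
        return {}
```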
Get paths like ``sysconfig.get_paths()`` for installation. | def get_paths(self) -> Dict[str, str]:
paths = sysconfig.get_paths()
scripts = "Scripts" if os.name == "nt" else "bin"
packages_path = self.packages_path
paths["platlib"] = paths["purelib"] = (packages_path / "lib").as_posix()
paths["scripts"] = (packages_path / scripts).as_posix... | [
"def get_paths(self, name):\n info = self.get_module_info(name)\n if info:\n return info.get(constants.MODULE_PATH, [])\n return []",
"def get_paths() -> list[pathlib.Path]:\n logger.debug(\"Identifying service paths\")\n\n base_paths = [\"/\", \"/System\"] if os.getenv(\"SUD... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
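For reference, the stock `sysconfig` baseline that the method above rewrites (exact values vary by platform and interpreter):

```python
import sysconfig

paths = sysconfig.get_paths()
print(paths["purelib"])  # e.g. .../lib/python3.x/site-packages
print(paths["scripts"])  # .../bin on POSIX, ...\Scripts on Windows
```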
Activate the environment. Manipulate the ``PYTHONPATH`` and patch ``pip`` to be aware of local packages. This method acts like a context manager. | def activate(self):
paths = self.get_paths()
with temp_environ():
working_set = self.get_working_set()
_old_ws = pkg_resources.working_set
pkg_resources.working_set = working_set.pkg_ws
# HACK: Replace the is_local with environment version so that packages... | [
"def activate_env(prefix):\n # type: (str) -> None\n logger.info(\"activate_env %s\", locals())\n os.environ[\"PATH\"] = \":\".join([prefix + \"/bin\", os.environ.get(\"PATH\", \"\")])\n os.environ[\"CONDA_PREFIX\"] = prefix",
"def post_setup(self, context):\n os.environ[\"VIRTUAL_ENV\"] = cont... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
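The method leans on a `temp_environ()` helper to undo its `os.environ` changes on exit; the body below is an assumption sketching the standard save-and-restore pattern behind such a name:

```python
import contextlib
import os

@contextlib.contextmanager
def temp_environ():
    """Restore os.environ to its previous state when the block exits."""
    saved = os.environ.copy()
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)
```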
Return the package finder of given index sources. | def get_finder(
self,
sources: Optional[List[Source]] = None,
ignore_requires_python: bool = False,
) -> Generator[pip_shims.PackageFinder, None, None]:
if sources is None:
sources = self.project.sources
for source in sources:
source["url"] = expand_en... | [
"def find_installed_sources():",
"def _build_package_finder(\n self,\n options, # type: Values\n session, # type: PipSession\n platform=None, # type: Optional[str]\n python_versions=None, # type: Optional[List[str]]\n abi=None, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the working set based on local packages directory. | def get_working_set(self) -> WorkingSet:
paths = self.get_paths()
return WorkingSet(
[paths["platlib"]], python=get_python_version(self.python_executable)[0]
) | [
"def get_local_packages(directory=THIRDPARTY_DIR):\n return list(PypiPackage.packages_from_dir(directory=directory))",
"def get_packages(self) -> list:\r\n return os.listdir(f\"{self.path}/uniflash-packages\")",
"def get_installed_sources():",
"def AddDistToWorkingSet(distPath):\n zpDists = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get environment for marker evaluation | def marker_environment(self) -> Dict[str, Any]:
return get_pep508_environment(self.python_executable) | [
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def environment(self) -> rl_environment.Environment:\n return self._environment",
"def runtime_environment(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_environment\")",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
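For the current interpreter, the `packaging` library exposes the same PEP 508 environment dict and evaluates markers against it:

```python
from packaging.markers import Marker, default_environment

env = default_environment()
print(env["python_version"], env["sys_platform"])

marker = Marker('python_version >= "3.8" and sys_platform != "win32"')
print(marker.evaluate(env))
```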
Update the shebang lines | def update_shebangs(self, new_path: str) -> None:
scripts = self.get_paths()["scripts"]
maker = ScriptMaker(None, None)
maker.executable = new_path
shebang = maker._get_shebang("utf-8").rstrip().replace(b"\\", b"\\\\")
for child in Path(scripts).iterdir():
if not chil... | [
"def shebang(self):\n try:\n first_line = self.stripped_lines()[0]\n if first_line.startswith(\"#!\"):\n return first_line[2:].strip()\n except IndexError:\n pass\n return \"\"",
"def shebang(path):\n return get(path)",
"def test_shebang(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
create a linked instance of SharedArrays that uses the same data and shm_manager | def fork(self):
return SharedArrays(self.array_headers, self.shm_manager) | [
"def create_array(self, key, proto: ArrayProto):\n self[key] = shared_array = SharedArray(proto, self.shm_manager.SharedMemory(size=proto.nbytes))\n return shared_array",
"def allocate_shared_mem(self):\n # Get array shape and data types\n if self.snapshot.snapshot_type == \"numpy\":\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and return a shared array under the specified key; if the key already exists, it is overwritten. | def create_array(self, key, proto: ArrayProto):
self[key] = shared_array = SharedArray(proto, self.shm_manager.SharedMemory(size=proto.nbytes))
return shared_array | [
"def add(self, key, value):\r\n index = self.hash(key)\r\n\r\n if self.array[index] is not None:\r\n # This index contains some values.\r\n # We need to check if the key we're adding already exists, this\r\n # way, we can update it with the new value, this way, we can ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
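`ArrayProto` and `shm_manager` are project-specific; a minimal standalone sketch of the same idea using the standard library, with a plain shape/dtype standing in for the proto:

```python
import numpy as np
from multiprocessing import shared_memory

def create_shared(shape, dtype=np.float64):
    """Allocate shared memory and expose it as a NumPy array."""
    nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
    shm = shared_memory.SharedMemory(create=True, size=nbytes)
    return np.ndarray(shape, dtype=dtype, buffer=shm.buf), shm

arr, shm = create_shared((4, 4))
arr[:] = 1.0            # visible to any process that attaches by shm.name
shm.close()
shm.unlink()            # free the segment when fully done
```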
create a player character. Initializes health, xp, score, char_class, name, and debug | def __init__(self, name="Samalander", score=0, char_class='human', xp=10, debug=False):
self.char_class = char_class
self.xp = xp
self.level = 0
self.attack = 0
self.defense = 0
self.__setLevel()
self.__setMaxHP()
self.health = self.maxHealth
self.score = score
self.inventory = items.getItems(... | [
"def create_char(char_values):\n consts = _CharacterCreator._get_constants(char_values)\n if consts is None:\n return 3\n\n if CharacterManager.does_char_with_name_exist(char_values[\"name\"]):\n return 2\n\n char_data = _CharacterCreator._try_create_char(char_value... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
used to set self.health | def __setHealth(self,health):
self.health = health | [
"def set_health(self, new_health):\n self.health = new_health",
"def set_health(self, health):\r\n if(health > self._max_health):\r\n self._health = self._max_health\r\n elif(health < 0):\r\n self._health = 0\r\n else:\r\n self._health = health",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the current player inventory. | def getInventory(self):
return self.inventory | [
"def inventory(self):\n return self.meta['inventory']",
"def inventory(self):\n if self.player.inventory != []:\n items = ''\n for things in self.player.inventory:\n items += things.name + ', '\n items1 = items[0:-2]\n print ('Inventory: ' +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
used to set self.score | def __setScore(self, score):
self.score = score
return self.score | [
"def set_input_score(self, score):\n pass",
"def set_score(self,new_score):\n self.__fitness = new_score",
"def change_score(self, new_score):\n raise NotImplementedError",
"def reset_score(self):\n\n self.score = 0",
"def qm_score(self, qm_score):\n self._qm_score = qm_sc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
used to set self.level | def setLevel(self):
self.level = int(floor(sqrt(self.xp))) | [
"def SetLevel(self, level):\n self.level = level",
"def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))",
"def set_level(self, grade_id):\n pass",
"def setLevel(self, level):\n self.Logger.setLevel(self.Le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
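A quick numeric check of the level rule above, level = floor(sqrt(xp)):

```python
from math import floor, sqrt

for xp in (10, 25, 99, 100):
    print(xp, int(floor(sqrt(xp))))  # 10->3, 25->5, 99->9, 100->10
```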