query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Transforms an HTML entity to a character. | def htmlentity_transform(entity):
# Known non-numeric HTML entity
try:
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
except Exception: pass
mobj = re.match(r'#(x?[0-9A-Fa-f]+)', entity)
if mobj is not None:
... | [
"def __html2unicode(self, s):\n # First the digits:\n ents = set(html_entity_digit_re.findall(s))\n if len(ents) > 0:\n for ent in ents:\n entnum = ent[2:-1]\n try:\n entnum = int(entnum)\n s = s.replace(ent, unichr(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checking if Spark (sparkshell) is running properly | def test_spark_shell(container: TrackedContainer) -> None:
logs = container.run_and_wait(
timeout=60,
no_warnings=False,
tty=True,
command=["start.sh", "bash", "-c", 'spark-shell <<< "1+1"'],
)
warnings = TrackedContainer.get_warnings(logs)
# Some Spark warnings
asser... | [
"def test_pyspark_import():\n\n try:\n import pyspark\n print(pyspark.__version__)\n except (ImportError, ModuleNotFoundError):\n pytest.fail(\"pyspark can't be imported\")",
"def check_java():\n run('java -version 2>&1 | head -1')",
"def check_slurm():\n return False",
"def _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cannot remove last owner of a group | def test_01_remove_last_owner_of_group(self):
felines = self.felines
dog = self.dog
self.assertTrue(dog.uaccess.owns_group(felines))
self.assertEqual(felines.gaccess.owners.count(), 1)
# try to downgrade your own privilege
with self.assertRaises(PermissionDenied) as cm:
... | [
"def test_01_remove_last_owner_of_resource(self):\n scratching = self.scratching\n dog = self.dog\n self.assertTrue(dog.uaccess.owns_resource(scratching))\n self.assertEqual(scratching.raccess.owners.count(), 1)\n\n # try to downgrade your own privilege\n with self.assertRa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cannot remove last owner of a resource | def test_01_remove_last_owner_of_resource(self):
scratching = self.scratching
dog = self.dog
self.assertTrue(dog.uaccess.owns_resource(scratching))
self.assertEqual(scratching.raccess.owners.count(), 1)
# try to downgrade your own privilege
with self.assertRaises(Permiss... | [
"def test_01_remove_last_owner_of_group(self):\n felines = self.felines\n dog = self.dog\n self.assertTrue(dog.uaccess.owns_group(felines))\n self.assertEqual(felines.gaccess.owners.count(), 1)\n\n # try to downgrade your own privilege\n with self.assertRaises(PermissionDen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Using the optical rectifier, rectify the optic of an entire directory | def rectify_all_inputs(inputFolder, outputFolder):
tableSrc = [24, 25, 25, 26, 26, 27, 27, 29, 29, 32, 32, 36, 36, 40, 41, 43, 44]
imgViewAngle = 180
imgWidth = 1440
imgHeight = 1440
oprec = OpticalRectifier(tableSrc, imgViewAngle, imgWidth, imgHeight)
oprec.rectify_all_inputs(inputFolder, outp... | [
"def solve_all(self, solved_dir=None, depth=None):\n script_dir = os.getcwd()\n os.chdir(self.data_dir)\n \n image_header = fits.getheader(f\"{self.data_dir}/{self.files[0]}\")\n image_data = fits.getdata(f\"{self.data_dir}/{self.files[0]}\")\n \n pixscale = image_header[\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This sniffs all ARP requests (op 1, who has) the machine made on the network | def sniff_requests():
sniff(filter='arp', lfilter=outgoing_req, prn=add_req, iface=conf.iface) | [
"def arp_scan(self):\r\n if self.router_mac and self.router_mac == GLOBAL_MAC:\r\n self.init()\r\n\r\n self.generate_ips()\r\n scan_result = arping(f\"{self.router_ip}/24\", verbose=0, timeout=1)\r\n clean_result = [(i[1].psrc, i[1].src) for i in scan_result[0]]\r\n\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This sniffs all ARP replays (op 2, is at) the machine received from the network | def sniff_replays():
sniff(filter='arp', lfilter=incoming_reply, prn=check_arp_header, iface=conf.iface) | [
"def sniff_requests():\n sniff(filter='arp', lfilter=outgoing_req, prn=add_req, iface=conf.iface)",
"def parse_arp_packet(eth):\n a = eth.data\n if a.op == dpkt.arp.ARP_OP_REPLY and a.hln == 6 and a.pln == 4:\n ip = format_ip(a.spa)\n mac = format_mac(a.sha)\n update_arp(mac, ip)\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This module prints the ARP messages for debugging purposes | def print_arp(pkt):
if pkt[ARP].op == 1:
print(pkt[ARP].hwsrc, ' who has ', pkt[ARP].pdst)
else:
print(pkt[ARP].psrc, ' is at ', pkt[ARP].hwsrc) | [
"def arp(cmd, *args, **argv):\n \n def arp_show():\n from _arp_deal import arp as show_arp\n arplists = show_arp()\n\tfrom _prettytable import PrettyTable\n\tarp_t = PrettyTable([\"IPadress\", \"HWaddress\", \"Flags\", \"Iface\"])\n\tarp_t.align = \"l\"\n\tarp_t.padding_width = 1\n\tarp_t.border... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if the packet is an incoming ARP reply message | def incoming_reply(pkt):
return pkt[ARP].psrc != str(get_if_addr(conf.iface)) and pkt[ARP].op == 2 | [
"def test_arp_reply_from_host(self):\n arp_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"arp_code\": arp.ARP_REPLY,\n \"a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if the packet is an outgoing ARP request message | def outgoing_req(pkt):
return pkt[ARP].psrc == str(get_if_addr(conf.iface)) and pkt[ARP].op == 1 | [
"def incoming_reply(pkt):\n return pkt[ARP].psrc != str(get_if_addr(conf.iface)) and pkt[ARP].op == 2",
"def test_arp_reply_from_host(self):\n arp_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This module adds ARP requests made by the machine to the "arp_req table" | def add_req(pkt):
ARP_REQ_TABLE[pkt[ARP].pdst] = datetime.datetime.now() | [
"def answer_arp(self, mac):\n packet = self.event.parsed\n if not isinstance(packet.next, arp): return\n a = packet.next\n if a.opcode == arp.REQUEST:\n r = arp()\n r.hwtype = a.hwtype\n r.prototype = a.prototype\n r.hwlen = a.hwlen\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper to _create_update_type to include logic to retry failures based on parent objects created in reverse order | def create_update_type(self, orig_org_id, orig_inc_id, type_name, payload, orig_type_id):
id_list = []
new_id, opr_type = self._create_update_type(orig_inc_id, orig_org_id, type_name, payload, orig_type_id)
if new_id:
# determine if there are any retries needed
retry_lis... | [
"def _createObjects(parent, children):\n\n parent.plone_log('Creating %s in %s' % (children, parent))\n\n workflowTool = getToolByName(parent, 'portal_workflow')\n\n existing = parent.objectIds()\n for new_object in children:\n if new_object['id'] in existing:\n parent.plone_log('%s ex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
tasks automatically created with an incident can cause duplicates from the source incident also sync'd. This logic will get all the sync incident tasks and make sure there isn't a duplicate | def _find_task(self, sync_inc_id, payload):
response = self.get_incident_tasks(sync_inc_id)
for task in response:
# pick a number of comparison fields to ensure duplicate
if task['name'] == payload['name'] and task['cat_name'] == payload['cat_name']:
return task[... | [
"def test_duplicate_task(self):\n pass",
"def current_voting_tasks(self):\n if not self._dest_tasks:\n self._voting_task_index = {}\n task_generator = client.tasks.find_by_project(self._obj_id, fields=['name'])\n self._dest_tasks = [DestTask(task_json['id']) for task... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
datatables cannot be queried for a given row, so we'll need to get the entire table and parse each row to find the given row. | def get_existing_datatable_row(self, inc_id, table_name, row_id):
datatable_list = self.get_datatable(inc_id, table_name)
if datatable_list:
for row in datatable_list['rows']:
if row['id'] == row_id:
return row
return None | [
"def find_by_row(self, browser, table_locator, row, content): \n location_method = \"row\"\n if \"-\" == row[0]:\n row = row[1:]\n location_method = \"last-row\"\n locators = self._parse_table_locator(table_locator, location_method)\n locators = [locator % str(row)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extract & format column default values | def column_default(self, column_data: Dict) -> str:
if isinstance(column_data.default, str):
if column_data.type.upper() in datetime_types:
if "now" in column_data.default.lower():
# todo: need to add other popular PostgreSQL & MySQL functions
... | [
"def get_defaults(cls, columns=None):\n columns = cls.columns() if columns is None else columns\n return {col: col.default for col in columns if col.default}",
"def get_default(field):\r\n ...",
"def defaults(self) -> Mapping[str, str]:",
"def _standardize_column_values(dataframe):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This will add the Thread info from setParentPage method and Add the posts of the thread in addPosts method | def __addThreadAndPosts(self):
self.__task_elements_dict = {
'priority':self.task.priority,
'level': self.task.level,
'last_updated_time':datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ"),
'pickup_date':datet... | [
"def __addThreadAndPosts(self):\n self.__genre = \"Review\"\n self.__hierarchy = []\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This will set the soup the last page of the post | def __goToLastPage(self):
try:
pagination_tag = self.soup.find('div', 'pages')
if not pagination_tag:
log.info(self.log_msg('pagination not found, posts exists in current\
url%s'%self.currenturi))
... | [
"def __goToLastPage(self):\n try:\n pagination_tag = self.soup.find('div', 'pagenav')\n if not pagination_tag:\n return\n uri = None\n last_page_tag = pagination_tag.find('a', title=re.compile('Last Page'))\n if last_page_tag:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
(str) > list Return list of words given from file specified by dictFile string. >>> a = AnagramSolver("dict.txt") >>> lst = a._generate_word_list("dict.txt") | def _generate_word_list(self, dictFile):
word_list = []
try:
dict_f = open(str(dictFile))
except FileNotFoundError:
raise FileNotFoundError("Text file required in the same directory as anagram.py")
for entry in dict_f.readlines():
word_list.append(entr... | [
"def load_words():\n dict_list = []\n with open(DICTIONARY, 'rt') as fin:\n for word in fin:\n dict_list.append(word.strip('\\n'))\n # print(dict_list)\n return dict_list",
"def get_wordlist():\n with open(WORDLIST_FILE) as english:\n wordlist = english.readlines()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
(LetterManager, str, str) > object Return inclusion report if s1 contains s2, False otherwise. inclusion report > [contained word, remaining characters] >>> a = AnagramSolver("dict.txt") >>> a._does_include("cats", "cat") ['cat', 's'] >>> a._does_include("cats", "dogs") False | def _does_include(self, s1, s2):
lm1 = LetterManager(s1)
lm2 = LetterManager(s2)
result = lm1.Subtract(lm2)
if result:
contained_word = s2
remaining_chars = str(result)
return [contained_word, remaining_chars]
else:
return False | [
"def is_included(content, words):",
"def _is_included(self, include, visiting_path):\n\n if include is None or len(include) <= 0:\n return True\n\n for path in include:\n if visiting_path.startswith(path):\n return True\n\n return False",
"def englishDic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
(str) > NoneType Alters self._list to list of inclusion reports on words contained by s. >>> a = AnagramSolver("dict.txt") >>> a._shorten_list("cats") >>> a._list [['act', 's'], ['cast', ''], ['cat', 's'], ['sac', 't'], ['sat', 'c'], ['scat', '']] | def _shorten_list(self, s):
new_lst = []
if isinstance(self._list[0], list):
for inclusion_report in self._list:
word = inclusion_report[0]
new_lst.append(word)
self._list = new_lst
new_lst = []
for word in self._list:
... | [
"def _get_anagram_list(self, s, iteration_num = 1, previous_words = []):\n master_lst = [] #represents the list of all generated anagrams.\n old_word_lst = self._list #copy the _list used in the previous function call.\n self._shorten_list(s) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
(str, int, list) > list of lists of str Return a list containing iteration_num number of anagrams for s. The final parameter, previous_words, is used for recursive purposes only. >>> a = AnagramSolver("dict.txt") >>> a._get_anagram_list("office key", 3) [['eke', 'icy', 'off'], ['eke', 'off', 'icy'], ['ice', 'key', 'off... | def _get_anagram_list(self, s, iteration_num = 1, previous_words = []):
master_lst = [] #represents the list of all generated anagrams.
old_word_lst = self._list #copy the _list used in the previous function call.
self._shorten_list(s) #... | [
"def find_anagrams(s):\n word_list = sorted(s)\n anagrams_list = []\n find_anagrams_helper(word_list, anagrams_list, [], [])\n print(f'{count} anagrams: {anagrams_list}')",
"def getAnagrams(wordList, string):\n node = getAnagramNode(wordList, string)\n if node is None:\n return []\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
(str, int) > list of lists of str Return a list containing iteration_num number of anagrams for s. Implements "0 max" functionality, and resets object parameters. >>> a = AnagramSolver("dict.txt") >>> a.generateAnagrams("office key", 0) [['eke', 'icy', 'off'], ['eke', 'off', 'icy'], ['ice', 'key', 'off'], ['ice', 'off'... | def generateAnagrams(self, s, max):
if not isinstance(s, str):
raise TypeError("Non-empty string required for input.")
elif not isinstance(max, int):
raise TypeError("Non-negative integer input required for input.")
elif max < 0:
raise NegativeError("Non-negat... | [
"def find_anagrams(s):\n word_list = sorted(s)\n anagrams_list = []\n find_anagrams_helper(word_list, anagrams_list, [], [])\n print(f'{count} anagrams: {anagrams_list}')",
"def anagrams(self) -> None:\n # Generate an instance of StdIn.\n reader: StdIn = StdIn()\n\n # Read stdin ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group for organizations commands. | def organizations(): | [
"def get_mgr_group():\n\n @click.group(name=\"mgr\")\n def mgr_group():\n \"\"\" Arcus Manager service \"\"\"\n\n mgr_group.add_command(pull)\n mgr_group.add_command(start)\n mgr_group.add_command(update)\n return mgr_group",
"def cmd_groups(self):\r\n return dict({i.name: i.info()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates OrganizationApplication objects for Organization objects | def migrate_unapproved_orgs_to_applications(config):
# Imported here because we don't want to trigger an import from anything
# but warehouse.cli at the module scope.
from warehouse.accounts.models import User
from warehouse.db import Session
from warehouse.events.tags import EventTag
from wareh... | [
"def create_org_payload(self):\n organizations = ET.Element(\"organizations\")\n organization = ET.Element(\"organization\")\n organizations.append(organization)\n name = ET.SubElement(organization, \"name\")\n name.text = self._module.paramgram[\"org_name\"]\n fullName = E... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that a view is only accessible to administrators or participants of that particular challenge. | def validate_owner_or_admin_view(
*, two_challenge_set, client: Client, **kwargs
):
# No user
assert_viewname_redirect(
redirect_url=settings.LOGIN_URL,
challenge=two_challenge_set.challenge_set_1.challenge,
client=client,
**kwargs,
)
tests = [
(403, two_chall... | [
"def test_visibility_when_no_approved_content_and_authenticated_user_with_permission(self):\n # Each of these permissions will provide access.\n for perm in (\n \"superuser\",\n \"review_revision\",\n \"delete_document\",\n \"en-US__leaders\",\n \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prepare adata for report "portfolio by months" and plotting report "portfolio vs time" | def call_main(qs):
qs = qs.values('paper__name', 'date', 'price') \
.order_by('paper' ,'date')
# initialize data
ticks = {}
obj_time = time()
first_price = 0
prices = []
dates = []
curr_paper = ''
portfolio_by_month = defaultdict(list)
portfolio_by_month_dates = set()... | [
"def plot_time_series() -> None:\n quandl.read_key()\n\n # Get data of ABN Amro\n df = quandl.get('EURONEXT/ABN')\n print(STR_FMT.format('df.head()', df.head()))\n print(STR_FMT.format('df.tail()', df.tail()))\n df.plot()\n plt.savefig(IMGDIR+'dataset.png', bbox_inches='tight')\n\n # Extract... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
update data portfolio_by_month and portfolio_by_month_dates | def update_portfolio_data(paper_name, price, tick_date):
portfolio_by_month[paper_name].append(price)
portfolio_by_month_dates.add(tick_date) | [
"def update_month(pet_id, user_id, selected_month):\n from sqlalchemy.exc import IntegrityError\n # find all day records of the month\n daily_records = DailyStatistics.query.\\\n filter_by(pet_id=pet_id).\\\n filter(DailyStatistics.date >= selected_month).\\\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Aligns vector a to vector b with axis angle rotation | def align_vector_to_another(a=np.array([0, 0, 1]), b=np.array([1, 0, 0])):
if np.array_equal(a, b):
return None, None
axis_ = np.cross(a, b)
axis_ = axis_ / np.linalg.norm(axis_)
angle = np.arccos(np.dot(a, b))
return axis_, angle | [
"def vec_angle(a, b):\n cosang = np.dot(a, b)\n sinang = fast_norm(np.cross(a, b))\n return np.arctan2(sinang, cosang)",
"def vrrotvec(a, b):\r\n a = normalize(a)\r\n b = normalize(b)\r\n ax = normalize(np.cross(a, b))\r\n\r\n angle = np.arccos(np.minimum(np.dot(a, b), [1]))\r\n if not np.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add directed edge from from_vertex to to_vertex along with the distance between them. To get the from_vertex from adjacency_table, a search time of O(1) is needed. From the value between from_vertex and to_vertex, it needs to be added to edge_weights table. The time complexity to add is O(E) Lastly, updating adjacency_... | def add_directed_edge(self, from_vertex, to_vertex, weight=1.0):
# O(1)
new_from_vertex = self.adjacency_table.get(from_vertex)
new_from_vertex[len(new_from_vertex):] = [to_vertex]
# O(E)
self.edge_weights_table.add((from_vertex, to_vertex), weight)
# O(V)
self... | [
"def add_edge(self, v1, v2, weight):",
"def add_edge(\n self, source_vertex: T, destination_vertex: T\n ) -> GraphAdjacencyList[T]:\n\n if not self.directed: # For undirected graphs\n # if both source vertex and destination vertex are both present in the\n # adjacency list,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An undirected_edge includes in weight between one point and another. The reason for adding flipped vertices is for reversing from point b to point. | def add_undirected_edge(self, vertex_a, vertex_b, weight=1.0):
# O(E + V)
self.add_directed_edge(vertex_a, vertex_b, weight)
# O(E + V)
self.add_directed_edge(vertex_b, vertex_a, weight) | [
"def add_edge(self, v1, v2, weight):",
"def add_undirected_edge(self, key, edge, weight):\n target = self.get_node(key)\n targeted = self.get_node(edge)\n if target and targeted:\n # add edge from Node(key) to Node(edge)\n target.add_nbr(edge, weight)\n # add ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for vertex that match address. | def get_vertex(self, address):
# V * V = O(V^2)
for v in self.adjacency_table:
if len(v) != 0:
# O(V)
for vertex in v:
if vertex[0].label == address:
return vertex[0] | [
"def get_potential_matches_from_address(self, address):",
"def find_vertex(self, value):\n # !!!! IMPLEMENT ME\n\n print\n q = []\n q.append([self.vertices[0].value])\n checked = []\n counter = 0\n\n while len(q) > 0:\n\n if len(q) > 0:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Taking an undirected graph and the packages to be sent to its address, build_undirected graph, in a sense, builds a map with edge's weights. The outcome is to have an weighted graph with all possible vertices path routed from one to another vertex, so it has to loop through all known vertices and find a match with the ... | def Build_Undirected_Graph(self, packages):
# V(V * (1 + 2E + V)) = O(V^2E + V^3))
for vertices in self.adjacency_table:
if len(vertices) != 0:
# V * (1 + 2E + V) = O(VE + V^2))
for vertex in vertices:
# 1 + 2E + V = O(E + V)
... | [
"def build_graph(self, msg):\n self.map = msg\n self.map_points = geodesy.wu_point.WuPointSet(msg.points)\n self.graph = makeGraph(msg)\n\n # process each feature marked as a route\n for feature in itertools.ifilter(is_route, self.map.features):\n oneway = is_oneway(fea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if request envelope is of the expected skill format. | def supports(self, request_envelope, context):
# type: (Dict[str, Any], Any) -> bool
return 'request' in request_envelope | [
"def validate_request(self, hmac_key, request):\n\n hash_ = hmac.new(self.HMAC[hmac_key], request.data, sha256)\n\n # check if from phabricator\n if hash_.hexdigest() == request.headers[PHAB_WEBHOOK_SIG]:\n\n # Store the request and transaction\n self.request_data = json.l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invokes the dispatcher, to handle the request envelope and return a response envelope. | def invoke(self, request_envelope, context):
# type: (RequestEnvelope, Any) -> ResponseEnvelope
if (self.skill_id is not None and
request_envelope.context.system.application.application_id !=
self.skill_id):
raise AskSdkException("Skill ID Verification failed!... | [
"def post(self, *args, **kw):\n return self.custom_dispatch(*args, **kw)",
"def wsgi_app(self, environ, start_response):\n request = Request(environ)\n response = self.dispatch_request(request)\n return response(environ, start_response)",
"def __marshaled_dispatch(self, data, dispatc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate model data with crosssection weighting applied ddnXSfxn is an instance of the ddnXSinterpolator class dedxfxn is a function used to calculate dEdx probably more efficient to these in rather than reinitializing one each time This is edited to accommodate multiple standoffs being passed | def generateModelData(params, standoffDistance, nBins_tof, range_tof, ddnXSfxn, dedxfxn,
nSamples, getPDF=False):
e0 = params[0]
dataHist = np.zeros((x_bins, eD_bins))
nLoops = int(nSamples / nEvPerLoop)
for loopNum in range(0, nLoops):
eZeros = np.repeat(params, nEvPerLoop... | [
"def generateModelData(params, standoffDistance, range_tof, nBins_tof, ddnXSfxn,\n dedxfxn, beamTimer, nSamples, getPDF=False):\n beamE, eLoss, scale, s, scaleFactor = params\n e0mean = 900.0\n dataHist = np.zeros((x_bins, eD_bins))\n \n dedxForODE = lambda x, y: dedxfxn(energy=y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main entry point for class. Outputs file to location specified during the construction of the parse_rtf object | def parse(self, rtf_text, filename, file=None):
parsed_text = self._remove_tags(self._clean_url_field(self._create_newlines(rtf_text)))
date = self._find_date(parsed_text)
time = self._find_time(parsed_text)
if date is None:
# print('no date')
return
else:... | [
"def main(self):\n rwkos.make_dir(self.dst_path)\n self.gdoc_finder.main()\n self.gdoc_down()\n self.nb_finder.main()\n self.pdf_finder.main()\n self.copy_pdfs()",
"def main():\n\n # parse argument filepath\n SOURCE_FILE = sys.argv[1]\n name, ext = os.path.splite... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main entry point for class. Parses a RTF file that is in a list format Outputs file to location specified during the construction of the parse_rtf object | def parse_list(self, rtf_list, filename, file=None, search_string=None):
# determine document type:
if self._document_is_type_1(rtf_list):
self._parse_type_1_list(rtf_list, filename, file, search_string=search_string)
else:
self._parse_type_2_list(rtf_list, filename, file... | [
"def parse(self, rtf_text, filename, file=None):\n parsed_text = self._remove_tags(self._clean_url_field(self._create_newlines(rtf_text)))\n date = self._find_date(parsed_text)\n time = self._find_time(parsed_text)\n if date is None:\n # print('no date')\n return\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the class variable "cache" to None. | def _clear_cache(self):
self.cache = None | [
"def stop_using_cache(self):\n self.use_cache = False",
"def disable_cache(self) -> None:\n if self._cache:\n self._cache = None\n self._urlopen = urlopen",
"def disable_cache(self):\n self.__cache_enabled = False",
"def clear_cache(self) -> None:\n self._load... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates class variable cache. Add to the cache the text contained in the "line" variable | def _update_cache(self, line):
if self.cache is None:
self.cache = line
else:
self.cache += (line) | [
"def _update_cache_with_line(self, line):\n try:\n addr, data = line.split(\"=\")\n except ValueError, e:\n raise ProtoError(\"invalid data line format\")\n \n cache_line = []\n for i in range(0, len(data), 2):\n try:\n byte = da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if a document contains images. Type 1 RTF documents do not contain images Type 2 RTF documents contain images Type 1 documents have a line that has the string "Document " at the beginning of some line | def _document_is_type_1(text):
type_1 = re.compile('Document ')
for line in text:
if type_1.match(line):
return True
return False | [
"def _end_of_type_1_document(text):\n end_of_document = re.compile('Document ')\n if end_of_document.match(text):\n return True\n else:\n return False",
"def detect_images(self, text=None):\r\n if text is None:\r\n lines = self.lines\r\n else:\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if a line is the end of a document All documents in FACTSET format have a line that begins with "Document " | def _end_of_type_1_document(text):
end_of_document = re.compile('Document ')
if end_of_document.match(text):
return True
else:
return False | [
"def isEnd(self, line):\r\n return self.startsWithAttribute(line)",
"def _document_is_type_1(text):\n type_1 = re.compile('Document ')\n for line in text:\n if type_1.match(line):\n return True\n return False",
"def test_ends_at(line):\n return TEST_END_R... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the cache is empty. Returns true if cache is empty | def _cache_not_empty(cache):
if cache is None:
return False
else:
return True | [
"def is_empty(self):\r\n return (len(self.data.keys()) == 0)",
"def is_empty(self) -> bool:\n\n fm_logger.debug('FeedManager.is_empty')\n\n if self.size() == 0:\n return True\n\n return False",
"def is_empty(self):\n return len(self.tiles) == 0",
"def _empty(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes rtf tags from "type 1" rtf documents | def _remove_type_1_tags(rtf_text):
brackets = re.compile(r"[{}]")
headers = re.compile(r"\\f[2-6]")
bold = re.compile(r"\\b[0-3]?")
font = re.compile(r"\\fcharset0 .*?(?= |\\|;|\n);?")
remainder = re.compile(r"\\.*?(?=\\| |;|\n);?")
rtf_text = headers.sub('', rtf_text)
... | [
"def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replaces \par tags with newline characters | def _create_newlines(rtf_text):
# regex to identify all par tags
re_par = re.compile(r"(\\par)(?=[ \\])", re.DOTALL)
return re_par.sub(r"\n", rtf_text) | [
"def strip_paras(value):\n return re.sub(r'<p>(.*)</p>',r'\\1',value)",
"def new_paragraph(self):\n if self.chainMode == ChainMode.CHARS:\n return \"\\n\\n\"\n elif self.chainMode == ChainMode.WORDS:\n return [\"\\n\\n\"]",
"def simple_format(text):\n text = re.sub(r'(\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes all RTF tags from text snippet | def _remove_tags(rtf_text):
# remove all tags except the pars converted to newlines
re_tag = re.compile(r"(\\.*?) ")
re_tag_newline = re.compile(r"(\\.*?)(?=\n)")
rtf_text = re_tag.sub(r"", rtf_text)
# there are stragglers because of the newlines. We need two regular expressions
... | [
"def _remove_type_1_tags(rtf_text):\n brackets = re.compile(r\"[{}]\")\n headers = re.compile(r\"\\\\f[2-6]\")\n bold = re.compile(r\"\\\\b[0-3]?\")\n font = re.compile(r\"\\\\fcharset0 .*?(?= |\\\\|;|\\n);?\")\n remainder = re.compile(r\"\\\\.*?(?=\\\\| |;|\\n);?\")\n rtf_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the text of a hyperlink field in a RTF document | def _clean_url_field(rtf_text):
# identify the hyperlink fields and extract the text
hyperlink_field = re.compile(r"{\\field.*?}{\\fldrslt{\\cf2 \\uc2(?: )*?(.*?)}+", re.DOTALL)
return hyperlink_field.sub(r"\1", rtf_text) | [
"def extract_link(row):\n return row[len(row) - 4]",
"def get_13F_text_url(html_13F_page):\n\tsoup = BeautifulSoup(html_13F_page, features=\"html.parser\")\n\tdocuments_table = soup.find(\"table\", {\"class\": \"tableFile\"})\n\n\t# It looks like it will always be last item.\n\tlast_row = documents_table.findA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Identifies a RTF file from the FACTSET format FACTSET RTF documents BEGIN with a \par tag. The text of each document is contained on a single line Therefore the first four characters of the text will be \par All images are placed after the END of the text, so the textfile with images will be EMPTY we can delete these. | def identify_rtf_article(line):
if r'\par' in line[0:4]:
return True | [
"def _create_newlines(rtf_text):\n # regex to identify all par tags\n re_par = re.compile(r\"(\\\\par)(?=[ \\\\])\", re.DOTALL)\n return re_par.sub(r\"\\n\", rtf_text)",
"def _remove_type_1_tags(rtf_text):\n brackets = re.compile(r\"[{}]\")\n headers = re.compile(r\"\\\\f[2-6]\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the min values of the subarrays. Examples >>> s = pl.Series( ... "a", [[1, 2], [4, 3]], dtype=pl.Array(width=2, inner=pl.Int64) ... ) >>> s.arr.min() | def min(self) -> Series: | [
"def Min(axis=-1, keepdims=False):\n return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims))",
"def found_min(array_min):\n return min(array_min)",
"def min(self,*,axis=1):\n try:\n mins = np.amin(self.data,axis=axis).squeeze()\n if mins.size == 1:\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
adds a list of pieces to a game at positions specified | def add_pieces(self, piece_list):
success = True
# iterate through each piece, adding the piece if valid
for piece in piece_list:
if not self.add_piece(piece):
# warning for invalid piece location - does not kill engine
print(f'Warning: piece at {piece... | [
"def add_pieces(self):\n i = 0\n j = 0\n for c in self.fen_pos:\n try:\n a = int(c)\n j += a\n except ValueError:\n if c == \"/\":\n i += 1\n j = 0\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section three of the introduction | def __get_section_three_output_data(self, user_message, client_id):
if user_message.lower() == "yes":
response = "Feeling this way often sounds pretty rubbish. I'm sorry about that. How long has it been like this?"
next_user_options = [""] # n/a because next user inp... | [
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)\n\n response = \"OK, now we've got the intro stuff out the way... you were saying before that \\\n you were feeling \"+str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section five of the introduction | def __get_section_five_output_data(self, user_message, client_id):
response = "So given that I can't track you down, and also because I'm a very simple bot, \
if you told me about an emergency/crisis situation, I wouldn't \
be able to help."
next_user_options = ["OK, I k... | [
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)\n\n response = \"OK, now we've got the intro stuff out the way... you were saying before that \\\n you were feeling \"+str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section six of the introduction | def __get_section_six_output_data(self, user_message, client_id):
response = "Next I'm going to give you the choice whether you want to use this on a confidential \
or anonymous basis. When I say anonymous, I mean that our boffins may see your text to help \
us improve the way this softw... | [
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)\n\n response = \"OK, now we've got the intro stuff out the way... you were saying before that \\\n you were feeling \"+str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section seven of the introduction | def __get_section_seven_output_data(self, user_message, client_id):
response = "And when I say confidential, I mean that your text won't be \
stored at all, and no human will see what you write."
next_user_options = ["OK, I know what you mean by confidential."] # this is the option that the... | [
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)\n\n response = \"OK, now we've got the intro stuff out the way... you were saying before that \\\n you were feeling \"+str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section eight of the introduction | def __get_section_eight_output_data(self, user_message, client_id):
response = "Would you like this service to be anonymous or confidential?"
next_user_options = ["Anonymous (my words can help improve the bot)", "Confidential (no human ever sees my words)"] # this is the option that the user can select... | [
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)\n\n response = \"OK, now we've got the intro stuff out the way... you were saying before that \\\n you were feeling \"+str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the output data for section eleven of the introduction | def __get_section_eleven_output_data(self, initial_happiness_score):
response_fragment = self.__get_response_fragment_for_section_eleven(initial_happiness_score)
response = "OK, now we've got the intro stuff out the way... you were saying before that \
you were feeling "+str(initial_happ... | [
"def __get_section_seven_output_data(self, user_message, client_id):\n response = \"And when I say confidential, I mean that your text won't be \\\n stored at all, and no human will see what you write.\"\n\n next_user_options = [\"OK, I know what you mean by confidential.\"] # this is the o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the response fragment to add to the end of the core response based on the initial happiness score | def __get_response_fragment_for_section_eleven(self, initial_happiness_score):
response_fragment = ""
if initial_happiness_score > 7:
response_fragment = "Seems like you're feeling OK, but I'm still available for you \
to chat with if you want. Maybe just start by talking ab... | [
"def get_next_response(self):\n return # osid.assessment.Response",
"def _additional_response_starting(self, name):\n pass",
"def get_response_msg(self) -> str:\n return self.response_msg",
"def __get_section_eleven_output_data(self, initial_happiness_score): \n response_fragmen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method removes users from the database if they were created before time module takes as parameter | def remove_users_before_date(self, ts):
with self.__access_db() as cur:
cur.execute("DELETE FROM users WHERE time < %s""", (ts,)) | [
"def delete_expired_registers():\n days = auth_settings.AUTH_REGISTER_EXPIRE_DAYS\n diff = timezone.now() - timezone.timedelta(days=days)\n RegisterUser.objects.filter(date_joined__lt=diff).delete()",
"def delete_user(self):\n \n User.user_list.remove(self)",
"def test_delete_expired_users(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch checksum for the product with this id. | def get_checksum(self):
if self.checksum is None:
r = get(f'{self.link}?$format=json&$select=Checksum',
auth=Product.AUTH).json()
self.checksum = r['d']['Checksum']['Value']
return self.checksum | [
"def calculate_checksum(self):\n return binascii.crc32(self.unpack_binary(0, 0x78)) & 0xFFFFFFFF",
"def firmware_checksum(self):\n\n return self._firmware_checksum.value",
"def get_checksum(data):\n return hashlib.sha1(data).hexdigest()",
"def calculate_data_checksum(self):\n data = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a link to download this product. | def get_download_link(self):
return f'{self.link}$value' | [
"def get_download_url(self):\n return reverse('products:download', kwargs={\"pk\":self.pk,\n \"slug\":self.product.slug})",
"def get_url_for_download(self):\n return 'http://{}/form/Download'.format(self._host)",
"def product_url(self, product):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up the window with the given background color and title. Returns the new window. | def make_window(colr, ttle):
w = turtle.Screen()
w.bgcolor(colr)
w.title(ttle)
return w | [
"def make_window(colr,title):\n\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(title)\n return w",
"def create_background(self):\n # title and resize the window\n self.root.title(WINDOW_TITLE)\n self.root.geometry('{0}x{1}'.format(WINDOW_WIDTH, WINDOW_HEIGHT))\n\n # crea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Divide a scipy sparse matrix by a vector. This function exists because division is not implemented for scipy sparse matrix. | def spmatrix_divide_vector(X_sparse, vec):
if len(vec) == X_sparse.shape[1]:
return X_sparse @ ss.diags(1 / vec)
else:
return (X_sparse.T @ ss.diags(1 / vec)).T | [
"def csr_div_np(indptr, indices, sparse_data, dense, shape):\n import scipy.sparse\n x = sparse_data.reshape(sparse_data.shape[0], -1)\n y = np.broadcast_to(dense, shape).reshape(shape[0], shape[1], -1)\n expect = []\n for i in range(x.shape[-1]):\n sparse = scipy.sparse.csr_matrix((x[..., i],... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform svd on sparse matrix | def svd_with_sparse(X, k, solver="arpack", fit_transform=True, random_state=None):
random_init = np.random.rand(np.min(X.shape))
mu = X.mean(axis=0).A.flatten() # d
vars_ = preprocessing.StandardScaler(with_mean=False).fit(X).var_
XH = X.T.conj() # d x n
def matvec(x):
# print(x.shape)
... | [
"def svd(x, cpu_offload=True, *args, **kwargs):\n return cpuoffload_op_(\n torch.svd, x, SvdOut, cpu_offload=cpu_offload, *args, **kwargs)",
"def svd(A, chi = 0, full_matrices = False, compute_uv = True):\n # Try using the normal svd\n try:\n #if A.dtype != np.complex128:\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursive function used to search an unknown number of nested objects for a property. For example if we had a path 'cases.CasePage.title' this function would take the current object `object_to_search` and get an object called 'CasePage'. It would then call itself again to search the 'CasePage' for a property called 'ti... | def get(object_to_search, nested_properties_list):
object = getattr(object_to_search, nested_properties_list[0])
if len(nested_properties_list) == 1:
# We have reached the end of the path and now have the string
return object.replace("<!--", "<span class='govuk-visually-hidden'>"... | [
"def finditem(obj, key):\n _found = []\n\n def _finditem(obj, key):\n if key in obj or unicode(key) in obj:\n _found.append(obj[key])\n for k, v in obj.items():\n if isinstance(v,dict):\n _finditem(v, key)\n\n\n def _recursive_get(obj, keys):\n if l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of items and an LCS string, return the singular version if the list contains one item otherwise return the plural version | def pluralize_lcs(items, string):
strings = get_const_string(string).split("/")
count = items if isinstance(items, int) else len(items) if items else 0
if count == 1:
return strings[0]
else:
return strings[1] | [
"def pluralize_lcs(items, string):\n strings = get_const_string(string).split(\"/\")\n\n if items and len(items) == 1:\n return strings[0]\n else:\n return strings[1]",
"def plural(items, word):\n item_count = items if isinstance(items, int) else len(items)\n word = word if item_count... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a hidden field from the given key and value | def hidden_field(key, value):
return f'<input type="hidden" name="{key}" value="{value}">' | [
"def hidden(self,name,value=''):\n\t\tself.output += '<input name=' + str(name) + ' value=\"' + str(value) + '\" type=\"hidden\" />'",
"def getHiddenKey(self) -> str:\n return ''",
"def add_field(self, key, value):\n value = str(value).replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns 'Yes' if a boolean is equal to True, else 'No' | def friendly_boolean(boolean):
if boolean is True or boolean == "true" or boolean == "True" or boolean == "yes" or boolean == "Yes":
return "Yes"
else:
return "No" | [
"def friendly_boolean(boolean):\n if boolean is True or str(boolean).lower() == \"true\":\n return \"Yes\"\n else:\n return \"No\"",
"def convertToboolean(v):\n\tif v.lower() == 'yes':\n\t\treturn True\n\telse:\n\t\treturn False",
"def _map_boolean_to_human_readable(boolean, resource, token)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a missing title banner to the page | def missing_title():
if not settings.DEBUG:
return
return (
"</title>"
"</head>"
'<body style="margin-top: 73px;">'
'<div class="app-missing-title-banner">'
'<div class="govuk-width-container">'
'<h2 class="app-missing-title-banner__heading">You need to s... | [
"def test_no_title(self):\n self.alert.title = None\n actual = self.alert.display_title()\n expected = 'No title available'\n self.assertEqual(actual, expected)",
"def add_title(self, title):\n if not self.check_title(title):\n self.titles.append(UserTitle([self.id, t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a radial gradient background from a list of flags | def aurora(flags):
colours = {
"default": "#626a6e",
"red": "#d4351c",
"orange": "#f47738",
"blue": "#1d70b8",
"yellow": "#FED90C",
"green": "#00703c",
"pink": "#d53880",
"purple": "#4c2c92",
"brown": "#b58840",
"turquoise": "#28a197",
... | [
"def color_flags(wbins, y, flags, *args, **kwargs):\n plts = []\n if 'labels' in kwargs:\n labels = kwargs['labels']\n del kwargs['labels']\n else:\n labels = None\n\n for i, fl in enumerate(np.unique(flags)):\n pts = (flags == fl)\n if np.any(pts):\n if lab... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instead of iterating over each goods list of countries without the ability to break loops in django templating. This function will make a match for which goods are being exported to the country supplied, and return the list of goods | def get_goods_linked_to_destination_as_list(goods, country_id):
item_number = 1
list_of_goods = []
for good in goods:
for country in good["countries"]:
if country["id"] == country_id:
list_of_goods.append(f"{item_number}. {good['description']}")
item_numbe... | [
"def search_country_items():\n\n if request.method == 'GET':\n print \"Method = Get\"\n email = session.get(\"email\")\n print email\n countries = Country.query.all()\n country_names = []\n for country in countries:\n country_names.append(country.name)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is invoked once the connection to the telnet server is established. | def connectionMade(self):
super().connectionMade()
# negociate telnet options
self.transport.negotiationMap[LINEMODE] = self.telnet_LINEMODE
self.transport.negotiationMap[PLUGIN] = self.telnet_PLUGIN
self.transport.negotiationMap[TTYPE] = self.telnet_TTYPE
self.transport.... | [
"def connection_made(self, transport):\n self.transport = transport\n self.stream = telopt.TelnetStreamReader(transport, server=True)\n self._last_received = datetime.datetime.now()\n self._connected = datetime.datetime.now()\n self._retval = 0\n self.set_callbacks()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start a thread that listen to the keyboard. The terminal is put in CBREAK mode (no line buffering). Keystrokes are sent to the telnet server. | def _start_keyboard_listener(self):
def keyboard_listener(transport):
# put terminal in CBREAK mode
original_stty = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin, termios.TCSANOW)
# restore normal mode when the client exits
atexit.register(lambd... | [
"def keyboard_thread_SHOW(SHOW):\r\n logger.info('Starting keyboard show thread')\r\n while True:\r\n keyboard.wait(SHOW)\r\n logger.info('Show event')\r\n sendEvent({'showEvent': True})",
"def start_threads(self): \n self.input_thread = threading.Thread(target=self.get_in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Telnet subnegotiation of the LINEMODE option | def telnet_LINEMODE(self, data):
if data[0] == MODE:
if data[1] != b'\x02': # not(EDIT) + TRAPSIG
raise ValueError("bad LINEMODE MODE set by server : {}".format(data[1]))
self.transport.requestNegotiation(LINEMODE, MODE + bytes([0x06])) # confirm
elif data[3] ... | [
"async def handle_subneg_linemode_mode(self, mode):\n suggest_mode = slc.Linemode(mode[0])\n\n self.log.debug('recv IAC SB LINEMODE LINEMODE-MODE {0!r} IAC SE'\n .format(suggest_mode.mask))\n\n if not suggest_mode.ack:\n # This implementation acknowledges and se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Telnet subnegotiation of the TTYPE option | def telnet_TTYPE(self, data):
if data[0] == TTYPE_SEND:
if platform.system() == 'Windows' and self._init_descriptor is not None:
import curses
ttype = curses.get_term(self._init_descriptor)
else:
ttype = os.environ.get('TERM', 'dumb')
... | [
"def will_ttype(self, option):\r\n\r\n options = self.protocol.protocol_flags.get('TTYPE')\r\n\r\n if options and options.get('init_done') or self.ttype_step > 3:\r\n return\r\n\r\n try:\r\n option = \"\".join(option).lstrip(IS)\r\n except TypeError:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read attribute from sysfs and return as string | def readattr(path, name):
try:
f = open(USB_SYS_PREFIX + path + "/" + name)
return f.readline().rstrip("\n")
except IOError:
return None | [
"def sysfs_read(self, inp):\n try:\n with open(inp, \"r\") as f:\n str_val = f.readline().rstrip(\"\\n\")\n if str_val.find(\"0x\") is -1:\n val = int(str_val, 10)\n else:\n val = int(str_val, 16)\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the port chain a device is plugged on. This is done by searching sysfs for a device that matches the device bus/address combination. Useful when the underlying usb lib does not return device.port_number for whatever reason. | def find_ports(device):
bus_id = device.bus
dev_id = device.address
for dirent in os.listdir(USB_SYS_PREFIX):
matches = re.match(USB_PORTS_STR + '$', dirent)
if matches:
bus_str = readattr(dirent, 'busnum')
if bus_str:
busnum = float(bus_str)
... | [
"def lookup_port(cls, vendor_id=0x2341, product_id=0x0043, **kwargs):\n dev_path = None\n with suppress(error.NotFound):\n dev_path = find_serial_port(vendor_id, product_id)\n\n return dev_path",
"def _find(self):\n ports = [port\n for port\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set device calibration data based on settings in /etc/temper.conf. | def set_calibration_data(self, scale=None, offset=None):
if scale is not None and offset is not None:
self._scale = scale
self._offset = offset
elif scale is None and offset is None:
self._scale = 1.0
self._offset = 0.0
try:
... | [
"def set_calibration(self, data):\n # Check that 22 bytes were passed in with calibration data.\n if data is None or len(data) != 22:\n raise ValueError('Expected a list of 22 bytes for calibration data.')\n # Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lookup the offset of the humidity data by product name. | def lookup_humidity_offset(self, sensor):
if self._device.product == 'TEMPer1F_H1_V1.4':
# Has only 1 sensor, and the humidity data is at offset = 4
return 4
return None | [
"def __findInterchangeLocation(self, interchange):\n for row in Interchange.locations:\n try:\n idx = row.index(interchange)\n return float(row[idx + 2])\n except ValueError:\n continue",
"def getOffset(self, accession_id, resolution, chr_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lookup the number of sensors on the device by product name. | def lookup_sensor_count(self):
if (self._device.product == 'TEMPer1F_V1.3') or \
(self._device.product == 'TEMPer1F_H1_V1.4'):
return 1
# All others are two - if not the case, contribute here: https://github.com/padelt/temper-python/issues
return 2 | [
"def _sensor(product_id):\n sid = product_id[:2]\n if sid == 'LC':\n return 'OLI_TIRS'\n elif sid == 'LO':\n return 'OLI'\n elif sid == 'LE':\n return 'ETM'\n elif sid == 'LT':\n return 'TM'\n elif sid == 'LM':\n return 'MSS'",
"def count_product(self, product)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get number of sensors on the device. | def get_sensor_count(self):
return self._sensor_count | [
"def lookup_sensor_count(self):\r\n if (self._device.product == 'TEMPer1F_V1.3') or \\\r\n (self._device.product == 'TEMPer1F_H1_V1.4'):\r\n return 1\r\n\r\n # All others are two - if not the case, contribute here: https://github.com/padelt/temper-python/issues\r\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set number of sensors on the device. | def set_sensor_count(self, count):
# Currently this only supports 1 and 2 sensor models.
# If you have the 8 sensor model, please contribute to the
# discussion here: https://github.com/padelt/temper-python/issues
if count not in [1, 2,]:
raise ValueError('Only sensor_co... | [
"def set_number_of_products(self, number_of_products):\n self.number_of_products = number_of_products",
"def set_mode(self, mode_num: int, sensors: List[str] = None):\n if sensors is None or len(sensors) == 0:\n sensors = self.sensor_names\n\n for sensor_name in sensors:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send device a control request with standard parameters and as payload. | def _control_transfer(self, data):
LOGGER.debug('Ctrl transfer: %r', data)
self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09,
wValue=0x0200, wIndex=0x01, data_or_wLength=data, timeout=TIMEOUT) | [
"def send_control_signal(self, control):\n msg = {}\n name = control[0]\n _time = control[1]\n # This is not required but for simplicity\n # this agent uses a uniform value for the\n # nextSampleTime parameter for all inputs to Modelica.\n next_sample_time = _time + ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function filter shown entries by the user status. Only published entries accessible for non authenticated users. | def filter_status_by_user(query):
if not current_user.is_authenticated:
query = query.filter(Entry.status == Entry.PUBLISHED)
else:
# Allow users to view their own drafts
query = query.filter(
(Entry.status == Entry.PUBLISHED) |
((Entry.author == current_user) &
... | [
"def visibility_filter(self, user):\n vis = Q(visibility=Picture.PUBLIC)\n \n if user is not None:\n if user.is_superuser:\n vis = Q()\n else:\n up = user.get_profile()\n vis = vis | (Q(visibility=Picture.RESTRICTED, owner__frie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes pairwise IOU. Reason to use 4D tensors is to follow TPU compiler preference. | def _tensor_product_iou(boxes):
boxes_size = boxes.shape[-2]
# Code below will do frequent operands broadcasting.
# TPU compiler has (empirically) less issues broadcasting if
# - batch (first) dimension is 1. (Special consideration sharding)
# - there are 4 dimensions. (Standard traversal mapping)
# - last ... | [
"def DIoU(y_pred_box, y_true_box):\n y_pred_box_min = y_pred_box[..., :2]\n y_pred_box_max = y_pred_box[..., 2:4]\n y_pred_box_wh = y_pred_box_max - y_pred_box_min\n y_pred_box_center = (y_pred_box_min + y_pred_box_max) / 2.\n\n y_true_box_min = y_true_box[..., :2]\n y_true_box_max = y_true_box[..... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Consistently splits multiple tensors sharding-style. | def shard_tensors(
axis: int, block_size: int, tensors: 'Sequence[tf.Tensor]'
) -> Union[List[Sequence[tf.Tensor]], 'Iterable[Sequence[tf.Tensor]]']:
if not all(tensor.shape.is_fully_defined() for tensor in tensors):
return [tensors]
for validate_axis in range(axis + 1):
consistent_length: int = tensors... | [
"def shard(tensors, n_shards=None):\n n_shards = N_WEIGHTS_SHARDS if n_shards is None else n_shards\n indices = _axis_index(np.zeros(fastmath.local_device_count()))\n def _shard_fn(x):\n axis = _axis_to_shard_heuristic(x.shape)\n if int(x.shape[axis]) % n_shards != 0:\n raise ValueError(f'Cannot split... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combines shards of a top_k operation when sharded along the filtered dimension. The general idea is that sometimes the top_k dimension is very large, while top_k is moderately low. (Keep in mind a sample with a 15K pre-top_k dimension and 150 top_k.) In that case it is possible to break the top_k input into groups significantly larger than top... | def concat_and_top_k(
top_k: int, scores_pair: 'tuple[Optional[tf.Tensor], tf.Tensor]',
*other_pairs: 'tuple[Optional[tf.Tensor], tf.Tensor]'
) -> 'tuple[tf.Tensor, ...]':
scores, scores_shard = scores_pair
if other_pairs:
others, others_shard = zip(*other_pairs)
else:
others = others_shard = []
... | [
"def _top_k(scores, k, boxes_list):\n assert isinstance(boxes_list, list)\n assert boxes_list\n\n with tf.name_scope('top_k_wrapper'):\n scores, top_k_indices = tf.nn.top_k(scores, k=k)\n batch_size, _ = scores.get_shape().as_list()\n outputs = []\n for boxes in boxes_list:\n boxes_index_offsets... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set delivery customer field | def set_delivery_customer(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_DELIVERY_CUSTOMER).
send_keys(value)) | [
"def delivery_charges(self, delivery_charges):\n\n self._delivery_charges = delivery_charges",
"def customer_code(self, customer_code: str):\n\n self._customer_code = customer_code",
"def delivery_charge(self, delivery_charge):\n\n self._delivery_charge = delivery_charge",
"def set_Delive... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set channel partner field | def set_channel_partner(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_CHANNEL_PARTNER).
send_keys(value)) | [
"def partner_id(self, partner_id: UserId):\n\n self._partner_id = partner_id",
"def partner_type(self, partner_type):\n\n self._partner_type = partner_type",
"def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):\n if not partner_id:\n return {'value': {'email': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set indirect salesforce field | def set_indirect_salesforce_number(self, value):
(self.driver.find_element
(*ProjectFormLoc.FIELD_INDIRECT_SALESFORCE_NUMBER)).send_keys(value) | [
"def set(self, field, value):\n raise NotImplementedError",
"def setfield_entrezID(q):\n q.fieldname = 'entrezID'\n return q",
"def set_field(self, field):\n return self.set_param('field', field)",
"def __setattr__(self, name, val):\n if name in self._meta.fields:\n f = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set project name field | def set_project_name(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_PROJECT_NAME).
send_keys(value)) | [
"def project_name(self, value):\n self._project_name = value\n self.CP2K_INPUT.GLOBAL.Project_name = value",
"def update_project_name(self, path, old_name, new_name):\n item = self.get_item_by_path(path)\n item.setText(new_name)",
"def changeNameProject(self, name):\n check, _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set project description field | def set_project_description(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_PROJECT_DESCRIPTION).
send_keys(value)) | [
"def set_description(description):",
"def changeDescriptionProject(self, description):\n check, _ = self.editProjectInfo(description=description)\n return check",
"def edit_description(self, new_desciption):\n self.desciption = new_desciption",
"def setDescription(self, description):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set no third party checkbox | def set_no_third_party(self, check=True):
field = self.driver.find_element(*ProjectFormLoc.FIELD_NO_THIRD_PARTY)
if field.is_selected() and not check:
field.click()
elif not field.is_selected() and check:
field.click() | [
"def checkboxcheck(self):\n if ultimate.get() == 1:\n self.ultimate_fav_checkbox.configure(state=\"normal\")\n self.run_ultimate_intf_checkbox.configure(state=\"normal\")\n if ultimate.get() == 0:\n self.ultimate_fav_checkbox.configure(state=\"disabled\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set pt status dropdown | def set_pt_status(self, option):
(Select(self.driver.
find_element(*ProjectFormLoc.FIELD_PT_STATUS)).
select_by_visible_text(option)) | [
"def __init__status_choices__(self):\n self.fields['status'].choices = flag_settings.get_for_model(\n self.target_object, 'STATUSES')",
"def _set_status(self, status: str = 'none'):\n if status == 'loop':\n self.color_loop = True\n else:\n self.color_loop ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set status comment field | def set_status_comment(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_STATUS_COMMENT).
send_keys(value)) | [
"def status_change_comment(self, status_change_comment):\n\n self._status_change_comment = status_change_comment",
"def SetStatus(self, status):\n self.status = status\n self.put()",
"def __set_status(self, status):\n self.__status = status",
"def set_status(self, status: CommitStatus):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set main product dropdown | def set_main_product(self, option):
(Select(self.driver.find_element(*ProjectFormLoc.FIELD_MAIN_PRODUCT)).
select_by_visible_text(option)) | [
"def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)",
"def products_menu(self, category_selection):\n\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set revenue recognition dropdown | def set_revenue_recognition(self, option):
(Select(self.driver.find_element
(*ProjectFormLoc.FIELD_REVENUE_RECOGNITION)).
select_by_visible_text(option)) | [
"def r_choice(self):\r\n\r\n def enter(t: str):\r\n \"\"\"\r\n Function to set values in entries on radio button command\r\n :param t: hotel type\r\n \"\"\"\r\n for i in range(8):\r\n self.sv[i].set(creds_rtypes['h_type'][t][i])\r\n\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set project manager field | def set_pm(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_PROJECT_MANAGER).
send_keys(value)) | [
"def product_project_manager(self, product_project_manager):\n\n self._product_project_manager = product_project_manager",
"def setManagerProperty(self, p_str, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def setManagerProperty(self, QString, *__args)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set solution architect field | def set_solution_architect(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_SOLUTION_ARCHITECT).
send_keys(value)) | [
"def setArch(arch):\n if arch in supportedArchs:\n ArchInfo.currentArch = arch\n #print \"[+] Working under architecture : \" + str(arch)\n if arch == \"X86\":\n ArchInfo.bits = 32\n REGSIZE.size = 32\n ArchInfo.ip = \"eip\"\n ArchInfo.sp = \"esp\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set contract type dropdown | def set_contract_type(self, option):
(Select(self.driver.find_element(*ProjectFormLoc.FIELD_CONTRACT_TYPE)).
select_by_visible_text(option)) | [
"def _on_outlookType_Select(self, cat):\n\n self.outlookType = cat # Change the outlook type",
"def set_contract_currency(self, option):\n (Select(self.driver.\n find_element(*ProjectFormLoc.FIELD_CONTRACT_CURRENCY)).\n selec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set start date field | def set_start_date(self, value):
(self.driver.find_element(*ProjectFormLoc.FIELD_START_DATE).
send_keys(value)) | [
"def set_start_date(self):\n self.start_date_button.config(text=f\"Start - {self.calendar.get_date()}\")\n self.start_date = dt.datetime.strptime(self.calendar.get_date(), '%m/%d/%y')",
"def update_cal_start(self):\n start = self.StartDateEdit.date()\n self.startCalendarWidget.setSelec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set contract currency dropdown | def set_contract_currency(self, option):
(Select(self.driver.
find_element(*ProjectFormLoc.FIELD_CONTRACT_CURRENCY)).
select_by_visible_text(option)) | [
"def set_default_currency_to(self):\n index = int(self.ctl.get_parameter_value(4)) - 1\n self.set_combo_selection(index, self.gui.cmb_currency_to)",
"def set_default_currency_from(self):\n index = int(self.ctl.get_parameter_value(3)) - 1\n self.set_combo_selection(index, self.gui.cmb_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |