| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Get all instance reservations for a profile | def get_ec2_reservations(profile, running_filter):
try:
ec2_client = boto3.Session(profile_name=profile).client('ec2')
except ProfileNotFound:
print("Profile: %s not found" % profile, file=sys.stderr)
sys.exit(1)
filtered_instances = ec2_client.describe_instances(Filters=running_filt... | [
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that getting tags is successful | def test_get_tags_successful(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqu... | [
"def test_get_tags(self):\n pass",
"def test_get_tag(self):\n pass",
"def test_list_tags(self):\n pass",
"def test_show_tags(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags\")\r\n html = resp.get_data(as_text=True)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding a tag is successful | def test_add_tag_successful(self):
payload = {'name': 'test tag'}
self.client.post(TAGS_URL, payload)
# self.assertEqual(res.status_code, status.HTTP_201_CREATED)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.as... | [
"def test_create_tag(self):\n pass",
"def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
1. Query all of the puppies and return the results in ascending alphabetical order | def query_one():
puppies = session.query(Puppy.name).order_by(Puppy.name.asc()).all()
for puppy in puppies:
print puppy.name | [
"def hw1():\r\n\tpuppies = session.query(Puppy).order_by(Puppy.name.asc()).all()\r\n\tfor puppy in puppies :\r\n\t\tprint str(puppy.id) + \"\\t\" + puppy.name + \"\\t\" + puppy.gender + \"\\t\" + str(puppy.dateOfBirth)",
"def query_three():\n \n puppies = session.query(Puppy.name, Puppy.weight).order_by(Pu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
2. Query all of the puppies that are less than 6 months old organized by the youngest first | def query_two():
today = datetime.date.today()
max_days_old = 180
max_birthday = today - datetime.timedelta(days = max_days_old)
puppies = session.query(Puppy.name, Puppy.dateOfBirth).filter(Puppy.dateOfBirth >= max_birthday).order_by(Puppy.dateOfBirth.desc()).all()
for puppy in puppies:
... | [
"def getLatestDatadropPerYeargroup():\n\trow_filter=[]\n\tfor y in yeargroup.objects.all().order_by('-current_year'):\n\t\tdd=datadrop.objects.filter(cohort=y)\n\t\tif dd.count()>0:\n\t\t\tdd=dd.order_by('-date')[0]\n\t\t\trow_filter.append(dd)\n\treturn row_filter",
"def upcoming_meetups_query(cls):\r\n # War... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
3. Query all puppies by ascending weight. | def query_three():
puppies = session.query(Puppy.name, Puppy.weight).order_by(Puppy.weight.asc()).all()
for puppy in puppies:
print "{puppy_name}: {weight}".format(puppy_name=puppy[0], weight=puppy[1]) | [
"def hw3():\r\n\tpuppies = session.query(Puppy).order_by(Puppy.weight.asc()).all()\r\n\tfor puppy in puppies :\r\n\t\tprint str(puppy.id) + \"\\t\" + puppy.name + \"\\t\" + puppy.gender + \"\\t\" + str(puppy.weight)",
"def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][3]\n while partialWe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
4. Query all puppies grouped by the shelter in which they are staying. Show count of puppies at each shelter | def query_four():
puppies = session.query(Shelter, func.count(Puppy.id)).join(Puppy).group_by(Shelter.id).all()
for shelter_puppy in puppies:
print "{shelter_name}: {puppy}".format(shelter_name=shelter_puppy[0].name, puppy=shelter_puppy[1]) | [
"def hw4():\r\n\tresult = session.query(Shelter, func.count(Puppy.id)).join(Puppy).group_by(Shelter.id).all()\r\n\tfor item in result:\r\n\t\tprint item[0].id, item[0].name, item[1]",
"def count(cls, queryset):\n result_group = queryset.values('date')\n annotate_report = result_group.annotate(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same functionality as RelPosEmb1D | def rel_pos_emb_1d(q, rel_emb, shared_heads):
if shared_heads:
emb = torch.einsum('b h t d, r d -> b h t r', q, rel_emb)
else:
emb = torch.einsum('b h t d, h r d -> b h t r', q, rel_emb)
return relative_to_absolute(emb) | [
"def GetPosition2(self):\n ...",
"def demo_embed():\n\n center = (0,0,0)\n\n g = generate_structure(\n structure = 'graphene',\n cell = 'prim',\n tiling = (4,4,1),\n )\n g.recenter(center)\n\n # Represent the \"relaxed\" cell\n gr = g.copy()\n npos = le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates current querystring with a given dict of params, removing existing occurrences of such params. Returns a urlencoded querystring. | def updated_querystring(request, params):
original_params = request.GET.copy()
for key in params:
if key in original_params:
original_params.pop(key)
original_params.update(params)
return original_params.urlencode() | [
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the template context variable `name` is not set, get the default value from django.settings | def context_or_settings(context, name):
if name in context:
return context[name]
return getattr(settings, 'DEFAULT_' + name.upper()) | [
"def context_or_settings(context, name):\n if name in context:\n return context[name]\n return getattr(settings, \"DEFAULT_\" + name.upper())",
"def test_setting_default(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses raw data in family field. | def parse_raw_family_string(family_raw):
return map(parse_family_member, filter(None, family_raw.split(";"))) | [
"def _handle_raw(self, raw_bytes: bytes):\n try:\n from_embedded = pb.FromEmbedded()\n from_embedded.ParseFromString(raw_bytes)\n if from_embedded.HasField(\"canDataChunk\"):\n self._handle_can_data(from_embedded.canDataChunk)\n if from_embedded.HasF... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decodes a JWT token and returns u_id | def decode_token(token):
payload = None
try:
payload = jwt.decode(token.encode('utf-8'), '1$Arh"1bWa/7+OS', algorithm='HS256')['u_id']
except jwt.InvalidTokenError:
pass
return payload | [
"def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])",
"def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turn a BeautifulSoup form into a dict of fields and default values | def __extract_form_fields(self, soup):
fields = OrderedDict()
for input in soup.find_all('input', attrs={'name': True}):
if 'type' not in input.attrs:
input.attrs['type'] = 'text'
# Single element name/value fields
if input.attrs['type'].lower() in ('t... | [
"def get_form_fields(url):\n page = urlopen(url)\n soup = BeautifulSoup(page)\n form = soup.form\n\n # Setting up data structure\n form_data = dict(fields=[])\n form_attr = dict(form.attrs)\n\n form_data['title'] = soup.h1 and soup.h1.text or soup.title.text\n form_data['action'] = urljoin(u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to find the name of the username field among a list of input fields. Looks for the most evocative value of the "name" attribute | def __find_username_field_via_name(self, inputs):
for input in inputs:
for n in ('name', 'login', 'user', 'mail'):
if n in input.attrs['name'].lower():
return input.attrs['name']
return None | [
"def get_names(form):\n names = []\n\n print(\"getting names\")\n print(form)\n print(len(form))\n print(len(form) / 2)\n for idx in range(\n 0, int(len(form) - 1)\n ): # Magic number 1 corrects for csrf_token stored in the form\n print(idx)\n print(form[\"input[new\" + st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
clear ip arp inspection statistics | def clear_ip_arp_inspection_stats(device):
log.info("clear ip arp inspection statistics on {device}".format(device=device))
dialog = Dialog([Statement(pattern=r'\[confirm\].*', action='sendline(\r)',loop_continue=True,continue_timer=False)])
try:
device.execute("clear ip arp inspection statistics"... | [
"def flush_arp(self):\n self.cli.cmd('ip neighbour flush all')",
"def arps_del(self):\n arp_cmd = \"sudo {} arp -d {}\"\n with open(self.filename, \"w\") as fn:\n for ip_mac in self.ip_mac_list:\n cmd = arp_cmd.format(self.asic.ns_arg, ip_mac.ip)\n fn.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A dictionary to map required slots to an extracted entity | def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"bug":[self.from_entity(
entity="bug",
intent="inform"),
self.from_text(
intent="inform")],
"beverage": [self.from_entity(
entity="be... | [
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"entity_major\": [\n self.from_entity(entity=\"entity_major\", intent=[\"intent_major_info\", \"inform\"])],\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n print(\"slot ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Database of multiple choice answers | def answers_db() -> Dict[str, List]:
return{"lawyer":["either","other","law","boy"],
"cot_caught":["different","other","same"],
"second_person_plural":["other","y'all","yins",
"you","you'uns","you all","you guys","you lot",
"yous, youse"],
"yard_sale":["car boot","car boo... | [
"def _question_selector(key: str) -> list:\n\n res = list()\n\n if \"impose\" in key: # impose\n res.extend(\n [\n (\"Who is imposed?\", \"who_imposed\"),\n (\"Who are imposed?\", \"who_imposed\"),\n # (\"what are the victims?\", \"what_victims\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to generate our validation functions, since they're pretty much the same for each slot | def create_validation_function(name_of_slot):
def validate_slot(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate user input."""
if value.... | [
"def question_new_validate():",
"def validate(self, name, values):\r\n \r\n pass",
"def build_validation_fn(action_validators, parameter_validators, data_validators):\n ins_action_validators = []\n ins_parameter_validators = {}\n ins_data_validators = {}\n\n # Validators instantiation ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Database of slot values & corresponding questions | def slot_key_db() -> Dict[str, List]:
return {'q50': 'second_person_plural',
'q28': 'cot_caught',
'q80': 'rain_sun',
'q66': 'crawfish',
'q110': 'halloween',
'q64': 'sandwich',
'q90': 'side_road',
'q105': 'beverage',
... | [
"def fill_slot(slot, context):\n question = slot_questions[slot]\n inputs = tokenizer(question, context, add_special_tokens=True, return_tensors=\"pt\")\n input_ids = inputs[\"input_ids\"].tolist()[0]\n\n outputs = model(**inputs)\n answer_start_scores = outputs.start_logits\n answer_end_scores = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates pairs of nodes (i, j) that have at least one common neighbour. | def _get_common_neighbour_node_pairs(self):
node_pairs = []
for node1 in self.graph.nodes():
for node2 in self.graph.nodes():
if node1 != node2:
neighbour_count = self.neighbour_counts[(node1, node2)]
if neighbour_count >= 1:
... | [
"def get_common_neighbours(p1: Position, p2: Position) -> List[Position]:\n i, j = p1\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n i, j = p2\n l2 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n return [k for k in l1 if k in l2]",
"def graclus_pairs(W, verbose=False):\n n = W.shape[0]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of service_ids that match a given datetime | def getServiceIdsForDate(self, dt, exclude0000=True):
dayDict = {1:"monday", 2:"tuesday", 3:"wednesday", 4:"thursday",
5:"friday", 6:"saturday", 7:"sunday"}
sqlQuery = "select service_id from calendar" \
+ " where {} = 1".format(dayDict[dt.isoweekday()]) \
+ " and... | [
"def get_serviceIDs_for_date(date):\n global SDHandler\n return SDHandler.effective_service_ids(date);",
"def find_by_activity_date(self,date):\n return list(filter(lambda x: x.activity_id in [e.entity_id for e in self.__activity_controller.find_by_date(date)],self.get_all()))\n #return map(lambda... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provides the template tag {% attendance SESSION USER as ATTENDING TENTATIVE %} | def do_attendance(parser, token):
try:
_tagname, session, user, _as, context_attending, context_tentative = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(u'%(tagname)r tag syntax is as follows: '
'{%% %(tagname)r SESSION USER as VARIABLE1 VARIABLE2 %%}'... | [
"def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)",
"def AddSessionUtilization(asg_name, arn_scalein, arn_scaleout):\n logger.info('Creating Session Utilization CloudWatch alarm for ASG: ' + asg_name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the ability to send http requests and receive responses. | def test_http_request(self):
response = requests.get(self.live_server_url)
assert response.status_code == 200 | [
"async def test_http_communicator():\n communicator = HttpCommunicator(SimpleHttpApp(), \"GET\", \"/test/?foo=bar\")\n response = await communicator.get_response()\n assert response[\"body\"] == b\"test response\"\n assert response[\"status\"] == 200",
"def test_introduce_send_request(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the ability to send and receive messages over WebSocket. | def test_websocket_message(self):
ws = websocket.create_connection(self.live_server_ws_url)
ws.send('test')
response = ws.recv()
ws.close()
assert 'test' == response | [
"async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(tex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the __version__ in the module is being correctly pulled from the pyproject.toml config | def test_module_version_matches_pyproject_version():
version_from_package_init = __version__
# this is so that the test finds the pyproject.toml file when run from the command line or from within Pycharm
this_directory = os.path.dirname(os.path.realpath(__file__))
pyproject_toml_path = os.path.join(thi... | [
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entrie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes the loss to the TensorBoard log | def log_loss(self, loss: float, duration: float, iteration: int):
self.train_writer.add_scalar("Time", duration, iteration)
self.train_writer.add_scalar("Loss", loss, iteration) | [
"def _log_tensorboard(self, side, loss):\n if not self._tensorboard:\n return\n logger.trace(\"Updating TensorBoard log: '%s'\", side)\n logs = {log[0]: log[1]\n for log in zip(self._model.state.loss_names[side], loss)}\n self._tensorboard[side].on_batch_end(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a record with a given id from the table. | def remove(table, id_):
table, successful = common.remove_record(table, id_)
if not successful:
ui.print_error_message('Error!')
return table | [
"def remove(table, id_):\n\n record = common.find_id(table, id_)\n common.remove_record(table, record)\n save_data_to_file(table)\n\n return table",
"def remove(table, id_):\n\n record = common.find_id(table, id_[0])\n if record in table:\n table = common.remove_record(table, record)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check how many different games are made by each manufacturer. Return dictionary where key is manufacturer and value is number of games by it. | def get_counts_by_manufacturers(table):
manufacturers_dict = {}
for item in table:
try:
if item[2]:
try:
if item[2] in manufacturers_dict.keys():
manufacturers_dict[item[2]] += 1
else:
man... | [
"def get_counts_by_manufacturers(table):\n manufacturers = []\n for item in table:\n if item[2] not in manufacturers:\n manufacturers.append(item[2])\n\n manufacturers_games = {}\n\n for record in manufacturers:\n games_counter = 0\n for item in table:\n if ite... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Give back the average amount of games in stock for a given manufacturer. Count how many different games by the given manufacturer are in stock and how many copies of them there are in total. From that, compute the average amount. | def get_average_by_manufacturer(table, manufacturer):
games = 0
manufacturer_apperance = 0
for i in range(len(table)):
try:
if manufacturer.lower() == table[i][2].lower():
games += int(table[i][-1])
manufacturer_apperance += 1
except (IndexError, ... | [
"def get_average_by_manufacturer(table, manufacturer):\n games_sum = 0\n games_occurance = 0\n for item in table:\n if item[2] == manufacturer:\n games_sum += int(item[4])\n games_occurance += 1\n\n average_amount = games_sum / games_occurance\n\n return average_amount",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether the lattice is binary or not, i.e. whether every vertex except the bottom covers at most two elements and is covered by at most two elements | def is_binary(self):
for element in self:
if element != self.bottom and not self.isa_binary_element(element):
return False
return True | [
"def is_upper_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i):\n if self[i, j] != 0:\n return False\n return True",
"def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether a given element is binary or not, i.e. whether it covers at most two elements and is covered by at most two elements | def isa_binary_element(self, element):
return len(self.above(element)) <= 2 and len(self.under(element)) <= 2 | [
"def is_binary(image):\n return sum(image==amin(image))+sum(image==amax(image)) > 0.99*image.size",
"def is_binary(self):\n return len(self.rhs) == 2 and is_cat(self.rhs[0]) and is_cat(self.rhs[1])",
"def is_binary(self):\n\n for element in self:\n if element != self.bottom and not s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the successor of element different from first_successor in a binary lattice | def other_above(self, element, first_successor):
successors = list(self.above(element))
if len(successors) != 2:
raise ValueError("element is not binary in lattice")
elif successors[0] == first_successor:
return successors[1]
elif successors[1] == first_successor:... | [
"def other_under(self, element, first_predecessor):\n predecessors = list(self.under(element))\n if len(predecessors) != 2:\n raise ValueError(\"element is not binary in lattice\")\n elif predecessors[0] == first_predecessor:\n return predecessors[1]\n elif predeces... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the predecessor of element different from first_predecessor in a binary lattice | def other_under(self, element, first_predecessor):
predecessors = list(self.under(element))
if len(predecessors) != 2:
raise ValueError("element is not binary in lattice")
elif predecessors[0] == first_predecessor:
return predecessors[1]
elif predecessors[1] == fi... | [
"def predecessor(self, node):\n if node is None:\n return None\n if node.left_child is not None:\n pred = node.left_child\n while pred.right_child is not None:\n pred = pred.right_child\n return pred\n else:\n p = node.parent\n while p is not None:\n if node is n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tensorflow peak picking via local maxima. Returns the indices of the local maxima along the first dimension of the tensor | def find_local_maxima(tens):
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P'))) | [
"def local_maxima(array):\n maxInd = argrelextrema(array, np.greater)\n vals = array[maxInd]\n\n sorted_array = np.sort(vals)\n second_max = sorted_array[-2]\n idx1 = np.argwhere(vals == second_max)\n second_max_idx = maxInd[0][idx1][0][0]\n\n third_max = sorted_array[-3]\n idx2 = np.argwher... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tensorflow-based implementation of np.fft.fftfreq | def fft_frequencies(sr=22050, n_fft=2048):
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32)) | [
"def freq_from_fft(self, sig, fs):\n\t # Compute Fourier transform of windowed signal\n\t windowed = sig * blackmanharris(len(sig))\n\t f = rfft(windowed)\n\n\t # Find the peak and interpolate to get a more accurate peak\n\t i = argmax(abs(f)) # Just use this for less-accurate, naive version\n\t ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies | def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length) | [
"def fft_frequencies(sr=22050, n_fft=2048):\n # TODO endpoint=True\n return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))",
"def frequencyAnalysis(sample):\n return fft(sample)",
"def obsFreq() :\n return oFreq",
"def tf(freq: int) -> float:\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Bandpass filters audio to given frequency range | def bandpass_filter_audio(audio, f_low=400, f_high=450):
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio) | [
"def butterworth_bandpass_filter( values, lowFreq, highFreq, sampleFreq, order=5):\n nyq = 0.5 * sampleFreq\n low = lowFreq / nyq\n high = highFreq /nyq\n b,a = butter( order, [low,high], btype='band' )\n return y = lfilter(b, a, data)",
"def butter_bandpass_filter(data, lowcut, highcut, fs, order=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate dominant frequency from tempogram. | def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
t... | [
"def DominantFrequency(data):\n w = fft.fft(data)\n freqs = fft.fftfreq(len(data))\n i = argmax(abs(w))\n dom_freq = freqs[i]\n dom_freq_hz = abs(dom_freq * 32.0)\n return dom_freq_hz",
"def estimate_dominant_frequency_fft(F):\n # compute power spectrum\n P = abs(F)\n # print \"P:\", P.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run PLP encoder over all chunks in a song | def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
if chunks != 0:
... | [
"async def process_audio_chunks(self, chunk: bytes):\n if self.processor is not None:\n await self.processor.process(chunk)",
"def parallel_embed(chunk, chunk_id):\n # Set up simple multiprocessing\n manager = Manager()\n results = manager.list()\n work = manager.Queue(args.n_threads... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots tempogram and local pulse. | def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local... | [
"def plotTime(self, pulse):\n plt.plot(pulse.X_time,pulse.Y_time)\n plt.plot(pulse.X_time,np.abs(pulse.Y_time), 'r--')\n plt.ylabel(\"Field amplitude a.u\")\n plt.xlabel(\"Time in femtosecond\")",
"def plot_temperature_room(self):\n print(\"avg_temp in room: \", np.mean(self.tem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
find the octant of the point p | def octant(p):
x = p[0]
y = p[1]
z = p[2]
if z > 0:
if y > 0:
if x > 0:
return 1
else:
return 2
else:
if x > 0:
return 4
else:
return 3
else:
if y > 0:
... | [
"def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6",
"def pentagonal(n):\n return n*(3*n-1)/2",
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))",
"def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if an axis from beta1 to beta2 is nearly perpendicular, within a maximal torsion angle, to an axis from alpha1 to alpha2, and if their distance is under maxDist | def isSecondOblateAxis(alpha1, alpha2, beta1, beta2, maxDist, maxTorsAngle):
a1 = np.asarray(alpha1)
a2 = np.asarray(alpha2)
b1 = np.asarray(beta1)
b2 = np.asarray(beta2)
#lent = alpha1 - beta1
adir = a2 - a1
bdir = b2 - b1
aLength = np.sqrt ( np.dot(adir, adir) )
bLength = np... | [
"def _check_max_dist(p1, p2, max_dist, crs_is_projected=False):\n if crs_is_projected:\n dist = p1.distance(p2)\n else:\n dist = ti.geogr.point_distances.haversine_dist(p1.x, p1.y, p2.x, p2.y)\n dist_below_thresh = dist <= max_dist\n return dist_below_thresh",
"def are_compatible_in_y(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if a sample point (sx,sy,sz) is inside the prolate shape with semi-axes alpha > beta. The translation vector and rotation matrix have to describe the transformation for aligning the alpha-axis with the x-axis of the coordinate system and setting the center to the origin. The fundamental ellipsoidal equation is appl... | def isInProlate(sample, alpha, beta):
E = sample[0] * sample[0] / (alpha * alpha)
E += (sample[1] * sample[1] + sample[2] * sample[2] ) / (beta * beta)
if E > 1.0:
return False
else:
return True | [
"def in_ellipse(x, y, a, b, x0=0, y0=0, θ=0):\n testin = (\n ( (x - x0) * np.cos(θ) + (y - y0) * np.sin(θ) )**2 / a**2 +\n ( (x - x0) * np.sin(θ) - (y - y0) * np.cos(θ) )**2 / b**2\n )\n return (testin <= 1)",
"def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1",
"def pointIns... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The harvesting event occurs when the voltage U has an extremum. This is equivalent to I_Cp = Iq = 0. To distinguish the positive and negative zero crossings, the event function becomes Iq * sign(U) <= 0. | def ev_harvest(t, x, y):
Iq, UC, U = x
return Iq * sign(U) | [
"def specific_energy_consumption_func(self):\n return self.P.val - self.outl[2].m.val_SI * self.e.val",
"def signal_hammer(icu, icu_slope, hammer_icu, hammer_slope):\n\n return (icu > hammer_icu and icu_slope > 0) or (icu_slope > hammer_slope)",
"def test_interferometer(self, tol):\n # fmt:off\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The SECE principle can be modeled with only one FSM state, CHARGE. After every extremum the output voltage U is reset to zero. The current Iq is also set to zero to avoid numerical problems. Additionally, the time-discrete state vector saves the energy E transferred through the output and every extremum of the voltage U... | def CHARGE(t, x, y):
Iq, UC, U = x
E, Umax = y # time discrete state vector
E_new = E + Cp/2 * U**2
return array([0.0, UC, 0.0]), array([E_new, U]) | [
"def exposed_to_infected(self, model):\r\n to_inf =np.where(((model.epidemic_state==1) & (model.time_cur_state >= self.T_EXP)), \r\n 1, 0)\r\n \r\n model.epidemic_state = np.where(to_inf==1, 2, model.epidemic_state)\r\n model.time_cur_state = np.where(to_inf==1, 0, mode... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the csrf token from a url | def extract_csrf(self, url):
with requests.Session() as client:
client.get(url)
csrf = client.cookies['csrftoken']
return csrf | [
"def _get_initial_token(url):\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(url)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''",
"def get_csrf_token():\n response = sess... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new weight; params needed for post request's payload | def add_weight(self):
# Get the csrf token
csrf = self.extract_csrf('https://wger.de/en/weight/add/')
# Adding referer to the headers
self.headers['Referer'] = API.url_weight
# Take the weight entires from TOML file
entries = self.cfg.get('payload', {}).get('we... | [
"def add_weight(self):\r\n\r\n # Get the csrf token\r\n csrf = self.extract_csrf('https://wger.de/en/weight/add/')\r\n # Adding referer to the headers\r\n self.headers['Referer'] = API.url_weight\r\n\r\n # Take the weight entires from TOML file\r\n entries = cfg.get('paylo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new nutrition plan and stores information about it in the class dictionary. Params needed for the post request's payload | def add_plan(self):
# Take the weight entries from TOML file
plans = self.cfg.get('payload', {}).get('plan')
# Check for valid entries
if plans :
# Construct payload
for payload in plans:
# Parse the payload
ready = self.... | [
"def add_plan(self):\r\n\r\n # Take the weight entries from TOML file\r\n plans = cfg.get('payload', {}).get('plan')\r\n # Check for valid entries\r\n if plans :\r\n # Construct payload \r\n for payload in plans:\r\n # Parse the payload\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new meal to a nutrition plan and stores information about it in the class dictionary. Params needed for the post request's payload | def add_meal(self, p_id):
# Take the plans entires from TOML file
plans = self.cfg.get('payload',{}).get('plan')
# For each meal in each plan
for entries in plans:
# Check for valid entires
if entries:
for payload in entries.get('m... | [
"def add_meal(self):\r\n \r\n # Take the plans entires from TOML file\r\n plans = cfg.get('payload',{}).get('plan')\r\n # For each meal in each plan\r\n for entries in plans:\r\n # Check for valid entires\r\n if entries:\r\n for payload in entr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new item to a meal and stores information about it in the class dictionary. Params needed for the post request's payload | def add_item(self, m_id):
# Take the meals entires from TOML file
meals = self.cfg.get('payload',{}).get('plan',{})[0].get('meal',{})
for entries in meals:
# Check for valid entires
if entries:
# Construct payload
for payl... | [
"def add_item(self):\r\n\r\n # Take the meals entires from TOML file\r\n meals = cfg.get('payload',{}).get('plan',{})[0].get('meal',{})\r\n for entries in meals:\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new exercise and stores information about it in the class dictionary. Params needed for the post request's payload | def add_exercise(self):
# Take the exercise entires from TOML file
entries = self.cfg.get("payload",{}).get("exercise")
# Check for valid entires
if entries:
# Construct payload
for payload in entries:
# Check the entry vs a json schema
... | [
"def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new workout and stores information about it in the class dictionary. Params needed for the post request's payload | def add_workout(self):
# Take the workout entires from TOML file
workouts = self.cfg.get('payload',{}).get('workout')
# Check for valid entires
if workouts :
# Construct payload
for payload in workouts:
# Parse the workout payload
... | [
"def add_workout(self):\r\n\r\n # Take the workout entires from TOML file\r\n workouts = cfg.get('payload',{}).get('workout')\r\n # Check for valid entires\r\n if workouts :\r\n # Construct payload \r\n for payload in workouts:\r\n # Parse the workout... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new day to a workout and stores information about it in the class dictionary. Params needed for the post request's payload | def add_day(self, w_id):
# Take the weight entries from TOML file
workouts = self.cfg.get('payload',{}).get('workout')
# Check for valid entries
if workouts:
for entries in workouts:
if entries:
# Construct payload
... | [
"def add_day(self):\r\n\r\n # Take the weight entries from TOML file\r\n workouts = cfg.get('payload',{}).get('workout')\r\n # Check for valid entries\r\n if workouts:\r\n for entries in workouts:\r\n if entries:\r\n # Construct payload \r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new schedule and stores information about it in the class dictionary. Params needed for the post request's payload | def add_schedule(self):
# Take the schedule entires from TOML file
entries = self.cfg.get('payload',{}).get('schedule')
# Check for valid entires
if entries:
# Construct payload
for payload in entries:
# Parse schedule payload
... | [
"def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule pay... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download the image of an exercise by index | def get_image(self, index):
# Get request to get all the links for all exercises
image = requests.get(API.url_image, headers = self.headers).json()
filename = download(image[index]['image']) | [
"def image_path_from_index(self, index):\n raise NotImplementedError",
"def _index_img(img_file, index):\n imgs = check_niimg(img_file, ensure_ndim=4, atleast_4d=True)\n return _index_img(imgs, index)",
"def get_example(self, i):\n if i >= len(self):\n raise IndexError('in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the comment from an exercise by index | def get_comment(self, index):
# Get request to get all the comments for all exercises
comments = requests.get(API.url_comment, headers = self.headers).json()
# Parse the response
for my_comment in comments:
if my_comment['id'] == index:
print(my_commen... | [
"def __getitem__(self, index: int) -> praw.models.Comment:\n return self._comments[index]",
"def getComment(self, n = None):\n \n if n is None:\n return self._comments\n else:\n return self._comments[n]",
"def get_text_from_note (self,\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a delete was successful | def check_delete(self, url, info, index):
exists = requests.get(url, headers=self.headers)
requests.delete(url + str(index), headers = self.headers)
exists2 = requests.get(url, headers=self.headers)
if exists.ok == exists2.ok == True and exists.json()['results'] == exists2.json()['... | [
"def checkDeleted(self) -> None:\n ...",
"def test_post_deletion_success(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n self.client.credentials(\n HTTP_AUTHORI... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Define the processing of each new directory added. [in] dirpath: the directory just added by the user. Improve this ugly interface... | def _processNewDirectory(self, dirpath):
self._parent.processDirectory(dirpath) | [
"def dir_add(self, dirname):\n with os.scandir(dirname) as dirfd:\n for entry in dirfd:\n self.add(entry.name, attr=os.stat(join(dirname, entry.name)))",
"def do_diradd(self, glob):\r\n hits = []\r\n for m in matches(self.cwd, glob, self.exclusions):\r\n h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the given line in order to return an array of n_classes probabilities | def _parse_preds_line(self, preds_line):
# The output for AWS XGBoost for multiclass is [proba_c1, probac2, probac3,...] for each sample, neither csv, nor python...
# return list(map(float, preds_line[1:-2].split(',')))
return eval(preds_line) | [
"def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes",
"def read_class_labels(path):\n\n wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the local and S3 libsvm files used for the training | def _prepare_libsvm_data(self):
logging.info('Preparing libsvm training data...')
if self.clean or not (self.is_s3_file(self.s3_training_file) and self.is_s3_file(self.s3_validation_file)):
logging.info('S3 libsvm files do not exist.')
if self.clean or not (os.path.isfile(self.lo... | [
"def deploy_svm_on_vmdk():\n\n INFO(\"Setting FUA bit for local storage\")\n api.set_kernel_module_options(\"libata\", \"fua=1\")\n api.set_kernel_module_options(\"libata_92\", \"fua=1\")\n\n svm_base_path = \"%s/%s\" % (P_LIST.datastore_path, SVM_NAME)\n\n # Customize SVM from the template.\n customize_svm_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search the nodes with the lowest f scores first. You specify the function f(node) that you want to minimize; for example, if f is a heuristic estimate to the goal, then we have greedy best-first search; if f is node.depth then we have depth-first search. | def best_first_graph_search(problem, f):
f = memoize(f, 'f')
return graph_search(problem, PriorityQueue(min, f)) | [
"def best_first_graph_search(problem, f):\r\n f = memoize(f, 'f')\r\n node = Node(problem.initial)\r\n frontier = PriorityQueue('min', f)\r\n frontier.append(node)\r\n explored = []\r\n while frontier:\r\n node = frontier.pop()\r\n if problem.goal_test(node.state):\r\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A* search is best-first graph search with f(n) = g(n) + h(n). You need to specify the h function when you call astar_search. | def astar_search(problem, h=None):
h = h or problem.h
h = memoize(h, 'h')
def f(n):
return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
return best_first_graph_search(problem, f) | [
"def astar_search(problem, h=None):\r\n h = memoize(h or problem.h, 'h')\r\n return best_first_graph_search(problem, lambda n: n.path_cost + h(n))",
"def astar_search(problem, h=None, display=False):\n\n\th = memoize(h or problem.h, 'h')\n\treturn best_first_graph_search(problem, lambda n: n.path_cost + h(n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This helper function validates inputs to check that they are either scalars or arrays, and then that any arrays are of the same shape. It either raises an error or returns the common shape, or 1 if all arguments are scalar. | def check_input_shapes(*args):
# Collect the shapes of the inputs
shapes = set()
# DESIGN NOTES - currently allow:
# - scalars,
# - 0 dim ndarrays (also scalars but packaged differently)
# - 1 dim ndarrays with only a single value
for val in args:
if isinstance(val, np.ndarr... | [
"def _good_shape(x, shape, axes):\n if shape is not None and axes is None:\n shape = _helper._iterable_of_int(shape, 'shape')\n if len(shape) != np.ndim(x):\n raise ValueError(\"when given, axes and shape arguments\"\n \" have to be of the same length\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to create a simple table of attribute mean, min, max and nan count from an object for use in summarize function. | def summarize_attrs(obj, attrs, dp=2, repr_head=True):
ret = []
for attr in attrs:
data = getattr(obj, attr)
ret.append([attr,
np.round(np.nanmean(data), dp),
np.round(np.nanmin(data), dp),
np.round(np.nanmax(data), dp),
... | [
"def _summary_stats(data):\n \n stats = {'min':[],'max':[], 'mean':[]}\n \n for scan in range(len(data)):\n stats['min'].append(\n (scan, min(data[scan][1]))\n )\n stats['max'].append(\n (scan, max(data[scan][... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
tests passing 1D data into sigma_points | def test_julier_sigma_points_1D():
kappa = 0.
sp = JulierSigmaPoints(1, kappa)
#ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)
Wm, Wc = sp.weights()
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 3
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm,... | [
"def test_simplex_sigma_points_1D():\n\n sp = SimplexSigmaPoints(1)\n\n #ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)\n\n Wm, Wc = sp.weights()\n assert np.allclose(Wm, Wc, 1e-12)\n assert len(Wm) == 2\n\n mean = 5\n cov = 9\n\n Xi = sp.sigma_points(mean, cov)\n xm, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
tests passing 1D data into sigma_points | def test_simplex_sigma_points_1D():
sp = SimplexSigmaPoints(1)
#ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)
Wm, Wc = sp.weights()
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 2
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm, ucov = unscented_tr... | [
"def test_julier_sigma_points_1D():\n\n kappa = 0.\n sp = JulierSigmaPoints(1, kappa)\n\n #ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)\n\n Wm, Wc = sp.weights()\n assert np.allclose(Wm, Wc, 1e-12)\n assert len(Wm) == 3\n\n mean = 5\n cov = 9\n\n Xi = sp.sigma_points... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
should work like a linear KF if the problem is linear | def test_linear_1d():
def fx(x, dt):
F = np.array([[1., dt],
[0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0]])
dt = 0.1
points = MerweScaledSigmaPoints(2, .1, 2., -1)
kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=fx, hx=hx, point... | [
"def _linearize(self):\n pass",
"def spline_linear(x, f, x_k, x_ki):\n A = (x_ki - x) / (x_ki - x_k)\n B = (x - x_k) / (x_ki - x_k)\n \n return A*f(x_k) + B*f(x_ki)",
"def f(t, k):\n\n def P(z):\n return 1 - z / 2 - 1 / z**3 / 3j\n return np.exp(1j*t) * P(np.exp(k*1j*t))",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
batch filter should accept missing data with None in the measurements | def test_batch_missing_data():
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
poi... | [
"def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]",
"def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)",
"def test__postprocess_sampled_with_null_values(self):\n # Setup\n sampled = pd.DataF... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
plot overscan in 9x2 plots with 16 channels | def plot_overscan(overscan, img, TITLE, OUT_DIR):
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplot(gs[... | [
"def view_grid(self):\n avgs2 = self.smooth(2)\n avgs24 = self.smooth(23)\n row_labels = list('1234')\n column_labels = list('1234')\n arr = []\n mins = []\n maxs = []\n j = 0\n for i in range(len(avgs2[0])):\n j = 0\n darr = [[],[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
plot overscan with subtracted 7th / 17th channel | def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplo... | [
"def plot_channel_offset(fig_name):\n file1 = 'log/influence/channel/table_left_150cm.csv'\n df = utils.read_file_rssi(file1, correct_index=True)\n df[colums].plot()\n plt.title(file1)\n plt.xlabel('Time: s')\n plt.ylabel('RSSI: dBm')\n if not os.path.exists(dir_fig):\n os.makedirs(dir_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
plot gains with respect to the reference gain, where the reference gain is a number => gains[gain_ref] | def plot_gains(gains, gain_ref, TITLES, OUT_DIR):
# print 'directory: %s' % OUT_DIR
# print 'TITLES:%s', TITLES
gain_ref_np = np.array(gains[gain_ref].gain)
ratios = []
for gain in gains:
gain_np = np.array(gain.gain)
dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),
... | [
"def plot_gains(self, gains, gain_variance):\n\n # TODO : Make plots have the same scale if they share the same units\n # or figure out how to normalize these.\n\n n, q, p = gains.shape\n\n fig, axes = plt.subplots(q, p, sharex=True, sharey=True)\n\n percent_of_gait_cycle = \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an API event instance | def get_api_event(self):
pass | [
"def retrieve(cls, event_id):\n return Event(Requester.get(cls.endpoint + '/' + event_id))",
"def get_events_api(self):\n events_api = EventsApi(self.authtoken, self.portal_id) \n return events_api",
"def get_instance(self, payload):\n return EventInstance(self._version, payload, cal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the next count number for the given metric/variant (rotates every few calls) | def _get_metric_count(cls, metric, variant, next=True):
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
value = counters[key] = value + 1
... | [
"def next_num(cls):\r\n cls.num += 1\r\n return cls.num",
"def next_int(self):\n self.innovation_number += 1\n return self.innovation_number",
"def get_next_label_number():\n try:\n max_dict = Label.objects.all().aggregate(Max('number'))\n max_value = max_dict['numbe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add_datepart converts a column of df from a datetime64 to many columns containing the information from the date. This applies the changes in place. | def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld... | [
"def add_datepart(df, fldname, drop=True, time=False):\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
proc_df takes a data frame df and splits off the response variable, and changes the df into an entirely numeric dataframe. For each column of df which is not in skip_flds nor in ignore_flds, na values are replaced by the median value of the column. | def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
if not ignore_flds: ignore_flds=[]
if not skip_flds: skip_flds=[]
if subset: df = get_sample(df,subset)
else: df = df.copy()
ignored_fld... | [
"def preprocess(df: pd.DataFrame, _map: Optional[Callable[[T], Any]] = None) -> pd.DataFrame:\n assert not isinstance(df, type(None))\n\n if not _map:\n _map = tokenize_stem_stop\n\n # remove or fill rows that have no data, i.e NaN\n nonempty_df = fill_empty(df)\n\n # Map each remaining row to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort standings_rows according to the subclass rank method. | def sort_standings_rows(self, standings_rows, heat_games, players, rank_finals=False):
non_finals_sort_key_fn = self.get_standings_row_sort_key_fn()
self.calculate_secondary_rank_values(standings_rows, heat_games, players)
standings_rows.sort(key=non_finals_sort_key_fn, reverse=True)
if... | [
"def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
One waypoint must exist in the database | def test_create(self):
self.assertTrue(WayPoint.objects.exists()) | [
"def create_waypoint(self, waypoint):\n connection = self.__create_connection()\n try:\n waypoint_list = list(waypoint)\n key = self.__compound_key(waypoint)\n waypoint_list.insert(0, key)\n\n keyed_waypoint = tuple(waypoint_list)\n\n sql = ''' IN... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if max episode steps have been reached. | def is_terminal(self) -> bool:
return self.time_index == self.max_episode_steps - 1 | [
"def max_episode_steps(self):\n pass",
"def has_finished_episode(self):\n if self.num_steps_taken == 0:\n return False\n\n if self.evaluating:\n return False\n\n done = self.num_steps_taken % self.episode_length == 0\n if done:\n self.finished_ep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Round the value x down to the nearest time step interval. | def _round(self, x):
return x - x % self.minutes_per_step | [
"def round_down(x):\n return int(math.floor(x / 10.0)) * 10",
"def round_down(x, m):\n return int(m * round(float(x) / m))",
"def _roundup(self, x):\n\n if x < 100:\n fact = 10\n elif x < 1000:\n fact = 100\n elif x < 10000:\n fact = 1000\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Disconnect user using Facebook | def fbdisconnect():
facebook_id = login_session['facebook_id']
url = 'https://graph.facebook.com/%s/permissions' % facebook_id
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
del login_session['facebook_id']
return "you have been logged out" | [
"def fbdisconnect():\n\n facebook_id = login_session['facebook_id']\n # The access token must me included to successfully logout\n access_token = login_session['access_token']\n # Only disconnect a connected user.\n if access_token is None:\n response = make_response(json.dumps('Current user n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the hash of the file | def gerar_hash(nome_arquivo):
m = hashlib.sha256()
arquivo = open(nome_arquivo,'rb').read()
m.update(arquivo)
hash_votos = m.digest()
open("hash_votos_cifrados.txt","w").write(hash_votos) | [
"def hash_file(self, fileobj):\n assert not isinstance(fileobj, io.TextIOBase)\n h = self._hash_ctor()\n buf = fileobj.read(self._block_size)\n while len(buf) > 0:\n h.update(buf)\n buf = fileobj.read(self._block_size)\n return h.hexdigest()",
"def _hash_fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the secret AES key | def gera_chave():
AES_tamanho_chave = 32
chave_aes_secreta = os.urandom(AES_tamanho_chave)
return chave_aes_secreta | [
"def cambiar_escena(self, escena):\n\t\t# Reemplazo directo\n\t\tself.escena = escena\n\t\t# Reiniciar la ventana con el tamaño de la nueva escena\n\t\tprint(\"Iniciando nuevo contexto OpenGL...\")\n\t\tv_ancho, v_alto = escena.tam\n\t\topciones = OPENGL | DOUBLEBUF\n\t\tif escena.pant_compl:\n\t\t\topciones |= FUL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all nodes of the graph visited using BFS | def bfs(graph,start):
#keeps track of nodes to be visited
queue = []
#keeps track of nodes already visited
explored = []
queue.append(start)
while queue:
#remove first node from queue
curr_node = queue.pop(0)
#check if node is visited
if curr_node not in explored:
explored.append(curr_... | [
"def bfs_visited(ugraph, start_node):\r\n queue = deque()\r\n visited = set() #Set is enough here.\r\n visited.add(start_node)\r\n queue.append(start_node)\r\n while len(queue) != 0:\r\n temp_node = queue.popleft()\r\n for neighbor in ugraph[temp_node]: #In graph theory, neighborhood ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
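The row's document is cut off by the dump; a complete, runnable version of the same queue-based traversal (a sketch of the obvious ending, using `collections.deque` instead of the O(n) `list.pop(0)`) is:

```python
from collections import deque

def bfs(graph, start):
    # graph: dict mapping node -> iterable of neighbours
    explored, queue = [], deque([start])
    while queue:
        node = queue.popleft()         # FIFO order gives breadth-first visits
        if node not in explored:
            explored.append(node)
            queue.extend(graph[node])  # neighbours are visited in later rounds
    return explored

assert bfs({'a': ['b', 'c'], 'b': [], 'c': ['a']}, 'a') == ['a', 'b', 'c']
```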
Create an empty Queue. Test that its size is 0. | def test_new_queue_is_empty(self):
queue = Queue_()
self.assertTrue(queue.empty())
self.assertEqual(queue.size(), 0) | [
"def test_new_queue_is_empty(self):\n queue = Queue_(Stack())\n self.assertTrue(queue.empty())\n self.assertEqual(queue.size(), 0)",
"def test_init_queue(self):\n q = Queue()\n self.assertIsInstance(q, Queue)\n self.assertIsNone(q.first)\n self.assertIsNone(q.last)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an element from a queue. Test that it is 1. | def test_get_element(self):
data = (1, 2, 3, 4)
queue = Queue_(data)
self.assertEqual(queue.get(), data[0]) | [
"def test_get_element(self):\n data = Stack((1, 2, 3, 4, 5))\n queue = Queue_(data)\n self.assertEqual(queue.get(), 1)",
"def get(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to get item from queue')\n\t\t\titem = self.queue.get()\n\t\t\tself.logger.debug('Recevie item from queue %s'%... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a Queue from an iterable object. Check that the size of the queue equals the size of the given tuple. | def test_new_queue_from_tuple(self):
data = (1, 2, 3, 4)
queue = Queue_(data)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), len(data))
for value in data:
test_value = queue.get()
self.assertEqual(test_value, value)
self.assertTrue(... | [
"def test_can_instantiate_a_queue_with_iter():\n q = Queue([1, 2, 3, 4, 5, 6])\n assert len(q) == 6",
"def test_can_instantiate_queue_with_list(small_q2):\n assert len(small_q2) == 4\n assert small_q2.front.val == 1\n assert small_q2.back.val == 4",
"def test_new_queue_from_list(self):\n d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a Queue from a list. Check that the size of the queue equals the size of the list. Check that the top element of the queue equals the first element of the list. | def test_new_queue_from_list(self):
data = [1, 3, 5, 7, 2, 4]
queue = Queue_(data)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), len(data))
self.assertEqual(queue.top(), data[0]) | [
"def test_can_instantiate_queue_with_list(small_q2):\n assert len(small_q2) == 4\n assert small_q2.front.val == 1\n assert small_q2.back.val == 4",
"def test_that_rear_is_the_right_value_at_length_greater_than_one(empty_queue):\n empty_queue.enqueue(9)\n empty_queue.enqueue(1)\n assert empty_que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a Queue_ from a generator. Test that its size equals the number of items the generator yields. | def test_new_queue_from_generator(self):
queue = Queue_(range(10))
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), 10)
self.assertEqual(queue.top(), 0) | [
"def test_can_instantiate_a_queue_with_iter():\n q = Queue([1, 2, 3, 4, 5, 6])\n assert len(q) == 6",
"def test_can_dequeue_from_iter_created_queue(filled_queue):\n start_length = len(filled_queue)\n filled_queue.dequeue()\n assert start_length > len(filled_queue)\n assert len(filled_queue) == 5... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Put an element in the queue. Test that its size is 1. | def test_put_element(self):
queue = Queue_()
queue.put(1)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), 1)
self.assertEqual(queue.top(), 1) | [
"def test_put_element(self):\n queue = Queue_(Stack((3, 2)))\n queue.put(1)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 3)\n self.assertEqual(queue.top(), 3)",
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an empty Queue. Test that calling the get function raises an IndexError | def test_call_get_of_empty_queue_raised_error(self):
queue = Queue_()
self.assertRaises(IndexError, queue.get) | [
"def test_init_queue(self):\n q = Queue()\n self.assertIsInstance(q, Queue)\n self.assertIsNone(q.first)\n self.assertIsNone(q.last)\n self.assertEqual(q.size, 0)",
"def test_new_queue_is_empty(self):\n queue = Queue_()\n self.assertTrue(queue.empty())\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
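The seven queue-test rows above all exercise the same interface (`empty`, `size`, `put`, `get`, `top`, construction from an iterable, `IndexError` on empty `get`). A minimal `Queue_` consistent with every assertion, inferred from the tests rather than taken from the original repository:

```python
from collections import deque

class Queue_:
    def __init__(self, iterable=()):
        self._items = deque(iterable)

    def empty(self):
        return not self._items

    def size(self):
        return len(self._items)

    def put(self, value):
        self._items.append(value)     # enqueue at the back

    def get(self):
        # dequeue from the front; deque raises IndexError when empty,
        # which matches test_call_get_of_empty_queue_raised_error
        return self._items.popleft()

    def top(self):
        return self._items[0]         # peek at the front without removing
```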
Override the save method to save the first and last name to the user field. | def save(self):
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user ... | [
"def save(self, commit=True):\n\n email_local_part = self.cleaned_data['email'].split('@')[0]\n username_start = email_local_part[:5] if len(email_local_part) >= 5 else email_local_part\n self.instance.username = username_start + ''.join(\n [choice(ascii_letters) for _ in range(30 - ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Populate the `hateword` table in MongoDB with data from CSV file. | def populate_hateword_data():
with open("./data/hate-speech-lexicons/refined_ngram_dict.csv") as f:
lst = [row.split(',', 1)[0] for row in f]
lst = lst[1:]
lst = [{
'word': word,
'category': [],
'similar_to': []
} for word in lst]
try:
... | [
"def save_hand_csv_mongo(self, filename):\n data = pandas.read_csv(filename)\n data_json = json.loads(data.to_json(orient='records'))\n self.mongo_wrapper.drop_collection(self.constants.METADATA) # Drop Metadata Collection\n self.mongo_wrapper.bulk_insert(self.constants.METADATA, data_j... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
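The loader above splits on the first comma by hand and inserts rows one at a time (its tail is truncated in the dump). A variant using the `csv` module and a single bulk insert — a sketch; the `twitter` database and `hateword` collection names are taken from the row's description, and the URI default is an assumption:

```python
import csv
import pymongo

def load_hatewords(path, mongo_uri='mongodb://localhost:27017'):
    with open(path, newline='') as f:
        reader = csv.reader(f)        # handles quoted fields with commas
        next(reader)                  # skip the header row
        docs = [{'word': row[0], 'category': [], 'similar_to': []}
                for row in reader]
    db = pymongo.MongoClient(mongo_uri).twitter
    db.hateword.insert_many(docs)     # one round trip instead of N
```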
Prepopulate user data for the app, including an admin account | def populate_user_data():
try:
db = mongo_client.MongoClient(config.MONGO_URI).twitter
db.user.insert_one(
{
'username': 'admin',
'password': 'admin',
}
)
print("Created an admin account")
except Exception as e:
prin... | [
"def prefill(self, user):\n print('prefilling')\n self.username.data = user.username\n self.full_name.data = user.full_name\n self.email.data = user.email",
"def InitUser():\n result = AppUser.query(AppUser.user == users.get_current_user()).fetch()\n\n if result:\n app_user = resu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
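The row stores the admin password in plain text. If the surrounding app has Flask available, `werkzeug.security` offers salted hashing; a sketch (werkzeug is an assumption, not shown in the row):

```python
from werkzeug.security import generate_password_hash, check_password_hash

hashed = generate_password_hash('admin')
assert check_password_hash(hashed, 'admin')
# store {'username': 'admin', 'password': hashed} instead of the plain text
```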
add a new vertex object to the graph with the given key and return the vertex | def add_vertex(self, key):
#increments the number of vertices
#creates a new vertex
#adds the new vertex to the vertex list
#returns the new vertex
    if key is not None:
self.num_vertices += 1
new_vertex = Vertex(key)
self.vert_list[key] = new_vertex... | [
"def add_vertex(self, key):\n\n if key in self.vert_dict:\n print(f'Vertex {key} already exists')\n return\n\n # create a new vertex\n new_vertex = Vertex(key)\n self.vert_dict[key] = new_vertex\n self.num_vertices += 1\n\n return new_vertex",
"def a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return the vertex if it exists | def get_vertex(self, n):
    #returns the vertex if it is in the graph
    # (membership test first: indexing a missing key would raise KeyError
    # before the comparison ever ran)
    if n in self.vert_list:
        return self.vert_list[n]
    else:
        raise KeyError("It would appear the vertex you are searching for does not exist") | [
"def get_vertex(self, key):\n # return the vertex if it is in the graph\n if key in self.vert_dict.keys():\n return self.vert_dict[key]\n return None",
"def get_vertex(self, key):\n if key in self.vertList:\n return self.vertList[key]\n else:\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add an edge from vertex f to vertex t with a cost | def add_edge(self, f, t, cost=0):
#if either vertex is not in the graph, returns an error
#if both vertices in the graph, adds the
# edge by making t a neighbor of f
#using the addNeighbor method of the Vertex class.
    if (self.get_vertex(f) is not None) and (self.get_vertex(t) is not None):
... | [
"def add_edge(self, f, t, cost=1):\n if f not in self.vertList:\n self.add_vertex(f)\n if t not in self.vertList:\n self.add_vertex(t)\n self.vertList[f].add_neighbor(self.vertList[t], cost)",
"def add_edge(e, R):\n\tR.add_edges_from([tuple(e)])\n\ta=R.get_edge_data(e[0]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
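Pulling the three graph rows above together: a minimal `Vertex`/`Graph` pair the methods are consistent with (a sketch — the original `Vertex` class is not shown in the dump, and `add_edge` here creates missing endpoints, a common convenience the truncated original may not share):

```python
class Vertex:
    def __init__(self, key):
        self.key = key
        self.neighbors = {}            # neighbour Vertex -> edge cost

    def add_neighbor(self, other, cost=0):
        self.neighbors[other] = cost

class Graph:
    def __init__(self):
        self.vert_list = {}
        self.num_vertices = 0

    def add_vertex(self, key):
        self.num_vertices += 1
        new_vertex = Vertex(key)
        self.vert_list[key] = new_vertex
        return new_vertex

    def get_vertex(self, n):
        if n in self.vert_list:
            return self.vert_list[n]
        raise KeyError("vertex %r does not exist" % (n,))

    def add_edge(self, f, t, cost=0):
        if f not in self.vert_list:
            self.add_vertex(f)
        if t not in self.vert_list:
            self.add_vertex(t)
        self.vert_list[f].add_neighbor(self.vert_list[t], cost)
```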
The object pk value as a string. | def object_pk(self):
if self._wrapped not in (None, empty):
return str(self._wrapped.pk)
if '_object_pk' in self.__dict__:
return self.__dict__['_object_pk']
identifier = self._get_identifier()
if identifier:
# noinspection PyBroadException
... | [
"def object_key(self) -> str:\n return self._values.get('object_key')",
"def _get_obj_pk(self, obj):\n if self.use_natural_keys and hasattr(obj, 'natural_key'):\n raw_nat_key = obj.natural_key()\n obj_pk = smart_text(NATURAL_KEY_JOINER.join(raw_nat_key))\n keytype = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get or add a LazyModelObject instance to this dictionary. Accepts the same arguments as the LazyModelObject class. Returns a LazyModelObject instance. | def get_or_add(self, *args, **kwargs):
key = LazyModelObject.get_identifier(*args, **kwargs)
try:
return self[key]
except KeyError:
item = LazyModelObject(*args, **kwargs)
if not item:
item = None
self[key] = item
retur... | [
"def __get__(self, model_instance, model_class):\n instance = None\n\n if model_instance is None:\n return self\n\n if hasattr(model_instance, self.__id_attr_name()):\n reference_id = getattr(model_instance, self.__id_attr_name())\n else:\n reference_id =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
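The same get-or-add (memoized construction) pattern in generic form — a sketch over a plain dict; `LazyModelObject` itself belongs to the row above and is not redefined here:

```python
class Cache(dict):
    def get_or_add(self, key, factory):
        try:
            return self[key]           # hit: return the cached instance
        except KeyError:
            value = self[key] = factory(key)
            return value               # miss: build once, then cache

c = Cache()
assert c.get_or_add('a', str.upper) == 'A'
assert c.get_or_add('a', str.upper) is c['a']   # second call is a cache hit
```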
Remove any matches (and their entries from {matches}, {read_count} and {phreds}) that have 1 or more positions with a superPhred score < {min_phred_score}. Returns count, the total number of reads removed, and count_unique, the total number of unique reads removed. | def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):
count = count_unique = 0
    kk = list(matches.keys())  # snapshot the keys: entries are deleted while looping
for k in kk:
m = matches[k]
if any( x < min_phred_score for x in phreds[m.read.tostring()] ):
count += read_count[m.read.tostring()]
count_unique += 1
if ditched... | [
"def remove_relative_frequent_words_below_score(score):\n print 'removing words with a relative frequency below: ', score\n global global_reduced_freqs\n\n for w, value in global_reduced_freqs.items():\n if value < score:\n del global_reduced_freqs[w]",
"def remove_mutants_below_readcou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Counts the total number of reads in M | def count(self):
return sum(read.copy for read in self.__iter__()) | [
"def __count_indices__(self):\n count = 0\n for seq_read in self.seq_reads:\n if seq_read.__is_index__():\n count += 1\n return count",
"def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |