| query (string) | document (string) | negatives (list) | metadata (dict) |
|---|---|---|---|
Validate the phone number on the Authy API Server. If valid, the Twilio API will send a 4-digit verification token via SMS. | def validate(self, data):
phone_number = phonenumbers.parse(
str(data.get('phone_number')), None)
authy_api = AuthyApiClient(settings.ACCOUNT_SECURITY_API_KEY)
authy_phone = authy_api.phones.verification_start(
phone_number.national_number,
phone_n... | [
"def validate_verification_text(self, token, country_code=settings.TWILIO_US_COUNTRY_CODE):\n verification = None\n\n if not settings.DEBUG:\n verification = get_authy_client().phones.verification_check(\n self.phone_number,\n country_code,\n tok... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loops over attributes and runs methods that begin with the word `test`. | def main(self):
for item in self.__dir__():
if 'test' in item:
function = self.__getattribute__(item)
if 'output' in function.__code__.co_varnames:
self.print_results(item, function,
main=True, output=True)
... | [
"def gen_tests(test_class):\n for class_attr in dir(test_class):\n if class_attr.startswith('test_'):\n yield class_attr",
"def obj_tests(self):\n return [func for func in dir(self) if callable(getattr(self, func)) and func.startswith(\"test_\")]",
"def test(attr=\"quick\", verbose=F... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializing the variables h and data. h is a list of five 8-digit hexadecimal numbers. We will start with this as a message digest. 0x is how you write hexadecimal numbers in Python. | def __init__(self, data):
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0] | [
"def _hash_init(self):\r\n # Initialize the indices and data dependencies.\r\n self.rotor = 1\r\n self.ratchet = 3\r\n self.avalanche = 5\r\n self.last_plain = 7\r\n self.last_cipher = 11\r\n\r\n # Start with cards all in inverse order.\r\n self.cards = list(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a source item id, find its most matched k items. | def find_matched_clothes(self, source_item, k):
pass | [
"def find_matched_clothes(self, source_item_id, k):\n if source_item_id not in self._item_info:\n return []\n\n # compute how many matched items in each category by category matching model\n cat_id = self._item_info[source_item_id].get_cat_id()\n if cat_id not in self._cat_rel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a source item id, find its most matched k items. | def find_matched_clothes(self, source_item_id, k):
if source_item_id not in self._item_info:
return []
# compute how many matched items in each category by category matching model
cat_id = self._item_info[source_item_id].get_cat_id()
if cat_id not in self._cat_relationship_m... | [
"def FindTopN(items, n=3):\n candidates = []\n for item in items:\n candidates.append(Candidate(item))\n\n tops = []\n for i in range(n):\n winners = HeadToHead(candidates)\n while len(winners) > 1:\n winners = HeadToHead(winners)\n tops.append(winners[0].name)\n candidates = winners[0].lose... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
crop data as krl | def crop_input(data, krl, k):
# data: (b,c,h,w), krl: (b,k**2,h_,w_)
r = k // 2
if data.shape[-2:] != krl.shape[-2:]:
with torch.no_grad():
dx = data.shape[-2] - krl.shape[-2] - 2 * r
dy = data.shape[-1] - krl.shape[-1] - 2 * r
data = data[:, :, dx // 2: - dx // 2... | [
"def cropImage():",
"def _crop(self, img, hm, padding, crop_box):\n img = np.pad(img, padding, mode = 'constant')\n hm = np.pad(hm, padding, mode = 'constant')\n max_lenght = max(crop_box[2], crop_box[3])\n img = img[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght //2, crop_box[0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that when an async task is created and completed, the Task object has a status of 'SUCCESS' and contains the return value of the task. | def test_asynctask_reports_success(self):
metadata = {'test': True}
task_options = {
'user_id': self.user.pk,
'task_type': 'asynctask',
'metadata': metadata
}
task, task_info = create_async_task('test', task_options)
self.assertTrue(Task.object... | [
"def test_get_task_status(self):\n pass",
"def test_asynctask_reports_progress(self):\n metadata = {'test': True}\n task_options = {\n 'user_id': self.user.pk,\n 'task_type': 'asynctask',\n 'metadata': metadata\n }\n task, task_info = create_asyn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we can retrieve task progress via the Task API. | def test_asynctask_reports_progress(self):
metadata = {'test': True}
task_options = {
'user_id': self.user.pk,
'task_type': 'asynctask',
'metadata': metadata
}
task, task_info = create_async_task('progress-test', task_options)
self.assertTrue(T... | [
"def test_get_task_status(self):\n pass",
"def test_set_progress(self):\n pass",
"def test_api_v1_radar_serverless_progress_get(self):\n pass",
"def test_increment_progress(self):\n pass",
"def test_tower_api_progress(tower_id: str, response_file: Path, expected_progress: float) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that if a task fails with an error, that the error information is stored in the Task object for later retrieval and analysis. | def test_asynctask_reports_error(self):
metadata = {'test': True}
task_options = {
'user_id': self.user.pk,
'task_type': 'asynctask',
'metadata': metadata
}
task, task_info = create_async_task('error-test', task_options)
task = Task.objects.ge... | [
"def task_failed(self, task, errors):\n pass",
"async def _set_error(self, error: str):\n await self.data.tasks.update(self.task_id, TaskUpdate(error=error))\n self.errored = True",
"def do_task(self, task):\n def _on_fail():\n raise self.TaskFailed(\"Task failed\")\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we don't add a Task entry when we create a new Celery task outside of the create_async_task API. | def test_only_create_async_task_creates_task_entry(self):
task = non_async_test_task.apply_async()
result = task.get()
self.assertEquals(result, 42)
self.assertEquals(Task.objects.filter(task_id=task.id).count(), 0) | [
"def test_create_subtask_for_task(self):\n pass",
"def test_duplicate_task(self):\n pass",
"def test_task_no_chain(self):\n kwargs = {\"a\": 400, \"b\": 901}\n\n worker = wiji.Worker(the_task=self.myTask, worker_id=\"myWorkerID1\")\n self.myTask.synchronous_delay(a=kwargs[\"a\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode arbitrary-length binary-coded decimals. | def decode_bcd(bytes_in):
v = 0
if isinstance(bytes_in, int):
bytes_in = bytes([bytes_in])
n = len(bytes_in)
n = n * 2 - 1 # 2 values per byte
for byte in bytes_in:
v1, v2 = Decoder.bcd(byte)
v += v1 * 10 ** n + v2 * 10 ** (n - 1)
... | [
"def _decode_int(data):\n data = data[1:]\n end = data.index(b'e')\n return int(data[:end],10), data[end+1:]",
"def decode_length(binary_string, length_nibble_position):\n length_info = packet_eater.read_nibbles(binary_string, \n length_nibble_position)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode unsigned ints as booleans. | def decode_bin_bool(bytes_in):
b = Decoder.decode_bin(bytes_in)
return b > 0 | [
"def boolFromBytes(b):\n return b == 0x01",
"def bool_converter(val):\n return bool(strtobool(str(val)))",
"def boolToBytes(v):\n return 0x01 if v else 0x00",
"def int_to_bool(value):\n try:\n return bool(int(value))\n except ValueError:\n raise TypeError('must supply integer stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode positive binary fractions. | def decode_fraction(bytes_in):
if PY2:
# transform bytes_in to a list of ints
bytes_ord = map(ord, bytes_in)
else:
# in PY3 this is already the case
bytes_ord = bytes_in
bit = ''.join('{:08b}'.format(b) for b in bytes_ord)
return sum(int(x)... | [
"def decodeDecimalFraction(value, alphabetInfo):\n # Convert the alphabet raw frequencies into fractions\n fractions, totalLength = getAlphabetFractions(alphabetInfo)\n # Keep track of the decoded part of the string\n s = ''\n\n bottom = Decimal('0')\n top = Decimal('1')\n\n # Loop until the st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A project with a mock connection that does not hit any server. Instead, calls to pseudonymize and reidentify will return a random list of Key objects with valid value_types | def mock_project():
mock_connection = Mock(spec=PIMSConnection)
mock_connection.pseudonymize.return_value = [
TypedKeyFactory() for _ in range(20)
]
mock_connection.reidentify.return_value = [
TypedKeyFactory() for _ in range(20)
]
return Project(key_file_id=1, connection=mock_c... | [
"def test_typed_key_factory(value_type):\n key = Key(identifier=IdentifierFactory(source=value_type), pseudonym=PseudonymFactory())\n\n typed_key = KeyTypeFactory().create_typed_key(key)\n assert typed_key.value_type == value_type",
"def test_typed_key_factory(value_type):\n key = Key(\n identi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creating typed keys for these value types should work | def test_typed_key_factory(value_type):
key = Key(
identifier=IdentifierFactory(source=value_type),
pseudonym=PseudonymFactory(),
)
typed_key = KeyTypeFactory().create_typed_key(key)
assert typed_key.value_type == value_type | [
"def test_typed_key_factory(value_type):\n key = Key(identifier=IdentifierFactory(source=value_type), pseudonym=PseudonymFactory())\n\n typed_key = KeyTypeFactory().create_typed_key(key)\n assert typed_key.value_type == value_type",
"def of(python_type: Any) -> str:\n if python_type is str or isin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setting a pseudonym that is already used for a different identifier should fail | def test_set_keys_existing_pseudonym(mock_pims_session, some_patient_id_keys):
connection = PIMSConnection(session=mock_pims_session)
mock_pims_session.session.set_response_tuple(
RequestsMockResponseExamples.DEIDENTIFY_FAILED_TO_INSERT
)
# keys need to be patientID to match mocked response abov... | [
"async def set_alias(self, ctx, word: str, alias: str, is_proper: bool = \"True\"):\n db = self.db_utils\n try:\n if is_proper:\n proper: int = 1\n else:\n proper: int = 0\n await db.set_alias(word, alias, proper)\n msg = f'Alia... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns tuple with pixels_per_mm_x and pixels_per_mm_y | def get_pixels_per_mm(self):
return (self.pixels_per_mm_x, self.pixels_per_mm_y) | [
"def physical_size_mm(self) -> tuple[int, int]:\n return self._ptr.phys_width, self._ptr.phys_height",
"def pxsize(self):\n tag_root = 'root.ImageList.1'\n pixel_size = float(\n self.tags[\"%s.ImageData.Calibrations.Dimension.0.Scale\" % tag_root])\n unit = self.tags[\"%s.Im... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns list of available phases | def get_phase_list(self):
return self.phase_list | [
"def steps_by_phases(self):\n phases = {}\n for step in self.steps.all():\n if step.phase not in phases:\n phases[step.phase] = []\n phases[step.phase].append(step)\n\n return phases",
"def collect_phases(cls):\n phase_attrs_by_index = dict()\n for a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the character width of the current terminal window | def get_terminal_width(self):
width = 60 # Use this as a minimum
try:
size = os.get_terminal_size()
except OSError:
size = None
if size and size[0] > width:
width = size[0]
if os.name == 'nt':
width -= 1 # Windows needs 1 empty sp... | [
"def _get_width(self) -> \"int\" :\n return _core.TextCommandPalette__get_width(self)",
"def effective_width(self):\n return self._console_width - (INDENTATION_WIDTH * self._level)",
"def get_width( o ):\n \"\"\"获取该字符在屏幕上的显示的长度\"\"\"\n global widths\n if o == 0xe or o == 0xf:\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify the given catalog is a valid value. | def validate_catalog(self, catalog: str) -> None:
url = self.api_base + '/index/catalogs'
response = self.get_json_response(url)
if catalog not in response['catalogs']:
print(f'Invalid catalog: {catalog}')
exit(1) | [
"def verify(filename):\n\n path, fn = os.path.split(filename)\n catobj = None\n\n if fn.startswith(\"catalog\"):\n if fn.endswith(\"attrs\"):\n catobj = CatalogAttrs(meta_root=path)\n else:\n catobj = CatalogPart(fn, me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify the given project ID is valid for the given catalog. | def validate_project(self, catalog: str, project_id: str) -> None:
try:
uuid.UUID(project_id)
except (ValueError, TypeError):
print('Project ID must be a valid a UUID')
exit(1)
url = self.api_base + f'/index/projects/{project_id}'
try:
self... | [
"def _check_project_exists(course_code: str, project_id: str):\n check_course_exists(course_code)\n\n if not project_helper.project_exists(course_code, project_id):\n click.echo(f'The project with id \"{project_id} does not exist in \"{course_code}\".')\n sys.exit(1)",
"def validate_catalog(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print a list of all available catalogs. | def list_catalogs(self) -> None:
url = self.api_base + '/index/catalogs'
response = self.get_json_response(url)
print()
for catalog, details in response['catalogs'].items():
if not details['internal']:
print(catalog)
print() | [
"def showCatalog():\n state = generateState(login_session, 'state')\n categories = session.query(Category).all()\n return render_template('allCategories.html', categories=categories,\n STATE=state, session=login_session)",
"def print_book_list():\n print_all_books()",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print a list of all available projects in the given catalog. | def list_projects(self, catalog: str) -> None:
url = self.api_base + '/index/projects'
params = {
'catalog': catalog,
'size': 100,
'sort': 'projectTitle',
'order': 'asc'
}
print()
screen_width = self.get_terminal_width()
whi... | [
"def _print_projects():\n project_dir = projects_path()\n print(' '.join(\n ['aeriscloud'] +\n [\n pro\n for pro in os.listdir(project_dir)\n if os.path.exists(os.path.join(project_dir, pro,\n '.aeriscloud.yml'))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of file type summaries for the given project. | def get_file_summary(self,
catalog: str,
project_id: str
) -> List[Mapping[str, Any]]:
url = self.api_base + '/index/summary'
params = {
'catalog': catalog,
'filters': json.dumps({'projectId': {'is': [proj... | [
"def listTypes(path):\n types = os.listdir(path)\n if types:\n for name in types:\n print(name)\n else:\n print(\"No types created for the given project\")",
"def list_file_summary(self, catalog: str, project_id: str) -> None:\n summaries = self.get_file_summary(catalog, p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print a list of file type summaries for the given project. | def list_file_summary(self, catalog: str, project_id: str) -> None:
summaries = self.get_file_summary(catalog, project_id)
print()
if summaries:
width1 = max([len(s['format']) for s in summaries] + [6])
width2 = max([len(str(s['count'])) for s in summaries] + [5])
... | [
"def listTypes(path):\n types = os.listdir(path)\n if types:\n for name in types:\n print(name)\n else:\n print(\"No types created for the given project\")",
"def pretty_print(self) -> None:\r\n pt: PrettyTable = PrettyTable(field_names=['File Name','classes','functions','... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yield the leaf nodes from a project matrices tree. | def iterate_matrices_tree(self, tree, keys=()):
if isinstance(tree, dict):
for k, v in tree.items():
yield from self.iterate_matrices_tree(v, keys=(*keys, k))
elif isinstance(tree, list):
for file in tree:
yield keys, file
else:
... | [
"def get_nodes_po(self):\r\n\r\n\t\tnode_stack = [(self.root, 0)]\r\n\r\n\t\twhile len(node_stack) > 0:\r\n\t\t\tyield node_stack[-1]\r\n\t\t\tnode, indent = node_stack.pop()\r\n\r\n\t\t\tfor child in node.children[::-1]:\r\n\t\t\t\tnode_stack.append((child,indent + 1))",
"def projects_iter(self):\n for pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print all the project matrices in the given project. | def list_project_matrices(self,
catalog: str,
project_id: str) -> None:
project = self.get_project_json(catalog, project_id)
files = {}
max_size_length = 0
for key in ('matrices', 'contributedAnalyses'):
for path, fi... | [
"def printProjects(self):\n for prjct in reversed(self.projects):\n print(prjct)",
"def printMatrix(*args):\n\n for M in args:\n if type(M).__module__ == np.__name__ or type(M) == \"list\":\n for row in M:\n s = [\"{:8.3}\"] * len(row)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download all the project matrices data files in the given project. | def download_project_matrices(self,
catalog: str,
project_id: str,
destination: str) -> None:
self.create_destination_dir(destination)
project = self.get_project_json(catalog, project_id)
file_u... | [
"def download_ml_data(self, lproject):\n project = self.session.projects[lproject]\n train_dir = os.path.expanduser(lproject + \"/TRAIN\")\n test_dir = os.path.expanduser(lproject + \"/TEST\")\n\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n if not os.pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints instructions including a curl command to download all files of the given format(s) from a project. | def download_files_by_format(self,
catalog: str,
project_id: str,
formats: List[str],
) -> None:
if len(formats) == 1 and formats[0] == 'ALL':
summaries = self.get_file... | [
"def download_project_files():\n log_path = os.getcwd() + \"/log/\"\n output = subprocess.getstatusoutput(\"cd AIstudio_Download && python ./aistudio_client.py\")\n with open(log_path + \"download.log\", \"a\") as flog:\n flog.write(\"%s\" % (output[1]))",
"def download(all):\n print(\"Download... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive a new candle event from feed. self.feed.candles dataframe contains all candles including this one. | def on_candle(self, ohlcv: Ohlcv):
# Skip if too early for a new processing cycle
self._logger.debug(f"Got new candle ohlcv={ohlcv}") | [
"def feed_data(self, last_candles):\n raise NotImplementedError",
"def subscribe_to_candles(self, symbol, timeframe, callback):\n\n valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',\n '7D', '14D', '1M']\n if timeframe:\n if timeframe not in v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Got new level2 data. self.feed.level2 contains all level2 records including this one | def on_level2(self, level2: Level2):
self._logger.debug(f"Got new level2: {level2}")
return | [
"def on_level2(self, level2: Level2):\n self._logger.debug(f\"Received level2 {level2}\")\n asset_str = str(level2.asset)\n # Add new level2 records to dataframe\n for item in level2.items:\n self.level2 = self.level2.append({'datetime': level2.dt, 'ticker': asset_str, 'price'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decodes an integer sequence, of the format given by encode_midi, back into a midi file. | def decode_midi(time_shift=Fraction(1, 12)):
def _midi(x):
time = time_shift * 0
for i, event in enumerate(x):
if event < 128:
nextE = x[i+1] if i+1 < len(x) else -1
if 256 <= nextE < 384:
vel = nextE - 256
else:
... | [
"def _convert_to_midi_file(self, notes):\n\n offset = 0\n output_notes = []\n # create note and chord objects based on the values generated by the model\n for pattern in notes:\n # pattern is a chord\n if (',' in pattern) or pattern.isdigit():\n pitch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches the values with the given query names and returns them in a list in the same order as names. | def getQueryValues(handler, names):
# Iterate through all the names and request the values
values = []
for name in names:
values.append(getQueryValue(handler, name))
return values | [
"def getList(name):",
"def fetch_as_lists(self, wmi_classname, fields, **where_clause):\n wql = \"SELECT %s FROM %s\" % (\", \".join(fields), wmi_classname)\n if where_clause:\n wql += \" WHERE \" + \" AND \".join([\"%s = '%s'\" % (k, v) for k, v in where_clause.items()])\n results... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines whether the current user's account level meets the required account level. If the user does not, sets the redirect flags to the unauthorized page. Usually, if this method returns false the caller will want to immediately return. | def userIsAuthorized(handler, requiredAccountLevel):
currentUser = uau.getLoggedInUser(handler)
userAccountLevel = uau.loggedOut
if currentUser != None:
userAccountLevel = currentUser.accountLevel
if not uau.doesUserHavePermission(userAccountLevel, requiredAccountLevel):
handler.red... | [
"def doesUserHavePermission(userAccountLevel, requiredAccountLevel):\n return userAccountLevel <= requiredAccountLevel",
"def needs_auth(self, user, **kwargs):\n if self.auth_provider is None:\n return False\n\n if not user.is_authenticated():\n return True\n\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use kmeans clustering to segment berries based on color | def six():
orig = cv2.imread('berries.png')
# blur to remove details and smoothen image
img = cv2.GaussianBlur(orig, (7, 7), 4, 4)
# convert to HSV and saturate the colors
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img = np.uint16(img)
img[:, :, 1] += 128
img[:, :, 2] += 64
img[img >... | [
"def cluster_segment(img, n_clusters, random_state=0):\n # Downsample img first using the mean to speed up K-means\n img_d = block_reduce(img, block_size=(2, 2, 1), func=np.mean)\n img_d = cv2.GaussianBlur(img_d, (5, 5), 0)\n\n # first convert our 3-dimensional img_d array to a 2-dimensional array\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a child ConvBlockGene to this gene's children. | def add_child(self):
# The new child block
new_block = ConvBlockGene('decode block',
parent=self)
# Add the new block to self.children
self.children.append(new_block)
pass | [
"def setup_children(self):\n\n # Get the number of blocks of the encoder gene\n # (Note that the decoder part of the network will have an extra block)\n encoder = self.root.children[0]\n n_encoder_blocks = encoder.hyperparam('n_blocks')\n\n # In a BlockSetGene, children are blocks... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up child blocks. | def setup_children(self):
# Get the number of blocks of the encoder gene
# (Note that the decoder part of the network will have an extra block)
encoder = self.root.children[0]
n_encoder_blocks = encoder.hyperparam('n_blocks')
# In a BlockSetGene, children are blocks
n_c... | [
"def __init__(self, root_block):\n self.root_block = root_block\n self.blocks = {'@': root_block}\n self.block_names = {\"default\":[]}\n #registering blocks by id\n self.register_blocks(root_block.ch_blocks)\n self.register_block_names()",
"def update_blocks_register(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up the Edges that target this Gene and its children. | def setup_edges(self, edges: Union[ScaleEdge, Sequence[ScaleEdge]]):
# Add forward edges
super().setup_edges(edges)
# Add merge edges
encoder = self.root.children[0]
for dblock in self.children:
child_scale = dblock.hyperparam('spatial_scale')
for eblock ... | [
"def setup_children(self):\n\n # Get the number of blocks of the encoder gene\n # (Note that the decoder part of the network will have an extra block)\n encoder = self.root.children[0]\n n_encoder_blocks = encoder.hyperparam('n_blocks')\n\n # In a BlockSetGene, children are blocks... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recalculate the spatial scales of child ScaleGenes. Used after modifying the number of DecoderGene children. Operates on the assumption that the final child's scale stays the same, and each previous child's scale increments by 1. | def _rescale_children(self):
n_children = len(self.children)
self_scale = self.hyperparam('spatial_scale')
# Update children
for i, child in enumerate(self.children):
child: ConvBlockGene
new_scale = self_scale + n_children - 1 - i
# Update spatial... | [
"def _update_scale(self,\n child: ConvBlockGene,\n new_scale: int,\n self_scale: int):\n child.set(spatial_scale=new_scale)\n\n # Update n kernels\n d_kernels = 2 ** (new_scale - self_scale)\n self_kernels = self.hyperparam('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the spatial scale of a child ConvBlockGene. | def _update_scale(self,
child: ConvBlockGene,
new_scale: int,
self_scale: int):
child.set(spatial_scale=new_scale)
# Update n kernels
d_kernels = 2 ** (new_scale - self_scale)
self_kernels = self.hyperparam('n_kernels')
... | [
"def _rescale_children(self):\n n_children = len(self.children)\n self_scale = self.hyperparam('spatial_scale')\n\n # Update children\n\n for i, child in enumerate(self.children):\n child: ConvBlockGene\n\n new_scale = self_scale + n_children - 1 - i\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the requested Keyword Plan campaign in full detail. | def GetKeywordPlanCampaign(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | [
"def create_keyword_plan_campaign(client, customer_id, keyword_plan):\r\n operation = client.get_type(\"KeywordPlanCampaignOperation\", version=\"v6\")\r\n keyword_plan_campaign = operation.create\r\n\r\n keyword_plan_campaign.name = f\"Keyword plan campaign {uuid.uuid4()}\"\r\n keyword_plan_campaign.cp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates, updates, or removes Keyword Plan campaigns. Operation statuses are returned. | def MutateKeywordPlanCampaigns(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | [
"def create_keyword_plan_campaign(client, customer_id, keyword_plan):\r\n operation = client.get_type(\"KeywordPlanCampaignOperation\", version=\"v6\")\r\n keyword_plan_campaign = operation.create\r\n\r\n keyword_plan_campaign.name = f\"Keyword plan campaign {uuid.uuid4()}\"\r\n keyword_plan_campaign.cp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert source to Python 3.x syntax using lib2to3. | def convert2to3():
# create a new 2to3 directory for converted source files
dst_path = os.path.join(LOCAL_PATH, '2to3')
shutil.rmtree(dst_path, ignore_errors=True)
# copy original tree into 2to3 folder ignoring some unneeded files
def ignored_files(_adir, filenames):
return ['.svn', '2to3',... | [
"def py2to3(ast):\n return _AST2To3().visit(ast)",
"def transcode(source, from_dialect, to_dialect):\n # In many ways, this is a simplified version of Convert, but where\n # we assume that the syntax of the source is valid.\n if from_dialect == to_dialect:\n return source\n\n if to_dialect =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for a device with OpenCL support, and create device context | def _create_context():
platforms = cl.get_platforms() # Select the first platform [0]
if not platforms:
raise EnvironmentError('No openCL platform (or driver) available.')
# Return first found device
for platform in platforms:
devices = platform.get_devices()
if devices:
... | [
"def create_context(device_type=device_types.ALL,\n vendor=vendors.ALL,\n device_list=None):\n global context, CU_count\n # > if device list is specified\n if device_list is not None:\n context = Context(device_list)\n return context\n # > other wise cre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the buffer object for an image | def _get_image_buffer(image):
image = image.convert("RGBA")
image = np.array(image)
return cl.image_from_array(_context, image, num_channels=4, mode="r", norm_int=False) | [
"def image_from_byte_buffer(buffer: BytesLike, size: Tuple[int, int], stride: int):\n ystep = 1 # image is top to bottom in memory\n return Image.frombuffer('L', size, buffer, \"raw\", 'L', stride, ystep)",
"def img2buffer(self, img):\n buffer = [0x0] * self.BUFFER_SIZE\n offset = [n*self.DIS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate ancestors that satisfy ``predicate``. Generator that climbs the tree yielding resources for which ``predicate(current_resource)`` returns True. | def ancestor_finder(resource, predicate, include_self=False):
resource = resource if include_self else getattr(resource, "__parent__", None)
while resource is not None:
if predicate(resource):
yield resource
resource = getattr(resource, "__parent__", None) | [
"def ancestors(self) -> QuerySet['TreeModel']:\n queryset = self.__class__.objects.filter(path__descendant=self.path)\n return queryset.exclude(id=self.id)",
"def ancestor(resource, cls, include_self=False): # noqa\n\n def predicate(resource):\n return isinstance(resource, cls)\n\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the first ancestor of ``resource`` that is of type ``cls``. | def ancestor(resource, cls, include_self=False): # noqa
def predicate(resource):
return isinstance(resource, cls)
return first(ancestor_finder(resource, predicate, include_self)) | [
"def ancestor_model(resource, cls, include_self=False): # noqa\n\n def predicate(resource):\n return hasattr(resource, \"model\") and isinstance(resource.model, cls)\n\n o = first(ancestor_finder(resource, predicate, include_self))\n return o.model if o else None",
"def find_root(resource):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find in ancestors a model instance of type ``cls``. The search is done in the ``model`` attribute of the ancestors of ``resource``. Returns None if not found. | def ancestor_model(resource, cls, include_self=False): # noqa
def predicate(resource):
return hasattr(resource, "model") and isinstance(resource.model, cls)
o = first(ancestor_finder(resource, predicate, include_self))
return o.model if o else None | [
"def ancestor(resource, cls, include_self=False): # noqa\n\n def predicate(resource):\n return isinstance(resource, cls)\n\n return first(ancestor_finder(resource, predicate, include_self))",
"def ancestor_finder(resource, predicate, include_self=False):\n resource = resource if include_self else... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find and return the root resource. | def find_root(resource):
return ancestor(resource, type(None)) | [
"def get_root(self):\n return self.get_obj(self._root_path)",
"def get_root(cls):\n try:\n return Article.objects.filter(parent__exact = None)[0]\n except:\n raise ShouldHaveExactlyOneRootSlug()",
"def root_path():\n return Root()",
"def get_root(self):\n\n def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render gaussian heat maps from given centers. | def render_gaussian_hmap(centers, shape, sigma=None):
if sigma is None:
sigma = shape[0] / 40
x = [i for i in range(shape[1])]
y = [i for i in range(shape[0])]
xx, yy = np.meshgrid(x, y)
xx = np.reshape(xx.astype(np.float32), [shape[0], shape[1], 1])
yy = np.reshape(yy.astype(np.float32), [shape[0], sha... | [
"def draw_heatmap(gazepoints, dispsize, imagefile=None, alpha=0.5, savefilename=None, gaussianwh=200, gaussiansd=None):\n\n # =========================================== # \n # Show the background image # \n # =========================================== #\n fig, ax = draw_display(disps... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Whether two arrays are identical. | def arr_identical(a, b, verbose=True):
if a.shape != b.shape:
if verbose:
print('Different shape: a: {}, b: {}'.format(a.shape, b.shape))
return False
else:
return np.allclose(a, b) | [
"def is_equal(array1, array2):\n assert(array1.size == array2.size)\n return all(array2 == array1)",
"def _equal(a, b):\n return type(a) != np.ndarray and a == b",
"def is_same_array(a, b):\n if not a.flags['OWNDATA'] and not b.flags['OWNDATA']:\n return a.base is b.base\n if not a.flags['... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save current timestamp and return the interval from previous tic. | def tic(self, key=None):
curr_time = time.time()
interval = 0.0
if self.last_tic is not None:
interval = curr_time - self.last_tic
if key is not None:
self.memory[key] = interval
self.last_tic = curr_time
return interval | [
"def get_current_time(self):\n if not self.is_data_set():\n return -1\n return self._interval * self._sample_number",
"def current_timestamp():\n return int(time.time() // aggregation_interval_sec) * aggregation_interval_sec",
"def getInterval(self) -> \"SbTime const &\":\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute uv coordinates of heat map maxima in each layer. | def hmap_to_uv(hmap):
shape = hmap.shape
hmap = np.reshape(hmap, [-1, shape[-1]])
v, h = np.unravel_index(np.argmax(hmap, 0), shape[:-1])
coord = np.stack([v, h], 1)
return coord | [
"def getUVs(self):\n if self.uvs:\n for uv in self.uvs:\n yield uv.u, uv.v\n elif self.uvsData:\n for uv in self.uvsData.uvs:\n yield uv.u, 1.0 - uv.v # OpenGL fix!",
"def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts usernames from ``filepath`` into Otter Service's database | def create_users(filepath, host=None, username=None, password=None, conn=None):
with open(filepath, newline='') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='|')
# TODO: fill in the arguments below
if conn is None:
conn = connect_db(host, username, passwo... | [
"def import_users(filepath=os.path.join(os.path.expanduser('~'), 'pyedu_users.json')):\n # get the roles\n r_teach = Role.query.filter_by(name='teacher').first()\n r_stud = Role.query.filter_by(name='student').first()\n\n with open(filepath) as userfile:\n users = json.loads(userfile.read())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes users specified in file ``filepath`` | def remove_users(filepath, host=None, username=None, password=None, conn=None):
with open(filepath, newline='') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='|')
# TODO: fill in the arguments below
if conn is None:
conn = connect_db(host, username, passwo... | [
"def removefsuser(self, username):",
"def remove(self, file):\n pass",
"def delete_user(self, user_name: str):\n arr = []\n with open(self.path) as input_file:\n with open(self.path + '_temp', 'w') as output_file:\n for i, line in enumerate(input_file):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implements the random surfer algorithm for a DiGraph | def surfs_up(dg):
# the goal of this function is to jump randomly from node to node until
# the most visited node--kept track of by <largest>--reaches a score of
# <MIN_SCORE>; 1 is added to the score of a node each time it is chosen
# ASSUMPTIONS:
# dg.nodes is not empty
# dg.edges ha... | [
"def randomize_edge_directions(g, p = .5):\n \"\"\"\n Hiroki's Algorithm:\n 1. Create a list of all node pairs that are connected by at least one way (or both ways).\n \n 2. For each pair in the list created above, independently decide whether you want to swap the directions of their edges (with, say... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines whether the delta values between old_nodes and new_nodes are close enough | def check_deltas(old_nodes, new_nodes):
# both arguments are expected to be a list of 2-tuples of the
# form (node, score), where "node" is in fact a key to that node
# ensures that the function will not return True when either one or both of the lists
# are empty during the first couple of iter... | [
"def test_assert_almost_equal(self):\n self.assertAlmostEqual(1.0, 1.00000001)\n #self.assertAlmostEqual(1.0, 1.00000009)\n self.assertAlmostEqual(1.0, 1.0000001, places=6)\n self.assertAlmostEqual(1.0, 1.001, delta=.01)\n #self.assertAlmostEqual(1.0, 1.1, msg=\"Not close enough.\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints formatted results for surfs_up() | def print_surfer_results(dg):
# constants used to help with formatting
NODES_FORMAT_LENGTH = int(log10(len(dg.nodes))) + 1
NODES_SCORE_LENGTH = int(log10(MIN_SCORE)) + 1
start = time.time()
total, normalized_total, top_nodes, top_nodes_normal = surfs_up(dg)
elapsed = time.time() - star... | [
"def print_results(results):",
"def printout(self):\n\n print 'Outrankings:'\n wid = self.contest.colwidth\n\n print ''.rjust(wid),\n for col in self.entries:\n print col.rjust(wid),\n print\n\n for row in self.entries:\n print row.rjust(wid),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints formatted results for rank_it() | def print_rank_results(dg):
# constant used to help with formatting
NODES_FORMAT_LENGTH = int(log10(len(dg.nodes))) + 1
start = time.time()
iterations, stability, total, top_nodes = rank_it(dg)
elapsed = time.time() - start
# similar formatting as that for the random surfer output
... | [
"def rank_print(text: str):\n rank = dist.get_rank()\n # Keep the print statement as a one-liner to guarantee that\n # one single process prints all the lines\n print(f\"Rank: {rank}, {text}.\")",
"def printout(self):\n\n print 'Outrankings:'\n wid = self.contest.colwidth\n\n prin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get size of json object in bytes | def get_json_size_bytes(json_data):
return len(bytearray(json.dumps(json_data))) | [
"def _get_answer_dict_size(answer_dict):\n return sys.getsizeof(json.dumps(answer_dict))",
"def getLength(self, obj):\n if isinstance(obj, (dict, list)):\n return len(obj)\n else:\n return 0",
"def object_size(self):\n ret = self._get_attr(\"objectSize\")\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
make a safe instance name string, concatenated with device if appropriate | def make_safe_instance_string(instance, device=''):
# strip underscores
instance = UNDERSCORE.sub('.', instance)
instance = COLONS.sub('-', instance)
# if there's a device, concatenate it to the instance with an underscore
if len(device) != 0:
instance = '{}_{}'.format(make_safe_instance_str... | [
"def makeMachineName(self):\n\n return 'vcycle-' + self.machinetypeName + '-' + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))",
"def get_device_instance_name(dev, apply_to_builtin=True):\n if 'PluginDevice' in dev.class_name or dev.class_name.startswith('MxD'):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reset the track global for the next chunk | def reset_track():
track['start_time'] = time.time()
track['line_count'] = 0
track['current_row'] = [] | [
"def reset_tracks():\n global trackq\n stop_all_tracks()\n trackq = TrackTarget(OVST)",
"def _reset_tracker_state(self):\n\n return",
"def clear_finished_tracks(self):\n del self.finished_tracks\n self.finished_tracks = []",
"def reset(self):\n self.potentials = None\n self.in_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use project type to determine agent type | def get_agent_type_from_project_type():
if 'METRIC' in if_config_vars['project_type']:
if if_config_vars['is_replay']:
return 'MetricFileReplay'
else:
return 'CUSTOM'
elif if_config_vars['is_replay']:
return 'LogFileReplay'
else:
return 'LogStreaming'
... | [
"def agent_creator(agent_type):\n# print(\"creating agent of type {}\".format(str(agent_type)))\n if \"Robot\" in str(agent_type):\n return Robot()\n if \"Person\" in str(agent_type):\n return Person()\n if \"ORCAAgent\" in str(agent_type):\n return ORCAAgent()\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use project type to determine which field to place data in | def get_data_field_from_project_type():
# incident uses a different API endpoint
if 'INCIDENT' in if_config_vars['project_type']:
return 'incidentData'
elif 'DEPLOYMENT' in if_config_vars['project_type']:
return 'deploymentData'
    else: # METRIC, LOG, ALERT
return 'metricData' | [
"def generate_field(name, data):\n assert 'type' in data\n field = TYPES_TO_FIELDS.get(data['type'], Unknown)()\n return field",
"def _fieldFromPath(self):\n pass",
"def fieldtype(self):\n return self.__class__.__name__",
"def _create_fields(self):\r\n pass",
"def FieldType(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use project type to determine which API to post to | def get_api_from_project_type():
# incident uses a different API endpoint
if 'INCIDENT' in if_config_vars['project_type']:
return 'incidentdatareceive'
elif 'DEPLOYMENT' in if_config_vars['project_type']:
return 'deploymentEventReceive'
    else: # METRIC, LOG, ALERT
return 'customp... | [
"def test_api_v3_projects_post(self):\n pass",
"def post(self):\n adm = ElectionSystemAdministration()\n prpl = Projecttype.to_dict(api.payload)\n\n if prpl is not None:\n \"\"\"We only use the attributes of projecttype of the proposal for generation\n of a object... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the go_id is in the structure, return the term, otherwise, find by alias | def find_term(self, go_id):
try:
return self.terms[go_id]
except KeyError:
return self.terms[self.alias_map[go_id]] | [
"def _get_alias(self, searchstr):\n log.debug(\"_get_alias(): Received control\")\n try:\n searchy = \"%\" + searchstr + \"%\"\n log.debug(\"_get_alias(): searching with search string \" + searchy)\n result = self.db_cur.execute(\"SELECT userid FROM aliases WHERE alias... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Store PBBs in dataframe (self.pbbs) along with ground truth binary labels. | def generate_stats(self):
if self.test_set == 'test':
print 'Error: test set has no labels'
return
lbbs_not_in_pbbs_df = pd.DataFrame(columns=['pid','z','y','x','d'])
if self.classifier_pred_path is None:
pbbs_df = pd.DataFrame(columns=['pid','prob','z','y',... | [
"def importPLINKDATA(self, bfile):\n filename = bfile + '.bim'\n self.SNPs = pd.read_table(\n bfile+'.bim', sep=None, names=['CHR', 'RSID', 'Cm', 'POS', 'ALT', 'REF'], engine='python')\n self.Samples = pd.read_table(bfile+'.fam', sep=None,\n names=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given text in "news" format, strip the headers, by removing everything before the first blank line. | def strip_newsgroup_header(text):
_before, _blankline, after = text.partition('\n\n')
return after | [
"def strip_newsgroup_header(text):\n ...",
"def rm_first_line(text):\n return '\\n'.join(text.split('\\n')[1:])",
"def _remove_empty_lines(article_text: str) -> str:\n return '\\n'.join(\n filter(lambda s: s.strip() != '', article_text.split('\\n')))",
"def delete_till_beginning_of_lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given text in "news" format, strip lines beginning with the quote characters > or |, plus lines that often introduce a quoted section | def strip_newsgroup_quoting(text):
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines) | [
"def strip_newsgroup_quoting(text):\n ...",
"def _remove_empty_lines(article_text: str) -> str:\n return '\\n'.join(\n filter(lambda s: s.strip() != '', article_text.split('\\n')))",
"def remove_defined_articles(self, text: str) -> str:\n cleaned_text = re.sub(self.quote_pattern, \"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given text in "news" format, attempt to remove a signature block. As a rough heuristic, we assume that signatures are set apart by either a blank line or a line made of hyphens, and that it is the last such line in the file (disregarding blank lines at the end). | def strip_newsgroup_footer(text):
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text | [
"def strip_newsgroup_header(text):\n ...",
"def remove(f, l, text):\n raw_lines = text.split(\"\\n\")\n lines = []\n # remove all unwanted empty lines.\n for line in raw_lines:\n if line == \"\" or line.isspace():\n if len(lines) == 0 or lines[-1][-1] == \"\\n\": # if there is al... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
[summary] test that the stops' locality has an nptg entry | def test_unused_locality_near_stops_has_nptg_entries():
assert unused() | [
"def test_unused_locality_near_stops_150_meters():\n assert unused()",
"def check_tsp_feasibility(rt):\n for stop_key, order in rt.tsp_route_dict.items():\n # print(stop_key, order)\n # print(rt.stp_dict[stop_key])\n index = rt.stop_key_index_dict[stop_key]\n if order == 0: # or... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
[summary] test the response is 150 or more meters from the stop | def test_unused_locality_near_stops_150_meters():
assert unused() | [
"def test_when_less_than_pi(self):\n self.responses_test([C] * 4, [C, C, D, D], [C])",
"def test_zernike_detector_response(self):\n self.assertTrue(abs(self.kp_sorted[0].response - 1256.8241) < 0.01, \"Incorrect max response\")",
"def test_radius_cap(self):\n test_query_dict = {'location': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a schedd, process its entire set of history since last checkpoint. | def process_schedd(start_time, since, checkpoint_queue, schedd_ad, args, metadata=None):
logging.info(f"Start processing the scheduler: {schedd_ad['Name']}")
my_start = time.time()
metadata = metadata or {}
metadata["condor_history_source"] = "schedd"
metadata["condor_history_runtime"] = int(my_sta... | [
"def process_histories(\n schedd_ads=[], startd_ads=[], starttime=None, pool=None, args=None, metadata=None\n):\n checkpoint = load_checkpoint(args.checkpoint_file)\n timeout = 2 * 60\n\n futures = []\n metadata = metadata or {}\n metadata[\"es_push_source\"] = \"condor_history\"\n\n manager = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a startd, process its entire set of history since last checkpoint. | def process_startd(start_time, since, checkpoint_queue, startd_ad, args, metadata=None):
my_start = time.time()
metadata = metadata or {}
metadata["condor_history_source"] = "startd"
metadata["condor_history_runtime"] = int(my_start)
metadata["condor_history_host_version"] = startd_ad.get("CondorVer... | [
"def process_histories(\n schedd_ads=[], startd_ads=[], starttime=None, pool=None, args=None, metadata=None\n):\n checkpoint = load_checkpoint(args.checkpoint_file)\n timeout = 2 * 60\n\n futures = []\n metadata = metadata or {}\n metadata[\"es_push_source\"] = \"condor_history\"\n\n manager = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process history files for each schedd listed in a given multiprocessing pool | def process_histories(
schedd_ads=[], startd_ads=[], starttime=None, pool=None, args=None, metadata=None
):
checkpoint = load_checkpoint(args.checkpoint_file)
timeout = 2 * 60
futures = []
metadata = metadata or {}
metadata["es_push_source"] = "condor_history"
manager = multiprocessing.Man... | [
"def read_all_files():\n # Setting up pool with 8 processes.\n pool = Pool(processes=8) \n\n # Get the list of file names.\n path = \"DATA/votes-all/\"\n files = os.listdir(path)\n file_list = [filename for filename in files if filename.split('.')[1]=='csv']\n\n # Using the pool to map the file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get name of the thread. | def getName(self):
return self._thread.getName() | [
"def get_threadname():\n return threading.current_thread().name",
"def get_name(self):\n try:\n return self.task.split('.')[-1]\n except NotImplementedError:\n return '%s: No task specified.' % self.__class__.__name__",
"def _generateName(self):\n return \"PoolThrea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set name of this thread. | def setName(self, name):
self._thread.setName(name) | [
"def set_name(self, name):\n if self._status == \"lock\":\n raise QiitaAnalysisError(\"analysis can't be changed. It's locked\")\n self._name = name",
"def set_name(self, new_name):\n self.name = new_name",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run a WorkItem object in this thread. | def run(self, workItem):
if self.join_flag:
# When someone waits for the thread to finish executing work items(join call.).
raise WorkException("The discussion is already hoping to end with a join call! A new work item cannot be added!")
if not self._thread.is_alive... | [
"def handleWork(self, work):\n raise NotImplementedError()",
"def __call__(self, event, payload):\n # as we defined a threadpool we can enqueue our item\n # and move to the next.\n self.threadpool.enqueue(event, payload)\n print(\"Thread with payload \" + str(payload) + \" is en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy rayoptics mpl styles to user's mpl_config dir. | def copy_styles():
pth = Path(__file__).resolve().parent
styles_dir = Path(pth / 'styles')
mpl_configdir = Path(matplotlib.get_configdir()) / 'stylelib'
mpl_configdir.mkdir(exist_ok=True)
for mpl_style in styles_dir.glob('*.mplstyle'):
copy2(mpl_style, mpl_configdir) | [
"def set_style():\n\n fig_width_1col = 3.4 # figure width in inch\n fig_width_2col = 7.0 # figure width in inch\n fig_aspect_ratio = 0.66 # width to height aspect ratio\n font_size = 8 # font size in pt\n font_size_small = 6 # font size in pt\n font_scheme =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
output a text file of the lst | def output_txt_file(f_name, lst):
output_file = open('{}.txt'.format(f_name), "w", encoding="utf-8")
output_file.write('\n'.join(str(word) for word in lst))
output_file.close()
return None | [
"def list_to_file(in_list, file_name):\n with open(file_name, \"w\") as f:\n for s in in_list:\n f.write(s+'\\n')",
"def write_to_file(englist):\n\n # Output to text file\n text_file = open(\"engraving_output.txt\", \"w\")\n count = 0\n\n # Write to the text file\n text_file.wr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get frequency vector of a sentence based on vocabulary | def get_sent_freq_vec(_vocab_lst, sent):
freq_vec = [0] * len(_vocab_lst)
tokens = proc_sent(sent)
for word in tokens:
if word in _vocab_lst:
freq_vec[_vocab_lst.index(word)] += 1
return freq_vec | [
"def bagOfWords2Vec(vocabList, inputSentence):\n sentenceVector = [0] * len(vocabList)\n for word in inputSentence:\n if word in vocabList:\n sentenceVector[vocabList.index(word)] += 1\n else:\n print(\"The word: %s is not vocabulary list\" % word)\n\n return sentenceVec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the log value of a list of conditional probabilities | def get_log_of_cond_prob(cond_prob_lst):
return list(map(lambda ele: math.log(ele, 10), cond_prob_lst)) | [
"def log_prob(list):\n p=0\n for i in list:\n p += math.log10(i)\n return math.exp(p)",
"def log_prob(self, xs, zs):\n x, y = xs['x'], xs['y']\n log_prior = multivariate_normal.logpdf(\n zs['z'], tf.zeros(self.N), self.kernel(x))\n log_lik = tf.reduce_sum(\n bernoulli.logpmf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the protos using protoc. This session must be last to avoid overwriting the protos used in CI runs. | def generate_protos(session):
# longrunning operations directory is non-standard for backwards compatibility
# see comments in directory for details
# Temporarily rename the operations_pb2.py to keep it from getting overwritten
os.replace(
"google/longrunning/operations_pb2.py",
"google/... | [
"def generate(env):\n try:\n bld = env['BUILDERS']['Protoc']\n except KeyError:\n bld = ProtocBuilder\n env['BUILDERS']['Protoc'] = bld\n \n env['PROTOC'] = env.Detect(protocs) or 'protoc'\n env['PROTOCFLAGS'] = SCons.Util.CLVar('')\n env['PROTOCPROTOPATH'] = SCon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TranslateZ of the root by amountToScale. Use a percentage from 0.0 to 2.0 | def TranslateZ(amountToScale):
objs = mc.ls(sl=True);
obj = objs[0];
animAttr = mc.listAnimatable(obj);
animAttrIndex = 0;
for attr in animAttr:
numKeyFrames = mc.keyframe(attr, query=True, keyframeCount=True);
if (attr == u'|root.translateZ'):
times = mc.keyframe(attr, query=True, index=(0,numKeyFrames),... | [
"def pref_to_scale(x):\n # Somewhat arbitrarily chosen.\n # By eye, 0.25 produces a good balance\n return 0.25*pow(10, float(-x)/10)",
"def adjustZValue(self, zValue):\n clampedDist = min(2.5, zValue)\n return clampedDist / 2.5 * 255.",
"def _get_z_scaling(self, horrange):\n zmin, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes as input a star data file (CSV, required) and a linelist data file (CSV, optional) to create a q2 data object. | def __init__(self, fname_star_data, fname_lines=None):
try:
self.star_data = read_csv(fname_star_data, file_type='stars')
self.star_data_fname = fname_star_data
if not self.star_data:
logger.error('Star data file not read. Data.star_data '+\
... | [
"def read_line_list(label):\n \n\n if label=='atom':\n filename=resource_filename('IGM','lines/atom_full.dat')\n elif label == 'LLS':\n filename=resource_filename('IGM','lines/lls.lst')\n elif label == 'LLS Small':\n filename=resource_filename('IGM','lines/lls_sub.lst')\n elif la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the Star object has a name that matches one of the id's in a Data object, the information from Data will be given to Star. | def get_data_from(self, Data):
#idx must correspond to a unique id; hence the [0][0]
try:
idx = np.where(Data.star_data['id'] == self.name)[0][0]
logger.info("Star '"+self.name+"' found in data object.")
except:
logger.error("Star '"+self.name+"' not found in ... | [
"def show_star(star_id):\n\n star, consts = get_star_info(star_id)\n\n return render_template(\"star_info.html\",\n star=star,\n constellations=consts)",
"def test_predefined_star(self):\n self._create_stars()\n request = self.factory.get(use... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If teff, logg, and feh are set attributes for a Star object, a model atmosphere will be interpolated from one of the | def get_model_atmosphere(self, grid='odfnew'):
if self.teff == None or self.logg == None or self.feh == None:
logger.error('To create model atmosphere, star must have all '+
'three fundamental parameters: Teff, logg, and '+
'[Fe/H].')
ret... | [
"def draw(self, star):\n # Start by getting all interpolation coefficients for all observed points\n data, weight, u, v = star.data.getDataVector(include_zero_weight=True)\n # Subtract star.fit.center from u, v\n u -= star.fit.center[0]\n v -= star.fit.center[1]\n\n coeffs,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mark a key as the default case. Deletes/Substitutes any previously existing default case. | def makedefault(self, key=None):
if key is None:
for choice in self.cases.keys():
if key is None or choice > key:
key = choice
self.cases["default"] = self.cases[key]
del self.cases[key]
return self | [
"def ctx_default(flags: Flags, fdict: FlagDict, key: str, default: Any) -> Any:\n key = ctx_fix_key(flags, fdict, key) or key\n fdict.setdefault(key, default)",
"def setDefault(key, value, context=None):",
"def setdefault(self, key, default=None):\n location = self.hash(key)\n\n if not self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Absolute path to folder for unzipped version of referenced file. | def zipdir(self):
return os.path.join(self.location, self.trunc + '_unzipped') | [
"def get_output_file_path(self):\n zip_filename = \"%s.%s_%s.wotmod\" % (\n self.author_id, self.mod_id, self.mod_version)\n return os.path.abspath(os.path.join(self.dist_dir, zip_filename))",
"def get_relative_pathname(self):\n return os.path.join(Syllabus.SYLLABUS_FILES_LOCATION,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Restore referenced file from latest backup | def restore(self):
latest_backup = self.__class__(self.get_backups()[-1])
latest_backup.copy(self.filepath) | [
"def restore_latest_backup(self):\n latest_backup_path = self.get_latest_backup_path()\n self.restore(latest_backup_path)",
"def restore():\n backup_dir = f'{args.database}/backups/{datetime.strftime(backups[args.restore - 1], \"%d-%b-%Y_%H-%M-%S\")}'\n\n if os.path.isdir(f'{args.database}/ort... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete all backups of the referenced file | def delete_backups(self):
for backup in self.get_backups():
os.remove(backup) | [
"def cleanup(self):\n self.delete_backups()\n self.delete_zip_folder()",
"def purge(self):\n for fname in os.listdir(self.filecache):\n os.remove(os.path.join(self.filecache, fname))",
"def them_all(self):\n\n to_delete = self.file_to_delete()\n\n for file in to_del... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the unzipped folder, if any | def delete_zip_folder(self):
if os.path.isdir(self.zipdir):
shutil.rmtree(self.zipdir, ignore_errors=True) | [
"def clean_up() -> None:\n rmtree(TEMP)\n Path.unlink(ROOT_DIR.joinpath(ZIP_NAME))",
"def zip_folder_and_rm_local(path: Path) -> Path:\n zip_path = zip_folder(path)\n shutil.rmtree(path)\n return zip_path",
"def clear_epub_folder():\n folder = settings.EPUB_DECOMPRESSED_FOLDER\n\n for f in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReZip a previously unzipped file and remove unzipped folder. | def rezip(self):
#TODO need special handling for .gz files
fzip = zipfile.ZipFile(self.filepath, 'w', zipfile.ZIP_DEFLATED)
if not os.path.isdir(self.zipdir):
raise IOError('No "{}" folder to rezip'.format(self.trunc))
for root, dirs, files in os.walk(self.zipdir):
... | [
"def extract_and_remove(zip_file: Path):\n # Unzip file\n zip_ref = zipfile.ZipFile(zip_file, \"r\")\n zip_ref.extractall(zip_file.parent)\n zip_ref.close()\n # Remove sessions zip file\n zip_file.unlink()",
"def rezip(self):\n\n exclude_files = ['.DS_Store', 'mimetype', 'iTunesMetadata.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove backups and unzipped files. | def cleanup(self):
self.delete_backups()
self.delete_zip_folder() | [
"def clean_up() -> None:\n rmtree(TEMP)\n Path.unlink(ROOT_DIR.joinpath(ZIP_NAME))",
"def remove_files(self):\n flag = False\n _, _, files = next(os.walk(self.dest_path), (self.dest_path, [], []))\n for each in files:\n file_path = os.path.join(self.dest_path, each)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the referenced file is a zip file | def is_zip_file(self):
return zipfile.is_zipfile(self.filepath) | [
"def is_zip(self, document):\n fileName, fileExtension = os.path.splitext(document)\n if fileExtension == \".zip\":\n return True\n return False",
"def is_zip(zip_fio: fileIO) -> bool:\n try:\n ZipFile(zip_fio, 'r')\n return True\n except:\n return False"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReZip a previously unzipped epub and remove unzipped folder. | def rezip(self):
exclude_files = ['.DS_Store', 'mimetype', 'iTunesMetadata.plist']
parent_dir, dir_to_zip = os.path.split(self.zipdir)
def trim(path):
"""Prepare archive path"""
zip_path = path.replace(parent_dir, "", 1)
if parent_dir:
zip_pa... | [
"def rezip(self):\n #TODO need special handling for .gz files\n fzip = zipfile.ZipFile(self.filepath, 'w', zipfile.ZIP_DEFLATED)\n if not os.path.isdir(self.zipdir):\n raise IOError('No \"{}\" folder to rezip'.format(self.trunc))\n for root, dirs, files in os.walk(self.zipdir)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dumps the list of users and shares registered/present at addr. Addr is a valid host name or IP address. | def dump(self, addr):
encoding = sys.getdefaultencoding()
print
if (self.__username and self.__password):
print '[+] Attaching to ' + addr + ' using ' + self.__username + ":" + self.__password
elif (self.__username):
print '[+] Attaching to ' + addr + ' using ' + self.__username
else:
print '[+] Att... | [
"def dump_friends():\n\n friend_list = get_my_friends()\n\n fid = open('steam_friend_dump.tsv', 'w')\n fields = ('steamid', 'steamnick')\n print('%s\\t%s' % fields, file=fid)\n for id, nick in friend_list.items():\n print('%s\\t%s' % (id, nick), file=fid)\n\n fid.close()",
"def dump_utxo(self, ut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Figure out what pool this `map_id` belongs in. Returns shorthand pool notation, equivalent to the collection name in the `mappools` database. Returns `None` on fail. | async def determine_pool(map_id):
db = client["mappools"]
collection = db["meta"]
cursor = collection.find()
#well i'd hope we never end up with 100 pools
for meta_document in await cursor.to_list(length=100):
if map_id in meta_document["diff_ids"]:
return meta_document["_... | [
"async def get_map_document(id, pool=None):\r\n db = client['mappools']\r\n try:\r\n int(id)\r\n #id is only numbers, and is probably a /b id\r\n if not pool:\r\n pool = await determine_pool(id)\r\n pool_collection = db[pool]\r\n return await pool_collection.find_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the tournament-wide meta document. If the meta document does not exist, returns None. | async def get_meta_document():
db = client["tournament_data"]
meta_collection = db["meta"]
return await meta_collection.find_one({'_id': "main"}) | [
"def get_meta (self) :\n return self._meta",
"def wind_meta(self):\n if self._wind_meta is None:\n path = os.path.join(self.META_ROOT, 'wind_site_meta.csv')\n self._wind_meta = self.load_meta(path)\n\n return self._wind_meta",
"def metadata(self):\r\n metadataur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the DiscordUser document associated with a Discord ID. If this fails, generates a new DiscordUser document and returns the newly-created (though empty) document. (This guarantees a document is always returned.) | async def get_user_document(discord_id):
db = client['discord_users']
discord_user_collection = db['discord_users']
user_document = await discord_user_collection.find_one({'_id': discord_id})
if not user_document:
await db_manip.create_discord_user(discord_id)
user_document = await... | [
"async def get_user_by_discord_id(discord_id: str, bot: discord.Client) -> User:\n logger.debug('get_user_by_discord_id: passed Discord ID: {}'.format(discord_id))\n # Try to find discord_id in database\n try:\n db_user = session.query(User).filter_by(discord_id=discord_id).one_or_none()\n except... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |