query | document | negatives | metadata
|---|---|---|---|
If master is dead, worker should exit gracefully | def test_dead_master(self):
tasks.print_task('hello world')
with run_kuyruk(terminate=False) as worker:
worker.expect('hello world')
worker.kill()
worker.expect_exit(-signal.SIGKILL)
wait_until(not_running, timeout=TIMEOUT) | [
"def on_worker_stopped(self):\n pass",
"def exit_slave(self, status = 0):\n\n\t\tif not self.ismaster:\n\t\t\tsys.exit(status)",
"def __exit__(self, *_: Any) -> None:\n if self._tasks_queue is not None:\n self.terminate()",
"def exit(self) -> None:\n for worker_id in self._work... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the current line | def current_line(self):
return self._current_line | [
"def get_cur_line(self):\n return self.cur_line",
"def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line",
"def _next_line(self):\n self.current_line += 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Push a line of code onto the buffer and run the buffer. If the interpreter successfully runs the code, clear the buffer. Return ("for stdout", "for_stderr", finished?) | def push(self, line):
self.buffer.append(line)
indent = len(re.match(r'[ ]*', line).group())
if line.endswith(':'):
indent = max(0, indent + self.config.tab_length)
elif line and line.count(' ') == len(self._current_line):
indent = max(0, indent - self.config.tab... | [
"def __execute_line(self):\n # Execute line in STM (if not blank)\n full_line = \"\".join(self.short_memory)\n if len(full_line.strip()) > 0:\n \n # Use locals/globals from LTM\n mem_globals, mem_locals = self.long_memory\n response = \"\"\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies the AMI to specified regions | def copy_AMI_to_regions(
AWS_access_key_id, AWS_secret_access_key, AMI_id, AMI_source_region, AMI_copy_regions, AMI_name
):
for region in AMI_copy_regions:
try:
logging.info(
f"Copying ami {AMI_id} from {AMI_source_region} to {region}")
ec2_client = boto3.client(
... | [
"def AMI_builder(\n AWS_access_key_id,\n AWS_secret_access_key,\n region_name,\n base_image_id,\n os,\n security_group_id,\n AMI_name,\n RPM_package_version,\n APT_OSS_version,\n):\n try:\n instance = Instance(\n AWS_access_key_id=AWS_access_key_id,\n AWS_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the ODFE AMI | def AMI_builder(
AWS_access_key_id,
AWS_secret_access_key,
region_name,
base_image_id,
os,
security_group_id,
AMI_name,
RPM_package_version,
APT_OSS_version,
):
try:
instance = Instance(
AWS_access_key_id=AWS_access_key_id,
AWS_secret_access_key=AW... | [
"def aws_create_afi(self) -> Optional[bool]:\n local_deploy_dir = get_deploy_dir()\n local_results_dir = f\"{local_deploy_dir}/results-build/{self.build_config.get_build_dir_name()}\"\n\n afi = None\n agfi = None\n s3bucket = self.s3_bucketname\n afiname = self.build_config... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return number of days until 2020, 10, 31. | def days_left():
return str((datetime(2020, 10, 31) - datetime.now()).days) | [
"def number_of_days(iteration):\r\n return iteration // 24",
"def ntradingdays():\n return 252*10",
"def days_until_next_birthday(self) -> int:\n return calculate_days_until(self.date_of_birth, date.today())",
"def days(n):\n return timedelta(days=n)",
"def elapsed_days(cls, year):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempts to read the cached results of summoner sid. If there's no cache, returns None | def readCache(sid):
fname = CACHE_DIR.format(sid=sid)
if os.path.isfile(fname):
return pickle.load(open(fname,'rb'))
else:
return None | [
"async def _get_cached_response(self, cache_key, name):\n if not self.cache:\n return\n try:\n cached_pickle = await self.cache.get(cache_key)\n if cached_pickle:\n self.log.info(\"Type of self.cache is: %s\", type(self.cache))\n return pi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Index page view, displaying all active leagues | def index(request):
active_leagues = League.objects.filter(status=True)
# Replace spaces with underscores for url representation
for league in active_leagues:
league.url = encode_url(league.name)
context = {'leagues' : active_leagues}
return render(request, 'layup/index.html', context) | [
"def league_list(request, league_id):\n\tleague = get_object_or_404(League, pk=league_id)\n\tsports = Sport.objects.all()\n\tleagues = league.sport.league_set.all()\n\tdivisions = league.division_set.all()\n\tteams = Team.objects.filter(division__league=league)\n\tathletes = Athlete.objects.filter(team__division__l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Team page view, displaying all players in specified team | def team(request, league_url, team_url):
# Check for valid league / team
league_name = decode_url(league_url)
league = get_object_or_404(League, name=league_name)
team_name = decode_url(team_url)
team = get_object_or_404(league.team_set, name=team_name)
players = team.player_set.all()
c... | [
"def team_index(request):\n extra_context = get_extra_context()\n players = Player.objects.filter().order_by('-number')\n extra_context['players'] = players\n return render_to_response(\"team.html\", extra_context,\n context_instance=RequestContext(request))",
"def list_pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
REST view for league, returns all league names | def rest_league(request):
try:
active_leagues = League.objects.filter(status=True)
serializer = league_serializer()
data = serializer.serialize(active_leagues)
except:
data = None
return HttpResponse([data], mimetype='application/json') | [
"def rest_team(request, league_url):\n\n # Check for valid league \n league_name = decode_url(league_url)\n\n try:\n league = League.objects.get(name=league_name)\n league_teams = league.team_set.all()\n serializer = league_serializer()\n data = serializer.serialize(league_teams... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
REST view for team, returns all team names, associated with league | def rest_team(request, league_url):
# Check for valid league
league_name = decode_url(league_url)
try:
league = League.objects.get(name=league_name)
league_teams = league.team_set.all()
serializer = league_serializer()
data = serializer.serialize(league_teams, fields=('nam... | [
"def team(request, league_url, team_url):\n\n # Check for valid league / team \n league_name = decode_url(league_url)\n league = get_object_or_404(League, name=league_name)\n\n team_name = decode_url(team_url)\n team = get_object_or_404(league.team_set, name=team_name)\n\n players = team.player_se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
REST view for team members, returns all player names associated with team | def rest_team_members(request, league_url, team_url):
# Check for valid data
try:
league_name = decode_url(league_url)
league = League.objects.get(name=league_name)
team_name = decode_url(team_url)
team = league.team_set.get(name=team_name)
players = team.player_set.a... | [
"def get_teams():\n api.get_teams()",
"def test_get_team_members(self):\n pass",
"def get_team_players(self, team):\n try:\n req = self._get('teams/{}/'.format(team['id']))\n team_players = req.json()['squad']\n if not team_players:\n return\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
HELPER FUNCTION Due to "player" being a dual entity object, data from the User model and Player model needs to be merged so that this architectural detail is abstracted from the user | def extract_player(user):
player = Player.objects.get(user=user)
serializer = league_serializer()
user_data = serializer.serialize(
[user],
fields=(
'username',
'first_name',
'last_name',
'email'
)
... | [
"def test_player_details_by_player(self):\n pass",
"def serialize_player(self):\n return {\n \"id\": self.p_id,\n \"last_name\": self.last_name,\n \"first_name\": self.first_name,\n \"date_of_birth\": self.birthday,\n \"gender\": self.gender,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create new league view | def create_league(request):
# If form submitted
if request.method =='POST':
league_form = LeagueForm(request.POST)
if league_form.is_valid():
# Process user update
league = league_form.save()
league.save()
# Get number of teams to create
... | [
"def create_tournament_view(self):\n\n self.commands.display_message(\n \"CHESS TOURNAMENT \\n\\n\\n \"\n \"\\t You choose to create a new tournament. \"\n \"Please complete all information necessary \\n \")\n\n name = self.check_tournament.check_name()\n locat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Player Login page view | def login_player(request):
# If form submitted
if request.method =='POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
# If authentication successful
if user is not None and user.is_a... | [
"def showLogin():\n return render_template('login.html')",
"def show_login():\n\n # form = LoginForm()\n\n return render_template(\"login.html\")",
"def login(self):\n self.state = 'logged_in'",
"def play_main_page():\r\n return render_template(\r\n \"play.html\",\r\n user... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Management page; create or edit leagues | def manage(request):
leagues = League.objects.all()
# Replace spaces with underscores for url representation
for league in leagues:
league.url = encode_url(league.name)
context = {'leagues' : leagues}
return render(request, 'layup/manage.html', context) | [
"def create_league(request):\n\n # If form submitted\n if request.method =='POST':\n league_form = LeagueForm(request.POST)\n\n if league_form.is_valid():\n # Process user update\n league = league_form.save()\n league.save()\n\n # Get number of teams t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a new setup.cfg file with a "cactuskeeper" section to a given directory. | def write_config_file(base_dir, content):
parser = configparser.ConfigParser()
parser.add_section("cactuskeeper")
for key, value in content.items():
parser.set("cactuskeeper", key, value)
with open(os.path.join(base_dir, "setup.cfg"), "w") as f:
parser.write(f) | [
"def write_all(self, dirname):\n if os.path.exists(dirname) is False:\n os.mkdir(dirname)\n\n self.write_mfcconfig(os.path.join(dirname, \"mfc_config\"))\n self.write_config(os.path.join(dirname, \"config\"))",
"def writeConfig(hostname, config, write_dir):\n # Create file\n # Cl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stitch together two faces sharing an edge. | def _stitch(edge, faces, bkdns, freq):
a, b = freq
face_0, face_1 = faces
shape_0 = len(face_0)
shape_1 = len(face_1)
bkdn_0 = bkdns[shape_0]
bkdn_1 = bkdns[shape_1]
improper = (a == b or b == 0)
roll_0, flip_0 = tiling.orient_face(face_0, edge)
roll_1, flip_1 = tiling.orient_face(fa... | [
"def redirect_edges(\n mesh: Mesh,\n edge_key_a: int,\n b_side_for_a: int,\n edge_key_b: int,\n a_side_for_b: int,\n):\n mesh.edge_to_neighbors[edge_key_a, b_side_for_a] = edge_key_b\n mesh.edge_to_neighbors[edge_key_b, a_side_for_b] = edge_key_a\n mesh.edge_lookup[edge_key_a, b_side_for_a] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find duplicate vertices in a subdivided polyhedron. | def _find_dupe_verts(base, bf, group, freq, bkdns):
#find redundant vertices
base_edges = base.edges
base_edge_corr, base_face_corr = base.faces_by_edge(base_edges)
l0 = []
l1 = []
for i in range(len(base_edges)):
edge = base_edges[i]
index = base_edge_corr == i
facex = b... | [
"def loopSubdivisionAlg(self):\n # variable for new vertex\n d = vertex(0, 0, 0)\n oldPtsSize = len(self.pts)\n # matrix indices of iv\n mv = [[1, 2, 0], [2, 0, 1], [0, 1, 2]]\n # for every triangle calculate edge points\n for i in range(0, len(self.tris)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper around the GCOPoly constructor that performs parallel projection and normalization. | def build_gco(base, frequency, proj, k=1, tweak=False, normalize=True):
poly = GCOPoly(base, frequency, proj, tweak)
if proj in projection.PARALLEL:
if k in MEASURES:
measure = MEASURES[k]
k = optimize_k(poly, base, measure, not tweak, normalize)
else:
k = flo... | [
"def polyContourProjection(offset0=\"string\", offset2=\"string\", method=int, offset1=\"string\", reduceShear=float, smoothness1=float, smoothness2=float, createNewMap=bool, name=\"string\", worldSpace=bool, nodeState=int, userDefinedCorners=bool, constructionHistory=bool, flipRails=bool, offset3=\"string\", smoot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper around the GCOPoly constructor that factors the frequency and uses the factors to repeatedly subdivide the grid. Uses lower-norm factors first. Also does everything `build_gco` does. | def build_gco_rep(base, frequency, proj, k=1, tweak=False,
normalize=True):
fs = base.face_size
fsu = np.unique(fs)
fsu = fsu[fsu > 2]
if len(fsu) != 1:
raise ValueError('cannot perform repeated subdivision on mixed grid')
elif fsu[0] == 3:
element = factor.Steineis... | [
"def build_gco(base, frequency, proj, k=1, tweak=False, normalize=True):\n poly = GCOPoly(base, frequency, proj, tweak)\n if proj in projection.PARALLEL:\n if k in MEASURES:\n measure = MEASURES[k]\n k = optimize_k(poly, base, measure, not tweak, normalize)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a subdivided polyhedron based on a base polyhedron, return the parallels to the base faces for each vertex in the polyhedron that would put the vertices onto the sphere | def parallels(poly, base, exact=True):
normals = base.face_normals[poly.base_face]
return projection.parallel(poly.vertices, normals, exact) | [
"def _mesh_space_subdivision(vertices, faces, voxel_size, n_sv):\n\n # Define the subvoxel boundaries\n xs = np.linspace(0, voxel_size[0], n_sv[0] + 1)\n ys = np.linspace(0, voxel_size[1], n_sv[1] + 1)\n zs = np.linspace(0, voxel_size[2], n_sv[2] + 1)\n relevant_triangles = [[] for _ in range(np.prod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given vertices and parallels, return points on (or near) the sphere. | def parallel_sphere(xyz, pls, k=1):
return xyz + k*pls | [
"def point_on_sphere(pt):\n theta, phi = pt\n return (cos(phi) * cos(theta), sin(phi) * cos(theta), sin(theta))",
"def sphere_plane_intersection(pos, r, theta=np.linspace(-180., 180.)):\n assert r > 0.0, \"Cannot work with zero radius sphere\"\n\n assert len(pos.shape) == 2, \"Need (N, 3) as pos-shape... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input graph, output its 'first' graceful labeling, if one exists (deterministic, but arbitrary) | def find_graceful_labeling(self, graph):
graph_copy = graph.copy()
nodes = graph_copy.get_nodes()
nodes.sort() # just in case
for node in nodes:
graph_copy.set_label(node, None)
edges = graph_copy.get_edges()
node_labels = set(range(len(edges) + 1))
... | [
"def LabelPropagation(graph):\n nodes = list(graph.nodes())\n n = len(nodes)\n adjDict = adjacencyDict(graph)\n labels = {i: i for i in graph.nodes}\n frequencies = np.ones(n)\n dontStop = True\n while dontStop:\n nodes = FYshuffle(nodes)\n dontStop = False\n for node in no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A single permitted_idp does not have any impact on the unit. | def test_single_permitted_idp(
mock_ssh_service_get_security_policy,
):
security_policy = DEFAULT_SECURITY_POLICY.copy()
security_policy['permitted_idps'] = ['globus.org']
mock_ssh_service_get_security_policy.return_value = security_policy
runner = CliRunner()
result = runner.invoke(token_autho... | [
"def test_portals_id_permission_put(self):\n pass",
"def test_idp_fantasy_players_with_adp(self):\n pass",
"def test_multiple_permitted_idps(\n mock_ssh_service_get_security_policy,\n):\n security_policy = DEFAULT_SECURITY_POLICY.copy()\n security_policy['permitted_idps'] = ['globus.org',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Multiple permitted_idps require use of the identity option. | def test_multiple_permitted_idps(
mock_ssh_service_get_security_policy,
):
security_policy = DEFAULT_SECURITY_POLICY.copy()
security_policy['permitted_idps'] = ['globus.org', 'foo.com']
mock_ssh_service_get_security_policy.return_value = security_policy
runner = CliRunner()
result = runner.invo... | [
"def test_single_permitted_idp(\n mock_ssh_service_get_security_policy,\n):\n security_policy = DEFAULT_SECURITY_POLICY.copy()\n security_policy['permitted_idps'] = ['globus.org']\n mock_ssh_service_get_security_policy.return_value = security_policy\n\n runner = CliRunner()\n result = runner.invok... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms a kernel function into a scaled kernel function (for a certain bandwidth `bw`) | def kernel(kernel_func):
@wraps(kernel_func) # just for naming
def decorated(x, bw):
if len(x.shape) == 1:
x = x.reshape(-1, 1)
dims = x.shape[-1]
# Euclidean norm
dist = np.sqrt((x * x).sum(axis=-1))
return kernel_func(dist / bw, dims) / (bw ** dims)
... | [
"def _kernweight(self, x ):\n if isinstance( self._kernels, CustomKernel ):\n ## Radial case\n #d = x.T * x\n #x is matrix, 2d, element wise sqrt looks wrong\n #d = np.sqrt( x.T * x )\n x = np.asarray(x)\n #d = np.sqrt( (x * x).sum(-1) )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
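The `kernel` decorator document above is truncated before its return statement. Assuming it ends with `return decorated`, a usage sketch (the `gaussian` kernel definition here is illustrative, not part of the dataset):

```python
import numpy as np

@kernel
def gaussian(u, dims):
    # Standard multivariate Gaussian kernel evaluated at the scaled distance u
    return np.exp(-0.5 * u ** 2) / (2 * np.pi) ** (dims / 2)

# The decorated function takes raw points and a bandwidth; the wrapper
# computes the Euclidean norm, scales by bw, and normalizes by bw ** dims.
x = np.linspace(-1.0, 1.0, 5)
print(gaussian(x, bw=0.5))
```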
Extract all the patterns from the documentation file. Collect a dictionary mapping keys to unique integer-tuple patterns, arbitrarily choosing a single name for each pattern. | def extract_patterns(infile):
all_patterns = {}
for idx, name, byte_indices in get_immediates(infile):
word_indices = coarsen_indices(byte_indices, BIT_WIDTH // 8)
if word_indices:
all_patterns[tuple(word_indices)] = name
return {v: k for k, v in all_patterns.items()} | [
"def patterns(self): # pragma: no cover",
"def patterns(self, ):\n retlist = []\n for subblock in self.blocks: \n for instance in self.blocks[subblock]:\n retlist.extend(instance.patterns.keys())\n return retlist",
"def gen_defined_specs(self):\n for pnum, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine new position, starting from current, moving 'move' elements and skipping 'skip' elements. Wrap around by applying length criterion | def new_pos(current, move, skip, length):
return (current + move + skip) % length | [
"def moveSequence(i : int, seq): # TEST\n for elt in seq:\n elt.moveBy(i)",
"def move(self, idx_old, idx_new):\n inLoop = False\n with self.lock:\n order = self.order\n if type(idx_old) != int and type(idx_new) != int:\n if len(idx_old) != len(i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the characters in the input_list using the input_lengths according to the given algorithm | def process_all(input_list, input_lengths):
skip = 0
pos = 0
length = len(input_list)
for r in range(0,64):
for ipl in input_lengths:
knot(input_list, pos, ipl)
pos = new_pos(pos, ipl, skip, length)
skip = (skip + 1 ) % length
return input_list | [
"def list_of_words_with_lengths(list_of_lengths):\n pyramid_list = []\n for x in list_of_lengths:\n pyramid_list.append(get_a_word_of_length_n(x))\n return pyramid_list",
"def get_lengths(input_list):\n\n for item in input_list:\n yield len(item)",
"def process_message(card_list, messa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
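The `process_all` document above depends on a `knot` helper that the row does not include. A minimal sketch of such a helper, assuming the wrapped-sublist reversal used by the knot-hash algorithm this code resembles:

```python
def knot(input_list, pos, length):
    # Reverse `length` elements of input_list starting at `pos`,
    # wrapping around the end of the list, in place.
    n = len(input_list)
    indices = [(pos + i) % n for i in range(length)]
    values = [input_list[i] for i in indices]
    for i, v in zip(indices, reversed(values)):
        input_list[i] = v
```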
Calculate the xor product for the sixteen numbers in the list provided | def xor_block(sixteen_bytes):
assert len(sixteen_bytes) == 16
result = 0
for n in sixteen_bytes:
result = result ^ n
return result | [
"def xor_list(x):\n state = 0\n for i in x:\n state = state ^ i\n\n return state",
"def form_dense_hash(numbers):\n return [reduce(xor, chunk) for chunk in list(form_chunks(numbers, 16))]",
"def list_xor(list1, list2):\n list3 = []\n for i in range(len(list1)):\n list3.append(lis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
parses one line of weather data (i.e. data for one month). | def parse_line(line):
if not line:
return None
# Split our first 4 fields, and the string containing temperature values for each day of the month.
record, temperature_string = (line[:11], int(line[11:15]), int(line[15:17]), line[17:21]), line[21:]
    # raise exception if temperature_string is too short.
if len(te... | [
"def parseLine(line):\n tokens = line.split(':')\n # print(tokens)\n stamp=\"\"\n temp=-999.0\n humt=-999.0\n pres=-999.0\n lght=-999.0\n clock=False\n thermometer=False\n barometer=False\n hygrometer=False\n firmware=\"\"\n hardware=\"\"\n devName=\"\"\n for t in tokens... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the map's size and allocate empty tiles. | def set_size(self, x_max, y_max):
self.x_max = x_max
self.y_max = y_max
self.tiles = [[ Tile()
for y in range(y_max) ]
for x in range(x_max) ] | [
"def __init__(self, map_size):\n\n self.map_size = map_size\n self.map = self._build_default_map()",
"def _clear_map(self, default=10):\n self.tiles = [\n [default\n for _ in range(self.height)]\n for _ in range(self.width)]\n\n for (x, y, score) in se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an entity. This takes up the 'entity' attribute of a tile. It is assumed the entity has a valid position, and this is used to determine which tile to place it on. It does not do any bounds checking on map. | def add_entity(self, x, y, entity):
tile = self.tiles[x][y]
if tile.entity is None:
tile.entity = entity
entity.owner = map
entity.x = x
entity.y = y
self.entities.append(entity)
else:
raise LogicException("Entity placed on ... | [
"def add_entity_as_inventory(self, x, y, entity):\n tile = self.tiles[x][y]\n if tile.inventory is None:\n tile.inventory = entity\n entity.owner = map\n entity.x = x\n entity.y = y\n self.entities.append(entity)\n else:\n raise ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an entity as 'inventory' to a tile. Inventory entities are those which are small/can be picked up etc. This takes up the 'inventory' attribute of a tile. It is assumed the item has a valid position, and this is used to determine which tile to place it on. It does not do any bounds checking on map. | def add_entity_as_inventory(self, x, y, entity):
tile = self.tiles[x][y]
if tile.inventory is None:
tile.inventory = entity
entity.owner = map
entity.x = x
entity.y = y
self.entities.append(entity)
else:
raise LogicException... | [
"def add_entity(self, x, y, entity):\n tile = self.tiles[x][y]\n if tile.entity is None:\n tile.entity = entity\n entity.owner = map\n entity.x = x\n entity.y = y\n self.entities.append(entity)\n else:\n raise LogicException(\"En... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an entity as 'inventory' from a tile. | def remove_entity_from_inventory(self, x, y):
tile = self.tiles[x][y]
entity = tile.inventory
if entity is None:
raise LogicException("Tried to remove inventory from (%d,%d) but there was nothing there." % (x, y))
entity.x = -1
entity.y = -1
entity.o... | [
"def pop_entity(self, entity):\n self._entity_stack.remove(entity)",
"def remove_entity(self, x, y):\n tile = map.tiles[x][y]\n entity = tile.entity\n \n if entity is None:\n raise LogicException(\"Tried to remove entity from (%d,%d) but there was nothing there.\" % (... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an entity from tile | def remove_entity(self, x, y):
tile = map.tiles[x][y]
entity = tile.entity
if entity is None:
raise LogicException("Tried to remove entity from (%d,%d) but there was nothing there." % (x, y))
entity.x = -1
entity.y = -1
entity.owner = None
t... | [
"def remove_entity(self, entity):\n self.entities.remove(entity)",
"def pop_entity(self, entity):\n self._entity_stack.remove(entity)",
"def remove_entity_from_inventory(self, x, y):\n tile = self.tiles[x][y]\n entity = tile.inventory\n \n if entity is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move an entity to a new location on the map. This overwrites any existing entity in the target tile. It does not do any bounds checking on map. | def move_entity(self, entity, x, y, is_player = False):
old_tile = self.tiles[entity.x][entity.y]
new_tile = self.tiles[x][y]
old_tile.entity = None
new_tile.entity = entity
entity.x = x
entity.y = y
if is_player and new_tile.inventory:
... | [
"def move(self, new_location):\n pass",
"def test_move(self):\n\n map1 = \"\"\"\\\n OOOOOO\n OODOJO\n OOJJOO\n OOOOOO\"\"\"\n rd.seed(5)\n m = Ma.Map(map1)\n m.populate_map((1, 2), [Fa.Carnivore(\n age=10... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Person initialized with the passing of first name, last name, and address | def __init__(self, firstName="", lastName="", address=""):
self.firstName = firstName
self.lastName = lastName
self.address = address | [
"def __init__(self, first_name, last_name, street_address):\n self.first_name = first_name.title()\n self.last_name = last_name.title()\n self.street_address = street_address.title()",
"def __init__(self, first=None, last=None, ID=None, email=None):\n self.first_name = first\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change Person's last name | def setLastName(self, name=""):
self.lastName = name | [
"def change_last_name(self, name):\n if not fullmatch(self.__MATCH_NAME, name):\n raise InvalidCustomerNameException(name)\n\n self.last_name = name",
"def last_user_name(self, value):\n self._last_user_name = value",
"def get_last_name(self):\n\t\treturn self.last_name",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
take trace after multiplying n_d_imp; return vector of dimension (D[0],) | def trace_with_nd(self):
v = np.asarray([1], dtype=np.complex)
for i in range(self.L-1, 0, -1):
T = self.T[i]
T = np.tensordot(T, v, axes=1)
v = np.trace(T, axis1=1, axis2=2)
T = self.T[0]
T = np.tensordot(T, v, axes=1)
T = np.transpose(T, (0,... | [
"def trace_with_d(self):\n\n v = np.asarray([1], dtype=np.complex)\n for i in range(self.L-1, 0, -1):\n T = self.T[i]\n T = np.tensordot(T, v, axes=1)\n T[:,0,:] = -T[:,0,:]\n v = np.trace(T, axis1=1, axis2=2)\n T = self.T[0]\n T = np.tensordot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
take trace after multiplying d; return vector of dimension (D[0],) | def trace_with_d(self):
v = np.asarray([1], dtype=np.complex)
for i in range(self.L-1, 0, -1):
T = self.T[i]
T = np.tensordot(T, v, axes=1)
T[:,0,:] = -T[:,0,:]
v = np.trace(T, axis1=1, axis2=2)
T = self.T[0]
T = np.tensordot(T, v, axes=1)... | [
"def trace_with_nd(self):\n\n v = np.asarray([1], dtype=np.complex)\n for i in range(self.L-1, 0, -1):\n T = self.T[i]\n T = np.tensordot(T, v, axes=1)\n v = np.trace(T, axis1=1, axis2=2)\n T = self.T[0]\n T = np.tensordot(T, v, axes=1)\n T = np.tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the test suite. This will register some equality assertion functions for helping compare Q and F objects. | def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
self.addTypeEqualityFunc(F, 'assertFEqual')
self.addTypeEqualityFunc(Q, 'assertQEqual') | [
"def prepare_tests(self):",
"def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(FRAMConnection))\n suite.addTest(unittest.makeSuite(FRAMActions))\n return suite",
"def test_init(self):\n measurement_set = self.measurement_set.data\n\n submissi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the description of the current test. This changes the default behavior to replace all newlines with spaces, allowing a test description to span lines. It should still be kept short, though. | def shortDescription(self):
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc | [
"def getTestDescription(self, user, fname):\n try: text = open(fname,'rb').read()\n except: return ''\n\n li_tags = re.findall('^[ ]*?[#]*?[ ]*?<(?P<tag>\\w+)>([ -~\\n]+?)</(?P=tag)>', text, re.MULTILINE)\n tags = '<br>\\n'.join(['<b>' + title + '</b> : ' + descr.replace('<', '<') for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the Evolution and Version models if missing. | def ensure_evolution_models(self):
Evolver()
assert Version.objects.exists() | [
"def create_models(self):\n self.__handle_csv()\n self.__export_models()",
"def _create_model_entities(self, models_list):\n for model_name in models_list:\n if model_name not in self.models:\n self.models[model_name] = Model(name=model_name)\n self.mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Record evolutions in the database. This is a convenience around creating and saving `Evolution` models. | def record_evolutions(self, version, evolutions,
database=DEFAULT_DB_ALIAS):
Evolution.objects.using(database).bulk_create([
Evolution(version=version,
app_label=app_label,
label=label)
for app_label, label in evolutio... | [
"def _on_app_models_updated(app, using=DEFAULT_DB_ALIAS, **kwargs):\n global _django_evolution_app\n\n if _django_evolution_app is None:\n _django_evolution_app = get_app('django_evolution')\n\n if (_evolve_lock > 0 or\n app is not _django_evolution_app or\n Version.objects.using(using... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Record applied migrations in the database. This is a convenience around creating a migration list and then recording it in the database. | def record_applied_migrations(self, migration_targets,
database=DEFAULT_DB_ALIAS):
assert supports_migrations
migration_list = MigrationList()
migration_list.add_migration_targets(migration_targets)
record_applied_migrations(connection=connections[data... | [
"def applied_migrations(self, value):\n self._applied_migrations = value",
"def write_migration_history(self):\n DatabaseHelper.write_migration_history(self.py_module_name, self.py_package)",
"def applied_migrations(self):\n return self._applied_migrations | self.extra_applied_migrations",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the SQL for the given mapping name and database. | def get_sql_mapping(self, name, sql_mappings_key=None, db_name=None):
sql_mappings_key = sql_mappings_key or self.sql_mapping_key
assert sql_mappings_key
db_name = db_name or self.default_database_name
assert db_name
sql_mappings = get_sql_mappings(mapping_key=sql_mappings_key,... | [
"def get_database_mapping(self, mapping_name):\n caller = sys._getframe(1).f_locals[\"self\"].__class__.__name__\n db_mapping = self.loader.request_db_mapping(mapping_name)\n self.log(2, \"Returning database mapping '{BLUE}{mapping_name}{RESET}' to '{MAGENTA}{caller}{RESET}'\", {\"caller\": cal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that a list of Evolution models matches expectations. | def assertEvolutionsEqual(self, evolutions, expected_evolutions):
self.assertEqual(
[
(evolution.app_label, evolution.label)
for evolution in evolutions
],
expected_evolutions) | [
"def assertAppliedEvolutions(self, expected_evolutions, version=None,\n database=DEFAULT_DB_ALIAS):\n if version is None:\n queryset = Evolution.objects.using(database)\n else:\n queryset = version.evolutions.all()\n\n applied_evolutions = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that applied evolutions match expectations. | def assertAppliedEvolutions(self, expected_evolutions, version=None,
database=DEFAULT_DB_ALIAS):
if version is None:
queryset = Evolution.objects.using(database)
else:
queryset = version.evolutions.all()
applied_evolutions = set(queryset.v... | [
"def assertEvolutionsEqual(self, evolutions, expected_evolutions):\n self.assertEqual(\n [\n (evolution.app_label, evolution.label)\n for evolution in evolutions\n ],\n expected_evolutions)",
"def test_round_verdict(inputs, expected):\n asse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that applied migrations match expectations. | def assertAppliedMigrations(self, expected_migration_targets,
database=DEFAULT_DB_ALIAS):
applied_migrations = MigrationList.from_database(connections[database])
for app_label, name in expected_migration_targets:
self.assertTrue(applied_migrations.has_migrati... | [
"def test_migrate(self):\n # Make sure no tables are created\n self.assertTableNotExists(\"migrations_author\")\n self.assertTableNotExists(\"migrations_tribble\")\n self.assertTableNotExists(\"migrations_book\")\n # Run the migrations to 0001 only\n call_command(\"migrate\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert generated SQL against database-specific mapped test SQL. This will output the provided generated SQL and the expectation test SQL mapped by the given key and optional database name, for debugging, and will then compare the contents of both. The expected SQL may contain regexes, which are used for comparing agains... | def assertSQLMappingEqual(self, sql, sql_mapping_name,
sql_mappings_key=None, database=None):
if database is None:
database = DEFAULT_DB_ALIAS
# Normalize the generated and expected SQL so that we are
# guaranteed to have a list with one item per line.
... | [
"def test_sql_statement(self) -> None:\n with patch.object(SQLAlchemyExtractor, '_get_connection'):\n extractor = SnowflakeTableLastUpdatedExtractor()\n extractor.init(self.conf)\n self.assertFalse(self.database_key in extractor.sql_stmt)",
"def test_sql_statement(self) -> ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that two F objects are identical. This will compare correctly for all supported versions of Django. | def assertFEqual(self, f1, f2):
if django.VERSION[0] >= 2:
# Django 2.0+ supports equality checks for F objects.
self._baseAssertEqual(f1, f2)
else:
# Django 1.11 and older does not, so we'll need to compare
# string representations.
#
... | [
"def testEquals(self):\n vf1 = VidFeed()\n vf2 = VidFeed()\n vf3 = VidFeed()\n\n vf1.feed_url = '127.0.0.1'\n vf2.feed_url = '127.0.0.1'\n vf3.feed_url = '192.168.1.1'\n\n self.assertEqual(vf1, vf2)\n self.assertTrue(vf1 == vf2)\n self.assertFalse(vf1 =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that two Q objects are identical. This will compare correctly for all supported versions of Django. | def assertQEqual(self, q1, q2, msg=None):
if django.VERSION[0] >= 2:
# Django 2.0+ supports equality checks for Q objects.
self._baseAssertEqual(q1, q2, msg=msg)
else:
# Django 1.11 and older does not, so we'll need to compare
# string representations.
... | [
"def assert_sets_equals(\n cls, lhs: Graph, rhs: Graph, exclude_blanks: bool = False\n ) -> None:\n lhs_set = cls.triple_or_quad_set(lhs, exclude_blanks)\n rhs_set = cls.triple_or_quad_set(rhs, exclude_blanks)\n assert lhs_set == rhs_set",
"def test_copy_queryset_with_filters():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalize the generated and expected SQL for comparison. This will run through each statement in the expected SQL, handling ordering and unordered lists of SQL, and turning it into a flat list of statements suitable for comparison. The generated SQL will be normalized along with the expected SQL. Any unordered statemen... | def _normalize_sql_for_compare(self, generated_sql, expected_sql):
i = 0
norm_generated_sql = []
norm_expected_sql = []
for outer_expected in expected_sql:
if (isinstance(outer_expected, six.text_type) or
hasattr(outer_expected, 'pattern')):
n... | [
"def format_sql_statements(sql_statements):\n sql_statements = sql_statements.strip()\n # Create a list of SQL statements with delimiter as \";\"\n sql_statements = sqlparse.split(SQL_STMNTS)\n # print \"sqls\",sql_statements\n\n for i, sql_statement in enumerate(sql_statements):\n sql_stateme... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Default function for creating test data for base models. By default, this won't do anything. | def default_create_test_data(self, db_name):
pass | [
"def test_new_base_model(self):\n self.new_helper(\"BaseModel\")",
"def setUp(self):\n db.create_all()\n self.db = db",
"def create_db(self, base):\n logger.info(\"Creating data models\")\n base.metadata.create_all(self.connection)",
"def setUpTestData(cls):\n profile... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the base model(s) that will be mutated in a test. These models will be registered in Django's model registry and queued up to be written to the database. Starting signatures based on these models will be provided, which the test is expected to mutate. | def set_base_model(self, base_model, name=None, extra_models=[],
pre_extra_models=[], db_name=None):
name = name or self.default_model_name
db_name = db_name or self.default_database_name
if self.base_model:
unregister_app('tests')
self.base_model = b... | [
"def test_save_base_model(self):\n self.save_helper(\"BaseModel\")",
"def test_reload_base_model(self):\n self.reload_helper(\"BaseModel\")",
"def test_all_base_model(self):\n self.all_helper(\"BaseModel\")",
"def setUp(self):\n setupModels(ConcretePublishableModel)",
"def test_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return signatures for a model representing the end of a mutation. Callers should construct a model that reflects the expected result of any mutations and provide that. This will register that model and construct a signature from it. | def make_end_signatures(self, dest_model, model_name, db_name=None):
db_name = db_name or self.default_database_name
end = self.register_model(model=dest_model,
name=model_name,
db_name=db_name)
end_sig = self.create_test_proj_... | [
"def ImputedSignatureModel():\n name = 'ImputedSignatureModel'",
"def _get_full_signature_list(self):\n return self._interpreter.GetSignatureDefs()",
"def output_signature(self):\n return _radio_astro_swig.vmedian_sptr_output_signature(self)",
"def get_signature_list(self):\n full_signature_de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform test evolutions and validate results. This is used for most common evolution-related tests. It handles generating signatures for a base model and an expected post-evolution model, ensuring that the mutations result in an empty diff. It then optionally simulates the evolutions on the signatures | def perform_evolution_tests(self,
dest_model,
evolutions,
diff_text=None,
expected_hint=None,
sql_name=None,
model_name=None,
... | [
"def perform_mutations(self, evolutions, end, end_sig, sql_name=None,\n rescan_indexes=True, db_name=None,\n create_test_data_func=None):\n app_label = 'tests'\n\n def run_mutations():\n if rescan_indexes:\n self.test_database... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a diff between signatures and check for expected results. The registered base signature and the provided ending signature will be diffed, asserted to be empty/not empty (depending on the arguments), and then checked against the provided diff text and hint. | def perform_diff_test(self, end_sig, diff_text=None, expected_hint=None,
expect_empty=False):
d = Diff(self.start_sig, end_sig)
self.assertEqual(d.is_empty(), expect_empty)
if not expect_empty:
if diff_text is not None:
self.assertEqual(str(... | [
"def diff(*seqs, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def assertDiff(func):\n pp = PrettyPrinter().pformat\n\n @wraps(func)\n def wrapper(self, **kwargs):\n queue = func(self, **kwargs)\n\n a = queue.next()\n b = queue.next()\n expected = que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run simulations and verify that they result in an end signature. This will run through an evolution chain, simulating each one on a copy of the starting signature, and then verifying that the signature is properly transformed into the expected ending signature. | def perform_simulations(self, evolutions, end_sig, ignore_apps=False,
db_name=None):
db_name = db_name or self.default_database_name
self.test_database_state = self.database_state.clone()
test_sig = self.start_sig.clone()
for mutation in evolutions:
... | [
"def test_upload_signatures(self) -> None:\n\n e2e_handler = self.hs.get_e2e_keys_handler()\n\n # register two devices\n u1 = self.register_user(\"user\", \"pass\")\n self.login(u1, \"pass\", device_id=\"D1\")\n self.login(u1, \"pass\", device_id=\"D2\")\n\n # expect two ed... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply mutations and verify the results. This will run through the evolution chain, applying each mutation on the database and against the signature, and then verifying the resulting signature and generated SQL. | def perform_mutations(self, evolutions, end, end_sig, sql_name=None,
rescan_indexes=True, db_name=None,
create_test_data_func=None):
app_label = 'tests'
def run_mutations():
if rescan_indexes:
self.test_database_state.resca... | [
"def perform_mutation(cls, _root, info, **data):\n\n # Retrieve the data\n original_instance = cls.get_instance(info, **data)\n data = data.get(\"input\")\n\n # Clean the input and generate a new instance from the new data\n cleaned_input = cls.clean_input(info, original_instance,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register a model for the test. This will register not only this model, but any models in | def register_model(self, model, name, db_name=None, **kwargs):
self._models_registered = True
models = self.pre_extra_models + [(name, model)] + self.extra_models
return register_models(database_state=self.database_state,
models=models,
... | [
"def register_model(name: str) -> None:\n # Add the model to the list of valid models.\n VALID_MODELS.append(name)",
"def test_add_model(self):\n pass",
"def register_models() -> None:\n db.create_all()\n db.session.commit()",
"def register_model(mar_file_path, model_store, torchserve):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a project signature for the given models. The signature will include not only these models, but any models in | def create_test_proj_sig(self, model, name, extra_models=[],
pre_extra_models=[]):
return create_test_project_sig(models=(
self.pre_extra_models + pre_extra_models + [(name, model)] +
extra_models + self.extra_models
)) | [
"def make_end_signatures(self, dest_model, model_name, db_name=None):\n db_name = db_name or self.default_database_name\n\n end = self.register_model(model=dest_model,\n name=model_name,\n db_name=db_name)\n end_sig = self.create... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy a list of models. This will be a deep copy, allowing any of the copied models to be altered without affecting the originals. | def copy_models(self, models):
return copy.deepcopy(models) | [
"def copy(self):\n\n models_copy = [m.copy() for m in self._models]\n return self.__class__(init=models_copy)",
"def copy_items(self, item_list):\n\t\tself.__session_model.copy_items(item_list)",
"def copy(self) -> List:\n return self._list.copy()",
"def copy(self):\n\n rv = Fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Override database routers for a test. This clears the router cache before and after the test, allowing custom routers to be used during unit tests. | def override_db_routers(self, routers):
try:
with override_settings(DATABASE_ROUTERS=routers):
self.clear_routers_cache()
yield
finally:
self.clear_routers_cache() | [
"def test_unregister_router(self):\n pass",
"def setUp(self):\n cache.clear()\n return super().setUp()",
"def test_router_urls(self):\n # Create a model and viewset with at least one special method.\n class PhonyModel(models.Model):\n class Meta:\n ap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clear the router cache. | def clear_routers_cache(self):
router.routers = ConnectionRouter().routers | [
"def _clear_cache(self):\n self.cache = None",
"def clear_cache(self):\n global JR_SITE_CACHE\n JR_SITE_CACHE = {}",
"def clear_cache(self) -> None:\n self._load_cache = None",
"def clear_cache(self):\n cache.delete_many(self.get_cache_keys())",
"def clear_caches(self):",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that the main gen command works as expected | def test_gen():
# Define a click runner to invoke click commands
logger.info("Calling 'gen' with a specific amount of scores.")
gen_case(
n_subjects=5,
n_probes_per_subject=5,
n_unknown_subjects=2,
n_pos=10,
n_neg=60,
n_unk=20,
)
logger.info("Calling... | [
"def test_main_generate(mocker, seed, poly):\n m_generate = mocker.patch('spanners.cli.service.generate')\n\n argv = 'exec generate 1000 1000 40 20 file.txt'.split()\n if seed is not None:\n argv.append('-s')\n argv.append(seed)\n if poly is not None:\n argv.append('-p')\n ar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the multiplier for the given stage. | def get_multiplier(self, stage: int) -> float:
return self._multipliers[stage] | [
"def cp_multiplier(self, level):\n i = round(2 * level) - 2\n return self._cp_multiplier[int(i)]",
"def _getMultiplier(self, category: str) -> float:\n multiplier = 1.0\n for m in self.modules:\n if isinstance(m, Module):\n multiplier += getattr(m, category)\n return round(multi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the model classes for chart. The dataChanged signal of the model is used to notify when new data is available | def setModel(self, model):
self.model = model
self.model.dataChanged.connect(self.fetchData) | [
"def setModelData(self, editor, model, index):\n \n pass",
"def setDataModel(self, datamodel: 'ScXMLDataModelElt') -> \"void\":\n return _coin.ScXMLScxmlElt_setDataModel(self, datamodel)",
"def _data_changed(self, old, new):\n\n if has_attributes(self.data):\n aa = self._assig... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches point data from model and starts chart redrawing | def fetchData(self, topLeft, bottomRight):
self.points = self.model.data(None)
self.repaint()
self.update() | [
"def updateDataset(self):\n\t\tif self.visualizer:\n\t\t\tself.visualizer.setTimepoint(self.currentTimePoint)",
"def set_model(self, model):\n if not self.modelcache.has_model(model):\n return False# Model not yet loaded\n self.current_model = model\n xdata = self.get_xdata()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returned meta should be a valid scheme. Process your data and populate the scheme struct (src/core/scheme/definition.py) | def __call__(self, scheme):
yield scheme.validator.check([{
"imdb_code": "tt00000",
"title": "A Fork in the Road",
"year": 2010, "rating": 6,
"runtime": 105,
# if MIXED_RESOURCES=False then its needed for split dbs and keep groups for diff resources
... | [
"def _gen_meta(self):\n meta = {\"encode_dict\" : self.encode_dict,\n \"word_length\" : self.word_len,\n \"data_length\" : self.data_length,\n \"magic_number\" : MAGIC_NUMBER}\n return meta",
"def rdfMeta(self):\n\t\treturn {\n\t\t\t'label' \t\t: {'uri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function takes an image filename and converts the image to PDF | def convert_image_to_pdf(filename):
pass
# try:
# # Verify filename
# if not filename or type(filename) != str:
# return 'Invalid image filename'
# # Confirm that file exists
# if not os.path.isfile(filename):
# return 'Image file not found'
#
# ... | [
"def convert2pdf(pdfpath,imgpath):\n pages = convert_from_path(pdfpath, 350)\n i = 1\n for page in pages:\n image_name = imgpath + str(i) + \".jpg\"\n page.save(image_name, \"JPEG\")\n i = i+1",
"def put_image_into_pdf(self, image, pdf):\n # Generate random string\n random_file_name = ''... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
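The `convert_image_to_pdf` document above is a stub: a bare `pass` followed by commented-out validation code. A minimal sketch of the described conversion using Pillow (the choice of library and the output path are assumptions; the dataset's original implementation is commented out and truncated):

```python
import os
from PIL import Image

def convert_image_to_pdf(filename):
    # Verify the filename argument
    if not filename or not isinstance(filename, str):
        return 'Invalid image filename'
    # Confirm that the file exists
    if not os.path.isfile(filename):
        return 'Image file not found'
    # PDF has no alpha channel, so convert to RGB before saving
    image = Image.open(filename).convert('RGB')
    pdf_path = os.path.splitext(filename)[0] + '.pdf'
    image.save(pdf_path)
    return pdf_path
```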
Concatenate list of images vertically with the same width | def get_concat_vertical(image_list: list):
try:
if image_list:
# Get first image in list
image1 = image_list.pop(0)
# Loop through the rest of the files
for image2 in image_list:
# Create a background
dst = Image.new('RGB', (ima... | [
"def concat_images(images):\n spacer = np.ones([64, 1], dtype=np.float32)\n images_with_spacers = []\n\n image_size = len(images)\n\n for i in range(image_size):\n images_with_spacers.append(images[i])\n if i != image_size - 1:\n # Add one pixel spacing.\n images_with... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads data from a csv file containing parents' and sometimes teachers' and clinicians' severity assessments of a disease and factor values. This csv file contains all the data required to perform the SAOB. | def _common_read(csv_file, raters):
# Import a csv file as a dataframe
df = pd.read_csv(csv_file)
# Name of studies with an evaluation and its associated score
indices_name_studies = df.loc[ (df['Time'] == "pre")
& (df['Raters'] == raters),
['Mean', 'Std']
... | [
"def import_csv(csv_file):\n \n # Import parents ans teachers values\n df_values_parents = _common_read(csv_file, raters='Parents')\n df_values_teachers = _common_read(csv_file, raters='Teachers')\n \n return df_values_parents, df_values_teachers",
"def load_assessment_data():\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Imports data from a csv file containing parents' and sometimes teachers' severity assessments of a disease and factor values. This csv file contains all the data required to perform the SAOB. | def import_csv(csv_file):
    # Import parents and teachers values
df_values_parents = _common_read(csv_file, raters='Parents')
df_values_teachers = _common_read(csv_file, raters='Teachers')
return df_values_parents, df_values_teachers | [
"def load_assessment_data():\n return load('user_assessment_scores.csv')",
"def main(csv_path: str = SENSOR_CSV_PATH) -> None:\n user, pw = secrets.db.epi\n engine = sqlalchemy.create_engine(f\"mysql+pymysql://{user}:{pw}@{secrets.db.host}/{DB_NAME}\")\n for filepath, attribute in CsvImporter.find_issue... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
view course schedule route | def view_schedule():
ses = session['user_id']
classes = db.session.query(Registered_Courses.id, Registered_Courses.course_id, Course_Info.dept, Course_Info.courseNum, Course_Info.courseTitle).join(Course_Info, Registered_Courses.course_id == Course_Info.id).filter(Registered_Courses.user_id == ses)
if re... | [
"def show_schedule(context):\n program = context['program']\n\n return {'program': program}",
"def offering_schedule(request, course_sec_id):\n\n offering = get_object_or_404(Offering, course_sec_id=course_sec_id)\n\n return render_to_response(\n 'courses/offering_schedule.html',\n local... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the splits for the AutoDL Dataset. It returns a dictionary whose values correspond to the class names associated with the key (split). | def create_splits(self):
filepaths = collections.defaultdict(list)
for i,row in data.iterrows():
filepaths[row[info['category_column_name']]].append(row[info['image_column_name']])
keys = list(filepaths.keys())
num_classes = len(keys)
... | [
"def create_splits(self):\n # Load class names from the text file\n file_path = TRAFFICSIGN_LABELS_PATH\n with tf.io.gfile.GFile(file_path) as fd:\n all_lines = fd.read()\n # First line is expected to be a comment.\n class_names = all_lines.splitlines()[1:]\n\n err_msg = 'number of classes in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing discard_report function of Testsuite class in twister | def test_discard_report(class_testsuite, platforms_list, all_testcases_dict, caplog, tmpdir):
class_testsuite.platforms = platforms_list
class_testsuite.testcases = all_testcases_dict
filename = tmpdir.mkdir("test_discard").join("discard_report.csv")
with pytest.raises(SystemExit):
class_testsui... | [
"def clear_test_result(self, test):",
"def teardown_test():\n warnings.simplefilter('default')",
"def clear_report_results(self):",
"def pytest_ignore(cls):\n cls.__test__ = False\n return cls",
"def test_cancel_report_schedule(self):\n pass",
"def visitTrialAfter(self, testSuite):",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing csv_report function of Testsuite class in twister | def test_csv_report(class_testsuite, instances_fixture, tmpdir):
class_testsuite.instances = instances_fixture
filename = tmpdir.mkdir("test_csv").join("twister_csv_report.csv")
class_testsuite.csv_report(filename)
assert os.path.exists(filename)
assert os.stat(filename).st_size != 0
mydict = {... | [
"def test_export_csv_in_job(self):\n pass",
"def test_export_csv_to_file(self):\n pass",
"def test_csvdata(db, specialization, slu1, slu2, student, grade_slu1, grade_slu2):\n\n specialization.unit_count = 2\n spc_list = [specialization]\n unit_list = [slu1, slu2]\n object_list = [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing xunit_report function of Testsuite class in twister | def test_xunit_report(class_testsuite, test_data,
instances_fixture, platforms_list, all_testcases_dict):
class_testsuite.platforms = platforms_list
class_testsuite.testcases = all_testcases_dict
kwargs = {"exclude_tag" : ['test_a'], "exclude_platform" : ['demo_board_1'],
... | [
"def visitTrial(self, testSuite):",
"def summariseSuiteResult(self, suite):",
"def test_target_report(class_testsuite, instances_fixture, tmpdir_factory):\n class_testsuite.instances = instances_fixture\n outdir = tmpdir_factory.mktemp(\"tmp\")\n class_testsuite.xunit_report = MagicMock(side_effect=cla... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing compare_metrics function of Testsuite class in twister | def test_compare_metrics(class_testsuite, test_data, instances_fixture, caplog):
class_testsuite.instances = instances_fixture
for instance in class_testsuite.instances.values():
instance.metrics["ram_size"] = 5
instance.metrics["rom_size"] = 9
filename_not_exist = test_data + "twister_file_... | [
"def test_get_metrics(self):\n pass",
"def testGetMetrics(self):\n ## case with nonzero true positives, true negatives, and false negatives\n actual = torch.FloatTensor(np.array([[[1.1, 1.1], [0, .99]]]))\n predicted = torch.FloatTensor(np.array([[[1.05, .99],[.99, 1.1]]]))\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing target_report function of Testsuite class in twister | def test_target_report(class_testsuite, instances_fixture, tmpdir_factory):
class_testsuite.instances = instances_fixture
outdir = tmpdir_factory.mktemp("tmp")
class_testsuite.xunit_report = MagicMock(side_effect=class_testsuite.xunit_report)
class_testsuite.target_report(outdir, "abc", append=False)
... | [
"def visitTrial(self, testSuite):",
"def summariseSuiteResult(self, suite):",
"def test_get_report_data(self):\n pass",
"def test_create_report_task(self):\n pass",
"def reporter(self):\r\n pass",
"def test_get_report_document(self):\n pass",
"def visitTrialAfter(self, testSu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
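The test above uses a pattern worth isolating: wrapping a real method in MagicMock(side_effect=...) so the call still executes but its invocations can be asserted on. A minimal illustration with a hypothetical class:

```python
from unittest.mock import MagicMock

class Reporter:
    def xunit_report(self, path):
        return f"wrote {path}"

r = Reporter()
# Wrap the real bound method: calls pass through, arguments are recorded.
r.xunit_report = MagicMock(side_effect=r.xunit_report)

print(r.xunit_report("out.xml"))          # -> wrote out.xml
r.xunit_report.assert_called_once_with("out.xml")
```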
Get the version from |launcher|. | def get_version(opts):
# Make sure we don't search $PATH when signing the "repo" file in the cwd.
launcher = os.path.join(".", opts.launcher)
cmd = [launcher, "--version"]
ret = util.run(opts, cmd, encoding="utf-8", stdout=subprocess.PIPE)
m = re.search(r"repo launcher version ([0-9.]+)", ret.stdout... | [
"def _get_version(self):\n solver_exec = self.executable()\n if solver_exec is None:\n return _extract_version('')\n results = pyutilib.subprocess.run( [solver_exec,'-c','quit'], timelimit=1 )\n return _extract_version(results[1])",
"def getAppVersion():\n return os.environ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function to process an event log JSON file to extract data, process it and load it into the time table, users dimension table and songplays fact table | def process_log_file(cur, filepath):
# open log file
df = pd.read_json(filepath, lines=True)
    # filter by NextSong action. Each log file may have more than one record.
    df = df[df['page'] == 'NextSong']
filtered_ts_values = df[["ts"]].values
ts_data = []
# Iterate through each record for ts and ... | [
"def process_log_file(cur, filepath): \n \n # open log file \n df_log = pd.read_json(filepath, lines=True)\n \n df_log = df_log.loc[df_log['page'] == 'NextSong'] # filter dataframe to only include rows with page == 'NextSong'\n \n df_log['timestamp'] = pd.to_datetime(df_log['ts'], unit = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
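The entry's code is cut off just as it starts iterating over the raw ts values. Here is a sketch of the usual pandas approach — converting millisecond epochs once and deriving the time-table columns from the result; the column names are assumptions consistent with the entry's first negative.

```python
import pandas as pd

# Hypothetical millisecond epochs, as found in the log file's "ts" column.
df = pd.DataFrame({"ts": [1541105830796, 1541106106796]})

# Convert once, then derive every time-table column from the datetime series.
t = pd.to_datetime(df["ts"], unit="ms")
time_df = pd.DataFrame({
    "start_time": t,
    "hour": t.dt.hour,
    "day": t.dt.day,
    "week": t.dt.isocalendar().week,
    "month": t.dt.month,
    "year": t.dt.year,
    "weekday": t.dt.weekday,
})
print(time_df)
```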
Greet people. Say hi if they are your friend. Give them $20 if they are your friend and you have enough money. Steal $10 from them if they are not your friend. | def greet(friend, money):
if friend and (money > 20):
print("Hi")
money = money - 20
elif friend:
print("Hello")
else:
print("Ha ha!")
money = money + 10
return money | [
"def send_a_thank_you():\n\n # Prompt for donor's full name\n new_donor = prompt_for_donor()\n\n # Prompt for the donation amount\n prompt_for_donation_amount(new_donor)\n\n # Add donor to collection\n donor_list.add_donor(new_donor)\n\n # Print out a letter customized for the donor and amount\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the initial blocks, finds the length and court importance of each document. Specifically, each of the files is then read and processed. Moreover, doc ids, length information and court importance are computed here. Returns a dictionary that maps each document ID to its length and court importance information. I... | def create_blocks_and_find_lengths(doc_list):
block = Block()
lengths_and_court_importance = {}
for doc in doc_list:
doc_id = int(doc[0])
content = doc[1:]
(length, court_importance) = process_document(content, doc_id, block)
lengths_and_court_importance[doc_id] = (length, ... | [
"def process_document(content, doc_id, block):\n COURT_INDEX = 3\n tokens = tokenize_document(content, doc_id)\n court_importance = compute_court_importance(content[COURT_INDEX])\n length = compute_doc_vector(tokens)\n update_block(block, tokens)\n\n return (length, court_importance)",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes the content by tokenizing it and computes its length. Then, update the given block and return the length and the court's importance of this document. | def process_document(content, doc_id, block):
COURT_INDEX = 3
tokens = tokenize_document(content, doc_id)
court_importance = compute_court_importance(content[COURT_INDEX])
length = compute_doc_vector(tokens)
update_block(block, tokens)
return (length, court_importance) | [
"def create_blocks_and_find_lengths(doc_list):\n\n block = Block()\n lengths_and_court_importance = {}\n\n for doc in doc_list:\n doc_id = int(doc[0])\n content = doc[1:]\n (length, court_importance) = process_document(content, doc_id, block)\n lengths_and_court_importance[doc_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the length information using the given tokens. Returns `(scalar length, [(term, weighted tf)])`. | def compute_doc_vector(tokens):
length = 0
doc_vec = []
for (term, _, freq, _) in tokens:
weighted_tf = calculate_weighted_tf(freq)
length += weighted_tf ** 2
doc_vec.append((term, weighted_tf))
# Sort by descending weighted tf
doc_vec = sorted(doc_vec, key=lambda term_tf :... | [
"def sentence_lengths(context, tokenizer):\n list_context = [[p[0] + \" \"] + p[1] for p in context] # squeeze header into the paragraph\n tokenized_sentences = [[tokenizer.tokenize(s) for s in p] for p in list_context] # list[list[list[str]]]\n sentence_lengths = [[len(s) for s in p] for p in tokenized_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
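calculate_weighted_tf is not shown in the entry, and the code is cut off before the scalar length is finished. Below is a self-contained version assuming the standard log-frequency weighting (1 + log10(tf)) and a square root for the Euclidean length, with the token tuples simplified to (term, freq) pairs:

```python
import math

def calculate_weighted_tf(freq):
    # Assumed standard log-frequency weighting: 1 + log10(tf) for tf > 0.
    return 1 + math.log10(freq) if freq > 0 else 0

def compute_doc_vector(tokens):
    length = 0
    doc_vec = []
    for term, freq in tokens:
        weighted_tf = calculate_weighted_tf(freq)
        length += weighted_tf ** 2
        doc_vec.append((term, weighted_tf))
    # Sort by descending weighted tf; the length is the vector's L2 norm.
    doc_vec.sort(key=lambda term_tf: -term_tf[1])
    return (math.sqrt(length), doc_vec)

print(compute_doc_vector([("court", 4), ("appeal", 2), ("law", 1)]))
```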
Adds the tokens to the given block as long as the block is not full. Once the block is full, we will save the dictionary that the block has built so far, and clear the block. | def update_block(block, tokens):
for token in tokens:
if block.is_full():
block.save_dictionary()
block.clear()
block.add(token) | [
"def add_block(self, block):\n if isinstance(block, Block):\n if block in self.blocks:\n raise ValueError('Duplicate block:' + block.name + 'already exists.')\n else:\n self.blocks[block] = block",
"def finalize_and_add_block(\n self, block, gas_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
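The Block interface is only implied by the calls in the entry (is_full, add, save_dictionary, clear). A hypothetical in-memory stand-in that satisfies it, with the flush-to-disk step stubbed as a print so the flow is observable:

```python
class Block:
    CAPACITY = 4  # assumed small cap; real blocks are bounded by memory

    def __init__(self):
        self.tokens = []

    def is_full(self):
        return len(self.tokens) >= self.CAPACITY

    def add(self, token):
        self.tokens.append(token)

    def save_dictionary(self):
        print("flushing", len(self.tokens), "tokens")  # stand-in for disk write

    def clear(self):
        self.tokens = []

def update_block(block, tokens):
    for token in tokens:
        if block.is_full():
            block.save_dictionary()
            block.clear()
        block.add(token)

update_block(Block(), ["a", "b", "c", "d", "e", "f"])
```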
Computes the court importance based on the `court_text`, the text that is found in the 'court' zone. | def compute_court_importance(court_text):
if court_text in MOST_IMPORTANT_COURTS:
return 2
elif court_text in SOMEHOW_IMPORTANT_COURTS:
return 1
else:
return 0 | [
"def _get_total_cost(self, text: str) -> int:\n\n return sum([self.splitter.word_cost.get(word, self.default_cost) for word in self.splitter.split(text)])",
"def analyze(self, text):\n # split sentences into words\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokeni... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert hex color code to OpenGL float | def tofloat(hex):
return (1.0 / 255) * ord(hex.decode('hex')) | [
"def hex_to_rgb_float(hex_value):\r\n r, g, b, _a = hex_to_rgba_float(hex_value)\r\n return r, g, b",
"def rgb_hex2float(cls, hex_value: str) -> tuple[float, float, float]:\n r = int(hex_value[0:2], base=16) / 255\n g = int(hex_value[2:4], base=16) / 255\n b = int(hex_value[4:6], base=1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
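The entry's one-liner depends on Python 2's str.decode('hex') and shadows the hex builtin; here is a Python 3 equivalent under the same assumption that the input is a two-character channel string such as 'ff':

```python
def tofloat(hex_pair):
    # "ff" -> 255 -> 1.0; int(..., 16) replaces the ord/decode('hex') dance.
    return int(hex_pair, 16) / 255.0

# Split "#1e90ff" into per-channel OpenGL floats.
color = "1e90ff"
r, g, b = (tofloat(color[i:i + 2]) for i in (0, 2, 4))
print(r, g, b)
```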
This method initializes the header | def InitializeHeader (self):
self.Nonce = ''.join(random.choice(string.digits) for _ in range (9))
self.AuthDateTime = datetime.datetime.now().strftime('%m%d%H%M%S')
self.Headers = {
'Accept-Language': 'en-US',
... | [
"def __init__(self):\n self.__headers = {\n \"User-Agent\":\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \\\n(KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36\",\n \"Connection\": \"keep-alive\",\n \"Accept-Encoding\": \"gzip, deflate\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter data based on year, genre and language. | def filter_data(self,
languages: List[str],
genres: List[str],
year_from: int = OLDEST_YEAR,
year_to: int = NEWEST_YEAR):
language_mask = self._dataframe[LANGUAGE_COL]. \
apply(lambda x: self.contains(x, languages))
... | [
"def get_movies_by_year(self, year):\r\n raise NotImplementedError",
"def filter_by_year(data: dict, year: int) -> dict:\n filtered_data = data | {\"places\": []}\n\n for place in data[\"places\"]:\n dataframes = []\n\n for dataframe in place[\"data\"]:\n if dataframe[\"start... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
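A standalone sketch of the boolean-mask pattern the entry builds before its code is cut off: one mask per criterion, combined with &. The column names and the contains helper are assumptions mirroring the entry.

```python
import pandas as pd

df = pd.DataFrame({
    "language": [["en"], ["fr"], ["en", "de"]],
    "genre": [["drama"], ["comedy"], ["drama"]],
    "year": [1999, 2005, 2012],
})

def contains(cell, wanted):
    # True if the list-valued cell shares any item with the wanted values.
    return bool(set(cell) & set(wanted))

language_mask = df["language"].apply(lambda x: contains(x, ["en"]))
genre_mask = df["genre"].apply(lambda x: contains(x, ["drama"]))
year_mask = (df["year"] >= 2000) & (df["year"] <= 2020)

print(df[language_mask & genre_mask & year_mask])
```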
Convert string to integer. | def str_to_int(self, string):
try:
return int(string)
except ValueError:
return None | [
"def _to_int( self, str ):\n tmp = 1\n try:\n tmp = int( str)\n except ValueError:\n pass\n\n return tmp",
"def _to_int(string):\n if string == \"\":\n return 0\n return int(string)",
"def string_to_number(s) -> int:\n return int.from_bytes(s.enc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load JSON with all information about macros. | def load_macros(self, files=None):
return self.load_data_by_key(self.macros_folder, files) | [
"def load_macros(self):\n f = self.fs.join(\"macros.py\")\n if f.exists() and f.is_file():\n logging.info(\"loading macros file %s\", f)\n spec = importlib.util.spec_from_file_location(\"macros\", f)\n module = importlib.util.module_from_spec(spec)\n spec.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Appends an event object to an output buffer and queues. | def _AppendEvent(self, event_object, output_buffer, event_queues):
output_buffer.Append(event_object)
    # Needed due to duplicate removals: if two events
    # are merged, we'll just pick the first inode value.
inode = getattr(event_object, u'inode', None)
if isinstance(inode, basestring):
inod... | [
"def Append(self, event_object):\n if not self.check_dedups:\n self.formatter.WriteEvent(event_object)\n return\n\n if event_object.timestamp != self._current_timestamp:\n self._current_timestamp = event_object.timestamp\n self.Flush()\n\n key = event_object.EqualityString()\n if key... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the analysis plugin options to the argument group | def AddAnalysisPluginOptions(self, argument_group, plugin_names):
if plugin_names == u'list':
return
plugin_list = set([
name.strip().lower() for name in plugin_names.split(u',')])
# Get a list of all available plugins.
analysis_plugins = (
analysis_manager.AnalysisPluginManager.... | [
"def define_sub_options(self):\n self.plugin_parser = self.parser.add_argument_group(\"Plugin Options\",\n \"Options for all plugins.\")\n self.plugin_parser.add_argument(\"-H\", \"--host\",\n default='12... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the output module options to the argument group | def AddOutputModuleOptions(self, argument_group, module_names):
if module_names == u'list':
return
modules_list = set([name.lower() for name in module_names])
manager = output_manager.OutputManager
for output_module_string, _ in manager.GetOutputs():
if not output_module_string.lower() in ... | [
"def add_opts(self, optparser):\n return",
"def add_arg_group(parser, title):\n return parser.add_argument_group(f'{title} arguments')",
"def define_sub_options(self):\n self.plugin_parser = self.parser.add_argument_group(\"Plugin Options\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lists the language identifiers. | def ListLanguageIdentifiers(self):
self.PrintHeader(u'Language identifiers')
self.PrintColumnValue(u'Identifier', u'Language')
for language_id, value_list in sorted(
language_ids.LANGUAGE_IDENTIFIERS.items()):
self.PrintColumnValue(language_id, value_list[1]) | [
"def languages(self):\n return list(self.family.langs.keys())",
"def get_languages(self) -> List[str]:\n languages = []\n for key in self.keys():\n if key in self:\n languages.append(key)\n return languages",
"def get_languages():\n print(classifier.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |