| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
Initializes the Theano shared variables. This should be called in the process where you want to do the updating. All further calls must be from the same process. The network.gparams must be created in the same process. | def initVars(self, network, net_param_deltas):
assert not self.isInitialized
self.pid = os.getpid()
self.network = network
if net_param_deltas is not None:
self.update_on_device = True
self.net_train_param_deltas = net_param_deltas
else:
self.update_on_device = False
self.net... | [
"def initialise_theano_functions(self):\n\n gradient_wrt_W = theano.tensor.grad(cost=self.get_cost(), wrt=self.W)\n gradient_wrt_b = theano.tensor.grad(cost=self.get_cost(), wrt=self.b)\n updates = [\n (self.W, self.W - self.learning_rate * gradient_wrt_W),\n (self.b, self.b - self.learning_rate ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
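The docstring in this first row pins all shared-variable work to a single process. Below is a minimal sketch of that pid-pinning idiom, independent of Theano; `Updater`, `init_vars`, and `update` are hypothetical names, not the original API:

```python
import os

class Updater:
    def __init__(self):
        self.pid = None  # process that owns the shared state

    def init_vars(self):
        # Call this in the process that will do the updating.
        assert self.pid is None, "already initialized"
        self.pid = os.getpid()

    def update(self):
        # All further calls must come from the same process.
        assert self.pid == os.getpid(), "called from a different process"
```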
>>> x = [[4,8],[12,4],[4]] >>> ll_sum(x) 32 >>> y = [[1,1,2],[4,4],[112],[44]] >>> ll_sum(y) 168 >>> ll_sum([[1010],[502,301],[207,400],[24],[1500,500]]) 4444 >>> ll_sum([[100],[10],[2]]) 112 >>> ll_sum([[44444444444444444444444444444444,4040404040440404404040404040404]]) 48484848484884848848484848484848 | def ll_sum(list):
x = 0
for i in range(len(list)):
x += sum(list[i])
return x | [
"def ll_sum(lists_int):\n a = 0\n for L in lists_int:\n for i in L:\n a += i\n return a",
"def ll_sum(t):\n result = 0\n for x in t:\n if type(x) == list:\n result += ll_sum(x)\n else:\n result += x\n return result",
"def compute_sum(input_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> x = [1,9,8,12,65,4,4,36,88,9,112,69] >>> middle(x) [9, 8, 12, 65, 4, 4, 36, 88, 9, 112] >>> y = [10,4,'^w^','r10'] >>> middle(y) [4, '^w^'] >>> middle(['cut','show only me!','cut']) ['show only me!'] >>> middle(['r',10]) [] >>> middle([1]) [] >>> middle([]) [] | def middle(list):
new = list[1:-1]
return new | [
"def middle(list):\n new_list = list[1:-1]\n return new_list",
"def middle_index(x):\n if len(x) % 2 == 0:\n middle_index = len(x)/2 - 1\n else:\n middle_index = len(x)/2\n return int(middle_index)",
"def middle(t):\n return t[1:-1]",
"def find_middle(self, nums):\n midd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> x = ['eh!?','middle','again?'] >>> chop(x) >>> x ['middle'] >>> y = ['chop','now','chop','now','chop','chop','chop'] >>> chop(y) >>> y ['now', 'chop', 'now', 'chop', 'chop'] >>> z = ['Mr.Stark','I',"don't",'feel','so','good','.'] >>> chop(z) >>> z ['I', "don't", 'feel', 'so', 'good'] >>> chop(z) >>> z ["don't", 'fe... | def chop(list):
new = list[1:-1]
list[0:len(list)] = new
return None | [
"def chop(lst):\n del lst[0] # Removes the first element\n del lst[-1] # Removes the last element",
"def chop(lst):\r\n del lst[0] # Removes the first element\r\n del lst[-1] # Removes the last element\r"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
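The `chop` document assigns through a slice of the whole list rather than rebinding the name, so the mutation is visible to every alias the caller holds. A short equivalent and usage check:

```python
def chop(lst):
    # Slice assignment mutates the list object itself, in place.
    lst[:] = lst[1:-1]

x = ['eh!?', 'middle', 'again?']
chop(x)
print(x)  # ['middle']
```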
>>> x = ['siam','ciao','xiao','nongt'] >>> front_x(x) ['xiao', 'ciao', 'nongt', 'siam'] >>> y = ['bandori','poppipa','roselia','paspale','afterglow','hellohappy'] >>> front_x(y) ['afterglow', 'bandori', 'hellohappy', 'paspale', 'poppipa', 'roselia'] >>> front_x(['coup_de_grace','phantasm','XD','x_mark']) ['XD', 'x_mark... | def front_x(list):
x_list = []
non_x = []
for i in range(len(list)):
if (list[i])[0].lower() == 'x':
x_list.append(list[i])
else:
non_x.append(list[i])
x_list.sort()
non_x.sort()
x_list.extend(non_x)
return (x_list) | [
"def front_x(list):\n xlist = []\n olist = []\n for s in list:\n if s[0] == 'x' or s[0] == 'X':\n xlist.append(s)\n else:\n olist.append(s)\n xlist.sort()\n olist.sort()\n return xlist + olist",
"def front_x(t):\n initial = []\n for x in t:\n if x... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> even_only([1,4,9,7,11,15,4,37,69,4,21,33,4,99,87]) [4, 4, 4, 4] >>> even_only([0,5,2,9,4,7,6,87,44]) [0, 2, 4, 6, 44] >>> even_only([49,48,47,46,45,44,43,42,41,40]) [48, 46, 44, 42, 40] >>> even_only([11,22,33,44,55,66,77,88,99]) [22, 44, 66, 88] >>> even_only([99,88,77,66,55,44,33,22,11]) [88, 66, 44, 22] | def even_only(list):
x = []
for i in range(len(list)):
if list[i] %2 == 0:
x.append(list[i])
return x | [
"def even_only(list):\n new_list = []\n for i in list:\n if i % 2 == 0:\n new_list.append(i)\n return new_list",
"def all_even(number_list):\n\n # do the opposite of the above function (this time, find positive #s)\n\n even_elements = [num for num in number_list if num % 2 == 0]\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> love('i hate youuu!!!!!') 'i love youuu!!!!!' >>> love('i wanna sleep.') 'i love sleep.' >>> love('love hate love hate love hate love hate love') 'love hate love hate love hate love love love' >>> love('Everyone hate 10') 'Everyone love 10' >>> love('Everyone in jail') 'Everyone love jail' | def love(text):
new1 = text.split()
new1[-2] = 'love'
new2 = ' '.join(new1)
return new2 | [
"def love():\n return \"Baby Don't hurt me!!\"",
"def love(t):\n list_splitted = t.split()\n second_last = list_splitted[-2]\n changed = t.replace(second_last,\"love\")\n return changed",
"def greet2(g,n):\n phrase = g+','+n+'!'\n print(phrase)\n print('How are you?')",
"def lick():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> centered_average([4,3,4]) 4.0 >>> centered_average([4,1,9,7,44,32,569,4444]) 110.83333333333333 >>> centered_average([1,23,456,78910]) 239.5 >>> centered_average([1,3,5,7,9,11]) 6.0 >>> centered_average([9,5,16,79,23]) 16.0 | def centered_average(nums):
nums.sort()
nums.remove(nums[0])
nums.remove(nums[-1])
return sum(nums) / len(nums) | [
"def centered_average(nums):\n nums.sort()\n new_list = nums[1:-1]\n sum_list = sum(new_list)\n avg = sum_list/len(new_list)\n return avg",
"def centered_average(nums):\n return (sum(nums) - max(nums) - min(nums)) // (len(nums) - 2)",
"def find_average(input_list):\r\n return sum(input_list... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
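The stored `centered_average` sorts and removes elements from the caller's list. A non-mutating sketch that matches the doctests above, assuming at least three elements:

```python
def centered_average(nums):
    # Drop one instance of the min and one of the max, then average the rest.
    return (sum(nums) - min(nums) - max(nums)) / (len(nums) - 2)

print(centered_average([4, 3, 4]))            # 4.0
print(centered_average([1, 3, 5, 7, 9, 11]))  # 6.0
```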
>>> reverse_pair('lose or win') 'win or lose' >>> reverse_pair('he is she') 'she is he' >>> reverse_pair('i will sleep with you') 'you with sleep will i' >>> reverse_pair('run in the hallway') 'hallway the in run' >>> reverse_pair('back to the future') 'future the to back' | def reverse_pair(sentence):
new = sentence.split(' ')[::-1]
reversed = ' '.join(new)
return reversed | [
"def reverse_pair(sentence):\n list = sentence.split()\n l = []\n for s in list:\n l = [s] + l\n return ' '.join(l)",
"def reverse_pair(t):\n new = t.split()\n number = len(new)\n changed = []\n while number >=1:\n changed.append(new[number-1])\n number = number - 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> match_ends(['lingering','underwear','pant','tshirt']) 1 >>> match_ends(['sis','bro','dad','mom','papa','mama']) 3 >>> match_ends(['no','match','for','you']) 0 >>> match_ends(['roar','howl','bite','slash']) 1 >>> match_ends(['lol','lul','orz','meme']) 2 | def match_ends(strings):
count = 0
for i in range(len(strings)):
        if strings[i][0].lower() == strings[i][-1].lower():
count += 1
return count | [
"def match_ends(list):\n count = 0\n for s in list:\n s = s.lower()\n if len(s) >= 2:\n if s[0] == s[-1]:\n count += 1\n return count",
"def match_ends(t):\n num = 0\n for x in t:\n if len(x) >= 2:\n if x.startswith(x[0]) == x.endswith(x[0])... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The privacy settings for the album | def getPrivacy(self):
return self.base.get("privacy", []) | [
"def getPrivacy(self):\n return FacebookPrivacy(self.base.get(\"privacy\", []))",
"def privacy(self):\n return 'all_users' if self.q(css=PROFILE_VISIBILITY_SELECTOR.format('all_users')).selected else 'private'",
"def privacy(self, privacy):\n self.wait_for_element_visibility('select#u-field... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the FAQ page | def faq(request):
return render(request, "faq.html") | [
"def test_faq_template(self):\n res = self.testapp.get('/faq', status=200)\n self.failUnless('Why is it that C3S wants me to sign?' in res.body)\n self.failUnless(\n 'Copyright 2013, OpenMusicContest.org e.V.' in res.body)",
"def quiz():\n return render_template('questions.html'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the Judging Criteria page | def judging_criteria(request):
return render(request, "judging-criteria.html") | [
"def devderby_rules(request):\n return jingo.render(request, 'demos/devderby_rules.html', {})",
"def as_html(self, request, position):\n\n categories = []\n self_categories = self.categories.all()\n for category in Category.objects.all():\n if category in self_categories:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the Plagiarism Policy page | def plagiarism_policy(request):
return render(request, "plagiarism-policy.html") | [
"def privacy_policy_view(request, *args, **kwargs):\n return render(request, \"privacy_policy.html\")",
"def privacy_policy():\r\n\treturn render_template(\"privacy_policy.html\", page_title=misc.page_title(\"pp\"))",
"def policy():\n return render_template('dashboard/policy.html', tagname = 'policy')",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the Privacy Policy page | def privacy_policy(request):
return render(request, "privacy-policy.html") | [
"def privacy_policy_view(request, *args, **kwargs):\n return render(request, \"privacy_policy.html\")",
"def privacy_policy():\r\n\treturn render_template(\"privacy_policy.html\", page_title=misc.page_title(\"pp\"))",
"def privacy(self):\n return render('/privacy.html')",
"def policy():\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the Useful Resources page | def useful_resources(request):
return render(request, "useful-resources.html") | [
"def resources():\n return render_template('resources.html', title='Resources')",
"def resources():",
"def help(self):\n return render('/help.html')",
"def get_static_web_help_page():\n ...",
"def test_resource_details(self):\r\n\r\n # Check details page\r\n resp = self.client['ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Submits a pyspark job to your dataproc cluster. Specify the main script with main_python_file. You can add script parameters with the script_parameters parameter. | def submit_pyspark_job(self, cluster_name, main_python_file, script_parameters=[]):
job_details = {
'projectId': self.project_id,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
... | [
"def submit_pyspark_job(self, job_config):\n\n job_config = {\n \"args\": [\n \"gs://fynd-new-bucket/a/README.txt\"\n ],\n \"pythonFileUris\": [\n job_config.get('files.zip')\n ],\n \"mainPythonFileUri\": job_config.get('mai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
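The job payload in this row is truncated. Below is a hedged reconstruction of its shape, built only from the fields visible above; `build_pyspark_job` is a hypothetical helper, not the original method. Note that `script_parameters=[]` in the signature above is the classic mutable-default pitfall, which the sketch sidesteps with `None`:

```python
def build_pyspark_job(project_id, cluster_name, main_python_file,
                      script_parameters=None):
    # Hypothetical continuation of the truncated payload shown in the row.
    return {
        'projectId': project_id,
        'job': {
            'placement': {'clusterName': cluster_name},
            'pysparkJob': {
                'mainPythonFileUri': main_python_file,
                'args': script_parameters or [],
            },
        },
    }
```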
Performs inference for the given objects with model_name. For each object in objects, returns the topN best predictions. The retry parameter determines whether to retry on HTTP errors indicated by | def create_inference_request(
self,
model_name: str,
objects: List[dict],
top_n: int = TOP_N,
retry: bool = True,
) -> dict:
self.log.debug(
"Submitting Inference request for model '%s' with '%s'"
" objects and top_n '%s' ",
model_n... | [
"def do_bulk_inference(\n self,\n model_name: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n worker_count: int = 4,\n ) -> List[Union[dict, None]]:\n\n if worker_count is None:\n raise InvalidWorkerCount(\"worker_count cannot ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Performs bulk inference for larger collections. For objects collections larger than LIMIT_OBJECTS_PER_CALL, splits the data into several smaller Inference requests. Requests are executed in parallel. Returns the aggregated values of the predictions of the original API response | def do_bulk_inference(
self,
model_name: str,
objects: List[dict],
top_n: int = TOP_N,
retry: bool = True,
worker_count: int = 4,
) -> List[Union[dict, None]]:
if worker_count is None:
raise InvalidWorkerCount("worker_count cannot be None!")
... | [
"def execute(self, requests):\n\n responses = []\n\n # Every Python backend must iterate over everyone of the requests\n # and create a pb_utils.InferenceResponse for each of them.\n for idx, request in enumerate(requests):\n # Get input tensors \n query = pb_utils.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
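`do_bulk_inference` chunks the objects past a per-call limit and fans the chunks out to a worker pool. A minimal sketch of that pattern; `LIMIT_OBJECTS_PER_CALL` and `handle_chunk` are stand-ins for the client's real constant and request method:

```python
from concurrent.futures import ThreadPoolExecutor

LIMIT_OBJECTS_PER_CALL = 50  # assumed value, not the client's real constant

def handle_chunk(chunk):
    # Stand-in for create_inference_request(model_name, chunk, ...).
    return [{'id': i} for i, _ in enumerate(chunk)]

def do_bulk(objects, worker_count=4):
    chunks = [objects[i:i + LIMIT_OBJECTS_PER_CALL]
              for i in range(0, len(objects), LIMIT_OBJECTS_PER_CALL)]
    results = []
    # Chunks run in parallel; map() re-aggregates them in the original order.
    with ThreadPoolExecutor(max_workers=worker_count) as pool:
        for part in pool.map(handle_chunk, chunks):
            results.extend(part)
    return results

print(len(do_bulk([{'x': i} for i in range(120)])))  # 120
```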
Performs inference for the given objects against a fully-qualified URL. A complete inference URL can be passed to the inference method, instead of constructing the URL from the base URL and model name | def create_inference_request_with_url(
self,
url: str,
objects: List[dict],
top_n: int = TOP_N,
retry: bool = True,
) -> dict:
self.log.debug(
"Submitting Inference request with '%s'"
" objects and top_n '%s' to url %s",
len(objects... | [
"def inference(self, inputs, sess, mode):\n fetches = {}\n if mode == 'depth':\n fetches['depth'] = self.est_depth\n inputs_ph = self.inputs_depth\n if mode == 'egomotion':\n fetches['egomotion'] = self.est_egomotion\n inputs_ph = self.inputs_egomotion\n results = sess.run(fetches, f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
compare if a value is equal to an element in a list | def check_list(self, list1, val):
for x in list1:
if val == x:
return True
return False | [
"def elem(value, lst: List) -> bool:\n return not null(lst) and (head(lst) == value or elem(value, tail(lst)))",
"def exists(\n self,\n value\n ):\n size_of_list = self.size()\n index = 0\n\n while index < size_of_list:\n if self.get_value(index) == valu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if the user type is 'artist', the app routes to the artist info collection page and the users and artist tables get updated | def test_register_artist(self):
self.helper_user_type_all('artist', 'Artist Info', 'website', 'mywebsite') | [
"def artistInfo(aid):\n\n # checks if user is logged in, if not redirects to welcome page\n if notLoggedIn(): \n return redirect( url_for('index'))\n\n # gets artist info and displays it\n conn = dbi.connect() \n artist = music.getArtistById(conn, aid)\n artistsWork = music.getMusicByArtist... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if the user type is 'patron', the app routes to the patron info collection page and the users and patron tables get updated | def test_register_patron(self):
self.helper_user_type_all('patron', 'Patron Info', 'patron_info', 'I am a patron') | [
"def lookup_patron():\n return app.manager.admin_patron_controller.lookup_patron()",
"def manage_single_journal_member(request, user_id):\n\n # Only registry members and registrar memebers can edit vesting members\n if request.user.groups.all()[0].name not in ['registrar_member', 'registry_member', 'vest... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if the user type is 'fan', the app routes to the fan info collection page and the users and fan tables get updated | def test_register_fan(self):
self.helper_user_type_all('fan', 'Fan Info', 'fan_info', 'I am a fan') | [
"def is_fan(obj, user):\n if not is_authenticated(user):\n return False\n obj_type = ContentType.objects.get_for_model(obj)\n likes = Like.objects.filter(\n content_type=obj_type, object_id=obj.id, user=user)\n return likes.exists()",
"def user_feeds(request):\n subscription, created ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to return the special offers page | def special_offers(request):
return render(request, 'special_offers.html') | [
"def offer_detail(self, slug):\n if not self.token:\n return Exception('No token found!')\n response = self.api_request(method='GET', path='offer/%s/' % slug)\n return response",
"def emp_profile_offres(request):\n\n usname = request.user.username\n uslname = request... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
All dummies must contain unique values, or they will influence the analysis | def test_dummies(self, dummies=()):
return len(set(sum(dummies, ()))) == len(sum(dummies, ())) | [
"def _set_dummies(self):\n data_reduced = self.data[self.antecedent]\n self.data_dummies = pd.get_dummies(data_reduced, columns=self.antecedent)",
"def dummization(self):\n #TODO: use sklearn ColumnTransformer instead\n\n return pd.get_dummies(\n self.simple_imputer(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
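The trick in `test_dummies` is that `sum(dummies, ())` concatenates tuples rather than adding numbers, so the set/length comparison detects duplicates. A worked example:

```python
unique = (('a', 'b'), ('c', 'd'))
flat = sum(unique, ())              # ('a', 'b', 'c', 'd'): tuple concatenation
print(len(set(flat)) == len(flat))  # True: every dummy value is unique

overlap = (('a', 'b'), ('b', 'c'))
flat = sum(overlap, ())
print(len(set(flat)) == len(flat))  # False: 'b' appears twice
```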
Only allows answers in the sets AFERMATIVE and NEGATIVE. | def answer_binary(prompt):
answer = input(prompt).lower()
while (answer not in AFERMATIVE) and (answer not in NEGATIVE):
        print(f"your answer must be any of '{AFERMATIVE}' if yes,"
              f" or '{NEGATIVE}' if no.")
answer = input(prompt).lower()
return answer | [
"def conflateAnswers(answers):\n\n if 'objective' in answers or 'neutral' in answers:\n answers = ['neutral']\n return answers",
"def test_is_antichain_2(set_of_sets):\n assert not is_antichain(set_of_sets)",
"def any_answered(self) -> Set[str]:\n return reduce(set.union, self.answers, in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
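`answer_binary` relies on two module-level collections that the row does not show. A self-contained sketch with hypothetical contents for them (the spelling AFERMATIVE is kept from the source):

```python
AFERMATIVE = {'y', 'yes'}  # hypothetical contents
NEGATIVE = {'n', 'no'}     # hypothetical contents

def answer_binary(prompt):
    answer = input(prompt).lower()
    # Re-prompt until the answer falls in one of the two accepted sets.
    while answer not in AFERMATIVE and answer not in NEGATIVE:
        print(f"your answer must be any of '{AFERMATIVE}' if yes,"
              f" or '{NEGATIVE}' if no.")
        answer = input(prompt).lower()
    return answer
```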
testing if the create_default_project will create a default maya project structure with a proper workspace.mel | def test_create_default_project_will_create_a_workspace_mel_file(
create_test_data, trash_bin
):
arch = Archiver()
tempdir = tempfile.gettempdir()
project_path = arch.create_default_project(tempdir)
trash_bin.append(project_path)
workspace_mel_path = os.path.join(project_path, "workspace.mel")... | [
"def test_no_project_newproj(self):\n self._test_non_admin_operation(ccdlib.OP_NEWPROJ)",
"def prepare_project(project_manager, project_name=\"working_project\"):\n project = project_manager.LoadProject(project_name)\n if not project:\n print(\"Unable to loat a project '\" + project_name + \"'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the content of the workspace.mel file is correct when the create_default_project method is used. | def test_create_default_project_workspace_mel_content_is_correct(
create_test_data, trash_bin
):
arch = Archiver()
tempdir = tempfile.gettempdir()
project_path = arch.create_default_project(tempdir)
trash_bin.append(project_path)
workspace_mel_path = os.path.join(project_path, "workspace.mel")... | [
"def test_create_default_project_will_create_a_workspace_mel_file(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if no error will be raised when the workspace.mel file is already there | def test_create_default_project_workspace_mel_already_exists(
create_test_data, trash_bin
):
data = create_test_data
arch = Archiver()
tempdir = tempfile.gettempdir()
# there should be no error to call it multiple times
project_path = arch.create_default_project(tempdir)
trash_bin.append(pr... | [
"def dirty(self):\n if os.path.exists(self.file_path):\n return False\n else:\n raise RuntimeError(\"Source file missing: %s\" % self.file_path)",
"def _has_workspace(self, job):\r\n\t\tif job.workspace is None or not os.path.exists(job.workspace):\r\n\t\t\t# No workspace has b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with no references. | def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):
data = create_test_data
arch = Archiver()
project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path])
trash_bin.append(project_path)
# the returned path should be a maya project directory
... | [
"def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with only one level of references. | def test_flatten_is_working_properly_with_only_one_level_of_references(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["asset2_model_main_v001"]
maya_env.open(data["asset2_model_main_v001"], force=Tru... | [
"def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with only one level of multiple references to the same file. | def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["asset2_model_main_v001"]
maya_env.open(data["asset2_mo... | [
"def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with multiple levels of references. | def test_flatten_is_working_properly_with_multiple_level_of_references(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["asset2_model_take1_v001"]
maya_env.open(data["asset2_model_take1_v001"], force=T... | [
"def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with references that has external files like textures, sound etc. and there is also an exclude_mask | def test_flatten_is_working_properly_with_exclude_mask(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["version7"]
maya_env.open(data["version7"], force=True)
# create an image file at the projec... | [
"def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() is working properly for a scene with textures, audio etc. external files | def test_flatten_is_working_properly_for_external_files(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["version7"]
maya_env.open(data["version7"], force=True)
# create an image file at the proje... | [
"def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
testing if the Archiver.flatten() will restore the current workspace path after it has finished flattening | def test_flatten_will_restore_the_current_workspace(
create_test_data, trash_bin, create_pymel, create_maya_env
):
data = create_test_data
maya_env = create_maya_env
pm = create_pymel
# open data["asset2_model_main_v001"]
maya_env.open(data["asset2_model_main_v001"], force=True)
current_wor... | [
"def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Utility function to get an AOV from its given info. AOV nodes can't be retrieved using a simple path like "|RenderPass|Layer|Beauty" and the AOV name ("Beauty" here) is not the node name but a plug value ("PlugName"). This function tries to provide an easy way to retrieve an AOV node based on predicted render pass and ... | def aov_node(parser, rp_name, rl_name, aov_name):
# get render layer node
rl = parser.path_to_node('|{rp_name}|{rl_name}'.format(**locals()))
aov_nodes = []
# and find aov based on its display name
for aov_node in rl.children:
if aov_node.display_name == aov_name:
aov_nodes.app... | [
"def add_aov(self):\n prompt = QtWidgets.QInputDialog(self._view)\n prompt.setWindowTitle('Add AOV')\n prompt.setLabelText('AOV name:')\n prompt.setOkButtonText('Add')\n if prompt.exec_():\n self._model.add_aov(prompt.textValue())",
"def _prepare_ovas(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a URL that is most likely to route to `local_host` from outside. The point is that we may be running on a remote host from the user's point of view, so they can't access `local_host` from a Web browser just | def guess_external_url(local_host, port):
if local_host in ['0.0.0.0', '::']:
# The server is listening on all interfaces, but we have to pick one.
# The system's FQDN should give us a hint.
local_host = socket.getfqdn()
# https://github.com/vfaronov/turq/issues/9
match = IP... | [
"def get_current_host_url():\n if settings.DEBUG or is_local():\n return 'http://jaypark.sinwoobang.me:8000'\n return 'http://jaypark.sinwoobang.me'",
"def local_url(self) -> str:\n return f'ws://127.0.0.1:{self.port}'",
"def toLocalhostUri(uri):\n return uri.replace(CELLAR_BASE + '/resou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
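The key move in `guess_external_url` is swapping wildcard bind addresses for the machine's FQDN. A reduced, runnable sketch; the IPv6-literal handling behind the truncated regex is omitted:

```python
import socket

def guess_external_url(local_host, port):
    if local_host in ('0.0.0.0', '::'):
        # Listening on all interfaces: the system's FQDN is the best guess
        # for a name that routes here from outside.
        local_host = socket.getfqdn()
    return 'http://%s:%d/' % (local_host, port)

print(guess_external_url('0.0.0.0', 8080))
```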
Return True if the minimum duration in the stage has passed. Otherwise return False. | def min_duration_over(self):
        return self.start_date + datetime.timedelta(
            seconds=self.group_stage.stage.min_duration) <= timezone.now()
"def has_run(self, duration):\n if self.time() - self.start_time > duration:\n return True\n return False",
"def has_duration(self):\n return # boolean",
"def validate_time_step(self):\n valid = (self.time_step >= 0) and (is_number(self.time_step))\n # set flag\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the date/time of the deadline for this stage. Return None if there is no deadline for this stage. | def get_deadline(self):
if self.start_date is None:
# The stage hasn't been started yet: no deadline
return None
if self.group_stage.stage.deadline is None:
# This stage has no deadline associated with it
return None
# Compute the deadline for this stage
days_to_complete_stage = datetime.timedelta... | [
"def deadline(self):\n if not self._deadline:\n self._deadline = self.now + timezone.timedelta(days=1)\n return self._deadline",
"def deadline(self):\n\n print \"# Deadline info\"\n print \"#\"\n print \"# Exipres in: {time}\".format(time=self.subtree['.meta'].get('de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if this stage is overdue, False otherwise. | def is_overdue(self):
deadline = self.get_deadline()
if deadline is None:
# No deadline has been set for this stage
return False
if self.status == 0:
# The stage has already been completed
return False
return timezone.now() > deadline | [
"def is_overdue(self):\n return bool(self.due_back and date.today() > self.due_back)",
"def is_overdue(self):\n if self.due_back and date.today() > self.due_back:\n return True\n return False",
"def overdue(self) -> bool:\n if not self.scheduled_start_datetime:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the active stages for the specified 'user' and 'study'. An active stage is defined as a stage that has been started but not ended. | def get_active_stages(user, study=None):
active_stages = UserStage.objects.filter(user=user, status=1)
# Studies should be started
start_date_exists = Q(group_stage__stage__study__start_date__isnull=False)
start_date_in_past = Q(group_stage__stage__study__start_date__lte=timezone.now())
active_stages = activ... | [
"def get_next_user_stage(user, study):\n\t\n\tus = UserStage.objects.filter(user=user, group_stage__stage__study=study).order_by('group_stage__order')\n\t\n\tfor s in us.all():\n\t\tif s.status != 0:\n\t\t\treturn s\n\t\t\n\treturn None",
"def get_active_exams_for_user(user_id, course_id=None):\n result = []\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the next user stage. | def get_next_user_stage(user, study):
us = UserStage.objects.filter(user=user, group_stage__stage__study=study).order_by('group_stage__order')
for s in us.all():
if s.status != 0:
return s
return None | [
"def get_last_stage(self):\n return self.stages[-1] if self.stages else None",
"def get_next_user(tgt_user):\n handles = sorted(list_users())\n try:\n current_idx = handles.index(tgt_user.handle)\n except ValueError:\n # what if we just deleted the target user?\n # inject it b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the number of images in the dataset split. | def __len__(self) -> int:
return self.num_images | [
"def get_number_of_images(self) -> int:\n return len(self._soup.find_all(\"img\"))",
"def num_images(self):\n return len(self.get_image_keys(include_labels=False))",
"def get_dataset_size(self):\n keys = self.get_keys()\n\n dataset_size = 0\n for key in keys:\n imag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all valid lengths | def getLengths(self):
return [self.length] | [
"def lengths(self):\n return self._lengths.__copy__()",
"def determine_lengths(dtrajs):\n if (isinstance(dtrajs[0],(int))):\n return len(dtrajs)*np.ones((1))\n lengths = np.zeros((len(dtrajs)))\n for i in range(len(dtrajs)):\n lengths[i] = len(dtrajs[i])\n return lengths",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all decision byte positions | def getDecisionBytes(self):
decBytes = self.hdr.getDecisionBytes()[0]
decBytes = [byte - self.startPos for byte in decBytes]
while len(decBytes) > 0 and decBytes[-1] >= self.length:
decBytes.pop()
while len(decBytes) > 0 and decBytes[0] < 0:
decBytes.pop(0)
... | [
"def _identify_all_possible_position(self):\n lign = 1\n index_number = 1\n while lign < 16:\n column = 1\n while column < 16:\n self.all_position.append(index_number)\n column += 1\n index_number += 1\n index_number ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all extract byte positions | def getExtractBytes(self):
extBytes = self.hdr.getExtractBytes()
extBytes = [byte - self.startPos for byte in extBytes]
while len(extBytes) > 0 and extBytes[-1] >= self.length:
extBytes.pop()
while len(extBytes) > 0 and extBytes[0] < 0:
extBytes.pop(0)
ret... | [
"def _getOffsets(self, mapped, magicbytes, start = None):\n if start is None:\n start = 0\n else:\n start -= len(magicbytes)\n\n start = mapped.find(magicbytes, start)\n while True:\n end = mapped.find(magicbytes, start + len(magicbytes))\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all fields within the header | def getFields(self):
raise RuntimeError('This function needs vetting')
return self.hdr.getLookupLengthFields() | [
"def _basicFields(self):\n headers = self.message.getHeaders(False, *self._HEADERS)\n\n # Number of octets total\n size = self.message.getSize()\n\n major, minor = self.main, self.subtype\n\n # content-type parameter list\n unquotedAttrs = self._unquotedAttrs()\n\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
App factory to create website | def create_site():
if os.path.exists(instfolder):
app = Flask('threecolor', instance_path=instfolder, instance_relative_config=True)
# configure flask app from default settings, then overide with settings.cfg
app.config.from_object('threecolor.configs.default_settings')
app.config.... | [
"def software_factory(app, company_factory):\n pass",
"def app(par=None):\n\n return Miniweb.get_instance(par)",
"def make_site():\n\n def site(name=\"site1\", devices=None):\n \"\"\"Provide an instance of a Site model.\"\"\"\n if not devices:\n devices = []\n return Sit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether the node is terminal. If there is no left node and no right node, it's a terminal node. If either is non-None, it is a parent to something. | def is_terminal(self):
return self.left is None and self.right is None | [
"def is_terminal(self):\n return isinstance(self, Terminal)",
"def is_right_child(self):\n return self.node_type() == -1",
"def is_terminal(item):\n return hasattr(item, '__hash__') and not isinstance(item, LexNonterminal)",
"def is_right_child(self):\n return self.parent and self.pare... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates multiple clones according to config using a `model_fn`. The returned values of `model_fn(args, kwargs)` are collected along with the scope and device used to create it in a namedtuple `Clone(outputs, scope, device)` | def create_clones(config, model_fn, args=None, kwargs=None):
clones = []
args = args or []
kwargs = kwargs or {}
with slim.arg_scope([slim.model_variable, slim.variable],
device=config.variables_device()):
# Create clones.
for i in range(0, config.num_clones):
with tf.name_sc... | [
"def deploy(config,\n model_fn,\n args=None,\n kwargs=None,\n optimizer=None,\n summarize_gradients=False):\n # Gather initial summaries.\n summaries = set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES))\n\n # Create Clones.\n clones = create_clon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
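The `Clone(outputs, scope, device)` namedtuple and the `clone_%d` scoping convention from these deployment rows can be mimicked without TensorFlow. A pure-Python sketch of the bookkeeping, assuming one GPU per clone:

```python
import collections

Clone = collections.namedtuple('Clone', ['outputs', 'scope', 'device'])

def create_clones(num_clones, model_fn, *args):
    clones = []
    for i in range(num_clones):
        scope = 'clone_%d' % i if num_clones > 1 else ''
        device = '/device:GPU:%d' % i  # assumed one-GPU-per-clone placement
        clones.append(Clone(model_fn(*args), scope, device))
    return clones

print(create_clones(2, lambda x: x * 2, 3))
```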
Gather the loss for a single clone. | def _gather_clone_loss(clone, num_clones, regularization_losses):
# The return value.
sum_loss = None
# Individual components of the loss that will need summaries.
clone_loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
with tf.device(clone.device):
all_losses ... | [
"def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute losses and gradients for a single clone. | def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
**kwargs):
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kw... | [
"def _gather_clone_loss(clone, num_clones, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n clone_loss = None\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n with tf.device(clone.device):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute clone losses and gradients for the given list of `Clones`. | def optimize_clones(clones, optimizer,
regularization_losses=None,
**kwargs):
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARI... | [
"def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deploys a Slimconstructed model across multiple clones. The deployment options are specified by the config object and support deploying one or several clones on different GPUs and one or several replicas of such clones. The argument `model_fn` is called `config.num_clones` times to create the model clones as `model_fn(... | def deploy(config,
model_fn,
args=None,
kwargs=None,
optimizer=None,
summarize_gradients=False):
# Gather initial summaries.
summaries = set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, mode... | [
"def create_clones(config, model_fn, args=None, kwargs=None):\n clones = []\n args = args or []\n kwargs = kwargs or {}\n with slim.arg_scope([slim.model_variable, slim.variable],\n device=config.variables_device()):\n # Create clones.\n for i in range(0, config.num_clones):\n wi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the sum gradient for each shared variable across all clones. This function assumes that clone_grads has been scaled appropriately by 1 / num_clones. | def _sum_clones_gradients(clone_grads):
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == v... | [
"def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
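The `zip(*clone_grads)` transpose in `_sum_clones_gradients` lines up the same variable across clones before summing. A plain-Python illustration with floats standing in for gradient tensors:

```python
clone_grads = [
    [(1.0, 'w'), (0.5, 'b')],  # clone 0: (grad, var) pairs
    [(2.0, 'w'), (0.5, 'b')],  # clone 1
]
summed = []
for grad_and_vars in zip(*clone_grads):
    var = grad_and_vars[0][1]  # same variable in every clone
    summed.append((sum(g for g, _ in grad_and_vars), var))
print(summed)  # [(3.0, 'w'), (1.0, 'b')]
```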
Add histogram summaries to gradients. | def _add_gradients_summaries(grads_and_vars):
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
... | [
"def update_hist_list(self):\n for layer in self.layer_list:\n self.histogram_weight_history[layer]['weight'].append(np.histogram(layer.weight.detach().cpu().numpy().reshape(-1), bins=self.bins))\n self.histogram_weight_history[layer]['bias'].append(np.histogram(layer.bias.detach().cpu(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the device to use for caching variables. Variables are cached on the worker CPU when using replicas. | def caching_device(self):
if self._num_ps_tasks > 0:
return lambda op: op.device
else:
return None | [
"def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')",
"def variables_device(self):\n device = ''\n if self._num_ps_tasks > 0:\n device += self._ps_device\n\n device += _get_device(self._clone_on_cpu).name\n\n class _PSDeviceChooser(object):\n \"\"\"Sli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Device used to create the clone and all the ops inside the clone. | def clone_device(self, clone_index):
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device_type = _get_device(self._clone_on_cpu).device_type
device += '/device:%s:%d' ... | [
"def clonedev(self):\n try:\n try:\n self.device_hidrep\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n try:\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Name scope to create the clone. | def clone_scope(self, clone_index):
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
scope = 'clone_%d' % clone_index
return scope | [
"def new_scope(self):\n self.append(Scope(self.peek()))\n return",
"def scope_name(self):\n return self.fun.scope_name",
"def instance(origin, copy, identifier):\n newInstance = ObName()\n newInstance.origin = origin\n newInstance.copy = copy\n newInstance.identi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Device to use with the optimizer. | def optimizer_device(self):
if self._num_ps_tasks > 0 or self._num_clones > 0:
device = self._worker_device
device += _get_device(self._clone_on_cpu).name
return device
else:
return '' | [
"def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")",
"def get_device() -> torch.device:\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')",
"def _init_device(self, cuda_device: torch.device = torch.device('cpu')):\n if sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Device to use to build the inputs. | def inputs_device(self):
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device += '/device:CPU:0'
return device | [
"def listInputDevices():\n pass",
"def assignInputDevice(multiple=bool, clutch=\"string\", continuous=bool, immediate=bool, device=\"string\"):\n pass",
"def InitDevice(self):\n # No need to init the device.\n pass",
"def convert_device(self):\n\n from spira.yevon.geometry.ports import Port... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the device to use for variables created inside the clone. | def variables_device(self):
device = ''
if self._num_ps_tasks > 0:
device += self._ps_device
device += _get_device(self._clone_on_cpu).name
class _PSDeviceChooser(object):
"""Slim device chooser for variables when using PS."""
def __init__(self, device, tasks):
self._device ... | [
"def device(self):\n return str(self.dummy_param.device)",
"def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')",
"def get_device() -> torch.device:\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')",
"def device(use_gpu=False):\n if use_gp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects and stores any xBlock and modulestore data into the block_structure that's necessary for later execution of the transformer's transform method. Transformers should store such | def collect(cls, block_structure):
pass # lint-amnesty, pylint: disable=unnecessary-pass | [
"def transform(self, usage_info, block_structure):\n raise NotImplementedError",
"def save_to_block(self, data):\n self.refresh()\n\n if self.frame_start_time is None:\n # Get min frame time out of all blocks\n self.frame_start_time = time.time()\n for _, b in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms the given block_structure for the given usage_info, assuming the block_structure contains cached data from a prior call to the collect method of the latest version of the Transformer. No access to the modulestore nor instantiation of xBlocks should be performed during the execution of this method. However, a... | def transform(self, usage_info, block_structure):
raise NotImplementedError | [
"def transform(self, usage_info, block_structure):\n for block_key in block_structure.topological_traversal():\n parents = block_structure.get_parents(block_key)\n if parents:\n block_depth = min(\n self.get_block_depth(block_structure, parent_key)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is an alternative to the standard transform method. Returns a list of filter functions to be used for filtering out any unwanted blocks in the given block_structure. In addition to the commonly used methods listed above, the following | def transform_block_filters(self, usage_info, block_structure):
raise NotImplementedError | [
"def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given two functions that take a block_key and return a boolean, return a function that takes a block_key and 'ands' the two functions together | def _filter_chain(accumulated, additional):
return lambda block_key: accumulated(block_key) and additional(block_key) | [
"def _binary(func, b):\n @wraps(func)\n def wrapper(a):\n return func(a, b)\n\n return wrapper",
"def combine_and(expect_fn1, expect_fn2):\r\n def combine_fn(x1, x2):\r\n return min(x1, x2)\r\n return Expect.combine(expect_fn1, expect_fn2, combine_fn)",
"def __block_equa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
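Chaining many such filters is a fold of `_filter_chain` over the list. A short demonstration:

```python
from functools import reduce

def _filter_chain(accumulated, additional):
    return lambda block_key: accumulated(block_key) and additional(block_key)

filters = [lambda k: not k.startswith('_'), lambda k: k != 'hidden']
combined = reduce(_filter_chain, filters)
print(combined('visible'))   # True
print(combined('_private'))  # False
print(combined('hidden'))    # False
```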
Check the current value against the target. | def _check(self, target, current):
raise NotImplementedError('Do not call the base Goal directly.') | [
"def check(self, state, val):\n return",
"def _needs_to_track_change(self, instance, value) -> bool:\n try:\n current_value = instance.__dict__[self._name]\n except KeyError:\n return True\n return value != current_value",
"def check(self, instance):\n r ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate all possible oligos from seq with length constraints. seq is Bio.Seq.Seq or string | def oligo_gen(seq, min_len, max_len):
    # +1 so the final window of length min_len is also generated
    for i in range(len(seq) - min_len + 1):
for j in range(min_len, max_len + 1):
oligo = seq[i:i + j]
if len(oligo) == j:
yield oligo | [
"def seq_permutation(seq_len: int,\n charset: str = \"ATCG\") -> t.Iterable[str]:\n if seq_len <= 0:\n yield \"\"\n else:\n for seq in seq_permutation(seq_len-1, charset):\n for c in charset:\n yield seq + c",
"def build_sequences(data, seq_len=5):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
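With the off-by-one corrected above, `oligo_gen` also emits the final window of length `min_len`. A quick check on a plain string:

```python
def oligo_gen(seq, min_len, max_len):
    for i in range(len(seq) - min_len + 1):
        for j in range(min_len, max_len + 1):
            oligo = seq[i:i + j]
            if len(oligo) == j:  # drop windows clipped by the sequence end
                yield oligo

print(list(oligo_gen('ATCGA', 2, 3)))
# ['AT', 'ATC', 'TC', 'TCG', 'CG', 'CGA', 'GA']
```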
Generate all possible point mutations from DNA seq. seq is Bio.Seq.Seq. Does not respect case of letters | def dna_mutation_gen(seq):
letters = seq.alphabet.letters
for i in range(len(seq)):
for letter in letters:
if letter != seq[i].upper():
yield seq[:i] + letter + seq[i + 1:] | [
"def translate(seq):\n\n table = {\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate all single inosine mutations in seq. seq is a Bio.Seq.Seq or str. Does not respect alphabets | def inosine_gen(seq):
compat = set('GAT')
for i in range(len(seq)):
if seq[i].upper() in compat:
yield seq[:i] + 'I' + seq[i + 1:] | [
"def generate_all_insertion_mutants(self, sequence):\n ancestor_sequence = list(sequence)\n all_insertion_mutants = []\n \n #make all insertions, (+1 for insertion off the last instruction)\n for i in range(len(sequence) + 1):\n for new_char in self.char_lookup:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of namedtuples with some DNA properties. seqs is a list[Bio.Seq.Seq or str] representing DNA sequences | def dna_properties_batch(seqs):
seqs = [str(seq) for seq in seqs]
gcs = [GC(seq) for seq in seqs]
Tms = melting_temp(seqs)
ss_dGs = hybrid_ss_min(seqs)
self_hyb_dGs = [r[0] for r in hybrid_min(seqs, seqs)]
return [DNAProp(*tup) for tup in zip(seqs, gcs, Tms, ss_dGs, self_hyb_dGs)] | [
"def createDnaObjects(self):\n ADNs = list()\n for sequance in self.__read_FASTA_sequences():\n # assignment title and sequance\n ADNs.append(a.adn(sequance[1], sequance[0]))\n return ADNs",
"def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a namedtuple with some DNA properties. seq is a Bio.Seq.Seq or str representing DNA sequence | def dna_properties(seq):
return dna_properties_batch([seq])[0] | [
"def __init__(self, seq, extraNt=''):\n\n self.seq = DNAseq(str(seq))\n self.extraNt = extraNt",
"def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a tuple with some protein biochemical properties. seq is a Bio.Seq.Seq or str representing protein sequence | def protein_properties(seq):
pa = ProteinAnalysis(seq)
aa_counts = pa.count_amino_acids()
arom = pa.aromaticity()
isoelec = pa.isoelectric_point()
try:
instability = pa.instability_index()
except KeyError:
instability = None
try:
gravy = pa.gravy()
except KeyErro... | [
"def readBpseq(bpseq_fn):\n content = open(bpseq_fn).readlines()\n seq = [-1] * len(content)\n struct = [-1] * len(content)\n for i, entry in enumerate(content):\n pos, base, pair = entry.strip().split()\n seq[i] = base\n p = int(pair)\n struct[i] = [1, p][p == 0]\n return \"\".join(seq), struct",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise ImportError with detailed error message if matplotlib is not installed. Functionality requiring matplotlib should call this helper and then lazily import. | def check_matplotlib_support(caller_name: str) -> None:
try:
import matplotlib # type: ignore
except ImportError as e:
raise ImportError(
caller_name + " requires matplotlib. You can "
"install matplotlib with `pip install matplotlib`."
) from e | [
"def import_matplotlib_pyplot(funcname=\"XU\"):\n try:\n from matplotlib import pyplot as plt\n\n # from .mpl_helper import SqrtAllowNegScale\n return True, plt\n except ImportError: # print(d['qvec'][m][ind['ind'][0]])\n print(\"%s:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise ImportError with detailed error message if vowpalwabbit is not installed. Functionality requiring vowpalwabbit should call this helper and then lazily import. | def check_vowpal_support(caller_name: str) -> None:
try:
import vowpalwabbit # type: ignore
except ImportError as e:
raise ImportError(
caller_name + " requires vowpalwabbit. You can "
"install vowpalwabbit with `pip install vowpalwabbit`."
) from e | [
"def test_import_not_found(self):\n try:\n import_version('bogus', Requirement.parse('bogus==1.0'),\n Environment(['plugins']))\n except DistributionNotFound, err:\n self.assertEqual(str(err),\n 'could not find distri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise ImportError with detailed error message if pandas is not installed. Functionality requiring pandas should call this helper and then lazily import. | def check_pandas_support(caller_name: str) -> None:
try:
import pandas # type: ignore
except ImportError as e:
raise ImportError(
caller_name + " requires pandas. You can "
"install pandas with `pip install pandas`."
) from e | [
"def handleImportError(self, exception):\n first = exception.args[0]\n if first.find('No module named ') < 0:\n raise\n module = first[len('No module named '):]\n module = module.split('.')[0]\n\n if module in self._deps.keys():\n dep = self._deps[module]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get raid configuration by serial number | def get(self, sn):
config, err = self.dbapi.get_raid_config_by_sn(sn)
if err or config is None:
LOG.error("error fetching configuration by given sn %s" % sn)
return {
'is_ok': False
}
else:
LOG.info("successfully get configuration... | [
"def _get_raid(self, raid_devicefile):\r\n if self._data is not None:\r\n for raid in self._data[\"raid\"]:\r\n if raid[\"devicefile\"] == raid_devicefile:\r\n return raid",
"def get_device_index_by_serial(serial):\n if PY3 and isinstance(serial, str):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resolves to the final node instance that can be used to perform the matching | def _resolve_target_node_from_path(self, node):
if not self.path:
return node
for path in self.path.split("."):
# Since a node type can have multiple paths to a given parent
# this check allows instances that do not have this specific path
if len(node[pat... | [
"def _object_resolution(self, object_to_resolve):\n # Below is the original comment that came with the code extracted in\n # this method. It is not very clear but I decided to keep it just in\n # case\n #\n #\n # For some reason, while the type of `a_result_attribute[1]`\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a node has a value matching the prop and values fields; if it does, the particular instance will not participate in the tagging process | def match(self, node):
node = self._resolve_target_node_from_path(node)
return node and node[self.prop] in self.values | [
"def tag_value_input_test(node, tag, old, new):\n return True",
"def _value_match(self, pattern, value):\n v1, t1, l1 = value.get('@value'), value.get('@type'), value.get('@language')\n v2 = JsonLdProcessor.get_values(pattern, '@value')\n t2 = JsonLdProcessor.get_values(pattern, '@type... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes a unique tag for the given node | def compute_tag(node):
keys = node.get_tag_property_values()
keys += sorted(
compute_tag(p.dst)
for p in node.edges_out
if p.dst.is_taggable() and p.label != "relates_to"
)
return __generate_hash(keys, node.label) | [
"def create_tag( self, node, scene ):\r\n\r\n\t\tnode_is_tag = get_node_properties( node, property_name = 'p_tag_name' )\r\n\t\tif node_is_tag or node.GetName().startswith( 'tag_' ):\r\n\t\t\ttag = Node_Tag( node, scene )\r\n\t\t\tself.tags.append( tag )",
"def _(self, node: AnnCastString):\n node_uid = uu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
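The __generate_hash helper used above is not shown in the record. A sketch of one plausible implementation; the digest scheme is an assumption, and any stable hash over the sorted keys plus the node label would preserve the property that equal inputs yield equal tags:

import hashlib

def generate_hash(keys, label):
    # Hypothetical stand-in for the unseen __generate_hash: digest the
    # label followed by the keys in a canonical (sorted) order.
    m = hashlib.sha256()
    m.update(label.encode("utf-8"))
    for key in sorted(keys):
        m.update(key.encode("utf-8"))
    return m.hexdigest()

# Order-insensitive: permuted keys produce the same tag.
assert generate_hash(["a", "b"], "sample") == generate_hash(["b", "a"], "sample")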
Injects an event listener that sets the tag and version properties on nodes, just before they are inserted | def inject_set_tag_after_insert(cls):
@event.listens_for(cls, "after_insert")
def set_node_tag(mapper, conn, node):
table = node.__table__
if not node.is_taggable():
return # do nothing
tag = compute_tag(node)
version = __get_tagged_version(node.node_id, table, t... | [
"def addNodeAddedCallback(*args, **kwargs):\n \n pass",
"def on_pre_sync(self, changed):\n _add_tags(changed)",
"def addObservers(self):\n tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, self.landmarksWidget.requestNodeAddedUpdate)\n self.observerTags.append( (slicer.m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
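The hook above relies on SQLAlchemy's mapper-level "after_insert" event. A minimal self-contained sketch of that mechanism, assuming SQLAlchemy 1.4+; the constant tag stands in for the compute_tag call, and the UPDATE goes through the event's connection because the session is mid-flush:

from sqlalchemy import Column, Integer, String, create_engine, event
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Node(Base):
    __tablename__ = "node"
    id = Column(Integer, primary_key=True)
    tag = Column(String)

@event.listens_for(Node, "after_insert")
def set_node_tag(mapper, connection, node):
    # Fires once per instance, right after its INSERT is emitted.
    connection.execute(
        Node.__table__.update().where(Node.id == node.id).values(tag="computed")
    )

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Node())
    session.commit()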
Cooccurrence constraint as described in the paper. | def compute_cooccurrence_constraint(self, nodes):
num_nodes, num_attrs = self.modified_nx.shape
words_graph = self.cooc_matrix - sp.diags(self.cooc_matrix.diagonal())
words_graph.eliminate_zeros()
# words_graph.setdiag(0)
words_graph.data = words_graph.data > 0
... | [
"def test_co_occurrence(adata: AnnData):\n co_occurrence(adata, cluster_key=\"leiden\")\n\n # assert occurrence in adata.uns\n assert \"leiden_co_occurrence\" in adata.uns.keys()\n assert \"occ\" in adata.uns[\"leiden_co_occurrence\"].keys()\n assert \"interval\" in adata.uns[\"leiden_co_occurrence\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
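The first lines of compute_cooccurrence_constraint strip the diagonal of the sparse co-occurrence matrix and binarize what remains. A small scipy sketch of just that step, with made-up counts:

import numpy as np
import scipy.sparse as sp

cooc = sp.csr_matrix(np.array([[3, 1, 0],
                               [1, 2, 2],
                               [0, 2, 5]]))
# Subtract the diagonal (self co-occurrence), drop the explicit zeros
# it leaves behind, then binarize the remaining entries.
words_graph = cooc - sp.diags(cooc.diagonal())
words_graph.eliminate_zeros()
words_graph.data = words_graph.data > 0
print(words_graph.toarray())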
Compute the logits of the surrogate model, i.e. linearized GCN. Returns np.array [num_nodes, num_classes]: the log probabilities for each node. | def compute_logits(self):
return (self.adj_norm @ self.adj_norm @ self.modified_nx
@ self.W)[self.target].ravel() | [
"def logits(self) -> T.Tensor:\n if self._logits is None:\n self._logits = T.random.bernoulli_probs_to_logits(self._probs,\n self.epsilon)\n return self._logits",
"def logits(self, x):",
"def loglike(self, nodeinput=None):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
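The surrogate model here is the standard linearized two-layer GCN: logits = Â Â X W, where Â is the symmetrically normalized adjacency the record keeps as self.adj_norm. A numpy/scipy sketch with illustrative shapes; the normalization as D^-1/2 (A + I) D^-1/2 is an assumption about how adj_norm was built:

import numpy as np
import scipy.sparse as sp

rng = np.random.default_rng(0)
A = sp.csr_matrix(np.array([[0, 1, 0, 0],
                            [1, 0, 1, 0],
                            [0, 1, 0, 1],
                            [0, 0, 1, 0]], dtype=float))
A_tilde = A + sp.eye(4)                      # add self-loops
d = np.asarray(A_tilde.sum(1)).ravel()
adj_norm = sp.diags(d ** -0.5) @ A_tilde @ sp.diags(d ** -0.5)
X = rng.random((4, 3))                       # node features
W = rng.random((3, 2))                       # linearized weights
logits = adj_norm @ adj_norm @ X @ W         # [num_nodes, num_classes]
target = 2
print(logits[target])                        # logits for the target node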
Determine the incorrect class with the largest logit. | def strongest_wrong_class(self, logits):
target_label_onehot = np.eye(self.num_classes)[self.target_label]
return (logits - 1000 * target_label_onehot).argmax() | [
"def decode_logits(self, logits):\n # Choose the class with maximimum probability.\n best = list(np.argmax(logits, axis=1))\n return self.decode(best)",
"def _find_best_class(self, x: pd.Series) -> int:\n\n optimal_score, optimal_class = float('-inf'), None\n for k in self._pi_k.keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
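The one-hot trick above excludes the true class by subtracting a large constant from its logit before taking argmax. A tiny worked example:

import numpy as np

logits = np.array([1.2, 3.4, 0.5])
target_label = 1
onehot = np.eye(3)[target_label]
best_wrong = (logits - 1000 * onehot).argmax()
assert best_wrong == 0   # class 1 is masked out; class 0 has the next-largest logit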
Compute feature scores for all possible feature changes. | def feature_scores(self):
if self.cooc_constraint is None:
self.compute_cooccurrence_constraint(self.influence_nodes)
logits = self.compute_logits()
best_wrong_class = self.strongest_wrong_class(logits)
gradient = self.gradient_wrt_x(
self.target_label) ... | [
"def calculate_scores(self):\n return [self.fitness.eval(individual) for individual in self.population]",
"def compute_score(scores):\n\tcurr_score = 50\n\tfor classification in scores: \n\t\tif classification == 1 or classification == 3: \n\t\t\tcurr_score += 0.08\n\t\tif classification == 2 or classifica... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the influencer nodes to attack node i based on the weights W and the attributes X. | def get_attacker_nodes(self, n=5, add_additional_nodes=False):
assert n < self.num_nodes - 1, "number of influencers cannot be >= number of nodes in the graph!"
# neighbors = self.modified_adj[self.target].nonzero()[1]
neighbors = self.modified_adj[self.target].indices
# ... | [
"def learn_initial_weights(self, X):\n output = torch.tensor(X, dtype=torch.float32)\n for i in range(len(self.weights)):\n torch.nn.init.xavier_normal_(self.weights[i].weight, torch.nn.init.calculate_gain('tanh'))\n self.weights[i].bias.data.fill_(0)\n output2 = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the updated A_hat_square_uv entries that would result from inserting/deleting the input edges, for every edge. | def compute_new_a_hat_uv(self, potential_edges):
edges = np.transpose(self.modified_adj.nonzero())
edges_set = {tuple(e) for e in edges}
A_hat_sq = self.adj_norm @ self.adj_norm
values_before = A_hat_sq[self.target].toarray()[0]
node_ixs = np.unique(edges[:, 0], return_ind... | [
"def compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs,\r\n values_before, degs, potential_edges, u):\r\n num_nodes = degs.shape[0]\r\n\r\n twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]\r\n nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]\r\n nbs_u_set = set(nbs_u)\r\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the new values [A_hat_square]_u for every potential edge, where u is the target node. Cf. Theorem 5.1, Equation 17. | def compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs,
values_before, degs, potential_edges, u):
num_nodes = degs.shape[0]
twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]
nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]
nbs_u_set = set(nbs_u)
return_ixs = [... | [
"def compute_new_a_hat_uv(self, potential_edges):\r\n\r\n edges = np.transpose(self.modified_adj.nonzero())\r\n edges_set = {tuple(e) for e in edges}\r\n A_hat_sq = self.adj_norm @ self.adj_norm\r\n values_before = A_hat_sq[self.target].toarray()[0]\r\n node_ixs = np.unique(edges[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
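The incremental update of Theorem 5.1 is most easily sanity-checked against brute-force recomputation: flip one edge, rebuild Â, and compare the target row of Â² before and after. A sketch of that check; it recomputes directly rather than implementing the incremental formula itself:

import numpy as np
import scipy.sparse as sp

def normalize(A):
    # Symmetrically normalized adjacency with self-loops.
    A_tilde = A + sp.eye(A.shape[0], format="csr")
    d = np.asarray(A_tilde.sum(1)).ravel()
    D = sp.diags(d ** -0.5)
    return D @ A_tilde @ D

A = sp.csr_matrix(np.array([[0, 1, 0],
                            [1, 0, 1],
                            [0, 1, 0]], dtype=float))
u = 0
before = (normalize(A) @ normalize(A))[u].toarray().ravel()

A_mod = A.tolil()
A_mod[0, 2] = A_mod[2, 0] = 1        # insert the potential edge (0, 2)
A_mod = A_mod.tocsr()
after = (normalize(A_mod) @ normalize(A_mod))[u].toarray().ravel()
print(before)                         # [A_hat^2]_u before the flip
print(after)                          # ... and after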
Returns the text between the first occurrence of begin and the next occurrence of end | def between(text, begin, end):
idx1 = text.find(begin)
idx2 = text.find(end,idx1)
if idx1 == -1 or idx2 == -1:
return ''
return text[idx1 + len(begin):idx2].strip() | [
"def extract(text, start, end, end_is_optional=True, inclusive=False):\n if start:\n try:\n s = text.split(start, 1)[1]\n except IndexError:\n raise LookupError\n if inclusive:\n s = start + s\n else:\n s = text\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
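Usage of between, doctest-style; these examples are inferred from the code above, not taken from the source:

>>> between("name: Alice; age: 30", "name:", ";")
'Alice'
>>> between("no markers here", "[", "]")
''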
Returns sha256 hexdigest of given data. | def sha256_hex(data):
if isinstance(data, unicode):
return hashlib.sha256(data.encode('utf-8')).hexdigest()
else:
return hashlib.sha256(data).hexdigest() | [
"async def __get_sha256(self, data):\n\n m = hashlib.sha256()\n m.update(data)\n return m.hexdigest()",
"def doubleHashHex (data):\n\n hasher = hashlib.sha256 ()\n hasher.update (binascii.unhexlify (data))\n data = hasher.digest ()\n\n hasher = hashlib.sha256 ()\n hasher.update (data)\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
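The record above is Python 2 code: unicode is not a name in Python 3. A hedged Python 3 equivalent, where str takes the role unicode played:

import hashlib

def sha256_hex(data):
    # In Python 3, text must be encoded before hashing; bytes pass through.
    if isinstance(data, str):
        data = data.encode("utf-8")
    return hashlib.sha256(data).hexdigest()

assert sha256_hex("abc") == sha256_hex(b"abc")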
Read the TraceAttributes for a specific trace in the file. | def get_trace_attrs(self, idx: int) -> TraceAttributes:
return read_trace(self, idx=idx, what="attrs") | [
"def read_trace(\n cf: CacheFile, idx: int, what: str = \"attrs\"\n) -> Union[TraceData, TraceAttributes]:\n if type(idx) != int:\n raise ValueError(\"Index must be an integer\")\n if idx >= 0:\n cnt = -1\n with read_file(cf.fname) as f:\n for origin in f.keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overwrite the TraceAttributes for a trace. The original file and index of the trace are specified as fields within the TraceAttributes. | def update_trace_attributes(attrs: TraceAttributes):
index: int
index = attrs["original_index"] # type: ignore
if type(index) != int:
raise ValueError("Index must be an integer")
fname = attrs["original_file"]
attrs = filter_trace_attrs(attrs)
if index >= 0:
cnt = -1
wi... | [
"def get_trace_attrs(self, idx: int) -> TraceAttributes:\n return read_trace(self, idx=idx, what=\"attrs\")",
"def reset_trace_record():\n global _TRACE_RECORD\n _TRACE_RECORD = {}",
"def test_header_to_trace_set_params(self):\n trace_count = 100\n sample_count = 1000\n\n try:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read either metadata or attributes for a specific trace. | def read_trace(
cf: CacheFile, idx: int, what: str = "attrs"
) -> Union[TraceData, TraceAttributes]:
if type(idx) != int:
raise ValueError("Index must be an integer")
if idx >= 0:
cnt = -1
with read_file(cf.fname) as f:
for origin in f.keys():
for ix, key ... | [
"def read_args(self,filename,varnames):\n for name in varnames:\n self.args[name]=ebf.read(filename,'/'+name)",
"def _read_attributes (self, log_entry):\n\n\t\t# regex to extract \"normally formatted attributes\"\n\t\tinfoRE = \"WorkFlowServices:[ \\t]?(?P<attr>.*?):[ \\t]+?(?P<val>.*)\"\n\n\t\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
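The cachefile helpers above share a two-level h5py layout: one group per origin, one dataset per trace, with metadata stored in attrs. A minimal sketch of that layout and the flat index walk read_trace performs; the structure is an assumption reconstructed from the loops in the records, not the project's actual schema:

import h5py
import numpy as np

with h5py.File("cache.hdf5", "w") as f:
    g = f.create_group("session-1")                  # one group per origin
    dset = g.create_dataset("0", data=np.arange(5))  # one dataset per trace
    dset.attrs["readout"] = "example"

with h5py.File("cache.hdf5", "r") as f:
    idx, cnt = 0, -1
    for origin in f.keys():
        for key in f[origin].keys():
            cnt += 1
            if cnt == idx:                       # flat index across all origins
                print(dict(f[origin][key].attrs))    # the trace attributes
                print(f[origin][key][()])            # the trace data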
Recover the two parts of a cachefile, i.e. annotations and traces. | def recover_parts(cf: CacheFile) -> Tuple[List[Annotations], List[List[TraceData]]]:
with read_file(cf.fname) as f:
events, traces = [], []
for origin in f.keys():
yml = dict()
yml["origin"] = origin
yml["attrs"] = parse_traceattrs(f[origin].attrs)
tr... | [
"def cacheFile(cacheInfo=\"string\", noBackup=bool, prefix=bool, pointCount=bool, fileName=\"string\", refresh=bool, geometry=bool, runupFrames=int, interpEndTime=(), format=\"string\", inTangent=\"string\", worldSpace=bool, doubleToFloat=bool, sampleMultiplier=int, cacheFileNode=\"string\", outAttr=\"string\", poi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new cachefile from annotations and traces. | def populate(
tf: FileName, annotations: List[Annotations], traceslist: List[List[TraceData]]
) -> FileName:
tf = Path(tf).expanduser().absolute()
# populate the cachefile
with h5py.File(tf, "w") as f:
print(f"Merging into {tf.name} from:")
for settings, traces in zip(annotations, traces... | [
"def cacheFile(cacheInfo=\"string\", noBackup=bool, prefix=bool, pointCount=bool, fileName=\"string\", refresh=bool, geometry=bool, runupFrames=int, interpEndTime=(), format=\"string\", inTangent=\"string\", worldSpace=bool, doubleToFloat=bool, sampleMultiplier=int, cacheFileNode=\"string\", outAttr=\"string\", poi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the settlement after the lock is acquired (async S_tr creation). Test that all the settle transactions are set up correctly | def test_s_tr_lock_aquire_creation(self):
create_bill(self.ul, 10)
s = self.create_settlement()
self.assertEqual(s.wait_count, 0)
# s_tr will be set
self.assertEqual(s.settletransaction_set.all().count(), 3)
self.assertEqual(s.settletransaction_set.get(id=1).amount... | [
"def create_test_lock(self, test_uuid):",
"async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Not only test that all the s_tr are finished, but also that every bill's state is switched to finish | def test_s_tr_success_finish(self):
s = self.create_settlement()
for s_tr in s.settletransaction_set.all():
# both user has agree to this payment
s_tr.approve(s_tr.from_u)
s_tr.approve(s_tr.to_u)
self.assertEqual(s.state, FINISH)
self.assertEqual(s.... | [
"def test_s_tr_lock_aquire_creation(self):\n create_bill(self.ul, 10)\n\n s = self.create_settlement()\n\n self.assertEqual(s.wait_count, 0)\n\n # s_tr will be setted\n self.assertEqual(s.settletransaction_set.all().count(), 3)\n\n self.assertEqual(s.settletransaction_set.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
cursor = mysql.connection.cursor() sorgu = "Select * from articles where id = %s" result = cursor.execute(sorgu,(id,)) | def article(id):
article = Articles.query.filter_by(id = id).first()
if article:
#article = cursor.fetchone()
goruntulenme = article.goruntulenme
goruntulenme = goruntulenme + 1
"""sorgu2 = "Update articles Set goruntulenme = %s where id = %s"
cursor = mysql.connection.cu... | [
"def search_id(id, table, conn):\n query = 'select * from %s where id=%d' % (table, id)\n result = conn.execute(query).fetchall()\n if not result: #id not in our database, return False\n return False\n else:\n return str(result[0][1]) #return the url of the id",
"def query(self, sql):",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
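The commented-out raw SQL in the record above increments a view counter; with the ORM objects the view already uses, the same update is an attribute bump plus a commit. A hedged Flask-SQLAlchemy sketch; the app wiring, route, and column definitions are assumptions, not the original project's code:

from flask import Flask, abort
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
db = SQLAlchemy(app)

class Articles(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    goruntulenme = db.Column(db.Integer, default=0)

@app.route("/article/<int:id>")
def article(id):
    article = Articles.query.filter_by(id=id).first()
    if not article:
        abort(404)
    article.goruntulenme += 1   # replaces "Update articles Set goruntulenme = ..."
    db.session.commit()
    return f"views: {article.goruntulenme}"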