query (string) | document (string) | negatives (list) | metadata (dict)
|---|---|---|---|
Return the directory associated with the patient, study and series of the DICOM data set. | def get_series_directory(self, data_set):
specific_character_set = (
data_set.as_string(odil.registry.SpecificCharacterSet)
if odil.registry.SpecificCharacterSet in data_set
else odil.Value.Strings())
def decode(value):
return odil.as_unicode(value, speci... | [
"def _get_all_dicom_filepaths(patient_root_dir=''):\n walk = os.walk(patient_root_dir)\n \n dicom_filepaths = []\n for w in walk: \n dicom_files = [f for f in w[-1] if '.dcm' in f] # extracts .dcm files \n basepath = pathlib.Path(w[0]) # extracts the basepath \n full_paths = [os.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an ISO9660 compatible version of input string. | def to_iso_9660(self, value):
value = value[:8].upper()
value = re.sub(r"[^A-Z0-9_]", "_", value)
return value | [
"def format_filename(s):\n valid_chars = \"-_. %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n return filename",
"def from_iso(self) -> str:\n return self._frm_iso",
"def makestr(string: str):\n if os.path.isfile(string) or os.path.isdir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
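
The row above shows the complete body of `to_iso_9660`, so a short, self-contained usage sketch is enough to see the effect (truncate to 8 characters, uppercase, and map anything outside A-Z, 0-9 and underscore to an underscore); the sample input is illustrative only.

```python
import re

def to_iso_9660(value):
    # Keep at most 8 characters, uppercase them, and replace every
    # character outside A-Z, 0-9 and "_" with an underscore.
    value = value[:8].upper()
    return re.sub(r"[^A-Z0-9_]", "_", value)

print(to_iso_9660("t1-head scan"))  # -> "T1_HEAD_"
```
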
This function get all albums dates of a user | def db_annotater_get_user_album_dates(albums_queryset):
# analyse the queryset of all albums of a user
latest_date = ""#datetime.now().date()
submit_dates = []
unsubmit_dates = []
latest_album = None
for album_date in albums_queryset:
if album_date['annotation'] is True:
new_date = get_date_dash_d_m_y(alb... | [
"def db_annotater_get_latest_user_albums(album_date):\n\tstart_at\t= album_date['start_at']\n\tend_at\t\t= album_date['end_at']\n\t(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)\n\twear_time \t= [{\"hours\":str(hours),\"minutes\":str(mins)}]\n\talbum_id \t= album_date['id']\n\tif album_date['annotation... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function get all albums dates of a user | def db_annotater_get_latest_user_albums(album_date):
start_at = album_date['start_at']
end_at = album_date['end_at']
(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)
wear_time = [{"hours":str(hours),"minutes":str(mins)}]
album_id = album_date['id']
if album_date['annotation'] is True:
submitted = ... | [
"def db_annotater_get_user_album_dates(albums_queryset):\n\n\t# analyse the queryset of all albums of a user\n\tlatest_date \t= \"\"#datetime.now().date()\n\tsubmit_dates\t= []\n\tunsubmit_dates\t= []\n\tlatest_album\t= None \n\tfor album_date in albums_queryset:\n\t\tif album_date['annotation'] is True:\n\t\t\tnew... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function get all annotation terms from the database and return it to the interface | def db_get_annotation_terms(uid):
terms = {}
terms_queryset = AnnotationTerm.objects.filter(Q(private=False) | Q(user=uid)).values('concept', 'category')
# all public terms
for term_attr in terms_queryset:
# get attributes
category = str(term_attr['category']).strip()
concept = str(term_attr['concept']).str... | [
"def get_terms(self):\n return Term.objects.filter(projects=self) # get a queryset of all terms for a project\n # [term.get_mapping(self.appname) for term in project_terms]",
"def annotate_terms(text_file, output_file):\n init_data = read_data('lexicon.tsv')\n data = select_data(init_data)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function deletes one image from the database as mark visible = False | def db_delete_one_image(imgId):
print "delete one image from database: "+ str(imgId)
image = Picture.objects.get(pk=imgId)
image.visible = False
image.save() | [
"def delete(self, *args, **kwargs):\n self.image.delete()\n super(StoredImage, self).delete(*args, **kwargs)",
"def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)",
"def delete_image(self, http_request, image_id):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function return all annotatees list for user uid | def db_annotater_get_user_annotatees(uid):
annotatees = AnnotationTask.objects.filter(annotator_id=uid).values('subject','no_album', 'finished')
return annotatees | [
"def get_annotations_for_user_id(annotations, user_id):\n rows = annotations[\"rows\"]\n return [r for r in rows if r[\"user\"][\"id\"] == user_id]",
"def get_annotations_keyed_by_user_id(annotations):\n rows = annotations[\"rows\"]\n annotations_by_user = {}\n for r in rows:\n user_id = r[\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function saves all user annotation from the interface into the database | def db_annotater_insert_user_annotation(uid, image_ids, annotation_terms):
try:
for iid in image_ids:
for term in annotation_terms:
aid = AnnotationTerm.objects.filter(concept=term)[0].id
#print aid
#print "---aid-----"
annotation_action = AnnotationAction(annotator=User(id=uid), image=Picture(id=... | [
"def save_annotations_to_file():\n sid = request.form['sid']\n onsets = list(map(float, request.form['onsets'].split(',')))\n durations = list(map(float, request.form['durations'].split(',')))\n # TODO: Clean this up for descriptions with commas\n descriptions = request.form['descriptions'].split(','... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the events manager can find upcoming events | def test_get_future_events(self):
upcoming_events = Event.objects.upcoming_events()
# There are 2 upcoming events
assert len(upcoming_events) == 10
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events]) | [
"def test_search_events(self):\n pass",
"def testGetEvents(self):\n self.activity.type = \"event\"\n self.activity.depends_on = \"True\"\n self.activity.name = \"name\"\n self.activity.pub_date=datetime.datetime.today()\n self.activity.expire_date=datetime.datetime.today() + datetime.timedel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a graph to a dot file. | def to_dot(
g,
dname="tmp",
fname="test",
extension=".dot",
return_fname=False,
ortho=False,
fi_labels=False,
):
# Layout
if fi_labels:
for e in g.edges():
g.edges()[e]["label"] = "{0:.2f}".format(g.edges()[e].get("fi", 0))
dot = nx.drawing.nx_pydot.to_pydot... | [
"def write_dot_file(self, out_file_path):\n nx.nx_agraph.write_dot(self, out_file_path)",
"def _write_dot(self):\n if self.dot_file:\n write_dot(self.graph, self.dot_file)",
"def write_graph(self, filename):\n pass",
"def writeDOT(G, filename, directed=False):\n writefile = open... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loops though a list of sequences and applies the given function to each to get the corresponding tags. Also handles printing output. | def tag_all(sequence_list, tagger, normaliser=None, output_file=None):
out = []
start = time.time()
total_sents = len(sequence_list)
for i, unlabeled_sequence in enumerate(sequence_list, start=1):
print "Sentence {0} ({1:2.2f}%)".format(i, float(i)/len(sequence_list) * 100)
display = [unlabeled_sequence]
t0 ... | [
"def markupSeq(seq, ulPosList, boldPosList, annots = {}):\n annotStarts = {}\n annotEnds = defaultdict(set)\n for (start, end), aDict in annots.iteritems():\n annotStarts[start] = aDict\n aDict[\"end\"] = end\n\n ulStarts = set([x[0] for x in ulPosList])\n ulEnds = set([x[1] for x in ul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A Simple PATCH request to the API Server | def sample_patch_request(host, username, password, resource, data):
# build the URL
url = urlunparse(('https', host, resource, None, None, None))
print "PATCH: %s" % url
return requests.patch(url, json=data, auth=HTTPBasicAuth(username, password), verify=False) | [
"def method_patch(self, uri, **kwargs):\r\n return self._api_request(uri, \"PATCH\", **kwargs)",
"def update_request():",
"def patch(self):\n req_op = self.get_argument('op')\n req_path = self.get_argument('path')\n req_value = self.get_argument('value', None)\n req_from = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
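
The PATCH helper above is written for Python 2 (`print "PATCH: %s" % url`). A minimal Python 3 sketch of the same call, assuming the `requests` library is available (host, resource and credentials are placeholders), could look like this:

```python
from urllib.parse import urlunparse

import requests
from requests.auth import HTTPBasicAuth

def sample_patch_request(host, username, password, resource, data):
    # Assemble an https URL from its parts and send the payload as JSON,
    # skipping certificate verification exactly as the original does.
    url = urlunparse(("https", host, resource, "", "", ""))
    print("PATCH: %s" % url)
    return requests.patch(url, json=data,
                          auth=HTTPBasicAuth(username, password),
                          verify=False)
```
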
Test when the certificate has expired. | def test_https_expired(self):
domain = inspect("expired.badssl.com")
basic_check(domain.https)
self.assertTrue(domain.https.https_expired_cert) | [
"def test_check_cert(certfile):\n cert = load_cert(certfile)\n\n now = datetime.datetime.utcnow()\n if now > cert.not_valid_after:\n raise Exception(\"Certificate has expired!\")\n\n elif now + datetime.timedelta(hours=20) > cert.not_valid_after:\n print('> Certificate expiring soon: %s' %... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test when the certificate has a bad hostname. | def test_https_bad_hostname(self):
domain = inspect("wrong.host.badssl.com")
basic_check(domain.https)
self.assertTrue(domain.https.https_bad_hostname) | [
"def test_invalidHostname(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"wrong-host.example.com\",\n u\"correct-host.example.com\",\n )\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test when there is a bad chain of trust for a certificate. | def test_https_bad_chain(self):
domain = inspect("untrusted-root.badssl.com")
basic_check(domain.https)
self.assertTrue(domain.https.https_bad_chain) | [
"def test_trustRootPlatformRejectsUntrustedCA(self):\n caSelfCert, serverCert = certificatesForAuthorityAndServer()\n chainedCert = pathContainingDumpOf(self, serverCert, caSelfCert)\n privateKey = pathContainingDumpOf(self, serverCert.privateKey)\n\n sProto, cProto, sWrapped, cWrapped, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test when a certificate is selfsigned. | def test_https_self_signed_cert(self):
domain = inspect("self-signed.badssl.com")
basic_check(domain.https)
self.assertTrue(domain.https.https_self_signed_cert) | [
"def allow_self_signed_certificate():\n\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n ssl._create_default_https_context = _create_unverified_https_context\n except AttributeError:\n # legacy Python that doesn't verify HTTPS certificates by default\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a vtk Unstructured Grid file (.vtk, .vtu) from a welltracks DataFrame | def makeVTKWells(fname_base, welltracks_df, xml=False):
numpoints = welltracks_df.shape[0]
wells = welltracks_df['Well'].unique().tolist()
numwells = len(wells)
grid = vtkUnstructuredGrid()
points = vtkPoints()
for i in range(numpoints):
points.InsertNextPoint(welltrack... | [
"def byu_to_vtk(byu_filename, vtk_filename):\n V, F = load_surface(byu_filename)\n nv, nf = V.shape[1], F.shape[0]\n with open(vtk_filename, 'w+') as f:\n f.write('# vtk DataFile Version 3.0\\n')\n f.write('Surface Data\\n')\n f.write('ASCII\\n')\n f.write('DATASET POLYDATA\\n\\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a query string from a dictionary | def createQueryString(query_dict):
query_string = ''
(id == 1) | (id == 2) | (id == 3) | (id == 4)
for k, l in query_dict.iteritems():
for v in l:
query_string += '({0}=={1})|'.format(k,v)
query_string = query_string[:-1]
return query_string | [
"def generate_query_string(query_dict):\n if isinstance(query_dict, str):\n return query_dict\n q = ''.join(\"{}={}&\".format(k, v) for k, v in query_dict.items() if v)\n return q[:-1]",
"def _dict2query_string(query_params, sort=False):\n\n query_params = query_params.copy(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
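
The `createQueryString` row above iterates with `iteritems`, so it is Python 2, and the stray `(id == 1) | (id == 2) | ...` line appears to be a leftover example of the output format. A hedged Python 3 sketch of the same idea:

```python
def create_query_string(query_dict):
    # Build a ParaView-style selection query such as "(id==1)|(id==2)".
    terms = []
    for key, values in query_dict.items():
        terms.extend("({0}=={1})".format(key, value) for value in values)
    return "|".join(terms)

print(create_query_string({"id": [1, 2, 3, 4]}))  # (id==1)|(id==2)|(id==3)|(id==4)
```
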
Find the Cell IDs of a well in Paraview. Creates a query string that can be copied to Paraview's find function. | def findWellBlocks(well_name, welltracks_df, vtr_file):
grid = readVTK(vtr_file)
ids = findIntersectedBlocks(well_name, welltracks_df, grid)
query_dict = {'id':ids}
return createQueryString(query_dict) | [
"def find_wells(input_text, **kwargs):\n session = get_global_session()\n return session.find_wells(input_text, **kwargs)",
"def _index_of_cell(self, query):\n x = 0\n for table in self.doc.tables:\n y = 0\n for row in table.rows:\n z = 0\n for cell in row.cells:\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete Buffer Object and any associated query object | def delete(self):
if self.__buffer__ is not None:
glDeleteBuffers(1,[self.__buffer__])
self.__buffer__=None
if self.__query__ is not None:
glDeleteQueries(1, [self.__query__])
self.__query__=None | [
"def delete(obj):",
"def purge(self):\n\tif self.isReferenced():\n\t log.warning('This function is not designed for referenced buffer nodes')\n\t return False\n\t\n userAttrs = mc.listAttr(self.mNode,userDefined = True) or []\n for attr in userAttrs:\n if 'item_' in attr:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the .fgd files specified in the config file | def loadFGDFiles(self):
self.fgd = Fgd()
numVals = LEConfig.fgd_files.getNumUniqueValues()
if numVals == 0:
QtWidgets.QMessageBox.critical(None, LEGlobals.AppName,
"No FGD files specified in local config!", QtWidgets.QMessageBox.Ok)
sys.exit(1)
... | [
"def read_dfg(file_path):\n from pm4py.objects.dfg.importer import importer as dfg_importer\n dfg, start_activities, end_activities = dfg_importer.apply(file_path)\n return dfg, start_activities, end_activities",
"def get_cfg():\n cfg_path = os.path.join(os.path.expanduser('~'), '.cfgnfo')\n\n cfg_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepares a Chrome driver that puts the searches into querybyschool mode with the department set to Computer Science. | def prep_query_by_school_driver():
driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver'))
columbia_url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(COLUMBIA_ID)
driver.get(columbia_url)
driver.find_element_by_class_name('close-this... | [
"def setup_driver():\n # Import webdriver for interactive webpages\n from selenium import webdriver\n\n # Initiate Selenium using the chrome browser, would be nice to have future editions include other browsers\n chromedriver = Chromedriver_path\n driver = webdriver.Chrome(executable_path=chromedrive... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the names and url's of professors for this school. If only_take_top_20, only the top (most reviewed) professors are included this is easier because the top 20 are shown when the page loads. If all professors are desired, then the driver iterates through the alphabet and takes the top 20 for each filtered result (e... | def get_professors_from_school(driver, school_id, only_take_top_20 = False):
url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)
driver.get(url)
num_professors = int(driver.find_element_by_xpath("//span[@class='professor-count']").text)
... | [
"def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current professors listed on a school's page, given its filter settings. | def get_current_list_of_professors(driver):
results = []
list_elems = driver.find_elements_by_xpath("//li[contains(@id, 'my-professor')]")
for li in list_elems:
link = li.find_element_by_tag_name('a')
url = link.get_attribute('href')
name = link.find_element_by_class_name('name').tex... | [
"def get_professors_from_school(driver, school_id, only_take_top_20 = False):\n url = 'https://www.ratemyprofessors.com/search.jsp?queryBy=schoolId&schoolID={}&queryoption=TEACHER'.format(school_id)\n driver.get(url)\n num_professors = int(driver.find_element_by_xpath(\"//span[@class='professor-count']\").... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the professor page and their reviews. | def parse_professor_page(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
reviews_heading = soup.find('div', attrs={'data-table':'rating-filter'})
if reviews_heading is None:
return 0, []
num_reviews = int(reviews_heading.text.split()[0])
reviews_table = soup.fi... | [
"def parse_individual_review(self, html_webpage, url_webpage):\n \n #Name of the location\n re_location_name = re.compile(r\"ui_pill inverted.*\\\"(.*)\\\"<\", re.S)\n \n #Name of the entity\n re_entity_name = re.compile(r\"HEADING.+>(.*)<\", re.S)\n \n re_use... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes the corpus filename from a professor's name and their page url. | def make_filename(prof_name, prof_url):
tid = extract_prof_id(prof_url)
prof_name_id = '_'.join(prof_name.split())
return PATH_TO_CORPUS + '{}__{}.txt'.format(prof_name_id, tid) | [
"def make_name(linkurl, topic):\n fileRegex = re.compile(r'^(.*)/(20\\d\\d)/(\\d\\d)/(\\d\\d)/(.*)$')\n mo = fileRegex.search(linkurl)\n date_part = mo.group(2)+'-'+mo.group(3)+'-'+mo.group(4)\n gen_name = date_part+'-'+topic+'-'+mo.group(5)+'.html'\n filename = os.path.join(POSTSDIR, gen_name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Predicts the gender of a professor, given their reviews. | def predict_gender_from_reviews(reviews):
m_count = 0
f_count = 0
for r in reviews:
if r['text']:
toks = r['text'].lower().split()
counts = Counter(toks)
for mp in MALE_PRONOUNS:
if mp in counts:
m_count += counts[mp]
... | [
"def evaluate_gender_prediction(training_gender_df, \n test_gender_df, print_flag=False):\n f1_scores = []\n method_name = ['Frequency', 'Closest']\n # read the test set for obtaining the gender column (response)\n test_set = pd.read_csv(\"../data/deaths-test.csv\")\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects the url's to all schools in the U.S. on Rate My Professor. Saved in school2id.pkl. | def collect_schools():
MIN_OFFSET = 0
MAX_OFFSET = 6700
STEP_SIZE = 20
school2id = {}
num_failed = 0
for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE):
if offset % 100 == 0: print(offset)
url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&stateselect=&c... | [
"def college_transfer_scrape(schools):\n \n #links =\n\n #for i, link in enumerate(links):\n # soup = make_soup(link)\n \n # for item in soup.findAll():\n # stuff = ''\n \n # schools[i]['item'] = stuff\n \n return schools",
"def college_data_scrape(schools,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects the list of CS professor pages per school. Saved in school2info.pkl. | def collect_professors_per_school(only_take_top_20):
school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))
sorted_schools = sorted(list(school2id.keys()))
print(len(sorted_schools))
school2info = {}
driver = prep_query_by_school_driver()
total_num_profs = 0
total_num_prof_page... | [
"def collect_schools():\n MIN_OFFSET = 0\n MAX_OFFSET = 6700\n STEP_SIZE = 20\n school2id = {}\n num_failed = 0\n for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE):\n if offset % 100 == 0: print(offset)\n url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Edits school2info.pkl to collect more professor pages for schools with more than 20 CS professors. | def edit_professors_per_school():
driver = prep_query_by_school_driver()
fn = '../1.rate_my_prof/school2info.pkl'
school2info = pickle.load(open(fn, 'rb'))
missing_before = 0
missing_now = 0
for school, (sid, num_profs, prof_pages) in school2info.items():
if len(prof_pages) < num_profs:
... | [
"def collect_professors_per_school(only_take_top_20):\n school2id = pickle.load(open('../rate_my_prof/school2id.pkl', 'rb'))\n sorted_schools = sorted(list(school2id.keys()))\n print(len(sorted_schools))\n school2info = {}\n driver = prep_query_by_school_driver()\n total_num_profs = 0\n total_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the text corpus, where there is one text file per professor, and the text file consists of all of that professor's reviews. | def build_corpus(start_idx, num_schools_to_process):
current_corpus = get_current_corpus()
school2info = pickle.load(open('../1.rate_my_prof/school2info.pkl', 'rb'))
sorted_schools = sorted(list(school2info.keys()))
print('Total num schools:', len(sorted_schools))
end_idx = min(len(sorted_schools), ... | [
"def create_reuters_corpus():\n #adapted from\n #https://medium.com/@namanjain2050/\n #finding-similar-documents-reuters-dataset-example-part-4-eb0462e1ab2b\n documents = []\n corpus_filename = config.CORPUS[config.REUTERS]['corpusxml']\n if os.path.exists(corpus_filename) is True:\n if os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a queue of RDDs that will be mapped/reduced one at a time in 1 second intervals. | def process_rdd_queue(twitter_stream, nb_tweets = 5):
rddQueue = []
for i in range(nb_tweets):
json_twt = get_next_tweet(twitter_stream, i )
dist_twt = ssc.sparkContext.parallelize([json_twt], 5)
rddQueue += [dist_twt]
lines = ssc.queueStream(rddQueue, oneAtATime=False)
lines.p... | [
"def create_queue(self, queue):",
"def fill_queue(orders_of_the_day, queue_of_the_day):\n for order in orders_of_the_day:\n queue_of_the_day.enqueue(order)\n return queue_of_the_day",
"def run_through_queue(arrival_epoch):\n\n MODE = 'single' # 'single' or 'changing_multi' or 'const_multi'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resizes image to target size progressively. Different from normal resize, this function will reduce the image size progressively. In each step, the maximum reduce factor is 2. | def progressive_resize_image(image, size):
if not isinstance(image, np.ndarray):
raise TypeError(f'Input image should be with type `numpy.ndarray`, '
f'but `{type(image)}` is received!')
if image.ndim != 3:
raise ValueError(f'Input image should be with shape [H, W, C], '
... | [
"def resize_image(image, size):\n return skimage.transform.resize(image, size, preserve_range=True)",
"def rescaled_image():",
"def _resize(self):\n avg_frames = 87 #this is the average image frame length in the entire dataset\n for i in range(len(self.data)):\n image = self.data[i]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specialized record with correlation_id. | def makeRecord(self, *args, **kwargs):
rv = super(LEGALogger, self).makeRecord(*args, **kwargs)
# Adding correlation_id if not already there
if 'correlation_id' in rv.__dict__.keys():
return rv
rv.__dict__['correlation_id'] = _cid.get() or '--------'
return rv | [
"def gen_record(document_id, primary_doc, gen_links):\n pass",
"def _patient_wrapper(row):\n from bhoma.apps.patient.models import CPatient\n data = row.get('value')\n docid = row.get('id')\n doc = row.get('doc')\n if not data or data is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yields EXPLAIN result rows for given queries | def explain_queries(database, queries):
# analyze only SELECT queries from the log
for query in filter(is_select_query, queries):
try:
for row in database.explain_query(query):
table_used = row['table']
index_used = row['key']
yield (query, ta... | [
"def explain_queries(database, queries):\n # analyze only SELECT queries from the log\n for query in filter(is_select_query, queries):\n for row in database.explain_query(query):\n table_used = row['table']\n index_used = row['key']\n\n yield (query, table_used, index_u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Randomizes days of week and hours when lectures will take place | def set_lectures_time(self, min_hour=8, max_hour=19, days=[1,2,3,4,5]):
print("--- set lectures time ---")
dict_lectures, dict_group_lectures = self.prepare_lectures()
for sch_subject_list in dict_group_lectures.values():
tries = HOW_MANY_TRIES
while tries > 0:
... | [
"def gen_modelled_date(start_date, end_date):\n # 2012, 2013, 2014\n year_model = [1, 2, 4]\n year_model = reduce(lambda x, y: x+y, [[year]*freq for year, freq in\n zip(range(2012, 2015), year_model)])\n rand_year = random.choice(year_model)\n\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The used column for addresses in the derivation_paths table should be a zero or greater run of 1's, followed by a zero or greater run of 0's. There should be no used derivations after seeing a used derivation. | def check_addresses_used_contiguous(derivation_paths: List[DerivationPath]) -> List[str]:
errors: List[str] = []
for wallet_id, dps in dp_by_wallet_id(derivation_paths).items():
saw_unused = False
bad_used_values: Set[int] = set()
ordering_errors: List[str] = []
# last_index = N... | [
"def check_unexpected_derivation_entries(\n self, wallets: List[Wallet], derivation_paths: List[DerivationPath]\n ) -> List[str]:\n\n errors = []\n wallet_id_to_type = {w.id: w.wallet_type for w in wallets}\n invalid_wallet_types = []\n missing_wallet_ids = []\n wrong_ty... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for unexpected derivation path entries Invalid Wallet Type Wallet IDs not in table 'users_wallets' Wallet ID with different wallet_type | def check_unexpected_derivation_entries(
self, wallets: List[Wallet], derivation_paths: List[DerivationPath]
) -> List[str]:
errors = []
wallet_id_to_type = {w.id: w.wallet_type for w in wallets}
invalid_wallet_types = []
missing_wallet_ids = []
wrong_type = defaultd... | [
"def validate_swap_path(whole_swap,swaps):\n\n balances = dict()\n\n src_token, dst_token, amount_in, amount_out, sender, receiver = whole_swap\n\n balances[src_token] = amount_in \n balances[dst_token] = - amount_out \n\n for src_token, dst_token, amount_in, amount_out, sender, receiver in swaps:\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compare a GPTJConfig with a finetuneanon GPTNeoConfig config and ensure they match. Required if loading a pretrained model | def finetuneanon_lm_config_check(config: GPTJConfig, finetuneanon_config: GPTNeoConfig):
if finetuneanon_config.jax == False:
raise ValueError(
"GPTNeo model in https://github.com/finetuneanon/transformers is equivalent to gptj only with jax=True"
)
if finetuneanon_config.rotary == F... | [
"def check_nn_config(f_config):\n\n if f_config[\"model_type\"] in [\"nrms\", \"NRMS\"]:\n required_parameters = [\n \"doc_size\",\n \"his_size\",\n \"user_num\",\n \"wordEmb_file\",\n \"word_size\",\n \"npratio\",\n \"data_forma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enumerate over an iterable in reverse order while retaining proper indexes | def reverse_enumerate(iterable):
# Lifted from http://galvanist.com/post/53478841501/python-reverse-enumerate
return itertools.izip(reversed(xrange(len(iterable))), reversed(iterable))
# Alternative python3 version:
# return zip(reversed(range(len(iterable))), reversed(iterable)) | [
"def reverse_enumerate(iterable):\n return izip(reversed(range(len(iterable))), reversed(iterable))",
"def reversed_enumerate(seq):\r\n return izip(reversed(xrange(len(seq))), reversed(seq))",
"def rev_enumerate(seq):\n cnt = 0\n seq = reverse(seq)\n for i in seq:\n yield len(seq)-cnt-1, i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
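
The comment inside the `reverse_enumerate` row already names the Python 3 replacement for `izip`/`xrange`; spelled out as runnable code (it works for any sized sequence):

```python
def reverse_enumerate(iterable):
    # Pair each element with its original index, walking from the end.
    return zip(reversed(range(len(iterable))), reversed(iterable))

for index, value in reverse_enumerate(["a", "b", "c"]):
    print(index, value)   # 2 c, then 1 b, then 0 a
```
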
Create an object to do various processing with a reddit thread (rendering to different formats). thread_id is the optional id of the reddit submission to squash (check the URL). If thread_id is not None, the thread will be remotely fetched and parsed from reddit (this can easily take an hour if the number of comments e... | def __init__(self, thread_id=None, json=None, author_map=None, characters=None):
self._thread = None
self.thread_id = thread_id
self.comment_data = None
self.author_map = author_map or {}
self.characters = characters or {}
self.commentlist = []
# Create a handle for accessing reddit, and load the thread
... | [
"def parse_json(board, json_):\n if 'posts' not in json_ or not json_['posts']:\n raise ValueError('Thread does not contain any posts')\n\n first = json_['posts'][0]\n\n return Thread(board,\n first['no'],\n util.unescape_html(first['sub'])\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a comment (defaults to thread root), find the maximum depth of its descendents | def max_comment_depth(self, comment=None, cur_depth=0):
if comment is None:
comment = self.thread
replies = comment.replies if isinstance(comment, praw.models.Comment) else \
(comment.comments if isinstance(comment, praw.models.Submission) else None)
if replies:
return max(self.max_comment_depth(reply, c... | [
"def _find_max_depth(root):\r\n \r\n if root.children() == {}:\r\n return 1\r\n else:\r\n return 1 + max([_find_max_depth(child)\r\n for child in root.children().values()])",
"def filter_comments_by_max_depth(self, max_depth, comments=None):\n\t\tif comments is None: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete all comments which don't have any descendents at depths >= max_depth | def filter_comments_by_max_depth(self, max_depth, comments=None):
if comments is None:
return
for i, c in reverse_enumerate(comments):
# If the comment has no children at a sufficient depth, delete it altogether,
# Else apply the same algorithm to its children
print i, " -> ", self.max_comment_depth(c)... | [
"def delete_children(self, comments):\n parents = []\n for comment in comments.values():\n if comment['parent']:\n parents.append(comment['parent'])\n\n for comment in comments.values():\n if comment['id'] not in parents:\n # Parent comments t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Flattens a chain of comments, but stops if it gets to an ambiguous point where a comment has more than one child (or no children) | def flatten(self, comment=None):
print 'flattening'
if comment is None:
print 'comment is none'
comment = self.commentlist[0]
while isinstance(comment, praw.models.Comment):
print comment.body_html
yield comment
comment = comment.replies[0] | [
"def flatten_comments(root_comments):\n all_comments = []\n nodes = root_comments[:]\n while nodes:\n node = nodes.pop()\n data = node['data']\n if 'body' not in data:\n #\n # weird child node\n #\n continue\n comment = Comment(data['b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Serialize a flat sequence of comments into an array of dicts that can easily be serialized to JSON. | def comments_to_dicts(self, comments):
list_of_dicts = [{ "author": c.author.name, "body_html":c.body_html,
"created_utc":c.created_utc, "permalink":c.permalink(True) } for c in comments]
return list_of_dicts | [
"def getCommentDictList(comment_list):\n comment_dict_list = []\n for comment in comment_list:\n comment_dict = {}\n author_dict = getAuthorDict(comment.user)\n comment_dict[\"author\"] = author_dict\n comment_dict[\"comment\"] = comment.content\n # TODO python datetime is n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the flat comment array formatted as a JSON string to easily store in a file, etc. | def get_json(self):
return json.dumps(self.comment_data) | [
"def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()",
"def format_comments(self, contents):\n comment_template, reply_template = self.format_templates()\n comments = []\n for i, comment in enumerate(contents):\n comment['num'] = i + 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render a webpage out of the flattened comment data (Experimental) | def get_html(self):
env = jinja2.Environment(loader=jinja2.PackageLoader('mane', 'templates'))
template = env.get_template('basic.html')
# Embed subreddit's css into the html page:
style_info = self.reddit.subreddit("mylittlepony").stylesheet.__call__()
subreddit_css = style_info.stylesheet
images = style_... | [
"def crawl(thread_url):\n\tbase_url = \"https://np.reddit.com\"\n\tcomment_container = list()\n\treq = request.Request(base_url+thread_url, \n \tdata=None, \n \theaders={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/53... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return names for all the nodes in the chain. | def _get_all_nodes(action_chain):
all_nodes = [node.name for node in action_chain.chain]
return all_nodes | [
"def names(self):\n return {node for node in self.graph.nodes if self.name_is_valid(node)}",
"def getAllNodesNames(self, startingNode=None):\n if startingNode is not None:\n snode = self.grid.find(startingNode)\n returnNames = [node.name for node in snode.iter()]\n else:\n returnNames ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function which validates that the provided node name is defined in the workflow definition and it's valid. Keep in mind that we can only perform validation for task names which don't include jinja expressions since those are rendered at run time. | def _is_valid_node_name(self, all_node_names, node_name):
if not node_name:
# This task name needs to be resolved during run time so we cant validate the name now
return True
is_jinja_expression = jinja_utils.is_jinja_expression(value=node_name)
if is_jinja_expression:
... | [
"def is_valid_node_name(name):\n return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))",
"def is_valid_workflow_name(name):\n return bool(re.match('(?s)^[a-zA-Z][a-zA-Z0-9_]*$',name))",
"def validate_task_definition(taskdef_name, version):\n exit_if_none(taskdef_name, \"Missing tas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Format ActionExecution result so it can be used in the final action result output. | def _format_action_exec_result(
self, action_node, liveaction_db, created_at, updated_at, error=None
):
if not isinstance(created_at, datetime.datetime):
raise TypeError(
f"The created_at is not a datetime object was({type(created_at)})."
)
if not isin... | [
"def _format_result(result: CheckResult) -> str:\n builder = StringBuilder()\n\n if result.success:\n builder.add(\n Color.format(\n '[check][{}][end] ... [pass]{}[end]'.format(\n result.config.check_type, result.status.upper()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the contributors and languages for the repo | def _get_repo_contributors_and_languages(self, repo) -> dict:
print(f"start getting contributors and languages for {repo.name}")
languages = self.service.get_languages(repo)
contributors = self.service.get_contributors(repo)
return {
"users": contributors,
"repo":... | [
"def getAllContributors(server,repo):\n contributors={}\n url=server+\"/repos/\"+repo+\"/stats/contributors\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for contributor in dicres:\n additionDeletion=getAdditionsDeletions(contributor.get(\"weeks\"))\n additions=str(additionDelet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group the repositories to the user, so each user will has a list of repositories. | def _aggregate_repositories_to_user(self, data: dict) -> dict:
results = dict()
for result in data:
# result will be a list of users and repo object and repo's languages.
for user in result["users"]:
# check if we get this user on any repo before or not
... | [
"def get_user_repos(username, org_id=None):\n if org_id is None:\n owned_repos = seaserv.list_personal_repos_by_owner(username)\n shared_repos = seafile_api.get_share_in_repo_list(username, -1, -1)\n groups_repos = []\n for group in seaserv.get_personal_groups_by_user(username):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the report filename in the directory. The directory will be created if it's not exists. | def filename(self):
# create the folder if it doesn't exist'
if not os.path.exists(self.report_path):
os.makedirs(self.report_path)
time_now = datetime.now().strftime("%m_%d_%Y_%H_%M")
filename = f"{self.report_path}/report_{time_now}.csv"
return os.path.join(self.rep... | [
"def create_report_dir(self) -> str:\n return create_report_dir_with_rotation(self.dir)",
"def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename",
"def build_answers_distribut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start point for this class, will call all services and write the results into a CSV. | def generate_report(self) -> None:
csv_data = self._run()
self._write_csv(csv_data) | [
"def main():\n\n ticket = get_service_ticket() # create an APIC-EM Auth ticket\n device_id_list = get_device_ids(ticket) # build a list with all device id's\n devices_info = collect_device_info(device_id_list, ticket)\n filename = get_input_file() # ask user for filename input\n output_file = open... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets image IDs from the list of images or dataset | def get_image_list(conn,parameter_map):
# Get images or datasets
message = ""
objects, log_message = script_utils.get_objects(conn, parameter_map)
message += log_message
if not objects:
return None, message
data_type = parameter_map["Data_Type"]
if data_type == "Image":
obj... | [
"def get_image_aids(ibs, gid_list):\n # print('gid_list = %r' % (gid_list,))\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n colnames = ('annot_rowid',)\n aids_list = ibs.db.get(ANNOTATION_TABLE, colnames, gid_list,\n id_colname='image_rowid', unpack_scal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that all necessary dependencies for running the testsuite are met. This includes dependencies coming from the style_checker itself, as well as dependencies coming from the testsuite framework. | def check_dependencies(args):
missing_deps = []
# The list of modules we need to be available in the Python
# distribution.
required_modules = ["pytest", "e3"]
if args.verify_style_conformance:
required_modules.append("flake8")
# The list of programs we need to be installed and accessi... | [
"def check_main_depencies():\n print(\"# Checking dependencies\")\n for tool in TOOLS_NEEDED:\n print(\"[+] Checking %s... \" % tool, end='')\n if which(tool) is not None:\n print(\"ok!\")\n else:\n print(\"missing!\")\n sys.exit()\n\n print()\n prin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if each TSV in data/tsv is present in data/tsv/summary.tsv (and viceversa) as well as if the number of entries in each TSV in data/tsv matches its listed number of entries in data/tsv/summary.tsv. (Basically checks whether generate_summary.py has been run.) | def test_language_data_matches_summary():
name_to_count = {}
with open(_TSV_SUMMARY, "r", encoding="utf-8") as lang_summary:
for line in lang_summary:
language = line.rstrip().split("\t")
name_to_count[language[0]] = int(language[-1])
for unique_tsv in os.listdir(_TSV_DIRECT... | [
"def test_phones_data_matches_summary():\n name_to_count = {}\n with open(_PHONES_SUMMARY, \"r\", encoding=\"utf-8\") as phones_summary:\n for line in phones_summary:\n language = line.rstrip().split(\"\\t\")\n name_to_count[language[0]] = int(language[-1])\n for phones_list in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if each .phones file in data/phones is present in data/phones/summary.tsv and if the number of phones in each .phones file matches its listed number of phones in data/phones/summary.tsv. (Basically checks whether generate_summary.py has been run.) | def test_phones_data_matches_summary():
name_to_count = {}
with open(_PHONES_SUMMARY, "r", encoding="utf-8") as phones_summary:
for line in phones_summary:
language = line.rstrip().split("\t")
name_to_count[language[0]] = int(language[-1])
for phones_list in os.listdir(_PHONE... | [
"def test_language_data_matches_summary():\n name_to_count = {}\n with open(_TSV_SUMMARY, \"r\", encoding=\"utf-8\") as lang_summary:\n for line in lang_summary:\n language = line.rstrip().split(\"\\t\")\n name_to_count[language[0]] = int(language[-1])\n\n for unique_tsv in os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Task to flag all bonds having passed maturity as matured. Also creates a rating decision for each matured bond | def mature_debt():
logger.info("Flagging bonds as matured.")
for i in Issue.objects.matures_today():
# Step 1, flag the bond as matured
i.is_matured = True
i.save()
try:
# See if there is an existing rating decision for this issue
# Current rating
... | [
"def __AcceptAllMandates(self, user):\n if self._mandates:\n for mandate in self._mandates:\n if mandate.IsAcceptedByTrader(user) is False:\n mandate.AddAcceptedTrader(user)\n mandate.Commit()\n getLogger().debug('Accepted man... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recount familiar movies | persons for user | def recount_familiar_objects(content_type_id, user_id):
model = ContentType.objects.get_for_id(content_type_id).model_class()
user = User.objects.get(pk=user_id)
count = UserRelationCount.objects.get_or_create(object=user)[0]
if model == MovieRelation:
count.movies = user.familiar_movies.count... | [
"def _count_movies(request):\n user_ratings_train = _process_data[\"user_ratings_train\"]\n movie_counts = {} # {movie id: count}\n\n # go through \"user_ratings_train\" to build up \"movie_counts\"\n for user_id, movie_ratings in user_ratings_train:\n for movie_id, rating in movie_ratings:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recount relations for movie | person | def recount_object_relations(content_type_id, instance_id):
model = ContentType.objects.get_for_id(content_type_id).model_class()
try:
instance = model.objects.get(pk=instance_id)
except model.DoesNotExist:
return
relations_counts = {}
for code in instance.codes:
relations_co... | [
"def recount_familiar_objects(content_type_id, user_id):\n model = ContentType.objects.get_for_id(content_type_id).model_class()\n user = User.objects.get(pk=user_id)\n\n count = UserRelationCount.objects.get_or_create(object=user)[0]\n\n if model == MovieRelation:\n count.movies = user.familiar_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
nn.MaxPool2d(kernel_size, stride=None, padding=0, dilation=1) NOTABLE PARAMS 1. 'in_channels' == out_channels of conv function 2. 'strides' > if None, defaults as == to kernel_size | def maxpool(self,
kernel_size: Union[Tuple[int], int],
stride: Union[Tuple[int], int] = None,
padding: Union[Tuple[int], int] = 0,
dilation: Union[Tuple[int], int] = 1) -> Tuple[int]:
if not stride:
stride = deepcopy(kernel_size)
... | [
"def maxpool2d_out_dim(in_dim, kernel_size, padding=1, stride=1, dilation=1):\n out_dim = ((in_dim + 2*padding - dilation*(kernel_size-1) - 1)/stride) + 1\n return out_dim\n\n #TODO make a util function to calculate the output size of a layer given a input dim\n #ie get the input size of a linear layer ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
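
The `maxpool` helper above is truncated, but one of its negatives quotes the standard output-size formula used by `nn.MaxPool2d`. A one-dimensional sketch of that formula, with the same default of `stride = kernel_size` noted in the query:

```python
import math

def maxpool_out_size(in_size, kernel_size, stride=None, padding=0, dilation=1):
    # floor((in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1),
    # the formula documented for torch.nn.MaxPool2d, applied to one dimension.
    if stride is None:
        stride = kernel_size
    return math.floor((in_size + 2 * padding
                       - dilation * (kernel_size - 1) - 1) / stride + 1)

print(maxpool_out_size(32, kernel_size=2))             # 16
print(maxpool_out_size(28, kernel_size=3, stride=1))   # 26
```
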
If the form is valid stores the object and return success | def form_valid(self, form):
# stores the object
self.object = form.save()
# send the json response
return self.json_response({'success': True}) | [
"def form_valid(self, form):\n form.save()\n return redirect(self.get_success_url())",
"def form_valid(self, form):\n self.object = form.save(\n author=ReviewAuthor.objects.get(user=self.request.user),\n book=Book.objects.get(id=self.kwargs['pk']))\n return HttpRe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the form is invalid return not success and errors found. | def form_invalid(self, form):
# send the json response and errors
return self.json_response({'success': False, 'errors': form.errors},
status=400) | [
"def form_invalid(self, form):\n print(form.errors)\n return render_to_response(self.get_context_data(form=form))",
"def test_failed_form_validation(self):\n\n form = AnswerForm()\n self.assertFalse(form.is_valid())",
"def are_fields_invalid(self, request):\n # Sends request t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform batch from dataset for the text/label pipelines. Creates lists of labels, text tokens and offsets. | def collate_batch(batch):
label_list, text_list, offsets = [], [], [0]
for (_text, _label) in batch:
label_list.append(label_pipeline(_label))
processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
text_list.append(processed_text)
offsets.append(processed_text.si... | [
"def process_input(text):\n global tokenizer\n\n inputs = tokenizer(text, return_tensors=\"pt\")\n labels = torch.tensor([1]).unsqueeze(0)\n\n return inputs, labels",
"def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
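
The `collate_batch` row is cut off right after the per-example offsets are collected. A common way to finish this pattern (as in the torchtext `EmbeddingBag` tutorials) is to turn the lengths into cumulative offsets and concatenate the token tensors; the two pipelines below are stand-in assumptions, not the originals.

```python
import torch

# Stand-in pipelines: text -> list of token ids, label -> class index.
text_pipeline = lambda text: [hash(tok) % 1000 for tok in text.split()]
label_pipeline = lambda label: int(label) - 1

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for _text, _label in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    labels = torch.tensor(label_list, dtype=torch.int64)
    # Cumulative sum of lengths = start position of each example inside the
    # single concatenated token tensor expected by nn.EmbeddingBag.
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    texts = torch.cat(text_list)
    return labels, texts, offsets
```
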
Wrap model as DDP. | def init_distributed(self):
self.model = DDP(self.model, device_ids=[self.device]) | [
"def dedp(model):\n return model.module if isinstance(model, torch.nn.DataParallel) else model",
"def DistributedFairseqModel(args, model):\n # determine which DDP class to extend\n assert isinstance(model, nn.Module)\n if args.ddp_backend == 'c10d':\n ddp_class = nn.parallel.DistributedDataPar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set device for model. | def set_device(self, device):
self.device = device
self.model = self.model.to(device) | [
"def device_class(self, value):\n\n self._device_class.set(value)",
"def set_device_properties(device_uid, config):\n return runtime.set_device_properties(device_uid, config)",
"def _set_cuda_device(device_id, verbose=None):\n import cupy\n\n cupy.cuda.Device(device_id).use()\n logger.info(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get outputs for a batch of images and text. | def get_outputs(
self,
batch_text: List[str],
batch_images: List[List[Image.Image]],
min_generation_length: int,
max_generation_length: int,
num_beams: int,
length_penalty: float,
) -> List[str]: | [
"def get_batch_size_1_output_images(outputs, b):\n b_1_outputs = {}\n for field in standard_fields.get_output_image_fields():\n if field in outputs:\n b_1_outputs[field] = outputs[field][b:b + 1, Ellipsis]\n return b_1_outputs",
"def infer_batch(self, batch):\n\t\t\n\t\t# decode \n num_batch_ele... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the prompt to use for VQA evaluation. If the answer is not provided, it should be left blank to be generated by the model. | def vqa_prompt(self, question, answer=None) -> str: | [
"def prompt(self) -> str:\n self._logger.info(\"Retrieving voice prompts setting...\")\n raise NotImplementedError(\"Prompt() is not implemented yet.\")",
"def eval_prompt(self, input):\n return input",
"def _get_select_question_input(): # pragma: no cover\n questions = [\n inqui... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the prompt to use for caption evaluation. If the caption is not provided, it should be left blank to be generated by the model. | def caption_prompt(self, caption=None) -> str: | [
"def caption(self) -> str:\n return self.attributes.get('caption', str(self.path))",
"def prompt(self) -> str:\n self._logger.info(\"Retrieving voice prompts setting...\")\n raise NotImplementedError(\"Prompt() is not implemented yet.\")",
"def vqa_prompt(self, question, answer=None) -> str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attach callback to ignite engine, attached method will be called on the end of each epoch | def attach(self, engine: ignite.engine.Engine):
engine.add_event_handler(ignite.engine.Events.EPOCH_COMPLETED, self.store)
engine.add_event_handler(ignite.engine.Events.ITERATION_COMPLETED, self.store) | [
"def every_after_train_step_callback_fn(self, sess):\n pass",
"def after_train_iter(self, trainer):\n self.after_iter(trainer)",
"def every_before_train_step_callback_fn(self, sess):\n pass",
"def addInstanceAddedCallback(*args, **kwargs):\n \n pass",
"def before_train_epoch(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compress and chunk a NetCDF file using NCO using lossless deflation. We save in the "netcdf4" format because only then the chunking will be supported. | def compress_and_chunk(in_file, out_file):
if not os.path.isfile(in_file):
raise FileNotFoundError(f"Cannot find input file '{in_file}'.")
if skip(in_file, out_file):
return out_file
if not shutil.which('ncks'):
raise RuntimeError(f'The command `ncks` is not in the PATH.')
opts =... | [
"def _netCDF4_deflate(outnetcdf):\n cmd = ['ncks', '-4', '-L4', '-O', outnetcdf, outnetcdf]\n try:\n lib.run_in_subprocess(cmd, logger.debug, logger.error)\n logger.debug(f'netCDF4 deflated {outnetcdf}')\n except WorkerError:\n raise",
"def create_intermediate_netcdf(output_name, chl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method helps to set prior for object. This prior can sample proposal theta by sample method | def set_prior(self): | [
"def initPriorHypo(self):\n self.priorHypo = np.ones(model.nhypo)/model.nhypo",
"def prior(self) -> tfp.distributions.Distribution:\n pass",
"def set_default_prior_parameters(self):\n \n # Normal prior (default)\n if self.prior_type == \"normal\":\n self.prior_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method helps to generate replica data from proposal p(theta) from prior This method return replicated data | def generate_replica(self): | [
"def create_petri_net_data_backup(self):\n self._prev_pn_data = self._pn_data.clone()",
"def posterior(self): \n # create a grid over which we will calculate the likelihood\n self.p_grid = np.linspace(0, 1, num = self.g)\n # calculate the probability of observing the data\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method helps to calculate statistics and compare the distance between replica data and original data. input of this method is data and replica data output is the distance First try with Wassenstein Distance | def distance(self,data,replica): | [
"def distance(self,data,replica):\n weight = np.random.multivariate_normal(mean=np.random.normal(size=self.n_dim),cov = np.eye(self.n_dim),size=self.data_dim)\n weight = weight /np.sqrt(np.sum(weight**2,axis=0,keepdims=True))\n data = np.matmul(data,weight)\n replica = np.matmul(replica,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Just use for one dimension data Wasserstein Distance very effective for capture distribution | def distance(self,data,replica):
weight = np.random.multivariate_normal(mean=np.random.normal(size=self.n_dim),cov = np.eye(self.n_dim),size=self.data_dim)
weight = weight /np.sqrt(np.sum(weight**2,axis=0,keepdims=True))
data = np.matmul(data,weight)
replica = np.matmul(replica,weight)
... | [
"def _apply_distance_filter(image: Image, window: Image, members: np.ndarray, weights: np.ndarray) -> np.ndarray:\n distances = np.zeros(image.size)\n for channel in range(3):\n img_channel = image[:, :][:, :, channel]\n win_channel = np.extract(members, window[:, :][:, :, channe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
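
The truncated `distance` method above projects both samples onto random unit directions and compares the one-dimensional projections, i.e. a sliced Wasserstein distance. A self-contained sketch of that idea using scipy's 1-D `wasserstein_distance` (function and parameter names here are assumptions):

```python
import numpy as np
from scipy.stats import wasserstein_distance

def sliced_wasserstein(data, replica, n_projections=50, seed=0):
    # Draw random unit directions, project both samples onto each one,
    # and average the resulting one-dimensional Wasserstein distances.
    rng = np.random.default_rng(seed)
    directions = rng.normal(size=(data.shape[1], n_projections))
    directions /= np.linalg.norm(directions, axis=0, keepdims=True)
    proj_data = data @ directions
    proj_replica = replica @ directions
    return float(np.mean([
        wasserstein_distance(proj_data[:, i], proj_replica[:, i])
        for i in range(n_projections)
    ]))
```
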
Inserting one data. If there is already data with the same ID then overwrite with the new data. | def putData(self, data):
try:
self.getDataset().insert_one(data)
except errors.DuplicateKeyError:
updateData = {'$set': data}
self.getDataset().update_one(
{'_id': data['_id']}, updateData) | [
"def insert_one(self, data):\n self._collection.insert_one(data)",
"def insert(self, data):\n if '_rev' in data:\n self.__not_opened()\n raise PreconditionsException(\n \"Can't add record with forbidden fields\")\n _rev = self.create_new_rev()\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
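
The `putData` row implements insert-then-update-on-`DuplicateKeyError`. With pymongo, `replace_one(..., upsert=True)` is an alternative single call with the same overwrite-by-`_id` effect; the collection handle below is a placeholder for whatever `getDataset()` returns, so this is a sketch rather than the original method.

```python
from pymongo import MongoClient

# Placeholder handle; the original hides this behind self.getDataset().
collection = MongoClient()["demo_db"]["demo_collection"]

def put_data(data):
    # Write the document, overwriting any existing one with the same _id.
    collection.replace_one({"_id": data["_id"]}, data, upsert=True)
```
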
Get one data with the specific ID. | def getId(self, id):
return self.getDataset().find_one({'_id': id}) | [
"def read(self, id):",
"def getById(self, id):\n for item in self.list: \n if item.getId() == id:\n return item",
"def get(self, id): \n student = get(id)\n return student",
"def get_by_id(self, id):\n cursor = db.cursor()\n cursor.execute(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get timestamp of data with the specific ID. | def getTimestamp(self, id):
data = self.getId(id)
if isinstance(data['timestamp'], datetime):
return data['timestamp']
else:
return None | [
"def _get_time_from_id(self) -> datetime:\n return datetime.fromtimestamp(int(self.id.split(' ')[0]) / 1e3)",
"def getTimeStamp(idx):\n return data.loc[idx, 'timestamp']",
"def __get_update_timestamp(session: scoped_session, type: CachedDataType, id: str = None) -> datetime:\n logger = logging.getL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set timestamp of data to current timestamp with the specific ID. | def setTimestamp(self, id):
updateData = {'$set': {'timestamp': datetime.now()}}
self.getDataset().update_one(
{'_id': id}, updateData) | [
"def __update_timestamp(self, track_id, timestamp):\n\n if track_id:\n if timestamp is None:\n timestamp = datetime.utcnow()\n current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)",
"def setTimeStamp(self, ts):\r\n \tself.timeStamp = ts",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set one label at a certain index in the data with the specified ID. | def setData(self, data):
updateData = {'$set': {'label.'+str(data['index']): data['tag']}}
self.getDataset().update_one(
{'_id': data['_id']}, updateData) | [
"def set_label_id(self, label_id):\n self.label_id = label_id",
"def select_label(self, label_id: int) -> Label:",
"def set_label(self, label):",
"def _translate_label(self, data_id, set_label):\n if self._label_translation_table is None:\n self._label_translation_table = {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all duplicate data with the specified text. | def removeDuplicateText(self, text):
self.getDataset().delete_many({"full_text": text}) | [
"def delete_common_words(data):",
"def delete_repeats(text):\n modified_text = ''\n for letter in text:\n for symbol in modified_text:\n if symbol == letter:\n break\n else:\n modified_text += letter\n return modified_text",
"def remove_duplicates(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
this method returns the nodeList of the node without any user | def nodeList_without_user(self):
nlwu = []
for n in self.nodeList:
if type(n[0]) is Node:
nlwu.append(n)
return nlwu | [
"def getNodeList(self):\n nodelist = self.response[1] or []\n return [Node(*nodeple) for nodeple in nodelist]",
"def get_nodes(self):\n nodes = []\n for node in self.nodes:\n nodes.append(node)\n return nodes",
"def get_nodes(self) :\n n = []\n self._gather_nodes(self.r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
this method returns the attached user of a node if it has one | def attached_user(self):
for n in self.nodeList:
if type(n[0]) is User:
return n
return None | [
"def user(self):\r\n try:\r\n return User.objects.get(username=self.username)\r\n except User.DoesNotExist:\r\n return None",
"def get_current_user(self):\n return self.graph.users.get(int(self.get_secure_cookie('eid')))",
"def user(self) -> Optional[str]:\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
this method returns the nodeList of the node without any user, sorted by increasing idNode | def arrange_node(self):
idList = []
arrangedNode = []
for n in self.nodeList_without_user():
idList.append(n[0].idNode)
idList.sort()
for i in idList:
for n in self.nodeList:
if i == n[0].idNode:
arrangedNode.ap... | [
"def nodeList_without_user(self):\r\n nlwu = []\r\n for n in self.nodeList:\r\n if type(n[0]) is Node:\r\n nlwu.append(n)\r\n return nlwu",
"def get_nodes(self) :\n n = []\n self._gather_nodes(self.root,n) \n return n",
"def get_nodes(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
this method takes a target node id as a parameter and returns the time to get to the target node from this node; this method is recursive and can also be called by the time_to_node method in the user class | def time_to_node(self, target_id_node, visited_nodes=[], nodes_to_visit=[], time=0, max_time=None):
if not len(nodes_to_visit) == 0:
del nodes_to_visit[0]
if self.idNode == target_id_node:
if max_time == None:
max_time = time
elif time ... | [
"def travel_time(data, from_node, to_node):\n if from_node == to_node:\n travel_time = 0\n else:\n travel_time = distance(\n data.locations[from_node],\n data.locations[to_node]) / data.vehicle.speed\n return travel_time",
"def time_callback... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function converts a value to a URL parameter compatible with the NHL API. | def to_url_param(val):
if isinstance(val, IUrlParam):
return val.to_url_param()
if isinstance(val, (date, datetime)):
return val.strftime("%Y-%m-%d")
if isinstance(val, (list, tuple)):
return ",".join(map(to_url_param, val))
if isinstance(val, int):
return str(val)
if... | [
"def query_param(self, key, value=None, default=None, as_list=False):\r\n parse_result = self.query_params()\r\n if value is not None:\r\n parse_result[key] = value\r\n return URL._mutate(\r\n self, query=unicode_urlencode(parse_result, doseq=True))\r\n\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the org_no of this AccountListItem. | def org_no(self):
return self._org_no | [
"def external_organization_id(self):\n return self._external_organization_id",
"def pid_organization_id(self) -> str:\n return pulumi.get(self, \"pid_organization_id\")",
"def getAccountNumber(self):\n return self._acctNo",
"def account_number(self) -> int:\n if self._account_numbe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the org_no of this AccountListItem. | def org_no(self, org_no):
self._org_no = org_no | [
"def org_id(self, value):\n if isinstance(value, str):\n self._org_id = value\n else:\n raise ValueError(\"org_id must be a string\")",
"def external_org_id(self, external_org_id):\n\n self._external_org_id = external_org_id",
"def org_name(self, value):\n if value != None:\n if n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the uni_customer_no of this AccountListItem. | def uni_customer_no(self):
return self._uni_customer_no | [
"def customer_id(self):\n if \"customerId\" in self._prop_dict:\n return self._prop_dict[\"customerId\"]\n else:\n return None",
"def customer_code(self) -> str:\n return self._customer_code",
"def customer_id(self) -> pulumi.Output[Optional[str]]:\n return pulu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the uni_customer_no of this AccountListItem. | def uni_customer_no(self, uni_customer_no):
self._uni_customer_no = uni_customer_no | [
"def customer_code(self, customer_code: str):\n\n self._customer_code = customer_code",
"def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number",
"def next_customer_number(self, next_customer_number):\n\n self._next_customer_number = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the last_modified of this AccountListItem. | def last_modified(self):
return self._last_modified | [
"def last_modified(self):\n return self.metadata.last_modified",
"def last_modified_at(self) -> \"datetime\":\n return self._attrs.get(\"last_modified_at\")",
"def getlastmodified(self):\n t = calendar.timegm(time.gmtime(self.st_mtime))\n return DAVElement.getlastmodified(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the last_modified of this AccountListItem. | def last_modified(self, last_modified):
self._last_modified = last_modified | [
"def last_modified_at(self, last_modified_at: \"datetime\"):\n self._attrs[\"last_modified_at\"] = last_modified_at",
"def date_modified_billing(self, date_modified_billing):\n\n self._date_modified_billing = date_modified_billing",
"def last_modified_at(self) -> \"datetime\":\n return self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the dealer_id of this AccountListItem. | def dealer_id(self):
return self._dealer_id | [
"def account_id(self):\n\n return self._account_id.value",
"def get_ad_id(self):\n return self.get_item(conf.AD_ID_KEY)",
"def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]",
"def get_account_id(self):\n return self.wepay_account_id",
"def debit_acco... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dealer_id of this AccountListItem. | def dealer_id(self, dealer_id):
self._dealer_id = dealer_id | [
"def dealer_id(self):\n return self._dealer_id",
"def payer_id(self, payer_id):\n\n self._payer_id = payer_id",
"def advertisement_id(self, advertisement_id):\n\n self._advertisement_id = advertisement_id",
"def set_id_receiver(self, id_receiver):\n self.id_receiver = id_receiver",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the dealer_name of this AccountListItem. | def dealer_name(self):
return self._dealer_name | [
"def getAcctHolderName(self):\n return self._acctHolderName",
"def account_name(self) -> str:\n return self['accountName']",
"def dealer_id(self):\n return self._dealer_id",
"def _get_name(self) -> \"std::string\" :\n return _core.ListItem__get_name(self)",
"def get_name_item(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dealer_name of this AccountListItem. | def dealer_name(self, dealer_name):
self._dealer_name = dealer_name | [
"def set_name_item(self, item_name):\n self.name_item = item_name",
"def set_player_name(self, player):\r\n self.__name = player",
"def referrer_name(self, referrer_name):\n\n self._referrer_name = referrer_name",
"def dealer_id(self):\n return self._dealer_id",
"def book_name(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the dealer_reference of this AccountListItem. | def dealer_reference(self):
return self._dealer_reference | [
"def dealer_id(self):\n return self._dealer_id",
"def account_ref(self):\n return self._account_ref",
"def debit_account_uid(self):\n if self.is_null():\n return None\n else:\n return self._debit_account_uid",
"def getAcctHolderName(self):\n return self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dealer_reference of this AccountListItem. | def dealer_reference(self, dealer_reference):
self._dealer_reference = dealer_reference | [
"def dealer_id(self):\n return self._dealer_id",
"def account_ref(self, account_ref):\n\n self._account_ref = account_ref",
"def payer_id(self, payer_id):\n\n self._payer_id = payer_id",
"def set_carrier_data_reference(self, reference):\n value, name = reference, 'set_carrier_data_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create money from amount. | def FromAmount(amount):
m = Money()
m.Amount = amount
return m | [
"def convert(self, amount):\n return self.compute(\n request.nereid_website.company.currency.id,\n amount,\n request.nereid_currency.id)",
"def test_creation(self):\n result = Money(10, 'USD')\n self.assertEqual(result.amount, 10)\n\n result = Money(-10... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |