query (string, length 9-9.05k) | document (string, length 10-222k) | negatives (list, length 19-20) | metadata (dict)
|---|---|---|---|
KLUE-RE micro F1 (except no_relation) | def klue_re_micro_f1(preds, labels):
label_list = ['no_relation', 'org:top_members/employees', 'org:members',
'org:product', 'per:title', 'org:alternate_names',
'per:employee_of', 'org:place_of_headquarters', 'per:product',
'org:number_of_employees/members', 'per:children',
'per:place_of... | [
"def klue_re_micro_f1(preds, labels, binary):\n if binary:\n label_list = ['org:top_members/employees', 'org:members',\n 'org:product', 'per:title', 'org:alternate_names',\n 'per:employee_of', 'org:place_of_headquarters', 'per:product',\n 'org:number_of_employees/members', 'per:child... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Figure out the route from the data | def getRoute(data, to):
if to: return to
else:
if isinstance(data, Element):
to = data.get('to')
if not to:
raise Exception, "Can't extract routing information from %s" \
% data
else:
... | [
"def get_route(self):\n\n\t\tif self.tour_completed:\n\t\t\treturn self.route_taken\n\t\treturn None",
"async def get_route(self, routespec):\n routespec = self.validate_routespec(routespec)\n router_alias = traefik_utils.generate_alias(routespec, \"router\")\n async with self.mutex:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert 'to' to a JID object | def getJID(to):
if isinstance(to, JID):
return to
else:
try:
jid = JID(to)
except Exception, e:
raise Exception, "Can't convert %s to a JID object" % to
return jid | [
"def TO_NODE_ID(self):\n return \"to_node_id\"",
"def _task_id_to_job_id(task_id):\n return 'job_' + '_'.join(task_id.split('_')[1:3])",
"def strToId(thisStr):\n\n return ObjectId(thisStr)",
"def encode_activity(activity):\n return activity['id']",
"def convertToJavaId(id, noLeadingNumber=Tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save current dataframe to `var_name` in the IPython user namespace. | def tee(self, var_name: str, clobber: bool = False) -> pd.DataFrame:
if self._ip and var_name:
if var_name in self._ip.ns_table["user_local"] and not clobber:
warnings.warn(f"Did not overwrite existing {var_name} in namespace")
else:
self._ip.ns_table["use... | [
"def save_variable(self, var):\n \n # Assuming that json is used to transfer data between RF and TOOLS\n self.variable.update(var)",
"def save_dataframe(df, filename):\n \n df.to_pickle(\"./datasets/processed_pickle_files/\" + filename)",
"def saveglobals(self):\r\n ns = self.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter output columns matching names in `cols` expression(s). | def filter_cols(
self,
cols: Union[str, Iterable[str]],
match_case: bool = False,
sort_cols: bool = False,
) -> pd.DataFrame:
curr_cols = self._df.columns
filt_cols: Set[str] = set()
if isinstance(cols, str):
filt_cols.update(_name_match(curr_cols,... | [
"def _filter_columns(columns: list,\n regex_terms: list):\n return list(filter(lambda x: any([re.match(pattern=p, string=x, flags=re.IGNORECASE) for p in regex_terms]),\n columns))",
"def frame_filter(frame, filter=\"\"):\n regex = re.compile(r'{}'.format... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expand a list column to individual rows. | def list_to_rows(self, cols: Union[str, Iterable[str]]) -> pd.DataFrame:
orig_cols = self._df.columns
data = self._df
if isinstance(cols, str):
cols = [cols]
for col in cols:
item_col = f"{col}_list_item$$"
ren_col = {item_col: col}
data = ... | [
"def make_rows(num_columns, seq):\n # calculate the minimum number of rows necessary to fit the list in\n # num_columns Columns\n num_rows, partial = divmod(len(seq), num_columns)\n if partial:\n num_rows += 1\n # break the seq into num_columns of length num_rows\n try:\n result = mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
User can join group. Add group id to user's enrolledGroups and create stripe subscription object. | def groupIdJoin(groupId):
group = db.Group.find_one({"_id": ObjectId(groupId)})
user = db.users.find_one({"_id": ObjectId(current_user.id)})
if group is not None:
if not group['enrolledIds']:
updatedGroup = db.Group.update_one({'_id': group['_id']}, {"$set": {
"enrolledId... | [
"def apply_to_join_group(self, request, group_id, extra_context=None):\n group = get_object_or_404(self.model, pk=group_id)\n\n if request.method != 'POST':\n return self.confirmation(request, 'apply_to_join', group, \n extra_context)\n\n already_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Is this view editing a Natural file? | def is_natural_file(view):
try:
location = view.sel()[0].begin()
except:
return False
return view.match_selector(location, 'source.natural') | [
"def can_preview(file):\n return True",
"def is_textual_file(file_type):\n if file_type == TEXT or file_type == MARKDOWN:\n return True\n else:\n return False",
"def is_show_file(self):\n return False",
"def scene_check():\n save_check = cmds.file(query=1, expandName=1)\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the text of the lines containing the specified points up to those points. | def text_preceding_points(view, points):
lines = [view.line(point) for point in points]
lines_to_point = [sublime.Region(line.begin(), point) for line, point in zip(lines, points)]
return [view.substr(region) for region in lines_to_point] | [
"def collect_characters(self, text_line):\n relevant_ccs = [cc for cc in self.fig.connected_components if cc.role != FigureRoleEnum.ARROW]\n initial_distance = np.sqrt(np.mean([cc.area for cc in relevant_ccs]))\n distance_fn = settings.DISTANCE_FN_CHARS\n\n def proximity_coeff(cc): retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the text that is "covered" by the given selector. | def find_text_by_selector(view, selector):
regions = view.find_by_selector(selector)
return [view.substr(region) for region in regions] | [
"def wait_for_contains_text(self, selector, text, timeout=None):\n method = contains_text(selector, text, timeout or self.wait_timeout)\n return self._wait_for(\n method=method,\n timeout=timeout,\n msg=method.message,\n )",
"def find_in_text_regex(self, selec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the variable levels in the given line. If amount is +1, 1 myvar (a10) becomes 2 myvar (a10). Variable levels never go below zero. | def update_var_levels(view, edit, line, amount=+1):
match = __level__.match(view.substr(line))
if not match:
return
start = match.start(1)
end = match.end(1)
level_string = match.group(1)
new_level = int(level_string, base=10) + amount
if new_level < 1:
new_level = 1
new_... | [
"def npl_changed(self, value):\n self.levels_new = value",
"def _set_stats_at_level_(self, level):\n self.current_level = level\n self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)\n self.hp += self.hp_bonus\n self.hp_base = self.hp\n self._set_st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the synaptic weights among the Projection Neurons (PNs) and the Kenyon Cells (KCs). Choose the first sample that has dispersion below the baseline (early stopping), or the one with the lowest dispersion (in case none of the samples' dispersion is less than the baseline). | def generate_pn2kc_weights(nb_pn, nb_kc, min_pn=5, max_pn=21, aff_pn2kc=None, nb_trials=100000, baseline=25000,
rnd=RNG, dtype=np.float32):
dispersion = np.zeros(nb_trials)
best_pn2kc = None
for trial in range(nb_trials):
pn2kc = np.zeros((nb_pn, nb_kc), dtype=dtype)
... | [
"def establishSynapses(preSyn, postSyn, thres=10, w=.04):\n\t# print('Establishing synapses.')\n\t# for preSyn in cells:\n\t\t# for postSyn in cells:\n\t# Extract the name of the cell model.\n\tsourceIndex = (str(preSyn).find('_') + 1, str(preSyn).rfind('_'))\n\ttargetIndex = (str(postSyn).find('_') + 1, str(postSy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add two GeoMaps with self += other. | def __add__(self, other):
if isinstance(other, GeoPoint):
other = GeoMap([other])
if not isinstance(other, GeoMap):
raise TypeError
geopoints = self.geopoints + other.geopoints
return self.__class__(geopoints=geopoints) | [
"def add(self, other_metric_map):\n self.metric_map.update(other_metric_map)\n return self",
"def __add__(self, other: 'MapResult') -> 'MapResult':\n if self.key_names != other.key_names:\n raise KeyError('Key names must be identical')\n if self.value_names != other.value_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append a single GeoPoint object to the current GeoMap object. | def append(self, geopoint):
if isinstance(geopoint, GeoPoint):
self.geopoints.append(geopoint)
else:
msg = 'Append only supports a single GeoPoint object as an argument.'
raise TypeError(msg)
return self | [
"def __add__(self, other):\n if isinstance(other, GeoPoint):\n other = GeoMap([other])\n if not isinstance(other, GeoMap):\n raise TypeError\n geopoints = self.geopoints + other.geopoints\n return self.__class__(geopoints=geopoints)",
"def add_point(self, point):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read GeoPoint List from a txt file: name longitude latitude | def ReadGeoMapLst(self, mapfile ):
f = open(mapfile, 'r')
Name=[]
for lines in f.readlines():
lines=lines.split()
name=lines[0]
lon=float(lines[1])
lat=float(lines[2])
if Name.__contains__(name):
index=Name.index(name)
... | [
"def gps_read_locations(lfile):\n gps_locations = list()\n\n if os.path.isfile(lfile):\n with open(lfile, \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 4)\n gps_tuple = list()\n gps_tuple.append(float(x[0]))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert Tomographic maps to GeoMap object ( GeoPoint List ), saved as "self.geomap" | def TomoMap2GeoPoints(self, lonlatCheck=True, datatype='ph'):
self.geomap=GeoMap();
SizeC=self.permaps[0].tomomapArr.size;
lonLst=self.permaps[0].tomomapArr[:,0];
latLst=self.permaps[0].tomomapArr[:,1];
Vvalue=self.permaps[0].tomomapArr[:,2];
per0=self.permaps[0].period;
... | [
"def ReadGeoMapLst(self, mapfile ):\n f = open(mapfile, 'r')\n Name=[]\n for lines in f.readlines():\n lines=lines.split()\n name=lines[0]\n lon=float(lines[1])\n lat=float(lines[2])\n if Name.__contains__(name):\n index=Name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot Tomography Map: longitude latitude ZValue | def PlotTomoMap(fname, dlon=0.5, dlat=0.5, title='', datatype='ph', outfname='', browseflag=False, saveflag=True):
if title=='':
title=fname;
if outfname=='':
outfname=fname;
Inarray=np.loadtxt(fname)
LonLst=Inarray[:,0]
LatLst=Inarray[:,1]
ZValue=Inarray[:,2]
llcrnrlon=LonLs... | [
"def _skymap(self, **kwargs):\n from pesummary.gw.plots.plot import _ligo_skymap_plot\n\n if \"luminosity_distance\" in self.keys():\n dist = self[\"luminosity_distance\"]\n else:\n dist = None\n\n return _ligo_skymap_plot(self[\"ra\"], self[\"dec\"], dist=dist, **k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the message handler for checking in an event attendee that has already registered for the event. | def handleEventAttendeeCheckIn(self, messageJson, logger):
command = COMMAND_EVENT_ATTENDEE_CHECKIN
message_response = self.json_message.createResponseMessage( command )
if "event_item" in messageJson and "registration_info" in messageJson:
event_item = messageJson["event_item"]
registration_info = messa... | [
"def event_exists(self, event_meetup_id: str) -> bool:\n for event in self.events:\n if event.meetup_id == event_meetup_id:\n return True\n return False",
"def eventcheckin():",
"def attendee_add(self, attendee=\"\"):\n if attendee == \"\": raise AttributeError\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the message handler that handles deleting an attendee that is registered for a particular event. | def handleEventAttendeeDelete(self, messageJson, logger):
command = COMMAND_EVENT_ATTENDEE_DELETE
message_response = self.json_message.createResponseMessage( command )
if "event_item" in messageJson and "registration_info" in messageJson:
event_item = messageJson["event_item"]
registration_info = message... | [
"def delete_event(self, event_id):\n pass",
"def delete_event(sender, instance, **kwargs):\r\n Event.objects.filter(pk=instance.event_ptr_id).delete()",
"def delete(self, request):\n auth_token = request.headers['Authorization'].replace('Token ', '')\n user = YouYodaUser.objects.get(auth... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the message handler that handles editing an attendee that is registered for a particular event. | def handleEventAttendeeEdit(self, messageJson, logger):
command = COMMAND_EVENT_ATTENDEE_EDIT
message_response = self.json_message.createResponseMessage( command )
if "event_item" in messageJson and "registration_info" in messageJson:
event_item = messageJson["event_item"]
registration_info = messageJson... | [
"def _modify_attendee(self, attendee: MeetingUser) -> MeetingUser:\n attendee.is_accepted = False\n attendee.may_join = False\n attendee.is_response = True\n return attendee",
"def eventsEdit(self, eid, event, callback):\n j = Json().put(u\"eid\", eid).put(u\"event_info\", JSONO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles a Stripe Charge request. | def handleStripeCharge(self, messageJson, logger):
command = COMMAND_STRIPE_CHARGE
STRIPE_TOKEN_KEY = "stripe_token"
message_response = self.json_message.createResponseMessage( command )
if STRIPE_TOKEN_KEY in messageJson:
chargeResult = GravityCharge.stripeCharge( messageJson[STRIPE_TOKEN_KEY] )
messa... | [
"def charge(request):\n stripe_public_key = settings.STRIPE_PUBLIC_KEY\n stripe_secret_key = settings.STRIPE_SECRET_KEY\n\n if request.method == 'POST':\n form_data = {\n 'donor': request.user,\n 'donor_full_name': request.POST['donor_full_name'],\n 'email': request.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the stripe session information used for Stripe Checkout, which is sent from the javascript on the client. | def handleStripeGetSession(self, messageJson, logger):
command = COMMAND_STRIPE_GET_SESSION
message_response = self.json_message.createResponseMessage( command )
if "stripe_sku" in messageJson:
sku = messageJson["stripe_sku"]
result = GravityCharge.stripeCheckout(sku)
message_response["session"] = r... | [
"def create_checkout_session(request):\n if request.method == 'GET':\n # define domain URL\n domain_url = settings.DOMAIN_URL\n # set stripe API from SECRET KEY variable\n stripe.api_key = settings.STRIPE_SECRET_KEY\n # get user chosen membership from session\n membershi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the relationship types from the database. Returns a dictionary whose key is (module_number, relationship name, object_name1, object_name2) and whose value is the relationship type ID for that relationship. | def handle_interaction_get_relationship_types(self):
db_file = self.make_full_filename(self.sqlite_file.value)
with DBContext(self) as (connection, cursor):
return list(self.get_relationship_types(cursor).items()) | [
"def get_relationship_types(self, cursor):\n relationship_type_table = self.get_table_name(T_RELATIONSHIP_TYPES)\n statement = \"SELECT %s, %s, %s, %s, %s FROM %s\" % (\n COL_RELATIONSHIP_TYPE_ID,\n COL_RELATIONSHIP,\n COL_MODULE_NUMBER,\n COL_OBJECT_NAME1,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle the conversion from a json-mangled structure to a dictionary. json_struct is the result from handle_interaction_get_relationship_types, which has been dumbed down for json, and json has likely turned tuples into lists. | def grt_interaction_to_dict(self, json_struct):
return dict([(tuple(k), v) for k, v in json_struct]) | [
"def code_decode_json_hook(obj):\n for (k, v) in obj.items():\n if type(v) is str and k in BYTES_PROPS:\n obj[k] = v.encode('utf8')\n if type(v) is list:\n obj[k] = tuple(v)\n return obj",
"def parse_json(obj):\n return ensure_dict(obj, 'JWE')",
"def test_json_issue():\n grammar = \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the relationship types from the database. Returns a dictionary whose key is (module_number, relationship name, object_name1, object_name2) and whose value is the relationship type ID for that relationship. | def get_relationship_types(self, cursor):
relationship_type_table = self.get_table_name(T_RELATIONSHIP_TYPES)
statement = "SELECT %s, %s, %s, %s, %s FROM %s" % (
COL_RELATIONSHIP_TYPE_ID,
COL_RELATIONSHIP,
COL_MODULE_NUMBER,
COL_OBJECT_NAME1,
C... | [
"def handle_interaction_get_relationship_types(self):\n db_file = self.make_full_filename(self.sqlite_file.value)\n with DBContext(self) as (connection, cursor):\n return list(self.get_relationship_types(cursor).items())",
"def get_relationship_type_id(\n self, workspace, module_nu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a relationship type to the database | def handle_interaction_add_relationship_type(
self, module_num, relationship, object_name1, object_name2
):
with DBContext(self) as (connection, cursor):
return self.add_relationship_type(
module_num, relationship, object_name1, object_name2, cursor
) | [
"def add_type(self, d_type):\n conn = sqlite3.connect(\"dampers.db\")\n conn.execute(\"PRAGMA foreign_keys=1\") # enable cascade deleting and updating.\n cur = conn.cursor()\n try:\n cur.execute(\"INSERT INTO d_types(d_type) VALUES(:d_type)\", {\"d_type\": d_type})\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if user wants any well tables | def wants_well_tables(self):
if self.db_type == DB_SQLITE:
return False
else:
return (
self.wants_agg_mean_well
or self.wants_agg_median_well
or self.wants_agg_std_dev_well
) | [
"def is_needed(self):\n return False",
"def check_tables():\n res = run_sql(\"SHOW TABLES\")\n for row in res:\n table_name = row[0]\n write_message(\"checking table %s\" % table_name)\n run_sql(\"CHECK TABLE %s\" % wash_table_column_name(table_name)) # kwalitee: disable=sql",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
All subsequent modules should not write measurements | def should_stop_writing_measurements(self):
return True | [
"def test_no_hwmon() -> None:\n with PatchSysFiles(no_hwmon=True):\n assert new_under_voltage() is None",
"def test_diagnostics_disabled(coresys):\n coresys.config.diagnostics = False\n assert filter_data(coresys, SAMPLE_EVENT, {}) is None",
"def test_export_nodata(monkeypatch, statsdict):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ignore objects (other than 'Image') if this returns true. If strict is True, then we ignore objects based on the object selection. | def ignore_object(self, object_name, strict=False):
if object_name in (EXPERIMENT, NEIGHBORS,):
return True
if strict and self.objects_choice == O_NONE:
return True
if strict and self.objects_choice == O_SELECT and object_name != "Image":
return object_name no... | [
"def _select_only_selected_complementary(state, objects_to_ignore):\n if (state.audioObjects is None or\n not any(in_by_id(audio_object, objects_to_ignore)\n for audio_object in state.audioObjects)):\n yield state",
"def _disable_prune_preserve(cls, layer):\n\n return la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if we should ignore a feature | def ignore_feature(
self,
object_name,
feature_name,
measurements=None,
strict=False,
wanttime=False,
):
if (
self.ignore_object(object_name, strict)
or feature_name.startswith("Description_")
or feature_name.startswith("Mod... | [
"def skipped_features(self):\n return self.features.unused_features()",
"def ignore(self):\n return \"ignore\" in self.attributes and self.attributes[\"ignore\"] == \"true\"",
"def unused_features(self):\n return self.features.unused_features()",
"def if_feature_disabled(\n parser: Par... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get object aggregate columns for the PerImage table. pipeline - the pipeline being run. image_set_list - for caching column data. post_group - true if only getting aggregates available post-group, false for getting aggregates available after run, None to get all. | def get_aggregate_columns(self, pipeline, image_set_list, post_group=None):
columns = self.get_pipeline_measurement_columns(pipeline, image_set_list)
mappings = self.get_column_name_mappings(pipeline, image_set_list)
ob_tables = self.get_object_names(pipeline, image_set_list)
result = []... | [
"def get_pipeline_measurement_columns(\n self, pipeline, image_set_list, remove_postgroup_key=False\n ):\n d = self.get_dictionary(image_set_list)\n if D_MEASUREMENT_COLUMNS not in d:\n d[D_MEASUREMENT_COLUMNS] = pipeline.get_measurement_columns()\n d[D_MEASUREMENT_COLU... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The list of selected aggregate names | def agg_names(self):
return [
name
for name, setting in (
(AGG_MEAN, self.wants_agg_mean),
(AGG_MEDIAN, self.wants_agg_median),
(AGG_STD_DEV, self.wants_agg_std_dev),
)
if setting.value
] | [
"def _find_matching_aggregates(self):\n aggregate_names = self._client.list_aggregates()\n pattern = self.configuration.netapp_aggregate_name_search_pattern\n return [aggr_name for aggr_name in aggregate_names\n if re.match(pattern, aggr_name)]",
"def aggregate(self, function_n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates empty image and object tables. Creates the MySQL database (if MySQL), drops existing tables of the same name and creates the tables. cursor - database cursor for creating the tables. column_defs - column definitions as returned by get_measurement_columns. mappings - mappings from measurement feature names to column name... | def create_database_tables(self, cursor, workspace):
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
# Create the database
if self.db_type == DB_MYSQL:
# result = execute(cursor, "SHOW DATABASES LIKE '%s'" %
# self.db_name.value)
... | [
"def __init__(self):\n self.connector = mysql.connector.connect(\n host=HOST,\n user=USER,\n passwd=PASSWD\n )\n self.mycursor = self.connector.cursor()\n\n #This part create if not exists the whole database and its tables\n self.create_databas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a SQL statement that generates the image table | def get_create_image_table_statement(self, pipeline, image_set_list):
statement = "CREATE TABLE " + self.get_table_name("Image") + " (\n"
statement += "%s INTEGER" % C_IMAGE_NUMBER
mappings = self.get_column_name_mappings(pipeline, image_set_list)
columns = self.get_pipeline_measurement... | [
"def get_create_object_table_statement(self, object_name, pipeline, image_set_list):\n if object_name is None:\n object_table = self.get_table_name(OBJECT)\n else:\n object_table = self.get_table_name(object_name)\n statement = \"CREATE TABLE \" + object_table + \" (\\n\"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the "CREATE TABLE" statement for the given object table. object_name - None = PerObject, otherwise a specific table. | def get_create_object_table_statement(self, object_name, pipeline, image_set_list):
if object_name is None:
object_table = self.get_table_name(OBJECT)
else:
object_table = self.get_table_name(object_name)
statement = "CREATE TABLE " + object_table + " (\n"
stateme... | [
"def get_table_name(self, object_name):\n return self.get_table_prefix() + \"Per_\" + object_name",
"def get_table_name(engine, obj):\n # noinspection PyProtectedMember\n return engine._compute_table_name(obj.__class__)",
"def Create_table(self, tableName):\n \n return \"CREATE TABLE ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the "CREATE VIEW" statement for the given object view. object_names is the list of objects to be included in the view. | def get_create_object_view_statement(self, object_names, pipeline, image_set_list):
object_table = self.get_table_name(OBJECT)
# Produce a list of columns from each of the separate tables
list_of_columns = []
all_objects = dict(
list(
zip(
... | [
"def create_db_views(conn):\n sql_snpcount = \"\"\" DROP VIEW IF EXISTS snp_counts;\n CREATE VIEW snp_counts\n AS SELECT snp_id, COUNT(*) as count\n FROM genotypes GROUP BY snp_id;\n \"\"\"\n # Create each view in tur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the statements to create the relationships table. Returns a list of statements to execute. | def get_create_relationships_table_statements(self, pipeline):
statements = []
#
# View name + drop view if appropriate
#
relationship_view_name = self.get_table_name(V_RELATIONSHIPS)
statements.append("DROP VIEW IF EXISTS %s" % relationship_view_name)
#
#... | [
"def get_statements(self) -> List[str]:\n statements = []\n for statement in self._parsed:\n if statement:\n sql = str(statement).strip(\" \\n;\\t\")\n if sql:\n statements.append(sql)\n return statements",
"def tables(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the relationship_type_id for the given relationship. workspace - the analysis workspace. module_num - the module number of the module that generated the record. relationship - the name of the relationship. object_name1 - the name of the first object in the relationship. object_name2 - the name of the second object in the relation... | def get_relationship_type_id(
self, workspace, module_num, relationship, object_name1, object_name2
):
assert self.db_type != DB_MYSQL_CSV
d = self.get_dictionary()
if T_RELATIONSHIP_TYPES not in d:
if self.db_type == DB_SQLITE:
try:
j... | [
"def handle_interaction_add_relationship_type(\n self, module_num, relationship, object_name1, object_name2\n ):\n with DBContext(self) as (connection, cursor):\n return self.add_relationship_type(\n module_num, relationship, object_name1, object_name2, cursor\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write SQL statements to generate a per-well table. pipeline - the pipeline being run (to get feature names). image_set_list. fid - file handle of file to write, or None if statements should be written to a separate file. | def write_mysql_table_per_well(self, pipeline, image_set_list, fid=None):
if fid is None:
file_name = "SQL__Per_Well_SETUP.SQL"
path_name = self.make_full_filename(file_name)
fid = open(path_name, "wt")
needs_close = True
else:
needs_close = Fa... | [
"def get_create_image_table_statement(self, pipeline, image_set_list):\n statement = \"CREATE TABLE \" + self.get_table_name(\"Image\") + \" (\\n\"\n statement += \"%s INTEGER\" % C_IMAGE_NUMBER\n\n mappings = self.get_column_name_mappings(pipeline, image_set_list)\n columns = self.get_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine if a column should be written in run or post_group. column - 3- or 4-tuple column from get_measurement_columns. post_group - True if in post_group, False if in run. Returns True if the column should be written. | def should_write(column, post_group):
if len(column) == 3:
return not post_group
if not isinstance(column[3], dict):
return not post_group
if MCA_AVAILABLE_POST_GROUP not in column[3]:
return not post_group
return post_group if column[3][MCA_AVAILABLE_... | [
"def wants_well_tables(self):\n if self.db_type == DB_SQLITE:\n return False\n else:\n return (\n self.wants_agg_mean_well\n or self.wants_agg_median_well\n or self.wants_agg_std_dev_well\n )",
"def _update_show_col_groups... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the data in the measurements out to the database. workspace - contains the measurements. mappings - map a feature name to a column name. image_number - image number for primary database key; defaults to current. | def write_data_to_db(self, workspace, post_group=False, image_number=None):
if self.show_window:
disp_header = ["Table", "Statement"]
disp_columns = []
try:
zeros_for_nan = False
measurements = workspace.measurements
assert isinstance(measureme... | [
"def write_post_run_measurements(self, workspace):\n columns = workspace.pipeline.get_measurement_columns()\n columns = list(\n filter(\n (\n lambda c: c[0] == EXPERIMENT\n and len(c) > 3\n and c[3].get(MCA_AVAILABLE_PO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write any experiment measurements marked as post-run | def write_post_run_measurements(self, workspace):
columns = workspace.pipeline.get_measurement_columns()
columns = list(
filter(
(
lambda c: c[0] == EXPERIMENT
and len(c) > 3
and c[3].get(MCA_AVAILABLE_POST_RUN, Fals... | [
"def write_nevts_files(self,jobs):\n for job in jobs:\n with open(pjoin(job['dirname'],'nevts'),'w') as f:\n if self.run_card['event_norm'].lower()=='bias':\n f.write('%i %f\\n' % (job['nevents'],self.cross_sect_dict['xseca']))\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the CellProfiler Analyst properties file | def write_properties_file(self, workspace):
all_properties = self.get_property_file_text(workspace)
for properties in all_properties:
fid = open(properties.file_name, "wt")
fid.write(properties.text)
fid.close() | [
"def write_property_file(w_string, file_name=\"c_cpp_properties.txt\"):\n mkdir(\".vscode\")\n\n dst_string = \"\"\n dst_string += c_cpp_property_str_head\n dst_string += w_string\n dst_string += c_cpp_property_str_tail\n\n dst_file = open(r\".\\.vscode\\\\\"+file_name, \"w\")\n dst_file.write(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the text for all property files. workspace - the workspace from prepare_run. Returns a list of Property objects which describe each property file. | def get_property_file_text(self, workspace):
class Properties(object):
def __init__(self, object_name, file_name, text):
self.object_name = object_name
self.file_name = file_name
self.text = text
self.properties = {}
fo... | [
"def properties_file_content(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"properties_file_content\")",
"def write_properties_file(self, workspace):\n all_properties = self.get_property_file_text(workspace)\n for properties in all_properties:\n fid = open(properties.file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If requested, write a workspace file with selected measurements | def write_workspace_file(self, workspace):
if self.db_type == DB_SQLITE:
name = os.path.splitext(self.sqlite_file.value)[0]
else:
name = self.db_name.value
tbl_prefix = self.get_table_prefix()
if tbl_prefix != "":
if tbl_prefix.endswith("_"):
... | [
"def saveToMeasurementParameterList(self):\n \n date, time = utilities.partTimeStamp(self.timeStamp)\n FWHMx, FWHMy = utilities.readFWHMfromBeamprofile()\n file = utilities.createOrOpenMeasurementParameterList()\n file.write(date+'\\t')\n file.write(time+'\\t')\n fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the file name and path name widths needed in table defs | def get_file_path_width(self, workspace):
m = workspace.measurements
#
# Find the length for the file name and path name fields
#
FileNameWidth = 128
PathNameWidth = 128
image_features = m.get_feature_names("Image")
for feature in image_features:
... | [
"def calcColWidth(self):",
"def get_width(self):\n dividechars = 1\n table_size = self.hits.get_width() + self.columns[1][0] + self.columns[2][0] + dividechars * 3\n return table_size",
"def get_widths(self):\n for i, _ in enumerate(self.headers):\n self.widths.append(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the table name associated with a given object. object_name - name of object or "Image", "Object" or "Well". | def get_table_name(self, object_name):
return self.get_table_prefix() + "Per_" + object_name | [
"def get_table_name(engine, obj):\n # noinspection PyProtectedMember\n return engine._compute_table_name(obj.__class__)",
"def _get_table(self, obj):\r\n if isinstance(obj, Marble):\r\n return obj\r\n else:\r\n return obj.table",
"def get_create_object_table_statement(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the measurement columns for this pipeline, possibly cached | def get_pipeline_measurement_columns(
self, pipeline, image_set_list, remove_postgroup_key=False
):
d = self.get_dictionary(image_set_list)
if D_MEASUREMENT_COLUMNS not in d:
d[D_MEASUREMENT_COLUMNS] = pipeline.get_measurement_columns()
d[D_MEASUREMENT_COLUMNS] = self... | [
"def get_feature_columns(self):\n return self.feature_columns",
"def columns(self):\n return self.c",
"def get_columns(self) -> dict:\n\n return self.source.columns",
"def host_snmp_cache_columns(self):\n columns = ('*',)\n limit = 1\n record = self.get_snmp_cache(col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter out and properly sort measurement columns | def filter_measurement_columns(self, columns):
columns = [
x
for x in columns
if not self.ignore_feature(x[0], x[1], True, wanttime=True)
]
#
# put Image ahead of any other object
# put Number_ObjectNumber ahead of any other column
#
... | [
"def sort(self):\n \n srt = np.argsort( self.mean_wavelength() )\n self.filters = self.filters[srt]\n self.measurement = self.measurement[srt]\n self.e_measurement = self.e_measurement[srt]\n self.s_measurement = self.s_measurement[srt]\n self.unit = self.unit[srt]\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a feature name to the collection | def add(self, feature_name):
self.__dictionary[feature_name] = feature_name
self.__mapped = False | [
"def add_feature(self, feat_name):\n # Check if feature exists and if so, return the feature ID. \n if feat_name in self.feature_dict:\n return self.feature_dict[feat_name]\n # If 'add_features' is True, add the feature to the feature \n # dictionary and return the feature ID.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is a very repeatable pseudorandom number generator. seed - a string to seed the generator. Yields integers in the range 0-65535 on iteration. | def random_number_generator(seed):
m = hashlib.md5()
m.update(seed.encode())
while True:
digest = m.digest()
m.update(digest)
yield digest[0] + 256 * digest[1] | [
"def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF",
"def initial_seed() -> int:\n return default_generator.initial_seed()",
"def make_seed(self):\n hash_digest = self.Hash(self.name.encode()).digest()\n hash_int... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return if worker is available for work | def is_available(self):
return self._worker_available.is_set() | [
"def is_worker():\n return _IN_WORKER is True",
"def _is_worker(self):\n return (\n dist_utils.has_chief_oracle() and not dist_utils.is_chief_oracle()\n )",
"def is_someone_working(self):\n for station in self.pool:\n if not station.free:\n return Tru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the flags confirming UTC time in the message is valid | def time_is_valid(self, msg):
flag_byte = ctypes.c_uint8(msg[-3])
return True if flag_byte.value & 4 == 4 else False | [
"def timestamp_valid(self):\n return _raw_util.raw_message_timestamp_valid(self)",
"def valid_time(time_string):\n hours = [(dt.time(i).strftime('%I:%M %p')) for i in range(7, 20)]\n return time_string in hours",
"def check_message_time(msg_buf):\n lines = msg_buf.strip().splitlines()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Is workspace factory enabled. | def isEnabled(workspaceFactory): | [
"def is_allowed_for_workspace(self, workspace: Workspace) -> bool:\n scoped_feature = self._use_case.get_scoped_to_feature()\n if scoped_feature is None:\n return True\n if isinstance(scoped_feature, Feature):\n return workspace.is_feature_available(scoped_feature)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a default Renderer instance for testing purposes. | def _make_renderer():
renderer = Renderer(string_encoding='ascii', file_encoding='ascii')
return renderer | [
"def get_instance() -> 'RenderEngine':\n return _SINGLETON",
"def make_renderer(self):\n renderer = _libass.ass_renderer_init(ctypes.byref(self)).contents\n renderer._after_init(self)\n return renderer",
"def init_renderers(cls):",
"def renderer(self):\n # The renderer is search... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the file_encoding default. | def test_file_encoding__default(self):
renderer = Renderer()
self.assertEqual(renderer.file_encoding, renderer.string_encoding) | [
"def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the file_encoding attribute is set correctly. | def test_file_encoding(self):
renderer = Renderer(file_encoding='foo')
self.assertEqual(renderer.file_encoding, 'foo') | [
"def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the search_dirs default. | def test_search_dirs__default(self):
renderer = Renderer()
self.assertEqual(renderer.search_dirs, [os.curdir]) | [
"def pines_dir_check():\n home_dir = Path(os.path.expanduser('~/Documents/'))\n default_pines_dir = Path(os.path.join(home_dir, 'PINES_analysis_toolkit/'))\n if os.path.exists(default_pines_dir):\n return default_pines_dir\n else:\n print('ERROR...I have not set this up to work for directo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the search_dirs attribute is set correctly when a string. | def test_search_dirs__string(self):
renderer = Renderer(search_dirs='foo')
self.assertEqual(renderer.search_dirs, ['foo']) | [
"def _check_directories(self):\n mode = os.F_OK | os.R_OK | os.W_OK | os.X_OK\n for attr in ('data_dir', 'data_underlay_dir'):\n path = getattr(self, attr)\n \n # allow an empty underlay path or None\n if attr == 'data_underlay_dir' and not path:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the search_dirs attribute is set correctly when a list. | def test_search_dirs__list(self):
renderer = Renderer(search_dirs=['foo'])
self.assertEqual(renderer.search_dirs, ['foo']) | [
"def test_list_dirs(self):\n self.list_dirs(self.Dirs)\n self.list_dirs(self.SubDirs)\n self.list_dirs(self.NestedDirs)\n return True",
"def testGetDirList(self):\r\n existingDirPath=ufsi.Path(self.existingDirPathStr)\r\n nonExistingDirPath=ufsi.Path(self.nonExistingDirPa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that render() returns a string of type unicode. | def test_render__return_type(self):
renderer = self._renderer()
rendered = renderer.render('foo')
self.assertEqual(type(rendered), unicode) | [
"def test__literal__handles_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n\n literal = renderer.literal\n\n self.assertEqual(literal(u\"foo\"), \"foo\")",
"def test__literal__uses_renderer_unicode(self):\n renderer = self._make_renderer()\n renderer.unicode = mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test passing a non-unicode template with non-ascii characters. | def test_render__nonascii_template(self):
renderer = _make_renderer()
template = u"déf".encode("utf-8")
# Check that decode_errors and string_encoding are both respected.
renderer.decode_errors = 'ignore'
renderer.string_encoding = 'ascii'
self.assertEqual(renderer.rende... | [
"def testTemplateUnicode(self):\n # And they will be converted to UTF8 eventually\n template = u'We \\u2665 Python'\n self.assertEqual(self.parse(template), template.encode('UTF8'))",
"def testTemplateTagUTF8(self):\n template = u'We \\u2665 \\xb5Web!'.encode('UTF8')\n self.assertEqual(self.parse(t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the _make_load_partial() method. | def test_make_load_partial(self):
renderer = Renderer()
renderer.partials = {'foo': 'bar'}
load_partial = renderer._make_load_partial()
actual = load_partial('foo')
self.assertEqual(actual, 'bar')
self.assertEqual(type(actual), unicode, "RenderEngine requires that "
... | [
"def test__load_partial__not_found__default(self):\n renderer = Renderer()\n load_partial = renderer.load_partial\n\n self.assertException(TemplateNotFoundError, \"File 'foo.mustache' not found in dirs: ['.']\",\n load_partial, \"foo\")",
"def test_load():\n pro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the render_path() method. | def test_render_path(self):
renderer = Renderer()
path = get_data_path('say_hello.mustache')
actual = renderer.render_path(path, to='foo')
self.assertEqual(actual, "Hello, foo") | [
"def test_render(self):\n\n\t\ttemplate_content = self.app.render('test_render.html', test_var='Testing templates')\n\t\tself.assertEqual(template_content, 'Testing templates')",
"def test_render_template_string(mock_rts, mock_markup):\n test_string = 'some string'\n template.render_template(test_st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test rendering an object instance. | def test_render__object(self):
renderer = Renderer()
say_hello = SayHello()
actual = renderer.render(say_hello)
self.assertEqual('Hello, World', actual)
actual = renderer.render(say_hello, to='Mars')
self.assertEqual('Hello, Mars', actual) | [
"def test_renders(self):\n value = self.block.to_python(\n {\n \"meta_title\": \"Meta Title\",\n \"title\": \"Main Title\",\n \"body\": \"This is the body.\",\n \"link\": {\"external_url\": \"https://omni-digital.co.uk\"},\n }\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test rendering a TemplateSpec instance. | def test_render__template_spec(self):
renderer = Renderer()
class Spec(TemplateSpec):
template = "hello, {{to}}"
to = 'world'
spec = Spec()
actual = renderer.render(spec)
self.assertString(actual, u'hello, world') | [
"def test_render(self):\n\n\t\ttemplate_content = self.app.render('test_render.html', test_var='Testing templates')\n\t\tself.assertEqual(template_content, 'Testing templates')",
"def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/center... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test rendering a View instance. | def test_render__view(self):
renderer = Renderer()
view = Simple()
actual = renderer.render(view)
self.assertEqual('Hi pizza!', actual) | [
"def test_view_used(self):\n if self.view_class is None:\n self.skipTest('view_class attribute is None')\n resp = self.get_response()\n self.assertIsInstance(resp.context['view'], self.view_class)",
"def test_mising_renderer(self):\r\n assert self.giotto_view.can_render('tex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a default Renderer instance for testing purposes. | def _make_renderer(self):
return _make_renderer() | [
"def _make_renderer():\n renderer = Renderer(string_encoding='ascii', file_encoding='ascii')\n return renderer",
"def get_instance() -> 'RenderEngine':\n return _SINGLETON",
"def make_renderer(self):\n renderer = _libass.ass_renderer_init(ctypes.byref(self)).contents\n renderer._after_ini... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that load_partial returns unicode (and not a subclass). | def test__load_partial__returns_unicode(self):
class MyUnicode(unicode):
pass
renderer = Renderer(string_encoding='ascii',
partials={'str': 'foo',
'subclass': MyUnicode('abc')})
actual = renderer.load_partial('str')
... | [
"def test_make_load_partial(self):\n renderer = Renderer()\n renderer.partials = {'foo': 'bar'}\n load_partial = renderer._make_load_partial()\n\n actual = load_partial('foo')\n self.assertEqual(actual, 'bar')\n self.assertEqual(type(actual), unicode, \"RenderEngine require... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that load_partial provides a nice message when a template is not found. | def test__load_partial__not_found__default(self):
renderer = Renderer()
load_partial = renderer.load_partial
self.assertException(TemplateNotFoundError, "File 'foo.mustache' not found in dirs: ['.']",
load_partial, "foo") | [
"def test__load_partial__not_found__dict(self):\n renderer = Renderer(partials={})\n\n load_partial = renderer.load_partial\n\n # Include dict directly since str(dict) is different in Python 2 and 3:\n # <type 'dict'> versus <class 'dict'>, respectively.\n self.assertException(T... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that load_partial provides a nice message when a template is not found. | def test__load_partial__not_found__dict(self):
renderer = Renderer(partials={})
load_partial = renderer.load_partial
# Include dict directly since str(dict) is different in Python 2 and 3:
# <type 'dict'> versus <class 'dict'>, respectively.
self.assertException(TemplateNotFo... | [
"def test__load_partial__not_found__default(self):\n renderer = Renderer()\n load_partial = renderer.load_partial\n\n self.assertException(TemplateNotFoundError, \"File 'foo.mustache' not found in dirs: ['.']\",\n load_partial, \"foo\")",
"def test_no_template_sour... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that literal uses the renderer's unicode function. | def test__literal__uses_renderer_unicode(self):
renderer = self._make_renderer()
renderer.unicode = mock_unicode
literal = renderer.literal
b = u"foo".encode("ascii")
self.assertEqual(literal(b), "FOO") | [
"def test__literal__handles_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n\n literal = renderer.literal\n\n self.assertEqual(literal(u\"foo\"), \"foo\")",
"def test__literal__returns_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n literal = rende... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that literal doesn't try to "double decode" unicode. | def test__literal__handles_unicode(self):
renderer = Renderer(string_encoding='ascii')
literal = renderer.literal
self.assertEqual(literal(u"foo"), "foo") | [
"def test_unicode(snapshot):\n expect = u'pépère'\n snapshot.assert_match(expect)",
"def test_unicode1(self):\r\n # TODO: find something that actually returns suggestions\r\n us1 = raw_unicode(r\"he\\u2149lo\")\r\n self.assertTrue(type(us1) is unicode)\r\n self.assertFalse(self.d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that literal returns unicode (and not a subclass). | def test__literal__returns_unicode(self):
renderer = Renderer(string_encoding='ascii')
literal = renderer.literal
self.assertEqual(type(literal("foo")), unicode)
class MyUnicode(unicode):
pass
s = MyUnicode("abc")
self.assertEqual(type(s), MyUnicode)
... | [
"def test__literal__handles_unicode(self):\n renderer = Renderer(string_encoding='ascii')\n\n literal = renderer.literal\n\n self.assertEqual(literal(u\"foo\"), \"foo\")",
"def test__literal__uses_renderer_unicode(self):\n renderer = self._make_renderer()\n renderer.unicode = mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that escape uses the renderer's escape function. | def test__escape__uses_renderer_escape(self):
renderer = Renderer(escape=lambda s: "**" + s)
escape = renderer.escape
self.assertEqual(escape("foo"), "**foo") | [
"def test__escape(self):\n engine = Renderer(escape=lambda s: \"**\" + s)\n\n self._assert_render(u'**bar', '{{foo}}', {'foo': 'bar'}, engine=engine)",
"def test__escape__uses_renderer_unicode(self):\n renderer = Renderer()\n renderer.unicode = mock_unicode\n escape = renderer.e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that escape uses the renderer's unicode function. | def test__escape__uses_renderer_unicode(self):
renderer = Renderer()
renderer.unicode = mock_unicode
escape = renderer.escape
b = u"foo".encode('ascii')
self.assertEqual(escape(b), "FOO") | [
"def test__escape__uses_renderer_escape(self):\n renderer = Renderer(escape=lambda s: \"**\" + s)\n escape = renderer.escape\n\n self.assertEqual(escape(\"foo\"), \"**foo\")",
"def test_escape(self):\n self.assertRaises(ValueError, escape, \"I am a string type. Not a unicode type.\")\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test rendering the given template using the given context. | def _assert_render(self, expected, template, *context, **kwargs):
partials = kwargs.get('partials', None)
engine = kwargs.get('engine', None)
if not engine:
engine = Renderer(partials=partials)
context = ContextStack(*context)
actual = engine.render(template, contex... | [
"def test_render(self):\n\n\t\ttemplate_content = self.app.render('test_render.html', test_var='Testing templates')\n\t\tself.assertEqual(template_content, 'Testing templates')",
"def test_render_template(self):\n fake_template = 'hello there {{user.first_name}} {{top}}'\n fake_variables = {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that render() uses the literal attribute. | def test__literal(self):
engine = Renderer(literal=lambda s: s.upper())
self._assert_render(u'BAR', '{{{foo}}}', {'foo': 'bar'}, engine=engine) | [
"def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n escape=lambda s: \"**\" + s)\n\n template = 'literal: {{{foo}}} escaped: {{foo}}'\n context = {'foo': 'bar'}\n\n self._assert_render(u'literal: BAR escaped: **bar', ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that render() uses the escape attribute. | def test__escape(self):
engine = Renderer(escape=lambda s: "**" + s)
self._assert_render(u'**bar', '{{foo}}', {'foo': 'bar'}, engine=engine) | [
"def test__escape__uses_renderer_escape(self):\n renderer = Renderer(escape=lambda s: \"**\" + s)\n escape = renderer.escape\n\n self.assertEqual(escape(\"foo\"), \"**foo\")",
"def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that render() does not call literal before or after calling escape. | def test__escape_does_not_call_literal(self):
engine = Renderer(literal=lambda s: s.upper(),
escape=lambda s: "**" + s)
template = 'literal: {{{foo}}} escaped: {{foo}}'
context = {'foo': 'bar'}
self._assert_render(u'literal: BAR escaped: **bar', template, cont... | [
"def test__escape(self):\n engine = Renderer(escape=lambda s: \"**\" + s)\n\n self._assert_render(u'**bar', '{{foo}}', {'foo': 'bar'}, engine=engine)",
"def test__escape__uses_renderer_escape(self):\n renderer = Renderer(escape=lambda s: \"**\" + s)\n escape = renderer.escape\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that render() preserves unicode subclasses when passing to escape. This is useful, for example, if one wants to respect whether a variable value is markupsafe.Markup when escaping. | def test__escape_preserves_unicode_subclasses(self):
class MyUnicode(unicode):
pass
def escape(s):
if type(s) is MyUnicode:
return "**" + s
else:
return s + "**"
engine = Renderer(escape=escape)
template = '{{foo1}} {... | [
"def test__escape__uses_renderer_unicode(self):\n renderer = Renderer()\n renderer.unicode = mock_unicode\n escape = renderer.escape\n\n b = u\"foo\".encode('ascii')\n self.assertEqual(escape(b), \"FOO\")",
"def test_escape(self):\n self.assertRaises(ValueError, escape, \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test an implicit iterator in a literal tag. | def test_implicit_iterator__literal(self):
template = """{{#test}}{{{.}}}{{/test}}"""
context = {'test': ['<', '>']}
self._assert_render(u'<>', template, context) | [
"def test_implicit_iterator__escaped(self):\n template = \"\"\"{{#test}}{{.}}{{/test}}\"\"\"\n context = {'test': ['<', '>']}\n\n self._assert_render(u'<>', template, context)",
"def IsLiteral(self) -> bool:",
"def testLazyEvaluation(self):\n # Tags are looked up lazily\n templa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test an implicit iterator in a normal tag. | def test_implicit_iterator__escaped(self):
template = """{{#test}}{{.}}{{/test}}"""
context = {'test': ['<', '>']}
self._assert_render(u'<>', template, context) | [
"def test_tag_runs(self):\n pass",
"def test_starttag_simple():\n inst = _encoder.TextEncoder('foo')\n\n result = inst.starttag(b'xx', iter([]), False)\n assert result == b'[xx]'\n\n result = inst.starttag(b'yy', iter([(b'aa', None), (b'bb', b'cc')]),\n False)\n ass... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that literals work in sections. | def test_literal__in_section(self):
template = '{{#test}}1 {{{less_than}}} 2{{/test}}'
context = {'test': {'less_than': '<'}}
self._assert_render(u'1 < 2', template, context) | [
"def test_compatible_sections() -> None:\n actual = a2_courses.compatible_sections(SCHEDULE_1, CON123) == {CON123_LEC0123}\n expected = True\n assert actual == expected",
"def test_section_definition(self):\n\t\ttests = {\n\t\t\tTrue:{'name': 'Foo:'},\n\t\t\tFalse:{'name': 'Foo'}\n\t\t}\n\n\t\tfor truth,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that literals work in partials. | def test_literal__in_partial(self):
template = '{{>partial}}'
partials = {'partial': '1 {{{less_than}}} 2'}
context = {'less_than': '<'}
self._assert_render(u'1 < 2', template, context, partials=partials) | [
"def IsLiteral(self) -> bool:",
"def test_unexpandedLiteral(self):\n self.assertEqual(\n u\"hello world\",\n self.expandToText(ConceptTemplate(u\"hello world\"), {}))",
"def test_partial__context_values(self):\n template = '{{>partial}}'\n partials = {'partial': 'unesc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that escape and literal work on context values in partials. | def test_partial__context_values(self):
template = '{{>partial}}'
partials = {'partial': 'unescaped: {{{foo}}} escaped: {{foo}}'}
context = {'foo': '<'}
self._assert_render(u'unescaped: < escaped: <', template, context, partials=partials) | [
"def test_section__context_values(self):\n template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'\n context = {'test': {'foo': '<'}}\n\n self._assert_render(u'unescaped: < escaped: <', template, context)",
"def test__escape_does_not_call_literal(self):\n engine = Rende... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check what happens if there is an end tag with no start tag. | def test_section__end_tag_with_no_start_tag(self):
template = '{{/section}}'
try:
self._assert_render(None, template)
except ParsingError, err:
self.assertEqual(str(err), "Section end tag mismatch: section != None") | [
"def does_end_token_exist(self) -> bool:",
"def test_section__end_tag_mismatch(self):\n template = '{{#section_start}}{{/section_end}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: secti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check what happens if the end tag doesn't match. | def test_section__end_tag_mismatch(self):
template = '{{#section_start}}{{/section_end}}'
try:
self._assert_render(None, template)
except ParsingError, err:
self.assertEqual(str(err), "Section end tag mismatch: section_end != section_start") | [
"def does_end_token_exist(self) -> bool:",
"def handle_endtag(self, tag) -> None:\n if tag in self.keeptags:\n self.textdata += f'</{tag}>'",
"def verify_tag(self, p_tag):\n\t\tmatch_obj = self.v_re_skipped_tags.search(p_tag)\n\t\tif match_obj is None:\n\t\t\tif p_tag == u'a': # If the tag is ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that escape and literal work on context values in sections. | def test_section__context_values(self):
template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'
context = {'test': {'foo': '<'}}
self._assert_render(u'unescaped: < escaped: <', template, context) | [
"def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n escape=lambda s: \"**\" + s)\n\n template = 'literal: {{{foo}}} escaped: {{foo}}'\n context = {'foo': 'bar'}\n\n self._assert_render(u'literal: BAR escaped: **bar', ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that list items can access the parent context. For sections whose value is a list, check that items in the list have access to the values inherited from the parent context when rendering. | def test_section__list_referencing_outer_context(self):
context = {
"greeting": "Hi",
"list": [{"name": "Al"}, {"name": "Bob"}],
}
template = "{{#list}}{{greeting}} {{name}}, {{/list}}"
self._assert_render(u"Hi Al, Hi Bob, ", template, context) | [
"def validate(self, value):\n value = super(ReferenceListProperty, self).validate(value)\n if value is not None:\n if not isinstance(value, list):\n raise db.BadValueError('Property %s must be a list' %\n self.name)\n for item in value:\n if not isinstance... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
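The lookup behavior this row tests can be reproduced standalone; the sketch below assumes pystache's top-level render helper is available as the public convenience function.

import pystache

# Items of a list-valued section still see names from the enclosing context,
# so 'greeting' resolves even though each item dict only defines 'name'.
context = {
    'greeting': 'Hi',
    'list': [{'name': 'Al'}, {'name': 'Bob'}],
}
print(pystache.render('{{#list}}{{greeting}} {{name}}, {{/list}}', context))
# -> Hi Al, Hi Bob,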
Check that rendered section output is not interpolated. | def test_section__output_not_interpolated(self):
template = '{{#section}}{{template}}{{/section}}: {{planet}}'
context = {'section': True, 'template': '{{planet}}', 'planet': 'Earth'}
self._assert_render(u'{{planet}}: Earth', template, context) | [
"def test_section__end_tag_with_no_start_tag(self):\n template = '{{/section}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: section != None\")",
"def test_section__end_tag_mismatch(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that string section values are not interpreted as lists. | def test_section__string_values_not_lists(self):
template = '{{#section}}foo{{/section}}'
context = {'section': '123'}
# If strings were interpreted as lists, this would give "foofoofoo".
self._assert_render(u'foo', template, context) | [
"def test_string_list_unpacking(self):\n self.assertEqual(self.setting.detect_type(u\"(1.0, 2.0, 3.0)\", \"conf file\"), \"float\")\n self.assertEqual(self.setting.detect_type(u\"(1, 2, 3)\", \"conf file\"), \"int\")\n self.assertEqual(self.setting.detect_type(u'(\"1\", \"2\", \"3\")', \"conf f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that "nested truthy" sections get rendered. | def test_section__nested_truthy(self):
template = '| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |'
context = {'bool': True}
self._assert_render(u'| A B C D E |', template, context) | [
"def is_nested(self, ):\n\t\tpass",
"def test_section__end_tag_with_no_start_tag(self):\n template = '{{/section}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: section != None\")",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check a doubly-nested section with the same context key. | def test_section__nested_with_same_keys(self):
# Start with an easier, working case.
template = '{{#x}}{{#z}}{{y}}{{/z}}{{/x}}'
context = {'x': {'z': {'y': 1}}}
self._assert_render(u'1', template, context)
template = '{{#x}}{{#x}}{{y}}{{/x}}{{/x}}'
context = {'x': {'x': ... | [
"def is_nested(self, ):\n\t\tpass",
"def test33(self):\n self.check('aDict.nestedDict.one')",
"def test31(self):\n self.check('aDict.nestedDict')",
"def _has_context(value):\n if isinstance(value, tuple):\n if len(value) == 2:\n if isinstance(value[1], dict):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that callable output is treated as a template string (issue 46). The spec says: When used as the data value for a Section tag, the lambda MUST be treatable as an arity 1 function, and invoked as such (passing a String containing the unprocessed section contents). The returned value MUST be rendered against the cur... | def test_section__lambda__tag_in_output(self):
template = '{{#test}}Hi {{person}}{{/test}}'
context = {'person': 'Mom', 'test': (lambda text: text + " :)")}
self._assert_render(u'Hi Mom :)', template, context) | [
"def templategetter(tmpl):\n tmpl = tmpl.replace('{', '%(')\n tmpl = tmpl.replace('}', ')s')\n return lambda data: tmpl % data",
"def emptyfn(body: str, return_type: str=\"u1\") -> str:\n return f\"fn test() -> {return_type} {{{body}}}\"",
"def testComplexClosureWithoutArguments(self):\n template... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
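The section-lambda behavior quoted from the spec above amounts to this: the lambda receives the raw, unrendered section text, and whatever it returns is itself rendered against the current context. A minimal sketch, assuming the public pystache.render helper:

import pystache

# 'wrap' receives the literal string 'Hi {{person}}'; its return value is then
# rendered, so {{person}} is interpolated afterwards.
template = '{{#wrap}}Hi {{person}}{{/wrap}}'
context = {'person': 'Mom', 'wrap': lambda text: text + ' :)'}
print(pystache.render(template, context))  # -> Hi Mom :)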
Check that lists of lambdas are processed correctly for sections. | def test_section__lambda__list(self):
template = '<{{#lambdas}}foo{{/lambdas}}>'
context = {'foo': 'bar',
'lambdas': [lambda text: "~{{%s}}~" % text,
lambda text: "#{{%s}}#" % text]}
self._assert_render(u'<~bar~#bar#>', template, context) | [
"def grade_section(sol_fname, funcs, section):\n for stf in section.student_files:\n for func in funcs:\n test_func(func, stf, sol_fname)",
"def test_format_as_sections(test_input: List[str], expected: str) -> None:\r\n actual = format_as_sections(test_input)\r\n assert actual == expect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test a mixed list of lambdas and non-lambdas as a section value. | def test_section__lambda__mixed_list(self):
template = '<{{#lambdas}}foo{{/lambdas}}>'
context = {'foo': 'bar',
'lambdas': [lambda text: "~{{%s}}~" % text, 1]}
self._assert_render(u'<~bar~foo>', template, context) | [
"def test_rule_mapper(self):\n a = [\"my test string\", \"my test string\", \"\", \"dippadaa\"]\n method = [\"__len__\", \"__contains__\", \"find\", \"__contains__\"]\n args = [\"None\", \"my\", \"my\", \"diu\"]\n expected = [len(a), True, \"True\", \"False\"]\n for i in np.arange... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that section lambdas are not pushed onto the context stack. Even though the sections spec says that section data values should be pushed onto the context stack prior to rendering, this does not apply to lambdas. Lambdas obey their own special case. | def test_section__lambda__not_on_context_stack(self):
context = {'foo': 'bar', 'lambda': (lambda text: "{{.}}")}
template = '{{#foo}}{{#lambda}}blah{{/lambda}}{{/foo}}'
self._assert_render(u'bar', template, context) | [
"def testNoLambdaExpression(self):\n\n self.assertNotIn(_MethodWithLambdaExpression(), self._code_objects)",
"def test37(self):\n assert self.get('aDict.nestedFunc', False) == dummyFunc",
"def test35(self):\n self.check('aDict.nestedFunc')",
"def supports_lambda_closure():\n f = lambda x: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
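The special case described above — a section lambda is invoked but never pushed onto the context stack — can be seen directly in a small sketch, again assuming pystache.render:

import pystache

# {{#foo}} pushes the string 'bar' onto the stack; the '{{.}}' returned by the
# lambda is rendered against that context, not against the lambda, so it yields 'bar'.
template = '{{#foo}}{{#lam}}ignored{{/lam}}{{/foo}}'
context = {'foo': 'bar', 'lam': lambda text: '{{.}}'}
print(pystache.render(template, context))  # -> bar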
Check that section lambda return values are not reinterpolated. This test is a sanity check that the rendered lambda return value is not reinterpolated as could be construed by reading the section part of the Mustache spec. | def test_section__lambda__no_reinterpolation(self):
template = '{{#planet}}{{#lambda}}dot{{/lambda}}{{/planet}}'
context = {'planet': 'Earth', 'dot': '~{{.}}~', 'lambda': (lambda text: "#{{%s}}#" % text)}
self._assert_render(u'#~{{.}}~#', template, context) | [
"def test_section__context_values(self):\n template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'\n context = {'test': {'foo': '<'}}\n\n self._assert_render(u'unescaped: < escaped: <', template, context)",
"def test_not_match_input():\n assert multi('{{[}}') is False",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that multiline comments are permitted. | def test_comment__multiline(self):
self._assert_render(u'foobar', 'foo{{! baz }}bar')
self._assert_render(u'foobar', 'foo{{! \nbaz }}bar') | [
"def juniper_multiline_comments():\n single = '-(\"*/\" / \"\\n\")*' # single-line comments only\n multi = '-\"*/\"*' # syntactically correct multi-line support\n if settings.ALLOW_JUNIPER_MULTILINE_COMMENTS:\n return multi\n return single",
"def ShouldPassCommentCheck(self, line):\n self.as... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |