query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Auxiliary function to represent a 2nd order tensor in Voigt notation | def WriteInVoigtNotation(dim, tensor):
voigt_size = 3 * dim - 3
voigt_tensor = zeros(voigt_size, 1)
voigt_tensor[0] = tensor[0,0]
voigt_tensor[1] = tensor[1,1]
if dim == 2:
voigt_tensor[2] = tensor[0,1]
else:
voigt_tensor[2] = tensor[2,2]
voigt_tensor[3] = tensor[1,2]
... | [
"def RevertVoigtNotation(dim, voigt_tensor):\n\n # Revert the Voigt notation\n tensor = zeros(dim, dim)\n for d in range(dim):\n tensor[d,d] = voigt_tensor[d]\n if dim == 2:\n tensor[0,1] = voigt_tensor[2]\n tensor[1,0] = voigt_tensor[2]\n else:\n tensor[1,2] = voigt_tenso... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Auxiliary function to set a 2nd order tensor from its Voigt representation | def RevertVoigtNotation(dim, voigt_tensor):
# Revert the Voigt notation
tensor = zeros(dim, dim)
for d in range(dim):
tensor[d,d] = voigt_tensor[d]
if dim == 2:
tensor[0,1] = voigt_tensor[2]
tensor[1,0] = voigt_tensor[2]
else:
tensor[1,2] = voigt_tensor[3]
te... | [
"def WriteInVoigtNotation(dim, tensor):\n\n voigt_size = 3 * dim - 3\n voigt_tensor = zeros(voigt_size, 1)\n voigt_tensor[0] = tensor[0,0]\n voigt_tensor[1] = tensor[1,1]\n if dim == 2:\n voigt_tensor[2] = tensor[0,1]\n else:\n voigt_tensor[2] = tensor[2,2]\n voigt_tensor[3] =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Auxiliary function to print the diffusive flux matrix (G) | def PrintDiffusiveFluxMatrix(G,params):
dim = params["dim"]
print("The diffusive matrix is:\n")
for ll in range(dim+2):
for mm in range(dim):
print("G[",ll,",",mm,"]=",G[ll,mm],"\n")
return 0 | [
"def show(self):\n print(self.matrix)",
"def toMatrice(self):\n\t\ttxt = \" \"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t txt += str(i)+\"-\"\n\t\tprint(txt, file=sys.stderr)\n\t\t\n\t\ttxt=\"\"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t\ttxt += str(i)\n\t\t\tfor j in sorted(self.graphe.keys(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert data from csvfile into table in MySQL database | def insert_data_from_file(mysql_connection: mysql.connector.connection, sql_insert_template_filename: str,
data_filename: str) -> None:
mysql_cur = mysql_connection.cursor()
with open(file=data_filename, encoding="UTF-8") as data, \
open(file=sql_insert_template_filename, e... | [
"def insert_from_file(connection: pymysql.Connection, table_name: str, file_path: str) -> None:\n with open(file_path, 'r') as file, connection.cursor() as cursor:\n reader = csv.reader(file)\n for line in reader:\n cursor.execute(f\"\"\"\n REPLACE INTO {table_name} (airline_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the app by creating a root widget. | def build():
return RootWidget() | [
"def build(self):\r\n # application window size\r\n Window.size = (1000, 800)\r\n self.title = \"Songs to learn app\"\r\n # load app.kv\r\n self.root = Builder.load_file('app.kv')\r\n self.category = ['Title', 'Year', 'Artist', 'Unlearn']\r\n self.sort = self.categor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine if there is a link with ID source_link_id. Also implemented as ``__contains__(source_link_id)`` | def is_link(self, source_link_id):
return source_link_id in self._links | [
"def check_a_link(source_link, target_link):\n mylink = get_next_and_set_read()\n\n if mylink is None:\n raise EOFError('There is no more links to check')\n\n links = list_all_links(DEFAULT_URL + mylink.link, target_link)\n log.debug('[level %d] Crawling %r... %d links found', mylink.level, unquo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the link with the given ID, or `None` if no such link. Also implemented as ``__getitem__(source_link_id)`` | def get_link(self, source_link_id):
if source_link_id in self._links:
return self._links[source_link_id]
return None | [
"def get_link_by_id(link_id):\n return models.Link.query.filter_by(link_id=link_id).first()",
"def get_linked(doc, element):\n if element is not None:\n href = element.get(inkex.addNS('href', 'xlink'), None)\n if href is not None:\n linked_id = href[href.find('#')+1:]\n path = '//*[@... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of available multicast entries in the routing tables. | def n_available_multicast_entries(self):
return self._n_available_multicast_entries | [
"def Count(self) -> int:\n return len(self.m_subaddr)",
"def n_local_multicast_packets(self):\n return self._register_values[\n constants.ROUTER_REGISTER_REGISTERS.LOC_MC.value]",
"def n_external_multicast_packets(self):\n return self._register_values[\n constants.ROUT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a routing table entry represented in software to a binary routing table entry usable on the machine. | def convert_routing_table_entry_to_spinnaker_route(routing_table_entry):
route_entry = 0
for processor_id in routing_table_entry.processor_ids:
if processor_id >= Router.MAX_CORES_PER_ROUTER or processor_id < 0:
raise SpinnMachineInvalidParameterException(
... | [
"def route( self, routing_table ):\n routing_index = ((self.routing % len(routing_table) ) - 1)\n return routing_table[ routing_index ]",
"def routing_table(ip, community, ci):\n ipRouteType = \"1.3.6.1.2.1.4.21.1.8\"\n ret = get_bulk(ip, ipRouteType, community)\n if ret != None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a binary routing table entry usable on the machine to lists of route IDs usable in a routing table entry represented in software. | def convert_spinnaker_route_to_routing_ids(route):
processor_ids = [pi for pi in range(0, Router.MAX_CORES_PER_ROUTER)
if route & 1 << (Router.MAX_LINKS_PER_ROUTER + pi)]
link_ids = [li for li in range(0, Router.MAX_LINKS_PER_ROUTER)
if route & 1 << li]
... | [
"def convert_routing_table_entry_to_spinnaker_route(routing_table_entry):\n route_entry = 0\n for processor_id in routing_table_entry.processor_ids:\n if processor_id >= Router.MAX_CORES_PER_ROUTER or processor_id < 0:\n raise SpinnMachineInvalidParameterException(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a valid link_id this method returns its opposite. | def opposite(link_id):
# Mod is faster than if
return (link_id + Router.LINK_OPPOSITE) % Router.MAX_LINKS_PER_ROUTER | [
"def link_invalid_remove(self) -> ConfigNodePropertyBoolean:\n return self._link_invalid_remove",
"def validate_link(link):\n\tpass",
"def reject_link(self,\n net_id: str,\n link_id: ObjectId\n ):\n d = self.get_unsafe_link_document(link_id)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a `datetime` object to number of microseconds since epoch representation (which will be stored in MongoDB). This is the reverse function of `_convert_from_db`. | def _convert_from_datetime(self, val):
result = self._datetime_to_microseconds_since_epoch(value=val)
return result | [
"def _datetime_to_microseconds_since_epoch(self, value):\n # Verify that the value which is passed in contains UTC timezone\n # information.\n if not value.tzinfo or (value.tzinfo.utcoffset(value) != datetime.timedelta(0)):\n raise ValueError(\n \"Value passed to this ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert datetime in UTC to number of microseconds from epoch. | def _datetime_to_microseconds_since_epoch(self, value):
# Verify that the value which is passed in contains UTC timezone
# information.
if not value.tzinfo or (value.tzinfo.utcoffset(value) != datetime.timedelta(0)):
raise ValueError(
"Value passed to this function ne... | [
"def datetime_to_float(dt):\n epoch = datetime.fromtimestamp(0, tz=pytz.UTC)\n if not dt.tzinfo:\n epoch = epoch.replace(tzinfo=None)\n\n total_seconds = (dt - epoch).total_seconds()\n return total_seconds",
"def millis_to_utc(millis):\n return datetime.datetime(1970, 1, 1) + datetime.timede... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We return a custom wrapper over dict which tracks changes to the dictionary and allows us to only write the field to the database on update if the field value has changed. This is very important since it means much more efficient partial updates. | def __get__(self, instance, owner):
value = super().__get__(instance, owner)
if isinstance(value, dict) and not isinstance(value, BaseDict):
value = BaseDict(value, instance, self.name)
# NOTE: It's important this attribute is set, since only this way mongoengine can determine
... | [
"def _write_dict(self, fieldname, value):\n self._fields[fieldname] = value",
"def test_update_dictfield( ):\n\tclass TestA(Document):\n\t\tdata = DictField( )\n\n\tassert Q( { 'data__123': 'test' } ).toMongo( TestA, forUpdate=True ) == { 'data.123': 'test' }\n\n\t# children of a dictfield shouldn't be mot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set object type QConfigs | def set_object_type(
self, object_type: Union[Callable, str], qconfig_list: List[QConfigAny]
) -> QConfigMultiMapping:
self._insert_qconfig_list("object_type_qconfigs", [object_type], qconfig_list)
return self | [
"def config_setting_type(self, config_setting_type):\n\n self._config_setting_type = config_setting_type",
"def setConfiguration(options):",
"def set_element_type(self, type):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.type\", self._object._eco_id, type)\r\n p2e._app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a QConfigMultiMapping from a list of QConfigMappings | def from_list_qconfig_mapping(
cls, qconfig_mapping_list: List[QConfigMapping]
) -> QConfigMultiMapping:
new_qconfig_multi_mapping = cls()
new_qconfig_multi_mapping.qconfig_mappings_list = copy.deepcopy(
qconfig_mapping_list
)
# we need to avoid the issue descri... | [
"def map_configitems(items):\n return {\n item: (publication, factor)\n for item, publication, factor in items\n }",
"def get_mapping_dict(self, key_list):\n return dict([(key, self.get(key)) for key in key_list])",
"def map_configitems(items):\n return dict(\n (item, (p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the param is present (if required) and is of the correct data type. | def check(self,params):
value = params.get(self.name,None)
# missing check
if self.required and not value:
raise MissingParam(self.name)
# type check
try:
self.checker(value)
except ValueError:
raise BadParamType(self.name,val... | [
"def _validateParamInfo(self, dataType, paramType, name):\n # Legacy data type conversions\n if dataType == 'int':\n dataType = 'integer'\n\n # Parameter Object spec:\n # If type is \"file\", then the swagger \"consumes\" field MUST be either\n # \"multipart/form-data\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes a post request. If data is present it will be presented as the body, otherwise params will be presented. If both are defined an exception will be thrown. | def do_post(self,data=None,params={}):
if data and params:
raise ValueError('Either data or params can be submitted to be the POST body, but not both.')
post_data = json.dumps(data) if data else params
response = requests.post('%s/%s.json' % (self.service_url,self.d... | [
"def _post(self, data=None, url_name=None, url_args=None,\r\n url_kwargs=None, get_kwargs=None, url=None, *args, **kwargs):\r\n url = url or self._url(url_name, url_args, url_kwargs, get_kwargs)\r\n data = self.post_data if data is None else data\r\n return self.client.post(path=url,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Caches the specified descriptor locally. | def _cache_descriptor(self,descriptor_name,force=False):
if not descriptor_name in self._services or force:
response = requests.get('%s/describe/%s/%s/%s.json' % (self._service_url,self._app,self._version,descriptor_name))
self._services[descriptor_name] = HttpService(self._service_url,
... | [
"def _cache_descriptor(self,force=False):\n if not self._descriptor or force:\n response = requests.get('%s/describe/%s/%s/%s.json' % (self._service_url,self._app,self._version,self._resource_slug))\n self._descriptor = response.json(strict=False)\n\n if 'get' in self._descri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Locally caches the resource descriptor. | def _cache_descriptor(self,force=False):
if not self._descriptor or force:
response = requests.get('%s/describe/%s/%s/%s.json' % (self._service_url,self._app,self._version,self._resource_slug))
self._descriptor = response.json(strict=False)
if 'get' in self._descriptor:
... | [
"def _cache_descriptor(self,descriptor_name,force=False):\n if not descriptor_name in self._services or force:\n response = requests.get('%s/describe/%s/%s/%s.json' % (self._service_url,self._app,self._version,descriptor_name))\n self._services[descriptor_name] = HttpService(self._servi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group points according to indices. | def group_point(data: tf.Tensor, indices: tf.Tensor) -> tf.Tensor:
return grouping_module.group_point(data, indices) | [
"def group_indices(indexes):\n\n diff_inds = np.where(np.abs(np.diff(indexes)) > 1)[0]\n diff_points = np.concatenate(([-1], diff_inds, [len(indexes) - 1]))\n length = diff_points.size\n pairs = np.hstack((diff_points[:-1].reshape(length - 1, 1) + 1, diff_points[1:].reshape(length - 1, 1)))\n # pairs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unpack the 008 field of a record (arg1) into a tuple. Unpacks the Fixed-Length Data Elements fields (008) of a record into a tuple containing the values of the separate fields. The sixth element in the tuple containing the fields appropriate to the material configuration of the record (positions 18-34) in the field value... | def fixed_length_tuple(rec):
f = control_value([fld for fld in rec[1] if fld[0] == "008"][0])
return (f[0:6], f[6], f[7:11], f[11:15], f[15:18],
__material_desc(material_type(rec), f[18:35]), f[35:38],
f[38], f[39]) | [
"def unpack_uint8(data: bytes) -> Tuple[int, int]:\n value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0]\n return value, 1",
"def unpack(self, msg):\n\n msg._fields = {}\n\n # unpack main message blocks. A comm\n formats = self.msg_format.split(',')\n buf = msg._bu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine material type for record (arg1). | def material_type(rec):
l = rec[0]
# Book: Leader/06 (Type of record) contains code a (Language
# material) or t (Manuscript language material) and Leader/07
# (Bibliographic level) contains code a (Monographic component
# part), c (Collection), d (Subunit), or m (Monograph)
if l[1] in ("a... | [
"def test_material_id_wrong_type():\n\n nmm.Material.from_library(\"Li4SiO4\", material_id=\"one\")",
"def match_record_type(self, record_type=None, match=None):\n raise errors.Unimplemented()",
"def record_type(self, line: str) -> str:\n\n return line[1: 3]",
"def parse_material_para... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap a control field value in a dict. | def __control_dict(v):
return {"type": "control", "value": v} | [
"def _write_dict(self, fieldname, value):\n self._fields[fieldname] = value",
"def _wrap(name: str, ttype: int) -> dict:\n return {\n 'name': name,\n 'tag_type': ttype,\n }",
"def param2form(self, dico, verbose=DEBUG):\n myform = {} # a dico to handle widg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create appropriate dict for values in a control or variable field. | def __subfield_dict(marc_subfield):
if marc_subfield[3][0] is None:
return __control_dict(marc_subfield[3][1])
return {"type": "variable",
"ind1": marc_subfield[1],
"ind2": marc_subfield[2],
"subfields": dict(marc_subfield[3:])} | [
"def __control_dict(v):\n return {\"type\": \"control\", \"value\": v}",
"def __process_form_input(request_data: dict) -> dict:\n keys = [i for i in request_data.keys()][:-1]\n values = [i for i in request_data.values()][:-1]\n\n values_float = [float(val) for val in values[3:]]\n values_categorica... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the id of the underlying object if wrapped. If not wrapped, returns the object's id. | def object_id(obj):
if isinstance(obj,ObjectWrapper):
return obj.id()
else:
return id(obj) | [
"def id(self):\n return id(self._getobj_())",
"def id(self):\r\n if not hasattr(self, '_id'):\r\n raise MissingID\r\n return self._id",
"def id(obj):\n try:\n return key(obj).id_or_name()\n except AttributeError:\n return obj",
"def get_id(self, obj):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the id of the underlying object | def id(self):
return id(self._getobj_()) | [
"def object_id(obj):\n if isinstance(obj,ObjectWrapper):\n return obj.id()\n else:\n return id(obj)",
"def id(self):\r\n if not hasattr(self, '_id'):\r\n raise MissingID\r\n return self._id",
"def id(self) -> int:\n return self._context.id",
"def id(obj):\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find all numbers which are equal to the sum of the factorial of their digits. | def find_digit_factorials():
df = []
factorials = [fact(i) for i in range(10)]
# upper bound is arbitrary, but I couldn't find it analytically
for i in range(10, 1000000):
fact_digits = [factorials[int(x)] for x in str(i)]
if sum(fact_digits) == i:
df.append(i)
return d... | [
"def sumFactorialFinder(n):\n L = []\n factorials = fillFactorial(9)\n for i in range(10,n+1):\n if sumOfFactorials(i, factorials):\n L.append(i)\n return L",
"def main():\n sum_digit_factorials = 0\n for i in range(LOWER_LIMIT, UPPER_LIMIT):\n digit_factorial_sum = find... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter an input list of strings only for ones that start with a given prefix. >>> filter_by_prefix([], 'a') [] >>> filter_by_prefix(['abc', 'bcd', 'cde', 'array'], 'a') ['abc', 'array'] | def filter_by_prefix(strings: List[str], prefix: str) -> List[str]:
#[SOLUTION]
return [x for x in strings if x.startswith(prefix)] | [
"def names_with_prefix(names, prefix):\n if prefix_is_empty(prefix):\n return set(names)\n\n if not prefix.endswith(\".\"):\n prefix = prefix + \".\"\n\n matching_names = OrderedDict()\n for name in names:\n if name.startswith(prefix):\n matching_names[name] = None\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes a MultiReplacer instance. | def __init__(self, replace_pairs):
replace_list = list(replace_pairs) # make a copy in case input is iterable
self._replace_dict = dict(replace_list)
pattern = '|'.join(re.escape(x) for x, y in replace_list)
self._search_re = re.compile(pattern) | [
"def __init__(self):\n patterns = [(r\"j\", \"i\"), (r\"v\", \"u\"), (r\"J\", \"I\"), (r\"V\", \"U\")]\n self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]",
"def _init_repgroups(self):\r\n for c in self.__rtsp.components:\r\n g_name = self.get_conf(c, \"gro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns an adql.tree.FieldInfo object from a rscdef.Column. | def makeFieldInfo(column):
return adql.FieldInfo(column.type,
column.unit, column.ucd, (column,), stc=column.stc) | [
"def _parse_column(self, line, state):\n\n spec = None\n m = self._re_column.match(line)\n if m:\n spec = m.groupdict()\n spec[\"full\"] = True\n else:\n m = self._re_column_loose.match(line)\n if m:\n spec = m.groupdict()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
constructs a rscdef.Column from a field info pair as left by the ADQL machinery. | def _makeColumnFromFieldInfo(ctx, colName, fi):
if len(fi.userData)==1:
res = svcs.OutputField.fromColumn(fi.userData[0])
else:
res = base.makeStruct(svcs.OutputField, name=colName)
res.name = ctx.getName(colName)
res.ucd = fi.ucd
res.unit = fi.unit
res.type = fi.type
# XXX TODO: do something with stc's "b... | [
"def makeFieldInfo(column):\n\treturn adql.FieldInfo(column.type,\n\t\tcolumn.unit, column.ucd, (column,), stc=column.stc)",
"def parse_field_table_column(coldef):\n parts = [p.strip() for p in coldef.split(\";\")]\n if len(parts) == 1:\n return {\"identifier\": coldef, \"type\": \"string\"}\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a function that adds a tuple as returned by the database to table. This thing is only necessary because of the insanity of having to mash metadata into table rows when STCS strings need to be generated for TAP. Sigh. | def _getTupleAdder(table):
stcsOutputCols = []
for colInd, col in enumerate(table.tableDef):
# needMunging set above. Sigh.
if getattr(col, "needMunging", False):
stcsOutputCols.append((colInd, col))
if not stcsOutputCols: # Yay!
return table.addTuple
else: # Sigh. I need to define a function fumbling t... | [
"def _collect_tuple(cls, row: GeneratedRow, columns: Iterable[MetaColumn]) -> Tuple:\n return tuple(row[column.name] for column in columns)",
"def _insert_tuple(self, row: GeneratedRow, constraint: MetaConstraint, columns: Iterable[MetaColumn]):\n unique_tuples = self._unique_tuples[constraint]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
adds various info items from query and its parsed tree to a result table. | def _addTableMeta(query, tree, table):
# Copy over tableDef meta so it doesn't get obscured by what
# we're setting now.
for infoMeta in table.tableDef.iterMeta("info", propagate=False):
table.addMeta("info", infoMeta)
table.addMeta("info", "", infoName="server",
infoValue=base.getConfig("web", "serverURL"))
... | [
"def print_query_result(query):\n exec_query(query)\n\n table = prettytable.from_db_cursor(cur)\n print(table)",
"def addRow(self, row_info):\n pass",
"def _form_query_from_data(self, row, parsed):\n d = { k:row[k] for k in row.keys() }\n q = Query(row[\"text\"], row[\"time\"])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the shell command to run to create the erasure code profile described by the profile parameter. | def cmd_erasure_code_profile(profile_name, profile):
if profile == {}:
profile = {
'k': '2',
'm': '1',
'crush-failure-domain': 'osd'
}
return [
'osd', 'erasure-code-profile', 'set',
profile_name
] + [ str(key) + '=' + str(value) for ke... | [
"def erasure_code_profile_create(self, *args, **kwargs):\n banner(\"PCC.Create Erasure Code Profile\")\n self._load_kwargs(kwargs)\n\n payload = {\n \"name\":self.Name,\n \"directory\":self.Directory,\n \"plugin\":self.Plugin,\n \"stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a plot of the respective image with all bboxes. | def plot_image_with_bboxes(image_id,
images_folder_path=Path('data/raw/train/'),
target_folder_path=Path('data/interim/train/')):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
im = Image.open(images_folder_path / (image_id + '.jpg')... | [
"def plot(self,image=None,figsize=(20,20)):\n all_labels=list(self.all_boxes.keys())\n if not image:\n image=self.original_image.copy()\n\n for k in all_labels:\n box_k=self.all_boxes[k]\n h=self.height[k]\n w=self.width[k]\n\n for pt in bo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GIVEN a Plant Model WHEN a new Plant is created THEN check plant data | def test_plant_model(new_plant):
plant = PlantModel(name=new_plant["name"],
latin=new_plant["latin"],
difficulty=new_plant["difficulty"])
assert plant.name == "Monstera"
assert plant.latin == "Monstera Adans."
assert plant.difficulty == 5 | [
"def create():\n\n if request.method == 'GET':\n return render_template('create.html')\n # else: # if request.method == 'POST':\n new_plant = {\n 'name': request.form.get('plant_name'),\n 'variety': request.form.get('variety'),\n 'photo_url': request.form.get('photo'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GIVEN a DescriptionModel WHEN a new Description is created THEN check description data | def test_description_model(new_desc):
desc = DescriptionModel(content=new_desc["content"],
source=new_desc["source"],
plant_id=1)
assert desc.content == 'Description content'
assert desc.source == 'wikipedia'
assert desc.plant_id == 1 | [
"def test_empty_description_todo_create(self):\n self.login()\n\n previous_todo_id = get_max_todo_id()\n self.client.post('/todo/', data=dict(description=''))\n\n self.assertMessageFlashed('The description is required.')\n self.assertEqual(previous_todo_id, get_max_todo_id(), 'The... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns events for a stop ID from the WMATA API | def get_events(stop_id):
if "WMATA_API_KEY" in os.environ:
encrypted_api_key = os.environ['WMATA_API_KEY']
api_key = boto3.client('kms').decrypt(
CiphertextBlob=b64decode(encrypted_api_key)
)['Plaintext']
else:
raise NoApiKeyException("WMATA API key not provided")
... | [
"def find_stop(self,route_id,stop_id):\n return self.routes[route_id].stops[stop_id]",
"def get_stop_embeddings(stops: dict) -> typing.List[np.ndarray]:\n stop_embeddings = []\n\n # loop through all stops in the route\n for stop in stops.values():\n # encode `type` (boolean:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure that the type of created_at is Datetime | def test_created_at_type(self):
self.assertEqual(type(self.user.created_at), datetime) | [
"def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)",
"def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)",
"def test_updated_at_instance_of(self):\n self.assertTrue(isinstance(self.base.upd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure that the type of updated_at is Datetime | def test_updated_at_type(self):
self.assertEqual(type(self.user.updated_at), datetime) | [
"def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)",
"def test_updated_at_instance_of(self):\n self.assertTrue(isinstance(self.base.updated_at, datetime))",
"def test_updated_at_type(self):\n self.assertEqual(type(self.city.upd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure email is string | def test_email_type(self):
self.assertEqual(type(User.email), str) | [
"def test_useremail_unicode(self):\n crusoe = self.fixtures.crusoe\n email = crusoe.email.email\n result = str(models.UserEmail(email=email))\n self.assertIsInstance(result, str)\n assert email in result",
"def _validate_email(ctx, param, value):\n if not is_valid_email(value... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure password is string | def test_password_type(self):
self.assertEqual(type(User.password), str) | [
"def verifyPlaintextPassword(password):",
"def validate_password(self, value):\n validate_password(value)\n return value",
"def acceptable_password(password):\n LOG.debug(\"PASS\")\n LOG.debug(password)\n\n if password is not None:\n LOG.debug(len(password))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure first_name is string | def test_first_name_type(self):
self.assertEqual(type(User.first_name), str) | [
"def test_user_first_name(self):\n self.assertEqual(type(self.testcase.first_name), str)\n self.assertEqual(self.testcase.first_name, \"\")",
"def validate_first_name(value):\n if len(value) <= 3:\n raise ValidationError(\"First Name should be longer than 3 characters\")\n elif len(valu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure last_name is string | def test_last_name_type(self):
self.assertEqual(type(User.last_name), str) | [
"def test_user_last_name(self):\n self.assertEqual(type(self.testcase.last_name), str)\n self.assertEqual(self.testcase.last_name, \"\")",
"def validate_last_name(value):\n if len(value) <= 3:\n raise ValidationError(\"Last Name should be longer than 3 characters\")\n elif len(value) > ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate radiative loss curve which includes multiple ions | def radiative_loss(self, density: u.cm**(-3), **kwargs):
density = np.atleast_1d(density)
rad_loss = u.Quantity(np.zeros(self.temperature.shape + density.shape), 'erg cm^3 s^-1')
for ion in self:
try:
g = ion.contribution_function(density, **kwargs)
except... | [
"def radiative_loss(self, density: u.cm**(-3), **kwargs):\n rad_loss = u.Quantity(np.zeros(self.temperature.shape + density.shape), 'erg cm^3 s^-1')\n for ion in self:\n try:\n g = ion.contribution_function(density, **kwargs)\n except MissingDatasetException:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize from binning or ROOT histogram. | def __init__(self, *args, binning=None, contents=None, errors=None):
if len(args) == 0 and binning is None and contents is None and errors is None:
# This is a blanc histogram
self.binning = None
self.contents = None
self.errors = None
el... | [
"def __init__(self, histograms=None):\n self.histos = {}\n\n if histograms is not None:\n values = histograms.values() if isinstance(histograms, dict) else iter(histograms)\n for histogram in values:\n self.new_histogram(histogram)",
"def setup_non_uniform_binnin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return content and error for bin with given index. Under and overflow bins have indices 0 and 1. | def __getitem__(self, index):
return Bin(self.contents[index], self.errors[index]) | [
"def __getitem__(self, index):\n\n return super(Hist, self).__getitem__(index)\n\n if isinstance(index, tuple) and self.ndim == 1:\n binSel = []\n # Build a new tuple for each of the entries\n for selection in index:\n if selection is Ellipsis:\n binSel.append(Ellipsis)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if this is a blanc histogram. A blanc histogram contains no data. It can be added to any other histogram. | def is_blanc(self):
return self.binning is None | [
"def test_hist_badbin():\n vals = np.zeros((10,))\n with pytest.raises(TypeError):\n classifier.HistogramClassifier(vals, bins='badbin')",
"def numbins(self):\n \n if self.is_blanc:\n raise RuntimeError('Number of bins is not defined for a blanc histogram.')\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Number of bins in the histogram. Under and overflow bins are not counted. | def numbins(self):
if self.is_blanc:
raise RuntimeError('Number of bins is not defined for a blanc histogram.')
return len(self.binning) - 1 | [
"def num_bins(self):\n return len(self.bins) + 1",
"def n_bins(self):\n return self.num",
"def tot_num_bins(self):\n return np.product(self.num_bins)",
"def num_bins(self):\n return [d.num_bins for d in self]",
"def GetNumberOfHistogramBins(self) -> \"unsigned int\":\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise an exception if binnings are not identical. | def _check_binning(self, other):
if not np.array_equal(self.binning, other.binning):
raise RuntimeError('Binnings of the two histograms being added do not match.') | [
"def is_compat(self, other):\n if self.name != other.name:\n logging.trace('Dimension names do not match')\n return False\n\n if self.units.dimensionality != other.units.dimensionality:\n logging.trace('Incompatible units')\n return False\n\n # TODO: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleNodesDown action for Kubernetes Cluster | def test_kub_node_down(environment, murano, session, cluster, influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=1, nodes_count=2)
murano.status_check(deployed_environment,
[[cluster['name'], "master-1", 8080],
... | [
"def test_kub_nodes_down_if_one_present(murano, environment, session, cluster,\n influx):\n deployed_environment = murano.deploy_environment(environment, session)\n murano.check_instances(gateways_count=1, nodes_count=1)\n murano.status_check(deployed_environment,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleNodesUp action for Kubernetes Cluster | def test_kub_nodes_up(murano, environment, session, cluster, influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=1, nodes_count=1)
murano.status_check(deployed_environment,
[[cluster['name'], "master-1", 8080],
... | [
"def test_03_deploy_and_scale_kubernetes_cluster(self):\n if self.setup_failed == True:\n self.fail(\"Setup incomplete\")\n global k8s_cluster\n k8s_cluster = self.getValidKubernetesCluster()\n\n self.debug(\"Upscaling Kubernetes cluster with ID: %s\" % k8s_cluster.id)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleGatewaysDown action for Kubernetes Cluster | def test_kub_gateway_down(murano, environment, session, cluster, influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=2, nodes_count=1)
murano.status_check(deployed_environment,
[[cluster['name'], "master-1", 8080],
... | [
"def test_kub_nodes_down_if_one_present(murano, environment, session, cluster,\n influx):\n deployed_environment = murano.deploy_environment(environment, session)\n murano.check_instances(gateways_count=1, nodes_count=1)\n murano.status_check(deployed_environment,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleGatewaysUp action for Kubernetes Cluster | def test_kub_gateway_up(murano, environment, session, cluster, influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=1, nodes_count=1)
murano.status_check(deployed_environment,
[[cluster['name'], "master-1", 8080],
... | [
"def test_kub_nodes_up_if_limit_reached(murano, environment, session, cluster,\n influx):\n deployed_environment = murano.deploy_environment(environment, session)\n murano.check_instances(gateways_count=1, nodes_count=1)\n murano.status_check(deployed_environment,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleNodesUp and scaleGatewaysUp actions for Kubernetes Cluster if maximum nodes limit is already reached | def test_kub_nodes_up_if_limit_reached(murano, environment, session, cluster,
influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=1, nodes_count=1)
murano.status_check(deployed_environment,
... | [
"def test_kub_nodes_up(murano, environment, session, cluster, influx):\n deployed_environment = murano.deploy_environment(environment, session)\n murano.check_instances(gateways_count=1, nodes_count=1)\n murano.status_check(deployed_environment,\n [[cluster['name'], \"master-1\", 808... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check ScaleNodesDown and scaleGatewaysDown actions for Kubernetes Cluster if only one minion/gateway node is present | def test_kub_nodes_down_if_one_present(murano, environment, session, cluster,
influx):
deployed_environment = murano.deploy_environment(environment, session)
murano.check_instances(gateways_count=1, nodes_count=1)
murano.status_check(deployed_environment,
... | [
"def test_kub_nodes_up_if_limit_reached(murano, environment, session, cluster,\n influx):\n deployed_environment = murano.deploy_environment(environment, session)\n murano.check_instances(gateways_count=1, nodes_count=1)\n murano.status_check(deployed_environment,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implementation of grid search optimisation. Optimises func by selecting the maximal value of func over a given mesh. | def solve(self, func, transpose=True):
if transpose is True:
return torch.max(func(self.mesh).T, dim=1)[0]
else:
return torch.max(func(self.mesh), dim=1)[0] | [
"def arg_max(self, func, transpose=True):\n if transpose is True:\n return self.mesh[torch.max(func(self.mesh).T, dim=1)[1]]\n else:\n return self.mesh[torch.max(func(self.mesh), dim=1)[1]]",
"def sp_maximum_3D ( fun ,\n xmin , xmax ,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implementation of grid search optimisation. Optimises func by selecting the maximal value of func over a given mesh and returns its argument. | def arg_max(self, func, transpose=True):
if transpose is True:
return self.mesh[torch.max(func(self.mesh).T, dim=1)[1]]
else:
return self.mesh[torch.max(func(self.mesh), dim=1)[1]] | [
"def solve(self, func, transpose=True):\n if transpose is True:\n return torch.max(func(self.mesh).T, dim=1)[0]\n else:\n return torch.max(func(self.mesh), dim=1)[0]",
"def find_max_global(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def sp_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test exception for invalid sid format. | def test_bad_format(self):
sids = ("13967",) # missing type code
with self.assertRaises(ValueError) as context:
decode_sids(sids)
message = "invalid SID: 13967"
self.assertEqual(message, str(context.exception))
return | [
"def test_invalid_sid(self):\n invalid_sid = \"z\"\n self.assertFalse(stage_one(self.ccd, invalid_sid))",
"def test_invalid_sid_nonlogin(self):\n invalid_sid = \"z\"\n operations = [op for op in dir(ccdlib)\n if op.startswith(\"OP_\") and not op == \"OP_PLUGIN\"]\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the best class for a given test point | def _find_best_class(self, x: pd.Series) -> int:
optimal_score, optimal_class = float('-inf'), None
for k in self._pi_k.keys():
prob_k = self._prob(x, self._mu_k[k], self._pi_k[k])
if prob_k >= optimal_score:
optimal_score, optimal_class = prob_k, k
retu... | [
"def classify(me, observation):\n scores = calculate(me, observation)\n max_score, klass = scores[0], me.classes[0]\n for i in range(1, len(scores)):\n if scores[i] > max_score:\n max_score, klass = scores[i], me.classes[i]\n return klass",
"def best_classifier(self):\n print(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Migrate legacy RPC clients to their respective NAPALM drivers | def rpc_client_to_napalm_driver(apps, schema_editor):
Platform = apps.get_model('dcim', 'Platform')
Platform.objects.filter(rpc_client='juniper-junos').update(napalm_driver='junos')
Platform.objects.filter(rpc_client='cisco-ios').update(napalm_driver='ios') | [
"def prepare_api_call(nodeName):\n\n logger.info('Preparing api call')\n host = world.machine['nodes'][nodeName]['host']\n port = world.machine['nodes'][nodeName]['ports']['api']\n address =\"http://\"+ host + \":\" + str(port)\n api = Iota(address)\n logger.info('API call prepared for %s',address... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
loads the questions from the JSON file into a Python dictionary and returns it | def load_question(filename):
questions = None
with open(filename, "r") as read_file:
questions = json.load(read_file)
return (questions) | [
"def loaddata(filename):\n print(\"Loading\", filename)\n data = json.load(open(filename, encoding=\"utf-8\"))\n return [x for x in data['questions'] if 'ideal_answer' in x]",
"def load_answers(filename, dataset_name='SQuAD'):\r\n # Load JSON file\r\n with open(filename) as f:\r\n examples =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yields a temporary HOME directory. | def tmp_home(tmp_path: Path) -> Iterator[Path]:
old_home = os.environ.get("HOME")
os.environ["HOME"] = str(tmp_path)
yield tmp_path
if old_home:
os.environ["HOME"] = old_home
else:
del os.environ["HOME"] | [
"def temp_workdir() -> t.ContextManager[None]:\n with TemporaryDirectory() as tmp_dir:\n curdir = os.getcwd()\n os.chdir(tmp_dir)\n try:\n yield None\n finally:\n os.chdir(curdir)",
"def in_temporary_directory():\n previous_dir = os.getcwd()\n with tempfi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the setup-shell option. | def test_setup_shell(
shell: str, contents: Optional[str], tmp_home: Path, snapshot: Snapshot
) -> None:
config_file = tmp_home / SHELL_TO_CONFIG[shell]
if contents:
config_file.write_text(contents)
exit_code = app.main(["--setup-shell", shell])
assert exit_code == 0
assert config_file... | [
"def test_invoke_shell(self):\n self.client.invoke_shell()\n self.p_client.invoke_shell.assert_called_with()\n return",
"def test_setup(self):\n cfgdir = mkdtemp()\n cmd = f'kepler setup --path {cfgdir}'.split()\n try:\n out = check_output(cmd)\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search (in that specific order) tenant properties and settings. Raise AttributeError if not found. | def __getattr__(self, k):
try:
return self.tenant_properties[k]
except (AttributeError, KeyError):
# May raise AttributeError which is the behaviour we expect
return getattr(settings, k) | [
"def test_settings_match(self):\n with mock.patch(\"bluebottle.clients.settings\", foo=1):\n p = TenantProperties()\n\n self.assertEqual(p.foo, 1)\n self.assertTrue(hasattr(p, 'foo'))",
"def report_DAV__principal_property_search(\n self, request,\n principal_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of seconds until the next fanny pack friday. | async def fpf(self, ctx):
await ctx.send(f'Only {int(next_friday())} more seconds until the next fanny pack friday') | [
"def number_of_days(iteration):\r\n return iteration // 24",
"def ntradingdays():\n return 252*10",
"def fine_counter(self, time):\n days = int(((datetime.today())-time).days)\n weeks = int(days/7)\n final_fine = int(10 + 10*weeks)\n return final_fine",
"def _extract_days(p_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a memory string and return the number of bytes >>> cast_memory_to_bytes("16B") 16 >>> cast_memory_to_bytes("16G") == 16 * 1024 * 1024 * 1024 True | def cast_memory_to_bytes(memory_string: str) -> float:
conversion = {unit: (2 ** 10) ** i for i, unit in enumerate("BKMGTPEZ")}
number_match = r"([0-9]*\.[0-9]+|[0-9]+)"
unit_match = "("
for unit in conversion:
if unit != "B":
unit_match += unit + "B|"
for unit in conversion:
... | [
"def parse_bytes(strvalue):\n if not isinstance(strvalue, basestring):\n return strvalue\n\n strvalue = strvalue.replace(\" \", \"\")\n scales = {\n \"KB\": 1024,\n \"MB\": 1024**2,\n \"GB\": 1024**3\n }\n if strvalue[-2:] in scales:\n scale = scales[strvalue[-2:]]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cast a number of bytes to a readable string >>> from autofaiss.utils.cast import cast_bytes_to_memory_string >>> cast_bytes_to_memory_string(16. * 1024 * 1024 * 1024) == "16.0GB" True | def cast_bytes_to_memory_string(num_bytes: float) -> str:
suffix = "B"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num_bytes) < 1024.0:
return "%3.1f%s%s" % (num_bytes, unit, suffix)
num_bytes /= 1024.0
return "%.1f%s%s" % (num_bytes, "Y", suffix) | [
"def to_unit_memory(number):\n kb = 1024\n\n number /= kb\n\n if number < 100:\n return '{} Kb'.format(round(number, 2))\n\n number /= kb\n if number < 300:\n return '{} Mb'.format(round(number, 2))\n\n number /= kb\n\n return '{} Gb'.format(round(number, 2))",
"def cast_memory_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
convert metric_type string/enum to faiss enum of the distance metric | def to_faiss_metric_type(metric_type: Union[str, int]) -> int:
if metric_type in ["ip", "IP", faiss.METRIC_INNER_PRODUCT]:
return faiss.METRIC_INNER_PRODUCT
elif metric_type in ["l2", "L2", faiss.METRIC_L2]:
return faiss.METRIC_L2
else:
raise ValueError("Metric currently not support... | [
"def distance_type(self) -> str:\n return self._distance_type",
"def from_distance(distance: int) -> \"MoveType\":\n try:\n return MoveType(distance)\n except ValueError:\n return MoveType.UNKNOWN",
"def unit_to_metric_type(unit: MeasuredUnit) -> Type[MetricWrapperBase... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combine a set of univariate probability distributions. This function is meant for combining uncertainties on a single parameter/observable. As an argument, it takes a list of probability distributions that all have the same central value. It returns their convolution, but with location equal to the original central va... | def convolve_distributions(probability_distributions):
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
central_value = probability_distributions[0].central_value # central value of the first dist
try:
float(central_... | [
"def combine_indep_dstns(*distributions, seed=0):\n # Get information on the distributions\n dist_lengths = ()\n dist_dims = ()\n for dist in distributions:\n dist_lengths += (len(dist.pmf),)\n dist_dims += (dist.dim(),)\n number_of_distributions = len(distributions)\n\n # Initialize... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper to fetch current Tarantool version. | def fetch_tarantool_version(self):
if not hasattr(self, 'tnt_version') or self.tnt_version is None:
srv = None
if hasattr(self, 'servers') and self.servers is not None:
srv = self.servers[0]
if hasattr(self, 'srv') and self.srv is not None:
srv = self.srv
a... | [
"def GetVersion():\n return six.ensure_text(subprocess.check_output([_FindSkaffold(), 'version']))",
"def get_version() -> str:\n return VERSION",
"def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]",
"def _get_version(self):\n solver_exec = self.executable()\n if solver_exec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run tests depending on the tarantool version. Also, it can be used with the 'setUp' method for skipping the whole test suite. | def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if func.__name__ == 'setUp':
func(self, *args, **kwargs)
skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)
if func.__name__ != 'setUp'... | [
"def skip_or_run_test_python(func, REQUIRED_PYTHON_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n ver = sys.version_info\n python_version_str = '%d.%d' % (ver.major, ver.minor)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run tests depending on the Python version. Also, it can be used with the 'setUp' method for skipping the whole test suite. | def skip_or_run_test_python(func, REQUIRED_PYTHON_VERSION, msg):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if func.__name__ == 'setUp':
func(self, *args, **kwargs)
ver = sys.version_info
python_version_str = '%d.%d' % (ver.major, ver.minor)
python_v... | [
"def onlyPython(*args) -> Callable:\n def decorator(func: Callable) -> Callable:\n import pytest\n\n python_version = f'{sys.version_info.major}.{sys.version_info.minor}'\n return pytest.mark.skipif(\n python_version not in args,\n reason=f\"Python {python_version} not ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run SQL-related tests depending on the tarantool version. Tarantool supports SQL-related stuff only since 2.0.0 version. So this decorator should wrap every SQL-related test to skip it if the tarantool version < 2.0.0 is used for testing. | def skip_or_run_sql_test(func):
return skip_or_run_test_tarantool(func, '2.0.0', 'does not support SQL') | [
"def skip_or_run_test_python(func, REQUIRED_PYTHON_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n ver = sys.version_info\n python_version_str = '%d.%d' % (ver.major, ver.minor)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run VARBINARY-related tests depending on the tarantool version. Tarantool supports VARBINARY type only since 2.2.1 version. | def skip_or_run_varbinary_test(func):
return skip_or_run_test_tarantool(func, '2.2.1',
'does not support VARBINARY type') | [
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run decimal-related tests depending on the tarantool version. Tarantool supports decimal type only since 2.2.1 version. | def skip_or_run_decimal_test(func):
return skip_or_run_test_pcall_require(func, 'decimal',
'does not support decimal type') | [
"def test_decimal():\n assert hug.types.decimal(\"1.1\") == Decimal(\"1.1\")\n assert hug.types.decimal(\"1\") == Decimal(\"1\")\n assert hug.types.decimal(1.1) == Decimal(1.1)\n with pytest.raises(ValueError):\n hug.types.decimal(\"bacon\")",
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run UUID-related tests depending on the tarantool version. Tarantool supports UUID type only since 2.4.1 version. | def skip_or_run_UUID_test(func):
return skip_or_run_test_tarantool(func, '2.4.1',
'does not support UUID type') | [
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run datetime-related tests depending on the tarantool version. Tarantool supports datetime type only since 2.10.0 version. | def skip_or_run_datetime_test(func):
return skip_or_run_test_pcall_require(func, 'datetime',
'does not support datetime type') | [
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run tests related to extra error info provided over iproto depending on the tarantool version. Tarantool provides extra error info only since 2.4.1 version. | def skip_or_run_error_extra_info_test(func):
return skip_or_run_test_tarantool(func, '2.4.1',
'does not provide extra error info') | [
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator to skip or run tests related to error extension type depending on the tarantool version. Tarantool supports error extension type only since 2.4.1 version, yet encoding was introduced only in 2.10.0. | def skip_or_run_error_ext_type_test(func):
return skip_or_run_test_tarantool(func, '2.10.0',
'does not support error extension type') | [
"def skip_if_no_flake8_ext(pytestconfig):\n if not pytestconfig.getoption(\"--flake8_ext\"):\n pytest.skip(\"'--flake8_ext' not specified\") # pragma: no cover",
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to skip or run tests related to SSL password and SSL password files support. Supported only in Tarantool EE. Do not check Enterprise prefix since TNT_SSL_TEST already assumes it. Tarantool EE supports SSL passwords and password files only in current master since commit e1f47dd4 (after 2.11.0-entrypoint). | def skip_or_run_ssl_password_test_call(self):
return skip_or_run_test_tarantool_call(self, '2.11.0',
'does not support SSL passwords') | [
"def test_disabled_tls(self):\n args = {\n \"connect\": {\"host\": \"localhost\"},\n \"tls\": {\"certfile\": \"/lcliueurhug/ropko3kork32\"},\n }\n\n with pytest.raises(exceptions.MQTTTLSError):\n MQTTClient(**args)\n\n args[\"tls\"][\"enable\"] = False\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to skip or run tests related to configuring authentication method. Tarantool supports auth_type only in current master since commit 2574ff1a (after 2.11.0-entrypoint). | def skip_or_run_auth_type_test_call(self):
return skip_or_run_test_tarantool_call(self, '2.11.0',
'does not support auth type') | [
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a new entry with the specified title and optional values. The item is created and then the generated ID from the key is stored in the 'id' field. This is done atomically in a transaction to guarantee that the 'id' field is never 0. | def create(title, notes=None, complete=None):
user = users.get_current_user()
if not user:
return None
entry = TodolistEntry(
id=0,
title=title,
user_id=user.user_id(),
created=now,
modified=now)
if notes: entry.not... | [
"def create_item(cls, item):\n\n connection = sqlite3.connect(\"data.db\")\n cursor = connection.cursor()\n\n cursor.execute(\"INSERT INTO items VALUES (?, ?)\", (item[\"name\"], item[\"price\"]))\n\n connection.commit()\n connection.close()",
"def add_entry_to_database(value_fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
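The docstring's "atomically in a transaction" part is elided above. One plausible shape for it, assuming the old `google.appengine.ext.db` API that the surrounding handlers suggest; `_assign_id` and `create_with_id` are hypothetical helpers:

from google.appengine.ext import db


def _assign_id(entry):
    # The first put() makes the datastore allocate a key; the second
    # put() persists the generated numeric id into the entity's own
    # field, so readers never observe id == 0.
    entry.put()
    entry.id = entry.key().id()
    entry.put()
    return entry


def create_with_id(entry):
    # Both puts commit or roll back together.
    return db.run_in_transaction(_assign_id, entry)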
Method wrapper that updates a global timestamp to the current time before calling the URI handler method. Add this decorator when the handler needs to reference the current time. | def update_timestamp(method):
#noinspection PyUnusedLocal
def wrapper(self, *args, **kwargs):
global now
now = time()
method(self, *args, **kwargs)
return wrapper | [
"def originalTimegateCallback(uri_r):\n LOGGER.debug('Executing timegateCallback...')\n accept_datetime = None\n location = None\n # redirect to intermediate resource\n accept_datetime = parseHTTPDate(request.headers['Accept-Datetime'])\n LOGGER.debug('Accept-Datetime: %s' % accept_datetime)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
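Applied to a handler, the decorator refreshes the module-level `now` before the body runs, so helpers such as `create()` above pick up a fresh timestamp. A usage sketch; the handler class name is hypothetical:

from google.appengine.ext import webapp


class EntryHandler(webapp.RequestHandler):  # hypothetical handler
    @update_timestamp
    def post(self):
        # `now` is already current by the time create() reads it.
        TodolistEntry.create(title=self.request.get('title', None))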
Creates a new cloudtodolist entry | def post(self):
try:
entry = TodolistEntry.create(title=self.request.get("title", None),
notes=self.request.get("notes", None),
complete=self.request.get("complete", None))
self.response.set_status(201)
... | [
"def create(title, notes=None, complete=None):\n user = users.get_current_user()\n if not user:\n return None\n\n entry = TodolistEntry(\n id=0,\n title=title,\n user_id=user.user_id(),\n created=now,\n modified=now)\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Task request to clean archived deleted entries | def get(self):
query = TodolistEntry.all()
query.filter("deleted", True)
count = 0
for entry in query:
    count += 1
    entry.delete()
logging.info("CleanArchiveHandler removed %d deleted entries" % count) | [
"def delete(self):\n for index,delTask in enumerate(self.db): \n if self.task.delete in delTask.add:\n del self.db[index]\n self.delete() ##function must be recursive to update self.db indexes",
"def delete_request():",
"def clear_tasks(except_task_id=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
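Deleting one entity per loop iteration costs one datastore RPC each. A sketch of a batched variant under the same model, using keys-only queries and the 500-key-per-call limit; the function name is illustrative:

import logging

from google.appengine.ext import db


def purge_deleted(batch_size=500):
    query = TodolistEntry.all(keys_only=True).filter("deleted", True)
    removed = 0
    keys = query.fetch(batch_size)
    while keys:
        # Each db.delete() removes a whole batch in a single RPC.
        db.delete(keys)
        removed += len(keys)
        keys = query.fetch(batch_size)
    logging.info("purge_deleted removed %d entries", removed)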
Stores the client_id of the client that connected to a channel. Once stored, updates will be sent out via the channel to this client. | def post(self):
client_id = self.request.get('from')
logging.info("Connecting client update channel "+client_id)
add_update_client(client_id) | [
"def add_client(self, client):\n client_id = self.next_id\n self.next_id += 1\n self.clients[ client_id ] = client\n\n # this shouldn't throw an error in production but while\n # we're working on it, we want to make sure we assign the\n # client_id correctly no matter what.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
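The naming suggests the App Engine Channel API; `add_update_client` itself is elided above. A sketch of the two sides of that API (token creation and pushing a message), with the storage of client ids left out:

from google.appengine.api import channel


def open_update_channel(client_id):
    # The returned token is handed to the browser, which uses it to
    # open its end of the channel.
    return channel.create_channel(client_id)


def broadcast(message, client_ids):
    for client_id in client_ids:
        channel.send_message(client_id, message)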
Prints the highest grade with custom (colored) formatting. | def maior_nota_de_todas():
print('\n\033[0;30mA maior nota foi\033[m', end=' ')
print(f'\033[0;32m{maior_nota}\033[m') | [
"def welcome_user(self):\n\t\ttext = pyfiglet.figlet_format(f\"Welcome {self.username}\", font=\"starwars\")\n\t\tto_print = colored(text)\n\t\tcolorama.init()\n\t\tcprint(text, 'cyan', 'on_grey', attrs=['bold'])",
"def format(self, message, color):\n print color + \"[%s] \" % self.time().time() + message... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
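The `\033[...m` literals above are ANSI SGR escape sequences. A small helper that names the pattern; the codes mirror the ones used in these functions:

def colored(text, code):
    # Wraps text in an ANSI SGR sequence: '0;32' is green,
    # '1;31' is bold red, and '\033[m' resets attributes.
    return '\033[%sm%s\033[m' % (code, text)


print(colored('A maior nota foi 9.5', '0;32'))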
Prints the best students with custom (colored) formatting. | def melhores_alunos_de_todos():
mensagem = '========= MELHORES ALUNOS ========='
print(f'\n\033[1;31m{mensagem}\033[m')
for nome_aluno in melhores_alunos:
print(f"\033[0;34m{nome_aluno.center(len(mensagem))}\033[m")
print(f'\033[1;31m{"=" * len(mensagem)}\033[m') | [
"def maior_nota_de_todas():\n print('\\n\\033[0;30mA maior nota foi\\033[m', end=' ')\n print(f'\\033[0;32m{maior_nota}\\033[m')",
"def welcome_user(self):\n\t\ttext = pyfiglet.figlet_format(f\"Welcome {self.username}\", font=\"starwars\")\n\t\tto_print = colored(text)\n\t\tcolorama.init()\n\t\tcprint(text... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the student ranking (best to worst) with custom (colored) formatting. | def alunos_rankeados():
mensagem = '========= RANKING DE ALUNOS ========='
print(f'\n\033[1;31m{mensagem}\033[m')
c = 1
ranking_alunos.reverse()
ultima_nota = ranking_alunos[0]['nota']
for aluno in ranking_alunos:
if ultima_nota != aluno['nota']:
ultima_nota = aluno['nota']
... | [
"def rank_print(text: str):\n rank = dist.get_rank()\n # Keep the print statement as a one-liner to guarantee that\n # one single process prints all the lines\n print(f\"Rank: {rank}, {text}.\")",
"def print_page_ranks(page_ranks):\n print(\"Page ranks:\")\n for i in range(len(page_ranks)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
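The truncated loop above advances the rank counter only when the grade changes, i.e. students with equal grades share a rank (dense ranking). One plausible, self-contained restatement of that logic, keeping the record's 'nota' field name:

def rank_students(alunos):
    # alunos: list of dicts with a 'nota' grade; returns (rank, aluno)
    # pairs where equal grades share a rank (dense ranking).
    ordered = sorted(alunos, key=lambda a: a['nota'], reverse=True)
    ranked, rank, last = [], 0, None
    for aluno in ordered:
        if aluno['nota'] != last:
            rank += 1
            last = aluno['nota']
        ranked.append((rank, aluno))
    return ranked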
Load image and bounding box info from a .mat file in the PRW dataset format. | def _load_mat_annotation(self, index):
anno_file = osp.join(self._root_dir,'annotations/',index+'.jpg.mat')
anno = loadmat(anno_file)
for key in anno.iterkeys():
# There are different keys in PRW dataset, including
# 'anno_previous', 'anno_file' and 'box_new'
... | [
"def read_mat_7_3(mat_file):\n import digitStruct #Use sarahrn/Py-Gsvhn-DigiStruct-Reader to decode file\n objectList = []\n x_pix = []\n y_pix = []\n for dsObj in digitStruct.yieldNextDigitStruct(mat_file): #Only call to digiStruct\n label = ''\n bounding = []\n for bbox in dsO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
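A sketch of reading one of those annotation files with SciPy; the file name is hypothetical. `loadmat` mixes metadata keys into the returned dict, which is why the record iterates over the keys:

from scipy.io import loadmat

anno = loadmat('annotations/c1s1_000001.jpg.mat')  # hypothetical name
# Real variables are the keys that don't start with '__'; in PRW the
# variable is one of 'anno_previous', 'anno_file' or 'box_new'.
boxes = next(v for k, v in anno.items() if not k.startswith('__'))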
Load the list of (img, roi) pairs for probes. For the test split, the list is defined by the protocol. For the training split, some samples are randomly chosen from the gallery as probes. | def _load_probes(self):
self.probe_num = 2057
probes = []
roi = np.zeros([self.probe_num, 4], dtype = np.int32)
probetxt = open(osp.join(self._root_dir, 'query_info.txt'), 'r')
try:
i = 0
for line in probetxt:
line = line.strip('\r\n')
... | [
"def load(self):\n if os.path.exists(self.loaded_data):\n with open(self.loaded_data, 'rb') as f:\n preloaded_data = pickle.load(f)\n # Train part\n self.class2imgid = preloaded_data['class2imgid']\n self.path2class_sketch = preloaded_data['path2clas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
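The parsing loop is elided above. One plausible shape for it, assuming each line of query_info.txt carries an id, four box coordinates, and an image name (the exact column layout is an assumption):

import numpy as np

roi = np.zeros([2057, 4], dtype=np.int32)
probes = []
with open('query_info.txt') as probetxt:
    for i, line in enumerate(probetxt):
        fields = line.strip('\r\n').split()
        # Assumed layout: person_id x y w h image_name.
        roi[i] = [int(float(v)) for v in fields[1:5]]
        probes.append(fields[5])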
Returns a tree-like structure of ancestors | def ancestors_tree(self):
tree = {}
for f in self.parents():
tree[f] = f.ancestors_tree()
return tree | [
"def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)",
"def ancestors(self) -> QuerySet['TreeModel']:\n queryset = self.__class__.objects.filter(path__d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
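The nested `{node: subtree}` dicts from `ancestors_tree()` can be collapsed back into a flat node set; a small helper for that:

def flatten_tree(tree):
    # Nested {node: subtree} dicts in, flat set of all nodes out.
    nodes = set()
    for node, subtree in tree.items():
        nodes.add(node)
        nodes |= flatten_tree(subtree)
    return nodes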
Returns a set of ancestor edges | def ancestors_edges_set(self, cached_results=None):
if cached_results is None:
cached_results = dict()
if self in cached_results.keys():
return cached_results[self]
else:
res = set()
for f in self.parents():
res.add((f, self))
... | [
"def edges_set(self):\n edges = set()\n edges.update(self.descendants_edges_set())\n edges.update(self.ancestors_edges_set())\n return edges",
"def get_node_ancestors(synset):\n ancestors = set()\n # In the following line, synset.parents already is a set but we create a copy\n # of ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a set of all edges | def edges_set(self):
edges = set()
edges.update(self.descendants_edges_set())
edges.update(self.ancestors_edges_set())
return edges | [
"def get_all_possible_edges(self) -> Set[Edge]:",
"def edge_set(self):\n return set(self.edges())",
"def edges(self):\n return self.generateEdges()",
"def get_edges(self):\n output = set()\n for node_from in self._graph:\n for node_to in self._graph[node_from]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that the object is not an ancestor and disallows self links. | def circular_checker(parent, child):
if parent == child:
raise ValidationError('Self links are not allowed.')
if child in parent.ancestors_set():
raise ValidationError('The object is an ancestor.') | [
"def isAncestor(self, label):\n return ( label in self.ancestors )",
"def is_ancestor(self, id_, ancestor_id):\n return # boolean",
"def has_parent(self):\n return not self.parent == None",
"def has_ancestor(self, other: 'Snapshot') -> bool:\n\t\treturn core.BNSnapshotHasAncestor(self.ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
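`circular_checker` relies on an `ancestors_set()` helper that none of these rows show; `ValidationError` would come from `django.core.exceptions`. A minimal sketch consistent with `ancestors_tree()` above, assuming the same `parents()` accessor:

def ancestors_set(self):
    # Flat set of every ancestor, walking parents() recursively.
    # Assumes the graph is already acyclic, which is exactly what
    # circular_checker enforces on insertion.
    res = set()
    for parent in self.parents():
        res.add(parent)
        res |= parent.ancestors_set()
    return res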
Provide the next key in the sequence | def next_key(self, instance, parent):
raise NotImplementedError | [
"def get_next_available_key(self) -> str:\n\n last_key = self._get_last_project_key()\n assert last_key.startswith(self.initials)\n key_number = int(last_key[len(self.initials) :])\n key_number += 1\n next_available_key = f\"{self.initials}{key_number:05d}\"\n return next_a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provide the first key in the sequence | def first_key(self):
raise NotImplementedError | [
"def getMinKey(self):\n try:\n return list(self.valdictionary[self.minvalue])[0]\n except IndexError:\n return \"\"\n\n\n # Your AllOne object will be instantiated and called as such:",
"def first(seq): # real signature unknown; restored from __doc__\n pass",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
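`first_key`/`next_key` read like an abstract key-generation strategy. One concrete strategy under that interface, assuming the previous sibling's key is reachable as `instance.key`:

class IntegerKeySequence(object):
    def first_key(self):
        # The sequence starts at 1.
        return 1

    def next_key(self, instance, parent):
        # `instance` is assumed to be the last keyed sibling under
        # `parent`; its key plus one is the next key.
        return instance.key + 1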
Initialisation method; stores the given iPhone backup files and a dictionary in the storage master. | def __init__(self, backup_path, parsed_info_file, parsed_manifest_file, parsed_status_file):
self.backup_path = backup_path
self.parsed_info_file = parsed_info_file
self.parsed_manifest_file = parsed_manifest_file
self.parsed_status_file = parsed_status_file
self.storage_master =... | [
"def parse_indexed_files(self):\n self.storage_master['paired_devices'] = self.get_paired_devices()\n self.storage_master['voicemail_information'] = self.get_voicemail_information()\n self.storage_master['sms_message_information'] = self.get_sms_message_information()",
"def __init__(self):\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
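iPhone backups keep their metadata in Info.plist, Manifest.plist and Status.plist at the backup root, which plausibly matches the three parsed files taken here. A sketch of loading one of them with the standard library; the path is hypothetical:

import os
import plistlib


def load_plist(backup_path, name):
    # plistlib.load handles both the XML and binary plist variants.
    with open(os.path.join(backup_path, name), 'rb') as fh:
        return plistlib.load(fh)


info = load_plist('/backups/udid', 'Info.plist')  # hypothetical path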