query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict) |
|---|---|---|---|
Returns the first primary field or False | def hasPrimary(self):
for f in self.fields():
if getattr(f, 'primary', False):
return f
return False | [
"def hasPrimaryField(self, i: int) -> bool:\n ...",
"def primary_field(self: Cdef) -> Optional[JField]:\n return self._primary_field",
"def primary_field(self):\n return self._schema.primary_field",
"def is_primary_key(self):\n return self.primary_key",
"def is_primary(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize a Schema. The first positional argument may be a sequence of Fields. (All further positional arguments are ignored.) Keyword arguments are added to my properties. | def __init__(self, *args, **kwargs):
Schemata.__init__(self)
self._props = self._properties.copy()
self._props.update(kwargs)
if len(args):
if type(args[0]) in [ListType, TupleType]:
for field in args[0]:
self.addField(field)
... | [
"def __init__(self, schema_name, property_name, schema_dict, required_list): # DOESN'T TAKE INTO CONSIDERATION REFERENCES\n self.name = property_name\n self.type = self._get_type(schema_dict, schema_name + property_name)\n self.is_required = self.attr_required(property_name, required_list)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a deep copy of this Schema. | def copy(self):
c = BasicSchema()
for field in self.fields():
c.addField(field.copy())
# Need to be smarter when joining layers
# and internal props
c._props.update(self._props)
return c | [
"def copy(self):\n c = Schemata()\n for field in self.fields():\n c.addField(field.copy())\n return c",
"def copy(self):\n return Struct(self.__dict__.copy())",
"def copy(self) -> \"Atoms\":\n return deepcopy(self)",
"def clone(self):\n fields = dict((k, v.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method mutates fields in the given instance. For each keyword argument k, the key indicates the name of the field to mutate while the value is used to call the mutator. E.g. updateAll(instance, id='123', amount=500) will, depending on the | def updateAll(self, instance, **kwargs):
keys = kwargs.keys()
for name in keys:
field = self.get(name, None)
if field is None:
continue
if not field.writeable(instance):
continue
# If passed the test above, mutator is ... | [
"def update(self, **kw):\n at = kw.pop(\"__at\", getCallStack())\n label = kw.pop(\"__label\", \"update\")\n\n for name, value in kw.items():\n try:\n field = self._fields[name]\n field.__set__(self, value, at=at, label=label)\n except KeyErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
change the schemata for a field | def changeSchemataForField(self, fieldname, schemataname):
field = self[fieldname]
self.delField(fieldname)
field.schemata = schemataname
self.addField(field) | [
"def addSchemata(self, name):\n from Products.Archetypes.Field import StringField\n\n if name in self.getSchemataNames():\n raise ValueError, \"Schemata '%s' already exists\" % name\n self.addField(StringField('%s_default' % name, schemata=name))",
"def setScheming(self, schemingOb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of schemata names in order of appearance | def getSchemataNames(self):
lst = []
for f in self.fields():
if not f.schemata in lst:
lst.append(f.schemata)
return lst | [
"def get_species_names():\n return ['S', 'I', 'R']",
"def getSchemataFields(self, name):\n return [f for f in self.fields() if f.schemata == name]",
"def getSNPSetsList() :\n\timport rabaDB.filters as rfilt\n\tf = rfilt.RabaQuery(SNPMaster)\n\tnames = []\n\tfor g in f.iterRun() :\n\t\tnames.append... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of fields belonging to schema 'name' in order of appearance | def getSchemataFields(self, name):
return [f for f in self.fields() if f.schemata == name] | [
"def getNames(schema):\n return [f.getName() for f in schema.fields()]",
"def getFields(sorted=True):",
"def get_fields(self, table_name):\n newlist = []\n for field in self.fields:\n if field.table == table_name:\n newlist.append(field)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move a field >>> from Products.Archetypes.atapi import StringField as SF >>> schema = Schema((SF('a'), SF('b'), SF('c'),)) >>> schema.keys() ['a', 'b', 'c'] >>> sbefore = schema.copy() >>> sbefore.moveField('c', before='a') >>> sbefore.keys() ['c', 'a', 'b'] >>> safter = schema.copy() >>> safter.moveField('a', after='b... | def moveField(self, name, direction=None, pos=None, after=None, before=None):
if bool(direction) + bool(after) + bool(before) + bool(pos is not None) != 1:
raise ValueError, "You must apply exactly one argument."
keys = self.keys()
if name not in keys:
raise KeyError, na... | [
"def _moveFieldToPosition(self, name, pos):\n keys = self._names\n oldpos = keys.index(name)\n keys.remove(name)\n if oldpos >= pos:\n keys.insert(pos, name)\n else:\n keys.insert(pos - 1, name)\n self._names = keys",
"def _moveFieldInSchemata(self, na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves a field with the name 'name' to the position 'pos'. This method doesn't obey the assignment of fields to a schemata | def _moveFieldToPosition(self, name, pos):
keys = self._names
oldpos = keys.index(name)
keys.remove(name)
if oldpos >= pos:
keys.insert(pos, name)
else:
keys.insert(pos - 1, name)
self._names = keys | [
"def moveField(self, name, direction=None, pos=None, after=None, before=None):\n if bool(direction) + bool(after) + bool(before) + bool(pos is not None) != 1:\n raise ValueError, \"You must apply exactly one argument.\"\n keys = self.keys()\n\n if name not in keys:\n raise... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves a field with the name 'name' inside its schemata | def _moveFieldInSchemata(self, name, direction):
if not direction in (-1, 1):
raise ValueError, "Direction must be either -1 or 1"
fields = self.fields()
fieldnames = [f.getName() for f in fields]
schemata_names = self.getSchemataNames()
field = self[name]
f... | [
"def changeSchemataForField(self, fieldname, schemataname):\n field = self[fieldname]\n self.delField(fieldname)\n field.schemata = schemataname\n self.addField(field)",
"def _moveFieldToPosition(self, name, pos):\n keys = self._names\n oldpos = keys.index(name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all fields belonging to schemata 'name' | def delSchemata(self, name):
for f in self.fields():
if f.schemata == name:
self.delField(f.getName()) | [
"def _remove_special(cls, data):\n for key in list(data.keys()):\n if key.startswith(\"_\") or key == \"name\":\n del data[key]",
"def getSchemataFields(self, name):\n return [f for f in self.fields() if f.schemata == name]",
"def delete_field(self, name):\n if 'id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new schema by adding a new field with schemata 'name' | def addSchemata(self, name):
from Products.Archetypes.Field import StringField
if name in self.getSchemataNames():
raise ValueError, "Schemata '%s' already exists" % name
self.addField(StringField('%s_default' % name, schemata=name)) | [
"def create_schema():\n name = request.form['name']\n if name is None:\n return 'name not provided', 400\n\n try:\n schema = get_datastore().create_schema(name)\n except storage.error.SchemaExistsError:\n return 'Already exisits', 409\n\n return jsonify({'id': schema}), 201",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add model results to the flowsheet template. If fname is specified, this saves the resulting svg to a file. If fname is not specified, it returns the svg string. | def write_pfd(self, fname=None):
infilename = os.path.join(this_file_dir(), "steam_turbine_template.svg")
with open(infilename, "r") as f:
s = svg_tag(svg=f, tag_group=self.tags_steam_streams, outfile=fname)
if fname is None:
return s | [
"def save(self, fn=None):\n if fn == None:\n if self._fn == None:\n self._fn = \"defaul.svg\"\n else:\n self._fn = fn\n log.debug(\"Saving content to svg-file \" + str(fn))\n\n self.__create(self._fn)\n\n tree = etree.parse(self._fn)\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of supply point ids for the selected location and all of its descendants OR all supply point ids in the domain. | def get_relevant_supply_point_ids(domain, active_location=None):
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_locat... | [
"def assigned_locations(self) -> Sequence[LocationID]:",
"def getFlattenCoordIds (self, skipOptional = True):\n return [d.coord.id for d in self.root.flatten (skipOptional)]",
"def get_locations_and_children(self, location_ids):\n locations = self.filter(location_id__in=location_ids)\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test invalid inputs for parse_int() | def test_parse_int_invalid_input(test_input):
with pytest.raises(ValueError):
parse_int(test_input) | [
"def validate_int(arg):\n try:\n int(arg)\n except ValueError:\n print_error(\"usage: must be integer\")",
"def is_valid_integer(request_prompt, error_prompt):\n valid = False\n while not valid:\n value_string = input(request_prompt)\n try:\n value = int(value_st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the total volume of num_spheres spheres of the same radius. | def multiple_spheres_volume(radius: float, num_spheres: int) -> float:
#Your code here | [
"def sphere_volume(radius: Number) -> Number:\n return (4.0/3.0) * pi * radius * radius * radius",
"def sphere_volume(r):\n return (3.0/4.0)*pi*r**3",
"def sphere_radius(self) -> float:\n return self.GetSphereRadius()",
"def sphere(n):\n X, Y, Z = _np.mgrid[-1:1:n*1j, -1:1:n*1j, -1:1:n*1j]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the number of full cups you can make with total millilitres of coffee if every cup can hold amount_per_cup millilitres. >>> total_cups(200, 100) 2 >>> total_cups(350, 100) 3 | def total_cups(total: int, amount_per_cup: int) -> int:
#Your code here | [
"def add_cups(nums, total):\n return nums + list(range(max(nums)+1, total+1))",
"def convert_teaspoons_to_cups(num_to_convert):\n return c((num_to_convert * 0.0208)) + \" cups\"",
"def calcNumberOfCoolers(context):\n diameter = context[\"diameter\"]\n propellant = context.get(\"propellant\", 0)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the distance travelled (in km) when travelling with speed kilometers per hour for travel_days days and travel_hours hours. Assume uninterrupted travel. >>> trip_distance(40.0, 2, 20.0) 2720.0 >>> trip_distance(90.0, 0, 3.5) 315.0 | def trip_distance(speed: float, travel_days: int, travel_hours: float) -> float:
#Your code here | [
"def distance_of_trip(self, path):\n if path not in self.trips_by_path:\n return \"NO SUCH ROUTE\"\n return self.trips_by_path[path].distance",
"def calc_distance(self, tour):\r\n\r\n # Determines the distance from first city until last city.\r\n distance = 0\r\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of rows from a topchart. | def get_rows_from_topchart(soup: BeautifulSoup) -> List[element.ResultSet]:
return soup.find("ol", {"class": "elto-list"}).find_all(
"li", {"class": "elto-item"}
) | [
"def produce_rows_lst():\n\n soup = open_custom_html('')\n rows = soup.findChildren(\"tr\")[1:]\n return rows",
"def get_rows(self) -> List[dict]:\n\n return self.source.rows",
"def get_topchart_infos(soup: BeautifulSoup, category: str) -> List[Dict]:\n rows = get_rows_from_topchart(soup)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of dict containing data of a topchart. | def get_topchart_infos(soup: BeautifulSoup, category: str) -> List[Dict]:
rows = get_rows_from_topchart(soup)
if category == "films":
return [movies_utils.get_movies_infos_from_row(x) for x in rows]
elif category == "series":
return [series_utils.get_series_infos_from_row(x) for x in rows]
... | [
"def top_tvshow(self):\n top_tvshow = {}\n data = requests.get(self.url.format('Top250TVs', self.api_key)).json()\n #Loops through the data\n for item in data['items']:\n top_tvshow.setdefault(data['id'], [data['title'], data['year'], data['rank'], data['imDbRating']])\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the order of columns for a topchart based on its category. | def get_topchart_order(category: str) -> List:
if category == "films":
return movies_utils.get_order_movies_columns()
elif category == "series":
return series_utils.get_order_series_columns()
elif category == "jeuxvideo":
return videogames_utils.get_order_videogames_columns()
eli... | [
"def columns(self, category):\n\n if category == \"risk\":\n return [\"Date\", \"Title\", \"Severe\", \"Fatality\", \"Design\", \"Sample\", \"Sampling Method\", \"Matches\"]\n\n return [\"Date\", \"Title\", \"Design\", \"Sample\", \"Sampling Method\", \"Matches\"]",
"def get_col_top_label... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Cigar string and a Read, will return the sequence of the read that mapped to the genome. | def recoverMappedPortion(Cigar,Read):
#Edit Oct 10, 2013 to include skipped portions of reference sequence (introns)
#first process the CIGAR string
cigarSplit=re.findall('(\d+|[a-zA-Z]+)', Cigar)
cigarSplit=[[int(cigarSplit[ii]),cigarSplit[ii+1]] for ii in range(0,len(cigarSplit),2)]
#The... | [
"def get_aligned_segment_from_read(self, read):\n\n read_alignment_start = read.reference_start\n # read_alignment_stop = self.get_read_stop_position(read)\n\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n # read_quality... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given txt_annot={strand,exon,CDS}, readStrand, readPosition, will return a colon-separated string of transcript_id, readPosition_rel_to_CDSstart, readPosition_rel_to_CDSEnd, S/AS where S/AS indicates whether the read is on the same strand or not | def getTxtRelPositions(transcript_id,txt_annot,readStrand,readPosition):
#figure out sense/antisense. Easy
txtStrand=txt_annot['strand']
if txtStrand==readStrand:
SorAS='S'
else:
SorAS='AS'
#Great, now figure out position relative to CDSStart
#To do this, I will calculate th... | [
"def annotate_region_cdna_transcript1(args, q, t, db):\n\n ## checks\n # check transcript name if it is given\n if q.tpt and t.name != q.tpt:\n raise IncompatibleTranscriptError('Transcript name unmatched')\n # check q.beg and q.end is a valid Pos w.r.t exon boundaries\n t.check_exon_boundary(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the sequence data for ``seqID`` available in the container. | def get_sequence( self, seqID, key_residues=None ):
_check_type(self)
if key_residues is None:
return self[_check_column(self, "sequence", seqID)]
else:
return _get_key_sequence(self, "sequence", seqID, key_residues) | [
"def getseqbyid(conn, seqid):\n return strquery(conn, \"select sequence from sequences where \" \\\n + \"idsequences=%s\", (seqid,))",
"def get_seq(self, identifier):\n return self[self._identifier_to_index[identifier]]",
"def getseq_forid(self,seqfile,id):\t\n\t\tseq_dict={}\n\t\t\n\t\tfasta_seq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the phi angle for ``seqID`` available in the container. | def get_phi( self, seqID, key_residues=None ):
_check_type(self)
if key_residues is None:
return self[_check_column(self, "phi", seqID)]
else:
return _get_key_sequence(self, "phi", seqID, key_residues) | [
"def _compute_pitch_angle(self):\n return np.arctan(self.pitch / (2.0 * np.pi * self.radii))",
"def phase(angle):\n return angle % (2*math.pi)",
"def phi_max(self, theta):\n\n # The polar angle is fixed!\n return self._angle",
"def euler_phi(self):\n if not self.is_integral():\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the psi angle for ``seqID`` available in the container. | def get_psi( self, seqID, key_residues=None ):
_check_type(self)
if key_residues is None:
return self[_check_column(self, "psi", seqID)]
else:
return _get_key_sequence(self, "psi", seqID, key_residues) | [
"def phase_angle(self, scanpoint):\n raise NotImplementedError()",
"def GetAngle(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetAngle(self)",
"def phase_angle(self, scanpoint=0):\n disc_openings = (26.0, 42.0, 43.5, 126.0)\n O_C1d, O_C2d, O_C2Bd, O_C3d = disc_openings\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List which structure identifiers are available in the data container | def get_available_structures( self ):
_check_type(self)
return _get_available(self, "structure_") | [
"def structures_list_data():\n structures = StructureTable(dbfilename)\n return get_structures_json(structures, show_all=True)",
"def all_structs(self):\n if self.cluster:\n for e in self.cluster.structs:\n yield e\n for e in self.idl.structs:\n yield e",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the structure data for ``seqID`` available in the container. | def get_structure( self, seqID, key_residues=None ):
_check_type(self)
if key_residues is None:
return self[_check_column(self, "structure", seqID)]
else:
return _get_key_sequence(self, "structure", seqID, key_residues) | [
"def getseq_forid(self,seqfile,id):\t\n\t\tseq_dict={}\n\t\t\n\t\tfasta_sequences = SeqIO.parse(open(seqfile),'fasta')\n\t\t\n\t\tfor fasta in fasta_sequences:\n\t\t\tif fasta.id==id:\n\t\t\t\tseq_dict[fasta.id]=fasta.seq\n\t\t\t\tbreak\n\t\t\t\t\n\t\treturn(seq_dict)",
"def getseqbyid(conn, seqid):\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List which structure prediction identifiers are available in the data container. | def get_available_structure_predictions( self ):
_check_type(self)
return _get_available(self, "psipred_") | [
"def get_status(self):\n # type: () -> List[Any]\n self.cursor.execute(\"SELECT id, hyperparams, pid FROM experiments\")\n exps = []\n for exp_id, hyperparams, pid in self.cursor.fetchall():\n exps.append({'id': exp_id, 'hyperparams': hyperparams, 'pid': pid})\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List which labels are available in the data container. | def get_available_labels( self ):
_check_type(self)
return _get_available(self, "lbl_") | [
"def get_labels(self):\n return label_list",
"def get_labels(self):\n return self.labels_list",
"def list_labels(self):\n return list(self.repo.labels.list())",
"def get_labels(self):\n labels = []\n for g, graph in enumerate(self.graphs):\n for s in xrange(len(gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stores a link between an amenity and a place | def create_link_place_amenity(place_id, amenity_id):
place = models.storage.get(Place, place_id)
if not place:
abort(404)
amenity = models.storage.get(Amenity, amenity_id)
if not amenity:
abort(404)
if models.storage_t == "db":
if amenity in place.amenities:
retur... | [
"def link_amenity_to_place(place_id, amenity_id):\n place_obj = get_object(Place, place_id)\n amenity_obj = get_object(Amenity, amenity_id)\n if environ.get('HBNB_TYPE_STORAGE') != 'db':\n if amenity_id in place_obj.amenity_ids:\n return jsonify(amenity_obj.to_dict())\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes a link between a place and an amenity and returns an empty JSON | def delete_link_place_amenity(place_id, amenity_id):
place = models.storage.get(Place, place_id)
if not place:
abort(404)
amenity = models.storage.get(Amenity, amenity_id)
if not amenity:
abort(404)
if models.storage_t == "db":
if amenity not in place.amenities:
a... | [
"def delete_amenity_from_place(place_id, amenity_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n amenity = storage.get(\"Amenity\", amenity_id)\n if amenity is None:\n abort(404)\n amenities = place.amenities\n if amenity not in amenities:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the area and entity domain data entries with new registry data. | async def update_registries() -> None:
await update_area_registry()
await update_entity_registry() | [
"async def update_entity_registry() -> None:\n\n get_base().entities = await hass_entities()",
"async def update_area_registry() -> None:\n\n get_base().areas = await hass_areas()",
"def update(self):\n self.domain.update()",
"def entity_registry_updated(self, event):\n if event.data[\"act... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the area domain data entry with new registry data. | async def update_area_registry() -> None:
get_base().areas = await hass_areas() | [
"def update(self):\n self.domain.update()",
"def update(self):\n db_domain = Domain.query.all()\n list_db_domain = [d.name for d in db_domain]\n dict_db_domain = dict((x.name,x) for x in db_domain)\n\n headers = {}\n headers['X-API-Key'] = PDNS_API_KEY\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the entity domain data entry with new registry data. | async def update_entity_registry() -> None:
get_base().entities = await hass_entities() | [
"def update(self):\n self.domain.update()",
"def entity_registry_updated(self, event):\n if event.data[\"action\"] == \"remove\":\n self.remove_empty_devices()",
"def update_data(self, new_dict):\n\n self.data.update(new_dict)",
"async def update_registries() -> None:\n\n aw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an entity from the registry | async def remove_entity_from_registry(entity_id: str) -> None:
base = get_base()
entities = base.entities
for entity in entities:
if entity[CONF_ENTITY_ID] == entity_id:
entities.remove(entity)
break | [
"def remove_entity(self, entity):\n self.entities.remove(entity)",
"def entity_removed(self, entity):\r\n\t\tpass",
"def remove_entity(self, entity):\n del self.entities[entity.uuid]\n entity.universe = None",
"def delete(self, entity):",
"def pop_entity(self, entity):\n self._en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A dictionary list for the HA area registry used for this integration's domain data. | async def hass_areas() -> List[AreaSettings]:
hass = get_base().hass
areas: List[AreaSettings] = [] # make as an array so it can be sorted
store = Store(hass, 1, f"{DOMAIN}.{CONF_AREAS}")
data: Optional[AreaSettingsRegistry] = await store.async_load()
if data is None:
data = {}
# So... | [
"async def update_area_registry() -> None:\n\n get_base().areas = await hass_areas()",
"def get_area(self):\n return {'area name': self.__class__.__name__.lower(),\n 'width': self.__width,\n 'height': self.__height}",
"def _load_adm_areas(self):\n countries = {}\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Match an area with an entity by checking if the area name is at the beginning of the entity ID. | def match_area_with_entity_id(
entity_id: Optional[str], areas: Optional[List[AreaEntry]]
) -> Optional[str]:
if entity_id is None or areas is None:
return None
for area in areas:
name = area.name.lower().replace(" ", "_")
quote = "'"
regex = f"(all_)?({name.replace(quote, ... | [
"def match(self, oid):\n i = 0\n if isinstance(oid, str):\n oid = OID(oid)\n try:\n if not oid[0]:\n oid = OID(oid[1:])\n parts = self.part[-len(oid):]\n elif not oid[-1]:\n oid = OID(oid[:-1])\n parts ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A dictionary list for the HA entity registry used for this integration's domain data. | async def hass_entities() -> List[EntitySettings]:
hass = get_base().hass
entities: List[EntitySettings] = [] # make as an array so it can be sorted
entities_processed: List[
str
] = [] # keep track of ids so they don't get processed twice
store = Store(hass, 1, f"{DOMAIN}.{CONF_ENTITIE... | [
"def get_entity_registry_data(platform=None, domain=None) -> list:\n\n try:\n entity_reg = entity_registry.async_get(Gb.hass)\n entities = {k:_registry_data_str_to_dict(k, v, platform, domain)\n for k, v in entity_reg.entities.items()\n if _base_domai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a DataPreparer from config file. | def from_file(cls, cfg: ConfigType) -> 'DatasetPreparer':
cfg = copy.deepcopy(cfg)
data_preparer = cls(
data_root=cfg['data_root'],
dataset_name=cfg.get('dataset_name', ''),
task=cfg.get('task', 'textdet'),
nproc=cfg.get('nproc', 4),
train_pre... | [
"def from_config(cls, config: Dict):\n if keys.DataSteps.DATA not in config[keys.GlobalKeys.STEPS]:\n raise Exception(\"Cant have datasource without data step.\")\n\n # this is the data step config block\n step_config = config[keys.GlobalKeys.STEPS][keys.DataSteps.DATA]\n sour... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the feature and one-step-ahead predictions for some synthesized/transformed clinical data, check if it passes a utility threshold. | def check_if_pass(feat_pred_ori_user, one_step_pred_ori_user, feat_pred_syn_user, \
one_step_pred_syn_user, feature_gt, one_step_gt):
bounds = 4.5 # Have to be within X of the utility bounds
passes = []
user_utility_pass = True
# Iterate through each feature prediction, and get RMSE
for i, sing... | [
"def test_strict_thresholding():\n\n # Generate test dataset\n test_dset_size = (100, 100)\n test_hdim_1_pt = 50.0\n test_hdim_2_pt = 50.0\n test_hdim_1_sz = 10\n test_hdim_2_sz = 10\n test_amp = 10\n test_data = np.zeros(test_dset_size)\n test_data = tbtest.make_feature_blob(\n te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an initial population of synthesized user data for our genetic algorithm (basically samples some X users' data and adds small noise to them). It is worth noting that the noise is added by first scaling a user's data by a small float value, and then adding it as noise to another user. | def generate_initial_population(starting_data_nonuser, starting_original_user_data, initial_noise, \
population_starting_size):
initial_population = []
# Randomly take X users, add them as noise to the user
random_user_idxs = np.random.choice(starting_data_nonuser.shape[0], \
population_starti... | [
"def population_add_noise(current_population, starting_data_nonuser, noise_scale, \\\n population_max_size):\n\n new_population = []\n\n population_size = len(current_population)\n random_user_idxs = np.random.choice(starting_data_nonuser.shape[0], population_size)\n\n # Update current population to ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Used to mutate members of the population (basically, add more noise to each synthesized data candidate for a particular user) | def population_add_noise(current_population, starting_data_nonuser, noise_scale, \
population_max_size):
new_population = []
population_size = len(current_population)
random_user_idxs = np.random.choice(starting_data_nonuser.shape[0], population_size)
# Update current population to have populatio... | [
"def mutate_population(self):\n for invidual in self.population:\n if(np.random.uniform(0, 1) < self.mutation_chance):\n invidual.mutate_random_gene(self.mutation_scale)\n self.rate_population()",
"def mutate(self):\n for i in range(len(self.population)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the genetic algorithm for a particular user to determine the best synthesized data for this user (i.e. it passes the utility thresholds and has a large amount of noise added). | def user_repetitive_algorithm(feature_pred_scores_ori_user, one_step_scores_ori_user, \
feature_gt_user, one_step_gt_user, num_users, feature_predictors, one_step_predictors, chosen_feature_idxs, \
starting_original_user_data, starting_data_nonuser):
# synthesized_user_data = starting_original_user_data
... | [
"def runGA(self, time, state, numberOfActions, maxPopSize, doGASubsumption, selection):\r\n \r\n # Don't do a GA if the theta_GA threshold is not reached, yet\r\n if self.getSize()==0 or (time-self.getTimeStampAverage()) < cons.theta_GA:\r\n return\r\n \r\n self.setTimeStam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
When clustering the users by their feature values (not one-step-ahead), we are trying to find the user closest to the cluster's center. This makes more sense in the context of the generate_user_clusters | def get_best_cluster_representative(user_ids_in_cluster, user_utility_values):
# Get utility values for all user ids in this cluster
cluster_average_utility_value = user_utility_values[user_ids_in_cluster]
# Get centroid (I'll define as the average user)
cluster_average_utility_value = cluster_average_... | [
"def get_stop_move_and_clusters_user(df_user=None, dict_clust=None,\n quantile_diameter_cluster=None):\n dict_description_clusters = {}\n #We perform the computation on every cluster of our user.\n dfgb = df_user.groupby('label')\n for label, sub_df in dfgb:\n d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Once we have run the genetic algorithm for all cluster representatives, we take the transformation/noise for each representative user and apply it to all other users' data in the same cluster. | def get_synthetic_data(cluster_synthetic_noise, user_clusters, ori_data):
# Iterate through the users of the dataset
for user_idx in np.arange(ori_data.shape[0]):
# Get the cluster id of this user
cluster_id = user_clusters[user_idx]
# Get the cluster noise for this cluster id
... | [
"def user_repetitive_algorithm(feature_pred_scores_ori_user, one_step_scores_ori_user, \\\n feature_gt_user, one_step_gt_user, num_users, feature_predictors, one_step_predictors, chosen_feature_idxs, \\\n starting_original_user_data, starting_data_nonuser):\n\n # synthesized_user_data = starting_original_u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
First, generates a set of clusters for each user (reason mentioned in generate_user_clusters) Then, iterate through the user representatives for each cluster, and run the genetic algorithm to determine the noise to be added for each user representative's data. Then, apply the noise for all users in the same cluster to ... | def generate_sythetic_dataset(feat_pred_ori, one_step_pred_ori, \
feature_gt, one_step_gt, num_users, feature_predictors, one_step_predictors, chosen_feature_idxs, \
ori_data, num_clusters):
# Keep track of how many users are within bounds
full_fails = 0
initial_fails = 0
fail_dict = {}
... | [
"def get_synthetic_data(cluster_synthetic_noise, user_clusters, ori_data):\n\n # Iterate through the users of the dataset\n for user_idx in np.arange(ori_data.shape[0]):\n\n # Get the cluster id of this user\n cluster_id = user_clusters[user_idx]\n # Get the cluster noise for this cluster... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Configure nsxcontroller on the device. | def _configure_nsx_controller(self, device, nsx_cnt_name, nsx_cnt_ip, nsx_cnt_port):
result = self._set_nsxcontroller_name(device, nsx_cnt_name)
if result:
result = self._set_nsxcontroller_ip(device, nsx_cnt_name, nsx_cnt_ip)
if result:
result = self._set_nsxcontroller_po... | [
"def _set_nsxcontroller_port(self, device, nsx_cnt_name, nsx_cnt_port):\n try:\n device.nsx.set_nsxcontroller_port(name=nsx_cnt_name, port=nsx_cnt_port)\n return True\n except RuntimeError as e:\n self.logger.error(\n 'Configuring NSX-Controller %s Faile... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set NSX controller name. | def _set_nsxcontroller_name(self, device, nsx_cnt_name):
try:
device.nsx.nsx_controller_name(name=nsx_cnt_name)
return True
except RuntimeError as e:
self.logger.error(
'Configuring NSX-Controller %s Failed with Exception: %s' % e)
return F... | [
"def set_nick_name(self, val):\n self.nick = val",
"def _configure_nsx_controller(self, device, nsx_cnt_name, nsx_cnt_ip, nsx_cnt_port):\n result = self._set_nsxcontroller_name(device, nsx_cnt_name)\n if result:\n result = self._set_nsxcontroller_ip(device, nsx_cnt_name, nsx_cnt_ip... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set NSX controller IP. | def _set_nsxcontroller_ip(self, device, nsx_cnt_name, nsx_cnt_ip):
try:
device.nsx.set_nsxcontroller_ip(name=nsx_cnt_name, ip_addr=nsx_cnt_ip)
return True
except RuntimeError as e:
self.logger.error(
'Configuring NSX-Controller %s Failed with Exception... | [
"def set_udfs_ip(self, ip, port):\n self.udfs_helper.config(host=ip, port=port)\n return \"Success\"",
"def SetMasterIpAddress(self, master_ip):\n master = self.master.get()\n master.external_ip = master_ip\n master.put()",
"def set_IP(self,obj,key,val='',test=0):\n parts=val.split... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set NSX controller port. | def _set_nsxcontroller_port(self, device, nsx_cnt_name, nsx_cnt_port):
try:
device.nsx.set_nsxcontroller_port(name=nsx_cnt_name, port=nsx_cnt_port)
return True
except RuntimeError as e:
self.logger.error(
'Configuring NSX-Controller %s Failed with Exce... | [
"def set_port(self, port_name):\r\n global port\r\n port = port_name\r\n print(\"port set to: \" + port)\r\n self.port_menu()",
"def _configure_nsx_controller(self, device, nsx_cnt_name, nsx_cnt_ip, nsx_cnt_port):\n result = self._set_nsxcontroller_name(device, nsx_cnt_name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the total weight seen by the node. Returns: float, the total weight seen. | def total_weight(self):
return self.stats.mean.n | [
"def total_weight(self):\n return self.weight_fun(self.graph, self.path)",
"def total_weight(self) -> float:\r\n return sum([self.neighbours[x] for x in self.neighbours])",
"def get_weight(self) -> float:\n return 0",
"def get_sum_connections_weight(self):\n total = 0.0\n fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Performs a call to Google's API to perform speech-to-text. | def speech_text(audio_path):
# Data structure
data = {
'audio_path': audio_path
}
# Dumping the data into a JSON object
payload = json.dumps(data)
# Tries to perform the API call
try:
# POST request over the part-of-speech API method
r = requests.post(GOOGLE_API, d... | [
"def callNLPService(text):\n google_cloud_credentials = \"./assets/Interview_Voice_google_cloud_key.json\"\n nlp_service = get_google_nlp_service(google_cloud_credentials)\n client = nlp_service.documents()\n request1 = client.analyzeEntitySentiment(body={\n \"document\": {\n \"type\":... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens a new tab with a new editor instance | def _evt_new(self, event):
self.notebook.new_editor_tab() | [
"def open_new_tab(self, url=None):\n self.driver.execute_script(\n f'window.open(\"{url or self.server_url}\", \"new window\")'\n )",
"def open_file_in_new_tab(self, file_path):\n try:\n ActionSystem.new_file(file_path)\n except Exception as e:\n LogSys... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Closes the current editor tab | def _evt_close_current_editor_tab(self, event):
self.notebook.close_active_editor() | [
"def closeEditor (self, event = None):\r\n try: self.passageFrame.closeFullscreen()\r\n except: pass\r\n try: self.passageFrame.Destroy()\r\n except: pass",
"def close_current_tab(frame):\r\n children = frame.winfo_children()\r\n if len(children) != 0:\r\n children[0].dest... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Undo for the current editor tab | def _evt_undo_current_editor_tab(self, event):
self.notebook.undo_active_editor() | [
"def undo():\n pass",
"def undo(self):\n\n self.revert_to_checkpoint()",
"async def undo(self):\n self._preview_embed = self._history.undo()",
"def undo(self):\n \n return True",
"def undo(self):\n if len(self._undo_stack) == 0: \n print(\"At initial state.\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Redo for the current editor tab | def _evt_redo_current_editor_tab(self, event):
self.notebook.redo_active_editor() | [
"def editRedo(self):\n self.model.redoList.undo()\n self.updateAll(False)",
"async def redo(self):\n self._preview_embed = self._history.redo()",
"def _evt_undo_current_editor_tab(self, event):\n self.notebook.undo_active_editor()",
"def help_redo(self):\n print(help_msg.cmd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cut for the current editor tab | def _evt_cut_current_editor_tab(self, event):
self.notebook.cut_active_editor() | [
"def editCut(self):\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.cut()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()\n self.nodeDe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy for the current editor tab | def _evt_copy_current_editor_tab(self, event):
self.notebook.copy_active_editor() | [
"def editCopy(self):\n splitter = self.activeWindow.rightTabs.currentWidget()\n if splitter == self.activeWindow.outputSplitter:\n for view in splitter.children():\n try:\n if view.hasSelectedText():\n view.copy()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
paste for the current editor tab | def _evt_paste_current_editor_tab(self, event):
self.notebook.paste_active_editor() | [
"def on_paste(self, sender, arg=None):\n buf = self.get_active_buffer()\n if buf is not None:\n buf.paste_clipboard(sender, arg)",
"def _evt_copy_current_editor_tab(self, event):\n self.notebook.copy_active_editor()",
"def editPaste(self):\n if self.activeWindow.treeView.h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clear for the current editor tab | def _evt_clear_current_editor_tab(self, event):
self.notebook.clear_active_editor() | [
"def clear(self):\n os.system(self.clear_term)",
"def clear(self):\n self._tokens.clear()",
"def clear_content(self) -> None:\n\n self.ids.container.clear_widgets()",
"def clearPuzzle(self):\n print(\"Got to clearPuzzle\")\n self.puzzleTitleEdit.setText(\"\")\n self.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Selectall for the current editor tab | def _evt_selectall_current_editor_tab(self, event):
self.notebook.selectall_active_editor() | [
"def _select_all(self, event):\n event.widget.tag_add(\"sel\", \"1.0\", \"end\")\n event.widget.mark_set(INSERT, \"1.0\")\n event.widget.see(INSERT)\n return 'break' # Won't display the character",
"def selectAllEvent(self):\n try:\n self.currentTabController().selec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace for the current editor tab | def _evt_replace_current_editor_tab(self, event):
self.notebook.replace_active_editor() | [
"def editor_replace_tabs_with_spaces(self):\r\n editorWidget = self.get_current_editor()\r\n if editorWidget:\r\n helpers.replace_tabs_with_spaces(editorWidget)",
"def replace(self):\n\n try:\n myFile = open(self._filePath, 'r')\n tabsReplaced = myFile.read()\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the result from the sheet 'Finales 8 teams' | def _importeer_finales_8(self, ws):
final_8 = list()
for row_nr in (12, 14, 18, 20, 24, 26, 30, 32):
team_naam = self._lees_team_naam(ws, 'B' + str(row_nr))
if team_naam in self.deelnemende_teams.keys():
final_8.append(team_naam)
else:
... | [
"def scrape_teams():\n teams = []\n\n response = requests.get('http://www.basketball-reference.com/leagues/NBA_2015.html')\n soup = bs4.BeautifulSoup(response.content)\n team_soup = soup.find(id='all_standings').find(class_=\"valign_top\")\n\n eastern_conference_soup = team_soup.tbody.contents\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yield successive n-sized chunks from lst. | def chunks(self, lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n] | [
"def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]",
"def _chunks(lst: List[T], chunk_size: int) -> Generator[List[T], None, None]:\n return (lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size))",
"def chunks(lst, chunk_size):\n for i in range(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a video animation of dataset1 and dataset2 (time is z axis) | def videoviz(dateset1, dataset2):
z_rng = [0 , dateset1.shape[2]]
# z_rng = [70,80]
fig, (im1, im2) = plt.subplots(1, 2)
# fig.set_facecolor('black')
ax1 = im1.imshow(dateset1[:,:,0], animated=True)
ax2 = im2.imshow(dataset2[:,:,0], animated=True)
im1.get_xaxis().set_visible(False)
... | [
"def combine_gen_sources(source_a, source_b, mask):\n animation = zip(source_a(), source_b(), mask())\n\n first_time = cv2.getTickCount()\n for frame_a, frame_b, frame_mask in animation:\n frame = primitives.mask_together(frame_a, frame_b, frame_mask)\n last_time = cv2.getTickCount()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot quantification using a labeled dataset. For each machine-labeled voxel, find the closest manually labeled voxel and calculate the physical distance | def plot_quantification(labeled_path, skeleton, x_size, y_size, z_size):
labeled_0_nz_voxel_ids = np.genfromtxt(labeled_path, delimiter=' ')
labeled_0_nz_voxel_ids = np.around(np.uint16(labeled_0_nz_voxel_ids[:,2:5]))
labeled_0_nz_voxel_ids = labeled_0_nz_voxel_ids[:, [1,0,2]]
nz_skeleton = np.asarray(n... | [
"def vizualization():\n X = np.array(pandas.read_csv(\"dbscan-paintedData.csv\", sep=\"\\t\"))\n plt.figure()\n plt.subplot(2, 1, 1)\n for k in [1, 3, 15]:\n dists = k_dist(X, k=k)\n plt.plot(dists, label=\"k=%d\" % k)\n plt.legend()\n plt.xlabel(\"i-ti primer\")\n plt.ylabel(\"ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a bucket exists. Sets `exists` property of `bucket` | def check_bucket_exists(self, bucket):
if not isinstance(bucket, S3Bucket):
raise ValueError("Passed object was not type S3Bucket")
bucket_exists = True
try:
self.s3_client.head_bucket(Bucket=bucket.name)
except ClientError as e:
if e.respon... | [
"def s3_bucket_exists(self, bucketName):\n\n try:\n self._s3Res.meta.client.head_bucket(Bucket=bucketName)\n except Exception as e:\n return False\n return True",
"def does_bucket_exist(self, bucket_name):\n for bucket in self.my_s3.buckets.all():\n if(bucket_name == bucket.na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for the READ_ACP permission on `bucket` by trying to get the bucket ACL | def check_perm_read_acl(self, bucket):
if bucket.exists != BucketExists.YES:
raise BucketMightNotExistException()
try:
bucket.foundACL = self.s3_client.get_bucket_acl(Bucket=bucket.name)
self.parse_found_acl(bucket) # If we can read ACLs, we know the rest of... | [
"def check_perm_read(self, bucket):\r\n if bucket.exists != BucketExists.YES:\r\n raise BucketMightNotExistException()\r\n\r\n list_bucket_perm_allowed = True\r\n try:\r\n self.s3_client.list_objects_v2(Bucket=bucket.name, MaxKeys=0) # TODO: Compare this to doing a HeadBu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks for the READ permission on the bucket by attempting to list the objects. Sets the `AllUsersRead` and/or `AuthUsersRead` property of `bucket`. | def check_perm_read(self, bucket):
if bucket.exists != BucketExists.YES:
raise BucketMightNotExistException()
list_bucket_perm_allowed = True
try:
self.s3_client.list_objects_v2(Bucket=bucket.name, MaxKeys=0) # TODO: Compare this to doing a HeadBucket
exc... | [
"def check_perm_read_acl(self, bucket):\r\n\r\n if bucket.exists != BucketExists.YES:\r\n raise BucketMightNotExistException()\r\n\r\n try:\r\n bucket.foundACL = self.s3_client.get_bucket_acl(Bucket=bucket.name)\r\n self.parse_found_acl(bucket) # If we can read ACLs, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for WRITE permission by trying to upload an empty file to the bucket. File is named the current timestamp to ensure we're not overwriting an existing file in the bucket. | def check_perm_write(self, bucket):
if bucket.exists != BucketExists.YES:
raise BucketMightNotExistException()
timestamp_file = str(datetime.datetime.now().timestamp()) + '.txt'
try:
# Try to create a new empty file with a key of the timestamp
self.s... | [
"def has_write_permission(fileName):\r\n return os.access(fileName, os.W_OK)",
"def check_write_permissions(file):\n try:\n open(file, 'a')\n except IOError:\n print(\"Can't open file {}. \"\n \"Please grant write permissions or change the path in your config\".format(file))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks for WRITE_ACP permission by attempting to set an ACL on the bucket. | def check_perm_write_acl(self, bucket):
if bucket.exists != BucketExists.YES:
raise BucketMightNotExistException()
# TODO: See if there's a way to simplify this section
readURIs = []
writeURIs = []
readAcpURIs = []
writeAcpURIs = []
fullContr... | [
"def action_bucket__set_canned_acl(args, params=None): # pylint: disable=unused-argument\n bucket = cached_get_bucket(args.conn, args.bucket_name)\n filestr = 'private'\n if isinstance(params, dict):\n filestr = params['str']\n elif isinstance(params, str):\n filest... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download `obj` from `bucket` into `dest_directory` | def download_file(self, dest_directory, bucket, verbose, obj):
dest_file_path = pathlib.Path(os.path.normpath(os.path.join(dest_directory, obj.key)))
if not self.is_safe_file_to_download(obj.key, dest_directory):
print(f"{bucket.name} | Skipping file {obj.key}. File references a parent ... | [
"def download(source_bucket, source_object_key, tmp):\n # TODO\n pass",
"def download_object(tempdir,bucket, obj):\n if 'fname' not in obj:\n obj['fname'] = tempdir+\"/\"+os.path.basename(obj['Key'])\n get_object(bucket, obj['Key'], obj['fname'])",
"def download_file(self, file_name, obj_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enumerate all the objects in a bucket. Sets the `BucketSize`, `objects`, and `objects_enumerated` properties of `bucket`. | def enumerate_bucket_objects(self, bucket):
if bucket.exists == BucketExists.UNKNOWN:
self.check_bucket_exists(bucket)
if bucket.exists == BucketExists.NO:
raise Exception("Bucket doesn't exist")
try:
for page in self.s3_client.get_paginator("list_obje... | [
"def iterate_bucket(self, bucket, prefix, fn):\n paginator = boto3.client('s3').get_paginator('list_objects')\n for page in paginator.paginate(Bucket=bucket, Prefix=prefix):\n for obj in page['Contents']:\n key = obj['Key']\n fn(bucket, key)",
"def s3_object_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Translate ACL grants into permission properties. If we were able to read the ACLs, we should be able to skip manually checking most permissions | def parse_found_acl(self, bucket):
if bucket.foundACL is None:
return
if 'Grants' in bucket.foundACL:
for grant in bucket.foundACL['Grants']:
if grant['Grantee']['Type'] == 'Group':
if 'URI' in grant['Grantee'] and grant['Grantee']['URI... | [
"def test_permission_convert_from_acl(self):\n utils.warn_version(self, \"permission set conversion\", \"3.0\", test_version=__meta__.__version__, skip=True)\n\n perm = PermissionSet((Allow, 1, \"write-deny-match\"))\n utils.check_val_equal(perm.name, Permission.WRITE)\n utils.check_val_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test construction of the IP Test Tool base Server class. | def test_init(self):
self.assertRaises(TypeError, itt.Server) | [
"def setUp(self):\n\n self.testport = random.randint(40000, 65530)\n self.server = TServer.TSimpleServer(TestService.Processor(self),\n EzSSLServerSocket(host='localhost', \n port=self.testport,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles the interaction with Globus Auth. | def auth_callback():
if 'error' in request.args:
flash("You could not be logged into the portal: "
+ request.args.get('error_description'),
request.args['error'])
return redirect('/')
scheme = 'http' if 'localhost' in request.base_url else 'https'
redirect_uri = u... | [
"def authcallback():\r\n # If we're coming back from Globus Auth in an error state, the error\r\n # will be in the \"error\" query string parameter.\r\n if 'error' in request.args:\r\n flash(\"You could not be logged into the portal: \" +\r\n request.args.get('error_description', reques... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing response from host.utility.id_exists | def test_id_exists(host_id: int,
database_connection: mysql.connector.connect,
print_response: bool = False):
response = utility.id_exists(host_id, database_connection)
assert response
if print_response:
print(json.dumps(response, indent=2)) | [
"def test_exists_by_id(self, _id):",
"def test_portals_id_exists_get(self):\n pass",
"def uuidExists(self, uuid):\n if IGUIDManager(self.context.dmd).getObject(uuid):\n return DirectResponse.succeed()\n else:\n return DirectResponse.fail()",
"def test_id_not_exist(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing response from utility.slug_exists | def test_slug_exists(host_slug: str,
database_connection: mysql.connector.connect,
print_response: bool = False):
response = utility.slug_exists(host_slug, database_connection)
assert response
if print_response:
print(json.dumps(response, indent=2)) | [
"def test_slug_not_exists(host_slug: str,\n database_connection: mysql.connector.connect,\n print_response: bool = False):\n response = utility.slug_exists(host_slug, database_connection)\n assert not response\n if print_response:\n print(json.dumps(re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing response from utility.slug_exists | def test_slug_not_exists(host_slug: str,
database_connection: mysql.connector.connect,
print_response: bool = False):
response = utility.slug_exists(host_slug, database_connection)
assert not response
if print_response:
print(json.dumps(response, ind... | [
"def test_slug_exists(host_slug: str,\n database_connection: mysql.connector.connect,\n print_response: bool = False):\n response = utility.slug_exists(host_slug, database_connection)\n assert response\n if print_response:\n print(json.dumps(response, indent=2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing response from info.retrieve_by_slug | def test_retrieve_by_slug(host_slug: str,
database_connection: mysql.connector.connect,
print_response: bool = False):
host_dict = info.retrieve_by_slug(host_slug, database_connection)
assert host_dict is not None
assert "id" in host_dict
if print_resp... | [
"def test_retrieve_details_by_slug(host_slug: str,\n database_connection: mysql.connector.connect,\n print_response: bool = False):\n host_dict = details.retrieve_by_slug(host_slug, database_connection)\n assert host_dict is not None\n asser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Testing response from details.retrieve_by_slug | def test_retrieve_details_by_slug(host_slug: str,
database_connection: mysql.connector.connect,
print_response: bool = False):
host_dict = details.retrieve_by_slug(host_slug, database_connection)
assert host_dict is not None
assert "appeara... | [
"def test_retrieve_by_slug(host_slug: str,\n database_connection: mysql.connector.connect,\n print_response: bool = False):\n host_dict = info.retrieve_by_slug(host_slug, database_connection)\n assert host_dict is not None\n assert \"id\" in host_dict\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Forward batch to neural network and get results. | def forward_batch(self,data_batch):
self.model.blobs['data'].reshape(*data_batch.shape)
self.model.blobs['data'].data[...] = data_batch
result = self.model.forward()[self.output_layer]
return result | [
"def forward(self, inputs: List[float]) -> List[float]:\n self.__inputs__ = inputs\n return [neuron.compute_output(self.__inputs__)\n for neuron in self.__neurons__]",
"def evaluate(self, neural_network: NeuralNetwork) -> np.ndarray:\n return neural_network.feed_forward(self.te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get value for particular xtion number and key. | def get_xtion_val(n, key):
return xtion[n][key] | [
"def get_value(self, key: str) -> Any:\r\n if self.get_index(key) is None:\r\n return None\r\n return self.hash_table[self.get_index(key)][1]",
"def __getitem__(self, key):\n query = select([self.store.c.value]).where(self.store.c.key == key)\n result = self.conn.execute(que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print out the data frame to console and save it to file | def print_and_save_df(df, output=None):
if output:
output = ensure_dir(output)
df.to_csv(output, index=False, float_format='%.4f')
print(df.to_string(justify='right', float_format='%.4f', index=False)) | [
"def display_data_frame(self):\n header_text = ''\n col_keys = []\n for key in self.columns:\n header_text += (key + '\\t')\n col_keys.append(key)\n\n print(header_text)\n for i in range(0, len(self.columns[col_keys[0]].column_data)):\n print_strin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setter method for Animal._age. | def age(self, age):
self._age = age | [
"def set_age(self, new_age: int):\n self.__age = new_age",
"def set_age(self, age=0):\r\n self.age = age",
"def _set_age(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=Res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Increments age by one every season. | def aging(self):
self.age += 1 | [
"def age_one(self):\n self.age += 1",
"def IncreaseAge(self):\n self.__age += 1\n if self.__age >= self.__max_age:\n self.__Die()",
"def IncYear(self):\n self.year = self.year + 1\n self.set_day = None",
"def advance(self):\n max_days = self.months[self.mon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Birth weight of a newborn animal is drawn randomly from a gaussian curve. | def birth_weight(self):
birth_weight = random.gauss(self.p["w_birth"], self.p["sigma_birth"])
return birth_weight | [
"def genWeight(self, sex, height):\n self.identity.bmi = self.bmis[sex][GaussianChoice(5).getIndex(distribution = \"c\")]\n # add 2 for a more realistic weight\n self.identity.weight = float(format(float(self.identity.bmi * math.pow(height/100, 2)), '.1f')) + 2",
"def breed(mom, dad):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a convolutional autoencoder. input_dims is a tuple of integers containing the dimensions of the model input. filters is a list containing the number of filters for each convolutional layer in the encoder, respectively. The filters should be reversed for the decoder. latent_dims is a tuple of integers containing ... | def autoencoder(input_dims, filters, latent_dims):
input_x = keras.layers.Input(shape=input_dims)
encode = input_x
for i in range(0, len(filters)):
encode = keras.layers.Conv2D(filters[i], (3, 3),
padding="same",
activation='r... | [
"def autoencoder(input_dims, filters, latent_dims):\n input_l = keras.Input(shape=(input_dims))\n\n x = keras.layers.Conv2D(filters[0], (3, 3), activation='relu',\n padding='same')(input_l)\n x = keras.layers.MaxPooling2D((2, 2), padding='same')(x)\n for i in range(1, len(filt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify user isn't already a member. | def validate_user(self,data):
pool = self.context['pool']
user = data
query_membership = Membership.objects.filter(pool=pool, user=user)
if query_membership.exists():
raise serializers.ValidationError('User is already member of this pool')
return data | [
"def clean_user(self):\n user = self.cleaned_data['user']\n if self.group.user_set.filter(id=user.id).exists():\n raise forms.ValidationError(\"User is already a member of this group\")\n return user",
"def test_is_member(self):\n new_github_id = \"U0G9QF9C6\"\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the pool is capable of accepting a new member. | def validate(self,data):
pool = self.context['pool']
if pool.is_limited and pool.members.count() >= pool.members_limit:
raise serializers.ValidationError('Pool has reached its member limit!')
return data | [
"def test_pool_must_use_private_addresses(config_path, pool_members):\n print(f\"currently processing: {config_path}, members: \", end=\"\")\n for member in pool_members:\n print(f\"member:{member} \", end=\"\")\n assert IPv4Address(member).is_private # member IP address must be... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the GET /api/v1.0/releases endpoint returns 403 following failed authorization. | def test_list_helm_releases_insufficient_permissions(self):
rules = {'armada:get_release': policy_base.RULE_ADMIN_REQUIRED}
self.policy.set_rules(rules)
resp = self.app.simulate_get('/api/v1.0/releases')
self.assertEqual(403, resp.status_code) | [
"def test_get_movies_unauthenticated(client):\n url = reverse('movies')\n response = client.get(url)\n assert response.status_code == 403",
"def test_admin_only_with_valid_user_and_invalid_token(self):\n response = self.app.post(\n \"/api/1.0/releases/\",\n data=None, # For ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For a list of words, print out each word on a separate line, but in all uppercase. | def print_upper_words(words):
for word in words:
print(word.upper()) | [
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def print_all_uppercased(s):\n\twords = s.split()\n\tfor word in words:\n\t\tif word[0].isupper():\n\t\t\tprint word",
"def uppercase_words(string):\n string_parts = string.split()\n string_final = []\n for word in string_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
dfs() function takes as a parameter the root of a tree and searches through the nodes using a depth-first method and prints the data at each node accessed in the order that it is accessed. The visited set is used to prevent cycles during the search; this function is recursive | def dfs(root, visited=set()):
visited.add(root)
print(root.data)
for n in root.children:
if n not in visited:
            dfs(n, visited)
return visited | [
"def print_level_k_dfs(root, k):\n if root is None:\n return\n if k == 0:\n print(root.data, end=\" \")\n\n else:\n print_level_k_dfs(root.left, k-1)\n print_level_k_dfs(root.right, k-1)",
"def dfs(self, starting_node):\n\n raise NotImplementedError",
"def dfs_all():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The internal signal that is strobed by this field when an unsigned integer underflow occurs, or `None` if there is no such signal. The shape of the internal is the shape of the field descriptor, so repeated fields should index it by their field index. | def underflow_internal(self):
return self._underflow_internal | [
"def bit_underflow_internal(self):\n return self._bit_underflow_internal",
"def FieldHandle(self) -> _n_2_t_10:",
"def getAttachedField(self) -> \"SoField *\":\n return _coin.SoFieldSensor_getAttachedField(self)",
"def raw_fp_field_extraction(optree):\n size = optree.get_precision().get_base_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The internal signal that is strobed by this field when a bit overflow occurs (a high bit is written high using a bitset operation), or `None` if there is no such signal. The shape of the internal is the shape of the field descriptor, so repeated fields should index it by their field index. | def bit_overflow_internal(self):
return self._bit_overflow_internal | [
"def get_bitfield(self, name):\n for bf in self.bitfields:\n if bf.name == name:\n return bf\n return None",
"def getAttachedField(self) -> \"SoField *\":\n return _coin.SoFieldSensor_getAttachedField(self)",
"def bit_underflow_internal(self):\n return self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The internal signal that is strobed by this field when a bit underflow occurs (a low bit is written low using a bitclear operation), or `None` if there is no such signal. The shape of the internal is the shape of the field descriptor, so repeated fields should index it by their field index. | def bit_underflow_internal(self):
return self._bit_underflow_internal | [
"def getAttachedField(self) -> \"SoField *\":\n return _coin.SoFieldSensor_getAttachedField(self)",
"def underflow_internal(self):\n return self._underflow_internal",
"def FieldHandle(self) -> _n_2_t_10:",
"def bit_overflow_internal(self):\n return self._bit_overflow_internal",
"def get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show insults or post a new one | def insults(current_user):
if request.method == 'GET':
insults = Insults.query.all()
output = []
for insult in insults:
output.append({
'insult': insult.insult,
'submitted_by': insult.user
})
return jsonify({
... | [
"def new_insult():\n new_insult = insult_me()\n message.set(new_insult)",
"async def insult(ctx):\n insultees = ctx.message.mentions\n if insultees: # if someone was mentioned\n for insultee in insultees:\n slur = insulter.gen_insult(\"cogs/insulter/\")\n await ctx.send(in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialise the object with the models and parameters we wish to use in the grid search | def __init__(self, models, params):
'''
Initialise the object with the models and parameters we wish to use in the grid search
'''
if not set(models.keys()).issubset(set(params.keys())):
missing_params = list(set(models.keys()) - set(params.keys())... | [
"def initialize_models(self):\n pass",
"def __init__(self, data_sources, model):\n self._data_sources = data_sources\n self._model = model",
"def _construct(self, model_config):\n pass",
"def initModel(self, model):\r\n\r\n self.modelName = model\r\n self.dbItem = Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialises the classifier object with paths to the datasets and output directories | def __init__(self, training_dirpath, training_labels_filepath, testing_dirpath, testing_labels_filepath, out_dirpath, watch_words_data_filepath):
self.training_dirpath=training_dirpath
self.testing_dirpath=testing_dirpath
self.out_dirpath=out_dirpath
self.training_labels_filepath=trainin... | [
"def __init__(self, \n root_dir,\n imgs_path_file,\n labels_file,\n pars={}):\n \n self.root_dir = root_dir\n if not(os.path.exists(root_dir)):\n os.mkdir(root_dir)\n self.imgs_path_file = imgs_path_file\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |