query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result in ``PageMiddleware._installed`` so this only runs once. | def installed(cls):
try:
return cls._installed
except AttributeError:
name = "mezzanine.pages.middleware.PageMiddleware"
installed = middlewares_or_subclasses_installed([name])
setattr(cls, "_installed", installed)
return installed | [
"def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)",
"def test_deferred_page_classes_are_not_registered(self):\n list(SimplePage.objects.defer(\"content\"))\n simplepage_subclasses = [\n cls for cls in get_page_models() if i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if all the arguments it receives are numeric (according to `is_numeric`). | def are_numeric(*values):
for value in values:
if not is_numeric(value):
return False
return True | [
"def isnumeric(self): # real signature unknown; restored from __doc__\n return False",
"def isNumeric(self) -> bool:\n ...",
"def check_value(self, iterable):\n allnumeric = True\n for item in iterable:\n if type(item) not in [int, float]:\n allnumeric = Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
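A minimal usage sketch for the row above, assuming a hypothetical `is_numeric` helper (the real one is not shown in this row):

```python
def is_numeric(value):
    """Hypothetical helper: True for ints, floats, and numeric strings."""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

def are_numeric(*values):
    for value in values:
        if not is_numeric(value):
            return False
    return True

assert are_numeric(1, 2.5, "3")        # all numeric -> True
assert not are_numeric(1, "abc", 3.0)  # one non-numeric -> False
```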
return the unitwise definition corresponding to attrname | def _get_wavelength_attrs_with_units(self, attrname, units='AA'):
attr = self._lick[attrname]
if self.wavelength_unit is not None:
if units is None:
return attr * unit[self.wavelength_unit]
else:
return (attr * unit[self.wavelength_unit]).to(units)... | [
"def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scan for independent loops and set up dictionaries. | def main(self, verbose=0):
indepdict=self.scan_for_loop(self.indeploop)
pegdict1 = self.scan_for_loop(self.pegloop1)
pegdict2 = self.scan_for_loop(self.pegloop2)
if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:
return dict()
a... | [
"def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare looped lines from looping dictionary. | def prepare_looped_lines(self, alldict, comblist):
loopline_dict=dict()
for stridx in comblist:
lidx = int(stridx.split('-')[0])
loopidx = int(stridx.split('-')[1])
loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[... | [
"def _processLines(self):\n self.nlines = len(self.lines)\n self.params = {}\n self._pline = {}\n for i,line in enumerate(self.lines):\n if (line[0] is not '#') & (line.strip() is not ''):\n spl = line.split()\n self.params[spl[0]] = ' '.join(spl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare looped datasets from looping lines. | def prepare_looped_datasets(self, alldict, allcombs):
datasets_dict=dict()
numcombs = len(allcombs)
combct = 0
while combct < numcombs:
newdata = list(self.baseinput.data)
loopedlines = dict()
loopedlines = self.prepare_looped_lines(alldict, allcombs[c... | [
"def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)",
"def create_data_generators(shuffl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create independently looped input files. | def create_input_files(self, datasets_dict):
ifname = self.keywords['inputfile']
dirstem = os.path.dirname(ifname)
basename = os.path.basename(ifname).split('.')[0]
createdfiles=list()
if dirstem == "":
dirstem = os.getcwd()
dkeys = datasets_dict.keys()
... | [
"def create_temp_files(self):\n for input_file in os.listdir(self.input_path):\n full_filename = self.input_path + input_file\n print(full_filename)\n if self.format == \"xml\":\n data_loader = loader.AbstractsXmlLoader(full_filename, config=Config(None))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract constant names from sybdb.h to use as python constants | def extract_constants(freetds_include="sybdb.h", constants_file="bcp_constants.py"):
fileno, source_file = mkstemp(suffix=".c", text=True)
write(fileno, "#include <{}>".format(freetds_include).encode())
close(fileno)
fileno, include_directives = mkstemp(suffix=".txt")
close(fileno)
if ON_WINDO... | [
"def getConstants():\n \n out = []\n \n api = __import__('api')\n\n for constant in dir(api):\n if constant[0].isupper():\n id = getattr(api, constant)\n if type(id).__name__ not in [\"function\", \"type\"]:\n out.append(constant)\n\n return out",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get open accounts. Returns array with active account numbers. | async def get_open_accounts(self):
result = []
URL = API_HOST + "/api/resources/header"
async with async_timeout.timeout(TIMEOUT):
response = await self.session.get(URL)
json_data = await response.json()
accounts = json_data["data"]["accounts"]["data"]["data"]
... | [
"def active_accounts(self):\n # TODO: Figure out what accounts are active based on memberships.\n return self.accounts.all()",
"def user_open_orders(self):\n response = self.query('user_open_orders')\n return response",
"def list_accounts(self):\n pass",
"def getConnectedAcc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get budget billing data | async def __getBBL_async(self, account, projectedBillData) -> dict:
_LOGGER.info("Getting budget billing data")
data = {}
try:
async with async_timeout.timeout(TIMEOUT):
response = await self.session.get(
URL_BUDGET_BILLING_PREMISE_DETAILS.format(... | [
"def billing(self) -> pulumi.Output['outputs.BucketBillingResponse']:\n return pulumi.get(self, \"billing\")",
"def GetCampaignBudget(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get data from appliance usage | async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict:
_LOGGER.info("Getting appliance usage data")
JSON = {"startDate": str(lastBilledDate.strftime("%m%d%Y"))}
data = {}
try:
async with async_timeout.timeout(TIMEOUT):
response = await... | [
"def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the default form class used for user registration. | def get_form_class(self, request):
return RegistrationForm | [
"def get_form_class(self, request):\n\t\treturn RegistrationForm",
"def get_registration_form_class():\n custom_class = getattr(django_settings, 'REGISTRATION_FORM', None)\n if custom_class:\n return load_module(custom_class)\n else:\n return OpenidRegisterForm",
"def get_form_class(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the sum tree data structure for the given replay capacity. | def __init__(self, capacity):
assert isinstance(capacity, int)
if capacity <= 0:
raise ValueError(
'Sum tree capacity should be positive. Got: {}'.format(capacity))
self.nodes = []
self.depth = int(np.ceil(np.log2(capacity)))
self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -... | [
"def ConstructTree(self):\n step = 0\n totalNodes = 0\n while step <= self.__steps:\n self.__nodes[step] = {}\n nUps = 0\n while nUps <= totalNodes:\n combins = BinomialOptionModel.__nCr(totalNodes, nUps)\n self.__nodes[step][nUps] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
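A quick check of the index arithmetic in the row above (`depth` and `low_idx`), using the same expressions on a concrete capacity:

```python
import numpy as np

capacity = 10
depth = int(np.ceil(np.log2(capacity)))  # 4: levels needed below the root
low_idx = (2 ** depth) - 1               # 15: offset of the first leaf slot
print(depth, low_idx)                    # 4 15
# a priority index p maps to flat slot p + low_idx,
# so slots 15..24 hold the 10 priorities
```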
Performs stratified sampling using the sum tree. | def stratified_sample(self, batch_size, rng):
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size),
batch_size, self.depth)
return np.minimum(indi... | [
"def bootstrap_sampling(tree_stats, rng, num_trials=100, priors=\"conditional\"):\n bound_rng = partial(rng, size=num_trials)\n stat_result, levels, labels = tree_stats\n max_level, n_classes = stat_result.shape[:2]\n if (isinstance(priors, (list, numpy.ndarray)) and\n len(priors) == n_classe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints a message only when app is in debug mode | def print_debug(message):
if current_app.debug:
print(message) | [
"def debug(msg):\n debug = False\n if debug:\n print msg",
"def checkDebug(message):\n if debug == True:\n print(message)",
"def DebugMessage(message=\"\"):\n if global_debug:\n print(\"\\033[93m DEBUG: \" + message + \"\\033[0m\")",
"def debug(msg):\n if debug_level >= 1:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function will optionally print a header guard for `cl_khr_fp64` if a 64-bit type is used as the source or destination, and return a bool that indicates whether this guard will need to be closed after the calling function has finished printing functions that use the 64-bit source/destination type. | def conditional_guard(src, dst):
int64_count = 0
float64_count = 0
float16_count = 0
if src in int64_types or dst in int64_types:
int64_count = 1
if src in float64_types or dst in float64_types:
float64_count = 1
if src in float16_types or dst in float16_types:
float16_count = 1
if float16_cou... | [
"def is_format_header(h):\n\n return h.dtype == header_image_dtype or h.dtype == header_image_dtype.newbyteorder()",
"def condition(segl: MessageSegment, segr: MessageSegment):\n return isPrintable(segl.bytes) and isPrintable(segr.bytes)",
"def have_binary128():\n try:\n ti = type_info(np.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
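The guard the docstring refers to is OpenCL's `cl_khr_fp64` extension; a hedged sketch of the open/close pairing it describes (the exact strings printed by the truncated function are an assumption):

```python
def conditional_guard_sketch(src, dst, float64_types=("double",)):
    """Open an #ifdef when a 64-bit float type is involved; caller closes it."""
    if src in float64_types or dst in float64_types:
        print("#ifdef cl_khr_fp64")
        return True   # caller must emit "#endif" after its functions
    return False

if conditional_guard_sketch("double", "int"):
    print("/* ... functions using double ... */")
    print("#endif")
```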
This helper function returns the correct clc core conversion function name for a given source and destination type, with optional size, mode and saturation arguments. | def clc_core_fn_name(dst, size='', mode='', sat=''):
return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode) | [
"def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()",
"def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
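Because this row's document is complete, its output is easy to illustrate:

```python
def clc_core_fn_name(dst, size='', mode='', sat=''):
    return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode)

print(clc_core_fn_name("float"))                  # __clc_convert_float
print(clc_core_fn_name("int", size="4",
                       sat="_sat", mode="_rte"))  # __clc_convert_int4_sat_rte
```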
Apply weight normalization to all of the layers. | def apply_weight_norm(self):
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(
m, torch.nn.ConvTranspose1d
):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_app... | [
"def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):\n torch.nn.utils.weight_norm(m)\n self.apply(_apply_weight_norm)",
"def associate_normalization_layers(self, model):\n if (len... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
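A runnable sketch of the pattern in this row, assuming a small stand-in `torch` model (the surrounding class is not shown):

```python
import torch

model = torch.nn.Sequential(
    torch.nn.Conv1d(1, 8, kernel_size=3),
    torch.nn.ReLU(),
    torch.nn.ConvTranspose1d(8, 1, kernel_size=3),
)

def _apply_weight_norm(m):
    # only convolutional layers get reparameterized, as in the row above
    if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
        torch.nn.utils.weight_norm(m)

model.apply(_apply_weight_norm)
# each matched layer now exposes weight_g / weight_v instead of a plain weight
print(hasattr(model[0], "weight_g"))  # True
```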
Register stats for denormalization as buffer. | def register_stats(self, stats):
assert stats.endswith(".h5") or stats.endswith(".npy")
if stats.endswith(".h5"):
mean = read_hdf5(stats, "mean").reshape(-1)
scale = read_hdf5(stats, "scale").reshape(-1)
else:
mean = np.load(stats)[0].reshape(-1)
s... | [
"def _weight2buffer(self):\n delattr(self.module, 'weight')\n self.module.register_buffer('weight', self.weight.data)\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n delattr(self.module, 'bias')\n self.module.register_buffer('bias', self.bias.data)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove weight normalization module from all of the layers. | def remove_weight_norm(self):
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_re... | [
"def remove_weight_norm(self):\n def _remove_weight_norm(m):\n try:\n if isinstance(m, torch.nn.Conv1d) \\\n or isinstance(m, torch.nn.ConvTranspose2d):\n torch.nn.utils.remove_weight_norm(m)\n #logging.debug(f\"Weight norm is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply weight normalization to all of the layers. | def apply_weight_norm(self):
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(
m, torch.nn.ConvTranspose1d
):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_app... | [
"def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):\n torch.nn.utils.weight_norm(m)\n self.apply(_apply_weight_norm)",
"def associate_normalization_layers(self, model):\n if (len... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a humanized string representing the time difference between now() and the input timestamp. The output rounds up to days, hours, minutes, or seconds: 4 days 5 hours returns '4 days'; 0 days 4 hours 3 minutes returns '4 hours', etc. | def time_since(timestamp=None):
rstr = ""
if not timestamp or not isinstance(timestamp, datetime.datetime):
return rstr
now = timezone.now()
timediff = now - timestamp
days = timediff.days
weeks = days//7
months = days//30
minutes = timediff.seconds % 3600 // 60
seconds = ti... | [
"def humanizeTimeDiff(timestamp = None):\n import datetime\n \n timeDiff = datetime.datetime.now() - timestamp\n days = timeDiff.days\n hours = timeDiff.seconds/3600\n minutes = timeDiff.seconds%3600/60\n seconds = timeDiff.seconds%3600%60\n \n str = \"\"\n tStr = \"\"\n if days > 0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return elements in the message with given parameters. `match` is the type of elements you want to get (check the parse_type variable to see possibilities); using `!` at the start of `match` will reverse the value of `positive`. `occurences` selects the nth indexed elements to capture; None will find everything. | def finder(self, match="w", occurences=None, start=None, stop=None, trigger=True, positive=True, reverse=False, keep_prefix=False):
res = []
length = len(self.parse_type)
if occurences != None:
occurences = str(occurences)
index_array = self.indexes(occurences, 1)
is... | [
"def _any_depth_parse(match):\n markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]\n for idx in (4, 5):\n if markers[idx]:\n markers[idx] = mtypes.emphasize(markers[idx])\n return [m for m in markers if m]",
"def _parse_pb_prune(matches):\n if len(matches) != 2:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if parameters match the parse_type. `match` is the amount of each parse_type element you want to search; you can write `www` to check 3 words in a row. `ranges` follows the same syntax as `occurences`, except it targets indexes. | def checker(self, match="xw", ranges="0,1", in_a_row=True, reverse=False):
res = []
length = len(self.parse_type)
if ranges != None:
ranges = str(ranges)
index_array = self.indexes(ranges)
substring = ""
for idx in range(length*reverse-reverse, length*(-reve... | [
"def can_process(self, statement):\r\n set1 = ['sweet', 'room']\r\n set2 = ['delux', 'room']\r\n set3 = ['condo', 'room']\r\n\r\n if all(x in statement.text.split() for x in set1):\r\n return True\r\n elif all(x in statement.text.split() for x in set2):\r\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve which events to capture from the config | def set_capture_events_from_config(self):
event_config = [
{
"config_key": "events_watchlist",
"events": [
"watchlist.hit.process",
"watchlist.hit.binary",
"watchlist.storage.hit.process",
... | [
"def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()",
"def events(self):\n return self._events",
"def get_subscribed_events():",
"def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()",
"def events(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compares an image to its reference | def compare(self, reference, image):
if not os.path.isfile(reference):
raise PictureComparatorError("Reference file %s does not exist" % reference)
if not os.path.isfile(image):
raise PictureComparatorError("Image file %s does not exist" % image)
referen... | [
"def compare_image(src_img, obj_img):\n im_src = aircv.imread(src_img) \n im_obj = aircv.imread(obj_img)\n\n pos = aircv.find_template(im_src, im_obj)\n logging.debug('Compare_Image: \\nSource Image: %s \\nObject Image:%s \\nResults: %s\\n' %(src_img, obj_img, pos)) \n \n if pos:\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
From a matrix of difference pixels (for each pixel, we have 0 if the pixel is the same, or nonzero if they are different), creates a list of the pixels which are different and a PNG image of the same size as the 'step' image, where each different pixel is coloured RED. | def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones):
# complete diff "image" to the size of step image
diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1)
# ignore e... | [
"def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if two things have the same type. | def same_type(one, two):
return isinstance(one, type(two)) | [
"def is_same(type1, type2):\n nake_type1 = remove_declarated(type1)\n nake_type2 = remove_declarated(type2)\n return nake_type1 == nake_type2",
"def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1, type(variable2))",
"def _is_equal_same_type(self, other):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
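One subtlety worth noting: `isinstance(one, type(two))` is not symmetric across subclasses, as this check shows (`bool` subclasses `int`):

```python
def same_type(one, two):
    return isinstance(one, type(two))

print(same_type(True, 1))  # True  -- bool is a subclass of int
print(same_type(1, True))  # False -- int is not a bool
```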
AirInstance constructor. `name`: the name of the instance. `input`: an object with the YAML description of the IR instance. `transmit_handler`: a function to be called to transmit pkts. Add support to allow the specification of the MetaIR instance. | def __init__(self, name, input, transmit_handler):
local_dir = os.path.dirname(os.path.abspath(__file__))
MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml'))
self.transmit_handler = transmit_handler
self.name = name
self.tm_started = False
self.disabl... | [
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n associate_public_ip_address: Optional[pulumi.Input[bool]] = None,\n ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Launch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process any table initialization spec from the IR desc. The IR specification may provide a set of table initialization operations in a "table_initialization" object. This takes the form of a sequence of table entry specifications. | def process_table_init(self):
logging.debug("Processing table initialization, %d entries",
len(self.table_initialization))
for init_entry in self.table_initialization:
for table_name, entry_desc in init_entry.items():
self.air_table[table_name].add_entr... | [
"def initial_table(table_name, metadata, line):\r\n table = Table(table_name, metadata,\r\n Column('tuning_id', Integer, primary_key=True),\r\n Column('_round', Integer, primary_key=True),\r\n Column('_cost', VARCHAR(255), nullable=False)\r\n )\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable the switch instance. Start the traffic manager threads and allow packets to enter the processor chain. | def enable(self):
if not self.tm_started:
for name, tm in self.air_traffic_manager.items():
logging.debug("Starting tm %s" % name)
tm.start()
        self.tm_started = True  # record on the instance that the traffic managers are running
logging.debug("Enabling switch %s" % self.name)
self.disabled = False | [
"def enable_packet_switching(self):\n self.send_and_recv('CALL:LTE:SIGN:PSWitched:ACTion CONNect')\n self.wait_for_pswitched_state()",
"def enable(self):\n\n super(SequenceShot, self).enable()\n\n # create the switch handlers\n for switch in self.config['switches']:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Disable the switch instance. Packets on ingress are discarded while the switch is disabled; traffic manager threads are not stopped. | def disable(self):
logging.debug("Disabling switch %s" % self.name)
self.disabled = True | [
"def disable_packet_switching(self):\n self.send_and_recv('CALL:LTE:SIGN:PSWitched:ACTion DISConnect')\n self.wait_for_pswitched_state()",
"def disable(self):\n\n super(SequenceShot, self).disable()\n\n for switch in self.config['switches']:\n self.machine.switch_controller.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transmit handler template for documentation. `out_port`: the port number to which the packet is to be sent. `packet`: a bytearray object holding the packet to transmit. | def dummy_transmit_handler(out_port, packet):
pass | [
"def handle_packet_out(self, buffer_id, in_port, actions, data):\n pass",
"def _post(self, which_port, msg):\n return _spacegrant_swig.binary_sink_sptr__post(self, which_port, msg)",
"def write(self, *args):\n return _yarp.PortWriterBufferBase_write(self, *args)",
"def _post(self, which_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take a field from the CSV, expand/split it on a delimiter, and return a list of individual values. If the return_list flag is set to true, this method returns the data as a list of new fields instead of a cleaned-up string normalized with a semicolon delimiter. | def expand_and_normalize_field(field, return_list=False):
if isinstance(field, basestring):
field = field.rstrip(';:,')
data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)]
if return_list:
return data
else:
return ";".join(data)
else:
... | [
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def splitCSVLine(self, line):\n import string\n list = []\n position = 0\n fieldStart = 0\n while 1:\n if position >= len(line):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
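A runnable sketch of this row's function, assuming `_normalize_expanded_field` merely strips whitespace and porting `basestring` to Python 3 (both assumptions; the helper and the non-string branch are not fully shown):

```python
import re

def _normalize_expanded_field(value):
    # assumption: the real helper at least strips surrounding whitespace
    return value.strip()

def expand_and_normalize_field(field, return_list=False):
    if isinstance(field, str):  # `basestring` in the Python 2 original
        field = field.rstrip(';:,')
        data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)]
        return data if return_list else ";".join(data)
    # assumption: pass non-strings through; the elided branch is not shown
    return field

print(expand_and_normalize_field("a; b, c:"))                 # a;b;c
print(expand_and_normalize_field("a; b", return_list=True))   # ['a', 'b']
```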
Take a row and a field which may have delimited values and convert into a list of new rows with the same data except for the replaced delimited value. | def expand_rows(row, delimited_fields, expand_row):
# _log.debug('expand_row is {}'.format(expand_row))
# go through the delimited fields and clean up the rows
copy_row = copy.deepcopy(row)
for d in delimited_fields:
if d in copy_row:
copy_row[d] = expand_and_normalize_field(copy_ro... | [
"def _transform_row(self, row):\n tmp_row = []\n\n for i, column in enumerate(row.value):\n if column.scalar_value.null:\n tmp_row.append(None)\n elif column.has_array_value:\n field_name, rep, mutate_to, cast_from = self._column_data_types[i]\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply mapping of row data to model. | def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs):
initial_data = kwargs.get('initial_data', None)
model = model_class()
# _log.debug("map_row's mappings {}".format(mapping))
# If there are any initial states we need to set prior to mapping.
if initial_data:
... | [
"def apply_model(row):\n model = load_model()\n return model(row)",
"def transform(self, X):\n\n self.check_is_fitted([\"mappings\"])\n\n X = super().transform(X)\n\n for c in self.columns:\n\n X[c] = X[c].map(self.mappings[c])\n\n return X",
"def _fast_map_row(row):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates stats inside mod_stats_map with data gathered from the file. | def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map):
with open(file_name) as f:
lines = f.readlines()
upstream_ref = None
upstream_start_line = None
for line_number, line in enumerate(lines):
if REGION_START_TAG in line:
tag, ref_name = _extract_tag_and_ref_name_from_line(... | [
"def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)",
"def read_stats(self, files):\n with open(self.stats_file, newline='', encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n self.stats = {row['file']: row\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the tracking file for the given file. Returns the last path mentioned in the file via a tracking tag or the equivalent thirdparty path given the file's path. If there is no file in the default path and no files mentioned within the file exist, returns None. Normally the thirdparty path must exist. Passing |check_e... | def compute_tracking_path(stats, our_path, our_lines, do_lint_check=False,
check_exist=True, check_uses_tags=False):
tracking_path = staging.get_default_tracking_path(our_path)
base_matcher = re.compile(re.escape(FILE_TRACK_TAG) + r' "([^\"]+)"')
tag_matcher = re.compile(re.escape(REGION... | [
"def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n el... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the notices object as if the two paths were properly staged. analyze_diffs needs to be independent of staging. Staging might not have been run, or might be out of date from when analyze_diffs is run. So we make a best attempt to reconstruct the notices that would have occurred poststaging. | def _compute_staged_notices(mods_path, third_party_path):
mods_notices = notices.Notices()
if mods_path:
mods_notices.add_sources([mods_path])
third_party_notices = notices.Notices()
if third_party_path:
third_party_notices.add_sources([third_party_path])
# If there are mods and third_party notices, p... | [
"def apply_decisions(base, decisions):\n\n merged = copy.deepcopy(base)\n prev_path = None\n parent = None\n last_key = None\n resolved = None\n diffs = None\n # clear_parent actions should override other decisions on same obj, so\n # we need to track it\n clear_parent_flag = False\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update dictionary from a collection of documents. Each document is a list of tokens. | def add_document_lists(self, docs):
for sent in docs:
sent = map(self.process_token, sent)
self._token_count.update(sent) | [
"def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)",
"def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the list of token_id given doc. | def doc2id(self, doc):
if isinstance(doc, string_types):
raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")
doc = map(self.process_token, doc)
return [self.token_to_id(token) for token in doc] | [
"def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]",
"def get_word2ids_from_tokens(word2id,tokens):\r\n return [get_word2id(word2id,x) for x in tokens]",
"def get_tokens_for_doc(self, pid):\n end_offset = self.end_offsets[pid]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the token_id of given token. | def token_to_id(self, token):
token = self.process_token(token)
return self.token2id.get(token, len(self.token2id) - 1) | [
"def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)",
"def get_id(self, token):\n\n if not isinstance(token, types.UnicodeType):\n raise TypeError(\"token must be Unicode\")\n\n if token not in self.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
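The `len(self.token2id) - 1` fallback maps unknown tokens to the vocabulary's last id, which presumes the final slot is an `<unk>`-style entry; a toy illustration:

```python
token2id = {"the": 0, "cat": 1, "<unk>": 2}

def token_to_id(token):
    return token2id.get(token, len(token2id) - 1)

print(token_to_id("cat"))    # 1
print(token_to_id("zebra"))  # 2 -- falls through to the <unk> slot
```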
tokenid to token (string). | def id_to_token(self, idx):
return self._id2token[idx] | [
"def id_to_token(self,\n id):\n return self.sp_processor.IdToPiece(id)",
"def convert_id_to_token(self, index: int) -> str:\n try:\n return self.tokens[index]\n except IndexError:\n raise IndexError(f\"Unrecognized index: '{index}'\")",
"def map_id_to_toke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the current trigger. | def delete(self):
request = self.triggers_service.delete(path=self._path)
request.execute() | [
"def delete_trigger(self, Name: str) -> Dict:\n pass",
"def delete_trigger(self, trigger_id):\n self._delete(path=\"triggers/{}\".format(trigger_id))",
"def create_delete_trigger(self):\n self.execute(self.commands.delete_function(\n dest_table=self.name,\n pk_col=self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and return a D* -> D0 pi Selection object. | def makeDstar2D0Pi( name
, config
, DecayDescriptor
, inputSel
) :
daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config']
combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config']
dstarCuts = "(VF... | [
"def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all quest handlers here | def load_quests(self):
raise NotImplementedError() | [
"def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, rever... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a quest handler to the aiohttp app | def add_quest(self, method: str, route: str, handler):
self.aiohttp.router.add_route(method, route, handler) | [
"def handle_telegram_request(app: Flask):\n pass",
"async def run(self, app: ASGIApp) -> None:\n try:\n await app(self.request.scope, self.receive, self.send)\n except BaseException as exc:\n self.logger.error(\"Exception in 'http' protocol.\", exc_info=exc)\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Representation of the linked list | def __repr__(self):
return "LinkedList([{}],{}/{})".format(self.cur_node, self.cur_pos, self.length) | [
"def __repr__(self):\n\n return \"LinkedList created\"",
"def __repr__(self):\r\n return \"ListNode({})\".format(self.data)",
"def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll",
"def __init__(self, linked_list: object):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the linked list | def list_print(self):
node = self.cur_node # cant point to ll!
while node:
print(node.data)
node = node.next | [
"def print_list(self):\n\t\tcur_node = self.head\n\t\twhile cur_node:\n\t\t\tprint(cur_node.data)\n\t\t\tcur_node = cur_node.next",
"def print_list(self):\n\n current = self.head\n\n while current:\n\n print current.data\n\n current = current.next",
"def show(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
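A minimal `Node` structure (an assumption; only the method bodies appear in these rows) that makes the traversal runnable:

```python
class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

# build 1 -> 2 -> 3 and walk it the same way list_print does
cur_node = Node(1, Node(2, Node(3)))
node = cur_node
while node:
    print(node.data)
    node = node.next
```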
Get the data of the next node | def get_next(self):
return self.cur_node.next.data | [
"def get_next(node):\n return node['next']",
"def data(self):\n return self.first_node.data",
"def get_data(node):\n return node['data']",
"def node_data(self):\n return self.node_data_",
"def get_next(self): \n return self.nextval",
"def get_next(self):\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Representation of the spinlock | def __repr__(self):
return "Spinlock({})".format(self.stepforward) | [
"def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')",
"def spinlocks(self):\n return self._spinlocks",
"def acquire_lock(self, object_id):",
"def lock(self):\n raise NotImplementedError",
"def SynchronizingObject(self) -> _n_1_t_3:",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the tile location (x,y) and zoom level z, fetch the corresponding tile from the server and save it to the location specified in fpath. Note, this saves just one tile; usually you want to use `positive_dataset` instead. | def save_tile(x,y,z,fpath):
UA = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0"
tile_url = f"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png"
# cmd = f"wget --user-agent='please download' -O {fpath} {url}"
if os.path.exists(fpath):
print(f"A... | [
"def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save the tiles whose coordinates are in the input DataFrame, defined by columns x, y, and z | def save_tiles(df,output_dir,namefunc = None):
if not isinstance(df,pd.core.frame.DataFrame):
raise TypeError("df must be a pandas DataFrame!")
if any(e not in df.columns for e in ('z','x','y')):
raise ValueError("df must have columns x, y, and z")
if namefunc is None:
def namefunc(x... | [
"def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add latitude/longitude values to a dataframe | def add_latlon(df):
LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]
LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])
return pd.concat([df.reset_index(drop=True),LLdf],axis = 1) | [
"def add_lat_lon(df):\r\n df[\"lat\"] = df['geohash6'].apply(lambda x: geohash2.decode_exactly(x)[0])\r\n df[\"lon\"] = df['geohash6'].apply(lambda x: geohash2.decode_exactly(x)[1])",
"def add_coord_to_grid_data_frames(grid):\n bus2coord = (\n pd.merge(grid.bus2sub[[\"sub_id\"]], grid.sub[[\"lat\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
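`num2deg` is referenced but not shown; a sketch assuming it is the standard slippy-map tile-to-coordinate conversion used by OpenStreetMap:

```python
import math

def num2deg(x, y, z):
    """Top-left corner of tile (x, y) at zoom z -> (latitude, longitude)."""
    n = 2.0 ** z
    lon_deg = x / n * 360.0 - 180.0
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * y / n)))
    return math.degrees(lat_rad), lon_deg

print(num2deg(0, 0, 1))  # (85.05..., -180.0)
```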
Returns first n values from the given sequence. | def take(n, seq):
seq = iter(seq)
result = []
try:
for i in range(n):
result.append(next(seq))
except StopIteration:
pass
return result | [
"def take(n, seq):\n return list(itertools.islice(seq, 0, n))",
"def take(n, seq):\n return itertools.islice(seq, n)",
"def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(seq.next())\n except StopIteration:\n pass\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
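Behaviour check for this row: the try/except means short inputs simply yield fewer items, and infinite iterators are safe:

```python
import itertools

def take(n, seq):
    seq = iter(seq)
    result = []
    try:
        for _ in range(n):
            result.append(next(seq))
    except StopIteration:
        pass
    return result

print(take(3, itertools.count()))  # [0, 1, 2]
print(take(5, [1, 2]))             # [1, 2] -- short input yields fewer items
```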
Report Method to Get Work Order Details. | def get_work_order_detail(self, date_range):
work_order_obj = self.env["task.line"]
start = datetime.strptime(date_range.get("date_from"), "%Y-%m-%d")
end = datetime.strptime(date_range.get("date_to"), "%Y-%m-%d")
step = timedelta(days=1)
workorder_detail = []
while start... | [
"def get_work_order_detail_by_advance_search(self):\n self.ensure_one()\n return {\n \"name\": _(\"Work Order\"),\n \"view_type\": \"form\",\n \"view_mode\": \"tree,form\",\n \"res_model\": \"fleet.vehicle.log.services\",\n \"type\": \"ir.actions.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate xlsx format print report. | def generate_xlsx_report(self, workbook, data, parts_data):
worksheet = workbook.add_worksheet("daily_parts_issuance_wizard")
worksheet.set_column(0, 0, 10)
worksheet.set_column(1, 1, 15)
worksheet.set_column(2, 2, 20)
worksheet.set_column(3, 3, 15)
worksheet.set_column(4... | [
"def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Here we define the configuration settings needed for all ingestion plugins with reasonable defaults. | def vdk_configure(self, config_builder: ConfigurationBuilder) -> None:
# Plugin-related configurations
config_builder.add(
key="INGEST_METHOD_DEFAULT",
default_value=None,
description="Default Ingestion method to be used.",
)
config_builder.add(
... | [
"def _configure_plugin(self):\n # The execution setting.\n if 'Execution' in self.configuration:\n self.workflow.config['execution'] = self.configuration['Execution']\n self.logger.debug(\n \"Workflow %s execution parameters: %s.\" %\n (self.workflow... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a single TW task as an Albert Item. | def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore
field = get_as_subtext_field
task_id = tw_side.get_task_id(task)
actions = [
FuncAction(
"Complete task",
lambda args_list=["done", task_id]: run_tw_action(args_list),
),
FuncAction(
... | [
"async def get_task(self, task_id: str) -> Task:",
"def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)",
"def _get_task(self, task_id: str) -> Mapping[str, Any]:\n return se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether the current query is for a subcommand. If so, first return the corresponding SubcommandQuery object. | def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:
if not query_str:
return None
# spilt:
# "subcommand_name rest of query" -> ["subcommand_name", "rest of query""]
query_parts = query_str.strip().split(None, maxsplit=1)
if len(query_parts) < 2:
query_str = ""
... | [
"def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False",
"def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens and reads the parameters in the [SUBCATCHMENTS] and [SUBAREA] headers within the SWMM input file. Adds these parameters (as strings) to a numpy array. | def read_initial_parameters(inputfilename):
subc_params = []
subarea_params = []
global subc_names
subc_names = []
subcatchment_parameters = []
inputfile = open(inputfilename, 'r')
for line in inputfile:
if(line.find("[SUBCATCHMENTS]") != -1):
line = inputfile.re... | [
"def subcatch(ini_file='subcatch.ini'):\n config.read(ini_file)\n print 'Read the file ', ini_file\n\n file_in = config.get('file_in', 'file_in')\n\n file_out = config.get('file_out', 'file_out')\n\n picture_out = config.get('picture_out', 'picture_out')\n\n Xoutlet = config.getfloat('coord_outlet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets parameters for rigs. `rig_ids_str`: comma-separated string with rig ids, "1,2,3,4". `miner`: miner to set; leave it null if you do not want to change ("claymore", "claymorez", "ewbf", ...). `miner2`: second miner to set; leave it null if you do not want to change, "0" if you want to unset it. `id_wal`: ID of wallet. Leave it null i... | def multiRocket(self, rig_ids_str, miner, miner2, id_wal, id_oc):
        if rig_ids_str is None:  # rig ids are required for this call
self.log("Rigs ids required")
exit()
params = {
'method': 'multiRocket',
'rig_ids_str': rig_ids_str,
'miner': miner,
'miner2': miner2,
... | [
"def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump utils image template.py as a Dict. The key is like "simnet/lndbtc" | def _dump_template(self, utils_image) -> Dict[str, str]:
cmd = f"docker run -i --rm --entrypoint python {utils_image}"
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
out, _ = p.communicate(input=SCRIPT.encode())
output = out.decode()
if p.returncode != 0:
... | [
"def generate_image(config):\n scripts = dict()\n count = 0\n for controller in config['nodes']:\n if 'controller' not in controller['roles']:\n continue\n # Add function to create directories\n script = BytesIO()\n\n # Add environment information\n script.writ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send detection data and return status | def send_detection_data(self, image_width, image_height,
image, detection_result):
if self._send_buffer.full() is True:
log_error("Send detection data failed for buffer is full")
return False
image_data = None
if isinstance(image, AclImage):
... | [
"def send_image(self, image_width, image_height, image):\n detection_result = []\n return self.send_detection_data(image_width, image_height, image, detection_result)",
"def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
send detection image data | def send_image(self, image_width, image_height, image):
detection_result = []
return self.send_detection_data(image_width, image_height, image, detection_result) | [
"def send_detection_data(self, image_width, image_height,\n image, detection_result):\n if self._send_buffer.full() is True:\n log_error(\"Send detection data failed for buffer is full\")\n return False\n\n image_data = None\n if isinstance(image... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get channel config: presenter_server_ip, port, channel_name, content_type. | def get_channel_config(config_file):
config = configparser.ConfigParser()
config.read(config_file)
presenter_server_ip = config['baseconf']['presenter_server_ip']
port = int(config['baseconf']['presenter_server_port'])
channel_name = config['baseconf']['channel_name']
content_type = int(config['... | [
"def getChannel(self):\r\n return self.channel",
"def getchannelinfo(self, channelid):\n requestquery = {}\n requestquery.update(Commonquery)\n requestquery['credential'] = self.credential or \"None\"\n request = self.session.get(self.apiaddress+'/live/channels/'+channelid, para... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
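The `config[...]` accesses in this row imply an INI layout like the sample below (values are illustrative, not from the source):

```python
import configparser

sample = """
[baseconf]
presenter_server_ip = 127.0.0.1
presenter_server_port = 7006
channel_name = video
content_type = 1
"""

config = configparser.ConfigParser()
config.read_string(sample)  # same shape the snippet expects from config.read(file)
print(config['baseconf']['presenter_server_ip'])         # 127.0.0.1
print(int(config['baseconf']['presenter_server_port']))  # 7006
```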
Sets the caller of this Dial. | def caller(self, caller):
self._caller = caller | [
"def caller_user(self, caller_user):\n \n self._caller_user = caller_user",
"def caller_name(self, caller_name):\n \n self._caller_name = caller_name",
"def caller_address(self, caller_address):\n \n self._caller_address = caller_address",
"def caller_cnam(self, calle... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dialstatus of this Dial. | def dialstatus(self, dialstatus):
if dialstatus is None:
raise ValueError("Invalid value for `dialstatus`, must not be `None`") # noqa: E501
self._dialstatus = dialstatus | [
"def setPeerStatus(self, status):\n self.status = status",
"def SetStatus(self, status):\r\n self.status = status",
"def setStatus(self, status):\n self.battleDelegate.status = status",
"def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dialstring of this Dial. | def dialstring(self, dialstring):
self._dialstring = dialstring | [
"def dialer(self, dialer):\n allowed_values = [\"DEFAULT\", \"SHOONYA\"]\n if dialer not in allowed_values:\n raise ValueError(\n \"Invalid value for `dialer` ({0}), must be one of {1}\"\n .format(dialer, allowed_values)\n )\n\n self._dialer =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the forward of this Dial. | def forward(self, forward):
self._forward = forward | [
"def move_forward(self):\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)",
"def forward(self):\n pass",
"def forward(self):\r\n pass",
"def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the forwarded of this Dial. | def forwarded(self, forwarded):
self._forwarded = forwarded | [
"def forward(self, forward):\n\n self._forward = forward",
"def forwarder(self, forwarder: ICNForwarder):\n self._forwarder = forwarder",
"def indirect_forwarding_flag(self, indirect_forwarding_flag):\n\n self._indirect_forwarding_flag = indirect_forwarding_flag",
"def forward(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the peer of this Dial. | def peer(self, peer):
if peer is None:
raise ValueError("Invalid value for `peer`, must not be `None`") # noqa: E501
self._peer = peer | [
"def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()",
"def connect(self, peer: \"LightningCommandsGenerator\", peer_listen_port: int):\n pass",
"def peer(self, value: Optional[MicrobitPeer]) -> None:\n if self.__peer is not None:\n self.__peer.remo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provide video codec. vcodec = "h264" acodec = "copy" extra = "" split_cmd = "ffmpeg -i '%s' -vcodec %s -acodec %s -y %s" % (file_path, vcodec, acodec, extra) s_cmd = " -i '%s' -vcodec %s -acodec %s" % (file_path, vcodec, acodec) | def split_video_random(file_path, start_pos, split_length, out_path):
s_cmd = " -i '%s'"%(file_path) #use default CODEC
try:
fileext = file_path.split(".")[-1]
except IndexError as e:
raise IndexError("No ext. in filename. Error: " + str(e))
split_start = start_pos
split_length = split_length
... | [
"def getCommandLine(self, vcodec, filename):\n\t\ttarget = \"\"\n\t\tpattern = self.getPattern()\n\t\tif self.preset != 0:\n\t\t\t(x, y), fps, br, target = self.presets[self.preset]\n\t\t\n\t\tcmdLine = []\n\t\tif not scripting.main_is_frozen():\n\t\t\tffmpegs = {\"linux\": \"bin/ffmpeg\", \"linux2\": \"bin/ffmpeg\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get path to the PubChem template if it exists. | def _get_pubchem_template_path(self, het_id):
path = os.path.join(self.pubchem_templates, f"{het_id}.sdf")
return path if os.path.isfile(path) else "" | [
"def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path",
"def get_template_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Counts number of collisions among all bonds. Can be used for estimations of how 'wrong' the depiction is. | def count_bond_collisions(self):
errors = 0
for i in range(0, len(self.bonds)):
for a in range(i + 1, len(self.bonds)):
result = self._intersection(self.bonds[i], self.bonds[a])
if result:
errors += 1
return errors | [
"def collisions(self) -> int:\n return self.num_collisions",
"def numberOfCollisions(self):\n c = 0\n for i in range(self.n):\n for j in range(self.n):\n if self.pairManagerX[i][j] and self.pairManagerY[i][j]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get batch generator. `batch_generator` must define a `shape` property that returns the shape of generated sequences as a (n_samples, n_features) tuple. `batch_generator` must define a method called `get_steps_per_epoch` with the signature `def get_steps_per_epoch(self, protocol, subset)` that returns the number of batch... | def get_generator(self, file_generator, batch_size=None, **kwargs):
raise NotImplementedError('') | [
"def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError",
"def create_keras_generator(self, part='train', batch_size=1, shuffle=True,\n reshape=None):\n from tensorflow.keras.utils import Sequence\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract embedding from internal Keras model | def extract_embedding(self, from_model):
return from_model | [
"def gensim_to_keras(model):\n return model.wv.get_keras_embedding()",
"def gensim_to_keras(model):\n emebed_layer = model.wv.get_keras_embedding(train_embeddings=False)\n return emebed_layer",
"def get_embedding(self, model):\n embedding = []\n for node in range(len(self.graph.nodes())):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a logger that produces reasonable output. | def _get_logger():
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)8s] %(message)s"))
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
return logger | [
"def get_logger() -> Logger:\n global __logger# pylint: disable=global-statement\n verbosity = config.verbosity\n log_path = config.log_filename\n noisy = config.noisy\n if __logger is None:\n name = 'default'\n log_levels = ['ERROR', 'WARNING', 'INFO', 'DEBUG']\n level = log_lev... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
import count or FPKM table | def import_countOrFPKMTable(
self,filename_I):
#import and format the data
io = base_importData();
io.read_tab(filename_I);
countOrFPKMTable = self.format_countOrFPKMTable(io.data);
return countOrFPKMTable; | [
"def load_counttable(filename, small=False):\n if small:\n counttable = _SmallCounttable(1, [1])\n counttable.load(filename)\n\n else:\n counttable = _Counttable(1, [1])\n counttable.load(filename)\n\n return counttable",
"def count(self):\n ans = self.execute(self.comm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat attr tables into a dictionary for rapid alignment of attr table with tracking_id | def reformat_attrTable(
self):
#format into a dictionary of rows for quick aligning with the tracking_id
if self.attrTable: attrTable = self.attrTable[:];
else: attrTable = [];
attrTable_dict = {};
for row in attrTable:
attrTable_dict[row['tracking_id']] = ro... | [
"def _organize_attributes(self, row, existing_entities, ignore=[]):\n output = {\"attributes\": {}, \"relationshipAttributes\": {},\n \"root\": {}, \"custom\": {}}\n for column_name, cell_value in row.items():\n # Remove the required attributes so they're not double dipping... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat count table into a flattened table of sample_names/values | def reformat_countTable(
self,analysis_id_I=None,sna2experimentID_I=None,
sna2sns_I=None):
if self.countTable: countTable = self.countTable[:];
else: countTable = [];
countTable_flat = self.reformat_countOrFPKMTable(
countOrFPKMTable_I=countTable,
analysi... | [
"def build_contingency_table(\n sample_data: pd.Series, batch_data: pd.Series\n) -> pd.DataFrame:\n categorical_values = pd.concat([sample_data, batch_data])\n data_origins = np.array([\"sample\"] * len(sample_data) + [\"batch\"] * len(batch_data))\n\n return pd.crosstab(index=categorical_values, column... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat fpkm table into flattened table of sample_names/values | def reformat_fpkmTable(
self,analysis_id_I=None,sna2experimentID_I=None,
sna2sns_I=None):
if self.fpkmTable: fpkmTable = self.fpkmTable[:];
else: fpkmTable = [];
fpkmTable_flat = self.reformat_countOrFPKMTable(
countOrFPKMTable_I=fpkmTable,
analysis_id_I=... | [
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat count or FPKM tables into flattened table of sample_names/values for rapid alignment of attr table with tracking_id | def reformat_countOrFPKMTable(
self,
countOrFPKMTable_I=None,
analysis_id_I=None,
sna2experimentID_I=None,
sna2sns_I=None,
count_or_FPKM = 'count'):
#format into a dictionary of rows for quick aligning with the tracking_id
countOrFPKMTable_flat = [];
... | [
"def make_taxon_table(result_together, samples):\n ##get a named list\n ##result = dict(zip(taxon,SB_100)) #continue from here\n pathogens = pd.Series()\n for sample in samples:\n pathogens = pathogens.append(result_together[sample]['species']['species'])\n\n # Get the unique genera \n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering hours as json. | def json_hours(request):
current_site = Site.find_for_request(request)
if request.method == 'GET':
if request.GET.get('fallback'):
fallback = request.GET['fallback']
return JsonResponse(
{
'llid': get_default_unit().location.libcal_library_id,
... | [
"def hours(self) -> pli.Series:",
"def _draw_hours(self):\n tmp_str_list = []\n for i in range(0, self._g_width, self._min_grid):\n if i % self._hour_grid == 0:\n tmp_str_list.append('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (\n i + .5 +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering events feed data as json. | def json_events(request):
if request.method == 'GET':
ttrss_url = request.GET['feed']
# need xml for this.
university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'
n = datetime.datetime.now()
return JsonResponse(
... | [
"def view_events():\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]",
"def get_events(request):\n events = Event.objects.all()\n return JsonResponse({\n 'events': [event.to_dict() for event in events],\n })",
"def api_get_events():\n events = []\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering news feed data as json. | def json_news(request):
if request.method == 'GET':
feed = request.GET['feed']
return JsonResponse(
{
'news': get_news(feed),
}
) | [
"def news():\n response.generic_patterns = ['.rss']\n nodes = db().select(db.node.ALL, orderby=db.node.title)\n return dict(\n \ttitle = 'node rss feed',\n\t link = 'http://127.0.0.1:8000/thinker/default/index', description = 'idea news',\n\t created_on = request.now,\n\t items = [\n\t d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for retrieving the chat status for Ask a Librarian pages. Returns json. | def chat_status(request):
if request.method == 'GET':
ask_name = request.GET['name']
status = get_chat_status_and_css(ask_name)
return JsonResponse(
{
'chat_status': status[0],
'chat_css': status[1],
}
) | [
"def chat_status(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'GET' and request.is_ajax():\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n status = team.num_waiting_messages\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For a disk index k, the family L of lists of indices, and the index n of disk k within the family L, returns the list / set of the indices of the disks whose speed will be modified by k | def influence(k,L,n):
try:
to_check = L[n-1] #set des indices
contact_direct=C(k,0)
return list(to_check.intersection(contact_direct))
except:
return [] | [
"def indices(self):",
"def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks",
"def ind(k):\n return k-1",
"def list_indices(self):",
"def occurk(self,couleur,k):\n l = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
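The `influence` function in the row above depends on an external contact function `C(k, 0)` that is not shown; a self-contained sketch of the same set-intersection idea, with a hypothetical `contacts` mapping standing in for `C`, could look like this:

    def influence_sketch(k, L, n, contacts):
        # contacts: dict mapping a disk index to the set of disks it touches
        # (a stand-in for the undefined C(k, 0) above).
        try:
            to_check = L[n - 1]                      # set of candidate disk indices
            return list(to_check & contacts.get(k, set()))
        except IndexError:
            return []

    # influence_sketch(2, [{1, 3, 5}], 1, {2: {3, 4}}) -> [3]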
Add a new hotel to the system | async def add_hotel_endpoint(request):
hotel_name = request.args["hotel_name"][0]
hotel_id = model.add_hotel(hotel_name)
return json({"hotel_id": hotel_id}) | [
"def create_hotel(game, player, hotel):\n first_action = ensure_action(game, 'create_hotel', player)\n if 'creation_tile' not in first_action:\n raise GamePlayNotAllowedError('cannot create tile without playing a '\n 'creation tile')\n if hotel not in hotels_off_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add inventory to a given hotel | async def add_inventory_endpoint(request):
hotel_id = request.args["hotel_id"][0]
room_type = request.args["room_type"][0]
room_inventory = request.args["room_inventory"][0]
model.add_inventory(hotel_id, room_type, room_inventory)
return json({"success": True}) | [
"def add_inventory(self, current_inventory):\n for item in self.inventory:\n current_inventory.append(item)\n # remove supplies from the tile\n self.inventory = []",
"def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory ful... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cancel an existing reservation | async def cancel_reservation_endpoint(request):
reservation_id = request.args["reservation_id"][0]
model.cancel_reservation(reservation_id)
return json({"success": True}) | [
"def cancel_reservation(client, reservation_id, dry_run):\n print('Canceling reservation {}'.format(reservation_id))\n response = client.cancel_capacity_reservation(\n CapacityReservationId=reservation_id,\n DryRun=dry_run\n )\n print(response)",
"def cancel_reservation(payload, clothes_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new reservation | async def add_reservation_endpoint(request):
hotel_id = request.args["hotel_id"][0]
room_type = request.args["room_type"][0]
arrival_date = request.args["arrival_date"][0]
departure_date = request.args["departure_date"][0]
status = request.args["status"][0]
reservation_id = model.add_reservation... | [
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing reservation | async def get_reservation_endpoint(request):
reservation_id = request.args["reservation_id"][0]
reservation_dict = model.get_reservation(reservation_id)
return json(reservation_dict) | [
"def reservation() -> Response:\n # log incoming request\n log(app.current_request.to_dict(), app.current_request.json_body)\n\n # perform routing based off request\n if app.current_request.method == \"GET\":\n # GET reservation; if 'id' query param is available, use to get a single res. if no pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the inventory of a hotel in a specific date range | async def list_inventory_endpoint(request):
hotel_id = request.args["hotel_id"][0]
start_date = request.args["start_date"][0]
end_date = request.args["end_date"][0]
inventory = model.list_inventory(hotel_id, start_date, end_date)
if inventory == model.OPERATION_ERROR_RETURN_CODE:
return json... | [
"def getFreeBookablesByIntervalDate(self,resourceId,startDate,endDate):\n url = self.urlBookables+'{0}/{1}/{2}/'.format(resourceId,startDate,endDate)\n return ExecuteQuery().Query(url, 'GET')",
"def inventory(request, concierge, template=\"concierges/inventory_check.html\"):\n inventory = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Best Path Heuristic (consistent) (seems to be a very good heuristic). Gives the roomba the ability to pass through walls and ignore additional cost on carpet. 1. Find which dirty tile is best to start from: for each dirty tile in state.dirty_locations: 1.1 Set it as the start node. 1.2 Use Total Manhattan Distance (third heu... | def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:
# TODO a nontrivial consistent heuristic
if not state.dirty_locations:
return 0
best_start = 0 # best dirty tile to start from
best_cost = INF # cost of the path from the above start tile
for i in range(le... | [
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n\n \"*** YOUR CODE HERE ***\"\n # Se detallan los intentos sucesivos que tuvimos en la búsqueda de una heurística adecuada... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
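A minimal sketch of the heuristic described in the row above, for illustration only: it assumes a grid state with a `position` tuple and a `dirty_locations` list (names taken from the query), substitutes plain Manhattan distance for the truncated "Total Manhattan Distance" helper, and approximates the per-start path cost with a greedy nearest-neighbor chain. It illustrates the structure of the idea, not a proof of consistency.

    def manhattan(a, b):
        # Relaxed grid distance: walls and carpet cost are ignored, per the query.
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    def best_path_heuristic(position, dirty_locations):
        # Try every dirty tile as the starting node (step 1), chain through the
        # remaining tiles greedily, and keep the cheapest total estimate.
        if not dirty_locations:
            return 0
        best_cost = float("inf")
        for start in dirty_locations:
            remaining = [d for d in dirty_locations if d != start]
            cost, current = manhattan(position, start), start
            while remaining:
                nxt = min(remaining, key=lambda d: manhattan(current, d))
                cost += manhattan(current, nxt)
                remaining.remove(nxt)
                current = nxt
            best_cost = min(best_cost, cost)
        return best_cost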
Generate the header string for this description. If the description is empty, return an empty string. Otherwise, the raw data is joined together and returned with no '' components. | def to_header(self):
if not self.filled:
return ''
return "\n".join(self.data) | [
"def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
d1 and d2 are flat dictionaries whose values are numeric; the values of matching keys are added together. d1 will be changed | def dicttvalsplus(d1, d2):
middle_dict = {}
for key in d2.keys():
middle_dict[key] = d1.get(key, 0) + d2.get(key, 0)
d1.update(middle_dict)
return d1 | [
"def update_dict_by_adding_another(dict1, dict2):\n for key in dict2.keys():\n if key not in dict1:\n if hasattr(dict2[key], 'copy'):\n dict1[key] = dict2[key].copy()\n else:\n dict1[key] = dict2[key]\n else:\n if (isinstance(dict1[key]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
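Since `dicttvalsplus` in the row above is complete, a short usage check makes its in-place semantics concrete (values are illustrative):

    d1 = {"a": 1, "b": 2}
    d2 = {"b": 10, "c": 3}
    dicttvalsplus(d1, d2)
    # d1 is mutated in place and now equals {"a": 1, "b": 12, "c": 3}:
    # keys present only in d1 are kept, keys in d2 are summed into d1.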
x_max = max(seq); x_min = min(seq); epsilon = 1e6; new_seq = [10000 * (epsilon + x - x_min) / (epsilon + x_max - x_min) for x in seq] | def normalization(seq):
new_seq = [6.3578286171 * x for x in seq]
return new_seq | [
"def geo_seq(val, ratio, length):\n return [val * pow(ratio, i) for i in range(length)]",
"def sequence(start, end, factor):\n\n values = []\n v = start\n while v < end:\n values.append(v)\n v *= factor\n return values",
"def generate_values_in_range():\n\treturn [x * 0.5 for x in r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
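Note the mismatch in the row above: the query documents a min-max rescaling while the paired function multiplies by a constant. Purely as a worked illustration of the query's (reconstructed) formula, with its epsilon and scale taken verbatim:

    def normalization_documented(seq, epsilon=1e6):
        # Min-max rescaling as written in the query; the large epsilon means the
        # output stays close to 10000 unless the data spread is comparable to it.
        x_max, x_min = max(seq), min(seq)
        return [10000 * (epsilon + x - x_min) / (epsilon + x_max - x_min) for x in seq]

    # normalization_documented([1.0, 2.0, 3.0]) -> approx. [9999.98, 9999.99, 10000.0]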
Write array to a file as text or binary (default). | def quick_save_array(data, file_name, delimiter=',', ):
data.tofile(file_name, sep=delimiter) | [
"def writeAsText(fileName, array):\n ioWriter = open(fileName, 'w')\n for item in array:\n print item\n ioWriter.write(item + \"\\n\")\n ioWriter.close()\n print \"Tersimpan dalam \" + fileName",
"def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is fl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
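`numpy.ndarray.tofile` with a non-empty `sep` writes text, which is what `quick_save_array` above relies on; a brief round trip for illustration (file name is arbitrary):

    import numpy as np

    data = np.array([1.5, 2.5, 3.5])
    quick_save_array(data, "out.csv")            # writes the text "1.5,2.5,3.5"
    restored = np.fromfile("out.csv", sep=",")   # read back with the same separator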
Get the datetimes from the excel file | def get_datetimes(file_name):
csv_file = open(file_name, 'rb')
file_content = csv.reader(csv_file)
# ignore header
file_content.next()
datetimes = []
for row in file_content:
datetimes.append(row[0])
csv_file.close()
return datetimes | [
"def get_dates(folder=os.getcwd()):\n \n \n res = []\n files = os.listdir(folder)\n for i in files:\n i = re.sub(\".xlsx\", \"\", i)\n i = datetime.strptime(i, \"%y%m%d\")\n res.append(i)\n return res",
"def getFileDates(self, file_id):\n sq = self.getEntry('File', fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
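The `get_datetimes` row above is Python 2 code (`file_content.next()`, a binary-mode handle passed to `csv.reader`); an equivalent Python 3 sketch, assuming the same first-column layout:

    import csv

    def get_datetimes_py3(file_name):
        # Same behavior: skip the header row, collect the first column of each row.
        with open(file_name, newline="") as csv_file:
            reader = csv.reader(csv_file)
            next(reader)                     # skip header
            return [row[0] for row in reader]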