query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Prepare to restart the service | def _restart(self):
pass | [
"def restart(self):\n self.logger.debug('Server - td-agent-bit - restart call.')\n self.change_service_status(\"restart\")",
"def on_restart(self):\n self.set_state(SupvisorsStates.RESTARTING)",
"def restart(self):\n\t\trun('/etc/init.d/puppet restart')",
"def restart(self):\n cfg.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert milliseconds to seconds | def millisec_to_sec(self, millisec):
return millisec / 1000 | [
"def convert_ms(millis):\n millis = int(millis)\n seconds = (millis / 1000) % 60\n seconds = int(seconds)\n minutes = (millis / (1000 * 60)) % 60\n minutes = int(minutes)\n # hours=(millis/(1000*60*60))%24\n min2sec = minutes * 60\n\n return min2sec + seconds",
"def to_seconds(timing, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Current time in milliseconds. | def current_time_millis(self):
return int(round(time.time() * 1000)) | [
"def get_time(self):\r\n return float(self._cur_time)",
"def current_time_ns():\n return int(time.time() * (10 ** 9))",
"def millis():\r\n return int(round(time.time() * 1000))",
"def get_current_time(self):\n return datetime.datetime.now().strftime(\"%H:%M:%S\")",
"def millis():\r\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create heights and times that match the hourly UKV extract as UKV data is not used in forward_operator_from_obs() | def create_heights_and_times(day):
# heights taken from the UKV
height = np.array([ 5.00000000e+00, 2.16666641e+01, 4.50000000e+01,
7.50000000e+01, 1.11666679e+02, 1.55000000e+02,
2.05000000e+02, 2.61666687e+02, 3.25000000e+02,
3.95000000e+02, 4.71666809e+02, ... | [
"def compute_state_energies_vs_time( hvib ):\n nsteps = len(hvib) \n nstates = hvib[0].num_of_rows\n energies = []\n for state in range( nstates ):\n energies.append( [] )\n for step in range( nsteps ):\n energies[ state ].append( hvib[ step ].get( state, state ).real - hvib[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the S array from the climatology (month, RH_fraction) given the month and RH | def get_S_climatology(time, rh_frac, ceil_lam):
# 1. Read in the data
filename = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/' + \
'S_climatology_NK_SMPS_APS_' + str(ceil_lam) + 'nm.npy'
data = np.load(filename).flat[0]
S_... | [
"def subset(self, months):\n #-- check if months is an array or a single value\n months = np.atleast_1d(months)\n #-- number of months\n n = len(months)\n #-- check that all months are available\n months_check = list(set(months) - set(self.month))\n if months_check:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in the hourly f_RH data from netCDF file for all aerosols EW 21/02/17 | def read_hourly_f_RH(mod_time, ceil_lam):
    from sys import platform
# file name and path
if platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
... | [
"def read_f_RH(mod_time, ceil_lam):\n\n # file name and path\n if sys.platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'\n filename = 'monthly_f(RH... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate Q_ext_wet using Q_ext_dry and f(RH) for current wavelength Q_ext,dry and f_RH are monthly varying based on obs at NK and CH for urban and rural site default settings respectively. f_RH also varies with geometric radius. EW 23/02/17 | def calc_Q_ext_wet(ceil_lam, r_d, r_g, rh_frac, mod_time):
import sys
if sys.platform == 'linux2':
sys.path.append('/net/home/mm0100/ewarren/Documents/AerosolBackMod/scripts/ellUtils') # general utils
from ellUtils import nearest, netCDF_read, binary_search_nearest
else:
from ellUtil... | [
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in the f_RH data from netCDF file EW 21/02/17 | def read_f_RH(mod_time, ceil_lam):
# file name and path
if sys.platform == 'linux2':
miedir = '/data/jcmm1/ewarren/Mie/'
else:
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/common_data/Mie/'
filename = 'monthly_f(RH)_NK_'+str(c... | [
"def read_hourly_f_RH(mod_time, ceil_lam):\n\n import sys.platform as platform\n\n # file name and path\n if platform == 'linux2':\n miedir = '/data/jcmm1/ewarren/Mie/'\n else:\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/commo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in the Q_ext for dry murk. EW 21/02/17 | def read_Q_dry_ext(ceil_lam):
miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'
filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'
raw = np.loadtxt(miedir + filename, delimiter=',')
Q_ext_dry = {'radius': raw[:, 0],
... | [
"def wq_from_file(self, water_quality_raw_data):",
"def get_qcodes(qrunes_file):\n startStr = '@qcodes:'\n endStr = '@script:'\n newLi = []\n info = fetch(startStr,endStr,qrunes_file,newLi)\n if not info :\n print('Please check the qcodes .')\n pass\n qcodes_content = ''.join(info)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract MURK aerosol and calculate RH for each of the sites in the ceil metadata Can retain the full forecast, or just for the day | def mod_site_extract_calc(day, ceil_metadata, modDatadir, model_type, res, ceil_lam,
fullForecast=False, Z=21, allvars=False, m_coeff=1.0, rh_coeff=1.0,
version=FOcon.aerFO_version, **kwargs):
# if 'nan_policy' in kwargs.keys():
def calc_RH(mod_T_celsius, mo... | [
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in day and following day's data, for all rh obs. | def read_all_rh_obs(day, site_rh, rhDatadir, mod_data):
# define array
rh_obs = {}
# get date string for obs of the main and following days
doyStr = day.strftime('%Y%j')
# doyStr2 = (day + dt.timedelta(hours=24)).strftime('%Y%j')
for site, height in site_rh.iteritems():
rh_obs[site] ... | [
"def read_wxt_obs(day, time, z):\n\n filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'\n wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])\n\n # extract out RH o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in RH observations from KSSW, time match them to the model data, and extend them in height to match the dimensions of model RH | def read_wxt_obs(day, time, z):
filepath = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \
'Davis_BGH_' + day.strftime('%Y') + '_15min.nc'
wxt_obs = eu.netCDF_read(filepath, vars=['time', 'RH', 'Tair', 'press'])
# extract out RH obs to match ... | [
"def setup_hds(self):\n if self.hds_kperk is None or len(self.hds_kperk) == 0:\n return\n from .gw_utils import setup_hds_obs\n # if len(self.hds_kperk) == 2:\n # try:\n # if len(self.hds_kperk[0] == 2):\n # pass\n # except:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calibrate the bsc observations | def calibrate_BSC_data_v1p0(bsc_obs, site):
calib_path = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/' \
'Calibrations_for_LUMO_Ceilometers/'
filename = calib_path + site + '_window_trans_daily_cpro.pickle'
# sort site name out (i... | [
"def __calibrate(self):\n my_name = '__calibrate'\n\n # check that transient members are present\n if not (hasattr(self, \"_Spectrum__wave_solution\")):\n self.__calibrationWaveSolution()\n\n # calibrate\n self.__wavelength = self.__wave_solution(np.arange(self.__spectr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates filename from site string and day | def create_filename(ceilDatadir, site, day, fType):
# site id (short) and site str in filename
split = site.split('_')
site_id = split[-1]
bsc_site_name = split[0] + '_' + fType + '_' + split[-1]
# date for the main day
doyStr = day.strftime('%Y%j')
# time resolution of data in filename
... | [
"def file_name(self):\n return datetime.strftime(self.creation_date, '%Y-%m-%d-') + self.slug + '.html'",
"def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename",
"def make_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
deletes an organizer by name | def delete(self, name):
organizer = OrganizerModel.find_organizer_by_name(name)
if organizer:
organizer.delete_from_db()
return {"message": "Organizer deleted"}, 204
return {"message": "Organizer not found"}, 404 | [
"def delete(self, name: str) -> None:\n sub_line = self._get_from(name)\n for _, department, clerk in sub_line:\n clerk.delete(department)",
"def remove_agent(self, *, agent_name: str) -> None:",
"def deleteAffordancesFromViewer(self, Viewer, obstacleName=\"\"):\n affs = self.get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get all unique combinations of two lists in Python | def unique_comb_of_two_lists(A, B):
res = []
for p in permutations(A, len(B)):
zipped = zip(p, B)
res.append(list(zipped))
return res | [
"def union(list1, list2):\n\n return list(set(list1) | set(list2))",
"def list_reunion(list1, list2):\n return list(set(list1) | set(list2))",
"def list_union(a, b):\n c = list(copy(a))\n for item in b:\n if item not in a:\n c.append(item)\n return c",
"def union(list_a, list_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an array of the captured image | def getImageFromCam(self):
with picamera.array.PiRGBArray(self.CAMERA) as output:
self.CAMERA.capture(output, 'rgb')
print('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))
return output.array | [
"def grab_one(self):\n\n camera_array = self._get_camera_array()\n\n size = camera_array.GetSize()\n\n result = []\n\n for i in range(size):\n grab_result = camera_array[i].GrabOne(self._TIME_OUT)\n image_array = self.post_processing(grab_result).GetArray()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor for Book Class. | def __init__(self, author, title):
self.author = author
self.title = title | [
"def __init__(self, book_name, book_author, book_year=None, rates=[]):\n self.book_name = book_name\n self.book_author = book_author\n self.book_year = book_year\n self.owner = None\n self.__rates = rates",
"def __init__(self, bible, trad=False):\n\n self._bible = bible\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints title and author. | def display(self):
bookinfo = '"{}, written by {}"'.format(self.title, self.author)
print bookinfo | [
"def printInfo(self):\r\n\r\n about = \"Student name is {0}, {1}, and {2} is taking {3}.\".format(\r\n self.lastName, self.firstName, self.pronoun, len(self._courseList))\r\n\r\n print(about)",
"def print_infoheader():\n\tprint(\" _______.__ _______.\")\n\tprint(\"|_ ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle archiving the channel after the fact. | async def tempChannelsArchive(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
archiving = await guildConfig.get_attr(KEY_ARCHIVE)()
if archiving:
archiving = False
self.logger.info(
"%s (%s) DISABLED archiving the temp channel for %s (%s)",... | [
"def archive(self, channel_name):\n # Might not need to do this since we now do this in `stale`\n if self.ignore_channel(channel_name):\n self.logger.debug(\"Not archiving #%s because it's in ignore_channels\", channel_name)\n return\n\n if self.config.activated or self.co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle the creation/deletion of the temporary channel. | async def tempChannelsToggle(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
enabled = await guildConfig.get_attr(KEY_ENABLED)()
if enabled:
enabled = False
self.logger.info(
"%s (%s) DISABLED the temp channel for %s (%s)",
... | [
"async def tempChannelsDelete(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n channelId = await guildConfig.get_attr(KEY_CH_ID)()\n channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()\n\n if channelCreated and channelId:\n # Channel created, see w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the duration of the temp channel. Max 100 hours. | async def tempChannelsDuration(self, ctx: Context, hours: int, minutes: int):
if (hours >= 100) or (hours < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter valid hours!"
)
return
if (minutes >... | [
"def set_duration(self, hours, minutes, seconds):\n self.duration = (hours, minutes, seconds)",
"def set_duration(self, duration):\n pass",
"def duration(self, duration):\n self._duration = duration",
"async def set_frequency_duration_minutes(self, duration: int):\n self._frequency.dur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the parent category of the text channel. | async def tempChannelsCategory(
self, ctx: Context, *, category: discord.CategoryChannel = None
):
await self.config.guild(ctx.guild).get_attr(KEY_CH_CATEGORY).set(category.id)
if not category:
self.logger.info(
"%s (%s) disabled category nesting on %s (%s)",
... | [
"def set_parent(self, parent):\n self._parent = parent",
"def set_parent(self, parent_node):\n self.parent = parent_node",
"def assignParentControl(self):\r\n super(GUI, self).assignParentControl()\r\n selected = pm.selected()\r\n\r\n if not selected:\r\n self.paren... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a role to allow access to the channel. | async def tempChannelsAllowAdd(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:
if role.id not in roleAllow:
roleAllow.append(role.id)
self.logger.info(
"%s (%s) added role... | [
"def add_role(self, role):\n print(f'Parsing permissions for the role - {role.name}')\n\n self.permissions = role.permissions\n self.can_kick = role.permissions.kick_members\n self.can_ban = role.permissions.ban_members\n self.can_move = role.permissions.move_members\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a role from being able access the temporary channel. | async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:
if not roleAllow or role.id not in roleAllow:
await ctx.send(
":negative_squared_cross_mark: TempChannel ... | [
"async def tempChannelsDenyRemove(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:\n if not roleDeny or role.id not in roleDeny:\n await ctx.send(\n \":negative_squared_cross_mark: TempCha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a role to block sending message to the channel. This role should be HIGHER in the role hierarchy than the roles in the allowed list! The bot will not check for this. | async def tempChannelsDenyAdd(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:
if role.id not in roleDeny:
roleDeny.append(role.id)
self.logger.info(
"%s (%s) added role %s t... | [
"async def tempChannelsAllowAdd(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:\n if role.id not in roleAllow:\n roleAllow.append(role.id)\n self.logger.info(\n \"%s (%s)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove role from being blocked sending to the channel. | async def tempChannelsDenyRemove(self, ctx: Context, *, role: discord.Role):
async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_DENY)() as roleDeny:
if not roleDeny or role.id not in roleDeny:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Rol... | [
"async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):\n async with self.config.guild(ctx.guild).get_attr(KEY_ROLE_ALLOW)() as roleAllow:\n if not roleAllow or role.id not in roleAllow:\n await ctx.send(\n \":negative_squared_cross_mark: Te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the temp channel, if it exists. | async def tempChannelsDelete(self, ctx: Context):
guildConfig = self.config.guild(ctx.guild)
channelId = await guildConfig.get_attr(KEY_CH_ID)()
channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()
if channelCreated and channelId:
# Channel created, see when we shou... | [
"async def on_channel_delete(self, channel):",
"def test__Channel__delete__2():\n guild_id = 202211090005\n channel_id = 202211090006\n \n guild = Guild.precreate(guild_id)\n \n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_category, guild_id = guild_id)\n guild.cha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loop to check whether or not we should create/delete the TempChannel. | async def checkChannels(self): # pylint: disable=too-many-branches,too-many-statements
while self == self.bot.get_cog("TempChannels"):
await asyncio.sleep(SLEEP_TIME)
# Create/maintain the channel during a valid time and duration, else
# delete it.
for guild in s... | [
"async def tempChannelsDelete(self, ctx: Context):\n guildConfig = self.config.guild(ctx.guild)\n channelId = await guildConfig.get_attr(KEY_CH_ID)()\n channelCreated = await guildConfig.get_attr(KEY_CH_CREATED)()\n\n if channelCreated and channelId:\n # Channel created, see w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of sentences, returns a preprocessed list of sentences (Very basic preprocessing) | def preprocess(list_of_sentences):
ret_list = []
for f in list_of_sentences:
f = f.lower()
f= f.replace('\n', '')
f= f.replace('?','')
ret_list.append(f)
return ret_list | [
"def _preprocess(sentences, preprocess_pipeline, word_tokenize=None):\n if preprocess_pipeline is not None:\n for function in preprocess_pipeline:\n sentences = function(sentences)\n\n if word_tokenize is None:\n return sentences\n else:\n return sentences, [word_tokenize(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts a general power value into the `power_provenance` table. | def insert_power(self, description, the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO power_provenance(
description, the_value)
VALUES(?, ?)
""", [description, the_value]) | [
"def insert_core(self, x, y, p, description, the_value):\n with self.transaction() as cur:\n core_id = self._get_core_id(cur, x, y, p)\n cur.execute(\n \"\"\"\n INSERT INTO core_provenance(\n core_id, description, the_value)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Records provenance into the `gatherer_provenance` table. | def insert_gatherer(self, x, y, address, bytes_read, run, description,
the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO gatherer_provenance(
x, y, address, bytes, run, description, the_value)
... | [
"def add_provenance(self, source_field, term, notification_field, matched, explanation):\n uc = dataobj.to_unicode()\n obj = {\n \"source_field\" : self._coerce(source_field, uc),\n \"term\" : self._coerce(term, uc),\n \"notification_field\" : self._coerce(notification... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts data into the `monitor_provenance` table. | def insert_monitor(self, x, y, description, the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO monitor_provenance(
x, y, description, the_value)
VALUES(?, ?, ?, ?)
""", [x, y, description, the_v... | [
"def insert_board_provenance(self, connections):\n if not connections:\n return\n with self.transaction() as cursor:\n cursor.executemany(\n \"\"\"\n INSERT OR IGNORE INTO boards_provenance(\n ethernet_x, ethernet_y, ip_addres)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts data into the `router_provenance` table. | def insert_router(
self, x, y, description, the_value, expected=True):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO router_provenance(
x, y, description, the_value, expected)
VALUES(?, ?, ?, ?, ?)
... | [
"def insert_board_provenance(self, connections):\n if not connections:\n return\n with self.transaction() as cursor:\n cursor.executemany(\n \"\"\"\n INSERT OR IGNORE INTO boards_provenance(\n ethernet_x, ethernet_y, ip_addres)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts data for a specific core into the `core_provenance` table. | def insert_core(self, x, y, p, description, the_value):
with self.transaction() as cur:
core_id = self._get_core_id(cur, x, y, p)
cur.execute(
"""
INSERT INTO core_provenance(
core_id, description, the_value)
VALUES(?, ?... | [
"def create_core(self, **kwargs):\n return CoreProfile(entity=self, **kwargs).save()",
"def core_product(self, core_product):\n\n self._core_product = core_product",
"def add_ip_to_core_map(self, interface, core):\n\n self.logger.info(\"Add interface %s and core [%s] into local map\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save and, if applicable, log a message to the `reports` table. Only logs messages up to the cutoff set by the configuration `provenance_report_cutoff` | def insert_report(self, message):
with self.transaction() as cur:
cur.execute(
"""
INSERT INTO reports(message)
VALUES(?)
""", [message])
recorded = cur.lastrowid
cutoff = get_config_int("Reports", "provenance_report... | [
"def save_report():\n ct.save_report()",
"def persist_report():",
"async def report(self, ctx, *, report = None):\n if not report:\n raise CustomPermissionError\n try:\n await ctx.bot.log.send(embed = await Macro.Embed.infraction(\n f\"{ctx.author.name} ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts edge data into the `connector_provenance` | def insert_connector(
self, pre_population, post_population, the_type, description,
the_value):
with self.transaction() as cur:
cur.execute(
"""
INSERT OR IGNORE INTO connector_provenance(
pre_population, post_population, th... | [
"def append_edge(self, edge):",
"def insert_gatherer(self, x, y, address, bytes_read, run, description,\n the_value):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO gatherer_provenance(\n x, y, addre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the connection details retrieved from spalloc_client job to the `boards_provenance` table. | def insert_board_provenance(self, connections):
if not connections:
return
with self.transaction() as cursor:
cursor.executemany(
"""
INSERT OR IGNORE INTO boards_provenance(
ethernet_x, ethernet_y, ip_addres)
VALUES... | [
"def save_process_table(self):\n if self.procs != None:\n f = open(self.proctable_path, 'w')\n f.write(self.processtable_header)\n for id in self.procs.keys():\n proc = self.procs[id]\n f.write(self.processtable_line % (id, proc['product'], proc[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
THIS IS A TESTING METHOD. This will lock the database and then try to do a log | def _test_log_locked(self, text):
with self.transaction() as cur:
# lock the database
cur.execute(
"""
INSERT INTO reports(message)
VALUES(?)
""", [text])
cur.lastrowid # pylint: disable=pointless-statement
... | [
"def db_lock_action(self): # pragma: no cover\n pass",
"def test_default_connection_details_value():\n RedLock(\"test_simple_lock\")",
"def test_lock_account_user(self):\n pass",
"def mustlock(self):\n pass",
"def realopen(self,ro=False):\n\n#\t\tprint \"open \",self.name\n\n\t\tglo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Path to the archive file. Because the archive file path contains runtime, its use can cause a race condition or recursion error if used in some locations. If we removed runtime from the path, we would not have a way to track changes to runtime, which is more important than needing to be mindful of where this is used. | def archive_file(self) -> Path:
return self.project.build_directory / (
f"{self.project.source_code.root_directory.name}."
+ ("layer." if self.usage_type == "layer" else "")
+ f"{self.runtime}.{self.project.source_code.md5_hash}.zip"
) | [
"def _archive_path(tm_env, archive_type, instance, uniq):\n return os.path.join(tm_env.archives_dir, '%s-%s.%s.tar.gz' %\n (instance.replace('#', '-'), uniq, archive_type))",
"def getArchivePathFor(weblogentry):",
"def get_archive_install_dir(self) -> Path:\n # Prioritizes archi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SHA256 of the archive file. | def code_sha256(self) -> str:
file_hash = FileHash(hashlib.sha256())
file_hash.add_file(self.archive_file)
return base64.b64encode(file_hash.digest).decode() | [
"def compute_sha256(self, file):\n if file:\n m = hashlib.sha256()\n m.update(file)\n return m.hexdigest()\n return 0",
"def _hash_file_sha256(directory: str, path: str) -> str:\n path = os.path.join(directory, path)\n hash_obj = hashlib.sha256()\n with open... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List of compatible instruction set architectures. | def compatible_architectures(self) -> Optional[List[str]]:
return self.project.compatible_architectures | [
"def supported_archs(self):\n return self.SUPPORTED_ARCHS",
"def get_available_architectures(self):\n query = \"select distinct architecture from packages where architecture != 'all'\"\n\n #just check if any of the rows retured is empty\n return [ arch[0] for arch in self.__execute_que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List of compatible runtimes. | def compatible_runtimes(self) -> Optional[List[str]]:
return self.project.compatible_runtimes | [
"def compatible_runtimes(self) -> Optional[List[str]]:\n if self.META_TAGS[\"compatible_runtimes\"] in self.object_tags:\n return self.object_tags[self.META_TAGS[\"compatible_runtimes\"]].split(\"+\")\n return None",
"async def runtimes(self) -> List[Runtime]:\n runtimes = await se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
MD5 of the archive file. | def md5_checksum(self) -> str:
file_hash = FileHash(hashlib.md5())
file_hash.add_file(self.archive_file)
return base64.b64encode(file_hash.digest).decode() | [
"def MD5Sum(klass, filename):\n return hashlib.md5(path(filename).text()).hexdigest()[:8]",
"def file_md5(filename):\r\n file_o = read_file(filename)\r\n file_str = file_o.read()\r\n file_o.close()\r\n return string_md5(file_str)",
"def _get_file_md5sum(file_name):\n hash_obj = hashlib.md5... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
S3 object version ID. | def object_version_id(self) -> Optional[str]:
if (
not self._put_object_response
or "VersionId" not in self._put_object_response
):
return None
return self._put_object_response["VersionId"] | [
"def _object_version_id(self, filename):\n attribute_block = self._object_attribute_block(filename)\n return attribute_block.version_id",
"def as_object_version_id(value):\n return value.version_id if isinstance(value, ObjectVersion) else value",
"def get_id(self):\n return self.bucket_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runtime of the deployment package. | def runtime(self) -> str:
return self.project.runtime | [
"def generate_runtime_container(self):\n for version in self.versions:\n self.display('docker build -f {}/dockerfiles/{}_{}.d -t {} {}'.format(\n self.tmp, self.runtime, version, 'continuous:{}_{}'.format(self.runtime, version), self.tmp), \"yellow\")\n self.exec('docker ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fix file permissions of the files contained within the archive file. Only need to ensure that the file is executable. Permissions will be changed to 755 or 655 if needed. The change will occur within the archive file only; the original file will be unchanged. This should be run after all files have been added to the arch... | def _build_fix_file_permissions(self, archive_file: zipfile.ZipFile) -> None:
for file_info in archive_file.filelist:
current_perms = (
file_info.external_attr & self.ZIPFILE_PERMISSION_MASK
) >> 16
required_perm = 0o755 if current_perms & stat.S_IXUSR != 0 el... | [
"def update_permission(self):\n\n from stat import S_IEXEC\n\n for data in self.files:\n if data not in ['iana', 'dir_structure']:\n stats = stat(self.destination + self.files[data])\n chmod(\n self.destination +\n self.fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle installing & zipping dependencies. | def _build_zip_dependencies(
self,
archive_file: zipfile.ZipFile,
) -> None:
self.project.install_dependencies()
for dep in self.iterate_dependency_directory():
archive_file.write(
dep,
self.insert_layer_dir(
dep, self.p... | [
"def _install_dependencies(self):\n for package in self._dependencies:\n print('installing dependency %s...' % package)\n process_args = [\n self.__python, '-m', 'pip', 'install', '--upgrade', package\n ]\n subprocess.Popen(process_args, shell=False)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle zipping the project source code. | def _build_zip_source_code(self, archive_file: zipfile.ZipFile) -> None:
for src_file in self.project.source_code:
archive_file.write(
src_file,
self.insert_layer_dir(
src_file, self.project.source_code.root_directory
).relative_to(... | [
"def create_lambda_zip(self, prefix='lambda_package', handler_file=None,\n minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None):\n import pip\n\n print(\"Packaging project as zip...\")\n\n if not venv:\n if 'VIRTUAL_ENV' in os.environ:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert directory into local file path for layer archive. If required, this should be overridden by a subclass for language specific requirements. | def insert_layer_dir(
file_path: Path, relative_to: Path # pylint: disable=unused-argument
) -> Path:
return file_path | [
"def add_directory(self, local_dir):\n self.images.add_directory(os.path.abspath(local_dir))",
"def _add_root_dir(self, root_path: Path):\n dirname = os.path.split(root_path)[1] + '/'\n with open(self.log_path, mode='ab') as lf:\n lf.write(bytes(dirname, 'utf-8'))\n lf.w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterate over the contents of the dependency directory. If ``gitignore_filter`` is set, it will be used to exclude files. | def iterate_dependency_directory(self) -> Iterator[Path]:
for child in self.project.dependency_directory.rglob("*"):
if child.is_dir():
continue # ignore directories
if self.gitignore_filter and self.gitignore_filter.match(child):
continue # ignore files... | [
"def package_files(self):\n for root, dirs, files in os.walk(os.path.join(self.path, 'p')):\n for basename in files:\n path = os.path.join(root, basename)\n if path != os.path.join(self.path, '.options'):\n yield os.path.join(self.path, path)",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize deployment package. This should be used in place of creating an instance of this class directly as it will automatically account for the S3 object already existing. | def init(
cls,
project: _ProjectTypeVar,
usage_type: Literal["function", "layer"] = "function",
) -> DeploymentPackage[_ProjectTypeVar]:
s3_obj = DeploymentPackageS3Object(project, usage_type)
if s3_obj.exists:
if s3_obj.runtime == project.runtime:
... | [
"def __init__ (self):\n # Create a connection to S3\n self.handle = self.connect()",
"def _set_s3(self):\n logger.info(\"Setting up s3 ...\")\n\n cluster_name_id = AXClusterId().get_cluster_name_id()\n\n self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List of compatible runtimes. | def compatible_runtimes(self) -> Optional[List[str]]:
if self.META_TAGS["compatible_runtimes"] in self.object_tags:
return self.object_tags[self.META_TAGS["compatible_runtimes"]].split("+")
return None | [
"def compatible_runtimes(self) -> Optional[List[str]]:\n return self.project.compatible_runtimes",
"async def runtimes(self) -> List[Runtime]:\n runtimes = await self._http_session.get_response(\"get\", \"runtimes/\")\n runtime_list = []\n for runtime in runtimes:\n runtime_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Response from HeadObject API call. | def head(self) -> Optional[HeadObjectOutputTypeDef]:
try:
return self.bucket.client.head_object(
Bucket=self.bucket.name, Key=self.object_key
)
except self.bucket.client.exceptions.ClientError as exc:
status_code = exc.response.get("ResponseMetadata", ... | [
"def do_HEAD(self):\n # create request object\n self.create_request(\"get\")\n f = self.on_request(\"get\")\n if f:\n f.close()",
"def head_object(self, container_name, key):\n url = '/'.join([self.endpoint, container_name, key])\n try:\n res = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update tags of the S3 object. | def update_tags(self) -> None:
new_tags = self.build_tag_set(url_encoded=False)
if new_tags == self.object_tags:
LOGGER.debug(
"%s tags don't need to be updated",
self.bucket.format_bucket_path_uri(key=self.object_key),
)
return
... | [
"def update_tags(self):\n raise NotImplementedError",
"def set_tags(self, url_prefix, microver, instance, tags):\n try:\n for tag in tags:\n instance.add_tag(self.conn, tag)\n except AttributeError:\n # Try a low-level access if SDK version is old\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function adds G losses to tensorboard and stores the values in the logger: loss_adv, loss_direct, loss_G | def tb_add_step_loss_g(self, writer, global_step):
step_loss_G = {
'loss_adv': self.loss_adv_G.item(),
'loss_direct': self.loss_direct_G.item(),
'loss_G': self.loss_G.item(),
}
writer.add_scalars("Train/Generator", step_loss_G, global_step=global_step)
... | [
"def add_loss(loss):\n tf.add_to_collection(LOSSES, loss)",
"def compute_G_loss(self):\n # netD(0) for the separation branch.\n pred_fake1 = self.netD(0, self.fake_A)\n pred_fake2 = self.netD(0, self.fake_B)\n pred_fake3 = self.netD(0, self.fake_C)\n pred_fake4 = self.netD(0,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove handlers from all loggers | def clear_loggers():
import logging
loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())
for logger in loggers:
handlers = getattr(logger, "handlers", [])
for handler in handlers:
logger.removeHandler(handler) | [
"def remove_handlers_root_logger_object():\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)",
"def remove_handlers(self):\n\n self.log.removeHandler(self.fileHandler)\n if self.streamHandler:\n self.log.removeHandler(self.streamHandler)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function uses the SARIMAX model from statsmodels. It predicts 'y' by using 'x' as exogenous variables. For each exogenous variable, the function takes 1 lag at a distance of 'h' time units. It forecasts 'h_max' time steps in future. Arguments | def forecast_SARIMAX(window: int, n_train: int, p: int, d: int, q: int, ps: int, ds: int, qs: int, m: int,
x: list[str], y: str, h_max: int, transf: Callable[[float], float], itransf: Callable[[float], float] )-> pd.DataFrame:
df_US = pd.read_csv('OWID_weekly.csv')
df_US.index = pd.to_dat... | [
"def esm_arima(ts):\n\n test_n = 60\n ses = []\n trend = []\n dtrend = []\n arima = []\n j=0\n \n for i in range(test_n,0,-1): #(60,59,58...3,2,1)\n # moving window, walk foward 1 step \n train = np.asarray(ts[j:len(ts)-i])\n j= j+1\n \n # 3 different t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a compiler ("markdown", "rest"), and whether it's meant for a post or a page, and compilers, return the correct entry from post_pages. | def filter_post_pages(compiler, content_type, compilers, post_pages):
# First throw away all the post_pages with the wrong is_post
is_post = False if content_type == 'page' else True
filtered = [entry for entry in post_pages if entry[3] == is_post]
# These are the extensions supported by the required... | [
"def get_pages_content(self):\n\n #TODO other markup langage (piece of cake)\n for page in self.postlist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes the last character entered and returns it because we don't want to display it. | def remove_last(view):
position = view.sel()[0].begin()
region = Region(position, position - 1)
character = view.substr(region)
view.run_command("left_delete")
# undoing twice to remove the character and also retain the view's dirty state.
view.run_command("undo")
view.run_command("undo")
... | [
"def promptBackspace(self):\n\n self.hdirty = True\n if self.position != 0:\n self.string = self.string[:self.position - 1] + \\\n self.string[self.position:]\n self.position -= 1\n if self.promptValidate():\n self.promptFromScratch()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the regions where the cursor(s) should land. If the command is in select mode, the regions are apart; otherwise it is on one character. | def _get_found_regions(view, character, sel, line, direction):
if direction == Constants.RIGHT:
line_portion = Region(sel[0], line.b)
else:
line_portion = Region(line.a, sel[1])
from_sel = view.substr(line_portion)
if direction == Constants.RIGHT:
found_pos = from_sel.find(char... | [
"def get_word_cursor(view, region):\n\n if region.a == region.b:\n lrgn = sublime.Region(0, 0)\n lrgn.a = 0 if region.a < 20 else (region.a - 20)\n lrgn.b = region.b\n rrgn = sublime.Region(0, 0)\n rrgn.a = region.a\n rrgn.b = view.size() if region.b + 20 > view.size() e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a threaded WSGI Server | def makeServer(host, port, app, handler_class=WSGIRequestHandler):
httpd = ThreadedWSGIServer((host, port), handler_class)
httpd.set_app(app)
return httpd | [
"def run() -> None: # pragma: no cover\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s'\n )\n args = parse_user_args()\n config = wsgi_config.WSGIConfig()\n config.configure_gwsgi(args)\n httpd = make_wsgi_server(\n config.hos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function tests several cases of before_space(s) and after_space(s). | def testA():
assert before_space("ab cde")=="ab"
assert before_space("ab cde fg")=="ab"
assert before_space(" ab cde")==""
assert after_space("ab cde")=="cde"
assert after_space("ab cde fg")=="cde fg"
assert after_space(" ab cde")=="ab cde" | [
"def test_before_space():\n print('Testing before_space')\n result = currency.before_space(' ')\n introcs.assert_equals('', result)\n result = currency.before_space('Nicholas ')\n introcs.assert_equals('Nicholas', result)\n result = currency.before_space(' Nicholas')\n introcs.assert_equals(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function tests several cases of first_inside_quotes(s), get_from(json), get_to(json) and has_error(json) | def testB():
assert first_inside_quotes('a"bc"de')=="bc"
assert first_inside_quotes('a"bc"de"f"g')=="bc"
assert first_inside_quotes('"a"bc"de"f"g')=="a"
assert first_inside_quotes('abcd""')==""
json1= '{"from":"2 United States Dollars","to":"1.825936 Euros","success":true,"error":""}'
assert ge... | [
"def test_from_json_string(self):\n var1 = None\n ret1 = Base.to_json_string(var1)\n self.assertEqual(Base.from_json_string(ret1), [])\n\n var2 = []\n ret2 = Base.to_json_string(var2)\n self.assertEqual(Base.from_json_string(ret2), [])\n\n var3 = [{}]\n ret3 =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function tests several cases of currency_response(currency_from,currency_to,amount_from). | def testC():
assert currency_response('USD','EUR',2.5)=='{ "from" : "2.5 United States Dollars", "to" : "2.0952375 Euros", "success" : true, "error" : "" }'
assert currency_response('AAA','EUR',2.5)=='{ "from" : "", "to" : "", "success" : false, "error" : "Source currency code is invalid." }'
assert currenc... | [
"def test_service_response():\n print('Testing service_response')\n result = currency.service_response('USD', 'EUR', 2.5)\n introcs.assert_equals(\n '{\"success\": true, \"src\": \"2.5 United States Dollars\", \"dst\": \"2.2160175 Euros\", \"error\": \"\"}', result)\n result = currency.service_re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function tests several cases of is_currency(currency) and exchange(currency_from,currency_to,amount_from). | def testD():
assert iscurrency('USD')==True
assert iscurrency('KES')==True
assert iscurrency('kes')==False
assert iscurrency('aaa')==False
assert exchange('USD','EUR',2.5)-2.0952375<=0.000001
assert exchange('USD','KES',5.4)-557.4752586<=0.000001
assert exchange('LKR','NGN',1.7)-3.9997994984... | [
"def test_exchange():\n print('Testing exchange')\n result = currency.exchange('USD', 'EUR', 2.5)\n introcs.assert_floats_equal(2.2160175, result)\n result = currency.exchange('USD', 'EUR', -2.5)\n introcs.assert_floats_equal(-2.2160175, result)",
"def _smart_exchange(self, currency_amount):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register all the callables in a single module with gin. A useful way to add gin configurability to a codebase without explicitly using the .configurable decorator. | def register_module_with_gin(module, module_name=None):
module_name = module.__name__ if module_name is None else module_name
for attr in dir(module):
if callable(getattr(module, attr)):
setattr(module, attr, gin.configurable(getattr(module, attr), module=module_name)) | [
"def call_register(root_dir):\n for mod in imported_modules:\n if hasattr(mod, \"register\"):\n mod.register()",
"def register_run_config(name):\n basecls = BaseConfig\n registry = RUN_CONFIG_REGISTRY\n reg_func = partial(_register, name=name, basecls=basecls, registry=registry)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use for parsing collections that may contain a 'gin' key. The 'gin' key is assumed to map to either a dict or str value that contains gin bindings. e.g. | def gin_dict_parser(coll):
if 'gin' in coll:
if is_mapping(coll['gin']):
gin.parse_config("".join(map(lambda t: f'{t[0]} = {t[1]}\n', iteritems(coll['gin']))))
elif isinstance(coll['gin'], str):
gin.parse_config(coll['gin'])
return coll | [
"def nested_gin_dict_parser(coll):\n return nested_dict_walker(gin_dict_parser, coll)",
"def _parse_input_(self, input_item):\r\n for key, value in input_item.items():\r\n if isinstance(value, dict):\r\n value = PreserveKeysDottedDict(**{str(k): v for k, v in value.items()})\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use for parsing nested collections that may contain a 'gin' key. The 'gin' key is assumed to map to a dict value that contains gin bindings (see gin_dict_parser). Enables support for gin keys in yaml files. | def nested_gin_dict_parser(coll):
return nested_dict_walker(gin_dict_parser, coll) | [
"def gin_dict_parser(coll):\n if 'gin' in coll:\n if is_mapping(coll['gin']):\n gin.parse_config(\"\".join(map(lambda t: f'{t[0]} = {t[1]}\\n', iteritems(coll['gin']))))\n elif isinstance(coll['gin'], str):\n gin.parse_config(coll['gin'])\n return coll",
"def _parse_input... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take an array of algorithm ID ints and return an array of PublicKeyCredentialParameters | def _generate_pub_key_cred_params(
supported_algs: List[COSEAlgorithmIdentifier],
) -> List[PublicKeyCredentialParameters]:
return [
PublicKeyCredentialParameters(type="public-key", alg=alg)
for alg in supported_algs
] | [
"def encode_anchors(anchors):\n nanchors = len(anchors) // 4\n keys = np.empty(shape=(nanchors), dtype=np.int64)\n\n for i in range(nanchors):\n idx = 4*i\n anchor = anchors[idx:idx+4]\n keys[i] = encode_anchor(anchor)\n\n return keys",
"def get_h_param_combinations(h_params):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the default parameters. We're supering this as `Base` is setting up some basic globally required parameters. It's a must. We check for `self.name` before we set the destination paths for the service files as sometimes `self.name` is not provided (for instance, when retrieving status for all services under the init ... | def __init__(self, logger=None, **params):
super(SystemD, self).__init__(logger=logger, **params)
if self.name:
self.svc_file_dest = os.path.join(
constants.SYSTEMD_SVC_PATH, self.name + '.service')
self.env_file_dest = os.path.join(
constants.SYS... | [
"def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()",
"def __init__ ( self, fname=None ) :\n\n self.fname_cp = self.declareParameter( name='FNAME_CONFIG_PARS', val=fname, val_def='confpars.txt', type='str' )",
"def __init__(self, basep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate service files and returns a list of them. Note that env var names will be capitalized using a Jinja filter. This is template dependent. Even though a param might be named `key` and have value `value`, it will be rendered as `KEY=value`. We retrieve the names of the template files and see the paths where the ge... | def generate(self, overwrite=False):
super(SystemD, self).generate(overwrite=overwrite)
self._validate_init_system_specific_params()
svc_file_template = self.template_prefix + '.service'
env_file_template = self.template_prefix
self.svc_file_path = self.generate_into_prefix + '.... | [
"def create_files(self) -> None:\n data = self.data()\n for file in sorted(self.files):\n logger.debug(\n \"node(%s) service(%s) template(%s)\", self.node.name, self.name, file\n )\n rendered = self._get_rendered_template(file, data)\n file_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Install the service on the local machine This is where we deploy the service files to their relevant locations and perform any other required actions to configure the service and make it ready to be `start`ed. | def install(self):
super(SystemD, self).install()
self.deploy_service_file(self.svc_file_path, self.svc_file_dest)
self.deploy_service_file(self.env_file_path, self.env_file_dest)
sh.systemctl.enable(self.name)
sh.systemctl('daemon-reload') | [
"def install_service():\n result = CliRunner().invoke(cli, args=args.COMMON_ARGS + [home, defs.SERVICE,\n commands.INSTALL, service_path])\n sanity_check(result, home)\n assert os.path.exists(os.path.join(home, defs.SERVICES, service_name, \"{}_service.py\".fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uninstall the service. This is supposed to perform any cleanup operations required to remove the service. Files, links, whatever else should be removed. This method should also run when implementing cleanup in case of failures. As such, idempotence should be considered. | def uninstall(self):
sh.systemctl.disable(self.name)
sh.systemctl('daemon-reload')
if os.path.isfile(self.svc_file_dest):
os.remove(self.svc_file_dest)
if os.path.isfile(self.env_file_dest):
os.remove(self.env_file_dest) | [
"def _uninstall_service(service_name) -> None:\n if service_name.startswith('/'):\n service_name = service_name[1:]\n # Note service-names *cannot* have underscores in them.\n service_name = service_name.replace(\"_\", \"-\")\n log.info(\"Uninstalling {}.\".format(service_name))\n sdk_install.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of the statuses of the `name` service, or if name is omitted, a list of the status of all services for this specific init system. There should be a standardization around the status fields. There currently isn't. `self.services` is set in `base.py` | def status(self, name=''):
super(SystemD, self).status(name=name)
svc_list = sh.systemctl('--no-legend', '--no-pager', t='service')
svcs_info = [self._parse_service_info(svc) for svc in svc_list]
if name:
names = (name, name + '.service')
# return list of one ite... | [
"def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')",
"def get_service_statuses():\n\n # We'll collect the statuses for the service in a list.\n # Note: increasing the \"minutes\" value will reduce the ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the init system exists and False if not. | def is_system_exists():
return is_system_exists() | [
"def _is_installed(self):\n return self._system.exists(os.path.join(self.get_install_path(), \"bin/root\"))",
"def _is_system_installed( self ):\n return self._system.test_library(self._library, self._headers)",
"def is_installed() -> bool:\n if platform.system() in (\"Linux\", \"Darwin\"):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if mot1 is an anagram of mot2 | def est_anagramme(mot1,mot2):
return sorted(mot1) == sorted(mot2) | [
"def is_anagram(word1, word2):\n return sorted(word1) == sorted(word2)",
"def is_anagram(word1, word2):\n \n word1_list = [i for i in word1.lower() if i != \" \"]\n word2_list = [j for j in word2.lower() if j != \" \"]\n \n word1_list.sort()\n word2_list.sort()\n \n return word1_list... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the list of anagrams of mot | def anagrammes(mot, hashTable):
li = hashTable.get(prehash(mot))
result = []
for m in li:
if est_anagramme(m, mot):
result.append(m)
return result | [
"def test_find_anagram_phrases(self):\n dict_file = os.path.abspath('tests/data/ch03/dictionary.txt')\n word_list = cleanup_dict(dict_file)\n word_list = cleanup_list_more(word_list)\n anagram_dict = anagram_generator.get_anagram_dict(word_list)\n # Test a word without anagrams.\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setup a list of rigid link assemblies (RA) | def rigidLinkAssemblies(self):
# allocate(RAm1(1:Init%NElem)) ! NOTE: do not deallocate, this is an "output" of this function
# RAm1(1:Init%NElem) = -1
#
# --- Establish a list of rigid link elements
Er = [e for e in self.Elements if e.data['TypeID']==idMemberRigid ]
EIDr = [e.ID for ... | [
"def setup(self):\n self._rl_modules = {}\n self.__check_module_configs(self.config.modules)\n for module_id, module_spec in self.config.modules.items():\n self._rl_modules[module_id] = module_spec.build()",
"def _load_links(self) -> NoReturn:\n total = self.project_size[2]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The neighbor elements of element e0 (that are found within the list Er) are added to the list Ea | def addNeighbors(self, e0, EIDr, EIDa) :
#print('----------------------------------------------------')
#print('>>> Looking for neighbors of ',e0.ID, 'within',EIDr)
if len(EIDr)==0:
return EIDa
EIDn =[] # List of neighbors of e0
# Loop through all elements, setup list of e0-neighbors... | [
"def augVertexListHelper(self, edgesConsidered):\n vertexLists = {key : [] for key in self.complex.oneCells} # return a list of the vertices with more information about them\n\n stackingVertexDict = {generator : \\\n {vertex: StackingVertex(vertex, [], [], [], []) for vertex in self.vertexL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns constraint matrix Tc for a rigid assembly (RA) formed by a set of elements: x_c = Tc.x_c_tilde, where x_c are all the DOF of the rigid assembly, and x_c_tilde are the 6 reduced DOF (leader DOF) | def RAElimination(self, RA):
Elements = [self.getElement(eid) for eid in RA]
# --- List of nodes stored first
#print('>>> Elements',Elements)
Nodes = self.elements2nodes(Elements)
INodesID = [n.ID for n in Nodes]
print(' Nodes involved in assembly (unique)', INodesID)
#--- Look for... | [
"def maccormack(U_init,numt,numx,numy,delx,dely,Tw,Tfs,rho_fs,ufs,c_v,c_p,viscfs,Prt,lmbda,R,gamma):\n Un = numpy.zeros((numt+1,4,numx,numy))\n Un[0,:,:,:] = U_init.copy()\n #\n U = U_init.copy()\n #\n Us = U_init.copy()\n #\n for t in range(1,numt+1):\n \t#get properties to calculate flu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts a job to load a bigquery table from CSV | def load_table(bigquery, project_id, dataset_id, table_name, source_schema,
source_path, num_retries=5):
# Generate a unique job_id so retries
# don't accidentally duplicate query
job_data = {
'jobReference': {
'projectId': project_id,
'job_id': str(uuid.uuid4... | [
"def load_to_gbq(filename, bq_configuration):\n # construct Client object with the path to the table in which data will be stored\n client = bigquery.Client(project = bq_configuration[\"project_id\"])\n dataset_ref = client.dataset(bq_configuration[\"dataset_id\"])\n table_ref = dataset_ref.table(bq_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Column that adds primary key foreign key reference. | def ReferenceCol(tablename, nullable=False, pk_name='id', **kwargs):
return db.Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name)),
nullable=nullable, **kwargs) | [
"def reference_col(tablename, nullable=False, pk_name='id', ondelete='CASCADE', **kwargs):\n return db.Column(db.ForeignKey(f'{tablename}.{pk_name}', ondelete=ondelete),\n nullable=nullable, **kwargs)",
"def foreign_key(self: Fdef) -> Optional[str]:\n self._resolve_if_needed()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given A Microsoft Graph List response (collection of objects) When calling parse_list() Then Validate output parsing | def test_parse_list():
with open('test_data/risk_detections_response.json') as f:
response = json.load(f)
human_readable_title = "Risks"
context_path = "Risks_path"
parsed = parse_list(response, human_readable_title=human_readable_title, context_path=context_path)
outputs = parsed.outputs
... | [
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def test_withListCompleted(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given A Microsoft Graph List response (collection of objects) When calling parse_list() Then Validate output parsing | def test_parse_list_empty():
empty_response = dict()
human_readable_title = "Risks"
context_path = "Risks_path"
parsed = parse_list(empty_response, human_readable_title=human_readable_title, context_path=context_path)
outputs = parsed.outputs
assert outputs == {f'AADIdentityProtection.{context_... | [
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def test_withListCompleted(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mocks the request to list detections from the API. The mock will manually take into consideration the filter and limit supplied as parameters. It also accepts the user_id and user_principal_name, to allow full running of fetch (as the actual function receives these parameters). | def mock_list_detections(limit, filter_expression, user_id, user_principal_name):
from AzureADIdentityProtection import DATE_FORMAT, date_str_to_azure_format
test_incidents = util_load_json('test_data/incidents.json')
all_possible_results = test_incidents.get('value')
start_time = filter_expression.spl... | [
"def test_fetch_detections_success_when_detections_equal_to_max_fetch(requests_mock):\n incidents = load_mock_response(\"mock_incidents.json\")\n\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n\n mock_response = load_mock_response(\"fetch_d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mocks the function that retrieves the fetch time that should be used. | def mock_get_last_fetch_time(last_run, params):
last_fetch = last_run.get('latest_detection_found')
if not last_fetch:
# To handle the fact that we can't freeze the time and still parse relative time expressions such as 2 days
last_fetch = "2021-07-16T11:08:55.000Z"
return last_fetch | [
"def time_mock(mocker):\n\n TimeMock(mocker)",
"def test_get_time_tracking_entry(self):\n pass",
"def mock_time(self, t):\n utils._micros_since_epoch = lambda : t",
"def fetch_time(self) -> float:\n return self.navigation_timing.response_end - self.navigation_timing.fetch_start",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets DPS data/properties | def set(self, dps_data):
base_payload = OrderedDict([("devId", self.dev_id), ("dps", dps_data), ("t", int(time.time()))])
enc_payload = self.message.compose('set', base_payload)
return self.communicate(enc_payload) | [
"def setMPxData(*args, **kwargs):\n \n pass",
"def setData(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def draw_data_property(self, dp):\n\n # draw the data property\n o = self.scene.mlab.points3d(dp.x, dp.y, dp.z, color=green,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set LED brightness to X% | def set_brightness(self, percentage):
try:
percentage = round(percentage, 1)
except TypeError:
raise ValueError("percentage must be numeric (integer, float)")
percentage = max(1, percentage) * 10
return self.set(OrderedDict([(DPS.POWER, True), (DPS.BRIGHT, percen... | [
"def setBrightness(self, value = 0):\n\t\tgrovepi.fourDigit_brightness(self.display, value)",
"def set_backlight(val):\n val = max(0, min(1.0, val))\n board.DISPLAY.auto_brightness = False\n board.DISPLAY.brightness = val",
"def update_led(self):\n if self.pwm < 300:\n self.set_led_fu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find Tuya-compatible devices; gid/devid can be found in apps for smart devices. Use MITM to find the localkey by capturing the packets from the smart home/LED app | def find_devices(gid=None, key=''):
import concurrent.futures
def checkport(port):
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', port))
sock.settimeout(10)
cipher = SyskaCipher()
start = time.time()
while True:
try:
data_json = jso... | [
"def find_devices(controller):\n pysicl.gpib_timeout(500)\n for addr in range(1,31):\n print addr\n if addr != 21:\n status = dev_status(controller+str(addr))\n print addr,status\n if status > -1:\n print addr,\":\",status\n pysicl.gpib_timeout(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize this read-write lock. | def __init__(self, lock=None):
# Condition variable, used to signal waiters of a change in object
# state.
if lock is None:
self.__condition = Condition(Lock())
else:
self.__condition = Condition(lock)
# Initialize with no writers.
self... | [
"def acquire_read(self):\n with self.monitor:\n if self.rwlock == -1 and self.writer == threading.currentThread():\n #We already have a write lock - we don't acquire try to acquire\n # a read lock.- we increment the number of write locks.\n self.wcount +=1 \n else:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Acquire a read lock for the current thread, waiting at most timeout seconds or doing a nonblocking check in case timeout is <= 0. In case timeout is None, the call to acquireRead blocks until the lock request can be serviced. In case the timeout expires before the lock could be serviced, a RuntimeError is thrown. | def acquireRead(self, blocking=True, timeout=None):
if not blocking:
endtime = -1
elif timeout is not None:
endtime = time() + timeout
else:
endtime = None
me = current_thread()
self.__condition.acquire()
try:
if... | [
"async def acquire(self, blocking=None, blocking_timeout=None):\n sleep = self.sleep\n token = b(uuid.uuid1().hex)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n blocking_timeout =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Acquire a write lock for the current thread, waiting at most timeout seconds or doing a nonblocking check in case timeout is <= 0. In case the write lock cannot be serviced due to the deadlock condition mentioned above, a ValueError is raised. In case timeout is None, the call to acquireWrite blocks until the lock requ... | def acquireWrite(self, timeout=None):
if timeout is not None:
endtime = time() + timeout
me, upgradewriter = current_thread(), False
self.__condition.acquire()
try:
if self.__writer is me:
# If we are the writer, grant a new write lock, al... | [
"def test_write_lock_acquired(self) -> None:\n # First to acquire this lock, so it should complete\n lock = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock is not None\n\n # Enter the context manager\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open a dialogue box (dialog) using a program appropriate to the desktop environment in use. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to open the dialog instead of guessing or detecting which environment is being used. Suggested values for 'des... | def open(self, desktop=None):
# Decide on the desktop environment in use.
desktop_in_use = use_desktop(desktop)
# Get the program.
try:
program = self.commands[desktop_in_use]
except KeyError:
raise OSError("Desktop '%s' not supported (no kn... | [
"def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_SwitchDesktop(jitter):\n ret_ad, args =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialise a menu with the given heading 'text', column 'titles', and optional 'items' (which may be added later), 'width' (in characters), 'height' (in characters) and 'list_height' (in items). | def __init__(self, text, titles, items=None, width=None, height=None, list_height=None):
Simple.__init__(self, text, width, height)
self.titles = ([""] * self.number_of_titles + titles)[-self.number_of_titles:]
self.items = items or []
self.list_height = list_height | [
"def __init__(self, menu_list, attr, pos, body):\n \n content = [urwid.AttrWrap(SelText(\" \" + w), None, attr[1])\n for w in menu_list]\n\n #Calculate width and height of the menu widget:\n height = len(menu_list)\n width = 0\n for entry in menu_list:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of windows which are children of this window. If the optional 'all' parameter is set to a true value, all such windows will be returned regardless of whether they have any name information. | def children(self, all=0):
s = _xwininfo(self.identifier, "children")
return self._descendants(s, all and self.find_all or self.find_named) | [
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.pare... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of windows which are descendants of this window. If the optional 'all' parameter is set to a true value, all such windows will be returned regardless of whether they have any name information. | def descendants(self, all=0):
s = _xwininfo(self.identifier, "tree")
return self._descendants(s, all and self.find_all or self.find_named) | [
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.pare... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return windows using the given 'callable' (returning a true or a false value when invoked with a window name) for descendants of this window. | def find(self, callable):
s = _xwininfo(self.identifier, "tree")
return self._descendants(s, callable) | [
"def _find_window(self, predicate, timeout = 10.0):\n window_handle = None\n end_time = time.time() + timeout\n while window_handle is None and end_time > time.time():\n for handle in self.driver.window_handles:\n if predicate(handle):\n window_handl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |