query | document | negatives | metadata
|---|---|---|---|
construct a string summarizing the episode using its metadata, or just return the episode's description if needed. | def makeEpisodeSummary(episode):
# using inverted pyramid strategy; more detail at bottom of description
summary = episode['description'] + "\n\n"
if episode['publisher'] != '':
summary = "%sPublisher: %s\n" % (summary, episode['publisher'])
if episode['season'] != '':
summary = "%sSeason: %s\n" % (summary, epi... | [
"def getEpisodeDescription(self, seasonnum, episodenum):\r\n if (type(seasonnum) is not int) and (type(episodenum) is not int):\r\n return('Invalid input, season number and episode number must be integers.')\r\n try:\r\n episodename = showInformation.getEpisodeName(self, seasonnu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
construct a directory item for a series existing in user's queue. Selecting this item leads to more details about the series, and the ability to remove it from the queue. | def makeQueueItem(queueInfo):
Log.Debug("queueinfo: %s" % queueInfo)
s = Dict['series']
sId = str(queueInfo['seriesId'])
thumb = (s[sId]['thumb'] if (sId in s and s[sId]['thumb'] is not None) else R(CRUNCHYROLL_ICON))
art = (s[sId]['art'] if (sId in s and s[sId]['art'] is not None) else R(CRUNCHYROLL_ART))
queueI... | [
"def QueueChangePopupMenu(sender, seriesId):\n\tlogin()\n\tdir = MediaContainer(title1=\"Queue\",title2=sender.itemTitle,disabledViewModes=[\"Coverflow\"])\n\tif isRegistered():\n\t\tqueueList = getQueueList()\n\t\tinQ = False\n\t\tfor item in queueList:\n\t\t\tif item['seriesId'] == seriesId:\n\t\t\t\tinQ = True\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show menu for browsing content of type=ANIME_TYPE or DRAMA_TYPE | def BrowseMenu(sender,type=None):
if type==ANIME_TYPE:
all_icon = ANIME_ICON
elif type==DRAMA_TYPE:
all_icon = DRAMA_ICON
dir = MediaContainer(disabledViewModes=["coverflow"], title1="Browse %s" % type)
dir.Append(Function(DirectoryItem(AlphaListMenu,"All", title1="All", thumb=R(all_icon)), type=type))
di... | [
"def showTypeMenu(self, menu):\n index = self.selectionModel().currentIndex()\n self.scrollTo(index)\n rect = self.visualRect(index)\n pt = self.mapToGlobal(QtCore.QPoint(rect.center().x(), rect.bottom()))\n menu.popup(pt)",
"def loadTypeSubMenu(self):\n selectTypes = {no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display a menu showing episodes available in a particular season. | def SeasonMenu(sender,seriesId=None,season=None):
dir = MediaContainer(disabledViewModes=["Coverflow"], title1=sender.title1, title2="Series")
epList = getSeasonEpisodeListFromFeed(seriesId, season)
for episode in epList:
dir.Append(makeEpisodeItem(episode))
return dir | [
"def load_episodes(self):\n self.episode_menu.clear_items()\n for episode in get_episode_list(self.show['pk']):\n self.episode_menu.add_item(MenuItem(\n \"%d x %d : %s \" % (\n episode['season_number'],\n episode['episode_number'],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add some video tests to a MediaContainer | def addMediaTests(dir):
if ENABLE_DEBUG_MENUS:
testEpisodes = [
{'title': 'Bleach Episode 1',
'season': 'One',
'summary': "480p Boxee feed. This needs a premium account. No ads should show! Plex client should show a resolution of 853x480. (I do not know the 480p url, or if there is one, so it'll probably ... | [
"def test_api_videos_post(self):\n pass",
"def test_api_videos_get(self):\n pass",
"def addMedia(self, m):",
"def test_video_metadata(self):\n with self.subTest(\"Test mkv video\"):\n self.mock_metadata.has.return_value = False\n self.mock_metadata._MultipleMetadata_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
remove seriesID from queue | def RemoveFromQueue(sender,seriesId):
login()
result = removeFromQueue(seriesId)
if result:
return MessageContainer("Success",'Removed from Queue')
else:
return MessageContainer("Failure", 'Could not remove from Queue.') | [
"def remove(self):\r\n self.queue.pop(0)",
"def remove(self, node_id):\n for x,(y,z) in enumerate(self.queue):\n if z[1][-1] == node_id:\n del self.queue[x]\n return self.queue\n raise NotImplementedError",
"def remove_from_queue(self, index):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add seriesId to the queue. | def AddToQueue(sender,seriesId,url=None):
login()
result = addToQueue(seriesId)
if result:
return MessageContainer("Success",'Added to Queue')
else:
return MessageContainer("Failure", 'Could not add to Queue.') | [
"def add_to_queue(self, video_id):\n self.start_session_if_none()\n self._session.add_to_queue(video_id)",
"def add_to_queue(self, sid, data):\n self.activation_queue.put((sid, data))",
"def __add_to_queue(self, _id, url):\n payload = dumps(dict(\n id=str(_id),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pop up a menu asking the user whether to add or remove this series from their queue | def QueueChangePopupMenu(sender, seriesId):
login()
dir = MediaContainer(title1="Queue",title2=sender.itemTitle,disabledViewModes=["Coverflow"])
if isRegistered():
queueList = getQueueList()
inQ = False
for item in queueList:
if item['seriesId'] == seriesId:
inQ = True
break
if inQ:
dir.Appen... | [
"def show_menu():\n\tchoice = 0\n\tarray_size = int(input(\"Enter size for array as interger value: \"))\n\tqueue_object = Queue(array_size)\n\t\n\twhile choice >= 0 and choice < 4:\n\t\tprint \"1. Insert\"\n\t\tprint \"2. Delete\"\n\t\tprint \"3. Display\"\n\t\tprint \"4. Exit\"\n\t\tchoice = int(input(\"Enter cho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
construct a URL to display at the given resolution based on videoInfo, without checking that it matches what the site offers or that the resolution is valid | def getVideoUrl(videoInfo, resolution):
url = videoInfo['baseUrl']+"?p" + str(resolution) + "=1"
# we always skip adult filtering (it's done in the presentation code before we reach here)
url = url + "&skip_wall=1"
url = url + ("&t=0" if Prefs['restart'] == 'Restart' else "")
url = url + "&small="+("1" if videoIn... | [
"def get_video_url():\n return f'{API_URL}{quote(VIDEO_NAME)}'",
"def stream_url(self) -> Optional[str]:\n video_streams = self.video\n if not video_streams:\n return None\n\n for quality in VIDEO_QUALITY_TYPES:\n video_stream_url = video_streams.get(quality)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct media objects from an episode. | def constructMediaObject(episode):
if True or len(episode['availableResolutions']) == 0:
episode['availableResolutions'] = getAvailResFromPage(episode['link'])
# FIXME I guess it's better to have something than nothing? It was giving Key error
# on episode number
if str(episode['mediaId']) not in Dict['episod... | [
"def build_from(lines:[str], number:int=0) -> object:\n have_chapter = any(REG_CHAPTER.fullmatch(line.strip()) for line in lines)\n lines = iter(lines)\n # get title, and waste the next line, that should be empty\n title = next(lines).strip()\n empty = next(lines).strip()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Play a freebie video using the direct method. As long as crunchyroll.com delivers ads through the direct stream (they do as of Feb 14 2012), this is okay IMO. This gets around crashes with redirects/content changes of video page, and sacrifices the ability to use javascript in the site config. | def PlayVideoFreebie2(sender, mediaId):
episode = getEpisodeDict(mediaId)
infoUrl = episode['link'] + "?p360=1&skip_wall=1&t=0&small=0&wide=0"
req = HTTP.Request(infoUrl, immediate=True, cacheTime=10*60*60) #hm, cache time might mess up login/logout
match = re.match(r'^.*(<link *rel *= *"video_src" *href *= *")(h... | [
"def send_video_to_vidly(video):\n notify_url = absolutify(reverse('flicks.videos.notify',\n args=[settings.NOTIFY_KEY]))\n shortlink = addMedia(video.upload_url, notify_url)\n\n if shortlink is None:\n video.state = 'error'\n video.save()\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
make a crunchyroll.com API request with the passed dictionary. Optionally, specify referer to prevent request from choking. | def makeAPIRequest(valuesDict,referer=None):
h = API_HEADERS
if referer is not None:
h['Referer'] = referer
h['Cookie']=HTTP.CookiesForURL(BASE_URL)
req = HTTP.Request("https"+API_URL,values=valuesDict,cacheTime=0,immediate=True, headers=h)
response = re.sub(r'\n\*/$', '', re.sub(r'^/\*-secure-\n', '', req.conte... | [
"def GET_request(action):\n\n # OAuth token of the user that requests will be made on behalf of\n\n\n # Login of the advertising agency client\n # Required parameter if requests are made on behalf of an advertising agency\n clientLogin = 'marketingdigital@zara.com'\n\n headers = {\n # OAuth to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
put a default authentication status structure into the global Dict{}. Every datum defaults to least permissions. | def resetAuthInfo():
Dict['Authentication'] = {'loggedInSince':0.0, 'failedLoginCount':0, 'AnimePremium': False, 'DramaPremium': False} | [
"def create_default_values (self):\n\n self.default_values = {\"username\": '',\n \"password\": '',\n \"is_demo\": True,\n \"epic\": 'IX.D.DAX.IMF.IP',\n \"api_key\": '',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
does the user own a paid account of any type? | def hasPaid():
login()
if not Dict['Authentication']: resetAuthInfo()
authInfo = Dict['Authentication']
if (time.time() - authInfo['loggedInSince']) < LOGIN_GRACE:
if authInfo['AnimePremium'] is True or authInfo['DramaPremium'] is True:
return True
return False | [
"def is_pro_account():\n try:\n windscribe.login(username, password)\n return \"Free\" in windscribe.account().plan\n except:\n return False",
"def is_paid_via_app(self):\n return self.channel == SaleTrade.WX or self.channel == SaleTrade.ALIPAY or self.channel == SaleTrade.BUDGET... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
change the preferred resolution serverside to integer res | def setPrefResolution(res):
if hasPaid():
res2enum = {360:'12', 480:'20', 720:'21', 1080:'23'}
response = jsonRequest(
{ 'req': "RpcApiUser_UpdateDefaultVideoQuality",
'value': res2enum[res]
}
)
if response.get('result_code') == 1:
return True
else:
return False
return False | [
"def s_resolution(self):\n return self.get('s_resolution') * u.arcsec",
"def get_resolution():\n\treturn user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)",
"def s_resolution_max(self):\n rmax = self.get('s_resolution_max', default=None)\n return rmax if not rmax else rmax * u.arcsec",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
remove seriesID from queue | def removeFromQueue(seriesId):
login()
if not isRegistered():
return False
response = makeAPIRequest2("req=RpcApiUserQueue_Delete&group_id=%s"%seriesId)
#FIXME response should have meaning; do something here?
Log.Debug("remove response: %s"%response)
return True | [
"def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from Queue.')",
"def remove(self):\r\n self.queue.pop(0)",
"def remove(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add seriesId to the queue. | def addToQueue(seriesId):
login()
if not isRegistered():
return False
Log.Debug("add mediaid: %s"%seriesId)
response = makeAPIRequest2("req=RpcApiUserQueue_Add&group_id=%s"%seriesId)
Log.Debug("add response: %s"%response)
return True | [
"def AddToQueue(sender,seriesId,url=None):\n\tlogin()\n\tresult = addToQueue(seriesId)\n\t\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Added to Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not add to Queue.')",
"def add_to_queue(self, video_id):\n self.start_session_if_none... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return an episode dict object identified by mediaId. If you know the mediaId, it SHOULD be in the cache already. If not, you could get None if recovery doesn't work. This might happen with mediaIds that come from the great beyond (e.g. queue items on the server) and are in series with a lot of episodes. Sorry about that. | def getEpisodeDict(mediaId):
if str(mediaId) not in Dict['episodes']:
# get brutal
recoverEpisodeDict(mediaId)
return Dict['episodes'].get(str(mediaId)) | [
"def __getitem__(self, media_id):\n for media in self:\n if media.id == media_id:\n return media\n\n raise KeyError('No media with id {}'.format(media_id))",
"def constructMediaObject(episode):\n\tif True or len(episode['availableResolutions']) == 0:\n\t\tepisode['available... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
try everything possible to recover the episode info for mediaId and save it in Dict{}. If it fails, return none. | def recoverEpisodeDict(mediaId):
Log.Debug("#######recovering episode dictionary for mediaID %s" % str(mediaId))
# get a link with title in it.
#import urllib2
req = urllib2.urlopen(BASE_URL+"/media-" + str(mediaId) + "?pskip_wall=1")
redirectedUrl = req.geturl()
req.close()
redirectedUrl = redirectedUrl.replace(... | [
"def get_episode_metadata(show_id, season_num, episode_num):\n metadata_provider = ADDON.getSetting(\"tv_metadata_provider\")\n info, created_time = fetch_episode_from_db(show_id,\n str(season_num),\n str(episode_num),... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
sort list of dict by key 'title' and return the result | def titleSort(dictList):
res = sorted(dictList, key=lambda k: getSortTitle(k))
return res | [
"def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].title )\n\t\treturn r_list",
"def arrange(l: Dict[str, List[str]]) -> None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
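The `titleSort` row above leans on a `getSortTitle` helper that isn't shown; below is a minimal sketch of the same sorted-with-key pattern, with a hypothetical `getSortTitle` that sorts case-insensitively and ignores a leading article:

```python
def getSortTitle(d):
    # hypothetical stand-in: lowercase the title and drop a leading "the "
    title = d['title'].lower()
    return title[4:] if title.startswith('the ') else title

def titleSort(dictList):
    return sorted(dictList, key=lambda k: getSortTitle(k))

shows = [{'title': 'The B Show'}, {'title': 'a show'}, {'title': 'C Show'}]
print([s['title'] for s in titleSort(shows)])   # -> ['a show', 'The B Show', 'C Show']
```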
return the best background art URL for the passed episode. | def getEpisodeArt(episode):
seriesId = None
for sk in Dict['series'].keys():
if Dict['series'][str(sk)]['title']==episode['seriesTitle']:
seriesId = int(sk)
if seriesId is not None:
artUrl = ""
if Dict['series'][str(seriesId)]['tvdbId'] is not None and Prefs['fanart'] is True:
artUrl = fanartScrapper.get... | [
"def get_episode_media_url(self, podcast_entry):\r\n links = podcast_entry[\"links\"]\r\n\r\n for link in links:\r\n if \"audio\" in link[\"type\"]:\r\n return link[\"href\"]",
"def get_background_art_urls():\n server = get_plex_server('XXXXXXXXX', 'XXXXXXXXX', 'XXXXXXXX... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to find a better thumb than the one provided via url. The thumb data returned is either an URL or the image data itself. | def getThumb(url,tvdbId=None):
ret = None
if (tvdbId is not None and Prefs['fanart'] is True):
thumb = fanartScrapper.getRandImageOfTypes(tvdbId,['tvthumbs'])
if thumb is None: thumb = url
url=thumb
if url==R(CRUNCHYROLL_ICON):
ret = url
else:
if url is not None:
try:
data = HTTP.Request(url, cac... | [
"def parsethumbfromdescription(descriptionelement):\n soup = bs4.BeautifulSoup(descriptionelement.text,'html.parser')\n img = soup.find('img')\n if not img: return None\n return img.attrs['src']",
"def _findBestImage(self, url):\n largeUrls = [\n url.replace('100x100'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
given an url of a page where video is watched, return a list of integers of available heights. If user is a guest, just return 360, which is all they get ;) | def getAvailResFromPage(url):
if not Prefs['username'] or not Prefs['password']:
return [360]
login()
availRes = [360]
link = url.replace(BASE_URL, "")
req = HTTP.Request(url=url, immediate=True, cacheTime=3600*24)
html = HTML.ElementFromString(req)
try:
small = not isPremium()
except: small = False... | [
"def get_video_information():\n url = get_video_url()\n response = requests.get(url)\n info =response.json()\n number_frames = info['frames']\n return number_frames",
"def get_pages(url):\n return url.json()['size'] // 10",
"def get_height():\n while True:\n val = get_int(\"Height: \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Train Kalman Filter Decoder | def fit(self,X_kf_train,y_train):
#First we'll rename and reformat the variables to be in a more standard kalman filter nomenclature (specifically that from Wu et al, 2003):
#xs are the state (here, the variable we're predicting, i.e. y_train)
#zs are the observed variable (neural data here, i.... | [
"def __generate_kalman_data(self):\n if self.print_kf_progress:\n print('Generating inputs...')\n self.kf_timestamps, initial_state, initial_covariance, transition_matrices, transition_covariances, observation_matrices, observation_covariances, self.kf_measurements = self.__generate_kalman_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect to the given address. | async def connect(self, address: Tuple[str, int]):
... | [
"def connect(self, addr):\n sock = socket.socket(self.address_family, self.socket_type)\n sock.connect(addr)\n if VERBOSE: print \"Connected to \" + str(addr)\n self.sock = sock",
"def connect(addr):\n port = 1\n s = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n s.connect((add... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform any handshake if needed. Can be a noop for less complicated protocols. | async def handshake(self) -> None:
... | [
"def handshake(self):\r\n self.stream.write_uchar(3)\r\n c1 = packet.Handshake()\r\n c1.first = 0\r\n c1.second = 0\r\n c1.payload = self.create_random_bytes(1528)\r\n c1.encode(self.stream)\r\n self.stream.flush()\r\n\r\n self.stream.read_uchar()\r\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
\param combobox gtk.ComboBox instance or gtk.ComboBoxEntry, if second then use_completion can be used \param answers list of tuples (value, string), string will be displayed in combobox, value will be returned by \ref get_value \param none_answer value for returning if empty item is selected \param checkbutton gtk.Togg... | def __init__(self, combobox, answers = None, none_answer = None, checkbutton = None, use_completion = True):
self.checkbutton = checkbutton
self.combobox = combobox
self.none_answer = none_answer
if not (use_completion and isinstance(combobox.get_child(), gtk.Entry)):
c = gtk... | [
"def update_answers(self, answers, none_answer = None):\n if answers == None:\n return\n if len(answers) == 0:\n m = gtk.ListStore(int, str)\n self.combobox.set_model(m)\n if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
\brief set new answers set \param answers list of tuples like for \ref __init__ \param none_answer value to return when empty item is selected | def update_answers(self, answers, none_answer = None):
if answers == None:
return
if len(answers) == 0:
m = gtk.ListStore(int, str)
self.combobox.set_model(m)
if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):
self... | [
"def test_student_set_answer_base_case() -> None:\n student = Student(1, 'John')\n q1 = MultipleChoiceQuestion(1, \"a b c or d?\", ['a', 'b', 'c', 'd'])\n a1 = Answer('a')\n q2 = CheckboxQuestion(5, \"do you like dogs?\", ['yes', 'no', 'sometimes'])\n a2 = Answer([\"yes\", \"sometimes\"])\n q3 = N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the softmax function for each row of the input x. It is crucial that this function is optimized for speed because it will be used frequently in later code. | def softmax(x):
x = x.T - np.max(x.T, axis=0)
x = np.exp(x) / np.sum(np.exp(x),axis=0)
return x.T | [
"def softmax(x):\n if len(x.shape) > 1:\n # Matrix\n # substracting max leaves function unchanged due to softmax's invariance to sums by a constant \n # keepdims= True, because broadcasting requires trailing shape entries to match\n x -= np.max(x, axis=1, keepdims=True)\n x = n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
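A quick sanity check of the row-wise softmax above: subtracting the per-row maximum before `np.exp` is what keeps large logits (second row) from overflowing. The toy `logits` array is illustrative:

```python
import numpy as np

def softmax(x):
    # shift each row by its max (softmax is invariant to this), then normalize
    x = x.T - np.max(x.T, axis=0)
    x = np.exp(x) / np.sum(np.exp(x), axis=0)
    return x.T

logits = np.array([[1.0, 2.0, 3.0],
                   [1000.0, 1000.0, 1000.0]])  # naive exp(1000) would overflow
probs = softmax(logits)
print(probs.sum(axis=1))   # -> [1. 1.]
print(probs[1])            # -> [0.333... 0.333... 0.333...]
```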
Compute the gradient for the sigmoid function here. Note that for this implementation, the input f should be the sigmoid function value of your original input x. | def sigmoid_grad(f):
return f * (1-f) | [
"def sigmoid_derivative(x):\n return x * (1 - x)",
"def sigmoidGradient(z):\r\n\r\n g = np.multiply(sigmoid(z), (1. - sigmoid(z)))\r\n return g",
"def perf_sigmoid_derivative(x):\n # result = perf_sigmoid(x)\n # return result * (1 - result)\n return x * (1 - x)",
"def _gradient(self, _x, _y)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
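`sigmoid_grad` takes the sigmoid *value*, not the raw input, so it is normally chained after a `sigmoid` helper. That helper isn't part of the row, so the definition below is an assumed standard one:

```python
import numpy as np

def sigmoid(x):
    # assumed standard logistic function (not shown in the row above)
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_grad(f):
    # f must already be sigmoid(x): d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    return f * (1 - f)

x = np.array([-2.0, 0.0, 2.0])
print(sigmoid_grad(sigmoid(x)))   # peaks at 0.25 where x == 0
```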
Gradient check for a function f f should be a function that takes a single argument and outputs the cost and its gradients x is the point (numpy array) to check the gradient at | def gradcheck_naive(f, x):
rndstate = random.getstate()
random.setstate(rndstate)
nprndstate = np.random.get_state()
np.random.set_state(nprndstate)
fx, grad = f(x) # Evaluate function value at original point
h = 1e-4
# Iterate over all indexes in x
it = np.nditer(x, flags=['multi_inde... | [
"def gradcheck_naive(f, x):\n\n rndstate = random.getstate()\n random.setstate(rndstate)\n fx, grad = f(x) # Evaluate function value at original point\n h = 1e-4 # Do not change this!\n\n # Iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
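The checker in this row is cut off mid-iteration; here is a minimal self-contained sketch of the same central-difference technique. It omits the RNG-state pinning the original performs for stochastic cost functions, and the `1e-5` tolerance is a common default rather than a value taken from the row:

```python
import numpy as np

def gradcheck_naive(f, x, h=1e-4):
    # f maps x -> (cost, gradient); x is perturbed one coordinate at a time
    _, grad = f(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        fxph, _ = f(x)
        x[ix] = old - h
        fxmh, _ = f(x)
        x[ix] = old                      # restore before moving on
        numgrad = (fxph - fxmh) / (2 * h)
        rel_err = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        assert rel_err <= 1e-5, "Gradient check failed at %s" % (ix,)
        it.iternext()
    print("Gradient check passed!")

# f(x) = sum(x^2) has gradient 2x
gradcheck_naive(lambda x: (np.sum(x ** 2), 2 * x), np.random.randn(4, 5))
```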
use a templating library to turn a prefix and a list of contents into an HTML directory index | def render_index(prefix, order_by, contents, reverse_order, base_path):
logger.debug('rendering index for {prefix} ordered by {order_by} and reverse_order={reverse_order}'.format(prefix=prefix, order_by=order_by, reverse_order=reverse_order))
sorted_contents = sorted(contents, key=lambda k: k[order_by], revers... | [
"def get_dir_index(path, page):\n if page[0] is not \"/\":\n page = f\"/{page}\"\n if page is \"/\":\n page = \"\"\n index_html = \"<pre>\\n\"\n files = os.listdir(path)\n for file in files:\n index_html += f\"<a href='{page}/{file}'>{file}</a>\\n\"\n index_html += \"</pre>\"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save all the data about the rooms | async def write_rooms(rooms):
with open(ROOMDATA, 'wb') as opened_file:
pickle.dump(rooms, opened_file, protocol=pickle.HIGHEST_PROTOCOL) | [
"async def save(self):\r\n data = await self._api.update_room(\r\n self._location_id, self._room_id, self.to_data()\r\n )\r\n if data:\r\n self.apply_data(data)",
"def save_data( self, ):\n\n log_msg = \"in save_data() \" #print( log_msg )\n self.logg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns values common to both move lines (except for debit, credit and amount_currency which are reversed) | def _get_shared_move_line_vals(self, debit, credit, amount_currency):
if self.payment_difference_handling == 'open' and not self.payment_difference and not self._context.get(
'credit_aml', False):
if self.payment_method_type == 'adjustment' \
and debit > 0.0 \
... | [
"def group_move_lines(self, line):\n\t\t\n\t\tline2 = {}\n\t\tfor l in line:\n\t\t\ttmp = self.inv_line_characteristic_hashcode(l)\n\t\t\tif tmp in line2:\n\t\t\t\tam = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])\n\t\t\t\tline2[tmp]['debit'] = (am > 0) and am or 0.0\n\t\t\t\tline2[tmp]['... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the journal items for the payment and update the payment's state to 'posted'. A journal entry is created containing an item in the source liquidity account (selected journal's default_debit or default_credit) and another in the destination reconcilable account (see _compute_destination_account_id). If invoice_id... | def post(self):
AccountMove = self.env['account.move'].with_context(default_type='entry')
for rec in self:
if rec.state not in ['draft', 'pdc']:
raise UserError(_("Only a draft payment can be posted."))
if any(inv.state != 'posted' for inv in rec.invoice_ids):
... | [
"def post(self):\n AccountMove = self.env['account.move'].with_context(default_type='entry')\n for rec in self:\n\n if rec.state != 'approve':\n raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'posted' for inv in rec.invoice_ids)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes node attributes and field descriptors to generate the ``modifiers`` node attribute and set it on the provided node. Alters its first argument inplace. | def setup_modifiers(node, field=None, context=None, in_tree_view=False):
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node) | [
"def set_modifier(self, mod):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.modifier\", \r\n self._node._eco_id, mod)\r\n p2e._app.Exec(arg_str)",
"def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parameterize a fixture named 'dummy_list' with an empty list | def pytest_generate_tests(metafunc):
if 'dummy_list' in metafunc.fixturenames:
metafunc.parametrize("dummy_list", [[]]) | [
"def test_default_init(self):\n dset_list = DatasetList()\n\n assert dset_list == []\n assert dset_list.info.type_id == \"list\"\n assert dset_list.info.py_type == \"list\"\n assert len(dset_list) == 0",
"def test_default_list_argument_value():\n arguments = [\n {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
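For context, the hook above fires during collection and only parametrizes tests that actually request the fixture. A hypothetical test module showing the pairing (the file name and test are illustrative):

```python
# test_dummy.py -- run with `pytest test_dummy.py`
def pytest_generate_tests(metafunc):
    # give every test that asks for 'dummy_list' a single parameter: an empty list
    if 'dummy_list' in metafunc.fixturenames:
        metafunc.parametrize("dummy_list", [[]])

def test_starts_empty(dummy_list):
    assert dummy_list == []   # the injected parameter
    dummy_list.append(1)
    assert dummy_list == [1]
```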
CPP wrapper for a grid sub_sampling (method = barycenter for points and features) | def grid_sub_sampling(points, features=None, labels=None, grid_size=0.1, verbose=0):
if (features is None) and (labels is None):
return cpp_subsampling.subsample(points, sampleDl=grid_size, verbose=verbose)
elif labels is None:
return cpp_subsampling.subsample(points, features=features, sampleD... | [
"def subsampleGrid(self, subsample_fac, get_convergence=False):\n # Check that buildGrid has already been called.\n if not hasattr(self, 'im_g1'):\n raise RuntimeError(\"PowerSpectrum.buildGrid must be called before subsampleGrid\")\n\n # Check that subsample_fac is a factor of ngrid... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes neighbors for a batch of queries and supports | def batch_neighbors(queries, supports, q_batches, s_batches, radius):
return cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius) | [
"def compute_neighbours(self, nns):\n self.NNS = []\n for i in range(len(self.embeds)):\n start_time = time.clock()\n write(\"Computing nearest neighbours for embedding no = %d ...\" % i)\n nbrs = NearestNeighbors(n_neighbors=nns, algorithm='ball_tree').fit(self.embeds... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Forges an `OpcUa_ReadRequest` and returns the corresponding `Request`. | def new_read_request(nodeIds, attributes=None):
if attributes is None:
attributes = [AttributeId.Value for _ in nodeIds]
assert len(nodeIds) == len(attributes),\
'There should be the same number of NodeIds, attributes, and datavalues when reading nodes'
# TODO: protect this ... | [
"def _SendReadRequest(self):\n req = {\n 'method': 'IO.read',\n 'params': {\n 'handle': self._stream_handle,\n 'size': 32768,\n }\n }\n\n # Send multiple reads to hide request latency.\n while len(self._pending_read_ids) < 2:\n self._pending_read_ids.append(self._SendRe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Forges an `OpcUa_WriteRequest` and returns the corresponding `Request`. Types for `datavalues` must be provided. For each `pys2opc.types.DataValue`, the type is either found in `datavalue.variantType`, or in the `types` list. If both `datavalue.variantType` and the type in `types` are given, they must be equal. | def new_write_request(nodeIds, datavalues, attributes=None, types=None):
if attributes is None:
attributes = [AttributeId.Value for _ in nodeIds]
assert len(nodeIds) == len(attributes) == len(datavalues),\
'There should be the same number of NodeIds, attributes, and datavalues when ... | [
"def build_write_single_register_value( # pylint: disable=too-many-arguments\n register_type: RegisterType,\n register_address: int,\n register_data_type: DataType,\n register_name: Optional[str],\n write_value: Union[str, int, float, bool, ButtonPayload, SwitchPayload, datetime]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Forges an `OpcUa_BrowseResponse` and returns the corresponding `Request`. | def new_browse_request(nodeIds, maxReferencesPerNode=1000):
# Prepare the request, it will be freed by the Toolkit
payload = allocator_no_gc('OpcUa_BrowseRequest *')
payload.encodeableType = EncodeableType.BrowseRequest
view = allocator_no_gc('OpcUa_ViewDescription *')
view.encod... | [
"def _do_browse(self, options=None):\r\n if options is None:\r\n options = {}\r\n\r\n options = self._prepare_browse_options(options)\r\n request_json = self._prepare_browse_json(options)\r\n\r\n flag, response = self._cvpysdk_object.make_request('POST', self._BROWSE, request_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Internal helper that makes a `Request` to read the missing types, if any, in the provided `datavalues` and `types` list. Return the type list. Used by `write_nodes` implementations. | def helper_maybe_read_types(nodeIds, datavalues, attributes, types, sendFct):
# Note: this function is here to avoid copy/paste in users of new_write_request that wish to use the "auto-type" functionality.
# The sendFct hints that this function may not be in the optimal place.
if attributes is... | [
"def test_get_types_from_request(self):\n test_query_dict = {'location': 'West Hollywood, CA, United States', 'open': 'true', 'radius': '50', 'types': 'amusement_park,cafe,campground,casino,clothing_store,department_store,library,movie_theater,movie_rental,night_club,park,restaurant,shopping_mall,zoo'}\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sends a `request` on link with index `idx` (either a connection id or an endpoint id). When `bWaitResponse`, waits for the response and returns it. Otherwise, returns the `request`, and the response will be available through `get_response`. | def send_generic_request(self, idx, request, bWaitResponse):
reqCtx = int(request.requestContext)
self._dRequestContexts[reqCtx] = request
request.timestampSent = time.time()
self._send_request(idx, request)
if bWaitResponse:
self._sSkipResponse.add(reqCtx)
... | [
"def make_request(self, item):\n url = item['url']\n headers = {}\n if item['bytes_to'] != 0:\n byte_range = 'bytes=%s-%s' % (item['bytes_from'], item['bytes_to'])\n headers['Range'] = byte_range\n try:\n response = self.session.get(url, headers=headers,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receives an OpcUa_Response, creates a Response, and associates it with a Request both ways. It is called for every response received through the LibSub callback_generic_event. The dictionary _dResponseClasses contains classes that will be instantiated with the OpcUa_Response as parameter. It is possible to add new elements to... | def _on_response(self, responsePayload, responseContext, timestamp):
assert responseContext in self._dRequestContexts, 'Unknown requestContext {}.'.format(responseContext)
request = self._dRequestContexts.pop(responseContext)
try:
if responsePayload is None:
return
... | [
"def _process_create_response(self, request, response):\n return self.to_resource(response[self.container])",
"def __generateResponse(self,request,response,code=200):\r\n if isinstance(response, str):\r\n return Response(code,response,{HttpHeader.CONTENT_TYPE: MediaType.TEXT_PLAIN})\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This callback is called when the class receives a Response that is not waited upon. Overriding it is optional, and an override need not call the parent class's on_generic_response. The default implementation of this method stores the response in a double-ended queue which tracks available responses (see pop_respo... | def on_generic_response(self, request, response):
# TODO: Upd doc
assert request.requestContext not in self._dPendingResponses,\
'A request with context {} is still waiting for a response'.format(request.requestContext)
self._dPendingResponses[request.requestContext] = response | [
"def handle_response(self, response):\n d = self.requests.pop(response.id)\n if response.error:\n d.errback(response)\n else:\n d.callback(response)",
"def _on_response(self, responsePayload, responseContext, timestamp):\n assert responseContext in self._dRequestC... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sample data from empirical probability density function using inverse transform sampling. | def _sample_from_pdf(x, pdf, n):
cum_sum = np.cumsum(pdf)
inverse_density_function = interp1d(cum_sum, x)
b = np.zeros(n)
for i in range(len(b)):
u = random.uniform(min(cum_sum), max(cum_sum))
b[i] = inverse_density_function(u)
return b | [
"def inverse_transform_sampling(self, uni_samples):\n if self.distribution == 'normal':\n self.samples = norm.ppf(uni_samples,\n loc=self.theta[0], scale=self.theta[1])\n\n elif self.distribution == 'lognormal':\n self.samples = np.exp(norm.ppf(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Estimates the log(R_5) dispersion probability density function for a given log(R'HK) value using the bivariate KDE distribution presented in Gomes da Silva et al. (2020). Can use values from different catalogues if 'filepath' is not 'None'. | def get_rhk_std_pdf(log_rhk, bw=0.07, subset="all", key_x="log_rhk_med", key_y="log_sig_r5", filepath=None, show_plot=True, save_plot=False, savepath="rhk_std_kde.pdf"):
if not filepath:
filepath = os.path.join(os.path.dirname(__file__), "data.csv")
if log_rhk < -5.5 or log_rhk > -3.6:
print("*... | [
"def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density",
"def get_edensity_from_hu(self,huvalue):\n f = interp1d(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simulate stellar populations with median values of log(R'HK) and log(sigma(R5)) by sampling from the activity-variability level bivariate KDE presented in Gomes da Silva et al. (2020). Can use values from different catalogues if 'filepath' is not 'None'. | def simulate_rhk_population(n_samples, subset='all', bw=0.07, key_x="log_rhk_med", key_y="log_sig_r5", filepath=None, show_plot=True, save_plot=False, savepath1="rhk_sim_hists.pdf", savepath2="rhk_sim_maps.pdf"):
if not filepath:
filepath = os.path.join(os.path.dirname(__file__), "data.csv")
df = pd.re... | [
"def load_sample_data_opt():\n sd_excel_1 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25, 28.125, 30, 31.875, 33.75, 35.625, 37.5, 39.375, 41.25, 43.125, 45, 46.875, 48.75, 50.625, 52.5, 54.375, 56.25, 58.125, 60])\n time_excel_1 = sd_excel_1/50\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts that a trail of edges is a ring in the graph | def assertIsRing(self, graph, edges):
for e in edges:
self.assertIn(
e,
graph,
f"The edge {e} of the ring does not exist in the graph."
)
self.assertGreaterEqual(
len(edges),
3,
"A ring consists ... | [
"def test_extended_sanity(self):\n testgraph = nx.Graph([(0,1),(0,2),(0,3),(2,4),(2,5),(3,6),(3,7),(7,8),(6,8)])\n found, thering = ring_extended(testgraph)\n self.assertTrue(found)\n self.is_ring(testgraph, thering)\n # Uncomment to visualize the graph and returned ring:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TensorFlow has its own wrapper for shapes because some entries could be None. This function turns them into int lists. None will become a 1. Arguments | def tensorshape_to_intlist(tensorshape):
return list(map(lambda j: 1 if j is None else int(j), tensorshape)) | [
"def _to_shape(shape):\n return tuple(int(sh) for sh in shape)",
"def _shapes(x):\n def shape(x):\n try:\n return tuple([int(i) for i in x.shape])\n except Exception: # pylint: disable=broad-except\n return ()\n return tuple(nested_map(shape, x))",
"def normalize_shape(shape):\n\n if sh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
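Since the function only iterates over the shape's entries, a plain list can stand in for a real `tf.TensorShape` when trying it out:

```python
def tensorshape_to_intlist(tensorshape):
    # None (e.g. an unknown batch dimension) becomes 1; everything else becomes int
    return list(map(lambda j: 1 if j is None else int(j), tensorshape))

# a placeholder-like shape (None, 28, 28, 3)
print(tensorshape_to_intlist([None, 28, 28, 3]))   # -> [1, 28, 28, 3]
```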
This constructor takes a reference to a TensorFlow Operation or Tensor or Keras model and then applies the two TensorFlow functions graph_util.convert_variables_to_constants and graph_util.remove_training_nodes to cleanse the graph of any nodes that are linked to training. This leaves us with the nodes you need for inf... | def __init__(self, model, session = None):
output_names = None
if issubclass(model.__class__, tf.Tensor):
output_names = [model.op.name]
elif issubclass(model.__class__, tf.Operation):
output_names = [model.name]
elif issubclass(model.__class__, Sequential):
session = tf.keras.backend.get_session... | [
"def translate(self):\n\t\toperation_types = []\n\t\toperation_resources = []\n\t\treshape_map = {}\n\t\toperations_to_be_ignored = [\"Reshape\", \"Pack\", \"Shape\", \"StridedSlice\", \"Prod\", \"ConcatV2\"]\n\t\toperations_to_be_ignored_without_reshape = [\"NoOp\", \"Assign\", \"Const\", \"RestoreV2\", \"Save... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The constructor has produced a graph_def with the help of the functions graph_util.convert_variables_to_constants and graph_util.remove_training_nodes. translate() takes that graph_def, imports it, and translates it into two lists which then can be processed by an Optimizer object. Return | def translate(self):
operation_types = []
operation_resources = []
reshape_map = {}
operations_to_be_ignored = ["Reshape", "Pack", "Shape", "StridedSlice", "Prod", "ConcatV2"]
operations_to_be_ignored_without_reshape = ["NoOp", "Assign", "Const", "RestoreV2", "SaveV2", "IsVariableInitialized", "Identity"]... | [
"def _build_graph(self):\n self.op_size = len(self._ops)\n op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]\n self._add_connections(op_node_connections)\n for i in range(self.op_size):\n self._uses[i].update(self._ops[i].input_arg_names())\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks which one of the direct ancestor tf.Operations is a constant and returns the underlying tensor as a numpy.ndarray inside a tuple. The matrix is manipulated in a way that it can be used as the left multiplier in the matrix multiplication. Arguments | def matmul_resources(self, op):
inputs = op.inputs
left = inputs[0]
right = inputs[1]
if left.op.type == "Const":
matrix = self.sess.run(left) if not op.get_attr("transpose_a") else self.sess.run(left).transpose()
else:
matrix = self.sess.run(right).transpose() if not op.get_attr("transpose_b") el... | [
"def _transfer_tensor_to_tuple(inputs):\n if isinstance(inputs, Tensor):\n return (inputs,)\n\n return inputs",
"def test_meta_const():\n\n with tf.Graph().as_default():\n one_mt = mt.const(1, \"int32\", \"Const\")\n\n with tf.Graph().as_default():\n another_one_mt = mt(1)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the filter, the stride of the filter, and the padding from op as well as the shape of the input coming into op Arguments | def conv2d_resources(self, op):
inputs = op.inputs
image = inputs[0]
filters = op.inputs[1]
filters = self.sess.run(filters)
image_shape = tensorshape_to_intlist(image.shape)[1:]
strides = op.get_attr('strides')[1:3]
padding_str = op.get_attr('padding').decode('utf-8')
pad_top, pad_left, p... | [
"def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_padding_from_tf_same(\n image_dimensions, kernel_size, stride\n )",
"def convolve_complex_1d(\n tensor: tf.Tensor,\n filter: tf.Tensor,\n stride: int = 1,\n padding: str = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same as ``lisa.target.Target.pull`` but will cache the file in the ``target.res_dir`` folder, based on the source path. | def cached_pull(self, src, dst, **kwargs):
cache = (self._cache_dir / 'pull')
cache.mkdir(parents=True, exist_ok=True)
m = hashlib.sha256()
m.update(src.encode('utf-8'))
key = m.hexdigest()
cached_path = cache / key / os.path.basename(src)
if not cached_path.exi... | [
"def _pull(paths: List[str]):\n pull_paths_from_storage(project_context.repository, *paths)",
"def pull(self, remote = 'origin'):",
"def refresh_source(options):\n cd(options.source, options.dry_run)\n if options.update:\n update_existing_repo(options.dry_run)\n else:\n clone_repo(opti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the given devlib module is available. | def is_module_available(self, module):
if module not in _DEVLIB_AVAILABLE_MODULES:
raise ValueError(f'"{module}" is not a devlib module')
try:
getattr(self, module)
except Exception: # pylint: disable=broad-except
return False
else:
return... | [
"def module_check():\n\tstatus = True\n\ttry:\n\t\timport fpdf\n\t\tprint '[+] Fpdf module installed.'\n\texcept ImportError as e:\n\t\tstatus = False\n\t\tif \"fpdf\" in repr(e):\n\t\t\tprint \"[-] FPDF module not installed. Run the following commands:\"\n\t\t\tprint \"[-] python -m pip install fpdf\"\n\ttry:\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List our attributes plus the ones from the underlying target, and the devlib modules that could be loaded on demand. | def __dir__(self):
attrs = set(super().__dir__()) | set(dir(self.target)) | self._devlib_loadable_modules
return sorted(attrs) | [
"def build_attributes(self):\n pass",
"def print_attribute_list(self):\n p = prettytable.PrettyTable((\"VISA name\", \"Constant\", \"Python name\", \"val\"))\n for attr in getattr(self.current, \"visa_attributes_classes\", ()):\n try:\n val = self.current.get_visa_at... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Install tools additional to those specified in the test config 'tools' field | def install_tools(self, tools):
def bin_path(tool):
binary = os.path.join(ASSETS_PATH, 'binaries', self.abi, tool)
if not os.path.isfile(binary):
binary = os.path.join(ASSETS_PATH, 'binaries', 'scripts', tool)
return binary
tools = sorted(set(tools) ... | [
"def tools(c):\n for tool in TOOLS:\n if not which(tool):\n c.run(f\"{VENV_BIN}/python -m pip install {tool}\", pty=PTY)",
"def install_all():\n wf_list = list(set().union(verify_list(), verify_directory()))\n wf_list.sort()\n\n tools = {}\n for wflow in wf_list:\n WFC.impo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Context manager that lets you freeze the userspace. | def freeze_userspace(self):
logger = self.logger
if not self.is_rooted:
logger.warning('Could not freeze userspace: target is not rooted')
cm = nullcontext
elif not self.is_module_available('cgroups'):
logger.warning('Could not freeze userspace: "cgroups" devl... | [
"def softModCtx(*args, **kwargs):\n\n pass",
"def softModContext(*args, **kwargs):\n\n pass",
"def nonsecure_lock(self) -> _MemAttrContext:\n return self.hnonsec_lock(NONSECURE)",
"def __enter__(self) -> Context:\n return self",
"def jvm_context_manager(parent_task, current_task):\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Context manager that lets you disable all idle states | def disable_idle_states(self):
logger = self.logger
logger.info('Disabling idle states for all domains')
try:
cpuidle = self.cpuidle
except AttributeError:
logger.warning('Could not disable idle states, cpuidle devlib module is not loaded')
cm = nullc... | [
"def deactivate(self, context):\n context.deactivate()",
"def deactivate(self, context):\n pass",
"def idle(self):\n self._change_state(\"idle\")",
"def noop_context():\n yield",
"def to_idle(self):\r\n\r\n\t\tself.__send_extended_byte_array(self.MODE_IDLE, [])",
"def keep_system_active():... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorates a given function to execute remotely using | def remote_func(self, **kwargs):
def wrapper_param(f):
@functools.wraps(f)
def wrapper(*f_args, **f_kwargs):
return self.execute_python(f, f_args, f_kwargs, **kwargs)
return wrapper
return wrapper_param | [
"def wrap_with_server(f, server):\n if not has_ls_param_or_annotation(f, type(server)):\n return f\n\n if asyncio.iscoroutinefunction(f):\n\n async def wrapped(*args, **kwargs):\n return await f(server, *args, **kwargs)\n\n else:\n wrapped = functools.partial(f, server)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
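The decorator-factory pattern above is easier to see with a local stand-in for `execute_python`; the `Host` class and its fake `execute_python` below are illustrative, not the real class:

```python
import functools

class Host:
    def execute_python(self, f, f_args, f_kwargs, **kwargs):
        # stand-in: the real method would ship f to the target and run it there
        return ('remote', f(*f_args, **f_kwargs))

    def remote_func(self, **kwargs):
        def wrapper_param(f):
            @functools.wraps(f)
            def wrapper(*f_args, **f_kwargs):
                return self.execute_python(f, f_args, f_kwargs, **kwargs)
            return wrapper
        return wrapper_param

host = Host()

@host.remote_func()
def add(a, b):
    return a + b

print(add(2, 3))   # -> ('remote', 5)
```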
Kind of a broadcast version of the `torch.gather` function. Currently this supports inputs `x` with 3 dimensions and `indices` with 2 dimensions. | def gather_row(x, indices):
assert (
len(x.size()) == 3 and len(indices.size()) == 2
), "not supported input tensor shape"
batch_size, sequence_size, hidden_size = x.size()
indices += torch.arange(0, batch_size * sequence_size, sequence_size).to(x.device)[
:, None
]
out = x.view... | [
"def gather(data, axis, indices):\n return cpp.gather(data, axis, indices)",
"def batch_gather(tensor, indices):\n shape = list(tensor.shape)\n flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])\n indices = tf.convert_to_tensor(indices)\n offset_shape = [shape[0]] + [1] * (indices.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
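The row's body is truncated right after the offset computation; here is a sketch of the same flatten-and-offset trick. This variant uses `reshape` and avoids the original's in-place `indices +=`:

```python
import torch

def gather_row(x, indices):
    # x: (batch, seq, hidden); indices: (batch, k) positions along the seq axis
    batch_size, sequence_size, hidden_size = x.size()
    offsets = torch.arange(0, batch_size * sequence_size, sequence_size,
                           device=x.device)[:, None]
    flat = x.reshape(batch_size * sequence_size, hidden_size)
    return flat[(indices + offsets).reshape(-1)].view(batch_size, -1, hidden_size)

x = torch.randn(2, 5, 3)
idx = torch.tensor([[0, 4], [1, 2]])
out = gather_row(x, idx)                 # shape (2, 2, 3)
assert torch.equal(out[0, 1], x[0, 4])
assert torch.equal(out[1, 0], x[1, 1])
```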
`torch.nn.functional.gumbel_softmax(vector)` does not work if some elements of `vector` should be masked. This performs a gumbel_softmax on just the nonmasked portions of `vector`. Passing `None` in for the mask is also acceptable; you'll just get a regular gumbel softmax. `vector` can have an arbitrary number of dimen... | def masked_gumbel_softmax(
vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1, tau: float = 1,
) -> torch.Tensor:
if mask is None:
result = torch.nn.functional.gumbel_softmax(vector, dim=dim, tau=tau)
else:
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
... | [
"def masked_softmax(\n vector: torch.Tensor,\n mask: torch.BoolTensor,\n dim: int = -1,\n memory_efficient: bool = False,\n) -> torch.Tensor:\n if mask is None:\n result = torch.nn.functional.softmax(vector, dim=dim)\n else:\n while mask.dim() < vector.dim():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
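The row cuts off after the mask is broadcast up to `vector`'s rank; one plausible completion fills masked logits with the dtype minimum before calling the regular `gumbel_softmax`, so masked positions end up with essentially zero probability. The fill strategy is an assumption, not the row's exact code:

```python
import torch

def masked_gumbel_softmax(vector, mask, dim=-1, tau=1.0):
    if mask is None:
        return torch.nn.functional.gumbel_softmax(vector, dim=dim, tau=tau)
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # push masked logits toward -inf so the gumbel noise cannot resurrect them
    masked_vector = vector.masked_fill(~mask, torch.finfo(vector.dtype).min)
    return torch.nn.functional.gumbel_softmax(masked_vector, dim=dim, tau=tau)

logits = torch.randn(2, 4)
mask = torch.tensor([[True, True, False, False],
                     [True, True, True, False]])
print(masked_gumbel_softmax(logits, mask, tau=0.5))  # masked slots ~ 0
```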
Convert source and target text to proper word ids | def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text.split('\n')]
target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] + [target_vocab_to_int['<EOS>']... | [
"def convert_word_to_wordId(source_line, target_line):\n source_words = source_line.split()\n target_words = target_line.split()\n source_id = [SourceWLexicon[word.lower()] for word in source_words]\n target_id = [TargetWLexicon[word.lower()] for word in target_words]\n return source_id, target_id",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
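A toy run of the conversion above; the two-word vocabularies are made up, and note the `<EOS>` id appended to each target sentence:

```python
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    source_id_text = [[source_vocab_to_int[w] for w in line.split()]
                      for line in source_text.split('\n')]
    target_id_text = [[target_vocab_to_int[w] for w in line.split()] + [target_vocab_to_int['<EOS>']]
                      for line in target_text.split('\n')]
    return source_id_text, target_id_text

source_vocab_to_int = {'hello': 0, 'world': 1}                 # hypothetical vocab
target_vocab_to_int = {'<EOS>': 0, 'bonjour': 1, 'monde': 2}   # hypothetical vocab
print(text_to_ids('hello world', 'bonjour monde',
                  source_vocab_to_int, target_vocab_to_int))
# -> ([[0, 1]], [[1, 2, 0]])
```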
Get a random piece on the board. | def getRandPiece():
row = random.randint(0, 11)
# Board is a weird L shape
col = random.randint(0, 5 if row < 6 else 11)
# Return move in row (letter) + col (number) grid reference
# e.g. A3 is represented as 0,2
return (row, col) | [
"def get_random_move(self):\n return random.choice(self.possible_moves)",
"def random_strategy(player, board):\n return random.choice(othello.legal_moves(player, board))",
"def get_piece_at(self, x, y) -> object:\n return self.board[y-1][x-1]",
"def __get_random_move(self, possible_moves):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
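The row's comment says A3 is represented as (0, 2), i.e. letter = row, number = column + 1; a hypothetical `to_grid_reference` helper makes the conversion explicit:

```python
import random

def getRandPiece():
    row = random.randint(0, 11)
    # board is an L shape: rows 0-5 have 6 columns, rows 6-11 have 12
    col = random.randint(0, 5 if row < 6 else 11)
    return (row, col)

def to_grid_reference(pos):
    # hypothetical helper: (0, 2) -> "A3"
    row, col = pos
    return chr(ord('A') + row) + str(col + 1)

print(to_grid_reference((0, 2)))          # -> A3
print(to_grid_reference(getRandPiece()))  # e.g. -> F11
```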
Test that if an order is saved, the related pricing is recalculated and the order updated. | def test_pricing_updated_on_order_save(self):
order = OrderFactory(vat_status=VATStatus.UK, discount_value=0)
assert order.vat_cost > 0
order.vat_status = VATStatus.OUTSIDE_EU
order.save()
order.refresh_from_db()
assert order.vat_cost == 0 | [
"def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if an unrelated field gets updated, the pricing stays the same. | def test_pricing_unchanged_if_update_unrelated(self):
order = OrderFactory()
pre_update_pricing = get_pricing_from_order(order)
order.description = 'updated description'
order.save()
order.refresh_from_db()
post_update_pricing = get_pricing_from_order(order)
as... | [
"def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if a new assignee is added, the pricing on the order changes. | def test_pricing_update_on_assignee_created(self):
order = OrderFactory(discount_value=0)
assert order.total_cost > 0
pre_update_total_cost = order.total_cost
OrderAssigneeFactory(order=order)
order.refresh_from_db()
assert order.total_cost > 0
post_update_total... | [
"def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if an assignee is updated, the pricing on the order changes. | def test_pricing_updated_on_assignee_updated(self):
order = OrderFactory(discount_value=0)
assert order.total_cost > 0
pre_update_total_cost = order.total_cost
assignee = order.assignees.first()
assignee.estimated_time += 100
assignee.save()
order.refresh_from_d... | [
"def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n pos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that if an assignee is deleted, the pricing on the order changes. | def test_pricing_updated_on_assignee_deleted(self):
order = OrderFactory(discount_value=0)
assert order.total_cost > 0
pre_update_total_cost = order.total_cost
assignee = order.assignees.first()
assignee.delete()
order.refresh_from_db()
post_update_total_cost = ... | [
"def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n pos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
works for 3 sec and returns i**2 + j | def fun(i, j):
start = time.time()
while time.time() - start < 3.:
0. + 0. # pointless operation to see the CPU activity rising (top, htop, ...)
return i ** 2 + j | [
"def example2(S):\n n = len(S)\n total = 0\n for j in range(0, n, 2): # note the increment of 2\n total += S[j]\n return total",
"def add3(i):\n pass",
"def _sum(a, i, j):\n if i > j: # T(n) = 0 \n return 0\n if i == j: # T(n)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create new CloudFormation Stack from the template | def launch(args, config, cf_conn, template):
print("Creating CloudFormation Stack %s..." % config['stack_name'])
stack_id = cf_conn.create_stack(
config['stack_name'],
template_body=template.to_json(),
parameters=cf_params(),
tags=config['tags'],
capabilities=['CAPABILITY... | [
"def create_stack(self):\n command = \"cfn-create-stack \" + self.stack_name + \" -f \" + self.template_file\n if (self.parameters is not None):\n command += \" -p \\\"\" + self.parameters + \"\\\"\"\n run_command(command)",
"def create_stack(self, **kwargs):\n stack_name = kwargs.get('stack_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update an existing CloudFormation Stack | def update(args, config, cf_conn, template):
print("Updating CloudFormation Stack %s..." % config['stack_name'])
stack_id = cf_conn.update_stack(
config['stack_name'],
template_body=template.to_json(),
parameters=cf_params(),
tags=config['tags'],
capabilities=['CAPABILITY... | [
"def update(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n parameters = [\n {\"ParameterKey\": key, \"ParameterValue\": value}\n for key, value in self.parameters.items()\n ]\n try:\n client.update_stack(\n StackName=s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes an existing CloudFormation Stack | def delete(args, config, cf_conn):
# Delete an existing CloudFormation Stack with same name
print("Deleting CloudFormation Stack %s..." % config['stack_name'])
resp = cf_conn.delete_stack(
config['stack_name'],
)
print(resp) | [
"def delete(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n client.delete_stack(StackName=self.stack_id)",
"def destroy(stack, cf_resource):\n print(f\"Deleting {stack.name}.\")\n stack.delete()\n print(\"Waiting for stack removal.\")\n waiter = cf_resource.meta.clie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
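delete_stack only starts the teardown. A small polling sketch, not part of the source, that waits for completion on the same boto v2 connection:

import time

def wait_for_deletion(cf_conn, stack_name, interval=15):
    # describe_stacks raises once the stack no longer exists,
    # which we treat as successful deletion.
    while True:
        try:
            stacks = cf_conn.describe_stacks(stack_name)
        except Exception:
            return
        if stacks and stacks[0].stack_status == 'DELETE_FAILED':
            raise RuntimeError("deletion of %s failed" % stack_name)
        time.sleep(interval)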
Describes a CloudFormation Stack and prints the outputs | def output(args, config, cf_conn):
print("Describing CloudFormation Stack %s..." % config['stack_name'])
    resp = cf_conn.describe_stacks(
config['stack_name']
)
    print('---')
print('region: %s' % args['--region'])
for output in resp[0].outputs:
print("%s: %s" % (output.description, o... | [
"def describe(self, req):\r\n self._enforce(req, 'DescribeStacks')\r\n\r\n def format_stack_outputs(o):\r\n keymap = {\r\n engine_api.OUTPUT_DESCRIPTION: 'Description',\r\n engine_api.OUTPUT_KEY: 'OutputKey',\r\n engine_api.OUTPUT_VALUE: 'OutputV... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that all the objects in the object_dep list remain in the database. Call the callback if this isn't true. | def ensure_object_permanence(self, object_dep, cb):
if object_dep is None or cb is None:
return
self.ensuring_objects = True
self.ensuring_object_cb = cb
self.ensuring_object_dep = object_dep | [
"def check(self):\r\n gc.collect()\r\n dead = self.allNames[:]\r\n alive = []\r\n for k in self.objs:\r\n dead.remove(k)\r\n alive.append(k)\r\n print(\"Deleted objects:\", dead)\r\n print(\"Live objects:\", alive)",
"def _objects_changed(self, old, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the position of a fake perception object. | def set_fake_position(self, pos):
raise NotImplementedError() | [
"def set_player_position(self, position):",
"def set_random_position(self):\n self.random_coords = self.shape.\\\n set_random_position(self.field_width - 15)\n\n self.x = self.random_coords + 1\n self.y = 1\n\n self.move(self.x, self.y)\n self.update()",
"def test_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
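Since set_fake_position is an abstract stub, a concrete subclass would simply record the injected position. A sketch; the base class and attribute names are assumptions:

class StubPerception(Perception):
    def set_fake_position(self, pos):
        # store the injected position for later reads by the perception layer
        self._fake_position = pos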
Check if the test coverage of Matrix.cpp is 100% | def test_Matrix_coverage(self):
coverage = "not found"
        # text=True decodes stdout to str so startswith/split work on Python 3
        for line in check_output(["python3", "coverage.py", "-r", ".", "-f", "Matrix.cpp"], text=True).split("\n"):
            if line.startswith("Matrix.cpp"):
                coverage = line.split()[3]
self.assertEqual(coverage, "100%", msg="Test coverage is not 100%") | [
"def test_correct_estimates(self):\n self.assertEqual(self.ajive.common.rank, 1)\n self.assertEqual(self.ajive.blocks['x'].individual.rank, 1)\n self.assertEqual(self.ajive.blocks['y'].individual.rank, 2)",
"def test_sim_i_all():\n input_df = pd.read_csv(data_path + \"/playground_df_cleane... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save matrix M to file using the specified format | def save(self, M, filename):
m, n = M.shape
np.savetxt(filename, M, fmt='%d', header="{} {}".format(m, n), comments='') | [
"def save_M(M, f_out):\n _ATOM = '%s%5i %-4s%3s %c%4i%c %8.3f%8.3f%8.3f%6.2f%6.2f %4s%2s%2s\\n'\n\n def get_ATOM_line(atom_i, name, resid, x, y, z, aa_type):\n \"\"\"\n Write PDB ATOM line.\n \"\"\"\n args=('ATOM ', atom_i, name, aa_type, 'A', resid, ' ', x, y, z, 0.0, 0.0, 'X'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
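A matching loader for this format, sketched here rather than taken from the source: np.loadtxt skips the "m n" header line, and atleast_2d restores a 2-D shape for single-row matrices.

import numpy as np

def load(filename):
    # skip the "m n" header written by save(), read the integer body
    M = np.loadtxt(filename, dtype=int, skiprows=1)
    return np.atleast_2d(M)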
Initialize a ProtocolEngine transport for use in a child thread. This adapter allows a client to make blocking command calls on its thread to the asynchronous ProtocolEngine running with an event loop in a different thread. | def __init__(self, engine: ProtocolEngine, loop: AbstractEventLoop) -> None:
self._engine = engine
self._loop = loop | [
"def _InitTransport(self):\n if self.transport is None:\n self.transport = \\\n self.transport_class(self._GetAddress(),\n timeouts=self.timeouts,\n allow_non_master=self.allow_non_master)",
"def startProtocol(self):\n reactor.resolve(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
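Such a transport bridges threads by submitting coroutines to the engine's loop and blocking on the result. A hedged sketch of the dispatch method; the ProtocolEngine coroutine it wraps (add_and_execute_command) is an assumption:

from asyncio import run_coroutine_threadsafe

def execute_command(self, request):
    # submit the coroutine to the engine's loop (running in another thread)
    # and block this thread on the concurrent.futures.Future it returns
    future = run_coroutine_threadsafe(
        self._engine.add_and_execute_command(request=request),
        self._loop,
    )
    return future.result()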
Get a view of the Protocol Engine's state. | def state(self) -> StateView:
return self._engine.state_store | [
"def get_state(self):\n return self.StateEngine(self.symbols)",
"def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))",
"def get_state(self):\n \n return self._instance.state",
"def v(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Benchmarks argmax over fractions. | def bench_argmax_over_fracs(num_values):
fractions = MultiArray(sizes=[num_values, 3], value_type=sint)
fractions.assign_all(1)
argmax_over_fracs(fractions) | [
"def maximum_basic(a: float, b: float) -> float:",
"def realmax():\n return np.finfo(float).max",
"def max(x):\n\treturn np.max(x)",
"def argmaxn(arr, num_vals):\n return arr.argsort()[-num_vals:][::-1]",
"def test_maximum():\n test_maximum_case(0, [0, 0, 0], 0)\n test_maximum_case(1, [2, 0, 0],... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load logging.json config and set specified logging settings. | def setup_logging():
with open(CONFIG_JSON_PATH) as f:
logging_config = json.load(f)
if DEBUG:
for logger_name, logger_info in logging_config["loggers"].items():
logger_info["level"] = "DEBUG"
logging.config.dictConfig(logging_config) | [
"def load_logging_config():\n log_config_path = os.path.join(constants.CORE_CONF_DIR, \"logging.conf\")\n with open(log_config_path, \"r\") as log_config_file:\n log_config = json.load(log_config_file)\n logging.config.dictConfig(log_config)",
"def configure_logging(logging_config: dict[str, A... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
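For reference, the smallest shape of configuration that dictConfig, and hence setup_logging, accepts, written as the equivalent Python dict; the formatter, handler, and logger names are illustrative:

import logging.config

logging_config = {
    "version": 1,
    "formatters": {"plain": {"format": "%(asctime)s %(levelname)s %(name)s %(message)s"}},
    "handlers": {"console": {"class": "logging.StreamHandler", "formatter": "plain"}},
    "loggers": {"app": {"level": "INFO", "handlers": ["console"]}},
}
logging.config.dictConfig(logging_config)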
Example particle HDF5 file generated by OSIRIS 4.4.4. The associated data types are taken from an example output file. | def make_osiris_444_particles_hdf(path: Path, data: np.ndarray, name: str):
# makes sure we have data with a 'charge'
if "q" not in data.dtype.fields:
raise ValueError("structured dataset with a field 'q' required")
with h5.File(path, mode="w") as fp:
# root attrs
fp.attrs["NAME"] =... | [
"def make_osiris_dev_particles_hdf(path: Path, data: np.ndarray, name: str):\n # makes sure we have data with a 'charge'\n if \"q\" not in data.dtype.fields:\n raise ValueError(\"structured dataset with a field 'q' required\")\n\n with h5.File(path, mode=\"w\") as fp:\n # root attrs\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Example particle HDF5 file generated by the dev branch of OSIRIS (May 2021). The associated data types are taken from an example output file. | def make_osiris_dev_particles_hdf(path: Path, data: np.ndarray, name: str):
# makes sure we have data with a 'charge'
if "q" not in data.dtype.fields:
raise ValueError("structured dataset with a field 'q' required")
with h5.File(path, mode="w") as fp:
# root attrs
fp.attrs["NAME"] =... | [
"def make_osiris_444_particles_hdf(path: Path, data: np.ndarray, name: str):\n # makes sure we have data with a 'charge'\n if \"q\" not in data.dtype.fields:\n raise ValueError(\"structured dataset with a field 'q' required\")\n\n with h5.File(path, mode=\"w\") as fp:\n # root attrs\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a granule list file from a file path pattern matching the granules. If a granule has already been ingested with the same md5sum signature, it is not included in this list. When deconstruct_nfs is True, the paths will be shown as viewed on the nfs server and not as they are mounted on the nfs client where the script r... | def create_granule_list(file_path_pattern, dataset_ingestion_history_manager,
granule_list_file_path, deconstruct_nfs=False,
date_from=None, date_to=None,
forward_processing=False):
file_list = get_file_list(file_path_pattern)
logger.info... | [
"def pdm_gfal_list_file(props, root, result):\n listing = props.copy()\n listing['name'] = os.path.split(root)[1]\n result[os.path.split(root)[0]] = [listing]",
"def granules ( path, prod, syn_time, coll='006', nsyn=8 ):\n\n # Determine synoptic time range\n # -----------------------------\n dt ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the configuration and launch the ingestion for the given collection row | def collection_row_callback(collection,
collection_config_template,
granule_file_list_root_path,
dataset_configuration_root_path,
history_root_path,
deconstruct_nfs=False,
... | [
"def test_create_ingestion_configuration(self):\n\n\n #------------------------------------------------------------------------\n # Make assertions\n #----------------------------------------------------------------------\n # checking that an ingestion_configuration_id gets successfully ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all places from places_backup.csv and arrange each place's attributes | def load_places(self):
file_read = open('places_backup.csv', 'r')
for place in file_read:
place_string = place.split(",")
self.places.append(
[Place(place_string[0], place_string[1], int(place_string[2]), place_string[3].strip())])
file_read.close(... | [
"def load_places():\r\n places_list = []\r\n file_open = csv.reader(open(\"places.csv\"))\r\n for row in file_open:\r\n places_list.append(row)\r\n places_list = sorted(places_list, key=lambda places_list: places_list[2])\r\n return places_list",
"def parse_places_from_csv(file: IO) -> Itera... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the place list and count the number of places that still need to be visited | def count_unvisited_places(self):
unvisited_places = 0
        for place in self.places:
            # status 'y' marks a place that has not been visited yet
            if place[0].status == 'y':
                unvisited_places += 1
return unvisited_places | [
"def count_visited_places(self):\r\n visited_places = 0\r\n for place in self.places:\r\n if place[0].status == 'n':\r\n visited_places += 1\r\n return visited_places",
"def display_visited_places(places_list):\r\n visited_list = []\r\n for place in range(0, le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the place list and count the number of visited places | def count_visited_places(self):
visited_places = 0
        for place in self.places:
            # status 'n' marks a place that has already been visited
            if place[0].status == 'n':
                visited_places += 1
return visited_places | [
"def count_unvisited_places(self):\r\n unvisited_places = 0\r\n for place in self.places:\r\n if place[0].status == 'y':\r\n unvisited_places += 1\r\n return unvisited_places",
"def display_visited_places(places_list):\r\n visited_list = []\r\n for place in ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save all the changes from the user to places_backup.csv | def save_file(self):
file_write = open('places_backup.csv', 'w')
for place in self.places:
file_write.write(
place[0].name + "," + place[0].country + "," + str(place[0].priority) + "," + place[
0].status + "\n")
file_write.close() | [
"def _saveCSV( self ):",
"def save_rewards(self):\n with open(self.rewards_path, 'w') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(self.global_rewards)",
"async def save(self):\r\n\r\n # Loads all inactive users from the old file and adds to the n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the profile is pure | def is_pure_profile(game, prof):
# For an asymmetric game, this will always return false, but then it
# shouldn't be an issue, because pure strategy regret will be more
# informative.
pure = np.any(np.add.reduceat(prof, game.role_starts) > 1.5)
utils.check(
game.is_profile(np.asarray(prof, i... | [
"def isPure(self):\n return self.pure",
"def _profile_flag(self, group):\n if self.core.metadata.groups[group].is_profile:\n return 'yes'\n else:\n return 'no'",
"def verify_profile_availability(self, profile):\n pass",
"def is_pure(self):\n return \"py... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the welfare of a profile or mixture | def welfare(game, prof):
if is_pure_profile(game, prof): # pylint: disable=no-else-return
return regret.pure_social_welfare(game, np.asarray(prof, int)).item()
else:
return regret.mixed_social_welfare(game, prof).item() | [
"def social_welfare(game, profile, role=None):\n\tif is_pure_profile(profile):\n\t\tvalues = (game.values[game[profile]] * game.counts[game[profile]])\n\telif is_mixture_array(profile):\n\t\tplayers = np.array([game.players[r] for r in game.roles])\n\t\tvalues = (game.getExpectedPayoff(profile) * players)\n\telif i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add parser for payoff calculation | def add_parser(subparsers):
parser = subparsers.add_parser(
"payoffs",
aliases=["pay"],
help="""Compute payoffs""",
description="""Compute payoff relative information in input game of
specified profiles.""",
)
parser.add_argument(
"--input",
"-i",
... | [
"def _parse_price_original(self, response, add_xpath=None):\n xpathes = '//*[@id=\"price\"]/.//*[contains(@class, \"a-text-strike\")]' \\\n '/text()'\n\n if add_xpath:\n xpathes += ' |' + add_xpath\n\n price_original = self._is_empty(\n response.xpath(xpat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
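Typical wiring for this style of subcommand registration, shown as a usage sketch rather than source code; the program name is illustrative:

import argparse

parser = argparse.ArgumentParser(prog="ga")
subparsers = parser.add_subparsers(dest="command")
add_parser(subparsers)  # registers "payoffs" (alias "pay") as a subcommand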
Add each entity as an Alarm Control Panel. | def async_add_alarm_entity(config: dict):
entity_id = "{}.{}".format(PLATFORM, slugify(config["name"]))
alarm_entity = AlarmoAreaEntity(
hass=hass,
entity_id=entity_id,
name=config["name"],
area_id=config["area_id"],
)
hass.data[const.DOMA... | [
"def list_alarms(self, entity):\r\n uri = \"/%s/%s/alarms\" % (self.uri_base, utils.get_id(entity))\r\n resp, resp_body = self.api.method_get(uri)\r\n return [CloudMonitorAlarm(self, dct, entity)\r\n for dct in resp_body[\"values\"]]",
"def spinup_alarms(self, database_class):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a unique ID to use for this entity. | def unique_id(self):
return f"{self.entity_id}" | [
"def unique_id(self):\n return self.id",
"def UniqueEntityId(self) -> str:",
"def id(self):\r\n if not hasattr(self, '_id'):\r\n raise MissingID\r\n return self._id",
"def id(self):\n return id(self._getobj_())",
"def UniqueId(self) -> str:",
"def get_entity_id(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the arm mode. | def arm_mode(self):
return self._arm_mode | [
"def getRSelMode(self,targetDevice):\n if (targetDevice in self.adc_based_acquisition):\n return \"e5x\"\n elif (targetDevice in [\"SAML22\"]):\n return \"l22\"\n elif (targetDevice in [\"PIC32CZCA80\", \"PIC32CZCA90\"]):\n return \"pic32cz\"\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send arm custom_bypass command. | async def async_alarm_arm_custom_bypass(self, code=None, skip_code=False):
_LOGGER.debug("alarm_arm_custom_bypass")
await self.async_handle_arm_request(STATE_ALARM_ARMED_CUSTOM_BYPASS, code=code, skip_code=skip_code) | [
"def disableProtection(self):\n self.write(\"PROT:OVP 0\")\n self.write(\"PROT:OCP 0\")\n self.write(\"PROT:OPP 0\")",
"def _iac_dont(self, option):\n logger.debug('send IAC DONT %s', name_option(option))\n self.send_str(bytes(''.join((IAC, DONT, option))))",
"def send_negativ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Arm the alarm or switch between arm modes. | async def async_arm(self, arm_mode, **kwargs):
skip_delay = kwargs.get("skip_delay", False)
bypass_open_sensors = kwargs.get("bypass_open_sensors", False)
self._arm_mode = arm_mode
self._bypass_mode = bypass_open_sensors
leave_delay = self._config[const.ATTR_MODES][arm_mode]["ex... | [
"def do_arm(self, unused_line): # pylint: disable=invalid-name\n self._CheckState([actuator_types.kActuatorStateInit])\n self._CheckServosSelected()\n set_state_msg = pack_avionics_messages.ServoSetStateMessage()\n (set_state_msg\n .state_command) = actuator_types.kActuatorStateCommandArm\n (set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |