Columns:
  query (string): lengths 9 to 9.05k
  document (string): lengths 10 to 222k
  negatives (list): lengths 19 to 20
  metadata (dict)
Return the filename of THIS script, with compatibility for frozen executables.
def __get_this_filename(): return __file__ if not getattr(sys, 'frozen', False) else sys.executable
[ "def script_name(self):\n return os.path.basename(self.script)", "def _get_filename(self) -> \"std::string\" :\n return _core.SATImportOptions__get_filename(self)", "def _get_filename(self) -> \"std::string\" :\n return _core.ImportOptions__get_filename(self)", "def workflow_filename():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if path2Test is a subpath of path. Only for clean, absolute (unix) paths without trailing /. No filesystem access involved!
def is_subpath(path2Test, path_, allowEquals=False):
    assert path2Test[-1] != '/' and path_[-1] != '/'
    if allowEquals and path2Test == path_:
        return True
    return path2Test.startswith(path_ + '/')
[ "def _isSubpathInPath(self, path, subpath):\n path = self._getAbsPath(path)\n subpath = self._getAbsPath(subpath)\n\n # If the parent path is the root directory ('/') or otherwise already\n # ends in a separator character, we need to strip the separator from\n # the end so we don'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The exercise knows when it was solved by a particular user.
def test_wasSolvedBy(self): store = Store() exercise = makeExercise(store=store) someUser = User(store=store, email="foo@example.com") self.assertFalse(exercise.wasSolvedBy(someUser)) exercise.solvedBy(someUser) self.assertTrue(exercise.wasSolvedBy(someUser)) s...
[ "def wasSolved(self):\n return self.exercise.wasSolvedBy(self.user)", "def is_solved_by(self, user):\n try:\n return user.student.solution_set.filter(task=self).count() == 1\n except ObjectDoesNotExist:\n # TODO: Handle properly\n return False", "def meet(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The current exercise has been marked as solved by the current user.
def wasSolved(self): return self.exercise.wasSolvedBy(self.user)
[ "def trackEx(self, userexercise):\n exname = userexercise.exercise.name\n idx = self.exIndexTracked(exname)\n if idx is not None:\n self.tracked[idx].combine(userexercise)\n elif self.exIndexUntracked(exname) is None:\n self.tracked.append(userexercise)\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A user can get previously solved exercises.
def test_getSolvedExercises(self):
    response = self.locator.getExercises(solved=True)
    exercises = list(response["exercises"])
    exercises.sort(key=lambda d: d["identifier"])
    self.assertEqual(exercises, [
        {b"title": u"Exercise 1", b"identifier": b"1"},
    ])
[ "def test_load_manage_exercises(self):\n resp = self.client.get('/networking/Fall2012/problemsets/P2/manage_exercise', HTTP_USER_AGENT=self.userAgent)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(len(resp.context['exercises']), 2)", "def exercises_from_workout(workout_id):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A user can get details about a solved exercise.
def test_getSolvedExerciseDetails(self):
    details = self.locator.getExerciseDetails(identifier=b"1")
    self.assertEqual(details, {
        b"identifier": b"1",
        b"title": u"Exercise 1",
        b"description": u"\N{CLOUD}",
        b"solved": True
    })
[ "def show_exercise(name_of_person):\n f = open((name_of_person + \"_exercise.txt\"), \"r\")\n print(f.read())\n f.close()", "def test_getUnsolvedExerciseDetails(self):\n details = self.locator.getExerciseDetails(identifier=b\"2\")\n self.assertEqual(details, {\n b\"identifier\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A user can get details about an unsolved exercise.
def test_getUnsolvedExerciseDetails(self):
    details = self.locator.getExerciseDetails(identifier=b"2")
    self.assertEqual(details, {
        b"identifier": "2",
        b"title": u"Exercise 2",
        b"description": u"\N{CLOUD}",
        b"solved": False
    })
[ "def test_getSolvedExerciseDetails(self):\n details = self.locator.getExerciseDetails(identifier=b\"1\")\n self.assertEqual(details, {\n b\"identifier\": b\"1\",\n b\"title\": u\"Exercise 1\",\n b\"description\": u\"\\N{CLOUD}\",\n b\"solved\": True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When attempting to get details for an exercise that doesn't exist, an error is raised.
def test_missingExercise(self): self.assertRaises(UnknownExercise, self.locator.getExerciseDetails, identifier="BOGUS")
[ "def test_getSolvedExerciseDetails(self):\n details = self.locator.getExerciseDetails(identifier=b\"1\")\n self.assertEqual(details, {\n b\"identifier\": b\"1\",\n b\"title\": u\"Exercise 1\",\n b\"description\": u\"\\N{CLOUD}\",\n b\"solved\": True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When the user has no secret yet, a new secret is created. Asserts that the secret requests a sufficient number of bytes from urandom.
def test_new(self):
    self.patch(os, "urandom", self._urandom)
    self.assertEqual(self.store.query(Secret).count(), 0)
    secret = Secret.forUser(self.user)
    self.assertEqual(secret.entropy, "sikrit")
    self.assertEqual(secret.user, self.user)
[ "def random_secret():\n while True:\n secret = os.urandom(32)\n if secret != EMPTY_SECRET:\n return secret", "def test_update_secret(self):\n pass", "def test_secret_create_defaults_valid_bit_length(self, bit_length):\n secret = self.barbicanclient.secrets.create(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The user attribute of secrets is indexed.
def test_indexed(self): self.assertTrue(Secret.user.indexed)
[ "def protected_index(self):\n return self.__protected_index", "def getsecretuserinfo(self, authinfo, userinfo):\n dict_secret = self.list_dict_secret_data( authinfo, userinfo, access_type='ldif' )\n raw_secrets = {}\n for key in dict_secret.keys():\n raw_secrets.update( dict...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a chirp phase, ascending or descending. Same as scipy.signal.waveforms._chirp_phase, with a constant-frequency extension for t >= tEnd.
def chirp_phase(t, freqStart, tEnd, freqEnd, method='linear', vertex_zero=True): if (tEnd <= t[0]) or (freqStart == freqEnd): # Only constant frequency return TAU * freqEnd * t phase = _chirp_phase(t, f0=freqStart, t1=tEnd, f1=freqEnd, method=method, vertex_zero=vertex_...
[ "def phase(dp):\n from tayph.vartests import typetest\n import numpy as np\n from astropy.io import ascii\n from astropy.time import Time\n from astropy import units as u, coordinates as coord\n import tayph.util as ut\n dp=check_dp(dp)#Path object\n d=ascii.read(dp/'obs_times',comment=\"#\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get BUFFER_SIZE many phase samples for a given frequency. Also supports an array of frequencies (varying frequency). If so, it has to be BUFFER_SIZE long.
def sample_phase(frequency, startPhase=0.): constFrequency = (np.ndim(frequency) == 0) if constFrequency: t = get_time(BUFFER_SIZE + 1, DT) phase = TAU * frequency * t + startPhase else: phase = np.empty(BUFFER_SIZE + 1) phase[0] = startPhase phase[1:] = TAU * DT * np...
[ "def freq_from_pcm(pcm, window, step, channels):\n\n # XXX doesn't pad data with zeroes at the start\n offset = 0\n while offset < pcm.shape[0]:\n data = numpy.zeros(window, numpy.float64)\n for ch in channels:\n chunk = pcm[offset : offset + window, ch]\n if len(chunk) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pivoted QR factorization, where the pivots are used as a heuristic for subsampling.
def get_qr_column_pivoting(Ao, number_of_subsamples):
    A = deepcopy(Ao)
    _, _, pvec = qr(A.T, pivoting=True)
    z = pvec[0:number_of_subsamples]
    return z
[ "def qr_to_rq_decomposition(self):\n Q, R = np.linalg.qr(np.flipud(self.P).T)\n R = np.flipud(R.T)\n return R[:, ::-1], Q.T[::-1, :]", "def QRDecompositionExample():\n A = np.array([[1,1,0],[1,2,1],[-2,-3,1]])\n print(A)\n Q,R = QRDecomposition(A)\n print(Q)\n print(R)", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retain rows with largest pivots in LU factorisation. AKA Leja sequence.
def get_lu_row_pivoting(Ao, number_of_subsamples):
    A = Ao.copy()
    P = lu(A)[0]
    z = np.where(P==1)[1][:number_of_subsamples]
    return z
[ "def fattorizzazione_lu_pivot(A):\n\n def swap_rows(M, r1, r2):\n M[[r1, r2], :] = M[[r2, r1], :]\n\n m, n = A.shape\n if m != n:\n print(\"Matrice non quadrata\")\n return [], [], [], False\n\n U = A.copy()\n P = np.eye(n)\n for k in range(n - 1):\n if U[k, k] == 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``). Models with highest metric value will be retained. The logic of how to store objects is delegated to ``save_handler``.
def gen_save_best_models_by_val_score( save_handler: Union[Callable, BaseSaveHandler], evaluator: Engine, models: Union[torch.nn.Module, Dict[str, torch.nn.Module]], metric_name: str, n_saved: int = 3, trainer: Optional[Engine] = None, tag: str = "val", score_sign: float = 1.0, **kwa...
[ "def save_best_model_by_val_score(\n output_path: str,\n evaluator: Engine,\n model: torch.nn.Module,\n metric_name: str,\n n_saved: int = 3,\n trainer: Optional[Engine] = None,\n tag: str = \"val\",\n score_sign: float = 1.0,\n **kwargs: Any,\n) -> Checkpoint:\n return gen_save_best_m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``). Models with highest metric value will be retained.
def save_best_model_by_val_score( output_path: str, evaluator: Engine, model: torch.nn.Module, metric_name: str, n_saved: int = 3, trainer: Optional[Engine] = None, tag: str = "val", score_sign: float = 1.0, **kwargs: Any, ) -> Checkpoint: return gen_save_best_models_by_val_score...
[ "def gen_save_best_models_by_val_score(\n save_handler: Union[Callable, BaseSaveHandler],\n evaluator: Engine,\n models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],\n metric_name: str,\n n_saved: int = 3,\n trainer: Optional[Engine] = None,\n tag: str = \"val\",\n score_sign: float =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method sets up an early stopping handler based on the score (named by `metric_name`) provided by `evaluator`. The metric value should increase in order to keep training and not stop early.
def add_early_stopping_by_val_score( patience: int, evaluator: Engine, trainer: Engine, metric_name: str, score_sign: float = 1.0, ) -> EarlyStopping: es_handler = EarlyStopping( patience=patience, score_function=get_default_score_fn(metric_name, score_sign=score_sign), trainer=trainer ...
[ "def build_early_stopping_callback(name, params, outdir='out'):\n if name == 'basic':\n patience_loss = params['patience_loss']\n patience = params['patience']\n callback = EarlyStopping(monitor=patience_loss,\n patience=patience,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a component of the given type to the GameObject
def add_component(self, component: GameObjectComponent): self.__component_container.append(component)
[ "def add_component(self, component):\n self.components.append(component)", "def add(self, component):\n # check if component is valid\n if component == None:\n return False\n # according to the object type the component will be added\n if type(component) == place.Plac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to get a component of the specified type from the GameObject. Returns an empty list if no component is found.
def get_component(self, component_type): components = [] for component in self.__component_container: # Iterate through all components if isinstance(component, component_type): # If the component is of the required type add it to the list components.append(component) ...
[ "def get_component(component_type: str) -> typing.Optional[typing.Any]:\n components = get_components_by_type(component_type)\n\n def attrgetter(attrname: str, default: typing.Optional[int] = None) -> typing.Callable[[typing.Any], int]:\n def inside(obj: typing.Any) -> int:\n return int(typi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to get a component from the GameObject with the given name. Returns None if there is no matching component.
def get_component_by_name(self, name):
    for component in self.__component_container:  # Iterate through all components
        if component.name == name:  # If the component has the required name
            return component
    return None
[ "def find_component(self, name, required=True):\n\n for component in self.components:\n if component.name == name:\n return component\n\n if required:\n self._missing_component(name)\n\n return None", "def get_component_by_name( app, name ):\n sa_sessio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the floored X position. Returns: int: The floored X position of the GameObject.
def get_x(self): return math.floor(self.position.x)
[ "def get_x(self):\r\n return self.get_3d_position()[\"position\"].x", "def get_pos_x(self):\n return self._position[0]", "def xposition(self):\n return self._xposition", "def OriginX(self) -> float:", "def get_xcoord(self, x):\n return (x - self.xlimits[0]) / self.dx", "def get_x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the floored Y position. Returns: int: The floored Y position of the GameObject.
def get_y(self): return math.floor(self.position.y)
[ "def get_y(self):\r\n return self.get_3d_position()[\"position\"].y", "def get_pos_y(self):\n return self._position[1]", "def getY(self):\n return self.pos.y", "def OriginY(self) -> float:", "def yposition(self):\n return self._yposition", "def get_ycoord(self, y):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic integration function using uniform sampling. The cube size is determined based on the fact that almost all the mass of a bivariate standard normal distribution lies within [-5, 5] x [-5, 5].
def montecarlo_integration_uniform(f, n=1000):
    cube_size = 5
    z1_trials = rnd.uniform(-cube_size, cube_size, n)
    z2_trials = rnd.uniform(-cube_size, cube_size, n)
    V = 2 * cube_size * 2 * cube_size
    integral = np.sum(f(z1_trials, z2_trials))
    return V * integral / n
[ "def generate_integration(self, each_sample: int) -> float:\r\n x_rand = np.random.uniform(low=0, high=1.0, size=each_sample)\r\n x_rand = [self.f(i) for i in x_rand]\r\n\r\n result: float = round((self.high_bound-self.low_bound)\r\n * np.sum(x_rand)/each_sample, 5)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds an inverse CDF function given a sorted vector of values defining a marginal distribution.
def build_empirical_inverse_cdf(X): n = len(X) def f(prob): """ Args: prob (ndarray): vector with probablities to compute the inverse """ # assert 0<=prob<=1, 'Argument of inverse function is a probability >=0 and <= 1.' return X[np.minimum((n * np.array(prob...
[ "def inverse_transform(inv_cdf, **params):\n return inv_cdf(np.random.uniform(), **params)", "def invCDF(val):\n a = density(0)*crossSection\n b = scaleHeight/np.cos(particleTheta)\n c = np.exp(-particleHeight/scaleHeight)\n if b>0:\n n = 1-np.exp(-a*b*c)\n else:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup a scout database.
def database(context, institute_name, user_name, user_mail, api_key): LOG.info("Running scout setup database") # Fetch the omim information api_key = api_key or current_app.config.get('OMIM_API_KEY') if not api_key: LOG.warning("Please provide a omim api key with --api-key") raise click...
[ "def setup_db():\n create_service_db()", "def test_setup_database(mock_app):\n\n runner = mock_app.test_cli_runner()\n assert runner\n\n # test the CLI command for seting up scout\n result = runner.invoke(cli, [\"setup\", \"demo\"])\n\n # Make sure that setup function works correctly\n assert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup a scout demo instance. This instance will be populated with a case, a gene panel and some variants.
def demo(context): LOG.info("Running scout setup demo") institute_name = context.obj['institute_name'] user_name = context.obj['user_name'] user_mail = context.obj['user_mail'] adapter = context.obj['adapter'] setup_scout( adapter=adapter, institute_id=institute_name, u...
[ "def setup(self):\n # Name of the pipeline reduction step\n self.name = 'noiseplots'\n self.description = \"Make Noise Plots\"\n\n # Shortcut for pipeline reduction step and identifier for\n # saved file names.\n self.procname = 'npl'\n\n # Clear Parameter list\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of songs given an expression
def read_songs_criteria(expression): logging.debug("{songs_controller} BEGIN function read_song_criteria()") try: songs = CRUD.read_songs_by_criteria(expression) except Exception: return RESP.response_500(message='Database is down!') array = [] for song in songs: array.app...
[ "def get_search_queries(self):\n artists_songs = []\n\n # Iterating through the playlist track objects inside the paging object.\n for playlist_track in self.playlist[\"tracks\"][\"items\"]:\n # Getting the track itself from the playlist track object.\n track = playlist_tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates an active song matching a given id with given parameters such as title, artist, album, release year and path. When a parameter is empty it is not updated
def update_song(id, body): logging.debug("{songs_controller} BEGIN function update_song()") if id is '': return RESP.response_400(message='The id parameter is empty!') try: song = CRUD.read_song_by_song_id(id) except Exception: return RESP.response_500(message='Database is down...
[ "def _update_audio_(course_id, audio_info):\n course = Course.objects.get(course_id=course_id)\n dir = audio_info[\"url\"].split(\"/\")\n if dir[-2] == \"audio_temp\":\n audio = AudioTemp.objects.get(pk=audio_info[\"id\"]).position\n course.audio_url = File(audio, dir[-1])\n audio.clos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an active song given an id
def delete_song(id): logging.debug("{songs_controller} BEGIN function delete_song()") if id is '': return RESP.response_400(message='The id parameter is empty!') try: song = CRUD.read_song_by_song_id(id) except Exception: return RESP.response_500(message='Database is down!') ...
[ "def delete(self, _id):\n raise NotImplementedError(\"delete item\")", "def delete(cls, id_):\n try:\n title = cls.query.filter_by(id=id_).one()\n db.session.delete(title)\n db.session.commit()\n except sqlalchemy.exc.SQLAlchemyError:\n db.session.r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check booking confirmation and price for a given Flight.
async def confirm_flight(self, flight: Flight) -> (Flight, bool): log.info(f'Checking {flight}') confirm_flight_endpoint = self.CONFIRM_FLIGHT_ENDPOINT.format( booking_token=flight.booking_token ) flights_checked = False while not flights_checked: try: ...
[ "def book_flight(token, pass_info, currency_name):\r\n # request booking from server\r\n booking_payload = {'currency': currency_name, 'booking_token': token,\r\n \"passengers\": pass_info,\r\n }\r\n booking = request_server_response(requests.post, \"http://37.13...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract content from an html table.
def table_extract(table):
    table_content = []
    rows = table.find_elements_by_tag_name("tr")
    for row in rows:
        for tag in ("th", "td"):
            row_text = [e.text for e in row.find_elements_by_tag_name(tag)]
            if row_text:
                table_content.append(row_text)
    return table_content
[ "def extract_table(htmlstr):\n match = re.search(r'<table.*?/table>', htmlstr, re.DOTALL)\n tablehtml = match.group()\n tableList = re.findall(r'<tr>.*?</tr>', tablehtml, re.DOTALL)\n table = []\n for row in tableList:\n cell = re.findall('<td>(.*?)</td>', row, re.DOTALL)\n table.append...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the page update is blocked by a "Please Wait" message. Return True if blocked, False if not blocked.
def check_page_blocked(self):
    blocker = self.driver.find_element_by_id("blockingDiv")
    return blocker.is_displayed()
[ "def isWaiting(self):\r\n return self.iswaiting", "def checkForEntertainmentWaitTime(self):\n self.waitTimeData = requests.get(\"https://disneyworld.disney.go.com/api/wdpro/facility-service/entertainments/{}/wait-times\".format(self.__id), headers=getHeaders()).json()\n # data = json.loads(s....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Login to Portal. Should work with both new and old Portals.
def login(self, username, password, url): # Default fail return result result = 1 # Create Webdriver instance self.driver = webdriver.Firefox() driver = self.driver #Move window off edge of screen (effecivtely hides it) if flag set if self.offscreen: d...
[ "def __login(self):\n self.__access_to_api(\"https://secure.nicovideo.jp/secure/login?site=niconico\", is_post=True, post_data={\n 'mail_tel': Constants.Niconico.LOGIN_ID,\n 'password': Constants.Niconico.PASSWORD,\n })", "def login(self):\n url = \"http://\"+self.ip+\"/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Alternative login that uses firefox profile with Modify Headers to access Eric without using the portal (doesn't work in every Eric instance but does with DEV10).
def login_direct(self, url="http://ds01za003:7813/lscapps/eric-emi-murali/AutoLoginServlet", profile_path="" ): ffp_object = FirefoxProfile(profile_path) # Use Webdriver to open Firefox using chosen profile self.driver = webdriver.Firefox(ffp_object) ...
[ "def login_into_hasjob(driver):\n\tlogin_url = \"https://hasjob.co/login\"\n\t# login_url = \"https://auth.hasgeek.com/login\"\n\tdriver.get(login_url)\n\n\ttime.sleep(2)\n\ta = driver.find_element_by_id(\"showmore\")\n\ta.click()\n\ti = driver.find_element_by_id(\"username\")\n\ti.send_keys(\"vickyojha2@yahoo.com\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click Eric link in portal and wait for Eric app to open
def open_eric(self): driver = self.driver # Click the Hyperlink driver.find_element_by_link_text(self.application_link).click() # Wait for the Eric Window to open, then switch to it. WebDriverWait(driver, 20).until(lambda x: len(x.window_handles) == 2, self.driver) newwin...
[ "def openApp(self, app_name):\n time.sleep(2)\n locatorStr = ('//*[@title=\"' + app_name + '\"]')\n self.double_click_object(By.XPATH, locatorStr)", "def _open_homepage(self):\r\n if(self.web_browser_name == \"ie\"):\r\n self.driver = webdriver.Ie()\r\n elif(self.web_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds which reports are listed Should either be 0 or the 4/6 standard reports "Civil financial statement", "Criminal financial statement" "Family mediation financial statement" "Financial statement summary" Returns search result message, list of report names, eg. "4 reports found for 2N875V", [u'Civil financial stateme...
def report_list_items(self): driver = self.driver # No convenient locators for these items. Also response message # in different location if search unsuccessful # If report search successful, there's a div on the page # If search was successful there's a "tableBoxIndHalf2" div ...
[ "def reports(simulation, action='find', report=None):\n lreports = []\n if action == 'find':\n simulation = simulation.lower()\n simulation = sanitize(simulation)\n app.logger.debug('Checking if '\n + _REPORT_PATH + ' exists.')\n for item in os.listdir(_REPO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select Report on Eric Main Screen by position (from 0) or by name. Only selects, does not open.
def select_report(self, report=0): #reports = ["Civil financial statement", # "Criminal financial statement", # "Family mediation financial statement", # "Financial statement summary"] # Find the report name present on screen driver = self....
[ "def selectScreen(self, screen_name):\n time.sleep(1) #this is a hack. Otherwise it won't find the app consistently. Need to double-check implicit/explicit waits\n screen_tile = AppEditorPageLocators(screen_name)\n self.click_object_at_location(1, 1, *screen_tile.screen_tile) #clicking with...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the name of the report currently selected
def read_report_choice(self): driver = self.driver # No convenient identifier. # Also not always present # Also report text is in the 2nd h3 tag h3_texts = [e.text for e in driver.find_elements_by_tag_name("h3")] report_name = "" if len(h3_texts) == 2: ...
[ "def get_report_title(self):\n return None", "def _get_report_from_name(self, report_name):\n res = super(ReportXML, self)._get_report_from_name(report_name)\n\n if res:\n return res\n\n report_obj = self.env['ir.actions.report']\n qwebtypes = ['qweb-pdf', 'qweb-html'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click the "View Report" button Switch focus to new report window that appears. Wait for "Please Wait" message to dissapear.
def view_report(self): driver = self.driver # Click "View Report" button # lacks a convenient identifier # Accessing via its parent form form = driver.find_element_by_id("ReportDetailsForm") # Contains multiple "input" fields, filter to get right one input_element...
[ "def get_reports_window(self):\n self.gui.active_window.hide()\n\n self.associated_window = reports_window.ReportsWindow(self.gui)\n self.gui.active_window = self.associated_window\n\n self.gui.active_window.show()", "def switch_to_analysis_window():\n driver.switch_to.window(driver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click the close report button. Return focus back to the main Eric window.
def close_report(self): driver = self.driver # Buttons lack convenient labels. Finding by tag name button_div = driver.find_element_by_id("buttons2") buttons = button_div.find_elements_by_tag_name("a") # Click the "Close Report" button (assuming its the last one) buttons[...
[ "def cancel(self):\r\n\r\n self.parent.focus_set()\r\n self.window.destroy()", "def close_window(self):\r\n Window.close()", "def close(self):\n\n Dialog.close(self)\n gui.no_modal_dialog=True", "def click_button_close(self):\n # AutoGen method click_link: None\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log out from Eric and the Portal (if present). Note: if accessed using Modify Headers, Portal logout is not possible.
def log_out(self): driver = self.driver window_count = len(driver.window_handles) # Click Eric logout link driver.find_element_by_link_text("Log out").click() # Follow-up Portal logout if window_count ==2: # Wait for the Eric window to close WebDr...
[ "def logout(self):\n self.session.get(EE_LOGOUT_URL)", "def logout():\n _logout()", "def logout():\n helper.set_login_state(False)\n helper.update_session('Authorization', None)", "def logout(self): #simple enough lol\n self.currentuser = None\n return self.post_request(**{'actio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the current selected text of a combo box.
def get_selected_text(self, widget): return widget.GetStringSelection()
[ "def get_active_text_in_combobox(self,combobox):\n model = combobox.get_model()\n active = combobox.get_active()\n if active < 0:\n return None\n\n return model[active][0]", "def get_value(self) -> str:\n text = self.combo.currentText()\n return self.options[te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the text of a combo box item at a particular index.
def get_item_text(self, widget, index): return widget.GetString(index)
[ "def get_value(self) -> str:\n text = self.combo.currentText()\n return self.options[text]", "def get_active_text_in_combobox(self,combobox):\n model = combobox.get_model()\n active = combobox.get_active()\n if active < 0:\n return None\n\n return model[active]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the GNG graph database
def __init_graph(self) -> None: self.graph = Graph()
[ "def _init_graph(self):\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self._init_network_variables()\n self._init_network_functions()", "def __init__(self):\n self.G = nx.Graph()\n self.node_attr_dfs = dict()\n self.unique_relations = set()\n self.node_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load the header record and test that it actually loads it without exceptions
def test_header_record(header_record): rec = HeaderRecord() rec.load(header_record) assert rec.bank_app == 'T' assert rec.app_id == '363914' assert rec.edi_msg == 'HEADER' assert rec.separator is None assert rec.rec_typ == '00' assert rec.app_ver == '01.0000' assert rec.app_brand ==...
[ "def test_advmul_header_record(advmul_header_record, advmuz_header_record):\n rec = AdvmulHeaderRecord()\n rec.load(advmul_header_record)\n\n assert rec.bank_app == 'T'\n assert rec.app_id == '363914'\n assert rec.edi_msg == 'ADVMUL'\n assert rec.separator is None\n assert rec.rec_typ == '01'\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load the lock record and test that it actually loads it without exceptions
def test_lock_record(lock_record): rec = LockRecord() rec.load(lock_record) assert rec.bank_app == 'T' assert rec.app_id == '363914' assert rec.edi_msg == 'LOCK' assert rec.separator is None assert rec.rec_typ == '99' assert rec.count == 0 assert rec.timestamp == datetime.datetime(y...
[ "def load(self):\n self.__lockData = {}\n data = None\n try:\n with open(self.__path, 'r') as lockfile:\n data = json.load(lockfile)\n lockfile.close()\n except json.decoder.JSONDecodeError:\n Logger.warning(\"MEG Locking: Unable to rea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load the advmul header record (in both variants) and test that it actually loads it without exceptions
def test_advmul_header_record(advmul_header_record, advmuz_header_record): rec = AdvmulHeaderRecord() rec.load(advmul_header_record) assert rec.bank_app == 'T' assert rec.app_id == '363914' assert rec.edi_msg == 'ADVMUL' assert rec.separator is None assert rec.rec_typ == '01' assert rec...
[ "def test_header_record(header_record):\n rec = HeaderRecord()\n rec.load(header_record)\n\n assert rec.bank_app == 'T'\n assert rec.app_id == '363914'\n assert rec.edi_msg == 'HEADER'\n assert rec.separator is None\n assert rec.rec_typ == '00'\n assert rec.app_ver == '01.0000'\n assert r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load the advmul record and test that it actually loads it without exceptions
def test_advmul_record(advmul_record): rec = AdvmulRecord() rec.load(advmul_record) assert rec.bank_app == 'T' assert rec.app_id == '363914' assert rec.edi_msg == 'ADVMUL' assert rec.separator is None assert rec.rec_typ == '02' assert rec.message_type is None assert rec.transact_no ...
[ "def test_advmul_header_record(advmul_header_record, advmuz_header_record):\n rec = AdvmulHeaderRecord()\n rec.load(advmul_header_record)\n\n assert rec.bank_app == 'T'\n assert rec.app_id == '363914'\n assert rec.edi_msg == 'ADVMUL'\n assert rec.separator is None\n assert rec.rec_typ == '01'\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load the advmuz record and test that it actually loads it without exceptions
def test_advmuz_record(advmuz_record): rec = AdvmuzRecord() rec.load(advmuz_record) assert rec.bank_app == 'T' assert rec.app_id == '363914' assert rec.edi_msg == 'ADVMUZ' assert rec.separator is None assert rec.rec_typ == '02' assert rec.message_type == 'CRE' assert rec.client_no =...
[ "def test_record_loading(self):\n test_record = self.db.lookup(accession = \"X55053\")\n assert test_record.name == \"ATCOR66M\"\n assert test_record.id == \"X55053.1\", test_record.id\n assert test_record.description == \"A.thaliana cor6.6 mRNA.\"\n assert isinstance(test_record....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get camera instances to capture images from. Returns: List[Tuple[cv2.VideoCapture, Dict]]: List of camera instances and camera properties dictionaries.
def get_cameras(self) -> List[Tuple[cv2.VideoCapture, Dict]]: pass
[ "def cameraList(self):\r\n var = (CameraInfoEx*10)()\r\n self.dll.PvCameraListEx(byref(var), 1, None, sizeof(CameraInfoEx))\r\n return var", "def list_cameras(self):\n res = []\n camera_names = cmds.listCameras()\n for camera_name in camera_names:\n camera_shap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Capture images from cameras to file.
def capture_images(self, img_path: str = 'tmp.jpg', warm_up: bool = True, preview: bool = True, save: bool = True): pass
[ "def take_picture(self):\n imgpath = \"\"\n # print(\"Take pic from device %d\" % (self.cv2_cam_dev1))\n try:\n self.lights.headlights(True)\n time.sleep(self.light_wakeup_t)\n cap = cv2.VideoCapture(self.cv2_cam_dev1)\n ret, frame = cap.read()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the ports and return a tuple with the available ports and the ones that are working. Returns: Tuple[List[int], List[int]]: List of working ports and list of available ports.
def list_ports() -> Tuple[List[int], List[int]]: working_ports = [] available_ports = [] devices = [dev for dev in os.listdir('/dev/') if 'video' in dev] for dev_port in range(len(devices)): camera = cv2.VideoCapture(dev_port) if camera.isOpened(): ...
[ "def run_scan(self):\n open_ports = []\n closed_ports = []\n for port in range(1, 3000):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock = ssl.wrap_socket(s)\n sock.connect((target, port))\n sock.shu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For every newly created `Hospital`, create a `Department` corresponding to each `Specialty`.
def create_departments_for_hospital(sender, instance, created, **kwargs): if created: departments = list() for specialty in Specialty.objects.all(): departments.append(Department( hospital=instance, name="Department of %s" % specialty.name, ...
[ "def create_departments_for_specialty(sender, instance, created, **kwargs):\n if created:\n departments = list()\n for hospital in Hospital.objects.all():\n departments.append(Department(\n hospital=hospital,\n name=\"Department of %s\" % instance.name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For every newly created `Specialty`, create a corresponding `Department` in each `Hospital`.
def create_departments_for_specialty(sender, instance, created, **kwargs): if created: departments = list() for hospital in Hospital.objects.all(): departments.append(Department( hospital=hospital, name="Department of %s" % instance.name, s...
[ "def create_departments_for_hospital(sender, instance, created, **kwargs):\n if created:\n departments = list()\n for specialty in Specialty.objects.all():\n departments.append(Department(\n hospital=instance,\n name=\"Department of %s\" % specialty.name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test parsing a valid OFX document containing a 'success' message.
def test_successful_parse(self): self.assertEqual("SUCCESS", self.checkparse["body"]["OFX"]["SIGNONMSGSRSV1"]["SONRS"]["STATUS"]["MESSAGE"])
[ "def testParseContent(self):\n # XXX not sure it is good to store parsed document everytime\n self.assertTrue(isinstance(self.oodocument.parsed_content, etree._Element))\n self.assertTrue(self.oodocument.parsed_content.tag.endswith(\n 'document-content'))", "def assertStateOK(root):", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reading a value from deep in the body of the OFX document.
def test_body_read(self): self.assertEqual("-5128.16", self.creditcardparse["body"]["OFX"]["CREDITCARDMSGSRSV1"]["CCSTMTTRNRS"]["CCSTMTRS"]["LEDGERBAL"]["BALAMT"])
[ "def test_read_nested_val(self):\n sample_json = {'level1': {'level2': {'level3': {'int': 42}}}}\n self.assertEqual(\n chrome_defaults.get_json_field(\n sample_json, 'level1.level2.level3.int'),\n 42)", "def retrieve_value(node):\n\n return node.value", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reading a header from the OFX document.
def test_header_read(self): self.assertEqual("100", self.checkparse["header"]["OFXHEADER"])
[ "def test_read_header():\n header = get_header(AIA_193_JP2)[0]\n assert isinstance(header, FileHeader)", "def test_header_record(header_record):\n rec = HeaderRecord()\n rec.load(header_record)\n\n assert rec.bank_app == 'T'\n assert rec.app_id == '363914'\n assert rec.edi_msg == 'HEADER'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a Matplotlib color spec to a uint8 4-tuple.
def uint8color(color): return tuple(int(255*v) for v in colorConverter.to_rgba(color))
[ "def colorTuple(c):\n return c.getRgb()", "def rgb_to_bytes(color):\n\treturn tuple(int(round(i * 255)) for i in color)", "def _convertColorsFromFloatToUint8(colors):\n # Each bin is [N, N+1[ except the last one: [255, 256]\n return numpy.clip(\n colors.astype(numpy.float64) * 256, 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Lambda-ready zip file of the current virtual environment and working directory. Returns the path to that file.
def create_lambda_zip(self, prefix='lambda_package', handler_file=None, minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None): import pip print("Packaging project as zip...") if not venv: if 'VIRTUAL_ENV' in os.environ: venv = os.envir...
[ "def build(function_name=None):\n if not function_name:\n abort('Must provide function_name')\n\n lambda_root = os.path.join(LAMBDA_DIR, function_name)\n module_dir = os.path.join(lambda_root, function_name)\n lambda_config_dir = os.path.join(lambda_root, LAMBDA_CONFIG_SUBDIR)\n staging_dir = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classes to extract the annotations and smart deploy instances from the ABS program.
def __init__(self):
    ABSVisitor.__init__(self)
    self.smart_dep_json = []
    self.dc_json = {}
    self.deploy_annotations = []
    self.module_name = ""
    self.classes = {}
    self.interfaces = {}
[ "def main():\n MODEL_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel\" # noqa: E501\n MODEL_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.caffemodel\"\n PROTOTXT_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/Mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Preload the df_responses dataframe to prepare for the fit function, in case you need to resume a fit that failed previously.
def preload_fit(self, df_responses, mapping_matrix):
    self.df_responses = df_responses
    self.mapping_matrix = mapping_matrix
    self.to_resume = True
[ "def set_results_df(context):\n context.results_df = pd.DataFrame()\n context.desired_outputs = []", "def res_df(self):\n if not hasattr(self, 'res_dict'):\n print('you must perform the fit first! ...e.g. call performfit()')\n return\n\n vals = self._assignvals(self.res_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a column on "curr_item" index to the IncrementalSparseMatrix matrix_builder with the samples inside response_df.
def add_sample_responses_to_matrix_builder(self, matrix_builder, agg_strategy, filter_sample_method, response_df, curr_item, mapping): filtered_response_df = self.FILTER_SAMPLES_METHODS[filter_sample_method].filter_samples(response_df) solution_list = self....
[ "def build_similarity_matrix(self, df_responses, agg_strategy, filter_sample_method, mapping_matrix):\n n_items = self.URM_train.shape[1]\n if mapping_matrix is None:\n mapping_matrix = np.repeat(np.reshape(np.arange(0, n_items), newshape=(1, n_items)), repeats=n_items, axis=0)\n mat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It builds the similarity matrix by using a dataframe with all the samples collected from the solver in the fit function. The samples obtained from the solver are postprocessed with a filtering operation (i.e. filter_strategy) and an aggregation operation (i.e. agg_strategy). At the end of this pipeline, it outputs a si...
def build_similarity_matrix(self, df_responses, agg_strategy, filter_sample_method, mapping_matrix): n_items = self.URM_train.shape[1] if mapping_matrix is None: mapping_matrix = np.repeat(np.reshape(np.arange(0, n_items), newshape=(1, n_items)), repeats=n_items, axis=0) matrix_build...
[ "def similarities(attributes_df, columns, joint = False, metric = \"eucl_dist\"):\n if len(columns) == 0:\n return None\n if len(attributes_df) == 0:\n return None\n\n if type(columns[0]) == float or type(columns[0]) == int:\n columns = [int(i) for i in columns]\n column_names =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SkinCluster Data Class Initializer
def __init__(self, skinCluster=''): # Execute Super Class Initilizer super(SkinClusterData, self).__init__() # SkinCluster Custom Attributes self._data['attrValueList'].append('skinningMethod') self._data['attrValueList'].append('useComponents') self._data['attrValueList...
[ "def __init__(self):\n\n self.clusters = [ ]", "def buildData(self, skinCluster):\n # ==========\n # - Checks -\n # ==========\n\n # Check skinCluster\n self.verifySkinCluster(skinCluster)\n\n # Clear Data\n self.reset()\n\n # =======================\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build skinCluster data and store as class object dictionary entries
def buildData(self, skinCluster): # ========== # - Checks - # ========== # Check skinCluster self.verifySkinCluster(skinCluster) # Clear Data self.reset() # ======================= # - Build Deformer Data - # ======================= ...
[ "def __init__(self, skinCluster=''):\n # Execute Super Class Initilizer\n super(SkinClusterData, self).__init__()\n\n # SkinCluster Custom Attributes\n self._data['attrValueList'].append('skinningMethod')\n self._data['attrValueList'].append('useComponents')\n self._data['a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild the skinCluster using stored data
def rebuild(self): # ========== # - Checks - # ========== # Check geometry skinGeo = self._data['affectedGeometry'][0] if not cmds.objExists(skinGeo): raise Exception( 'SkinCluster geometry "' + skinGeo + '" does not exist! Use remapGeometry()...
[ "def rebuild(self, skinClusterList):\n # Start timer\n timer = cmds.timerX()\n\n # For Each SkinCluster\n for skinCluster in skinClusterList:\n\n # Check skinClusterData\n if not self._data.has_key(skinCluster):\n print('No data stored for skinCluster...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the stored skinCluster weights to the specified skinCluster.
def setWeights(self, componentList=[]): print('!!! - DEPRICATED: skinClusterData.setWeights()! Use loadWeights() method instead - !!!') # ========== # - Checks - # ========== # Check SkinCluster skinCluster = self._data['name'] self.verifySkinCluster(skinCluster...
[ "def loadWeights(self,\n skinCluster=None,\n influenceList=None,\n componentList=None,\n normalize=True):\n # ==========\n # - Checks -\n # ==========\n\n # Check SkinCluster\n if not skinCluster: skinClus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the stored skinCluster weights.
def loadWeights(self, skinCluster=None, influenceList=None, componentList=None, normalize=True): # ========== # - Checks - # ========== # Check SkinCluster if not skinCluster: skinCluster = self._dat...
[ "def setWeights(self, componentList=[]):\n print('!!! - DEPRICATED: skinClusterData.setWeights()! Use loadWeights() method instead - !!!')\n\n # ==========\n # - Checks -\n # ==========\n\n # Check SkinCluster\n skinCluster = self._data['name']\n self.verifySkinClust...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Swap influence weight values between 2 skinCluster influences.
def swapWeights(self, inf1, inf2): # Check Influences if not self._influenceData.has_key(inf1): raise Exception('No influence data for "' + inf1 + '"! Unable to swap weights...') if not self._influenceData.has_key(inf2): raise Exception('No influence data for "' + inf2 + ...
[ "def transfer_weights_replace(source, target):\n skinToReset = set()\n\n # TODO : Ensure that the transfered lockInfluenceWeight attr work correctly (The lock icon doesn't appear in the skinCluster)\n if source.hasAttr('lockInfluenceWeights'):\n attr_lockInfluenceWeights_src = source.lockInfluenceWe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move influence weight values from one skinCluster influence to another.
def moveWeights(self, sourceInf, targetInf, mode='add'): # Check Influences if not self._influenceData.has_key(sourceInf): raise Exception('No influence data for source influence "' + sourceInf + '"! Unable to move weights...') if not self._influenceData.has_key(targetInf): ...
[ "def transfer_weights_replace(source, target):\n skinToReset = set()\n\n # TODO : Ensure that the transfered lockInfluenceWeight attr work correctly (The lock icon doesn't appear in the skinCluster)\n if source.hasAttr('lockInfluenceWeights'):\n attr_lockInfluenceWeights_src = source.lockInfluenceWe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine stored skinCluster influence data from a list of source influences to a single target influence. Source influences data will be removed.
def combineInfluence(self, sourceInfluenceList, targetInfluence, removeSource=False): # =========================== # - Check Source Influences - # =========================== skipSource = [] for i in range(len(sourceInfluenceList)): # Check influence if...
[ "def transfer_weights_replace(source, target):\n skinToReset = set()\n\n # TODO : Ensure that the transfered lockInfluenceWeight attr work correctly (The lock icon doesn't appear in the skinCluster)\n if source.hasAttr('lockInfluenceWeights'):\n attr_lockInfluenceWeights_src = source.lockInfluenceWe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remap the skinCluster data from one geometry to another.
def remapGeometry(self, geometry): # Checks oldGeometry = self._data['affectedGeometry'][0] if geometry == oldGeometry: return geometry # Check Skin Geo Data if not self._data.has_key(oldGeometry): raise Exception('SkinClusterData: No skin geometry data for affected ...
[ "def rebuild(self):\n # ==========\n # - Checks -\n # ==========\n\n # Check geometry\n skinGeo = self._data['affectedGeometry'][0]\n if not cmds.objExists(skinGeo):\n raise Exception(\n 'SkinCluster geometry \"' + skinGeo + '\" does not exist! Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild the skinCluster deformer membership and weight arrays for the specified geometry using the stored world space geometry data.
def rebuildWorldSpaceData(self, targetGeo='', method='closestPoint'): # Start timer timer = cmds.timerX() # Display Progress glTools.utils.progressBar.init(status=('Rebuilding world space skinCluster data...'), maxValue=100) # ========== # - Checks - # =========...
[ "def rebuild(self):\n # ==========\n # - Checks -\n # ==========\n\n # Check geometry\n skinGeo = self._data['affectedGeometry'][0]\n if not cmds.objExists(skinGeo):\n raise Exception(\n 'SkinCluster geometry \"' + skinGeo + '\" does not exist! Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mirror SkinCluster Data using search and replace for naming. This method does not perform closest-point-on-surface mirroring.
def mirror(self, search='lf', replace='rt'): # ========== # - Checks - # ========== # =========================== # - Search and Replace Name - # =========================== if self._data['name'].count(search): self._data['name'] = self._data['name']...
[ "def rebuild(self):\n # ==========\n # - Checks -\n # ==========\n\n # Check geometry\n skinGeo = self._data['affectedGeometry'][0]\n if not cmds.objExists(skinGeo):\n raise Exception(\n 'SkinCluster geometry \"' + skinGeo + '\" does not exist! Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild a list of skinClusters from the stored SkinClusterListData
def rebuild(self, skinClusterList): # Start timer timer = cmds.timerX() # For Each SkinCluster for skinCluster in skinClusterList: # Check skinClusterData if not self._data.has_key(skinCluster): print('No data stored for skinCluster "' + skinClus...
[ "def rebuild(self):\n # ==========\n # - Checks -\n # ==========\n\n # Check geometry\n skinGeo = self._data['affectedGeometry'][0]\n if not cmds.objExists(skinGeo):\n raise Exception(\n 'SkinCluster geometry \"' + skinGeo + '\" does not exist! Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds powers of the perm.
def __pow__(self, n): if n < 0: return pow(~self, -n) elif n == 0: return Perm() elif n == 1: return self elif n == 2: return self * self else: # binary exponentiation perm = self res = ...
[ "def _compute_powers(self):\n self.base = self.theprime\n power = 1\n for idx in xrange(self.hashlen-1):\n power *= self.base\n power = to_int64(power)\n\n for idx in xrange(256):\n self.powers.append(to_int64(idx * power))", "def find_io_perms(n):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the available longitude and latitude list
def read_long_lat(url, longitude_start, longitude_end, latitude_start, latitude_end): # Download the list of the available coordinates from the server request = urllib2.Request(url) response = urllib2.urlopen(request) try: data = response.read() except URLError as e: print e.rea...
[ "def gps_read_locations(lfile):\n gps_locations = list()\n\n if os.path.isfile(lfile):\n with open(lfile, \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 4)\n gps_tuple = list()\n gps_tuple.append(float(x[0]))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the osqlcliclient pipeline for sending requests and receiving responses works.
def test_osqlcliclient_request_response(self): def get_test_baseline(file_name): """ Helper method to get baseline file. """ return os.path.abspath( os.path.join( os.path.abspath(__file__), u'..', ...
[ "def test_send_result(self):\n pass", "def test_flow_triggered(self):\n message = Mock()\n message.id = \"test-id\"\n message.data = {\"_vnd\": {\"v1\": {\"chat\": {\"owner\": \"27820001001\"}}}}\n tasks.rapidpro = TembaClient(\"textit.in\", \"test-token\")\n responses.ad...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to get baseline file.
def get_test_baseline(file_name): return os.path.abspath( os.path.join( os.path.abspath(__file__), u'..', u'..', u'osqlcli', u'jsonrpc', u'contracts', u...
[ "def readBaseline(BASELINE):\n\tbaselineDictionnary={\"blast\":{},\"protProfiles\":{}}\n\ttry:\n\t\twith open(BASELINE, \"r\") as f:\n\t\t\tfor line in f:\n\t\t\t\t####\tcheck the content of the file and look for > character (It indicates if the keyword is specific or nonSpecific)\n\t\t\t\tkeywords=re.search(r'>([^...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify osqlcliclient's tables, views, columns, and schema are populated.
def test_schema_table_views_and_columns_query(self): try: client = create_osql_cli_client() list(client.execute_query('CREATE TABLE tabletest1 (a int, b varchar(25));')) list(client.execute_query('CREATE TABLE tabletest2 (x int, y varchar(25), z bit);')) list(clie...
[ "def verify(self):\n with self.connection() as conn:\n self._ensure_tables(conn)", "def test_create_tables(self):\n self._db.create_tables()\n tables = json.loads(self._db.get_database_info())\n expected_tables = db_connection.Database.get_columns().keys()\n for table...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify if the OsqlCliClient can successfully reset its connection
def test_osqlcliclient_reset_connection(self):
    try:
        osqlcli = create_osql_cli()
        osqlcli.reset()
    finally:
        shutdown(osqlcli.osqlcliclient_main)
[ "def test_master_reset_connection(self):\n with mock.patch(\"locust.runners.FALLBACK_INTERVAL\", new=0.1):\n with mock.patch(\"locust.rpc.rpc.Server\", mocked_rpc(raise_on_close=False)) as server:\n master = self.get_runner()\n self.assertEqual(0, len(master.clients))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the results of running a stored proc with multiple result sets
def test_stored_proc_multiple_result_sets(self): try: client = create_osql_cli_client() create_stored_proc = u"CREATE PROC sp_osqlcli_multiple_results " \ u"AS " \ u"BEGIN " \ u"SELECT 'Morning' as [Name] UNION...
[ "def _test_large_result_set(self, schema, graphson):\n self.execute_graph(schema.fixtures.large(), graphson)\n g = self.fetch_traversal_source(graphson)\n traversal = g.V()\n vertices = self.execute_traversal(traversal, graphson)\n for vertex in vertices:\n self._valida...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for inserting float features into an Example proto.
def _floats_feature(value): return tf.train.Feature(float_list = tf.train.FloatList(value=[value]))
[ "def add_float(self, name, value):\r\n self.__add_field('float', name, value)\r\n return self", "async def put_float( # pylint: disable=inconsistent-return-statements\n self, complex_body: _models.FloatWrapper, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate and score the results of category elimination.
def validate(source, output, threshold=0.75, headers=True): src = Taxonomy(source) out = Taxonomy(output) def results(success=False, pairs=0, cats=0, supercats=0, merges=0, msg=None): return { "success": success, "pairs": pairs, "cats": cats, "superca...
[ "def test_compare_categories_categorical_variables(self):\n for method in self.cat_methods:\n compare_categories(self.dm1_fp, self.map1_fp, method,\n self.cat_categories, self.num_perms, self.test_dir)\n results_fp = join(self.test_dir, '%s_results.txt' % method)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a UnionFind of homogeneous categories
def union_find(cats, debug=False): cats = list(cats) uf = UnionFind(cats) for i in range(0, len(cats)): for j in range(i + 1, len(cats)): artsi = src.cat_arts[cats[i]] artsj = src.cat_arts[cats[j]] if jaccard(artsi, artsj) > threshold:...
[ "def get_setunion(tree):\n class Visitor(L.NodeVisitor):\n def process(self, tree):\n self.parts = []\n super().process(tree)\n return self.parts\n \n def generic_visit(self, node):\n # Don't recurse. Only traverse BinOps of BitOrs.\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a metering message for the counter and publish it.
def _publish_counter(self, counter): ctxt = context.get_admin_context() publish.publish_counter(ctxt, counter)
[ "def _create_meter(self, **kwargs):\n name = self._generate_random_name()\n samples = self.clients(\"ceilometer\").samples.create(\n counter_name=name, **kwargs)\n return samples[0]", "def meter(self, name, count):\n self.calls.append((\"meter\", name, count))", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables mediasite catalog downloads using provided catalog ID
def enable_catalog_downloads(self, catalog_id): logging.info("Enabling catalog downloads for catalog: '"+catalog_id) #prepare patch data to be sent to mediasite patch_data = {"AllowPresentationDownload":"True"} #make the mediasite request using the catalog id and the patch data...
[ "def disable_catalog_allow_links(self, catalog_id):\r\n\r\n logging.info(\"Disabling catalog links for catalog: '\"+catalog_id)\r\n\r\n #prepare patch data to be sent to mediasite\r\n patch_data = {\"AllowCatalogLinks\":\"False\"}\r\n\r\n #make the mediasite request using the catalog id ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables mediasite catalog links using provided catalog ID
def disable_catalog_allow_links(self, catalog_id): logging.info("Disabling catalog links for catalog: '"+catalog_id) #prepare patch data to be sent to mediasite patch_data = {"AllowCatalogLinks":"False"} #make the mediasite request using the catalog id and the patch data found ...
[ "def enable_catalog_downloads(self, catalog_id):\r\n\r\n logging.info(\"Enabling catalog downloads for catalog: '\"+catalog_id)\r\n\r\n #prepare patch data to be sent to mediasite\r\n patch_data = {\"AllowPresentationDownload\":\"True\"}\r\n\r\n #make the mediasite request using the cata...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add mediasite module to catalog by catalog id and module guid
def add_module_to_catalog(self, catalog_id, module_guid): logging.info("Associating catalog: "+catalog_id+" to module: "+module_guid) #prepare patch data to be sent to mediasite post_data = {"MediasiteId":catalog_id} #make the mediasite request using the catalog id and the patc...
[ "def add_part(self, part: Part, rel_location):\n self.modules.insert(rel_location, Module(part))\n self.update_parts()\n return self", "def add_module( self, to_add ):\n self.modules.append( to_add )", "def addMedia(self, m):", "def add_module(self, module):\n self.modules.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates mediasite catalog using provided catalog name, description, and parent folder id
def create_catalog(self, catalog_name, description="", parent_id=None): logging.info("Creating catalog '"+catalog_name+"' under parent folder "+str(parent_id)) post_data = {"Name":catalog_name, "Description":description, "LimitSearchToCatalog":True ...
[ "def create_catalog(self, prefix, parent_catalog_name, description):\n\n return OrderedDict(\n [\n (\"stac_version\", \"0.6.0\"),\n (\"id\", prefix),\n (\"description\", description),\n (\n \"links\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove all '\n's, ','s and ' 's
def prepare_str(_str): return _str.replace('\n', '').replace(',', '').replace(' ', '')
[ "def remove_from_text(text):\n # 0 - preprocessing\n \"\"\"text = re.sub(', ', ' ', str(text))\n text = re.sub(',', '', str(text))\"\"\"\n text = re.sub('\\n ', '', str(text))\n text = re.sub('\\n', '', str(text))\n\n return text", "def clean(s):\n # return dedent(s)\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
page number should be in [1, 200]
def correct_page_num(page): if page < 0 or page > 200: return 1 return page
[ "def pages_check(pages):\n if not pages:\n pages = 1\n return pages", "def check_pages(soup):\n review_count = int(soup.find(itemprop='reviewCount').text.strip('()'))\n pages = 1\n if review_count > 20:\n pages = ceil(review_count / 20)\n return pages", "def tiebaSpider(url, begi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
blog list page url
def page_blog_list(self, page=1): self.correct_page_num(page) return 'http://mypage.syosetu.com/mypageblog/list/userid/' \ + self.id + '/index.php?p=' + str(page)
[ "def get_blog_url(self, obj):\n return obj.blogs.all()[0].page", "def post_list(request, tag=''):\n page = request.GET.get('page', '')\n context_dict = {\n 'active_page': 'blog',\n 'page': page,\n 'tag': tag,\n }\n return render(request, 'blog/post_list.html', context_dict)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
bookmark list page url
def page_bookmark_list(self, page=1): self.correct_page_num(page) return 'http://mypage.syosetu.com/mypagefavnovelmain/list/userid/' \ + self.id + '/index.php?p=' + str(page)
[ "def page_following_list(self, page=1):\n self.correct_page_num(page)\n return 'http://mypage.syosetu.com/mypagefavuser/list/userid/' \\\n + self.id + '/index.php?p=' + str(page)", "def retrieve_listing_page_urls(self) -> List[str]:\n return [\"https://fatabyyano.net/newsface/0/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
following list page url
def page_following_list(self, page=1): self.correct_page_num(page) return 'http://mypage.syosetu.com/mypagefavuser/list/userid/' \ + self.id + '/index.php?p=' + str(page)
[ "def retrieve_listing_page_urls(self) -> List[str]:\n return [\"https://fatabyyano.net/newsface/0/\"]", "def get_next_url(self):\n return None", "def page_bookmark_list(self, page=1):\n self.correct_page_num(page)\n return 'http://mypage.syosetu.com/mypagefavnovelmain/list/userid/' \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
commented novel list page url
def page_commented_novel_list(self, page=1): self.correct_page_num(page) return 'http://mypage.syosetu.com/mypagenovelhyoka/list/userid/' \ + self.id + '/index.php?p=' + str(page)
[ "def list_commented_novels(self, page_num=10):\n count = self.get_count(self.page_commented_novel_list())\n if count == 0:\n return\n for i in range(1, (count - 1) / page_num + 2):\n soup = get_soup(self.page_commented_novel_list(i))\n if soup is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
review list page url
def page_review_list(self, page=1): self.correct_page_num(page) return 'http://mypage.syosetu.com/mypage/reviewlist/userid/' \ + self.id + '/index.php?p=' + str(page)
[ "def get_url(self):\n return '%s' % (self.review_url)", "def program_review_url(self, program_data):\n return self.review_url(program_data.program.id)", "def get_show_url(self, name):", "def parse_reviews_url(self, html):\n sel = Selector(html)\n url = sel.xpath(self.reviews_listin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decompose count from specific url
def get_count(url, pos_to=-2): soup = get_soup(url) if soup is None: return 0 if soup.find(class_='allcount') is None: return 0 return int(soup.find(class_='allcount').string[1:pos_to])
[ "def get_unique_counter_from_url(sp):\n ...", "def get_pages(url):\n return url.json()['size'] // 10", "def getUrlsCounter(self):\r\n return self.handledUrlsCounter", "def getCountOfPages(url):\n # Making BeautifulSoup object\n htmlResponse = getHtml(url)\n soup = bs(htmlResponse.text, \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get commented novel list
def list_commented_novels(self, page_num=10): count = self.get_count(self.page_commented_novel_list()) if count == 0: return for i in range(1, (count - 1) / page_num + 2): soup = get_soup(self.page_commented_novel_list(i)) if soup is None: cont...
[ "def page_commented_novel_list(self, page=1):\n self.correct_page_num(page)\n return 'http://mypage.syosetu.com/mypagenovelhyoka/list/userid/' \\\n + self.id + '/index.php?p=' + str(page)", "def non_comment_lines(self):\n return [_ for _ in self.stripped_whole_lines() if not _.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the standardized Shannon entropy for the given string. (modified by me)
def calculate_shannon_entropy(string): #This check is no longer necessary, since string is no longer of the 'unicode' type #if isinstance(string, unicode): # string = string.encode("ascii") ent = 0.0 alphabet = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'] if le...
[ "def shannon_entropy(data: str, charset: str = BASE64_CHARS) -> float:\n char_freq = dict.fromkeys(charset, 0)\n\n for char in data:\n if char in char_freq:\n char_freq[char] += 1\n\n entropy = 0.0\n for char in char_freq:\n if char_freq[char] > 0:\n p_x = char_freq[c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }