Dataset columns:
  query      string (length 9 to 9.05k)
  document   string (length 10 to 222k)
  negatives  list (19 to 20 items)
  metadata   dict
Creates host objects on this Stack. If no arguments are given, then all hosts available based on the Stack's blueprint host definitions will be created. If args are given, then only the `count` for the given `host_definition` will be created. host_definition (BlueprintHostDefinition object); the host definition to use ...
def create_hosts(self, host_definition=None, count=None, backfill=False):
    created_hosts = []
    if host_definition is None:
        host_definitions = self.blueprint.host_definitions.all()
    else:
        host_definitions = [host_definition]
    for hostdef in host_definitions:
        ...
[ "def create_hosts(self, count, start=0):\n n_int = len(self.pg_interfaces)\n macs_per_if = count // n_int\n i = -1\n for pg_if in self.pg_interfaces:\n i += 1\n start_nr = macs_per_if * i + start\n end_nr = (\n count + start if i == (n_int ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses salt-cloud to query all the hosts for the given stack id.
def query_hosts(self, force=False): CACHE_KEY = 'salt-cloud-full-query' cached_result = cache.get(CACHE_KEY) if cached_result and not force: logger.debug('salt-cloud query result cached') result = cached_result else: logger.debug('salt-cloud query re...
[ "def ping_stack_hosts(stack):\n client = salt.client.LocalClient(\n settings.STACKDIO_CONFIG.salt_master_config\n )\n target = ' or '.join(\n [hd.hostname_template.format(namespace=stack.namespace,\n username=stack.owner.username,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an array, splits it in two parts at index i
def split_i(array:list, i:int) -> (list, list):
    if i==len(array)-1:
        return array[i], array[:-1]
    else:
        pre = array[0:i]
        post = array[i+1:]
        l = pre + post
        x = array[i]
        return x, l
[ "def split_half(arr):\n n = len(arr)\n half = int(math.ceil(n/2))\n return arr[0:half], arr[half:n+1]", "def tsplit(a):\n\n a = np.asarray(a)\n\n return np.array([a[..., x] for x in range(a.shape[-1])])", "def split_array(array, size):\n arrays = []\n while len(array) > size:\n pice ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing for proper beaker kwargs usage
def test_beaker_kwargs(self): css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True) from fixtures import beaker_container self.assertEqual(beaker_container, beaker_kwargs) css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True, b...
[ "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call first_move() after a good key was pressed. The good key will be saved in self.first_key
def wait_first_move(self): self.env.keyboard.listen_once(self.catch_key_first, key_down)
[ "def wait_second_move(self):\n self.qr_unregister()\n self.env.keyboard.listen_once(self.catch_key_second, key_down)", "def second_move(self):\n self.play_sound(self.second_key)\n self.end_move()", "def keyPressed(self, Event):\n if str(Event.keysym) == \"Up\":\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call second_move() after a good key was pressed. The good key will be saved in self.second_key
def wait_second_move(self):
    self.qr_unregister()
    self.env.keyboard.listen_once(self.catch_key_second, key_down)
[ "def second_move(self):\n self.play_sound(self.second_key)\n self.end_move()", "def wait_first_move(self):\n self.env.keyboard.listen_once(self.catch_key_first, key_down)", "def keyboard(key, x, y):\n global actions\n global mesg1\n \n if key == chr(27) or key == \"q\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Response to a second_key press.
def second_move(self):
    self.play_sound(self.second_key)
    self.end_move()
[ "def wait_second_move(self):\n self.qr_unregister()\n self.env.keyboard.listen_once(self.catch_key_second, key_down)", "def goto_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the taken off message\n\tprint a2, ' >> ', msg\n\tif msg != ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a histogram of the p-norms of the solutions
def plot_p_norm(p=2, bins=500):
    plt.title(f"{p}-norms of solutions for lattice point quaternion polynomials")
    plt.hist([sum(abs(x)**p for x in abcd)**(1/p) for abcd in solutions], bins=bins)
[ "def plot_imag_p_norm(p=2, bins=500):\n plt.title(f\"{p}-norms of imaginary parts of solutions to polynomials with quaternion coefficients\")\n plt.hist([sum(abs(x) ** p for x in abcd[1:]) ** (1 / p) for abcd in solutions], bins=bins)", "def plotVowelProportionHistogram(wordList, numBins=15):\r\n val, me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a histogram of the p-norms of the imaginary parts of the solutions
def plot_imag_p_norm(p=2, bins=500):
    plt.title(f"{p}-norms of imaginary parts of solutions to polynomials with quaternion coefficients")
    plt.hist([sum(abs(x) ** p for x in abcd[1:]) ** (1 / p) for abcd in solutions], bins=bins)
[ "def plot_p_norm(p=2, bins=500):\n plt.title(f\"{p}-norms of solutions for lattice point quaternion polynomials\")\n plt.hist([sum(abs(x)**p for x in abcd)**(1/p) for abcd in solutions], bins=bins)", "def plotVowelProportionHistogram(wordList, numBins=15):\r\n val, mean, sd = Bin(numBins)\r\n pylab.hi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
First Python implementation of the Levenshtein distance between strings
def levenshtein(s1, s2): if len(s1) < len(s2): return levenshtein(s2, s1) # len(s1) >= len(s2) if len(s2) == 0: return len(s1) previous_row = range(len(s2) + 1) for i, c1 in enumerate(s1): current_row = [i + 1] for j, c2 in enumerate(s2): insertions = pr...
[ "def calculate_levenshtein_distance(str_1, str_2):\n distance = 0\n buffer_removed = buffer_added = 0\n for x in ndiff(str_1, str_2):\n code = x[0]\n # Code ? is ignored as it does not translate to any modification\n if code == ' ':\n distance += max(buffer_removed, buffer_a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It is recommended that preprocessing has taken place before loading directly into the object class in iterable format, to avoid inconsistencies due to document delimitation from the punctuations. Upon calling the method `.fit()` on the corpus, collection sets on entities and PoS will be identified to harmonise the extracted r...
def fit(self, corpus: Union[str, Iterable[str]], sent_delim: str='\.\s+|\r|\n', preferred_spacy_core: str='en_core_web_sm' ) -> None: # Initialise corpus if type(corpus) == str: self.__corpus__ = [sent+'.' if ('\.' in sent_delim and sent[-1] != '.') else sent...
[ "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse relation triplets under the following conditions: 1. Remove triplets with pronouns and determiners in subj/obj, e.g. "we", "she", "I", "their", etc. 2. Harmonise duplicated triplets, returning only the superset triplet. Semantic comparison option using word mover distance & agglomerative clustering, FastText via Gensim ...
def parse_triplets(self, levenshtein_thold: float=20., coph_scr: float=2.) -> Iterable[dict]: # Remove pronoun and determiners parse_triples = [triple for triple in self.__triples_corpus__ if (triple['subject'] not in self.__pron_det_pos_words__ and ...
[ "def prune_triples(file, worker_id):\n buf_triples_count = 0\n buf_triples = \"\"\n labels = {}\n aliases = {}\n descriptions = {}\n wikipedia_mappings = {}\n inverse_wikipedia_mappings = {}\n with open(file, \"r\") as fp:\n line = fp.readline()\n while line:\n curre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set default config for the app
def configure_app(self, defaults: t.Optional[DefaultConfig]) -> None: self.config = Config(defaults)
[ "def set_app_default_config(app: AppT):\n # 详细的 config see: https://faust.readthedocs.io/en/latest/reference/faust.types.settings.html\n app.conf.web_enabled = False\n app.conf.logging_config = {\"level\": logging.INFO,\n \"format\": '%(asctime)s - [%(name)s,li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validating ProductsDataViewSet by giving Invalid data
def test_ProductsDataViewSet_with_post_Invalid_data(self): payload = { "name": "1234" } # Request the data by API call. response = self.client.post('/api/productsdata/', data=json.dumps(payload), content...
[ "def validate(self, data):\n if data.get('set_quantity') is not None and data.get('set_quantity') < 1 \\\n or data.get('entry_price') is not None and data.get('entry_price') < 0:\n raise serializers.ValidationError({\"error\": \"Check your input.\"})\n return data", "def va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validating ProductsDataViewSet using get request method
def test_ProductsDataViewSet_with_get_request(self): # Request the data by API call. response = self.client.get('/api/productsdata/') # Checking the response self.assertEqual(response.status_code, 200) self.assertEqual(response.json()['count'], 1) self.assertEqual(respon...
[ "def test_ProductsDataViewSet_with_get_request_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(\n self.product_id))\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validating ProductsDataViewSet using get request method with Id
def test_ProductsDataViewSet_with_get_request_id(self): # Request the data by API call. response = self.client.get('/api/productsdata/{}/'.format( self.product_id)) # Checking the response self.assertEqual(response.status_code, 200) self.assertIsNotNone(response.json...
[ "def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validating ProductsDataViewSet using get request method with Invalid Id
def test_ProductsDataViewSet_with_get_request_Invalid_id(self): # Request the data by API call. response = self.client.get('/api/productsdata/{}/'.format(-1)) # Checking the response self.assertEqual(response.status_code, 404) self.assertEqual(response.json()['detail'], 'Not fou...
[ "def test_ProductsDataViewSet_with_get_request_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(\n self.product_id))\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Map towers to a base-4 integer. Examples >>> state([[1], [3], [5, 4], [2]]) returns 668 = 0*4**0 + 3*4**1 + 1*4**2 + 2*4**3 + 2*4**4
def state(towers):
    ret = 0
    for i, row in enumerate(towers):
        for val in row:
            ret += i * 4**(val-1)
    return ret
[ "def NumToState(x):\n return {\n 0: 'AL',\n 1: 'AK',\n 2:'AZ',\n 3:'AR',\n 4:'CA',\n 5:'CO',\n 6:'CT',\n 7:'DE',\n 8:'FL',\n 9:'GA',\n 10:'HI',\n 11:'ID',\n 12:'IL',\n 13:'IN',\n 14:'IA',\n 15:'KS',\n 16:'KY',\n 17:'LA',\n 18:'ME',\n 19:'MD',\n 20:'MA',\n 21:'MI',\n 22:'MN',\n 23:'MS',\n 2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a CoverMultiWaySearchTree of n nodes with all the currency codes in the standard
def build_tree(n=None) -> CoverMultiWaySearchTree: tree = CoverMultiWaySearchTree() codes = [currency.code for currency in cur] shuffle(codes) currencies = [Currency(code) for code in codes] if n is None: n = len(currencies) for currency in currencies[:n]: tree[currency._code] = ...
[ "def ConstructTree(self):\n step = 0\n totalNodes = 0\n while step <= self.__steps:\n self.__nodes[step] = {}\n nUps = 0\n while nUps <= totalNodes:\n combins = BinomialOptionModel.__nCr(totalNodes, nUps)\n self.__nodes[step][nUps] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the number of items in nodes in range [a, b]
def get_number_of_useful_items(nodes, a: str, b: str) -> int: return sum(int(a <= item.key <= b) for node in nodes for item in node.elements)
[ "def f02_03_countElemBetween(l, a, b):\n return sum([a < x < b for x in l])", "def count_from_range(node: BinaryTree.Node, low: Any, hi: Any):\n if node is None:\n return 0\n return max(0, node.count - count_lower(node, low) - count_higher(node, hi))", "def count_entries(numbers):\n nodes = n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to compute the (k, c1, c2)-cover of the tree with the minimum number of nodes. It follows a greedy-like approach.
def compute_cover(tree: CoverMultiWaySearchTree, k: int, c1: str, c2: str) -> Optional[Set[CoverMultiWaySearchTree.Position.Node]]: # Step 1: Find nodes useful for the (k, c1, c2)-cover nodes = tree.find_nodes_in_range(c1, c2) # Step 2: Count number of items in range [c1, c2] n = get_...
[ "def bounded_search_tree_k_vertex_cover(graph, k):\n if not isinstance(graph, Graph):\n raise TypeError(\n 'the first argument of brute_force_k_vertex_cover must be an instance of Graph')\n if not isinstance(k, int) or k < 0 or len(graph.vertices) < k:\n raise Exception(\n 'k must be a a positive ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a 2D image position, lidar and camera, as well as the current scan message, localizes the pixel against the lidar data
def localize_pixel(img_pos,camera : Camera,lidar : Lidar, scan : LaserScan) -> tuple: # ---OBJ-- # x r1 /\ r2 x # / \ #cam_ray / \ average_ray # / \ # / \ # CAM ----> LID # # has to be 2d assert (img_pos.size == 2) cam_...
[ "def LKTrackerImageToImage(imageOld, pixelCoordsOld, imageNew,\n pixelCoordsNew, windowSize):\n # imageOld = cv2.cvtColor(imageOld, cv2.COLOR_BGR2GRAY)\n # imageNew = cv2.cvtColor(imageNew, cv2.COLOR_BGR2GRAY)\n\n # Get top left corner of window.\n\n topLeftX1, topLeftY1 = pixel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the no_cache cache control header is set on the response.
def test_no_cache(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_no_cache": True, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "no-cache"})
[ "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def set_header_no_cache():\n if sys.version_info[0] < 3:\n response.headers[\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the no_transform cache control header is set on the response.
def test_no_transform(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_no_transform": True, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "no-transform"})
[ "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def set_header_no_cache():\n if sys.version_info[0] < 3:\n response.headers[\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the must_revalidate cache control header is set on the response.
def test_must_revalidate(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_must_revalidate": True, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "must-revalidate"})
[ "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def test_proxy_revalidate(self):\n content = self.unique()\n self.assertViewBeha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the proxy_revalidate cache control header is set on the response.
def test_proxy_revalidate(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_proxy_revalidate": True, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "proxy-revalidate"})
[ "def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})", "def _may...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the max_age cache control header is set on the response.
def test_max_age(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_max_age": 1, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "max-age=1"})
[ "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the s_maxage cache control header is set on the response.
def test_s_maxage(self):
    content = self.unique()
    self.assertViewBehavior(
        {"cache_control_s_maxage": 1, "get": content},
        status_code=200,
        content=content,
        headers_exact={"Cache-Control": "s-maxage=1"})
[ "def test_max_age(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_max_age\": 1, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"max-age=1\"})", "def test_client_max_age_3600(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the behavior is disabled when cache_control is falsy.
def test_disabled(self):
    content = self.unique()
    self.assertViewBehavior({
        "cache_control": False,
        "cache_control_public": True,
        "get": content},
        status_code=200,
        content=content,
        headers_exclude="Cache-Control")
[ "def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"never_cache\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")", "def test_no_cache(self):\n content = self.uniq...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the default HTTP method name protection takes precedence and that no cache control headers are set on the response.
def test_precedence(self):
    self.assertViewBehavior(
        {"cache_control_public": True},
        status_code=405,
        headers_exclude="Cache-Control")
[ "def is_safe_method(self):\n safe_methods = ('GET', 'HEAD')\n return self.request.get('REQUEST_METHOD', 'GET').upper() in safe_methods", "def test_precedence(self):\n self.assertViewBehavior(\n status_code=405,\n headers_exclude=\"Cache-Control\")", "def filter_request...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the behavior is disabled when never_cache is falsy.
def test_disabled(self):
    content = self.unique()
    self.assertViewBehavior(
        {"never_cache": False, "get": content},
        status_code=200,
        content=content,
        headers_exclude="Cache-Control")
[ "def cache_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"cache_enabled\")", "def set_do_not_cache(self):\n\n self.allow_cache = False\n self.request._cache_update_cache = False", "def test_disabled(self):\n content = self.unique()\n self.assertView...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the default HTTP method name protection takes precedence and that no cache control headers are set on the response.
def test_precedence(self):
    self.assertViewBehavior(
        status_code=405,
        headers_exclude="Cache-Control")
[ "def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")", "def is_safe_method(self):\n safe_methods = ('GET', 'HEAD')\n return self.request.get('REQUEST_METHOD', 'GET')....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all command parsers
def get_all_command_parsers(self) -> None:
    for command in self.commands:
        self.get_command_parser(command)
[ "def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]", "def get_parsers(self):\n return tuple([getattr(self, '_{}'.format(i)) for i in self.parsers_available])", "def list_parsers(self, *args):\n print('==== Available parsing...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if controller has commands
def has_commands(self) -> bool: return len(self.commands) > 0
[ "def check_commands(self):\n pass", "def check_commands(self):\n while self.new_messages_number(\"control.execute\"):\n call=self.pop_message(\"control.execute\")\n call()", "def can_handle_pre_command(self) -> bool:\n return False", "def check_subsystem_commands(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all controller modules
def _get_modules(self) -> Dict[str, ModuleType]: modules = {} terminal_path = Path(openbb_terminal.__file__).parent for file in terminal_path.glob("**/*controller.py"): spec = spec_from_file_location(file.stem, file) if spec is not None and spec.loader is not None: ...
[ "def controllers(self) -> list:\n return self._controllers", "def get_modules(self) -> list:\n return self.config['modules']", "def get_modules(self):\n return self._modules", "def get_modules():\n app.logger.info('Searching for modules')\n data = modules_data()\n if data is None...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the ControllerDoc instance for a controller
def get_controller_doc(self, controller_name: str) -> ControllerDoc:
    if controller_name not in self.controller_docs:
        raise KeyError(f"Controller {controller_name} not found")
    return self.controller_docs[controller_name]
[ "def controller(self, controller):\n return self.controllers[controller.__name__]", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the mongodb session document or None
def _get_mongo_session(self, sid): return self.coll.find_one({'sid': sid})
[ "def __get_session(self):\n session = None\n try:\n session = self._srv.get_sessions('/')[0]\n except:\n pass\n return session", "def get_session(session_id):\n try:\n return datastore.Get(session_id)\n except datastore_errors.EntityNotFoundError:\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IEX Corporate Actions from the refdata endpoints
def get_iex_corporate_actions(start=None, **kwargs): return CorporateActions(start=start, **kwargs).fetch()
[ "def get_actions(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetActionsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IEX Dividends from the refdata endpoints
def get_iex_dividends(start=None, **kwargs): return Dividends(start=start, **kwargs).fetch()
[ "def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(term...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IEX Next Day Ex Date from the refdata endpoints
def get_iex_next_day_ex_date(start=None, **kwargs): return NextDay(start=start, **kwargs).fetch()
[ "def _get_date(self, relative_idx):\r\n return self.dl.dates[self._identified_date_id + relative_idx]", "def get_next_day(self):\n self.date += timedelta(days=1)\n return self.date", "def get_company_going_ex_date(self, days_fr_cur):\n ex_date_df = self.sgx_div_ex_date_df.copy()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IEX Listed Symbol Directory from the refdata endpoints
def get_iex_listed_symbol_dir(start=None, **kwargs): return ListedSymbolDir(start=start, **kwargs).fetch()
[ "def getManagedUriList(self):\n if self.__meta:\n return self.__meta.getUriList()\n r = []\n for i in self.prefixes:\n a = glob(toFs(os.path.join(fromFs(i),'db',th_ext_glob)))\n p = [fromFs(j) for j in a]\n r.extend(p)\n return r", "def getRe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configures CUDA environment variable and returns tensorflow GPU config.
def set_gpu(gpu):
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    return tf_config
[ "def get_configuration():\n config_args = dict(\n gpu_options=tf.GPUOptions(\n allow_growth=config.TF_GPU_ALLOW_GROWTH,\n per_process_gpu_memory_fraction=config.TF_GPU_MEM_FRAC,\n ),\n log_device_placement=config.TF_LOG_DEVICE_PLACEMENT,\n )\n if not config.TF_USE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create baseline convolutional recurrent model. Arguments
def create_baseline_model(filters, gru_units, dropout, bias, mels, nb_classes): inp = Input(shape=(259, mels, 1)) x = Conv2D(filters, (3,3), padding='same', activation='relu', use_bias=bias)(inp) x = MaxPooling2D(pool_size=(1,5))(x) x = Conv2D(filters, (3,3), padding='same', activation='relu', use_bias=...
[ "def make_baseline(cls: Type['ResNet'], *, d_in: int, n_blocks: int, d_main: int, d_hidden: int, dropout_first: float, dropout_second: float, d_out: int) ->'ResNet':\n return cls(d_in=d_in, n_blocks=n_blocks, d_main=d_main, d_hidden=d_hidden, dropout_first=dropout_first, dropout_second=dropout_second, normal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot and save the ROC with AUC value. Arguments
def plot_ROC(model, x_test, y_test, save_folder): predicted = model.predict(x_test).ravel() actual = y_test.ravel() fpr, tpr, thresholds = roc_curve(actual, predicted, pos_label=None) roc_auc = auc(fpr, tpr) plt.title('Test ROC AUC') plt.plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc) pl...
[ "def plot_roc_acc(self,x_test,y_test):\n if self.validated:\n preds = self.model_cv.predict_proba(x_test[self.feats])\n fpr, tpr, thresholds = metrics.roc_curve(y_test, preds[:,1])\n auc = metrics.roc_auc_score(y_test, preds[:,1])\n\n # get accuracy of class0 and c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves the network architecture as a .txt file. Arguments
def save_arch(model, save_folder):
    with open(save_folder + '/architecture.txt','w') as a_save:
        model.summary(print_fn=lambda x: a_save.write(x + '\n'))
[ "def _save_architecture(self, filename, ensemble):\n\n architecture = [\n \"{}:{}\".format(w.iteration_number, w.name)\n for w in ensemble.weighted_subnetworks\n ]\n # Make directories since model_dir may not have been created yet.\n tf.gfile.MakeDirs(os.path.dirname(filename))\n with t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute and store the QC metrics. Runs the QC on the session and stores a map of the metrics for each datapoint for each test, and a map of which datapoints passed for each test
def compute(self, **kwargs): if self.extractor is None: kwargs['download_data'] = kwargs.pop('download_data', self.download_data) self.load_data(**kwargs) self.log.info(f"Session {self.session_path}: Running QC on behavior data...") self.metrics, self.passed = get_bpodqc_...
[ "def compute(self, download_data=None):\n if self.extractor is None:\n # If download_data is None, decide based on whether eid or session path was provided\n ensure_data = self.download_data if download_data is None else download_data\n self.load_data(download_data=ensure_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a dictionary of results, computes the overall session QC for each key and aggregates them into a single value
def compute_session_status_from_dict(results): indices = np.zeros(len(results), dtype=int) for i, k in enumerate(results): if k in TaskQC.criteria.keys(): indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria[k]) else: indices[i]...
[ "def compute_metrics(self, results: list) -> dict:", "def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the overall session QC for each key and aggregates them into a single value
def compute_session_status(self): if self.passed is None: raise AttributeError('passed is None; compute QC first') # Get mean passed of each check, or None if passed is None or all NaN results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v) for k, v...
[ "def compute_session_status_from_dict(results):\n indices = np.zeros(len(results), dtype=int)\n for i, k in enumerate(results):\n if k in TaskQC.criteria.keys():\n indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria[k])\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute and store the QC metrics. Runs the QC on the session and stores a map of the metrics for each datapoint for each test, and a map of which datapoints passed for each test
def compute(self, download_data=None): if self.extractor is None: # If download_data is None, decide based on whether eid or session path was provided ensure_data = self.download_data if download_data is None else download_data self.load_data(download_data=ensure_data) ...
[ "def compute(self, **kwargs):\n if self.extractor is None:\n kwargs['download_data'] = kwargs.pop('download_data', self.download_data)\n self.load_data(**kwargs)\n self.log.info(f\"Session {self.session_path}: Running QC on behavior data...\")\n self.metrics, self.passed =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates all the QC metric functions in this module (those starting with 'check') and returns the results. The optional kwargs listed below are passed to each QC metric function.
def get_bpodqc_metrics_frame(data, **kwargs): def is_metric(x): return isfunction(x) and x.__name__.startswith('check_') # Find all methods that begin with 'check_' checks = getmembers(sys.modules[__name__], is_metric) prefix = '_task_' # Extended QC fields will start with this # Method 'ch...
[ "def run(self) -> None:\n self._does_apply = self.run_precondition()\n if not self._does_apply:\n self._check_result.status = CheckStatus.DOES_NOT_APPLY\n return\n\n self.calc_statistics()\n\n for statistic in self._check_result.statistics:\n\n if statist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the time difference between the onset of the visual stimulus and the onset of the go cue tone is positive and less than 10ms.
def check_stimOn_goCue_delays(data, **_): # Calculate the difference between stimOn and goCue times. # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. metric = np.nan_to_num(data["goCue_times"] - data["stimOn_times"], nan=np.inf) passed = (metric < 0.01) & (met...
[ "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def validate_gps_time(self):\n \n t_diff = np.zeros_like(self.gps_stamps['time'])\n \n for ii in range(len(t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the time difference between the response and the feedback onset (error sound or valve) is positive and less than 10ms.
def check_response_feedback_delays(data, **_):
    metric = np.nan_to_num(data["feedback_times"] - data["response_times"], nan=np.inf)
    passed = (metric < 0.01) & (metric > 0)
    assert data["intervals"].shape[0] == len(metric) == len(passed)
    return metric, passed
[ "def checkTenMilliSecondsDuration(self,mesg):\n\t\tif mesg.isRootTransactionEnd():\n\t\t\tif not float(mesg.getCalDuration())>10:\n\t\t\t\tce=CalError(\"E13\")\n\t\t\t\tce.printError([mesg])", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the time difference between the visual stimulus freezing and the response is positive and less than 100ms.
def check_response_stimFreeze_delays(data, **_): # Calculate the difference between stimOn and goCue times. # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. metric = np.nan_to_num(data["stimFreeze_times"] - data["response_times"], nan=np.inf) # Test for valid ...
[ "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def _check_delay(self):\n if self._previous_request_at:\n dif = round(time.time() - self._previous_request_at,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the wheel does move within 100ms of the feedback onset (error sound or valve).
def check_wheel_move_before_feedback(data, **_): # Get tuple of wheel times and positions within 100ms of feedback traces = traces_by_trial( data["wheel_timestamps"], data["wheel_position"], start=data["feedback_times"] - 0.05, end=data["feedback_times"] + 0.05, ) metric ...
[ "def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the wheel moves by approximately 35 degrees during the closed-loop period on trials where feedback (error sound or valve) is delivered.
def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):
    # Get the Bpod extracted wheel data
    timestamps = data['wheel_timestamps']
    position = data['wheel_position']
    return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)
[ "def check_wheel_move_before_feedback(data, **_):\n # Get tuple of wheel times and positions within 100ms of feedback\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=data[\"feedback_times\"] - 0.05,\n end=data[\"feedback_times\"] + 0.05...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the wheel moves by approximately 35 degrees during the closed-loop period on trials where feedback (error sound or valve) is delivered. This check uses the Bpod wheel data (measured at a lower resolution) with a stricter tolerance (1 visual degree).
def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_): # Get the Bpod extracted wheel data timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps']) position = data.get('wheel_position_bpod', data['wheel_position']) return _wheel_move_during_closed_loop(timestamps, pos...
[ "def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)", "def check_wheel_move_befor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the wheel does not move more than 2 degrees in each direction during the quiescence interval before the stimulus appears.
def check_wheel_freeze_during_quiescence(data, **_): assert np.all(np.diff(data["wheel_timestamps"]) >= 0) assert data["quiescence"].size == data["stimOnTrigger_times"].size # Get tuple of wheel times and positions over each trial's quiescence period qevt_start_times = data["stimOnTrigger_times"] - data...
[ "def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the number of events per trial is correct. Within every trial interval there should be one of each trial event, except for goCueTrigger_times, which should only be defined for incorrect trials
def check_n_trial_events(data, **_): intervals = data['intervals'] correct = data['correct'] err_trig = data['errorCueTrigger_times'] # Exclude these fields; valve and errorCue times are the same as feedback_times and we must # test errorCueTrigger_times separately # stimFreeze_times fails oft...
[ "def check_stimOn_goCue_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"stimOn_times\"], nan=np.inf)\n passed = (metric <...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the time difference between the visual stimulus offset command being triggered and the visual stimulus effectively turning off on the screen is smaller than 150 ms.
def check_stimOff_delays(data, **_):
    metric = np.nan_to_num(data["stimOff_times"] - data["stimOffTrigger_times"], nan=np.inf)
    passed = (metric <= 0.15) & (metric > 0)
    assert data["intervals"].shape[0] == len(metric) == len(passed)
    return metric, passed
[ "def check_command_validity(self):\n if rospy.Time.now() - self.last_time > rospy.Duration(2):\n self.speed = 0\n self.steering = 0", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the time difference between the visual stimulus freeze command being triggered and the visual stimulus effectively freezing on the screen is smaller than 150 ms.
def check_stimFreeze_delays(data, **_):
    metric = np.nan_to_num(data["stimFreeze_times"] - data["stimFreezeTrigger_times"], nan=np.inf)
    passed = (metric <= 0.15) & (metric > 0)
    assert data["intervals"].shape[0] == len(metric) == len(passed)
    return metric, passed
[ "def _checkUiFreeze(self):\r\n\r\n motionCountBefore = core.FW_conf['blackbox'].getCountMotionFrames()\r\n\r\n # swipe a bit to see if it causes motion\r\n yCoordinate = int(self.phone.uiState.getScreenHeight()/1.5)\r\n self.phone._touch.drawLine((self.phone.uiState.getScreenWidth()-2, y...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the reward volume is between 1.5 and 3 uL for correct trials, 0 for incorrect.
def check_reward_volumes(data, **_): metric = data['rewardVolume'] correct = data['correct'] passed = np.zeros_like(metric, dtype=bool) # Check correct trials within correct range passed[correct] = (1.5 <= metric[correct]) & (metric[correct] <= 3.) # Check incorrect trials are 0 passed[~corr...
[ "def check_reward_volume_set(data, **_):\n metric = data[\"rewardVolume\"]\n passed = 0 < len(set(metric)) <= 2 and 0. in metric\n return metric, passed", "def reward_threshold(self) -> Optional[float]:", "def acquisition_function_expected_volume_removal(\n gp_reward_model: BasicGPRewardModel,\n) ->...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that there are only two reward volumes within a session, one of which is 0.
def check_reward_volume_set(data, **_):
    metric = data["rewardVolume"]
    passed = 0 < len(set(metric)) <= 2 and 0. in metric
    return metric, passed
[ "def check_reward_volumes(data, **_):\n metric = data['rewardVolume']\n correct = data['correct']\n passed = np.zeros_like(metric, dtype=bool)\n # Check correct trials within correct range\n passed[correct] = (1.5 <= metric[correct]) & (metric[correct] <= 3.)\n # Check incorrect trials are 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the difference between wheel position samples is close to the encoder resolution and that the wheel timestamps strictly increase.
def check_wheel_integrity(data, re_encoding='X1', enc_res=None, **_): if isinstance(re_encoding, str): re_encoding = int(re_encoding[-1]) # The expected difference between samples in the extracted units resolution = 1 / (enc_res or ephys_fpga.WHEEL_TICKS ) * np.pi * 2 * ephys_f...
[ "def check_wheel_freeze_during_quiescence(data, **_):\n assert np.all(np.diff(data[\"wheel_timestamps\"]) >= 0)\n assert data[\"quiescence\"].size == data[\"stimOnTrigger_times\"].size\n # Get tuple of wheel times and positions over each trial's quiescence period\n qevt_start_times = data[\"stimOnTrigge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that there are no audio outputs between the start of the trial and the go cue sound onset minus 20 ms.
def check_audio_pre_trial(data, audio=None, **_): if audio is None: _log.warning("No BNC2 input in function call, retuning None") return None s = audio["times"][~np.isnan(audio["times"])] # Audio TTLs with NaNs removed metric = np.array([], dtype=np.int8) for i, c in zip(data["intervals...
[ "def check_audio(self): #jordan\n try:\n audio = self.scene.sequence_editor.sequences_all\n except:\n print(\"ERROR: No Audio\") # not suppose to happen", "def quick_check(self):\n #loop three times and moce the servo \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the VTK version.
def vtk_version_ok(major, minor, build): requested_version = (100 * int(major) + int(minor)) * 100000000 + int(build) ver = vtkVersion() actual_version = (100 * ver.GetVTKMajorVersion() + ver.GetVTKMinorVersion()) \ * 100000000 + ver.GetVTKBuildVersion() if actual_version >= request...
[ "def vtk_version_ok(major, minor, build):\n needed_version = 10000000000 * int(major) + 100000000 * int(minor) + int(build)\n try:\n vtk_version_number = VTK_VERSION_NUMBER\n except AttributeError: # as error:\n ver = vtkVersion()\n vtk_version_number = 10000000000 * ver.GetVTKMajorVe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to read a file from subversion for inclusion in the wiki.
def GoogleCode_ReadSVNFile(wikifier, domain, path, start, end):
    gcurl = "http://%s.googlecode.com/svn/trunk/%s" % (domain,path)
    fdata = urllib.urlopen(gcurl).readlines()
    return gcurl, fdata[start-1:end]
[ "def test_get_file_with_svn_and_revision(self):\n self._test_get_file(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123')", "def svn_ra_get_file(*args):\r\n return _ra.svn_ra_get_file(*args)", "def svn_fs_file_content...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the distance between the weights vector of the node and a given vector.
def get_distance(self, vec):
    sum = 0
    if len(self.weights) == len(vec):
        for i in range(len(vec)):
            sum += (self.weights[i] - vec[i]) * (self.weights[i] - vec[i])
        return np.sqrt(sum)
    else:
        sys.exit("Error: dimension of nodes != input data...
[ "def distance(self, vector, gmst):\n return self.vector(gmst).multiply(-1).add(vector).length()", "def test_distances_with_vector_input(self):\n input_vector = self.vectors['dog.n.01']\n distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Game Object that was added.
def added_game_object(self) -> GameObject: return self._added_game_object
[ "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def added_game_object_id(self) -> int:\n return CommonObjectUtils.get_object_id(self.added_game_object)", "def added_object_guid(self) -> int:\n return CommonObjectUtils.get_object_guid(self.added_game_object)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The decimal identifier of the Game Object that was added.
def added_game_object_id(self) -> int: return CommonObjectUtils.get_object_id(self.added_game_object)
[ "def added_object_guid(self) -> int:\n return CommonObjectUtils.get_object_guid(self.added_game_object)", "def ID(self) -> int:", "def carrying_object_id(self) -> int:\n return self._carrying_object_id", "def object_identifier(self) -> str:\n return self._object_identifier", "def dot_id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The guid identifier of the Game Object that was added.
def added_object_guid(self) -> int: return CommonObjectUtils.get_object_guid(self.added_game_object)
[ "def added_game_object_id(self) -> int:\n return CommonObjectUtils.get_object_id(self.added_game_object)", "def guid(self):\n return self._guid", "def Guid(self) -> _n_0_t_7:", "def object_id(self) -> str:\n return self._event.get('object_id')", "def get_id(self):\n return self.u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the current voltage.
def voltage(self): return self._voltage
[ "def get_voltage(self):\n return self._ina220.get('VOLTAGE').reading", "def voltage(self):\n raw_value = self.aio.read()\n return raw_value / 4095.0 * 5.0", "def get_voltage(self):\n print(\"voici le voltage de la batterie\")", "def voltage(self) -> float:\n pass", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the thrust force for the given command.
def get_thrust_value(self, command): return self._gain * numpy.abs(command) * command
[ "def accelThrust(torque):\n return (torque * gearRatio * driveEfficiency) / tireRadius", "def calculate_force(self):\n pass", "def __call__(self, t, h):\n # Evaluate the force value\n fs = self.fs(t)\n\n # Compute force\n return fs * self.A * (0.25 * self.rho_ice * self.g *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a pydicom.FileDataset from the instance's Orthanc identifier
def get_pydicom(orthanc: Orthanc, instance_identifier: str) -> pydicom.FileDataset:
    dicom_bytes = orthanc.get_instances_id_file(instance_identifier)
    return pydicom.dcmread(BytesIO(dicom_bytes))
[ "def get_pydicom(self) -> pydicom.FileDataset:\n return util.get_pydicom(self.client, self.id_)", "def get_dataset(self, identifier):\n # Test if a subfolder for the given dataset identifier exists. If not\n # return None.\n dataset_dir = self.get_dataset_dir(identifier)\n if no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rasterize a collection of lon,lat shapes onto a DLTile.
def rasterize_shape( tile: Tile, shapes: AnyShapes, values: Sequence[int] = None, out: np.ndarray = None, mode="burn", dtype=np.byte, shape_coords="lonlat", all_touched=False, ) -> np.ndarray: shapes = normalize_polygons(shapes) if values is None: if mode == "burn": ...
[ "def rasterize(shapes, coords, fill=np.nan, **kwargs):\n from rasterio import features\n transform = transform_from_latlon(coords['lat'], coords['lon'])\n out_shape = (len(coords['lat']), len(coords['lon']))\n raster = features.rasterize(shapes, out_shape=out_shape,\n fill...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a PyUSB device for the Luxafor Flag. Will lazy load the device as necessary.
def get_device(l):
    if not l.device:
        l.device = find_device()
        setup_device(l.device)
    return l.device
[ "def _get_device(self, dev):\n\n if isinstance(dev, ctypes.c_long):\n dev = dev.value\n\n for device in self.devices:\n if device.dev == dev:\n return device\n\n return None", "def get_usb_device(self, nIndex):\n\t\treturn handle_to_object(call_sdk_functio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send values to the device. Expects the values to be a List of command byte codes. Refer to the individual commands for more information on the specific command codes.
def write(l, values):
    l.get_device().write(1, values)
    # Sometimes the flag simply ignores the command. Unknown if this
    # is an issue with PyUSB or the flag itself. But sending the
    # command again works a treat.
    l.get_device().write(1, values)
[ "def send(self, command_list):\n self.next_engine.receive(command_list)", "def send_command(self, value):\n self._cbmif.send_dlm(value)", "def _send_multiple(self, what, values, address, **kwargs):\n\n raise NotImplementedError('Multiple sending is not yet implemented for Modbus')", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fade a single LED or multiple LEDs from their current colour to a new colour for the supplied duration.
def do_fade_colour(l, leds, r, g, b, duration): l._do_multi_led_command( create_fade_colour_command, leds, r, g, b, duration )
[ "def color_fade(bt, col1, col2, duration=100):\n set_static_color(bt, col1)\n delta = [col2.red - col1.red, col2.green -\n col1.green, col2.blue - col1.blue]\n\n RGB = namedtuple('RGB', 'red, green, blue')\n red = col1.red\n green = col1.green\n blue = col1.blue\n wait_time = durati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Animate the flag with a wave pattern of the given type, using the specified colour, duration and number of times to repeat.
def do_wave(l, wave_type, r, g, b, duration, repeat):
    command = create_wave_command(
        wave_type, r, g, b, duration, repeat
    )
    l.write(command)
[ "def animate_to(number, color):\n for _ in range(10):\n trellis.pixels.fill((0, 0, 0))\n display_number(random.randint(10, 99), color)\n time.sleep(0.1)\n trellis.pixels.fill((0, 0, 0))\n display_number(number, color)", "def flash_red(self, duration=0.2):\n self.pen_color = wx...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor for the SSH Timeout Exception class
def __init__(self, message="Remote operation timeout"): super(SshTimeout, self).__init__(message)
[ "def ansible_ssh_timeout(self):", "def __init__(self):\n\t\tself.name = 'timeout'\n\t\tsuper(Timeout, self).__init__([])", "def test_timeout_elapsed_exception(self):\n deadline = Deadline(-MS)\n with self.assertRaises(TimeoutError):\n deadline.timeout()", "def raise_timeout(self, *arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a shell connector from a machine info object
def from_info(cls, info, user='root'): conn = None if not isinstance(info, MachineInfo): raise TypeError('info must be a MachineInfo') if user == 'cmuser': conn = cls(info.ip, 22, info.operator_user, info.operator_password, '') else: ...
[ "def test_shell_create(self):\n type(self).shell = self.shell_class()\n assert self.shell", "def createMachine():\n cd('/')\n machine = create(machineName, 'UnixMachine')\n cd('Machines/'+machineName+'/NodeManager/'+machineName)\n cmo.setName(machineName)\n cmo.setListenAddress(hostna...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the text labels that display the slider values
def updateLabels(self): # Intensity range self.minIntensityLabel.setText("Intensity: "+str(self.ABsettings["intensity_range"][0]).rjust(3)) self.labelMaxInt.setText(str(self.ABsettings["intensity_range"][1]).ljust(3)) # Z range self.minZLabel.setText("Z range: "+str(self.ABsettin...
[ "def _slider_change(self, slider, value):\n self.slider_label.text = str(float(value))", "def UpdateLabel(self) -> _n_6_t_0:", "def updatePicSliderValues(self):\n\t\tself.picCurrentWidthValue.setText(str(self.picWidthSlider.value()))\n\t\tself.picCurrentHeightValue.setText(str(self.picHeightSlider.value(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode a string into a numpy.ndarray using UTF-32.
def array_encode(s): return np.frombuffer(s.encode('utf32'), dtype=np.int32, offset=4)
[ "def to_np_unicode(string):\n # TODO: what's the clean of doing this with numpy?\n # It really wants to create a zero-d Un array here\n # which breaks the assignment below and we end up\n # with n copies of the first char.\n n = len(string)\n np_string = np.zeros(n, dtype=\"U\")\n for j in rang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test handling a poorly implemented locate_module method.
def test_handling_wrong_locate_module_implementation(method):
    loader = WrongEnamlImporter()
    with pytest.raises(ImportError):
        getattr(loader, method)('module_name')
[ "def test___find_corresponding_module_for_location_exceptions(self):\r\n # pylint: disable=protected-access\r\n with self.assertRaises(ItemNotFoundError):\r\n self.peer_grading._find_corresponding_module_for_location(\r\n Location('org', 'course', 'run', 'category', 'name', '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an enaml module in a tempdir and add it to sys.path.
def enaml_module(tmpdir):
    name = '__enaml_test_module__'
    folder = str(tmpdir)
    path = os.path.join(folder, name + '.enaml')
    with open(path, 'w') as f:
        f.write(SOURCE)
    sys.path.append(folder)
    yield name, folder, path
    sys.path.remove(folder)
    if name in sys.modules:
        del sys...
[ "def tmp_module(tmpdir, mock_cwd):\n mock_cwd()\n sys.path.insert(0, str(tmpdir))\n\n @no_absolute_path\n def _create_temp_module(path, content=None, invalid=False):\n\n tmp_path = pathlib.PosixPath(tmpdir)\n try:\n path = path.relative_to(tmp_path)\n except ValueError:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test importing a module and checking that the cache was generated.
def test_import_and_cache_generation(enaml_module): name, folder, _ = enaml_module with imports(): importlib.import_module(name) assert name in sys.modules # Check that the module attributes are properly populated mod = sys.modules[name] assert mod.__name__ == name assert mod.__fil...
[ "def test_import_when_cache_exists(enaml_module):\n name, folder, _ = enaml_module\n assert name not in sys.modules\n with imports():\n importlib.import_module(name)\n\n assert name in sys.modules\n del sys.modules[name]\n\n cache_folder = os.path.join(folder, '__enamlcache__')\n assert ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test importing a module when the cache exists.
def test_import_when_cache_exists(enaml_module): name, folder, _ = enaml_module assert name not in sys.modules with imports(): importlib.import_module(name) assert name in sys.modules del sys.modules[name] cache_folder = os.path.join(folder, '__enamlcache__') assert os.path.isdir(c...
[ "def test_cached_is_false_before_set():\n cache = ModuleCache()\n name = 'test'\n path = '/some/path/to/test'\n assert cache.cached(name=name, path=path) is False", "def _import_module(self, name):\r\n try:\r\n __import__(name)\r\n return True\r\n except ImportError:\r\n return Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Standard enaml importer whose state is restored after testing.
def enaml_importer(): print(imports, dir(imports)) old = imports.get_importers() yield imports imports._imports__importers = old
[ "def test_importer_management(enaml_importer):\n standard_importers_numbers = len(enaml_importer.get_importers())\n enaml_importer.add_importer(WrongEnamlImporter)\n assert WrongEnamlImporter in enaml_importer.get_importers()\n enaml_importer.add_importer(WrongEnamlImporter)\n assert (len(enaml_impor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test managing manually enaml importers.
def test_importer_management(enaml_importer): standard_importers_numbers = len(enaml_importer.get_importers()) enaml_importer.add_importer(WrongEnamlImporter) assert WrongEnamlImporter in enaml_importer.get_importers() enaml_importer.add_importer(WrongEnamlImporter) assert (len(enaml_importer.get_im...
[ "def enaml_importer():\n print(imports, dir(imports))\n old = imports.get_importers()\n\n yield imports\n\n imports._imports__importers = old", "def test_import_and_cache_generation(enaml_module):\n name, folder, _ = enaml_module\n with imports():\n importlib.import_module(name)\n\n as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Convert a Pico detection to a menpo.shape.PointDirectedGraph. This enforces a particular point ordering. The Pico detections are circles with a given diameter. Here we convert them to the tighest possible bounding box around the circle. No orientaton is currently provided.
def pointgraph_from_circle(fitting): diameter = fitting.diameter radius = diameter / 2.0 y, x = fitting.center y -= radius x -= radius return bounding_box((y, x), (y + diameter, x + diameter))
[ "def _order_points(self, pts):\n # initialzie a list of coordinates that will be ordered such that \n # 1st point -> Top left\n # 2nd point -> Top right\n # 3rd point -> Bottom right\n # 4th point -> Bottom left\n rect = np.zeros((4, 2), dtype=\"float32\")\n\n # the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes the horizontal box with buttons
def makeButtons(self): self.but_run = QtWidgets.QPushButton('Run') self.but_status = QtWidgets.QPushButton('Status') self.but_brow = QtWidgets.QPushButton('View') self.but_remove = QtWidgets.QPushButton('Remove files') self.hboxB = QtWidgets.QHBoxLayout() self.h...
[ "def button_box(self):\n button_box = Gtk.Box()\n button_box.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(.1, .1, .1, .1))\n self.prev_button = Gtk.Button.new_with_label(\"<<\")\n self.next_button = Gtk.Button.new_with_label(\">>\")\n self.prev_button.connect(\"clicked...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Integration test that the logger will raise an exception if the account does not exist.
def test_configure_no_account(self): config = self._getConfiguration() account = u'no-such-account' logger = manufacture.makeLogger() with self.assertRaises(UtilsError) as context: logger.configure(configuration=config, account=account) self.assertEqual(u'1026', con...
[ "def test_log_error(self):\n self.assertEqual(None, self.logger.error(\"test log error\", \"23\"))", "def test008_enable_non_exist_account(self):\n self.log.info(\"Enable non-exist account, should fail.\")\n random_account= random.randint(3000,5000)\n response= self.api.cloudbroker.acc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
def _extract_archive(file_path, path=".", archive_format="auto"): if archive_format is None: return False if archive_format == "auto": archive_format = ["tar", "zip"] if isinstance(archive_format, six.string_types): archive_format = [archive_format] for archive_type in archive_f...
[ "def _extract_archive(filepath, path='.', archive_format='auto'):\n if archive_format is None:\n return False\n if archive_format == 'auto':\n archive_format = ['tar', 'zip']\n if isinstance(archive_format, six.string_types):\n archive_format = [archive_format]\n\n for archive_type ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Minimization of a scalar function of one or more variables using parallel CMAES retry.
def minimize(fun, bounds = None, value_limit = math.inf, num_retries = 1000, logger = None, workers = mp.cpu_count(), popsize = 31, max_evaluations = 50000, capacity = 500, stop_fittness = None, ...
[ "def minimize(self, cost_function:Callable, initial_params:np.ndarray, **kwargs) -> OptimizeResult:\n raise NotImplementedError", "def minimize(fun, *args, init=None, init_tries=1, opt_tries=1, verbose=False, **kwargs):\n if \"x0\" in kwargs:\n raise ValueError(\"Provide initialization function (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sorts all store entries, keeping only the best 90% to make room for new ones.
def sort(self): # sort all entries to make room for new ones, determine best and worst ns = self.num_stored.value ys = np.asarray(self.ys[:ns]) yi = ys.argsort() sortRuns = [] for i in range(len(yi)): y = ys[yi[i]] xs = self.get_x(yi[i]) sortRu...
[ "def sort_and_reduce(self):\n self.data = sorted(self.data, key=lambda item: item.pubDate)\n if len(self.data) > MAX_SIZE:\n self.data = self.data[-MAX_SIZE:]", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'],...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates hash value of the state and its children.
def hash(self, hashed_states=None): if hashed_states is None: hashed_states = [] hashed_states.append(self) result = '1' if self.final else '0' result += str(len(self.children)) for symbol in self.children: child = self.children[symbol] if chi...
[ "def hash_children(self):\n if len(self.children)>0:\n return hash(tuple(self.children))\n else:\n return 0", "def _hash(self, value, get_val, get_child):\n hasher = getattr(hashlib, self.hash_func)\n children = get_child(value)\n\n # If leaf node\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies state and its children. Ignores parents.
def copy(self): new_state = State(self.final) for symbol in self.children: child = self.children[symbol] new_state.add_child(child.copy(), symbol) return new_state
[ "def clone_state(self):\n return self.strategy['state_handler'].clone(self.state)", "def copy( self ):\n\n\t\treturn State( **self.__dict__ )", "def copy(self, state):\n new_state = []\n for tower in state:\n new_tower = []\n for value in tower:\n new_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log in to the APICEM northbound APIs in a shell.
def login(): try: client = NbClientManager( server=APIC, username=APIC_USER, password=APIC_PASSWORD, connect=True) return client except requests.exceptions.HTTPError as exc_info: if exc_info.response.status_code == 401: ...
[ "def login():\n api_key = ibm_config[\"api_key\"]\n login_cmd = f\"ibmcloud login --apikey {api_key}\"\n account_id = ibm_config.get(\"account_id\")\n if account_id:\n login_cmd += f\" -c {account_id}\"\n region = config.ENV_DATA.get(\"region\")\n if region:\n login_cmd += f\" -r {re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches normal-profile posts (N_post = normal profile posts) from the database, ordered by reverse id; my_normal_post_lists refers to the posts fetched from the database.
def get(self, request, *args, **kwargs): my_normal_post_lists = NormalPosts.objects.filter(uploded_by=request.user.normalprofile).order_by("-id") return render(request, self.template_name, { 'my_normal_post_lists': my_normal_post_lists, })
[ "def load_posts(self):\n self.execute(f\"\"\"\n SELECT post.id, post.thread_id, post.root_post_id, post.user_id, post.body, post.anonymous_post, post.created_at\n FROM post\n INNER JOIN thread ON post.thread_id = thread.id\n WHERE post.thread_id = '{self.id}'\n \"\"\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check a given line to see if a move is valid. Return the squares that will change if the move is valid, otherwise an empty list.
def valid_line(board, x, y, dx, dy): if not 0 <= x + dx + dx < 8: return [] if not 0 <= y + dy + dy < 8: return [] coords_1 = board.columns[x+dx] + board.rows[y+dy] coords_2 = board.columns[x+dx+dx] + board.rows[y+dy+dy] if board[coords_1] != -self.color: # If the neighbour square...
[ "def checkWin(self, player) -> List[int]:\n # current = player\n opponent = GoBoardUtil.opponent(player)\n winning_moves = []\n for line in self.lines:\n for i in range(len(line) - 4):\n emptyPos = -1\n for pos in line[i: i + 5]: # get five conse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given key is valid, with the format XY. X is the column, between A and H, and Y is the row, between 1 and 8.
def _is_valid_key(self, key): # If the key is not a string if not isinstance(key, str): return False else: key = str.upper(key) # If the given key does not match the standard notation XY if len(key) != 2: return False # If the key is out of the board if key[0] not in self.columns or key[...
[ "def _checkKey(self, key):\n x, y = self._convertNegativeTupleKeyToPositiveTupleKey(key)\n return x, y", "def _validate_key(self, key):\n if key == () and len(self.kdims) == 0: return ()\n key = util.wrap_tuple(key)\n assert len(key) == len(self.kdims)\n for ind, val in e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }