Columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict)
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object
def unwrap(self):
    if self.algorithm == 'rsa':
        return self.asn1['private_key'].parsed

    if self.algorithm == 'dsa':
        params = self.asn1['private_key_algorithm']['parameters']
        return DSAPrivateKey({
            'version': 0,
            'p': params['p'],
            'q': params['q'],
            'g': params['g'],
            'public_key': self.public_key.unwrap(),
            'private_key': self.asn1['private_key'].parsed,
        })

    if self.algorithm == 'ec':
        output = self.asn1['private_key'].parsed
        output['parameters'] = self.asn1['private_key_algorithm']['parameters']
        output['public_key'] = self.public_key.unwrap()
        return output
[ "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object
def unwrap(self):
    if self.algorithm == 'ec':
        return self.asn1['public_key']
    return self.asn1['public_key'].parsed
[ "def parse_public_key( cls, public_key, lib):\n\n extra = {}\n try:\n # key literal?\n pubkey = CryptoKey.importKey( public_key )\n lib.public_key = pubkey.exportKey()\n return lib.public_key, extra\n except:\n # not a key literal\n # pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey.
def _unwrap_private_key_info(key_info):
    key_alg = key_info.algorithm

    if key_alg == 'rsa' or key_alg == 'rsassa_pss':
        return key_info['private_key'].parsed

    if key_alg == 'dsa':
        params = key_info['private_key_algorithm']['parameters']
        parsed = key_info['private_key'].parsed
        return DSAPrivateKey({
            'version': 0,
            'p': params['p'],
            'q': params['q'],
            'g': params['g'],
            'public_key': Integer(pow(
                params['g'].native,
                parsed.native,
                params['p'].native
            )),
            'private_key': parsed,
        })

    if key_alg == 'ec':
        parsed = key_info['private_key'].parsed
        parsed['parameters'] = key_info['private_key_algorithm']['parameters']
        return parsed

    raise ValueError('Unsupported key_info.algorithm "%s"' % key_info.algorithm)
[ "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes PEM encoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it.
def _unarmor_pem(data, password=None): object_type, headers, der_bytes = unarmor(data) type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)' armor_type = re.match(type_regex, object_type) if not armor_type: raise ValueError(pretty_message( ''' data does not seem to contain a PEM-encoded certificate, private key or public key ''' )) pem_header = armor_type.group(1) data = data.strip() # RSA private keys are encrypted after being DER-encoded, but before base64 # encoding, so they need to be handled specially if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']): algo = armor_type.group(2).lower() return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password)) key_type = pem_header.lower() algo = None if key_type == 'encrypted private key': key_type = 'private key' elif key_type == 'rsa public key': key_type = 'public key' algo = 'rsa' return (key_type, algo, der_bytes)
[ "def test_remove_pass(self):\n TEST_PASS = \"weakpass\"\n # Generate a test key with a password\n key = RSA.gen_key(2048, 5, callback=lambda: None)\n key_pem = key.as_pem(cipher='aes_256_cbc',\n callback=lambda x: TEST_PASS)\n # Now try to decrypt the k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a PKCS12 ASN.1 DER-encoded structure and extracts certs and keys
def _parse_pkcs12(data, password, load_private_key): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if password is not None: if not isinstance(password, byte_cls): raise TypeError(pretty_message( ''' password must be a byte string, not %s ''', type_name(password) )) else: password = b'' certs = {} private_keys = {} pfx = Pfx.load(data) auth_safe = pfx['auth_safe'] if auth_safe['content_type'].native != 'data': raise ValueError(pretty_message( ''' Only password-protected PKCS12 files are currently supported ''' )) authenticated_safe = pfx.authenticated_safe mac_data = pfx['mac_data'] if mac_data: mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native key_length = { 'sha1': 20, 'sha224': 28, 'sha256': 32, 'sha384': 48, 'sha512': 64, 'sha512_224': 28, 'sha512_256': 32, }[mac_algo] mac_key = pkcs12_kdf( mac_algo, password, mac_data['mac_salt'].native, mac_data['iterations'].native, key_length, 3 # ID 3 is for generating an HMAC key ) hash_mod = getattr(hashlib, mac_algo) computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest() stored_hmac = mac_data['mac']['digest'].native if not constant_compare(computed_hmac, stored_hmac): raise ValueError('Password provided is invalid') for content_info in authenticated_safe: content = content_info['content'] if isinstance(content, OctetString): _parse_safe_contents(content.native, certs, private_keys, password, load_private_key) elif isinstance(content, EncryptedData): encrypted_content_info = content['encrypted_content_info'] encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm'] encrypted_content = encrypted_content_info['encrypted_content'].native decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password) _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key) else: raise ValueError(pretty_message( ''' Public-key-based PKCS12 files are not currently supported ''' )) key_fingerprints = set(private_keys.keys()) cert_fingerprints = set(certs.keys()) common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints)) key = None cert = None other_certs = [] if len(common_fingerprints) >= 1: fingerprint = common_fingerprints[0] key = private_keys[fingerprint] cert = certs[fingerprint] other_certs = [certs[f] for f in certs if f != fingerprint] return (key, cert, other_certs) if len(private_keys) > 0: first_key = sorted(list(private_keys.keys()))[0] key = private_keys[first_key] if len(certs) > 0: first_key = sorted(list(certs.keys()))[0] cert = certs[first_key] del certs[first_key] if len(certs) > 0: other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly) return (key, cert, other_certs)
[ "def asn1_to_x509(asn1):\n return der_to_x509(asn1_to_der(asn1))", "def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Lit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a SafeContents PKCS12 ASN.1 structure and extracts certs and keys
def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key): if isinstance(safe_contents, byte_cls): safe_contents = SafeContents.load(safe_contents) for safe_bag in safe_contents: bag_value = safe_bag['bag_value'] if isinstance(bag_value, CertBag): if bag_value['cert_id'].native == 'x509': cert = bag_value['cert_value'].parsed public_key_info = cert['tbs_certificate']['subject_public_key_info'] certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed elif isinstance(bag_value, PrivateKeyInfo): private_keys[_fingerprint(bag_value, load_private_key)] = bag_value elif isinstance(bag_value, EncryptedPrivateKeyInfo): encryption_algorithm_info = bag_value['encryption_algorithm'] encrypted_key_bytes = bag_value['encrypted_data'].native decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password) private_key = PrivateKeyInfo.load(decrypted_key_bytes) private_keys[_fingerprint(private_key, load_private_key)] = private_key elif isinstance(bag_value, SafeContents): _parse_safe_contents(bag_value, certs, private_keys, password, load_private_key) else: # We don't care about CRL bags or secret bags pass
[ "def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinsta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process and return selected confounds from the confounds file
def _select_confounds(confounds_file, selected_confounds):
    import pandas as pd
    import numpy as np

    confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a')
    # fill the first value of FramewiseDisplacement with the mean.
    if 'FramewiseDisplacement' in selected_confounds:
        confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna(
            np.mean(confounds_df['FramewiseDisplacement']))
    desired_confounds = confounds_df[selected_confounds]
    return desired_confounds
[ "def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a cached copy of TestShib's metadata with a cacheDuration attribute
def cache_duration_metadata_callback(_request, _uri, headers):
    return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml'))  # lint-amnesty, pylint: disable=no-member
[ "def tdcache():\n return cachetools.cached(cache=tdcache.tensor_description_cache)", "def get_metadata(self):\n return copy.copy(self.metadata)", "def test_cache_datastore_manifests(self, cache_audio: bool):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_exam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def _configure_testshib_provider(self, **kwargs): fetch_metadata = kwargs.pop('fetch_metadata', True) assert_metadata_updates = kwargs.pop('assert_metadata_updates', True) kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible', True) kwargs.setdefault("backend_name", "tpa-saml") kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG) kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID) kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL) kwargs.setdefault('icon_class', 'fa-university') kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName kwargs.setdefault('max_session_length', None) kwargs.setdefault('send_to_registration_first', False) kwargs.setdefault('skip_email_verification', False) saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member if fetch_metadata: assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata() if assert_metadata_updates: assert num_total == 1 # lint-amnesty, pylint: disable=no-member assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member assert num_updated == 1 # lint-amnesty, pylint: disable=no-member assert num_failed == 0 # lint-amnesty, pylint: disable=no-member assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member return saml_provider
[ "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=app.config[\"SAML_PATH\"])\n return auth", "def add_tomc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure TestShib before running the login test
def test_login(self):
    self._configure_testshib_provider()
    self._test_login()
[ "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def _FakeLogin(self):\n self.testbed.setup_env(\n USER_EMAIL='user@example.com',\n USER_ID='123',\n overwrite=True)", "def setUp(self):\n self.login_handler = LoginHandler()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure TestShib before running the register test
def test_register(self):
    self._configure_testshib_provider()
    self._test_register()
[ "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def setUp(self):\n self.modules = {}", "def test_set_registration_configuration(self):\n pass", "def test_shell_manager_register(self):\n\n @self.shells.register\n class ATestShell(Sh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table.
def test_login_records_attributes(self):
    self.test_login()
    record = UserSocialAuth.objects.get(
        user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG
    )
    attributes = record.extra_data
    assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['Member@testshib.org', 'Staff@testshib.org']
    assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I']
    assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself']
    assert attributes.get('urn:oid:2.5.4.20') == ['555-5555']  # Phone number
[ "def test_user_attributes(self):\n user_attributes = (\n \"first_name\",\n \"last_name\",\n \"username\",\n \"email\",\n \"id\",\n \"email\",\n \"links\",\n )\n fetched_user = requests.get(\n user_list_url, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test SAML login logs with debug mode enabled or not
def test_debug_mode_login(self, debug_mode_enabled): self._configure_testshib_provider(debug_mode=debug_mode_enabled) with patch.object(saml_log, 'info') as mock_log: self._test_login() if debug_mode_enabled: # We expect that test_login() does two full logins, and each attempt generates two # logs - one for the request and one for the response assert mock_log.call_count == 4 expected_next_url = "/dashboard" (msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0] assert msg.startswith('SAML login %s') assert action_type == 'request' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset( {"idp": idp_name, "auth_entry": "login", "next": expected_next_url}, request_data ) assert next_url == expected_next_url assert '<samlp:AuthnRequest' in xml (msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1] assert msg.startswith('SAML login %s') assert action_type == 'response' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset({"RelayState": idp_name}, response_data) assert 'SAMLResponse' in response_data assert next_url == expected_next_url assert '<saml2p:Response' in xml else: assert not mock_log.called
[ "def test_get_login_flow(self):\n pass", "def test_logging(self):\n self._verify_logging()", "def test_successful_login(self):\n pass", "def test_login_required():\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def test_login...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that when we have a TPA provider which has an explicit maximum session length set, waiting for longer than that between requests results in us being logged out.
def test_login_with_testshib_provider_short_session_length(self):
    # Configure the provider with a 10-second timeout
    self._configure_testshib_provider(max_session_length=10)

    now = datetime.datetime.utcnow()
    with freeze_time(now):
        # Test the login flow, adding the user in the process
        self._test_login()

    # Wait 30 seconds; longer than the manually-set 10-second timeout
    later = now + datetime.timedelta(seconds=30)
    with freeze_time(later):
        # Test returning as a logged in user; this method verifies that we're logged out first.
        self._test_return_login(previous_session_timed_out=True)
[ "def test_server_timeouted_session(self):\n\n session = Mock()\n session.timeout = Mock()\n session.is_active = False\n session.inactivity = config.SESSION_TIMEOUT + 1\n\n self.app.sessions.running = Mock(return_value=[session])\n self.worker.start()\n time.sleep(1)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mock an error response when calling the OData API for user details.
def _mock_odata_api_for_error(self, odata_api_root_url, username):
    def callback(request, uri, headers):  # lint-amnesty, pylint: disable=unused-argument
        """
        Return a 500 error when someone tries to call the URL.
        """
        headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
        headers['RequestNo'] = '[787177]'  # This is the format SAPSF returns for the transaction request number
        return 500, headers, 'Failure!'

    fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())
    url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
        root_url=odata_api_root_url,
        user_id=username,
        fields=fields,
    )
    httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')
    return url
[ "def test_api_user_get(self):\n pass", "def test_api_auth_retrieve_user_details_success(self):\n self.client.credentials(HTTP_AUTHORIZATION=\"Token \" + self.token.key)\n response = self.client.get(self.user_details_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value-mapping overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_relevant_value(self): value_map = {'country': {'Australia': 'NZ'}} expected_country = 'NZ' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value-mapping overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_other_value(self): value_map = {'country': {'United States': 'blahfake'}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
[ "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_chain_by_id
def test_get_chain_by_id(self):
    pass
[ "def test_fetch_all_by_chain_id(self):\n chain = models.Chain.query.filter(models.Chain.pmatches('2P33/0/A')).first()\n self.assertPaginatedResult('fetch_all_by_chain_id',\n chain.chain_id, chain.biomolecule_id)", "def test_get_chains(self):\n pass", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_chains
def test_get_chains(self):
    pass
[ "def test_get_chain_by_id(self):\n pass", "def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains", "def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(lis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for post_chain
def test_post_chain(self):
    pass
[ "def test_post_chain_search(self):\n pass", "def test_post_transaction_pattern(self):\n pass", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for post_chain_search
def test_post_chain_search(self):
    pass
[ "def test_post_chain(self):\n pass", "def test_search_process(self):\n pass", "def test_search_workflow(self):\n pass", "def test_search_workflow_step(self):\n pass", "def test_post_foods_search(self):\n pass", "def test_search_housekeeping(self):\n pass", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a networkx graph object from variables and relations.
def as_networkx_graph(variables, relations):
    graph = nx.Graph()

    # One node for each variables
    graph.add_nodes_from([v.name for v in variables])

    for r in relations:
        for p in all_pairs([e.name for e in r.dimensions]):
            graph.add_edge(*p)
    return graph
[ "def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a networkx bipartite graph object from variables and relations.
def as_networkx_bipartite_graph(variables, relations):
    graph = nx.Graph()

    # One node for each variables
    graph.add_nodes_from([v.name for v in variables], bipartite=0)
    graph.add_nodes_from([r.name for r in relations], bipartite=1)

    for r in relations:
        for e in r.dimensions:
            graph.add_edge(r.name, e.name)
    return graph
[ "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def initial...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the variables and relations as a graph, using networkx and matplotlib.
def display_graph(variables, relations):
    graph = as_networkx_graph(variables, relations)

    # Do not crash if matplotlib is not installed
    try:
        import matplotlib.pyplot as plt
        nx.draw_networkx(graph, with_labels=True)
        # nx.draw_random(graph)
        # nx.draw_circular(graph)
        # nx.draw_spectral(graph)
        plt.show()
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
[ "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graph(self) -> None:", "def Show_Network_an...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the graph diameter(s). If the graph contains several independent subgraphs, returns a list with the diameter of each subgraph.
def graph_diameter(variables, relations):
    diams = []
    g = as_networkx_graph(variables, relations)
    components = (g.subgraph(c).copy() for c in nx.connected_components(g))
    for c in components:
        diams.append(nx.diameter(c))
    return diams
[ "def diameter(self):\n \n v = self.vertices() \n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_paths(s,e)\n smallest = sorted(paths, key=len)[0]\n sma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all possible pairs from the list of given elements.
def all_pairs(elements):
    if len(elements) < 2:
        return []
    elif len(elements) == 2:
        return [(elements[0], elements[1])]
    else:
        new_pairs = []
        for elt in elements[1:]:
            new_pairs.append((elements[0], elt))
        return all_pairs(elements[1:]) + new_pairs
[ "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def all_pairs(iterable):\n return itertools.combinations(iterable, 2)", "def calc_rdf_tup(elements: List) -> List:\n if len(elements) != 2:\n raise ValueError(\"Element must be of length 2\")\n return [list(p)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize the internal key-value pair to a byte array; only pickle objects when necessary
def serialize(self):
    byte_array = bytearray()
    header = (
        self.sequence_number | (1 << 63)
        if self.type == KeyType.PUT
        else self.sequence_number
    )
    # append header first
    byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8))
    pickle_key = pickle.dumps(self.key)
    # key length
    byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_key)))
    # key byte array
    byte_array.extend(pickle_key)
    # it is a put operation, value is needed
    if self.type == KeyType.PUT:
        pickle_value = pickle.dumps(self.value)
        # value length
        byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_value)))
        # value byte array
        byte_array.extend(pickle_value)
    return bytes(byte_array)
[ "def serialize(self, value):\r\n return pickle.dumps(value, protocol=self.protocol)", "def serialize(self, value) -> bytes:\n pass", "def _encode_value(self, value):\n return pickle.dumps(value)", "def encode(obj):\n byte_string = pickle.dumps(obj)\n return byte_string", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import ASHRAE data from a directory containing the .csv files.
def import_data(ashrae_dir, filenames=const.NAMES):
    print('Importing data from csv')
    ashrae_dir = pathlib.Path(ashrae_dir)
    data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv'))
            for name in filenames}
    return data
[ "def from_csv(self, folder, sep=','):\n os.chdir(folder)\n for f in glob.glob(\"*.csv\"):\n name = f[:-4]\n with open(f) as ps:\n for line in ps:\n args = tuple(line.replace(' ', '').replace('\\n', '').split(sep))\n self.add_predicate(name, args)", "def import_PAIPR(input_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import ASHRAE data with optional caching mechanism.
def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES):
    # Only coerce to a Path when a cache file was actually given;
    # pathlib.Path(None) would raise a TypeError.
    if cache_file is not None:
        cache_file = pathlib.Path(cache_file)

    if cache_file is not None and cache_file.exists():
        data = import_dict_from_cached(cache_file, filenames)
    else:
        data = import_data(ashrae_dir)
        _cache_data(data, cache_file)

    # Sanity check: the set of building ids should be the same in the train and test sets.
    assert set(data['train'].building_id) == set(data['test'].building_id)

    return data
[ "def load_aws_data(self):\n pass", "def load_azure_data(self):\n pass", "def import_(self, data):\n return self.__import(data)", "def load_data(self) -> None:", "def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of timestamps missing
def count_missing_timestamps(df):
    no_of_timestamps = len(df.timestamp)
    no_of_sites = len(set(df.site_id))
    full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    no_of_missing_timestamps = no_of_sites * len(full_date_range) - no_of_timestamps
    print(f'There are {no_of_timestamps} timestamps in the data. The full date range is {len(full_date_range)} long and'
          f' there are {no_of_sites} sites so there should be {no_of_sites * len(full_date_range)} '
          f'timestamps in the data. There are therefore {no_of_missing_timestamps} missing. ')
    return no_of_missing_timestamps
[ "def get_num_of_timestamps(self):\n return len(self)", "def missing_row_cnt(df: EDAFrame) -> Any:\n nulls = df.nulls\n rst = nulls.sum(1)\n rst = rst[rst > 0]\n\n return (rst > 0).sum()", "def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add missing timestamps to the weather data and interpolate to fill in the gaps. Returns a df with the missing times and weather data filled in
def add_missing_weather_data(df):
    full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    sites = list(set(df.site_id))
    full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range),
                                        columns=['site_id', 'timestamp'])
    df_all_dates = full_data_site_range.merge(df, on=['site_id', 'timestamp'], how='left')
    df_all_dates = df_all_dates.groupby('site_id').apply(
        lambda group: group.interpolate(limit_direction='both'))
    return df_all_dates
[ "def fill_missing_time(row):\n\n row.index = pd.to_datetime(row.index)\n return row.interpolate(method=\"time\").fillna(method=\"backfill\")", "def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Join together the meter data, weather data and building metadata into one df. data = dict of df's (keys are 'building_metadata', 'weather_train', 'weather_test', 'train', 'test'); dataset_name = 'train' or 'test'. Returns a merged df which includes building_metadata, weather_train (or weather_test) and train (or test)
def join_input_data_and_multi_index(data, dataset_name):
    meter_df = data[dataset_name]
    building_df = data['building_metadata']
    weather_df = data['weather_' + dataset_name]

    # join meter and weather data
    building_n_meter = meter_df.merge(building_df, on='building_id', how='left')
    joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')

    # Add time related columns
    joined_data['hour'] = joined_data['timestamp'].dt.hour
    joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek
    joined_data['week_number'] = joined_data['timestamp'].dt.week
    joined_data['month'] = joined_data['timestamp'].dt.month
    joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in [0, 6] else 0)

    # multi index on building id and timestamp
    joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()
    return joined_data
[ "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split the joined data into a dict with a df for each meter type
def split_on_meter_type(joined_data, meter_types):
    joined_data_dict = {meter_type: joined_data[joined_data['meter_type'] == meter_type]
                        for meter_type in meter_types}
    return joined_data_dict
[ "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the state of the environment for a new episode. `setup` is used to let the reset function know when we're calling it from `setup`. If we're not, the 'random' init scheme should reset to the randomly chosen position instead of picking a new random one.
def reset(self, setup=False):
    self._done = False
    self._nbSteps = 0

    x = None
    if (self.startPosX == 'random' and setup) or (
            self.startPosX == 'episodeRandom'):
        x = random.randint(0, self._width - 1)
    elif (self.startPosX == 'random' and not setup):
        x = self._initState[0]
    elif self.startPosX == 'center':
        x = self._width - 1
    else:
        x = int(self.startPosX)

    # the y coordinate mirrors the x logic, using startPosY throughout
    y = None
    if (self.startPosY == 'random' and setup) or (
            self.startPosY == 'episodeRandom'):
        y = random.randint(0, self._height - 1)
    elif (self.startPosY == 'random' and not setup):
        y = self._initState[1]
    elif self.startPosY == 'center':
        y = self._height - 1
    else:
        y = int(self.startPosY)

    self._currentPos = (x, y)
    self._trajectory = [(x, y)]
    return (x, y)
[ "def _did_reset(self):\n # use this method to access the RAM of the emulator\n # and perform setup for each episode.\n # the method returns None\n pass", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method of the Labyrinthe class initializes a maze from a txt file and the number of the associated map.
def telecharger(self, path, numero_lab):
    with open(path, "r") as f:
        # initialize the counters used to fill in the map
        i = 1
        ii = str(i)
        j = 1
        jj = str(j)
        # walk through the characters of the txt file containing the maze, one by one
        for line in f.readlines():
            for letter in line:
                # check that the letter is an expected character
                if re.search(r"^[ O.XU]$", letter) is None:
                    break
                else:
                    self.carte[ii, jj] = letter
                    if letter == "X":
                        self.robot = [i, j]
                    elif letter == "U":
                        self.sortie = [i, j]
                    j += 1
                    jj = str(j)
            self.largeur = j
            i += 1
            ii = str(i)
            j = 1
            jj = str(j)
        self.longueur = i
        self.numero = numero_lab
[ "def __init__(self, filename=None, allLines=None):\n self.filename = str(filename)\n self.allLines = allLines #stores all lines from file", "def __init__(self):\n self.file_name = 'moes_tavern_lines.txt'\n self.path_to_file = abspath(join(getcwd(), '../data',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the player wishes, this function saves the maze so it can be loaded in the next game. It saves the maze in the cartes folder located in the working directory.
def enregistrer(self, dir_path, nom_du_fichier):
    path = dir_path + "/cartes/{}.txt".format(nom_du_fichier)
    with open(path, "w") as labyrinthe_en_cours:
        labyrinthe_en_cours.write(str(self))
[ "def enregistrementPickle(dicoDesValeurs,pfxPickle=\"\"):\n\tprint(\"ENREGISTREMENT EN FORMAT BINAIRE PICKLE\")\n\n\tdicoPourEnregistrer = {\n\t\t\t'lesAttributs':dicoDesAttributs,\n\t\t\t'lesValeurs':dicoDesValeurs,\n\t}\n\n\tif not os.path.exists(os.path.normpath(DIR_STRUCT)):\n\t\tos.makedirs(os.path.normpath(DI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure the NetCDF CC data handler operates correctly
def test_data_handling_nc_cc(): input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'), os.path.join(TEST_DATA_DIR, 'va_test.nc'), os.path.join(TEST_DATA_DIR, 'orog_test.nc'), os.path.join(TEST_DATA_DIR, 'zg_test.nc')] with xr.open_mfdataset(input_files) as fh: min_lat = np.min(fh.lat.values) min_lon = np.min(fh.lon.values) target = (min_lat, min_lon) plevel = fh.plev[-1] ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0)) va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0)) handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'], target=target, shape=(20, 20), val_split=0.0, worker_kwargs=dict(max_workers=1)) assert handler.data.shape == (20, 20, 20, 2) handler = DataHandlerNCforCC(input_files, features=[f'U_{int(plevel)}pa', f'V_{int(plevel)}pa'], target=target, shape=(20, 20), val_split=0.0, worker_kwargs=dict(max_workers=1)) if handler.invert_lat: handler.data = handler.data[::-1] assert handler.data.shape == (20, 20, 20, 2) assert np.allclose(ua, handler.data[..., 0]) assert np.allclose(va, handler.data[..., 1])
[ "def test_solar_cc():\n\n features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']\n input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]\n nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test solar data handling from a CC data file, with the clearsky ratio calculated using clearsky GHI from an NSRDB h5 file.
def test_solar_cc(): features = ['clearsky_ratio', 'rsds', 'clearsky_ghi'] input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')] nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5') with xr.open_mfdataset(input_files) as fh: min_lat = np.min(fh.lat.values) min_lon = np.min(fh.lon.values) - 360 target = (min_lat, min_lon) shape = (len(fh.lat.values), len(fh.lon.values)) with pytest.raises(AssertionError): handler = DataHandlerNCforCC(input_files, features=features, target=target, shape=shape, val_split=0.0, worker_kwargs=dict(max_workers=1)) handler = DataHandlerNCforCC(input_files, features=features, nsrdb_source_fp=nsrdb_source_fp, target=target, shape=shape, temporal_slice=slice(0, 1), val_split=0.0, worker_kwargs=dict(max_workers=1)) cs_ratio = handler.data[..., 0] ghi = handler.data[..., 1] cs_ghi = handler.data[..., 2] cs_ratio_truth = ghi / cs_ghi assert cs_ratio.max() < 1 assert cs_ratio.min() > 0 assert (ghi < cs_ghi).all() assert np.allclose(cs_ratio, cs_ratio_truth) with Resource(nsrdb_source_fp) as res: meta = res.meta tree = KDTree(meta[['latitude', 'longitude']]) cs_ghi_true = res['clearsky_ghi'] # check a few sites against NSRDB source file for i in range(4): for j in range(4): test_coord = handler.lat_lon[i, j] _, inn = tree.query(test_coord) assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])
[ "def test_from_ctd():\n dfile = os.path.join(DATA_DIR,'ctd_BM54.cnv')\n\n # Load in the raw data using np.loadtxt\n raw = np.loadtxt(dfile, comments = '#', skiprows = 175,\n usecols = (0, 1, 3, 8, 9, 10, 12))\n\n # State the units of the input data (read by hand from the file)\n u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
keys_to_track order is important! Matches will be tested in this order.
def __init__(self, keys_to_track):
    self.keys_to_track = keys_to_track
    self.tracker = {}
    for key_to_track in self.keys_to_track:
        self.tracker[key_to_track] = {}
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add obj as a match for match_dict values. Checks to make sure match_dict keys are valid.
def add(self, obj, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track
    for key_to_track in self.keys_to_track:
        # dict.has_key() only exists in Python 2; use the `in` operator instead
        if key_to_track in match_dict:
            match_val = match_dict[key_to_track]
            # skip empty values so they never act as match keys
            if match_val is None or match_val == '':
                continue
            self.tracker[key_to_track][match_val] = obj
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a match using match_dict. Returns None if there is no match. Checks to make sure match_dict keys are valid.
def match(self, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track
    for key_to_track in self.keys_to_track:
        # use the `in` operator rather than the Python 2-only dict.has_key()
        if key_to_track in match_dict:
            match_val = match_dict[key_to_track]
            if match_val in self.tracker[key_to_track]:
                return self.tracker[key_to_track][match_val]
    return None
[ "def _match_key(d, key, require=False):\n if not isinstance(d,dict):\n raise RuntimeError('Input object must be a dict, got %s' % d)\n keys = list( d.keys() )\n keyslow = [k.lower() for k in keys]\n keylow = key.lower()\n if keyslow.count(keylow) != 0:\n ind = keyslow.index(keylow)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function to populate key_matcher from self.records.
def _add_matches(self):
    for record in self.records:
        match_dict = {key_to_track: record.get(key_to_track)
                      for key_to_track in self.key_matcher.keys()}
        self.key_matcher.add(obj=record,
                             match_dict=match_dict)
[ "def init_record_fields(self, run_record_key, record_fields):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. This is\n # supported by HDF5 but not numpy.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the origin_imgs are flipped correctly.
def _check_flip(origin_imgs, result_imgs):
    h, w, c = origin_imgs.shape
    for i in range(h):
        for j in range(w):
            for k in range(c):
                if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
                    return False
    return True
[ "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a single cycle of the sample collection. It should read the monitored file and extract all metrics.
def run_single_cycle(self, collector=None): self._timestamp = int(time.time()) # There are certain error conditions, such as the system not supporting # a particular proc file type, that we will never recover from. So, # just always early exit. if self._failed: return {} filename = self._file_pattern % self._pid if not collector: collector = {} if self._file is None: try: self._file = open(filename, "r") except IOError as e: # We take a simple approach. If we don't find the file or # don't have permissions for it, then just don't collect this # stat from now on. If the user changes the configuration file # we will try again to read the file then. self._failed = True if e.errno == errno.EACCES: self._logger.error( "The agent does not have permission to read %s. " "Maybe you should run it as root.", filename, ) elif e.errno == errno.ENOENT: self._logger.error( ( "The agent cannot read %s. Your system may not support that proc file " 'type or the process with pid "%s" doesn\'t exist' ), filename, self._pid, ) # Ignore 'process not found' errors (likely caused because the process exited # but re-raise the exception for all other errors elif e.errno != errno.ESRCH: raise e if self._file is not None: try: self._file.seek(0) return self.gather_sample(self._file, collector=collector) except IOError as e: # log the error if the errno isn't 'process not found'. Process not found likely means the # process exited, so we ignore that because it's within the realm of expected behaviour if e.errno != errno.ESRCH: self._logger.error( "Error gathering sample for file: '%s'\n\t%s" % (filename, six.text_type(e)) ) # close the file. This will cause the file to be reopened next call to run_single_cycle self.close() return collector
[ "def run(self):\r\n self.collect_data()", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of centiseconds (1/100ths secs) for the given number of jiffies (a weird timing unit used by the kernel).
def __calculate_time_cs(self, jiffies):
    return int((jiffies * 100.0) / self._jiffies_per_sec)
[ "def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def ticks_us():\n\ttry:\n\t\t# pylint: dis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of milliseconds for the given number of jiffies (a weird timing unit used by the kernel).
def calculate_time_ms(self, jiffies):
    return int((jiffies * 1000.0) / self._jiffies_per_sec)
[ "def __calculate_time_cs(self, jiffies):\n\n return int((jiffies * 100.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def ticks_us():\n\ttry:\n\t\t# pylint: disable=no-member\n\t\treturn time.ticks_us()\n\texcept:\n\t\treturn time....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gathers the metrics from the stat file.
def gather_sample(self, stat_file, collector=None): if not collector: collector = {} # The file format is just a single line of all the fields. line = stat_file.readlines()[0] # Chop off first part which is the pid and executable file. The # executable file is terminated with a paren so just search for that. line = line[(line.find(") ") + 2) :] fields = line.split() # Then the fields we want are just at fixed field positions in the # string. Just grab them. # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number # 19, but in our case it's 16 aka 19 - 3) process_uptime = self.__get_uptime_ms() - self.calculate_time_ms( int(fields[19]) ) collector.update( { Metric("app.cpu", "user"): self.__calculate_time_cs(int(fields[11])), Metric("app.cpu", "system"): self.__calculate_time_cs(int(fields[12])), Metric("app.uptime", None): process_uptime, Metric("app.nice", None): float(fields[16]), Metric("app.threads", None): int(fields[17]), Metric("app.mem.majflt", None): int(fields[9]), Metric("app.io.wait", None): int(fields[39]) if len(fields) >= 39 else 0, } ) return collector
[ "def _read_stat(self):\n stat_file = '/proc/{:d}/stat'.format(self.pid)\n with open(stat_file, 'r') as handle:\n self._stat = handle.read()", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gathers the metrics from the sockstat file.
def gather_sample(self, stat_file, collector=None):
    if not collector:
        collector = {}
    for line in stat_file:
        # We just look for the different "inuse" lines and output their
        # socket type along with the count.
        m = re.search(r"(\w+): inuse (\d+)", line)
        if m is not None:
            collector.update(
                {
                    Metric("app.net.sockets_in_use", m.group(1).lower()): int(
                        m.group(2)
                    )
                }
            )
    return collector
[ "def _read_stat(self):\n stat_file = '/proc/{:d}/stat'.format(self.pid)\n with open(stat_file, 'r') as handle:\n self._stat = handle.read()", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects the metrics from the gathers
def collect(self):
    collector = {}
    for gather in self.gathers:
        try:
            stats = gather.run_single_cycle(collector=collector)
            if stats:
                collector.update(stats)
        except Exception as ex:
            self._logger.exception(
                "Exception while collecting metrics for PID: %s of type: %s. Details: %s",
                self.pid,
                type(gather),
                repr(ex),
            )
    return collector
[ "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the process of the agent.
def current_process(self):
    return self._current_process
[ "def getProcess(self):\n\t\treturn self.getOpArgument(0)", "def getProcess(self):\n\t\treturn self.getOpArgument(1)", "def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subproce...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a process id, return all children processes (recursively)
def get_child_processes(self, ppid):
    all_children = []
    children_to_explore = set()
    for _pid in self.parent_to_children_map[ppid]:
        all_children.append(_pid)
        children_to_explore.add(_pid)

    # get the children 'recursively'
    while children_to_explore:  # the invariant
        child_to_explore = children_to_explore.pop()
        if not self.parent_to_children_map.get(child_to_explore):
            continue
        unvisited = self.parent_to_children_map[child_to_explore]
        for node in unvisited:
            if node not in all_children:
                children_to_explore.add(node)
                all_children.append(node)
    return list(set(all_children))
[ "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all running process ids
def get_running_processes(self):
    all_processes = []
    for _process in self.processes:
        all_processes.append(_process["pid"])
    return all_processes
[ "def get_all_running_processes():\n thispid = os.getpid()\n rpids = set()\n for pid in psutil.pids():\n try:\n if psutil.Process(pid).status() == 'running' or psutil.Process(pid).status() == 'disk-sleep':\n rpids.add(pid)\n except psutil.NoSuchProcess:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like the get_matches_commandline method: given a string, match the processes on the name, but also return the matched processes' children
def get_matches_commandline_with_children(self, match_pattern):
    matched_pids = self.get_matches_commandline(match_pattern)
    for matched_pid in matched_pids:
        matched_pids.extend(self.get_child_processes(matched_pid))
    return list(set(matched_pids))
[ "def globsearch_procs(s: str) -> List[Process]:\n pat = re.compile(fnmatch.translate(s))\n\n procs_ = procs()\n procs_out = list(filter(lambda p: re.search(pat, cmdline(p)) is not None, procs_))\n notify(msg=f\"Glob search returned {len(procs_out)} matching processes\")\n return procs_out", "def fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a process, record the metrics in a historical metrics collector. Collects the historical result of each metric per process in __metrics_history
def record_metrics(self, pid, metrics):
    for _metric, _metric_value in metrics.items():
        if not self.__metrics_history[pid].get(_metric):
            self.__metrics_history[pid][_metric] = []
        self.__metrics_history[pid][_metric].append(_metric_value)
        # only keep the last 2 running history for any metric
        self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][-2:]
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
At the beginning of each process metric calculation, the absolute (non-cumulative) metrics need to be overwritten with the combined process(es) result. Only the cumulative metrics need the previous value to calculate a delta. We should set the absolute metrics to 0 at the beginning of this "epoch"
def _reset_absolute_metrics(self):
    for pid, process_metrics in self.__metrics_history.items():
        for _metric, _metric_values in process_metrics.items():
            if not _metric.is_cumulative:
                self.__aggregated_metrics[_metric] = 0
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the aggregated metric values based on the current running processes and the historical metric record
def _calculate_aggregated_metrics(self):
    # using the historical values, calculate the aggregate
    # there are two kinds of metrics:
    # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)
    # b) absolute metrics - the last absolute value is used
    running_pids_set = set(self.__pids)

    for pid, process_metrics in self.__metrics_history.items():
        for _metric, _metric_values in process_metrics.items():
            if not self.__aggregated_metrics.get(_metric):
                self.__aggregated_metrics[_metric] = 0
            if _metric.is_cumulative:
                if pid in running_pids_set:
                    if len(_metric_values) > 1:
                        # only report the cumulative metrics for more than one sample
                        self.__aggregated_metrics[_metric] += (
                            _metric_values[-1] - _metric_values[-2]
                        )
            else:
                if pid in running_pids_set:
                    # absolute metric - accumulate the last reported value
                    self.__aggregated_metrics[_metric] += _metric_values[-1]
[ "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def _update(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect the per-process tracker for the monitored process(es).
def gather_sample(self): for _pid in self._select_processes(): if not self.__trackers.get(_pid): self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id) self._reset_absolute_metrics() for _tracker in self.__trackers.values(): _metrics = _tracker.collect() self.record_metrics(_tracker.pid, _metrics) self._calculate_aggregated_metrics() self._remove_dead_processes() self.print_metrics()
[ "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the PID of the process that was marked as $$TBD.
def set_pid(self, pid): # type: (int) -> None for i in range(len(self.__target_pids)): if self.__target_pids[i] == "$$TBD": self.__target_pids[i] = pid break
[ "def pid(self, pid):\n\n self._pid = pid", "def test_set_ultimate_pid(self):\n print(\"Testing setting ultimate PID\")\n pass", "def set_pid(self,san,key,val='',test=0):\n if val == str(self.pid):\n return (0,'')\n if self.state <> ObjState.created:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the probability of a word following a context, i.e. the conditional probability P(word|context).
def prob(self, word, context=None): if not context: context = () else: context = tuple(context) prob = 0 for i in range(len(context) + 1): prob += self.weights[i] * self.ngram_cpd[context[i:]][word] return prob
[ "def prob(self, word, context):\n assert type(context) == tuple\n result = 0\n\n # print(f\"p({word} | {context}) = \", end=\"\")\n\n\n # for each n-gram model, calculate the probability of the word given the context weigthed by the lambda\n # and add it to the result\n # s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
YOLOV3 network hybrid forward.
def hybrid_forward(self, F, x, *args): all_box_centers = [] all_box_scales = [] all_objectness = [] all_class_pred = [] all_anchors = [] all_offsets = [] all_feat_maps = [] all_detections = [] routes = [] for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs): x = stage(x) routes.append(x) # the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs): x, tip = block(x) if autograd.is_training(): dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip) all_box_centers.append(box_centers.reshape((0, -3, -1))) all_box_scales.append(box_scales.reshape((0, -3, -1))) all_objectness.append(objness.reshape((0, -3, -1))) all_class_pred.append(class_pred.reshape((0, -3, -1))) all_anchors.append(anchors) all_offsets.append(offsets) # here we use fake featmap to reduce memory consuption, only shape[2, 3] is used fake_featmap = F.zeros_like(tip.slice_axis( axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1)) all_feat_maps.append(fake_featmap) else: dets = output(tip) all_detections.append(dets) if i >= len(routes) - 1: break # add transition layers x = self.transitions[i](x) # upsample feature map reverse to shallow layers upsample = _upsample(x, stride=2) route_now = routes[::-1][i + 1] x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1) if autograd.is_training(): # during training, the network behaves differently since we don't need detection results if autograd.is_recording(): # generate losses and return them directly box_preds = F.concat(*all_detections, dim=1) all_preds = [F.concat(*p, dim=1) for p in [ all_objectness, all_box_centers, all_box_scales, all_class_pred]] all_targets = self._target_generator(box_preds, *args) return self._loss(*(all_preds + all_targets)) # return raw predictions, this is only used in DataLoader transform function. return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps, F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1), F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1)) # concat all detection results from different stages result = F.concat(*all_detections, dim=1) # apply nms per class if self.nms_thresh > 0 and self.nms_thresh < 1: result = F.contrib.box_nms( result, overlap_thresh=self.nms_thresh, valid_thresh=0.01, topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False) if self.post_nms > 0: result = result.slice_axis(axis=1, begin=0, end=self.post_nms) ids = result.slice_axis(axis=-1, begin=0, end=1) scores = result.slice_axis(axis=-1, begin=1, end=2) bboxes = result.slice_axis(axis=-1, begin=2, end=None) return ids, scores, bboxes
[ "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set non-maximum suppression parameters.
def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100): self._clear_cached_op() self.nms_thresh = nms_thresh self.nms_topk = nms_topk self.post_nms = post_nms
[ "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def _non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):\n max_boxes_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset class categories and class predictors.
def reset_class(self, classes): self._clear_cached_op() self._classes = classes if self._pos_iou_thresh >= 1: self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh) for outputs in self.yolo_outputs: outputs.reset_class(classes)
[ "def _reset(self):\n self.classifier.reset()", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self.experts = [\n self._construct_new_expert()\n ]", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
YOLO3 multiscale with darknet53 base network on VOC dataset.
def yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs): from ...data import VOCDetection pretrained_base = False if pretrained else pretrained_base base_net = darknet53( pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices, **kwargs) stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]] anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]] strides = [8, 16, 32] classes = VOCDetection.CLASSES return get_yolov3( 'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc', pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
[ "def YOLOV3Base(self):\n if self.conf['yolov3_base_model_load']:\n base = load_model('yolov3_base.h5')\n base.trainable = True\n return base\n\n yolov3 = make_yolov3_model()\n\n # Load the weights.\n weight_reader = WeightReader('yolov3.weights')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The uri returned from request.uri is not properly urlencoded (sometimes it's partially urldecoded). This is a weird hack to get werkzeug to return the properly urlencoded string uri.
def _get_uri_from_request(request): uri = request.base_url if request.query_string: uri += '?' + request.query_string.decode('utf-8') return uri
[ "def requote_uri(uri):\n # To reduce tabulator import time\n import requests.utils\n if six.PY2:\n def url_encode_non_ascii(bytes):\n pattern = '[\\x80-\\xFF]'\n replace = lambda c: ('%%%02x' % ord(c.group(0))).upper()\n return re.sub(pattern, replace, bytes)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualize a particular column of Y_pred and Y_test for a particular series.
def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx): ser_idx = [i for i in range(0, len(y_test), num_win_ser)] if num_plots > len(ser_idx): print("Too many plots, reduce the mumber") else: indx = ser_idx[0:num_plots] days = range(num_win_ser) for idx in indx: CR = test_seq[idx][0][0][3] pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx] true = y_test[idx : idx+num_win_ser, window_out -1, col_idx] plt.title("Y_True V/S Y_Pred, CR: "+ str(CR)) plt.xlabel('Days') plt.ylabel(cols_y[col_idx]) plt.plot(days, pred, label = 'Pred') plt.plot(days, true, label = 'True') plt.legend() plt.show()
[ "def plotPreds(self, predictions, test_series=None, run_up=None,\\\n ylabel='units'):\n #set up figure\n plt.figure(figsize=(10,6))\n plt.ylabel(ylabel)\n plt.xlabel('datetime')\n \n #plot lines\n if run_up is None:\n run_up = self.validat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_move function
def test_calc_move(self): t = AioBaseTurtle() t.speed(speed=5) steps, delta = t._calc_move(Vec2D(0, 100)) self.assertEqual(steps, 20) self.assertAlmostEqual(delta[0], 0.0) self.assertAlmostEqual(delta[1], 5.0)
[ "def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_rotation function
def test_calc_rotation(self): t = AioBaseTurtle() t.speed(speed=2) orient, steps, delta = t._calc_rotation(120) self.assertEqual(steps, 21) self.assertAlmostEqual(delta, 120.0 / 21.0) self.assertAlmostEqual(orient[0], math.cos(math.radians(120))) self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))
[ "def test_speed_angle_rotate(self):\n print(\"Testing speed angle rotate!\")\n pass", "def get_rotation():\n return _rotation * 90", "def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_circle function
def test_calc_circle(self): t = AioBaseTurtle() steps, step_len, rot_step = t._calc_circle(100, extent=180) self.assertEqual(steps, 14) self.assertAlmostEqual(rot_step, 180.0 / 14.0) self.assertAlmostEqual(step_len, 22.3928952207)
[ "def makeCircle(r, a): \n myTurtle.circle(r, a)", "def draw_circle(c):\n turtle.circle(c.radius)", "def GetCircle(circle):\r\n pass", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._move_step function
def test_move_step(self): t = AioBaseTurtle() t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5)) self.assertAlmostEqual(t._position[0], 100) self.assertAlmostEqual(t._position[1], 100) t.screen._drawline.assert_called_once_with( t.currentLineItem, ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position "black", 1, False ) self.mock_update.assert_called_once_with()
[ "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def __movement_test(self):\n if self.SI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Temporarily overwrite the settings with test settings. This allows test datasets to be used for testing.
def generate_test_settings(tmpdir, dataset): # When `tmpdir` is a path convert it to a string if isinstance(tmpdir, py._path.local.LocalPath): tmpdir = str(tmpdir) test_settings = { 'datasets': { 'mnist': { 'train': { 'images': "file://" + tmpdir + "/" + dataset + "/server/train-images-idx3-ubyte.gz", 'labels': "file://" + tmpdir + "/" + dataset + "/server/train-labels-idx1-ubyte.gz" }, 'test': { 'images': "file://" + tmpdir + "/" + dataset + "/server/t10k-images-idx3-ubyte.gz", 'labels': "file://" + tmpdir + "/" + dataset + "/server/t10k-labels-idx1-ubyte.gz" }, }, }, 'data-dir': tmpdir + "/" + dataset + "/data" } overwrite_settings(test_settings)
[ "def prepare_settings(self):\n\n self.settings = load_settings_as_template(DEFAULT_SETTINGS_PATH)\n self.settings['experiment']['file_paths'] = [os.path.join(TEST_DIR, _) for _ in self.file_paths]\n self.settings['experiment']['fasta_paths'] = [os.path.join(TEST_DIR, _) for _ in self.fasta_pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate archive files for the given test dataset in tmpdir
def generate_test_dataset_archive(filepath, dataset): # 'file:///some/path' to '/some/path' if filepath[:7] == 'file://': filepath = filepath[7:] # Check if the dataset exists. # When not been generate it. if not os.path.isfile(filepath): print("Generating", filepath) data = get_test_dataset(dataset) ensure_dir(os.path.dirname(filepath)) idxgz.save(filepath, data)
[ "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a test environment using the given dataset. The settings are temporarily overwritten to use the test data.
def generate_test_environment(tmpdir, dataset): # Overwrite settings with test settings generate_test_settings(tmpdir, dataset) # Generate the archive files for usage in ['train', 'test']: for dstype in ['images', 'labels']: dataset_type = usage + '.' + dstype mnist_dataset = 'datasets.mnist.' + dataset_type filepath = get_setting(mnist_dataset) test_dataset = dataset + '.' + dataset_type generate_test_dataset_archive(filepath, test_dataset)
[ "def generate_test_settings(tmpdir, dataset):\n\n # When `tmpdir` is a path convert it to a string\n if isinstance(tmpdir, py._path.local.LocalPath):\n tmpdir = str(tmpdir)\n \n test_settings = {\n \n 'datasets': {\n 'mnist': {\n 'train': {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts (typically) overlapping regular patches from a grayscale image. Changing the offset and stride parameters will result in images reconstructed by reconstruct_from_grayscale_patches having different dimensions! Callers should pad and unpad as necessary!
def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ): px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0])) l, t = np.meshgrid( np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]), np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) ) l = l.ravel() t = t.ravel() x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1])) y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1])) return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l)
[ "def extract_patches(\n image,\n size,\n stride = 1,\n):\n if size == stride:\n # This function is reshape + transpose based and is always the fastest, but\n # of course only works if size == stride.\n return extract_patches_nonoverlapping(image, size, pad=False)\n return extract_patches_conv2d(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert the JSON schema for requests from api.openweathermap.org.
def validate_schema_openweathermap(self, actual, schema): resources_dir = os.path.abspath(os.getcwd()) relative_schema_path = valid_json_schema if schema == 'Valid' else error_json_schema schema_data = open(os.path.join(resources_dir, relative_schema_path)) self.validate_schema(actual, json.load(schema_data)) return self
[ "def test_api_schema(self):\n response = self.client.get(\"/api/schema\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content)[\"_meta\"][\"title\"], \"Marsha API\")", "def test_trucks_api(self):\n resp = self.app.get('/trucks')\n self.as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of non-empty dicts/lists or other objects.
def recursive_count(o): if isinstance(o, dict): c = 0 for v in o.values(): c += recursive_count(v) return c elif isinstance(o, list): c = 0 for v in o: c += recursive_count(v) return c else: return 1
[ "def __len__(self):\n return len(self._dicts)", "def test_count_empty() -> None:\n assert count([]) == {}", "def count(d):\n return sum(len(v) for v in d.values())", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count +=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list formed by the evaluation types present in criteria.
def get_evaluation_analysis_types(self, parameters): eval_types =[] for evaluation_criteria_id in parameters["clustering"]["evaluation"]["evaluation_criteria"]: # for subcriteria in parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id]: # eval_types.append(subcriteria) eval_types.extend(parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id].keys()) return list(set(eval_types))
[ "def listCriteriaTypes():", "def listSearchCriteriaTypes():", "def listSortCriteriaTypes():", "def listCriteria():", "def types(self):\n return [term for term in self._terms\n if isinstance(term, (TypeIdentifier, String, Regex))]", "def types(self):\n ret = []\n for sco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the 'details' field of a clustering.
def analysis_function_details(self,clustering): return clustering.details
[ "def getClusterInfo(self):\n pass", "def get_cluster_details(cluster):\n cmd = f\"ocm describe cluster {cluster} --json=true\"\n out = run_cmd(cmd)\n return json.loads(out)", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def detail(self):\n return self._detail", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of elements that are clustered in this clustering (which may not be the total number of elements of the dataset if there were noisy elements).
def analysis_function_total_elements(self,clustering): return clustering.total_number_of_elements
[ "def n_clusters(self):\n return len(self.clusters)", "def numConnectedElements(self):\n \n pass", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def count_elements_in_dataset(data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the percentage of elements of the clustering that are in the 4 biggest clusters.
def analysis_function_top_4(self,clustering): clustering.sort_clusters_by_size() total = 0 percents = clustering.get_population_percent_of_n_bigger_clusters(4) for p in percents: total = total+p return total
[ "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def purity_score(clusters, classes):\n\n A = np.c_[(clusters, classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:, 0]):\n z = A[A[:, 0] == j, 1]\n x = np.ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the percent of noise elements in the dataset.
def analysis_function_noise_level(self, clustering, total_elements): return 100.-(clustering.total_number_of_elements/float(total_elements))*100.
[ "def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val,title=\"Pixel values considered as noise\")\n return np.var(img[img<last_val])", "def water_percentage(self):\n water = 1.00\n for ingredient in self.ingredients:\n water -= ingredien...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the mean cluster size.
def analysis_function_mean_cluster_size(self,clustering): sizes = get_cluster_sizes(clustering.clusters)[1] return numpy.mean(sizes)
[ "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method creates a project in Pivotal Tracker.
def create_project(): client = RequestManager() project_name = "".join(choices(string.ascii_letters + string.digits, k=10)) client.set_method("POST") client.set_endpoint("/projects") body = {"name": project_name} client.set_body(json.dumps(body)) response = client.execute_request() STORED_ID['project_id'] = response.json()['id']
[ "def project_create(project):\n client.project.create(project)", "def cmdop_projectcreate ( DbConnection, logger, project, icoordsys) :\n try :\n logger.info(\"Running cmdop_projectcreate\")\n command = PROJECTCREATE + \" -project \" + \"\\\"\" + str(project) + \"\\\"\" + \" -icoordsys \" + st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Static method to delete all projects.
def delete_all_projects(): client = RequestManager() client.set_method("GET") client.set_endpoint("/projects") response = client.execute_request() for project in response.json(): try: ProjectHelper.delete_project(project["id"]) except TypeError: LOGGER.info(project)
[ "def clear(self):\n for project in Project.objects:\n project.delete()", "def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()", "def delete(self):\n args = {\"id\": self.id}\n _perform_command(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that returns a 403 status if the user isn't logged in, instead of redirecting to the LOGIN_URL.
def login_required_403(view): @wraps(view) def dec_view(request, *args, **kwargs): if not request.user.is_authenticated(): return JsonResponse({"detail": "You have to log in"}, status=403) return view(request, *args, **kwargs) return dec_view
[ "def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authenticated():\n next_ = request.GET.get(REDIRECT_FIELD_NAME,\n LOGIN_REDIRECT_URL)\n return HttpResponseRedirect(next_)\n return func(request, *a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Login with an access code.
def accesscode(request, code): employee = Employee.objects.get(access_code=code) user = employee.user user.backend = 'django.contrib.auth.backends.ModelBackend' login(request, user) return HttpResponseRedirect('/')
[ "def login():", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login_in():", "def login(self):", "def logi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for all employees (in company) or for the current user, depending on employee role.
def all_employees(request, company_id=None): current_employee = Employee.objects.get(user__pk=request.user.pk) company_super_user = current_employee.isCompanySuperUserOrHigher() if company_id: company = Company.objects.get(pk=company_id) else: company = current_employee.company if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk: raise PermissionDenied() change_company_form = ChangeCompanyForm(initial=dict(company=company)) return TemplateResponse( request, 'all_employees.html', { 'user': request.user, 'company_super_user': company_super_user, 'company': company, 'change_company_form': change_company_form, } )
[ "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for all employees the current user is a manager for that have an empty development plan.
def get_manager_employees(request): current_employee = Employee.objects.get(user__pk=request.user.pk) manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all() if manager_employees: emp_list=[] for emp in manager_employees: emp_data={} emp_data["id"] = emp.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["manager_id"] = emp.manager.id # emp_data["status_questions"] = emp.status_questions # employee_role = EmployeeRole.objects.filter(employee=emp).all() # name_role_list = [] # for obj in employee_role: # name_role_list.append(obj.role.name) # emp_data["roles"] = name_role_list emp_list.append(emp_data) data = {"employees:": emp_list} return JsonResponse(status=201, data=data) else: return JsonResponse("The user with id={} isn't a manager for any user".format(current_employee.user.id), status=404)
[ "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for creating an employee in a company.
def create_employee(request, company_id): company = Company.objects.get(pk=company_id) current_employee = Employee.objects.get(user__pk=request.user.pk) if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk: logUnauthorizedAccess("User tried to create_employee", request) raise PermissionDenied() form = EmployeeForm(request, initial=dict(company=company)) form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company) # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter( # Q(company=company) | Q(company__isnull=True)) # data = { # 'employee_form': form.cleaned_data, # 'company': company.cleaned_data["name"] # } return TemplateResponse( request, 'mus/create_employee_form.html', { 'employee_form': form, } ) # data = { # 'employee_form': form.cleaned_data, # 'company': company.cleaned_data["name"] # } # return JsonResponse(status=200, data=data)
[ "def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n company = self.object\n company.create_employee_data()\n return response", "def get(self, request):\n form = EmployeeForm()\n return render(request, 'employee/add-employee.htm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for editing employee
def edit_employee(request, employee_id): employee = Employee.objects.get(pk=int(employee_id)) current_employee = Employee.objects.get(user__pk=request.user.pk) assert isinstance(employee, Employee) assert isinstance(current_employee, Employee) # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk: # raise PermissionDenied() if not current_employee.hasAccessTo(employee): raise PermissionDenied() form = EditEmployeeForm(request.user, employee, { 'first_name': employee.user.first_name, 'last_name': employee.user.last_name, 'email': employee.user.email, 'manager': employee.manager.id if employee.manager else 0, 'language_code': employee.language_code, # 'development_plan_type': employee.development_plan_type.id, 'is_manager': employee.is_manager }) if 'manager' in form.fields: managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk) form.fields['manager'].queryset = managerQS # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter( # Q(company__pk=employee.company.pk) | Q(company__isnull=True) # ) is_me = employee.user.pk == request.user.pk return TemplateResponse( request, 'mus/edit_employee_form.html', { 'edit_employee_form': form, 'employee_id': employee_id, 'me': is_me, 'name': employee.user.get_full_name() } )
[ "def admin_edit_employee(uuid):\n form = AdminEditProfileForm()\n employee = EmployeeApiController.get_employee_by_uuid(uuid)\n form.department.choices = [(dep[\"uuid\"], dep[\"name\"]) for dep in DepartmentApiController.get_all_departments()]\n fullname = employee[\"last_name\"] + \" \" + employee[\"fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for list of actions of (current) employee
def action_list(request, employee_id=None): if employee_id: employee = Employee.objects.get(pk=employee_id) current_employee = Employee.objects.get(user__pk=request.user.pk) if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk: raise PermissionDenied() else: employee = request.user.employee_user.first() actions = employee.action_set.all() return TemplateResponse( request, 'mus/action_list.html', dict( actions=actions, employee=employee ) )
[ "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for detail of action
def action_detail(request, action_id): employee = request.user.employee_user.first() action = Action.objects.get(pk=int(action_id)) # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk: if not employee.hasAccessTo(action.employee): raise PermissionDenied() if request.method == 'POST': form = ActionCommentForm(request.POST) if form.is_valid(): form.save(request.user, action) return HttpResponseRedirect('/action/%s' % action_id) else: form = ActionCommentForm() return TemplateResponse( request, 'mus/action_detail.html', dict( action=action, form=form ) )
[ "def action_detail(request, action_id):\n action = shortcuts.get_object_or_404(Action, pk=action_id)\n try:\n new_action = action.action_object.content_object\n if new_action.__class__.__name__ == 'Action':\n action = new_action\n except:\n pass\n return shortcuts.render_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create LeaderModel and send it as a PDF to the browser
def get_leader_model_pdf(currentEmpl, employees): lm = LeaderModel() employee_actions = {} legend = [] colors = {} errors = {'noactions': []} # numbered_actions = {} for empl in employees: if not currentEmpl.hasAccessTo(empl): raise PermissionDenied() actions = empl.action_set.all() if not len(actions): errors['noactions'].append(empl) continue lkey = empl.user.first_name + " " + empl.user.last_name legend.append(lkey) if not lkey in employee_actions: employee_actions[lkey] = {} for action in actions: if not action.difficulty or not action.type: errors['noactions'].append(empl) continue circle_number = lm.addCircle(action) latest_comment = action.getLatestComment() employee_actions[lkey][circle_number] = { 'name': action.title, 'type': action.type, 'difficulty': action.getDifficultyText(), 'comment': latest_comment } if lkey not in colors: color = lm.getEmployeeColors(empl.id) colors[lkey] = "rgb({}, {}, {})".format(color[0], color[1], color[2]) if len(errors['noactions']): return errors lm_filename = path.join(settings.STATIC_ROOT, "leadermodel_{}.png".format(currentEmpl.id)) lm.writeImage(lm_filename) # # Write PDF pdfFilename = path.join(settings.FILES_ROOT, "leadermodel_{}.pdf".format(currentEmpl.id)) template = get_template('mus/leader_model_pdf.html') context = Context({ 'site_url': settings.SITE_URL, 'lm_filename': lm_filename, 'employee_actions': employee_actions, 'colors': colors, 'legend': legend }) html = template.render(context) # html = html.replace('<li>','<li><img class="square" src="http://test.nxtlvl.dk/static/img/square.png" />') result = open(pdfFilename, 'wb') pisa.pisaDocument(StringIO.StringIO( html.encode("UTF-8")), dest=result) result.close() wrapper = FileWrapper(file(pdfFilename)) response = HttpResponse(wrapper, content_type='application/pdf') response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf' response['Content-Length'] = os.path.getsize(pdfFilename) return response # return HttpResponseRedirect('/employee/all/%d' % int(company_id))
[ "def pdfReceiver(request, model=''):\n\n\tinput_str = ''\n\tinput_str += parsePOST(request)\n\t# packet = io.StringIO() # write to memory\n\tpacket = io.BytesIO()\n\n\ttry:\n\t\tpisa.CreatePDF(input_str, dest=packet)\n\texcept ValueError as error:\n\t\t# triggered from the elusive invalid color value issue:\n\t\tl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for employee development plan details
def development_plan_details(request, development_plan_id): #, employee_id ): # employee = Employee.objects.get(user__pk=request.user.pk) # employee = Employee.objects.filter(pk=int(employee_id)).first() development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id)) current_employee = Employee.objects.filter(user__pk=request.user.pk).first() all_employees = development_plan.employee_relation.all() try: development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id)) data={} development_plan_object_list=[] dev_plan={} dev_plan["id"] = development_plan.id dev_plan["deleted"] = development_plan.deleted if development_plan.type: dev_plan["type"] = development_plan.type.name # dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\ # .finished_at dev_plan["created_at"] = development_plan.created_at dev_plan["created_by"] = development_plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) # manager_relation manager_data={} manager_data["manager_username"] = development_plan.manager_relation.user.username manager_data["manager_first_name"] = development_plan.manager_relation.user.first_name manager_data["manager_last_name"] = development_plan.manager_relation.user.last_name development_plan_object_list.append({"manager_data":manager_data}) # employee_relation employee_data={} all_employees = development_plan.employee_relation.all() if all_employees: emp_list=[] for emp in all_employees: emp_data={} emp_data["id"] = emp.user.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["status_questions"] = emp.status_questions emp_data["dev_plan_finished_at"] = DevelopmentPlanToEmployeeRelation\ .objects.get(employee=emp, development_plan = development_plan)\ .finished_at employee_role = EmployeeRole.objects.filter(employee=emp).all() name_role_list = [] for obj in employee_role: name_role_list.append(obj.role.name) emp_data["roles"] = name_role_list emp_list.append(emp_data) employee_data={"all_employees":emp_list} else: return JsonResponse(data={"details":"Any employee has Development Plan with id={}" .format(development_plan.id)}, status=404) development_plan_object_list.append({"employee_data":employee_data}) # competence_parts all_competence_parts = development_plan.competence_parts.all() competence_list = [] questions_list = [] sliders_list = [] if all_competence_parts: for comp_part in all_competence_parts: comp_part_data={} competence_d={"competence_parts": []} comp_part_data["id"] = comp_part.id comp_part_data["title"] = comp_part.title comp_part_data["description"] = comp_part.description comp_part_data["competence_status"] = comp_part.competence_status all_questions = comp_part.question_set.all() if all_questions: for question in all_questions: question_data = {} question_data["question_id"] = question.id question_data["title"] = question.title question_data["competence_part"] = question.competence_part.id answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee if answer: question_data["answer_id"] = answer.id question_data["answer"] = answer.title questions_list.append(question_data) comp_part_data["questions"] = questions_list all_sliders = comp_part.slider_set.all() if all_sliders: for slider in all_sliders: slider_data = {} slider_data["slider_id"] = slider.id slider_data["scale"] = slider.scale slider_data["competence_part"] = slider.competence_part.id 
answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee if slider: slider_data["answer_id"] = answer.id slider_data["answer"] = answer.slider.scale sliders_list.append(slider_data) comp_part_data["sliders"] = sliders_list comp_part_data["created_at"] = comp_part.created_at comp_part_data["created_by"] = comp_part.created_by.username comp_part_data["updated_at"] = comp_part.updated_at comp_part_data["updated_by"] = comp_part.updated_by.username competence_keys_list = ['id', 'title', 'description', 'language_code', 'status'] if not competence_list: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) else: competence_found = False for competence_dict in competence_list: if competence_dict['id'] == comp_part.competence.id: competence_dict['competence_parts'].append(comp_part_data) competence_found = True break if not competence_found: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) development_plan_object_list.append({"competences":competence_list}) else: return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet" .format(development_plan.id)}, status=404) data = {"dev_plan:": development_plan_object_list} return JsonResponse(status=201, data=data) except DevelopmentPlan.DoesNotExist: return JsonResponse(data={"details":"Development Plan with this id doesn't exist"}, status=404)
[ "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=T...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a list of user's development plans for manager
def get_all_user_development_plans_for_manager(request, employee_id): current_employee = Employee.objects.get(user__pk=request.user.pk) user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all() employee = Employee.objects.filter(pk=int(employee_id)).first() if not current_employee: raise PermissionDenied("You don't have any employee assigned to you.", 401) if not current_employee.isEnsoUser() and current_employee.is_manager: raise PermissionDenied() actions = employee.action_set.all() if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]: raise PermissionDenied("Employee with id={} is not assigned to you.".format(employee_id), 401) if user_development_plans: data={} user_development_plans_list = [] for plan in user_development_plans: development_plan_object_list=[] dev_plan = {} dev_plan["id"] = plan.id dev_plan["deleted"] = plan.deleted if plan.type: dev_plan["type"] = plan.type.name dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\ .get(employee=current_employee, development_plan = plan).finished_at dev_plan["created_at"] = plan.created_at dev_plan["created_by"] = plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) manager_data = {} manager_data["manager_username"] = plan.manager_relation.user.username manager_data["id"] = plan.manager_relation.user.id development_plan_object_list.append({"manager_data":manager_data}) user_development_plans_list.append(development_plan_object_list) else: return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan" .format(request.user.pk)}, status=404) data = {"user_development_plans:": user_development_plans_list} return JsonResponse(status=201, data=data)
[ "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a list of development plans for active user
def get_all_development_plans_for_user(request): current_employee = Employee.objects.get(user__pk=request.user.pk) user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all() if not current_employee: raise PermissionDenied("You don't have any employee assigned to you.", 401) if user_development_plans: data={} user_development_plans_list = [] for plan in user_development_plans: development_plan_object_list=[] dev_plan = {} dev_plan["id"] = plan.id dev_plan["deleted"] = plan.deleted if plan.type: dev_plan["type"] = plan.type.name dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\ .get(employee=current_employee, development_plan = plan).finished_at dev_plan["created_at"] = plan.created_at dev_plan["created_by"] = plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) manager_data = {} manager_data["manager_username"] = plan.manager_relation.user.username manager_data["id"] = plan.manager_relation.user.id development_plan_object_list.append({"manager_data":manager_data}) user_development_plans_list.append(development_plan_object_list) else: return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan" .format(request.user.pk)}, status=404) data = {"user_development_plans:": user_development_plans_list} return JsonResponse(status=201, data=data)
[ "def plan_list(request):\n if request.method == 'GET':\n try:\n plans = DietPlan.objects.filter(owner=request.user)\n except TypeError as e:\n print(e.msg)\n return HttpResponse(status=403)\n serializer = DietPlanSerializer(plans, many=True)\n return R...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View active development plan for active user
def get_active_development_plan_for_user(request): current_employee = Employee.objects.get(user__pk=request.user.pk) current_development_plan = DevelopmentPlan.objects.filter( employee_relation=current_employee, employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!! if not current_employee: raise PermissionDenied() if current_development_plan: data={} development_plan_object_list=[] dev_plan={} dev_plan["id"] = current_development_plan.id dev_plan["deleted"] = current_development_plan.deleted if current_development_plan.type: dev_plan["type"] = current_development_plan.type.name dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\ .get(employee=current_employee, development_plan = current_development_plan)\ .finished_at dev_plan["created_at"] = current_development_plan.created_at dev_plan["created_by"] = current_development_plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) # manager_relation manager_data={} manager_data["manager_username"] = current_development_plan.manager_relation.user.username manager_data["manager_first_name"] = current_development_plan.manager_relation.user.first_name manager_data["manager_last_name"] = current_development_plan.manager_relation.user.last_name development_plan_object_list.append({"manager_data":manager_data}) # employee_relation employee_data={} all_employees = current_development_plan.employee_relation.all() if all_employees: emp_list=[] for emp in all_employees: emp_data={} emp_data["id"] = emp.user.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["status_questions"] = emp.status_questions employee_role = EmployeeRole.objects.filter(employee=emp).all() name_role_list = [] for obj in employee_role: name_role_list.append(obj.role.name) emp_data["roles"] = name_role_list emp_list.append(emp_data) employee_data={"all_employees":emp_list} else: return JsonResponse(data={"details":"Any employee has Development Plan with id={}" .format(current_development_plan.id)}, status=404) development_plan_object_list.append({"employee_data":employee_data}) # competence_parts all_competence_parts = current_development_plan.competence_parts.all() competence_list = [] questions_list = [] sliders_list = [] if all_competence_parts: for comp_part in all_competence_parts: comp_part_data={} competence_d={"competence_parts": []} comp_part_data["id"] = comp_part.id comp_part_data["title"] = comp_part.title comp_part_data["description"] = comp_part.description comp_part_data["competence_status"] = comp_part.competence_status all_questions = comp_part.question_set.all() print all_questions if all_questions: for question in all_questions: question_data = {} question_data["question_id"] = question.id question_data["title"] = question.title question_data["competence_part"] = question.competence_part.id answer = Answer.objects.filter(question__id = question.id, employee=current_employee).first() if answer: question_data["answer_id"] = answer.id question_data["answer"] = answer.title questions_list.append(question_data) comp_part_data["questions"] = questions_list all_sliders = comp_part.slider_set.all() if all_sliders: for slider in all_sliders: slider_data = {} slider_data["slider_id"] = slider.id slider_data["scale"] = slider.scale slider_data["competence_part"] = slider.competence_part.id answer = Answer.objects.filter(slider__id = slider.id, employee=current_employee).first() if 
slider: slider_data["answer_id"] = answer.id slider_data["answer"] = answer.slider.scale sliders_list.append(slider_data) comp_part_data["sliders"] = sliders_list comp_part_data["created_at"] = comp_part.created_at comp_part_data["created_by"] = comp_part.created_by.username comp_part_data["updated_at"] = comp_part.updated_at comp_part_data["updated_by"] = comp_part.updated_by.username competence_keys_list = ['id', 'title', 'description', 'language_code', 'status'] if not competence_list: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) else: competence_found = False for competence_dict in competence_list: if competence_dict['id'] == comp_part.competence.id: competence_dict['competence_parts'].append(comp_part_data) competence_found = True break if not competence_found: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) development_plan_object_list.append({"competences":competence_list}) else: return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet" .format(current_development_plan.id)}, status=404) data = {"dev_plan:": development_plan_object_list} return JsonResponse(status=201, data=data) else: return JsonResponse(data={"details": "The user with id={} doesn't have an active Development Plan" .format(current_employee.user.id)}, status=404)
[ "def plan_list_manage(request, username):\n user = User.objects.get(username=username)\n plans = Plan.objects.filter(user=user)\n return render(request, 'nutrition/plan_list_manage.html', {'plans': plans})", "def list_plans():\n click.echo(PaymentPlan.list())", "def current_plan(self):\n # re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get or Update goal by id
def self_goal_by_id(request, goal_id): current_user = request.user fields_map = { 'goal_answers': lambda g: [ { 'id': answ.id, 'title': answ.title, "created_by": answ.created_by.username, "created_at": answ.created_at, "file": answ.file.url } for answ in g.goal_answers.all() ] } fields = ['title', 'goal_answers', 'id', 'is_achieved'] goal = Goal.objects.get(pk=goal_id) if request.method == 'POST': if goal.created_by != current_user: raise PermissionDenied("You can edit only your own goals") f = GoalForm(data=request.json_body) if not f.is_valid(): return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400) goal = f.save(current_user, goal) return JsonResponse( data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields}, status=200 )
[ "def update_goal(goal_id):\n if goal_id == None:\n return make_response(404)\n\n else:\n goal = get_goal_from_id(goal_id)\n request_body = request.get_json()\n\n if \"title\" in request_body:\n goal.title = request_body[\"title\"]\n\n goal_response = goal.to_dict(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes a csv file as an argument, deduplicates the file, and writes the deduplicated dataset to a csv file if a path for the output file is provided as the second argument. It returns the deduplicated dataframe.
def dataDedup_csv(infile, outfile=None): if fpath.isfile(infile): dataset = pd.read_csv(infile, sep=',', dtype='unicode') dedup_dataset = dataset.drop_duplicates() if outfile!=None: dedup_dataset.to_csv(outfile, encoding='utf-8', index=False, header=False) return dedup_dataset else: print("file \"%s\" does not exist... or is not a file..." %(infile))
[ "def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function checks the size of a dataframe and splits it into parts containing approximately 1 million records (the default number of records for each dataframe). It also provides the option of writing the split dataframes to disk.
def dataFrameSplit(df, norec=1000000, outfile= None): # calculation of the no. of rows of the dataframe df_rsz = len(df.index) if df_rsz>norec: no_splits = np.ceil(df_rsz/norec) dfarr = np.array_split(df,no_splits) return dfarr else: print("The dataframe doesn't have sufficient records") # printing to disk when if outfile!=None: i=0 for arr in dfarr: arr.to_csv("D:\\ddf"+str(i+1)+".csv",encoding='utf-8', index=False, header=False) i = i+1
[ "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed words in a sequence using a GloVe model.
def __glove_embed__(sequence, model): embedded = [] for word in sequence: embedded.append(model[word]) return embedded
[ "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def embed_text(self, text):\n\n marked_text = \"[CLS] \" + text + \" ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }