code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
_db = self.get_database(database_name, username, password) return _db[collection]
def get_collection(self, collection, database_name=None, username=None, password=None)
Get a pymongo collection handle. :param collection: Name of collection :param database_name: (optional) Name of database :param username: (optional) Username to login with :param password: (optional) Password to login with :return: Pymongo collection object
3.584255
3.970438
0.902735
def _balanced_subtree(leaves): if len(leaves) == 1: return leaves[0] elif len(leaves) == 2: return (leaves[0], leaves[1]) else: split = len(leaves) // 2 return (_balanced_subtree(leaves[:split]), _balanced_subtree(leaves[split:])) return _balanced_subtree(np.arange(n_leaves))
def balanced_binary_tree(n_leaves)
Create a balanced binary tree
2.04985
2.098854
0.976652
def _list(leaves): if len(leaves) == 2: return (leaves[0], leaves[1]) else: return (leaves[0], _list(leaves[1:])) return _list(np.arange(n_leaves))
def decision_list(n_leaves)
Create a decision list
2.722874
2.840026
0.95875
def _random_subtree(leaves): if len(leaves) == 1: return leaves[0] elif len(leaves) == 2: return (leaves[0], leaves[1]) else: split = npr.randint(1, len(leaves)-1) return (_random_subtree(leaves[:split]), _random_subtree(leaves[split:])) return _random_subtree(np.arange(n_leaves))
def random_tree(n_leaves)
Randomly partition the nodes
2.081503
2.195377
0.94813
lvs = [] def _leaves(node): if np.isscalar(node): lvs.append(node) elif isinstance(node, tuple) and len(node) == 2: _leaves(node[0]) _leaves(node[1]) else: raise Exception("Not a tree!") _leaves(tree) return lvs
def leaves(tree)
Return the leaves in this subtree.
2.359807
2.345588
1.006062
n = len(leaves(tree)) addr = np.nan * np.ones((n, n-1)) def _addresses(node, index, choices): # index is the index of the current internal node # choices is a list of (indice, 0/1) choices made if np.isscalar(node): for i, choice in choices: addr[node, i] = choice return index elif isinstance(node, tuple) and len(node) == 2: newindex = _addresses(node[0], index+1, choices + [(index, 0)]) newindex = _addresses(node[1], newindex, choices + [(index, 1)]) return newindex else: raise Exception("Not a tree!") _addresses(tree, 0, []) return addr
def choices(tree)
Get the 'address' of each leaf node in terms of internal node choices
3.921427
3.72211
1.05355
dd = ids(tree) N = len(dd) A = np.zeros((N, N)) def _adj(node): if np.isscalar(node): return elif isinstance(node, tuple) and len(node) == 2: A[dd[node], dd[node[0]]] = 1 A[dd[node[0]], dd[node]] = 1 _adj(node[0]) A[dd[node], dd[node[1]]] = 1 A[dd[node[1]], dd[node]] = 1 _adj(node[1]) _adj(tree) return A
def adjacency(tree)
Construct the adjacency matrix of the tree :param tree: :return:
2.2329
2.212719
1.009121
import autograd.numpy as anp from autograd import value_and_grad, hessian_vector_product from scipy.optimize import minimize assert weights is None assert stats is None if not isinstance(data, list): assert isinstance(data, tuple) and len(data) == 2 data = [data] # Define a helper function for the log of the logistic fn def loglogistic(psi): return psi - anp.log(1+anp.exp(psi)) # optimize each row of A and b for n in range(self.D_out): # Define an objective function for the n-th row of hstack((A, b)) # This is the negative log likelihood of the n-th column of data. def nll(abn): an, bn = abn[:-1], abn[-1] T = 0 ll = 0 for (x, y) in data: T += x.shape[0] yn = y[:, n] psi = anp.dot(x, an) + bn ll += anp.sum(yn * loglogistic(psi)) ll += anp.sum((1 - yn) * loglogistic(-1. * psi)) # Include a penalty on the weights ll -= lmbda * T * anp.sum(an**2) ll -= lmbda * T * bn**2 return -1 * ll / T abn0 = np.concatenate((self.A[n], self.b[n])) res = minimize(value_and_grad(nll), abn0, tol=1e-3, method="Newton-CG", jac=True, hessp=hessian_vector_product(nll)) assert res.success self.A[n] = res.x[:-1] self.b[n] = res.x[-1]
def max_likelihood(self, data, weights=None, stats=None, lmbda=0.1)
As an alternative to MCMC with Polya-gamma augmentation, we also implement maximum likelihood learning via gradient descent with autograd. This follows the pybasicbayes convention. :param data: list of tuples, (x,y), for each dataset. :param weights: Not used in this implementation. :param stats: Not used in this implementation.
3.327492
3.36188
0.989771
if not isinstance(data, list): assert isinstance(data, tuple) and len(data) == 2, \ "datas must be an (x,y) tuple or a list of such tuples" data = [data] if mask is None: mask = [np.ones(y.shape, dtype=bool) for x, y in data] # Resample auxiliary variables if they are not given if omega is None: omega = self._resample_auxiliary_variables(data) # Make copies of parameters (for sample collection in calling methods) self.A = self.A.copy() self.b = self.b.copy() D = self.D_in for n in range(self.D_out): # Resample C_{n,:} given z, omega[:,n], and kappa[:,n] prior_Sigma = np.zeros((D + 1, D + 1)) prior_Sigma[:D, :D] = self.sigmasq_A[n] prior_Sigma[D, D] = self.sigmasq_b[n] prior_J = np.linalg.inv(prior_Sigma) prior_h = prior_J.dot(np.concatenate((self.mu_A[n], [self.mu_b[n]]))) lkhd_h = np.zeros(D + 1) lkhd_J = np.zeros((D + 1, D + 1)) for d, m, o in zip(data, mask, omega): if isinstance(d, tuple): x, y = d else: x, y = d[:, :D], d[:, D:] augx = np.hstack((x, np.ones((x.shape[0], 1)))) J = o * m h = self.kappa_func(y) * m lkhd_J += (augx * J[:, n][:, None]).T.dot(augx) lkhd_h += h[:, n].T.dot(augx) post_h = prior_h + lkhd_h post_J = prior_J + lkhd_J joint_sample = sample_gaussian(J=post_J, h=post_h) self.A[n, :] = joint_sample[:D] self.b[n] = joint_sample[D]
def resample(self, data, mask=None, omega=None)
Multinomial regression is somewhat special. We have to compute the kappa functions for the entire dataset, not just for one column of the data at a time.
3.322058
3.255881
1.020325
# type: (str, Mapping[str, Optional[Mapping[str, Union[str, List[str]]]]) -> Dict[str, Union[str, List[str]]] userinfo = self._db[user_id] claims = {claim: userinfo[claim] for claim in requested_claims if claim in userinfo} return claims
def get_claims_for(self, user_id, requested_claims)
Filter the userinfo based on which claims were requested. :param user_id: user identifier :param requested_claims: see <a href="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter"> "OpenID Connect Core 1.0", Section 5.5</a> for structure :return: All requested claims available from the userinfo.
3.489676
4.191319
0.832596
# type: (oic.oic.message.AuthorizationRequest, str, Optional[List[str]]) -> str if not self._is_valid_subject_identifier(subject_identifier): raise InvalidSubjectIdentifier('{} unknown'.format(subject_identifier)) scope = ' '.join(scope or authorization_request['scope']) logger.debug('creating authz code for scope=%s', scope) authorization_code = rand_str() authz_info = { 'used': False, 'exp': int(time.time()) + self.authorization_code_lifetime, 'sub': subject_identifier, 'granted_scope': scope, self.KEY_AUTHORIZATION_REQUEST: authorization_request.to_dict() } self.authorization_codes[authorization_code] = authz_info logger.debug('new authz_code=%s to client_id=%s for sub=%s valid_until=%s', authorization_code, authorization_request['client_id'], subject_identifier, authz_info['exp']) return authorization_code
def create_authorization_code(self, authorization_request, subject_identifier, scope=None)
Creates an authorization code bound to the authorization request and the authenticated user identified by the subject identifier.
3.119541
3.344456
0.93275
# type: (oic.oic.message.AuthorizationRequest, str, Optional[List[str]]) -> se_leg_op.access_token.AccessToken if not self._is_valid_subject_identifier(subject_identifier): raise InvalidSubjectIdentifier('{} unknown'.format(subject_identifier)) scope = scope or authorization_request['scope'] return self._create_access_token(subject_identifier, authorization_request.to_dict(), ' '.join(scope))
def create_access_token(self, authorization_request, subject_identifier, scope=None)
Creates an access token bound to the authentication request and the authenticated user identified by the subject identifier.
4.481205
5.011134
0.89425
# type: (str, Mapping[str, Union[str, List[str]]], str, Optional[str]) -> se_leg_op.access_token.AccessToken access_token = AccessToken(rand_str(), self.access_token_lifetime) scope = current_scope or granted_scope logger.debug('creating access token for scope=%s', scope) authz_info = { 'iat': int(time.time()), 'exp': int(time.time()) + self.access_token_lifetime, 'sub': subject_identifier, 'client_id': auth_req['client_id'], 'aud': [auth_req['client_id']], 'scope': scope, 'granted_scope': granted_scope, 'token_type': access_token.BEARER_TOKEN_TYPE, self.KEY_AUTHORIZATION_REQUEST: auth_req } self.access_tokens[access_token.value] = authz_info logger.debug('new access_token=%s to client_id=%s for sub=%s valid_until=%s', access_token.value, auth_req['client_id'], subject_identifier, authz_info['exp']) return access_token
def _create_access_token(self, subject_identifier, auth_req, granted_scope, current_scope=None)
Creates an access token bound to the subject identifier, client id and requested scope.
2.875261
2.921811
0.984068
# type: (str) -> se_leg_op.access_token.AccessToken if authorization_code not in self.authorization_codes: raise InvalidAuthorizationCode('{} unknown'.format(authorization_code)) authz_info = self.authorization_codes[authorization_code] if authz_info['used']: logger.debug('detected already used authz_code=%s', authorization_code) raise InvalidAuthorizationCode('{} has already been used'.format(authorization_code)) elif authz_info['exp'] < int(time.time()): logger.debug('detected expired authz_code=%s, now=%s > exp=%s ', authorization_code, int(time.time()), authz_info['exp']) raise InvalidAuthorizationCode('{} has expired'.format(authorization_code)) authz_info['used'] = True access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST], authz_info['granted_scope']) logger.debug('authz_code=%s exchanged to access_token=%s', authorization_code, access_token.value) return access_token
def exchange_code_for_token(self, authorization_code)
Exchanges an authorization code for an access token.
2.988731
3.078641
0.970796
# type: (str) -> Dict[str, Union[str, List[str]]] if access_token_value not in self.access_tokens: raise InvalidAccessToken('{} unknown'.format(access_token_value)) authz_info = self.access_tokens[access_token_value] introspection = {'active': authz_info['exp'] >= int(time.time())} introspection_params = {k: v for k, v in authz_info.items() if k in TokenIntrospectionResponse.c_param} introspection.update(introspection_params) return introspection
def introspect_access_token(self, access_token_value)
Returns authorization data associated with the access token. See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>.
3.00073
3.100391
0.967855
# type: (str) -> str if access_token_value not in self.access_tokens: raise InvalidAccessToken('{} unknown'.format(access_token_value)) if not self.refresh_token_lifetime: logger.debug('no refresh token issued for for access_token=%s', access_token_value) return None refresh_token = rand_str() authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime} self.refresh_tokens[refresh_token] = authz_info logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token, authz_info['exp'], access_token_value) return refresh_token
def create_refresh_token(self, access_token_value)
Creates a refresh token bound to the specified access token.
2.963805
3.141436
0.943456
# type (str, Optional[List[str]]) -> Tuple[se_leg_op.access_token.AccessToken, Optional[str]] if refresh_token not in self.refresh_tokens: raise InvalidRefreshToken('{} unknown'.format(refresh_token)) refresh_token_info = self.refresh_tokens[refresh_token] if 'exp' in refresh_token_info and refresh_token_info['exp'] < int(time.time()): raise InvalidRefreshToken('{} has expired'.format(refresh_token)) authz_info = self.access_tokens[refresh_token_info['access_token']] if scope: if not requested_scope_is_allowed(scope, authz_info['granted_scope']): logger.debug('trying to refresh token with superset scope, requested_scope=%s, granted_scope=%s', scope, authz_info['granted_scope']) raise InvalidScope('Requested scope includes non-granted value') scope = ' '.join(scope) logger.debug('refreshing token with new scope, old_scope=%s -> new_scope=%s', authz_info['scope'], scope) else: # OAuth 2.0: scope: "[...] if omitted is treated as equal to the scope originally granted by the resource owner" scope = authz_info['granted_scope'] new_access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST], authz_info['granted_scope'], scope) new_refresh_token = None if self.refresh_token_threshold \ and 'exp' in refresh_token_info \ and refresh_token_info['exp'] - int(time.time()) < self.refresh_token_threshold: # refresh token is close to expiry, issue a new one new_refresh_token = self.create_refresh_token(new_access_token.value) else: self.refresh_tokens[refresh_token]['access_token'] = new_access_token.value logger.debug('refreshed tokens, new_access_token=%s new_refresh_token=%s old_refresh_token=%s', new_access_token, new_refresh_token, refresh_token) return new_access_token, new_refresh_token
def use_refresh_token(self, refresh_token, scope=None)
Creates a new access token, and refresh token, based on the supplied refresh token. :return: new access token and new refresh token if the old one had an expiration time
2.954923
3.013353
0.98061
# type: (str, str, str) -> str if user_id not in self.subject_identifiers: self.subject_identifiers[user_id] = {} if subject_type == 'public': if 'public' not in self.subject_identifiers[user_id]: new_sub = self._subject_identifier_factory.create_public_identifier(user_id) self.subject_identifiers[user_id] = {'public': new_sub} logger.debug('created new public sub=% for user_id=%s', self.subject_identifiers[user_id]['public'], user_id) sub = self.subject_identifiers[user_id]['public'] logger.debug('returning public sub=%s', sub) return sub elif subject_type == 'pairwise': if not sector_identifier: raise ValueError('sector_identifier cannot be None or empty') subject_id = self._subject_identifier_factory.create_pairwise_identifier(user_id, sector_identifier) logger.debug('returning pairwise sub=%s for user_id=%s and sector_identifier=%s', subject_id, user_id, sector_identifier) sub = self.subject_identifiers[user_id] pairwise_set = set(sub.get('pairwise', [])) pairwise_set.add(subject_id) sub['pairwise'] = list(pairwise_set) self.subject_identifiers[user_id] = sub return subject_id raise ValueError('Unknown subject_type={}'.format(subject_type))
def get_subject_identifier(self, subject_type, user_id, sector_identifier=None)
Returns a subject identifier for the local user identifier. :param subject_type: 'pairwise' or 'public', see <a href="http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes"> "OpenID Connect Core 1.0", Section 8</a>. :param user_id: local user identifier :param sector_identifier: the client's sector identifier, see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology"> "OpenID Connect Core 1.0", Section 1.2</a>
2.002808
2.10468
0.951597
try: authentication_request.verify() except MessageException as e: raise InvalidAuthenticationRequest(str(e), authentication_request, oauth_error='invalid_request') from e
def authorization_request_verify(authentication_request)
Verifies that all required parameters and correct values are included in the authentication request. :param authentication_request: the authentication request to verify :raise InvalidAuthenticationRequest: if the authentication is incorrect
5.446063
5.602536
0.972071
if authentication_request['client_id'] not in provider.clients: logger.error('Unknown client_id \'{}\''.format(authentication_request['client_id'])) raise InvalidAuthenticationRequest('Unknown client_id', authentication_request, oauth_error='unauthorized_client')
def client_id_is_known(provider, authentication_request)
Verifies the client identifier is known. :param provider: provider instance :param authentication_request: the authentication request to verify :raise InvalidAuthenticationRequest: if the client_id is unknown
3.419837
3.363453
1.016764
error = InvalidAuthenticationRequest('Redirect uri is not registered', authentication_request, oauth_error="invalid_request") try: allowed_redirect_uris = provider.clients[authentication_request['client_id']]['redirect_uris'] except KeyError as e: logger.error('client metadata is missing redirect_uris') raise error if authentication_request['redirect_uri'] not in allowed_redirect_uris: logger.error("Redirect uri \'{0}\' is not registered for this client".format(authentication_request['redirect_uri'])) raise error
def redirect_uri_is_in_registered_redirect_uris(provider, authentication_request)
Verifies the redirect uri is registered for the client making the request. :param provider: provider instance :param authentication_request: authentication request to verify :raise InvalidAuthenticationRequest: if the redirect uri is not registered
3.341239
3.165551
1.0555
error = InvalidAuthenticationRequest('Response type is not registered', authentication_request, oauth_error='invalid_request') try: allowed_response_types = provider.clients[authentication_request['client_id']]['response_types'] except KeyError as e: logger.error('client metadata is missing response_types') raise error if not is_allowed_response_type(authentication_request['response_type'], allowed_response_types): logger.error('Response type \'{}\' is not registered'.format(' '.join(authentication_request['response_type']))) raise error
def response_type_is_in_registered_response_types(provider, authentication_request)
Verifies that the requested response type is allowed for the client making the request. :param provider: provider instance :param authentication_request: authentication request to verify :raise InvalidAuthenticationRequest: if the response type is not allowed
3.372904
3.511032
0.960659
will_issue_access_token = authentication_request['response_type'] != ['id_token'] contains_userinfo_claims_request = 'claims' in authentication_request and 'userinfo' in authentication_request[ 'claims'] if not will_issue_access_token and contains_userinfo_claims_request: raise InvalidAuthenticationRequest('Userinfo claims cannot be requested, when response_type=\'id_token\'', authentication_request, oauth_error='invalid_request')
def userinfo_claims_only_specified_when_access_token_is_issued(authentication_request)
According to <a href="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter"> "OpenID Connect Core 1.0", Section 5.5</a>: "When the userinfo member is used, the request MUST also use a response_type value that results in an Access Token being issued to the Client for use at the UserInfo Endpoint." :param authentication_request: the authentication request to verify :raise InvalidAuthenticationRequest: if the requested claims can not be returned according to the request
3.825876
4.002417
0.955891
try: registration_request.verify() except MessageException as e: raise InvalidClientRegistrationRequest(str(e), registration_request, oauth_error='invalid_request') from e
def registration_request_verify(registration_request)
Verifies that all required parameters and correct values are included in the client registration request. :param registration_request: the registration request to verify :raise InvalidClientRegistrationRequest: if the registration is incorrect
5.681155
5.097745
1.114445
def match(client_preference, provider_capability): if isinstance(client_preference, list): # deal with comparing space separated values, e.g. 'response_types', without considering the order # at least one requested preference must be matched return len(find_common_values(client_preference, provider_capability)) > 0 return client_preference in provider_capability for client_preference in registration_request.keys(): if client_preference not in PREFERENCE2PROVIDER: # metadata parameter that shouldn't be matched continue provider_capability = PREFERENCE2PROVIDER[client_preference] if not match(registration_request[client_preference], provider.configuration_information[provider_capability]): raise InvalidClientRegistrationRequest( 'Could not match client preference {}={} with provider capability {}={}'.format( client_preference, registration_request[client_preference], provider_capability, provider.configuration_information[provider_capability]), registration_request, oauth_error='invalid_request')
def client_preferences_match_provider_capabilities(provider, registration_request)
Verifies that all requested preferences in the client metadata can be fulfilled by this provider. :param registration_request: the registration request to verify :raise InvalidClientRegistrationRequest: if the registration is incorrect
4.582292
4.348503
1.053763
return 2**(b-1) / gamma(b) * (-1)**n * \ np.exp(gammaln(n+b) - gammaln(n+1) + np.log(2*n+b) - 0.5 * np.log(2*np.pi*x**3) - (2*n+b)**2 / (8.*x))
def _psi_n(x, n, b)
Compute the n-th term in the infinite sum of the Jacobi density.
4.497629
4.549716
0.988551
return np.cosh(psi/2.0)**b * np.exp(-psi**2/2.0 * omega)
def _tilt(omega, b, psi)
Compute the tilt of the PG density for value omega and tilt psi. :param omega: point at which to evaluate the density :param psi: tilt parameter
6.961483
10.554428
0.659579
ns = np.arange(trunc) psi_ns = np.array([_psi_n(omega, n, b) for n in ns]) pdf = np.sum(psi_ns, axis=0) # Account for tilting pdf *= _tilt(omega, b, psi) return pdf
def pgpdf(omega, b, psi, trunc=200)
Approximate the density PG(omega | b, psi) using a truncation of the density written as an infinite sum. :param omega: point at which to evaluate density :param b: first parameter of PG :param psi: tilting of PG :param trunc: number of terms in sum
4.44183
5.434475
0.817343
if axis is None: if psi.ndim == 1: K = psi.size + 1 pi = np.zeros(K) # Set pi[1..K-1] stick = 1.0 for k in range(K-1): pi[k] = logistic(psi[k]) * stick stick -= pi[k] # Set the last output pi[-1] = stick # DEBUG assert np.allclose(pi.sum(), 1.0) elif psi.ndim == 2: M, Km1 = psi.shape K = Km1 + 1 pi = np.zeros((M,K)) # Set pi[1..K-1] stick = np.ones(M) for k in range(K-1): pi[:,k] = logistic(psi[:,k]) * stick stick -= pi[:,k] # Set the last output pi[:,-1] = stick # DEBUG assert np.allclose(pi.sum(axis=1), 1.0) else: raise ValueError("psi must be 1 or 2D") else: K = psi.shape[axis] + 1 pi = np.zeros([psi.shape[dim] if dim != axis else K for dim in range(psi.ndim)]) stick = np.ones(psi.shape[:axis] + psi.shape[axis+1:]) for k in range(K-1): inds = [slice(None) if dim != axis else k for dim in range(psi.ndim)] pi[inds] = logistic(psi[inds]) * stick stick -= pi[inds] pi[[slice(None) if dim != axis else -1 for dim in range(psi.ndim)]] = stick assert np.allclose(pi.sum(axis=axis), 1.) return pi
def psi_to_pi(psi, axis=None)
Convert psi to a probability vector pi :param psi: Length K-1 vector :return: Length K normalized probability vector
2.083222
2.052287
1.015073
# type: (Mapping[str, str], Mapping[str, Mapping[str, Any]], Optional[str]) -> bool client_id = None client_secret = None authn_method = None if authz_header: logger.debug('client authentication in Authorization header %s', authz_header) authz_scheme = authz_header.split(maxsplit=1)[0] if authz_scheme == 'Basic': authn_method = 'client_secret_basic' credentials = authz_header[len('Basic '):] missing_padding = 4 - len(credentials) % 4 if missing_padding: credentials += '=' * missing_padding try: auth = base64.urlsafe_b64decode(credentials.encode('utf-8')).decode('utf-8') except UnicodeDecodeError as e: raise InvalidClientAuthentication('Could not userid/password from authorization header'.format(authz_scheme)) client_id, client_secret = auth.split(':') else: raise InvalidClientAuthentication('Unknown scheme in authorization header, {} != Basic'.format(authz_scheme)) elif 'client_id' in parsed_request: logger.debug('client authentication in request body %s', parsed_request) client_id = parsed_request['client_id'] if 'client_secret' in parsed_request: authn_method = 'client_secret_post' client_secret = parsed_request['client_secret'] else: authn_method = 'none' client_secret = None if client_id not in clients: raise InvalidClientAuthentication('client_id \'{}\' unknown'.format(client_id)) client_info = clients[client_id] if client_secret != client_info.get('client_secret', None): raise InvalidClientAuthentication('Incorrect client_secret') expected_authn_method = client_info.get('token_endpoint_auth_method', 'client_secret_basic') if authn_method != expected_authn_method: raise InvalidClientAuthentication( 'Wrong authentication method used, MUST use \'{}\''.format(expected_authn_method)) return client_id
def verify_client_authentication(clients, parsed_request, authz_header=None)
Verifies client authentication at the token endpoint, see <a href="https://tools.ietf.org/html/rfc6749#section-2.3.1">"The OAuth 2.0 Authorization Framework", Section 2.3.1</a> :param parsed_request: key-value pairs from parsed urlencoded request :param clients: clients db :param authz_header: the HTTP Authorization header value :return: the unmodified parsed request :raise InvalidClientAuthentication: if the client authentication was incorrect
2.103941
2.150751
0.978235
new_panel_index = self.get_max_index()+1 if col and row: new_panel = { 'col': col, 'row': row, 'size_x': size_x, 'size_y': size_y, 'panelIndex': new_panel_index, 'type': 'visualization', 'id': visualization.id } self.panels.append(new_panel) return new_panel else: new_panel = append_panel(self.panels, size_x, size_y) if new_panel: new_panel['id'] = visualization.id new_panel['panelIndex'] = new_panel_index new_panel['type'] = 'visualization' return new_panel
def add_visualization(self, visualization, size_x=6, size_y=3, col=0, row=0)
Adds the visualization to the dashboard. Leave col and row = 0 for automatic placement of the visualization. Visualizations are placed on a grid with 12 columns and unlimited rows. :param visualization: previously loaded visualization :param size_x: width of the panel :param size_y: height of the panel :param col: 1-based column of the top left corner, leave 0 for automatic placement :param row: 1-based row of the top left corner, leave 0 for automatic placement :return: newly created panel or None
2.244264
2.234396
1.004416
for i, panel in enumerate(self.panels): if panel['id'] == visualization_id: del self.panels[i]
def remove_visualization(self, visualization_id)
Removes all visualizations with the specified id from the dashboard :param visualization_id: :return:
3.510778
4.061594
0.864384
res = self.es.search(index=self.index, doc_type=self.doc_type, body={'query': {'match_all': {}}}) if not res['hits']['total']: return [] return [Visualization.from_kibana(hit) for hit in res['hits']['hits']]
def get_all(self)
Returns a list of all visualizations :return: list of the Visualization instances
3.031722
2.634483
1.150785
res = self.es.create(index=self.index, id=visualization.id or str(uuid.uuid1()), doc_type=self.doc_type, body=visualization.to_kibana(), refresh=True) return res
def add(self, visualization)
Creates a new visualization :param visualization: instance of Visualization :return:
3.995502
4.120222
0.96973
res = self.es.update(index=self.index, id=visualization.id, doc_type=self.doc_type, body={'doc': visualization.to_kibana()}, refresh=True) return res
def update(self, visualization)
Updates existing visualization :param visualization: instance of Visualization that was previously loaded :return:
4.174636
4.217799
0.989767
res = self.es.delete(index=self.index, id=visualization.id, doc_type=self.doc_type, refresh=True) return res
def remove(self, visualization)
Deletes the visualization :param visualization: instance of Visualization that was previously loaded :return:
3.494954
3.855923
0.906386
bottom_lines = [(p['col'], p['row'] + p['size_y'], p['col'] + p['size_x'], p['row'] + p['size_y']) for p in panels] return sorted(bottom_lines, key=lambda l: l[1], reverse=True)
def bottoms(panels)
Finds bottom lines of all panels :param panels: :return: sorted by row list of tuples representing lines (col, row , col + len, row)
3.053489
2.360931
1.293341
shape = [1] * max_len for i in range(max_len): for line in bottom_lines: if line[0] <= i + 1 < line[2]: shape[i] = line[1] break return shape
def find_shape(bottom_lines, max_len)
Finds a shape of lowest horizontal lines with step=1 :param bottom_lines: :param max_len: :return: list of levels (row values), list indexes are columns
2.406527
2.813553
0.855334
lines = [] for level in set(shape): count = 0 for i in range(len(shape)): if shape[i] <= level: count += 1 elif count: lines.append({'row': level, 'col': i - count + 1, 'len': count}) count = 0 if count: lines.append({'row': level, 'col': i - count + 2, 'len': count}) return sorted(lines, key=lambda l: l['row'])
def longest_lines(shape)
Creates lines from shape :param shape: :return: list of dictionaries with col,row,len fields
2.494358
2.427758
1.027433
bottom_lines = bottoms(panels) shape = find_shape(bottom_lines, max_col) lines = longest_lines(shape) line = find_place(lines, size_x) if not line: return panel = { 'col': line['col'], 'row': line['row'], 'size_x': size_x, 'size_y': size_y, } panels.append(panel) return panel
def append_panel(panels, size_x, size_y, max_col=12)
Appends a panel to the list of panels. Finds the highest place at the left for the new panel. :param panels: :param size_x: :param size_y: :param max_col: :return: a new panel or None if it is not possible to place a panel with such size_x
3.586715
3.600237
0.996244
self.test_runner = test_runner_class super(Command, self).run_from_argv(argv)
def run_from_argv(self, argv)
Set the default Gherkin test runner for its options to be parsed.
6.666154
4.689579
1.421483
if not options.get('testrunner', None): options['testrunner'] = test_runner_class return super(Command, self).handle(*test_labels, **options)
def handle(self, *test_labels, **options)
Set the default Gherkin test runner.
4.176837
3.217069
1.298336
base_url = step.test.live_server_url if url: return urljoin(base_url, url) else: return base_url
def django_url(step, url=None)
The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append.
4.170027
3.892046
1.071423
fields = get_apphook_field_names(self.model) if not fields: raise ValueError( ugettext( 'Can\'t find any relation to an ApphookConfig model in {0}' ).format(self.model.__name__) ) if to and to not in fields: raise ValueError( ugettext( 'Can\'t find relation to ApphookConfig model named ' '"{0}" in "{1}"' ).format(to, self.model.__name__) ) if len(fields) > 1 and to not in fields: raise ValueError( ugettext( '"{0}" has {1} relations to an ApphookConfig model.' ' Please, specify which one to use in argument "to".' ' Choices are: {2}' ).format( self.model.__name__, len(fields), ', '.join(fields) ) ) else: if not to: to = fields[0] lookup = '{0}__namespace'.format(to) kwargs = {lookup: namespace} return self.filter(**kwargs)
def namespace(self, namespace, to=None)
Filter by namespace. Try to guess which field to use in lookup. Accept 'to' argument if you need to specify.
2.800775
2.671415
1.048424
if not obj and not request.GET.get(self.app_config_attribute, False): config_model = get_apphook_model(self.model, self.app_config_attribute) if config_model.objects.count() == 1: return config_model.objects.first() return None elif obj and getattr(obj, self.app_config_attribute, False): return getattr(obj, self.app_config_attribute) elif request.GET.get(self.app_config_attribute, False): config_model = get_apphook_model(self.model, self.app_config_attribute) return config_model.objects.get( pk=int(request.GET.get(self.app_config_attribute, False)) ) return False
def _app_config_select(self, request, obj)
Return the select value for apphook configs :param request: request object :param obj: current object :return: False if no preselected value is available (more than one or no apphook config is present), apphook config instance if exactly one apphook config is defined or apphook config defined in the request or in the current object, False otherwise
1.996627
1.854884
1.076417
for config_option, field in self.app_config_values.items(): if field in form.base_fields: form.base_fields[field].initial = self.get_config_data(request, obj, config_option) return form
def _set_config_defaults(self, request, form, obj=None)
Cycle through app_config_values and sets the form value according to the options in the current apphook config. self.app_config_values is a dictionary containing config options as keys, form fields as values:: app_config_values = { 'apphook_config': 'form_field', ... } :param request: request object :param form: model form for the current model :param obj: current object :return: form with defaults set
3.586649
3.363302
1.066407
app_config_default = self._app_config_select(request, obj) if app_config_default is None and request.method == 'GET': return (_(self.app_config_selection_title), {'fields': (self.app_config_attribute, ), 'description': _(self.app_config_selection_desc)}), else: return super(ModelAppHookConfig, self).get_fieldsets(request, obj)
def get_fieldsets(self, request, obj=None)
If the apphook config must be selected first, returns a fieldset with just the app config field and help text :param request: :param obj: :return:
4.446247
3.734612
1.190551
return_value = None config = None if obj: try: config = getattr(obj, self.app_config_attribute, False) except ObjectDoesNotExist: # pragma: no cover pass if not config and self.app_config_attribute in request.GET: config_model = get_apphook_model(self.model, self.app_config_attribute) try: config = config_model.objects.get(pk=request.GET[self.app_config_attribute]) except config_model.DoesNotExist: # pragma: no cover pass if config: return_value = getattr(config, name) return return_value
def get_config_data(self, request, obj, name)
Method that retrieves a configuration option for a specific AppHookConfig instance :param request: the request object :param obj: the model instance :param name: name of the config option as defined in the config form :return value: config value or None if no app config is found
2.283328
2.348676
0.972177
form = super(ModelAppHookConfig, self).get_form(request, obj, **kwargs) if self.app_config_attribute not in form.base_fields: return form app_config_default = self._app_config_select(request, obj) if app_config_default: form.base_fields[self.app_config_attribute].initial = app_config_default get = copy.copy(request.GET) get[self.app_config_attribute] = app_config_default.pk request.GET = get elif app_config_default is None and request.method == 'GET': class InitialForm(form): class Meta(form.Meta): fields = (self.app_config_attribute,) form = InitialForm form = self._set_config_defaults(request, form, obj) return form
def get_form(self, request, obj=None, **kwargs)
Provides a flexible way to get the right form according to the context For the add view it checks whether the app_config is set; if not, a special form to select the namespace is shown, which is reloaded after namespace selection. If only one namespace exists, the current is selected and the normal form is used.
2.962014
2.894223
1.023423
for app in apps.get_app_configs(): for model in app.get_models(): yield (str(model._meta.verbose_name).lower(), model) yield (str(model._meta.verbose_name_plural).lower(), model)
def _models_generator()
Build a hash of model verbose names to models
2.844525
2.245266
1.266899
model = MODELS.get(name.lower(), None) assert model, "Could not locate model by name '%s'" % name return model
def get_model(name)
Convert a model's verbose name to the model class. This allows us to use the models verbose name in steps.
5.173631
4.865333
1.063366
sql = connection.ops.sequence_reset_sql(no_style(), [model]) for cmd in sql: connection.cursor().execute(cmd)
def reset_sequence(model)
Reset the ID sequence for a model.
4.047262
3.626447
1.116041
fields = [] for field in model._meta.fields: fields.append((field.name, str(getattr(model, field.name)))) if attrs is not None: for attr in attrs: fields.append((attr, str(getattr(model, attr)))) for field in model._meta.many_to_many: vals = getattr(model, field.name) fields.append((field.name, '{val} ({count})'.format( val=', '.join(map(str, vals.all())), count=vals.count(), ))) print(', '.join( '{0}={1}'.format(field, value) for field, value in fields ))
def _dump_model(model, attrs=None)
Dump the model fields for debugging.
2.275177
2.059115
1.10493
model = get_model(model) data = guess_types(self.hashes) queryset = model.objects try: existence_check = _TEST_MODEL[model] except KeyError: existence_check = test_existence failed = 0 try: for hash_ in data: match = existence_check(queryset, hash_) if should_exist: assert match, \ "%s does not exist: %s" % (model.__name__, hash_) else: assert not match, \ "%s exists: %s" % (model.__name__, hash_) except AssertionError as exc: print(exc) failed += 1 if failed: print("Rows in DB are:") for existing_model in queryset.all(): _dump_model(existing_model, attrs=[k[1:] for k in data[0].keys() if k.startswith('@')]) if should_exist: raise AssertionError("%i rows missing" % failed) else: raise AssertionError("%i rows found" % failed)
def _model_exists_step(self, model, should_exist)
Test for the existence of a model matching the given data.
4.377998
4.275227
1.024039
written = [] for hash_ in data: if field: if field not in hash_: raise KeyError(("The \"%s\" field is required for all update " "operations") % field) model_kwargs = {field: hash_[field]} model_obj = model.objects.get(**model_kwargs) for to_set, val in hash_.items(): setattr(model_obj, to_set, val) model_obj.save() else: model_obj = model.objects.create(**hash_) written.append(model_obj) reset_sequence(model) return written
def write_models(model, data, field)
:param model: a Django model class :param data: a list of hashes to build models from :param field: a field name to match models on, or None :returns: a list of models written Create or update models for each data hash. `field` is the field that is used to get the existing models out of the database to update them; otherwise, if ``field=None``, new models are created. Useful when registering custom tests with :func:`writes_models`.
3.705687
3.370688
1.099386
model = get_model(model) data = guess_types(self.hashes) try: func = _WRITE_MODEL[model] except KeyError: func = partial(write_models, model) func(data, field)
def _write_models_step(self, model, field=None)
Write or update a model.
8.282892
7.616946
1.08743
model = get_model(model) lookup = {rel_key: rel_value} rel_model = get_model(rel_model_name).objects.get(**lookup) data = guess_types(self.hashes) for hash_ in data: hash_['%s' % rel_model_name] = rel_model try: func = _WRITE_MODEL[model] except KeyError: func = partial(write_models, model) func(data, None)
def _create_models_for_relation_step(self, rel_model_name, rel_key, rel_value, model)
Create a new model linked to the given model. Syntax: And `model` with `field` "`value`" has `new model` in the database: Example: .. code-block:: gherkin And project with name "Ball Project" has goals in the database: | description | | To have fun playing with balls of twine |
5.631572
6.669588
0.844366
lookup = {rel_key: rel_value} rel_model = get_model(rel_model_name).objects.get(**lookup) relation = None for m2m in rel_model._meta.many_to_many: if relation_name in (m2m.name, m2m.verbose_name): relation = getattr(rel_model, m2m.name) break if not relation: try: relation = getattr(rel_model, relation_name) except AttributeError: pass assert relation, \ "%s does not have a many-to-many relation named '%s'" % ( rel_model._meta.verbose_name.capitalize(), relation_name, ) m2m_model = relation.model for hash_ in self.hashes: relation.add(m2m_model.objects.get(**hash_))
def _create_m2m_links_step(self, rel_model_name, rel_key, rel_value, relation_name)
Link many-to-many models together. Syntax: And `model` with `field` "`value`" is linked to `other model` in the database: Example: .. code-block:: gherkin And article with name "Guidelines" is linked to tags in the database: | name | | coding | | style |
2.643986
2.669293
0.990519
model = get_model(model) expected = int(count) found = model.objects.count() assert found == expected, "Expected %d %s, found %d." % \ (expected, model._meta.verbose_name_plural, found)
def _model_count_step(self, count, model)
Count the number of models in the database. Example: .. code-block:: gherkin Then there should be 0 goals in the database
4.354269
5.590106
0.778924
expected = int(count) actual = len(mail.outbox) assert expected == actual, \ "Expected to send {0} email(s), got {1}.".format(expected, actual)
def mail_sent_count(self, count)
Test that `count` mails have been sent. Syntax: I have sent `count` emails Example: .. code-block:: gherkin Then I have sent 2 emails
3.818753
4.609028
0.828538
if not any(text in getattr(email, part) for email in mail.outbox): dump_emails(part) raise AssertionError( "No email contained expected text in the {0}.".format(part))
def mail_sent_content(self, text, part)
Test an email contains (assert text in) the given text in the relevant message part (accessible as an attribute on the email object). This step strictly applies whitespace. Syntax: I have sent an email with "`text`" in the `part` Example: .. code-block:: gherkin Then I have sent an email with "pandas" in the body
10.970198
11.072555
0.990756
for email in mail.outbox: try: html = next(content for content, mime in email.alternatives if mime == 'text/html') dom1 = parse_html(html) dom2 = parse_html(self.multiline) assert_in(dom1, dom2) except AssertionError as exc: print("Email did not match", exc) # we intentionally eat the exception continue return True raise AssertionError("No email contained the HTML")
def mail_sent_contains_html(self)
Test that an email contains the HTML (assert HTML in) in the multiline as one of its MIME alternatives. The HTML is normalised by passing through Django's :func:`django.test.html.parse_html`. Example: .. code-block:: gherkin And I have sent an email with the following HTML alternative: \"\"\" <p><strong>Name:</strong> Sir Panda</p> <p><strong>Phone:</strong> 0400000000</p> <p><strong>Email:</strong> sir.panda@pand.as</p> \"\"\"
6.369125
6.084841
1.04672
print("Sent emails:") for email in mail.outbox: print(getattr(email, part))
def dump_emails(part)
Show the sent emails' tested parts, to aid in debugging.
10.036993
5.986418
1.676628
namespace = kwargs.pop('namespace', None) if not namespace: namespace, __ = get_app_instance(context['request']) if namespace: namespace += ':' reverse = partial( urls.reverse, '{0:s}{1:s}'.format(namespace, view_name)) # We're explicitly NOT happy to just re-raise the exception, as that may # adversely affect stack traces. if 'default' not in kwargs: if kwargs: return reverse(kwargs=kwargs) elif args: return reverse(args=args) else: return reverse() default = kwargs.pop('default', None) try: if kwargs: return reverse(kwargs=kwargs) elif args: return reverse(args=args) else: return reverse() except urls.NoReverseMatch: return default
def namespace_url(context, view_name, *args, **kwargs)
Returns an absolute URL matching named view with its parameters and the provided application instance namespace. If no namespace is passed as a kwarg (or it is "" or None), this templatetag will look into the request object for the app_config's namespace. If there is still no namespace found, this tag will act like the normal {% url ... %} template tag. Normally, this tag will return whatever is returned by the ultimate call to reverse, which also means it will raise NoReverseMatch if reverse() cannot find a match. This behaviour can be override by suppling a 'default' kwarg with the value of what should be returned when no match is found.
3.581276
3.429405
1.044285
app = None if getattr(request, 'current_page', None) and request.current_page.application_urls: app = apphook_pool.get_apphook(request.current_page.application_urls) if app and app.app_config: try: config = None with override(get_language_from_request(request, check_path=True)): namespace = resolve(request.path_info).namespace config = app.get_config(namespace) return namespace, config except Resolver404: pass return '', None
def get_app_instance(request)
Returns a tuple containing the current namespace and the AppHookConfig instance :param request: request object :return: namespace, config
3.172654
2.88053
1.101413
# allow use as a decorator if config_model is None: return setup_config(form_class, form_class.model) app_registry.register('config', AppDataContainer.from_form(form_class), config_model)
def setup_config(form_class, config_model=None)
Register the provided form as config form for the provided config model This can be used as a decorator by adding a `model` attribute to the config form:: @setup_config class ExampleConfigForm(AppDataForm): model = ExampleConfig :param form_class: Form class derived from AppDataForm :param config_model: Model class derived from AppHookConfig :return:
7.035301
6.664623
1.055619
from .models import AppHookConfig # avoid circular dependencies fields = [] for field in model._meta.fields: if isinstance(field, ForeignKey) and issubclass(field.remote_field.model, AppHookConfig): fields.append(field) return [field.name for field in fields]
def _get_apphook_field_names(model)
Return all foreign key field names for a AppHookConfig based model
2.61148
2.12431
1.229331
key = APP_CONFIG_FIELDS_KEY.format( app_label=model._meta.app_label, model_name=model._meta.object_name ).lower() if not hasattr(model, key): field_names = _get_apphook_field_names(model) setattr(model, key, field_names) return getattr(model, key)
def get_apphook_field_names(model)
Cache app-hook field names on model :param model: model class or object :return: list of foreign key field names to AppHookConfigs
2.566076
2.857802
0.897919
keys = get_apphook_field_names(obj) return [getattr(obj, key) for key in keys] if keys else []
def get_apphook_configs(obj)
Get apphook configs for an object obj :param obj: any model instance :return: list of apphook configs for given obj
4.630051
5.708808
0.811036
_LOGGER.debug("Starting: data_received") _LOGGER.debug('Received %d bytes from PLM: %s', len(data), binascii.hexlify(data)) self._buffer.put_nowait(data) asyncio.ensure_future(self._peel_messages_from_buffer(), loop=self._loop) _LOGGER.debug("Finishing: data_received")
def data_received(self, data)
Receive data from the protocol. Called when asyncio.Protocol detects received data from network.
4.196796
4.096958
1.024369
if exc is None: _LOGGER.warning('End of file received from Insteon Modem') else: _LOGGER.warning('Lost connection to Insteon Modem: %s', exc) self.transport = None asyncio.ensure_future(self.pause_writing(), loop=self.loop) if self._connection_lost_callback: self._connection_lost_callback()
def connection_lost(self, exc)
Reestablish the connection to the transport. Called when asyncio.Protocol loses the network connection.
3.911982
3.716631
1.052561
_LOGGER.debug('Added new callback %s ', callback) self._cb_load_all_link_db_done.append(callback)
def add_all_link_done_callback(self, callback)
Register a callback to be invoked when the ALDB is loaded.
11.665444
9.006779
1.295185
_LOGGER.debug('Added new callback %s ', callback) self._cb_device_not_active.append(callback)
def add_device_not_active_callback(self, callback)
Register callback to be invoked when a device is not responding.
7.232254
6.75364
1.070867
for addr in self.devices: device = self.devices[addr] if not device.address.is_x10: device.async_refresh_state()
def poll_devices(self)
Request status updates from each device.
7.030286
6.082239
1.155871
msg_info = MessageInfo(msg=msg, wait_nak=wait_nak, wait_timeout=wait_timeout) _LOGGER.debug("Queueing msg: %s", msg) self._send_queue.put_nowait(msg_info)
def send_msg(self, msg, wait_nak=True, wait_timeout=WAIT_TIMEOUT)
Place a message on the send queue for sending. Message are sent in the order they are placed in the queue.
3.182448
3.004698
1.059158
msg = StartAllLinking(mode, group) self.send_msg(msg)
def start_all_linking(self, mode, group)
Put the IM into All-Linking mode. Puts the IM into All-Linking mode for 4 minutes. Parameters: mode: 0 | 1 | 3 | 255 0 - PLM is responder 1 - PLM is controller 3 - Device that initiated All-Linking is Controller 255 = Delete All-Link group: All-Link group number (0 - 255)
5.645901
6.311922
0.894482
device = insteonplm.devices.create_x10(self, housecode, unitcode, feature) if device: self.devices[device.address.id] = device return device
def add_x10_device(self, housecode, unitcode, feature='OnOff')
Add an X10 device based on a feature description. Current features are: - OnOff - Dimmable - Sensor - AllUnitsOff - AllLightsOn - AllLightsOff
4.283117
5.279877
0.811215
self.aldb_device_handled(addr) for callback in self._cb_device_not_active: callback(addr)
def device_not_active(self, addr)
Handle inactive devices.
7.653081
7.421054
1.031266
if isinstance(addr, Address): remove_addr = addr.id else: remove_addr = addr try: self._aldb_devices.pop(remove_addr) _LOGGER.debug('Removed ALDB device %s', remove_addr) except KeyError: _LOGGER.debug('Device %s not in ALDB device list', remove_addr) _LOGGER.debug('ALDB device count: %d', len(self._aldb_devices))
def aldb_device_handled(self, addr)
Remove device from ALDB device list.
2.6307
2.354428
1.117341
self._restart_writer = False if self._writer_task: self._writer_task.remove_done_callback(self.restart_writing) self._writer_task.cancel() await self._writer_task await asyncio.sleep(0, loop=self._loop)
async def pause_writing(self)
Pause writing.
3.286096
2.967596
1.107326
if self._restart_writer: self._writer_task = asyncio.ensure_future( self._get_message_from_send_queue(), loop=self._loop) self._writer_task.add_done_callback(self.restart_writing)
def restart_writing(self, task=None)
Resume writing.
3.846769
3.678078
1.045864
_LOGGER.info('Requesting Insteon Modem Info') msg = GetImInfo() self.send_msg(msg, wait_nak=True, wait_timeout=.5)
def _get_plm_info(self)
Request PLM Info.
13.6287
11.216863
1.215019
_LOGGER.debug("Starting: _load_all_link_database") self.devices.state = 'loading' self._get_first_all_link_record() _LOGGER.debug("Ending: _load_all_link_database")
def _load_all_link_database(self)
Load the ALL-Link Database into object.
5.499391
4.520266
1.216608
_LOGGER.debug("Starting: _get_first_all_link_record") _LOGGER.info('Requesting ALL-Link Records') if self.aldb.status == ALDBStatus.LOADED: self._next_all_link_rec_nak_retries = 3 self._handle_get_next_all_link_record_nak(None) return self.aldb.clear() self._next_all_link_rec_nak_retries = 0 msg = GetFirstAllLinkRecord() self.send_msg(msg, wait_nak=True, wait_timeout=.5) _LOGGER.debug("Ending: _get_first_all_link_record")
def _get_first_all_link_record(self)
Request first ALL-Link record.
4.432579
4.008269
1.105858
_LOGGER.debug("Starting: _get_next_all_link_record") _LOGGER.debug("Requesting Next All-Link Record") msg = GetNextAllLinkRecord() self.send_msg(msg, wait_nak=True, wait_timeout=.5) _LOGGER.debug("Ending: _get_next_all_link_record")
def _get_next_all_link_record(self)
Request next ALL-Link record.
4.20823
3.566793
1.179836
if isinstance(housecode, str): housecode = housecode.upper() else: raise TypeError('Housecode must be a string') msg = X10Send.command_msg(housecode, X10_COMMAND_ALL_UNITS_OFF) self.send_msg(msg) self._x10_command_to_device(housecode, X10_COMMAND_ALL_UNITS_OFF, msg)
def x10_all_units_off(self, housecode)
Send the X10 All Units Off command.
3.05868
2.908947
1.051473
msg = X10Send.command_msg(housecode, X10_COMMAND_ALL_LIGHTS_OFF) self.send_msg(msg) self._x10_command_to_device(housecode, X10_COMMAND_ALL_LIGHTS_OFF, msg)
def x10_all_lights_off(self, housecode)
Send the X10 All Lights Off command.
3.657066
3.352494
1.090849
msg = X10Send.command_msg(housecode, X10_COMMAND_ALL_LIGHTS_ON) self.send_msg(msg) self._x10_command_to_device(housecode, X10_COMMAND_ALL_LIGHTS_ON, msg)
def x10_all_lights_on(self, housecode)
Send the X10 All Lights Off command.
3.703262
3.536062
1.047284
_LOGGER.info('Connection established to PLM') self.transport = transport self._restart_writer = True self.restart_writing() # Testing to see if this fixes the 2413S issue self.transport.serial.timeout = 1 self.transport.serial.write_timeout = 1 self.transport.set_write_buffer_limits(128) # limit = self.transport.get_write_buffer_size() # _LOGGER.debug('Write buffer size is %d', limit) if self._aldb.status != ALDBStatus.LOADED: asyncio.ensure_future(self._setup_devices(), loop=self._loop)
def connection_made(self, transport)
Start the PLM connection process. Called when asyncio.Protocol establishes the network connection.
5.556303
5.164101
1.075948
_LOGGER.info('Connection established to Hub') _LOGGER.debug('Transport: %s', transport) self.transport = transport self._restart_writer = True self.restart_writing() if self._aldb.status != ALDBStatus.LOADED: asyncio.ensure_future(self._setup_devices(), loop=self._loop)
def connection_made(self, transport)
Start the Hub connection process. Called when asyncio.Protocol establishes the network connection.
6.400786
5.753134
1.112574
from insteonplm.devices.ipdb import IPDB ipdb = IPDB() product = ipdb[[cat, subcat]] deviceclass = product.deviceclass device = None if deviceclass is not None: device = deviceclass(plm, address, cat, subcat, product.product_key, product.description, product.model) return device
def create(plm, address, cat, subcat, firmware=None)
Create a device from device info data.
4.596855
4.207446
1.092552
from insteonplm.devices.ipdb import IPDB ipdb = IPDB() product = ipdb.x10(feature) deviceclass = product.deviceclass device = None if deviceclass: device = deviceclass(plm, housecode, unitcode) return device
def create_x10(plm, housecode, unitcode, feature)
Create an X10 device from a feature definition.
4.619734
4.142932
1.115088
import inspect curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) _LOGGER.debug('caller name: %s', calframe[1][3]) msg = StandardSend(self.address, COMMAND_ID_REQUEST_0X10_0X00) self._plm.send_msg(msg)
def id_request(self)
Request a device ID from a device.
5.286707
4.756902
1.111376
msg = StandardSend(self._address, COMMAND_PRODUCT_DATA_REQUEST_0X03_0X00) self._send_msg(msg)
def product_data_request(self)
Request product data from a device. Not supported by all devices. Required after 01-Feb-2007.
10.976391
9.252911
1.186264
msg = StandardSend(self._address, COMMAND_ASSIGN_TO_ALL_LINK_GROUP_0X01_NONE, cmd2=group) self._send_msg(msg)
def assign_to_all_link_group(self, group=0x01)
Assign a device to an All-Link Group. The default is group 0x01.
8.418631
8.624377
0.976144
msg = StandardSend(self._address, COMMAND_DELETE_FROM_ALL_LINK_GROUP_0X02_NONE, cmd2=group) self._send_msg(msg)
def delete_from_all_link_group(self, group)
Delete a device to an All-Link Group.
10.675492
8.731343
1.222663
msg = StandardSend(self._address, COMMAND_FX_USERNAME_0X03_0X01) self._send_msg(msg)
def fx_username(self)
Get FX Username. Only required for devices that support FX Commands. FX Addressee responds with an ED 0x0301 FX Username Response message
14.577207
11.058887
1.318144
msg = StandardSend(self._address, COMMAND_FX_USERNAME_0X03_0X01) self._send_msg(msg)
def device_text_string_request(self)
Get FX Username. Only required for devices that support FX Commands. FX Addressee responds with an ED 0x0301 FX Username Response message.
19.991262
9.894484
2.020445
msg = ExtendedSend(self._address, COMMAND_ENTER_LINKING_MODE_0X09_NONE, cmd2=group, userdata=Userdata()) msg.set_checksum() self._send_msg(msg)
def enter_linking_mode(self, group=0x01)
Tell a device to enter All-Linking Mode. Same as holding down the Set button for 10 sec. Default group is 0x01. Not supported by i1 devices.
9.739702
11.625609
0.83778
msg = StandardSend(self._address, COMMAND_ENTER_UNLINKING_MODE_0X0A_NONE, cmd2=group) self._send_msg(msg)
def enter_unlinking_mode(self, group)
Unlink a device from an All-Link group. Not supported by i1 devices.
12.104785
11.295853
1.071613