Dataset columns (type and reported value range):
repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
jgillick/LendingClub
lendingclub/filters.py
Filter.__normalize
def __normalize(self):
    """
    Adjusts the values of the filters to be correct.
    For example, if you set grade 'B' to True, then 'All'
    should be set to False
    """

    # Don't normalize if we're already normalizing or intializing
    if self.__normalizing is True or self.__initialized is False:
        return

    self.__normalizing = True
    self.__normalize_grades()
    self.__normalize_progress()
    self.__normalizing = False
python
Adjusts the values of the filters to be correct. For example, if you set grade 'B' to True, then 'All' should be set to False
[ "Adjusts", "the", "values", "of", "the", "filters", "to", "be", "correct", ".", "For", "example", "if", "you", "set", "grade", "B", "to", "True", "then", "All", "should", "be", "set", "to", "False" ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L221-L235
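The guard at the top of __normalize is a plain re-entrancy flag: normalization is skipped while another normalization is running or before initialization has finished. A minimal, self-contained sketch of the same pattern (the class and attribute names here are invented for illustration, not part of the library):

class GuardedSettings(object):
    """Illustrative re-entrancy guard mirroring the flag logic in Filter.__normalize."""

    def __init__(self):
        self._normalizing = False
        self._initialized = True

    def normalize(self):
        # Skip if we're already normalizing or not yet initialized
        if self._normalizing or not self._initialized:
            return
        self._normalizing = True
        try:
            pass  # ... adjust dependent values here (e.g. grades / progress) ...
        finally:
            self._normalizing = False

GuardedSettings().normalize()  # re-entrant calls made inside normalize() would be no-ops

Wrapping the work in try/finally keeps the flag consistent even if a normalization step raises.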
jgillick/LendingClub
lendingclub/filters.py
Filter.validate_one
def validate_one(self, loan):
    """
    Validate a single loan result record against the filters

    Parameters
    ----------
    loan : dict
        A single loan note record

    Returns
    -------
    boolean
        True or raises FilterValidationError

    Raises
    ------
    FilterValidationError
        If the loan does not match the filter criteria
    """
    assert type(loan) is dict, 'loan parameter must be a dictionary object'

    # Map the loan value keys to the filter keys
    req = {
        'loanGUID': 'loan_id',
        'loanGrade': 'grade',
        'loanLength': 'term',
        'loanUnfundedAmount': 'progress',
        'loanAmountRequested': 'progress',
        'alreadyInvestedIn': 'exclude_existing',
        'purpose': 'loan_purpose',
    }

    # Throw an error if the loan does not contain one of the criteria keys that this filter has
    for key, criteria in req.iteritems():
        if criteria in self and key not in loan:
            raise FilterValidationError('Loan does not have a "{0}" value.'.format(key), loan, criteria)

    # Loan ID
    if 'loan_id' in self:
        loan_ids = str(self['loan_id']).split(',')
        if str(loan['loanGUID']) not in loan_ids:
            raise FilterValidationError('Did not meet filter criteria for loan ID. {0} does not match {1}'.format(loan['loanGUID'], self['loan_id']), loan=loan, criteria='loan ID')

    # Grade
    grade = loan['loanGrade'][0]  # Extract the letter portion of the loan
    if 'grades' in self and self['grades']['All'] is not True:
        if grade not in self['grades']:
            raise FilterValidationError('Loan grade "{0}" is unknown'.format(grade), loan, 'grade')
        elif self['grades'][grade] is False:
            raise FilterValidationError(loan=loan, criteria='grade')

    # Term
    if 'term' in self and self['term'] is not None:
        if loan['loanLength'] == 36 and self['term']['Year3'] is False:
            raise FilterValidationError(loan=loan, criteria='loan term')
        elif loan['loanLength'] == 60 and self['term']['Year5'] is False:
            raise FilterValidationError(loan=loan, criteria='loan term')

    # Progress
    if 'funding_progress' in self:
        loan_progress = (1 - (loan['loanUnfundedAmount'] / loan['loanAmountRequested'])) * 100
        if self['funding_progress'] > loan_progress:
            raise FilterValidationError(loan=loan, criteria='funding progress')

    # Exclude existing
    if 'exclude_existing' in self:
        if self['exclude_existing'] is True and loan['alreadyInvestedIn'] is True:
            raise FilterValidationError(loan=loan, criteria='exclude loans you are invested in')

    # Loan purpose (either an array or single value)
    if 'loan_purpose' in self and loan['purpose'] is not False:
        purpose = self['loan_purpose']
        if type(purpose) is not dict:
            purpose = {purpose: True}

        if 'All' not in purpose or purpose['All'] is False:
            if loan['purpose'] not in purpose:
                raise FilterValidationError(loan=loan, criteria='loan purpose')

    return True
python
Validate a single loan result record against the filters Parameters ---------- loan : dict A single loan note record Returns ------- boolean True or raises FilterValidationError Raises ------ FilterValidationError If the loan does not match the filter criteria
[ "Validate", "a", "single", "loan", "result", "record", "against", "the", "filters" ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L265-L344
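For reference, the loan keys read by validate_one are exactly those in the req mapping above. A hedged sketch of such a record (key names taken from the function body, values invented), together with the funding-progress arithmetic used by the 'funding_progress' check:

# Hypothetical loan record with the keys validate_one expects.
loan = {
    'loanGUID': 1234567,
    'loanGrade': 'B3',             # only the leading letter is checked
    'loanLength': 36,              # term in months (36 or 60)
    'loanUnfundedAmount': 500.0,
    'loanAmountRequested': 10000.0,
    'alreadyInvestedIn': False,
    'purpose': 'debt_consolidation',
}

# Same formula as the 'funding_progress' check in validate_one.
loan_progress = (1 - (loan['loanUnfundedAmount'] / loan['loanAmountRequested'])) * 100
print(loan_progress)  # 95.0 -- would pass a filter requiring, say, 90% funded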
jgillick/LendingClub
lendingclub/filters.py
Filter.search_string
def search_string(self):
    """"
    Returns the JSON string that LendingClub expects for it's search
    """
    self.__normalize()

    # Get the template
    tmpl_source = unicode(open(self.tmpl_file).read())

    # Process template
    compiler = Compiler()
    template = compiler.compile(tmpl_source)
    out = template(self)
    if not out:
        return False
    out = ''.join(out)

    #
    # Cleanup output and remove all extra space
    #

    # remove extra spaces
    out = re.sub('\n', '', out)
    out = re.sub('\s{3,}', ' ', out)

    # Remove hanging commas i.e: [1, 2,]
    out = re.sub(',\s*([}\\]])', '\\1', out)

    # Space between brackets i.e: ], [
    out = re.sub('([{\\[}\\]])(,?)\s*([{\\[}\\]])', '\\1\\2\\3', out)

    # Cleanup spaces around [, {, }, ], : and , characters
    out = re.sub('\s*([{\\[\\]}:,])\s*', '\\1', out)

    return out
python
Returns the JSON string that LendingClub expects for it's search
[ "Returns", "the", "JSON", "string", "that", "LendingClub", "expects", "for", "it", "s", "search" ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L346-L380
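The cleanup pass at the end of search_string only strips insignificant whitespace from the rendered template. A standalone sketch of the same regex substitutions applied to an invented input string:

import re

out = '{\n    "filters": [ 1, 2, ] ,\n    "grades": { "B": true } ,\n}'

out = re.sub('\n', '', out)
out = re.sub(r'\s{3,}', ' ', out)
# Remove hanging commas, i.e. [1, 2,]
out = re.sub(r',\s*([}\]])', r'\1', out)
# Collapse space between brackets, i.e. ], [
out = re.sub(r'([{\[}\]])(,?)\s*([{\[}\]])', r'\1\2\3', out)
# Remove spaces around [, {, }, ], : and , characters
out = re.sub(r'\s*([{\[\]}:,])\s*', r'\1', out)

print(out)  # {"filters":[1,2],"grades":{"B":true}}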
jgillick/LendingClub
lendingclub/filters.py
SavedFilter.all_filters
def all_filters(lc):
    """
    Get a list of all your saved filters

    Parameters
    ----------
    lc : :py:class:`lendingclub.LendingClub`
        An instance of the authenticated LendingClub class

    Returns
    -------
    list
        A list of lendingclub.filters.SavedFilter objects
    """
    filters = []
    response = lc.session.get('/browse/getSavedFiltersAj.action')
    json_response = response.json()

    # Load all filters
    if lc.session.json_success(json_response):
        for saved in json_response['filters']:
            filters.append(SavedFilter(lc, saved['id']))

    return filters
python
Get a list of all your saved filters Parameters ---------- lc : :py:class:`lendingclub.LendingClub` An instance of the authenticated LendingClub class Returns ------- list A list of lendingclub.filters.SavedFilter objects
[ "Get", "a", "list", "of", "all", "your", "saved", "filters" ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L431-L455
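A minimal usage sketch, assuming the lendingclub package is installed and that LendingClub.authenticate accepts an email and password as in the package's own examples (credentials and printed attributes are placeholders based on the code shown above):

from lendingclub import LendingClub
from lendingclub.filters import SavedFilter

lc = LendingClub()
lc.authenticate('you@example.com', 'secret123')  # placeholder credentials

# List every saved filter on the account
for saved in SavedFilter.all_filters(lc):
    print(saved.id, saved.name)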
jgillick/LendingClub
lendingclub/filters.py
SavedFilter.load
def load(self):
    """
    Load the filter from the server
    """

    # Attempt to load the saved filter
    payload = {
        'id': self.id
    }
    response = self.lc.session.get('/browse/getSavedFilterAj.action', query=payload)
    self.response = response
    json_response = response.json()

    if self.lc.session.json_success(json_response) and json_response['filterName'] != 'No filters':
        self.name = json_response['filterName']

        #
        # Parse out the filter JSON string manually from the response JSON.
        # If the filter JSON is modified at all, or any value is out of order,
        # LendingClub will reject the filter and perform a wildcard search instead,
        # without any error. So we need to retain the filter JSON value exactly how it is given to us.
        #
        text = response.text

        # Cut off everything before "filter": [...]
        text = re.sub('\n', '', text)
        text = re.sub('^.*?,\s*["\']filter["\']:\s*\[(.*)', '[\\1', text)

        # Now loop through the string until we find the end of the filter block
        # This is a simple parser that keeps track of block elements, quotes and
        # escape characters
        blockTracker = []
        blockChars = {
            '[': ']',
            '{': '}'
        }
        inQuote = False
        lastChar = None
        json_text = ""

        for char in text:
            json_text += char

            # Escape char
            if char == '\\':
                if lastChar == '\\':
                    lastChar = ''
                else:
                    lastChar = char
                continue

            # Quotes
            if char == "'" or char == '"':
                if inQuote is False:  # Starting a quote block
                    inQuote = char
                elif inQuote == char:  # Ending a quote block
                    inQuote = False
                lastChar = char
                continue

            # Start of a block
            if char in blockChars.keys():
                blockTracker.insert(0, blockChars[char])

            # End of a block, remove from block path
            elif len(blockTracker) > 0 and char == blockTracker[0]:
                blockTracker.pop(0)

            # No more blocks in the tracker which means we're at the end of the filter block
            if len(blockTracker) == 0 and lastChar is not None:
                break

            lastChar = char

        # Verify valid JSON
        try:
            if json_text.strip() == '':
                raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response)

            json_test = json.loads(json_text)

            # Make sure it looks right
            assert type(json_test) is list, 'Expecting a list, instead received a {0}'.format(type(json_test))
            assert 'm_id' in json_test[0], 'Expecting a \'m_id\' property in each filter'
            assert 'm_value' in json_test[0], 'Expecting a \'m_value\' property in each filter'

            self.json = json_test
        except Exception as e:
            raise SavedFilterError('Could not parse filter from the JSON response: {0}'.format(str(e)))

        self.json_text = json_text
        self.__analyze()

    else:
        raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response)
python
Load the filter from the server
[ "Load", "the", "filter", "from", "the", "server" ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L468-L561
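The hand-rolled loop in load exists only to copy the filter array out of the response byte for byte. A self-contained sketch of the same bracket-and-quote tracking idea (escape handling omitted for brevity), run on an invented response fragment:

def extract_first_block(text):
    """Return the first complete [...] or {...} block in text, tracking quotes."""
    closers = {'[': ']', '{': '}'}
    block_stack = []          # expected closing characters, innermost first
    in_quote = False
    out = ''
    started = False
    for char in text:
        if not started:
            if char not in closers:
                continue
            started = True
        out += char
        if char in ('"', "'"):
            if in_quote is False:
                in_quote = char       # opening quote
            elif in_quote == char:
                in_quote = False      # matching closing quote
            continue
        if in_quote:
            continue
        if char in closers:
            block_stack.insert(0, closers[char])
        elif block_stack and char == block_stack[0]:
            block_stack.pop(0)
            if not block_stack:       # end of the outermost block
                break
    return out

sample = 'ok,"filterName":"My filter","filter": [{"m_id": 10, "m_value": [{"value": "B"}]}],"x":1'
print(extract_first_block(sample))   # [{"m_id": 10, "m_value": [{"value": "B"}]}]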
jgillick/LendingClub
lendingclub/filters.py
SavedFilter.__analyze
def __analyze(self):
    """
    Analyze the filter JSON and attempt to parse out the individual filters.
    """
    filter_values = {}

    # ID to filter name mapping
    name_map = {
        10: 'grades',
        11: 'loan_purpose',
        13: 'approved',
        15: 'funding_progress',
        38: 'exclude_existing',
        39: 'term',
        43: 'keyword'
    }

    if self.json is not None:
        filters = self.json

        for f in filters:
            if 'm_id' in f:
                name = f['m_id']

                # Get the name to represent this filter
                if f['m_id'] in name_map:
                    name = name_map[f['m_id']]

                # Get values
                if 'm_value' in f:
                    raw_values = f['m_value']
                    value = {}

                    # No value, skip it
                    if raw_values is None:
                        continue

                    # Loop through multiple values
                    if type(raw_values) is list:

                        # A single non string value, is THE value
                        if len(raw_values) == 1 and type(raw_values[0]['value']) not in [str, unicode]:
                            value = raw_values[0]['value']

                        # Create a dict of values: name = True
                        for val in raw_values:
                            if type(val['value']) in [str, unicode]:
                                value[val['value']] = True

                    # A single value
                    else:
                        value = raw_values

                # Normalize grades array
                if name == 'grades':
                    if 'All' not in value:
                        value['All'] = False

                # Add filter value
                filter_values[name] = value
                dict.__setitem__(self, name, value)

    return filter_values
python
Analyze the filter JSON and attempt to parse out the individual filters.
[ "Analyze", "the", "filter", "JSON", "and", "attempt", "to", "parse", "out", "the", "individual", "filters", "." ]
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L572-L634
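Concretely, __analyze maps numeric filter IDs to names and collapses the m_value entries into either a plain value or a name-to-True dict. A simplified, Python 3 re-implementation of that mapping on an invented saved-filter payload (illustrative only; the real method also handles the Python 2 unicode type):

name_map = {10: 'grades', 11: 'loan_purpose', 13: 'approved',
            15: 'funding_progress', 38: 'exclude_existing', 39: 'term', 43: 'keyword'}

raw_filters = [
    {'m_id': 10, 'm_value': [{'value': 'B'}, {'value': 'C'}]},   # grades
    {'m_id': 15, 'm_value': [{'value': 90}]},                    # funding_progress
]

parsed = {}
for f in raw_filters:
    name = name_map.get(f['m_id'], f['m_id'])
    raw_values = f['m_value']
    if len(raw_values) == 1 and not isinstance(raw_values[0]['value'], str):
        value = raw_values[0]['value']                # single non-string value is THE value
    else:
        value = {v['value']: True for v in raw_values if isinstance(v['value'], str)}
    if name == 'grades' and 'All' not in value:
        value['All'] = False                          # same normalization as the real method
    parsed[name] = value

print(parsed)  # {'grades': {'B': True, 'C': True, 'All': False}, 'funding_progress': 90}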
vnmabus/dcor
dcor/_dcor_internals.py
_float_copy_to_out
def _float_copy_to_out(out, origin):
    """
    Copy origin to out and return it.

    If ``out`` is None, a new copy (casted to floating point) is used. If
    ``out`` and ``origin`` are the same, we simply return it. Otherwise we
    copy the values.

    """
    if out is None:
        out = origin / 1  # The division forces cast to a floating point type
    elif out is not origin:
        np.copyto(out, origin)
    return out
python
Copy origin to out and return it. If ``out`` is None, a new copy (casted to floating point) is used. If ``out`` and ``origin`` are the same, we simply return it. Otherwise we copy the values.
[ "Copy", "origin", "to", "out", "and", "return", "it", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L40-L53
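The three branches correspond to three calling conventions. A quick numpy sketch that reproduces the private helper locally and exercises each case:

import numpy as np

def float_copy_to_out(out, origin):
    # Same logic as dcor's private helper, reproduced here for illustration.
    if out is None:
        out = origin / 1          # division forces a floating point copy
    elif out is not origin:
        np.copyto(out, origin)
    return out

a = np.array([[1, 2], [3, 4]])            # integer input
print(float_copy_to_out(None, a).dtype)   # float64 -- new floating point copy
buf = np.zeros((2, 2))
print(float_copy_to_out(buf, a) is buf)   # True -- values copied into the provided buffer
print(float_copy_to_out(a, a) is a)       # True -- same object is returned as-is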
vnmabus/dcor
dcor/_dcor_internals.py
_double_centered_imp
def _double_centered_imp(a, out=None):
    """
    Real implementation of :func:`double_centered`.

    This function is used to make parameter ``out`` keyword-only in
    Python 2.

    """
    out = _float_copy_to_out(out, a)

    dim = np.size(a, 0)

    mu = np.sum(a) / (dim * dim)
    sum_cols = np.sum(a, 0, keepdims=True)
    sum_rows = np.sum(a, 1, keepdims=True)
    mu_cols = sum_cols / dim
    mu_rows = sum_rows / dim

    # Do one operation at a time, to improve broadcasting memory usage.
    out -= mu_rows
    out -= mu_cols
    out += mu

    return out
python
Real implementation of :func:`double_centered`. This function is used to make parameter ``out`` keyword-only in Python 2.
[ "Real", "implementation", "of", ":", "func", ":", "double_centered", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L56-L79
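Double centering subtracts the row and column means and adds back the grand mean, so every row and column of the result sums to zero (up to rounding). A short numpy check of that property using the same formula as above:

import numpy as np

rng = np.random.default_rng(0)
d = rng.random((5, 5))
a = d + d.T                      # symmetric "distance-like" matrix

dim = a.shape[0]
mu = a.sum() / (dim * dim)
mu_cols = a.sum(axis=0, keepdims=True) / dim
mu_rows = a.sum(axis=1, keepdims=True) / dim
centered = a - mu_rows - mu_cols + mu

print(np.allclose(centered.sum(axis=0), 0))  # True
print(np.allclose(centered.sum(axis=1), 0))  # True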
vnmabus/dcor
dcor/_dcor_internals.py
_u_centered_imp
def _u_centered_imp(a, out=None):
    """
    Real implementation of :func:`u_centered`.

    This function is used to make parameter ``out`` keyword-only in
    Python 2.

    """
    out = _float_copy_to_out(out, a)

    dim = np.size(a, 0)

    u_mu = np.sum(a) / ((dim - 1) * (dim - 2))
    sum_cols = np.sum(a, 0, keepdims=True)
    sum_rows = np.sum(a, 1, keepdims=True)
    u_mu_cols = np.ones((dim, 1)).dot(sum_cols / (dim - 2))
    u_mu_rows = (sum_rows / (dim - 2)).dot(np.ones((1, dim)))

    # Do one operation at a time, to improve broadcasting memory usage.
    out -= u_mu_rows
    out -= u_mu_cols
    out += u_mu

    # The diagonal is zero
    out[np.eye(dim, dtype=bool)] = 0

    return out
python
Real implementation of :func:`u_centered`. This function is used to make parameter ``out`` keyword-only in Python 2.
[ "Real", "implementation", "of", ":", "func", ":", "u_centered", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L146-L172
vnmabus/dcor
dcor/_dcor_internals.py
u_product
def u_product(a, b):
    r"""
    Inner product in the Hilbert space of :math:`U`-centered distance
    matrices.

    This inner product is defined as

    .. math::
        \frac{1}{n(n-3)} \sum_{i,j=1}^n a_{i, j} b_{i, j}

    Parameters
    ----------
    a: array_like
        First input array to be multiplied.
    b: array_like
        Second input array to be multiplied.

    Returns
    -------
    numpy scalar
        Inner product.

    See Also
    --------
    mean_product

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[ 0., 3., 11., 6.],
    ...               [ 3., 0., 8., 3.],
    ...               [ 11., 8., 0., 5.],
    ...               [ 6., 3., 5., 0.]])
    >>> b = np.array([[ 0., 13., 11., 3.],
    ...               [ 13., 0., 2., 10.],
    ...               [ 11., 2., 0., 8.],
    ...               [ 3., 10., 8., 0.]])
    >>> u_a = dcor.u_centered(a)
    >>> u_a
    array([[ 0., -2.,  1.,  1.],
           [-2.,  0.,  1.,  1.],
           [ 1.,  1.,  0., -2.],
           [ 1.,  1., -2.,  0.]])
    >>> u_b = dcor.u_centered(b)
    >>> u_b
    array([[ 0.        ,  2.66666667,  2.66666667, -5.33333333],
           [ 2.66666667,  0.        , -5.33333333,  2.66666667],
           [ 2.66666667, -5.33333333,  0.        ,  2.66666667],
           [-5.33333333,  2.66666667,  2.66666667,  0.        ]])
    >>> dcor.u_product(u_a, u_a)
    6.0
    >>> dcor.u_product(u_a, u_b)
    -8.0

    Note that the formula is well defined as long as the matrices involved
    are square and have the same dimensions, even if they are not in the
    Hilbert space of :math:`U`-centered distance matrices

    >>> dcor.u_product(a, a)
    132.0

    Also the formula produces a division by 0 for 3x3 matrices

    >>> import warnings
    >>> b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter("ignore")
    ...     dcor.u_product(b, b)
    inf

    """
    n = np.size(a, 0)

    return np.sum(a * b) / (n * (n - 3))
python
r""" Inner product in the Hilbert space of :math:`U`-centered distance matrices. This inner product is defined as .. math:: \frac{1}{n(n-3)} \sum_{i,j=1}^n a_{i, j} b_{i, j} Parameters ---------- a: array_like First input array to be multiplied. b: array_like Second input array to be multiplied. Returns ------- numpy scalar Inner product. See Also -------- mean_product Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[ 0., 3., 11., 6.], ... [ 3., 0., 8., 3.], ... [ 11., 8., 0., 5.], ... [ 6., 3., 5., 0.]]) >>> b = np.array([[ 0., 13., 11., 3.], ... [ 13., 0., 2., 10.], ... [ 11., 2., 0., 8.], ... [ 3., 10., 8., 0.]]) >>> u_a = dcor.u_centered(a) >>> u_a array([[ 0., -2., 1., 1.], [-2., 0., 1., 1.], [ 1., 1., 0., -2.], [ 1., 1., -2., 0.]]) >>> u_b = dcor.u_centered(b) >>> u_b array([[ 0. , 2.66666667, 2.66666667, -5.33333333], [ 2.66666667, 0. , -5.33333333, 2.66666667], [ 2.66666667, -5.33333333, 0. , 2.66666667], [-5.33333333, 2.66666667, 2.66666667, 0. ]]) >>> dcor.u_product(u_a, u_a) 6.0 >>> dcor.u_product(u_a, u_b) -8.0 Note that the formula is well defined as long as the matrices involved are square and have the same dimensions, even if they are not in the Hilbert space of :math:`U`-centered distance matrices >>> dcor.u_product(a, a) 132.0 Also the formula produces a division by 0 for 3x3 matrices >>> import warnings >>> b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> with warnings.catch_warnings(): ... warnings.simplefilter("ignore") ... dcor.u_product(b, b) inf
[ "r", "Inner", "product", "in", "the", "Hilbert", "space", "of", ":", "math", ":", "U", "-", "centered", "distance", "matrices", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L294-L367
vnmabus/dcor
dcor/_dcor_internals.py
u_projection
def u_projection(a):
    r"""
    Return the orthogonal projection function over :math:`a`.

    The function returned computes the orthogonal projection over
    :math:`a` in the Hilbert space of :math:`U`-centered distance
    matrices.

    The projection of a matrix :math:`B` over a matrix :math:`A`
    is defined as

    .. math::
        \text{proj}_A(B) = \begin{cases}
            \frac{\langle A, B \rangle}{\langle A, A \rangle} A,
            & \text{if} \langle A, A \rangle \neq 0, \\
            0, & \text{if} \langle A, A \rangle = 0.
        \end{cases}

    where :math:`\langle {}\cdot{}, {}\cdot{} \rangle` is the scalar
    product in the Hilbert space of :math:`U`-centered distance
    matrices, given by the function :py:func:`u_product`.

    Parameters
    ----------
    a: array_like
        :math:`U`-centered distance matrix.

    Returns
    -------
    callable
        Function that receives a :math:`U`-centered distance matrix and
        computes its orthogonal projection over :math:`a`.

    See Also
    --------
    u_complementary_projection
    u_centered

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[ 0., 3., 11., 6.],
    ...               [ 3., 0., 8., 3.],
    ...               [ 11., 8., 0., 5.],
    ...               [ 6., 3., 5., 0.]])
    >>> b = np.array([[ 0., 13., 11., 3.],
    ...               [ 13., 0., 2., 10.],
    ...               [ 11., 2., 0., 8.],
    ...               [ 3., 10., 8., 0.]])
    >>> u_a = dcor.u_centered(a)
    >>> u_a
    array([[ 0., -2.,  1.,  1.],
           [-2.,  0.,  1.,  1.],
           [ 1.,  1.,  0., -2.],
           [ 1.,  1., -2.,  0.]])
    >>> u_b = dcor.u_centered(b)
    >>> u_b
    array([[ 0.        ,  2.66666667,  2.66666667, -5.33333333],
           [ 2.66666667,  0.        , -5.33333333,  2.66666667],
           [ 2.66666667, -5.33333333,  0.        ,  2.66666667],
           [-5.33333333,  2.66666667,  2.66666667,  0.        ]])
    >>> proj_a = dcor.u_projection(u_a)
    >>> proj_a(u_a)
    array([[ 0., -2.,  1.,  1.],
           [-2.,  0.,  1.,  1.],
           [ 1.,  1.,  0., -2.],
           [ 1.,  1., -2.,  0.]])
    >>> proj_a(u_b)
    array([[-0.        ,  2.66666667, -1.33333333, -1.33333333],
           [ 2.66666667, -0.        , -1.33333333, -1.33333333],
           [-1.33333333, -1.33333333, -0.        ,  2.66666667],
           [-1.33333333, -1.33333333,  2.66666667, -0.        ]])

    The function gives the correct result if
    :math:`\\langle A, A \\rangle = 0`.

    >>> proj_null = dcor.u_projection(np.zeros((4, 4)))
    >>> proj_null(u_a)
    array([[0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.]])

    """
    c = a
    denominator = u_product(c, c)

    docstring = """
    Orthogonal projection over a :math:`U`-centered distance matrix.

    This function was returned by :code:`u_projection`. The complete
    usage information is in the documentation of :code:`u_projection`.

    See Also
    --------
    u_projection
    """

    if denominator == 0:

        def projection(a):  # noqa
            return np.zeros_like(c)

    else:

        def projection(a):  # noqa
            return u_product(a, c) / denominator * c

    projection.__doc__ = docstring
    return projection
python
r""" Return the orthogonal projection function over :math:`a`. The function returned computes the orthogonal projection over :math:`a` in the Hilbert space of :math:`U`-centered distance matrices. The projection of a matrix :math:`B` over a matrix :math:`A` is defined as .. math:: \text{proj}_A(B) = \begin{cases} \frac{\langle A, B \rangle}{\langle A, A \rangle} A, & \text{if} \langle A, A \rangle \neq 0, \\ 0, & \text{if} \langle A, A \rangle = 0. \end{cases} where :math:`\langle {}\cdot{}, {}\cdot{} \rangle` is the scalar product in the Hilbert space of :math:`U`-centered distance matrices, given by the function :py:func:`u_product`. Parameters ---------- a: array_like :math:`U`-centered distance matrix. Returns ------- callable Function that receives a :math:`U`-centered distance matrix and computes its orthogonal projection over :math:`a`. See Also -------- u_complementary_projection u_centered Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[ 0., 3., 11., 6.], ... [ 3., 0., 8., 3.], ... [ 11., 8., 0., 5.], ... [ 6., 3., 5., 0.]]) >>> b = np.array([[ 0., 13., 11., 3.], ... [ 13., 0., 2., 10.], ... [ 11., 2., 0., 8.], ... [ 3., 10., 8., 0.]]) >>> u_a = dcor.u_centered(a) >>> u_a array([[ 0., -2., 1., 1.], [-2., 0., 1., 1.], [ 1., 1., 0., -2.], [ 1., 1., -2., 0.]]) >>> u_b = dcor.u_centered(b) >>> u_b array([[ 0. , 2.66666667, 2.66666667, -5.33333333], [ 2.66666667, 0. , -5.33333333, 2.66666667], [ 2.66666667, -5.33333333, 0. , 2.66666667], [-5.33333333, 2.66666667, 2.66666667, 0. ]]) >>> proj_a = dcor.u_projection(u_a) >>> proj_a(u_a) array([[ 0., -2., 1., 1.], [-2., 0., 1., 1.], [ 1., 1., 0., -2.], [ 1., 1., -2., 0.]]) >>> proj_a(u_b) array([[-0. , 2.66666667, -1.33333333, -1.33333333], [ 2.66666667, -0. , -1.33333333, -1.33333333], [-1.33333333, -1.33333333, -0. , 2.66666667], [-1.33333333, -1.33333333, 2.66666667, -0. ]]) The function gives the correct result if :math:`\\langle A, A \\rangle = 0`. >>> proj_null = dcor.u_projection(np.zeros((4, 4))) >>> proj_null(u_a) array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
[ "r", "Return", "the", "orthogonal", "projection", "function", "over", ":", "math", ":", "a", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L370-L480
vnmabus/dcor
dcor/_dcor_internals.py
u_complementary_projection
def u_complementary_projection(a):
    r"""
    Return the orthogonal projection function over :math:`a^{\perp}`.

    The function returned computes the orthogonal projection over
    :math:`a^{\perp}` (the complementary projection over a)
    in the Hilbert space of :math:`U`-centered distance matrices.

    The projection of a matrix :math:`B` over a matrix :math:`A^{\perp}`
    is defined as

    .. math::
        \text{proj}_{A^{\perp}}(B) = B - \text{proj}_A(B)

    Parameters
    ----------
    a: array_like
        :math:`U`-centered distance matrix.

    Returns
    -------
    callable
        Function that receives a :math:`U`-centered distance matrices
        and computes its orthogonal projection over :math:`a^{\perp}`.

    See Also
    --------
    u_projection
    u_centered

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[ 0., 3., 11., 6.],
    ...               [ 3., 0., 8., 3.],
    ...               [ 11., 8., 0., 5.],
    ...               [ 6., 3., 5., 0.]])
    >>> b = np.array([[ 0., 13., 11., 3.],
    ...               [ 13., 0., 2., 10.],
    ...               [ 11., 2., 0., 8.],
    ...               [ 3., 10., 8., 0.]])
    >>> u_a = dcor.u_centered(a)
    >>> u_a
    array([[ 0., -2.,  1.,  1.],
           [-2.,  0.,  1.,  1.],
           [ 1.,  1.,  0., -2.],
           [ 1.,  1., -2.,  0.]])
    >>> u_b = dcor.u_centered(b)
    >>> u_b
    array([[ 0.        ,  2.66666667,  2.66666667, -5.33333333],
           [ 2.66666667,  0.        , -5.33333333,  2.66666667],
           [ 2.66666667, -5.33333333,  0.        ,  2.66666667],
           [-5.33333333,  2.66666667,  2.66666667,  0.        ]])
    >>> proj_a = dcor.u_complementary_projection(u_a)
    >>> proj_a(u_a)
    array([[0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.],
           [0., 0., 0., 0.]])
    >>> proj_a(u_b)
    array([[ 0.0000000e+00, -4.4408921e-16,  4.0000000e+00, -4.0000000e+00],
           [-4.4408921e-16,  0.0000000e+00, -4.0000000e+00,  4.0000000e+00],
           [ 4.0000000e+00, -4.0000000e+00,  0.0000000e+00, -4.4408921e-16],
           [-4.0000000e+00,  4.0000000e+00, -4.4408921e-16,  0.0000000e+00]])
    >>> proj_null = dcor.u_complementary_projection(np.zeros((4, 4)))
    >>> proj_null(u_a)
    array([[ 0., -2.,  1.,  1.],
           [-2.,  0.,  1.,  1.],
           [ 1.,  1.,  0., -2.],
           [ 1.,  1., -2.,  0.]])

    """
    proj = u_projection(a)

    def projection(a):
        """
        Orthogonal projection over the complementary space.

        This function was returned by :code:`u_complementary_projection`.
        The complete usage information is in the documentation of
        :code:`u_complementary_projection`.

        See Also
        --------
        u_complementary_projection

        """
        return a - proj(a)

    return projection
python
r""" Return the orthogonal projection function over :math:`a^{\perp}`. The function returned computes the orthogonal projection over :math:`a^{\perp}` (the complementary projection over a) in the Hilbert space of :math:`U`-centered distance matrices. The projection of a matrix :math:`B` over a matrix :math:`A^{\perp}` is defined as .. math:: \text{proj}_{A^{\perp}}(B) = B - \text{proj}_A(B) Parameters ---------- a: array_like :math:`U`-centered distance matrix. Returns ------- callable Function that receives a :math:`U`-centered distance matrices and computes its orthogonal projection over :math:`a^{\perp}`. See Also -------- u_projection u_centered Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[ 0., 3., 11., 6.], ... [ 3., 0., 8., 3.], ... [ 11., 8., 0., 5.], ... [ 6., 3., 5., 0.]]) >>> b = np.array([[ 0., 13., 11., 3.], ... [ 13., 0., 2., 10.], ... [ 11., 2., 0., 8.], ... [ 3., 10., 8., 0.]]) >>> u_a = dcor.u_centered(a) >>> u_a array([[ 0., -2., 1., 1.], [-2., 0., 1., 1.], [ 1., 1., 0., -2.], [ 1., 1., -2., 0.]]) >>> u_b = dcor.u_centered(b) >>> u_b array([[ 0. , 2.66666667, 2.66666667, -5.33333333], [ 2.66666667, 0. , -5.33333333, 2.66666667], [ 2.66666667, -5.33333333, 0. , 2.66666667], [-5.33333333, 2.66666667, 2.66666667, 0. ]]) >>> proj_a = dcor.u_complementary_projection(u_a) >>> proj_a(u_a) array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]) >>> proj_a(u_b) array([[ 0.0000000e+00, -4.4408921e-16, 4.0000000e+00, -4.0000000e+00], [-4.4408921e-16, 0.0000000e+00, -4.0000000e+00, 4.0000000e+00], [ 4.0000000e+00, -4.0000000e+00, 0.0000000e+00, -4.4408921e-16], [-4.0000000e+00, 4.0000000e+00, -4.4408921e-16, 0.0000000e+00]]) >>> proj_null = dcor.u_complementary_projection(np.zeros((4, 4))) >>> proj_null(u_a) array([[ 0., -2., 1., 1.], [-2., 0., 1., 1.], [ 1., 1., 0., -2.], [ 1., 1., -2., 0.]])
[ "r", "Return", "the", "orthogonal", "projection", "function", "over", ":", "math", ":", "a^", "{", "\\", "perp", "}", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L483-L573
vnmabus/dcor
dcor/_dcor_internals.py
_distance_matrix_generic
def _distance_matrix_generic(x, centering, exponent=1): """Compute a centered distance matrix given a matrix.""" _check_valid_dcov_exponent(exponent) x = _transform_to_2d(x) # Calculate distance matrices a = distances.pairwise_distances(x, exponent=exponent) # Double centering a = centering(a, out=a) return a
python
def _distance_matrix_generic(x, centering, exponent=1): """Compute a centered distance matrix given a matrix.""" _check_valid_dcov_exponent(exponent) x = _transform_to_2d(x) # Calculate distance matrices a = distances.pairwise_distances(x, exponent=exponent) # Double centering a = centering(a, out=a) return a
[ "def", "_distance_matrix_generic", "(", "x", ",", "centering", ",", "exponent", "=", "1", ")", ":", "_check_valid_dcov_exponent", "(", "exponent", ")", "x", "=", "_transform_to_2d", "(", "x", ")", "# Calculate distance matrices", "a", "=", "distances", ".", "pairwise_distances", "(", "x", ",", "exponent", "=", "exponent", ")", "# Double centering", "a", "=", "centering", "(", "a", ",", "out", "=", "a", ")", "return", "a" ]
Compute a centered distance matrix given a matrix.
[ "Compute", "a", "centered", "distance", "matrix", "given", "a", "matrix", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L576-L588
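The ``centering`` callable passed in above is what distinguishes the biased statistics (classical double centering) from the unbiased ones (U-centering, shown further down). As a quick reference, here is a minimal NumPy sketch of the classical double-centering step; it is my own illustration of the operation, not the library's internal implementation:

import numpy as np

def double_center(d):
    # Classic double centering: d_ij - row_mean_i - col_mean_j + grand_mean.
    return d - d.mean(axis=1, keepdims=True) - d.mean(axis=0, keepdims=True) + d.mean()

d = np.array([[0., 1., 1., 0.],
              [1., 0., 0., 1.],
              [1., 0., 0., 1.],
              [0., 1., 1., 0.]])
print(double_center(d))  # every entry becomes +/-0.5 for this symmetric example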
vnmabus/dcor
dcor/_dcor_internals.py
_af_inv_scaled
def _af_inv_scaled(x):
    """Scale a random vector for use with the affinely invariant measures."""
    x = _transform_to_2d(x)

    cov_matrix = np.atleast_2d(np.cov(x, rowvar=False))

    cov_matrix_power = _mat_sqrt_inv(cov_matrix)

    return x.dot(cov_matrix_power)
python
def _af_inv_scaled(x):
    """Scale a random vector for use with the affinely invariant measures."""
    x = _transform_to_2d(x)

    cov_matrix = np.atleast_2d(np.cov(x, rowvar=False))

    cov_matrix_power = _mat_sqrt_inv(cov_matrix)

    return x.dot(cov_matrix_power)
[ "def", "_af_inv_scaled", "(", "x", ")", ":", "x", "=", "_transform_to_2d", "(", "x", ")", "cov_matrix", "=", "np", ".", "atleast_2d", "(", "np", ".", "cov", "(", "x", ",", "rowvar", "=", "False", ")", ")", "cov_matrix_power", "=", "_mat_sqrt_inv", "(", "cov_matrix", ")", "return", "x", ".", "dot", "(", "cov_matrix_power", ")" ]
Scale a random vector for use with the affinely invariant measures.
[ "Scale", "a", "random", "vector", "for", "using", "the", "affinely", "invariant", "measures" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor_internals.py#L615-L623
vnmabus/dcor
dcor/_partial_dcor.py
partial_distance_covariance
def partial_distance_covariance(x, y, z): """ Partial distance covariance estimator. Compute the estimator for the partial distance covariance of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance covariance is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance covariance. See Also -------- partial_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> c = np.array([[1, 3, 4], ... [5, 7, 8], ... [9, 11, 15], ... [13, 15, 16]]) >>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS 0.0024298... >>> dcor.partial_distance_covariance(a, b, c) 0.0347030... >>> dcor.partial_distance_covariance(b, b, c) 0.4956241... """ a = _u_distance_matrix(x) b = _u_distance_matrix(y) c = _u_distance_matrix(z) proj = u_complementary_projection(c) return u_product(proj(a), proj(b))
python
def partial_distance_covariance(x, y, z): """ Partial distance covariance estimator. Compute the estimator for the partial distance covariance of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance covariance is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance covariance. See Also -------- partial_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> c = np.array([[1, 3, 4], ... [5, 7, 8], ... [9, 11, 15], ... [13, 15, 16]]) >>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS 0.0024298... >>> dcor.partial_distance_covariance(a, b, c) 0.0347030... >>> dcor.partial_distance_covariance(b, b, c) 0.4956241... """ a = _u_distance_matrix(x) b = _u_distance_matrix(y) c = _u_distance_matrix(z) proj = u_complementary_projection(c) return u_product(proj(a), proj(b))
[ "def", "partial_distance_covariance", "(", "x", ",", "y", ",", "z", ")", ":", "a", "=", "_u_distance_matrix", "(", "x", ")", "b", "=", "_u_distance_matrix", "(", "y", ")", "c", "=", "_u_distance_matrix", "(", "z", ")", "proj", "=", "u_complementary_projection", "(", "c", ")", "return", "u_product", "(", "proj", "(", "a", ")", ",", "proj", "(", "b", ")", ")" ]
Partial distance covariance estimator. Compute the estimator for the partial distance covariance of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance covariance is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance covariance. See Also -------- partial_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> c = np.array([[1, 3, 4], ... [5, 7, 8], ... [9, 11, 15], ... [13, 15, 16]]) >>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS 0.0024298... >>> dcor.partial_distance_covariance(a, b, c) 0.0347030... >>> dcor.partial_distance_covariance(b, b, c) 0.4956241...
[ "Partial", "distance", "covariance", "estimator", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_partial_dcor.py#L13-L70
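In other words, the estimator is the U-statistic inner product of the two U-centered distance matrices after both have been projected onto the orthogonal complement of the third. The sketch below re-derives it from the public helpers whose doctests appear in this file (``dcor.u_centered``, ``dcor.u_complementary_projection``, ``dcor.partial_distance_covariance``); the ``u_stat_product`` helper and its 1/(n(n-3)) normalisation are my reconstruction of the internal ``u_product``, so treat the exact constant as an assumption:

import numpy as np
from scipy.spatial.distance import cdist
import dcor

def u_stat_product(a, b):
    # Assumed inner product in the space of U-centered matrices.
    n = a.shape[0]
    return np.sum(a * b) / (n * (n - 3))

rng = np.random.default_rng(0)
x = rng.normal(size=(10, 3))
y = rng.normal(size=(10, 3))
z = rng.normal(size=(10, 2))

a = dcor.u_centered(cdist(x, x))
b = dcor.u_centered(cdist(y, y))
c = dcor.u_centered(cdist(z, z))

proj = dcor.u_complementary_projection(c)
manual = u_stat_product(proj(a), proj(b))
print(manual, dcor.partial_distance_covariance(x, y, z))  # the two values should agree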
vnmabus/dcor
dcor/_partial_dcor.py
partial_distance_correlation
def partial_distance_correlation(x, y, z): # pylint:disable=too-many-locals """ Partial distance correlation estimator. Compute the estimator for the partial distance correlation of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance correlation is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance correlation. See Also -------- partial_distance_covariance Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1], [1], [2], [2], [3]]) >>> b = np.array([[1], [2], [1], [2], [1]]) >>> c = np.array([[1], [2], [2], [1], [2]]) >>> dcor.partial_distance_correlation(a, a, c) 1.0 >>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS -0.5... >>> dcor.partial_distance_correlation(b, b, c) 1.0 >>> dcor.partial_distance_correlation(a, c, c) 0.0 """ a = _u_distance_matrix(x) b = _u_distance_matrix(y) c = _u_distance_matrix(z) aa = u_product(a, a) bb = u_product(b, b) cc = u_product(c, c) ab = u_product(a, b) ac = u_product(a, c) bc = u_product(b, c) denom_sqr = aa * bb r_xy = ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_xy = np.clip(r_xy, -1, 1) denom_sqr = aa * cc r_xz = ac / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_xz = np.clip(r_xz, -1, 1) denom_sqr = bb * cc r_yz = bc / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_yz = np.clip(r_yz, -1, 1) denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2) return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom
python
def partial_distance_correlation(x, y, z): # pylint:disable=too-many-locals """ Partial distance correlation estimator. Compute the estimator for the partial distance correlation of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance correlation is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance correlation. See Also -------- partial_distance_covariance Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1], [1], [2], [2], [3]]) >>> b = np.array([[1], [2], [1], [2], [1]]) >>> c = np.array([[1], [2], [2], [1], [2]]) >>> dcor.partial_distance_correlation(a, a, c) 1.0 >>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS -0.5... >>> dcor.partial_distance_correlation(b, b, c) 1.0 >>> dcor.partial_distance_correlation(a, c, c) 0.0 """ a = _u_distance_matrix(x) b = _u_distance_matrix(y) c = _u_distance_matrix(z) aa = u_product(a, a) bb = u_product(b, b) cc = u_product(c, c) ab = u_product(a, b) ac = u_product(a, c) bc = u_product(b, c) denom_sqr = aa * bb r_xy = ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_xy = np.clip(r_xy, -1, 1) denom_sqr = aa * cc r_xz = ac / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_xz = np.clip(r_xz, -1, 1) denom_sqr = bb * cc r_yz = bc / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr r_yz = np.clip(r_yz, -1, 1) denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2) return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom
[ "def", "partial_distance_correlation", "(", "x", ",", "y", ",", "z", ")", ":", "# pylint:disable=too-many-locals", "a", "=", "_u_distance_matrix", "(", "x", ")", "b", "=", "_u_distance_matrix", "(", "y", ")", "c", "=", "_u_distance_matrix", "(", "z", ")", "aa", "=", "u_product", "(", "a", ",", "a", ")", "bb", "=", "u_product", "(", "b", ",", "b", ")", "cc", "=", "u_product", "(", "c", ",", "c", ")", "ab", "=", "u_product", "(", "a", ",", "b", ")", "ac", "=", "u_product", "(", "a", ",", "c", ")", "bc", "=", "u_product", "(", "b", ",", "c", ")", "denom_sqr", "=", "aa", "*", "bb", "r_xy", "=", "ab", "/", "_sqrt", "(", "denom_sqr", ")", "if", "denom_sqr", "!=", "0", "else", "denom_sqr", "r_xy", "=", "np", ".", "clip", "(", "r_xy", ",", "-", "1", ",", "1", ")", "denom_sqr", "=", "aa", "*", "cc", "r_xz", "=", "ac", "/", "_sqrt", "(", "denom_sqr", ")", "if", "denom_sqr", "!=", "0", "else", "denom_sqr", "r_xz", "=", "np", ".", "clip", "(", "r_xz", ",", "-", "1", ",", "1", ")", "denom_sqr", "=", "bb", "*", "cc", "r_yz", "=", "bc", "/", "_sqrt", "(", "denom_sqr", ")", "if", "denom_sqr", "!=", "0", "else", "denom_sqr", "r_yz", "=", "np", ".", "clip", "(", "r_yz", ",", "-", "1", ",", "1", ")", "denom", "=", "_sqrt", "(", "1", "-", "r_xz", "**", "2", ")", "*", "_sqrt", "(", "1", "-", "r_yz", "**", "2", ")", "return", "(", "r_xy", "-", "r_xz", "*", "r_yz", ")", "/", "denom", "if", "denom", "!=", "0", "else", "denom" ]
Partial distance correlation estimator. Compute the estimator for the partial distance correlation of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance correlation is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance correlation. See Also -------- partial_distance_covariance Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1], [1], [2], [2], [3]]) >>> b = np.array([[1], [2], [1], [2], [1]]) >>> c = np.array([[1], [2], [2], [1], [2]]) >>> dcor.partial_distance_correlation(a, a, c) 1.0 >>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS -0.5... >>> dcor.partial_distance_correlation(b, b, c) 1.0 >>> dcor.partial_distance_correlation(a, c, c) 0.0
[ "Partial", "distance", "correlation", "estimator", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_partial_dcor.py#L73-L145
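The final lines above are the usual partial-correlation recursion, applied to signed, bias-corrected distance correlations instead of Pearson correlations. A tiny worked illustration of just that algebraic step (the input values below are made up and are not outputs of the library):

import numpy as np

def partial_from_pairwise(r_xy, r_xz, r_yz):
    # Partial correlation formula used in the last step of the estimator.
    denom = np.sqrt(1 - r_xz ** 2) * np.sqrt(1 - r_yz ** 2)
    return (r_xy - r_xz * r_yz) / denom if denom != 0 else 0.0

print(partial_from_pairwise(0.6, 0.5, 0.4))  # (0.6 - 0.2) / (sqrt(0.75) * sqrt(0.84)) ~= 0.504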
vnmabus/dcor
dcor/_energy.py
_energy_distance_from_distance_matrices
def _energy_distance_from_distance_matrices( distance_xx, distance_yy, distance_xy): """Compute energy distance with precalculated distance matrices.""" return (2 * np.mean(distance_xy) - np.mean(distance_xx) - np.mean(distance_yy))
python
def _energy_distance_from_distance_matrices( distance_xx, distance_yy, distance_xy): """Compute energy distance with precalculated distance matrices.""" return (2 * np.mean(distance_xy) - np.mean(distance_xx) - np.mean(distance_yy))
[ "def", "_energy_distance_from_distance_matrices", "(", "distance_xx", ",", "distance_yy", ",", "distance_xy", ")", ":", "return", "(", "2", "*", "np", ".", "mean", "(", "distance_xy", ")", "-", "np", ".", "mean", "(", "distance_xx", ")", "-", "np", ".", "mean", "(", "distance_yy", ")", ")" ]
Compute energy distance with precalculated distance matrices.
[ "Compute", "energy", "distance", "with", "precalculated", "distance", "matrices", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_energy.py#L24-L28
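Spelled out, this is the standard sample version of 2 E||X - Y|| - E||X - X'|| - E||Y - Y'||, with each expectation replaced by the mean of the corresponding distance matrix. A self-contained sketch (``cdist`` from SciPy stands in for the library's internal pairwise-distance routine):

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=(200, 2))
y = rng.normal(0.5, 1.0, size=(200, 2))

d_xx = cdist(x, x)
d_yy = cdist(y, y)
d_xy = cdist(x, y)

energy = 2 * d_xy.mean() - d_xx.mean() - d_yy.mean()
print(energy)  # grows as the two samples differ more in distribution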
vnmabus/dcor
dcor/_energy.py
_energy_distance_imp
def _energy_distance_imp(x, y, exponent=1): """ Real implementation of :func:`energy_distance`. This function is used to make parameter ``exponent`` keyword-only in Python 2. """ x = _transform_to_2d(x) y = _transform_to_2d(y) _check_valid_energy_exponent(exponent) distance_xx = distances.pairwise_distances(x, exponent=exponent) distance_yy = distances.pairwise_distances(y, exponent=exponent) distance_xy = distances.pairwise_distances(x, y, exponent=exponent) return _energy_distance_from_distance_matrices(distance_xx=distance_xx, distance_yy=distance_yy, distance_xy=distance_xy)
python
def _energy_distance_imp(x, y, exponent=1): """ Real implementation of :func:`energy_distance`. This function is used to make parameter ``exponent`` keyword-only in Python 2. """ x = _transform_to_2d(x) y = _transform_to_2d(y) _check_valid_energy_exponent(exponent) distance_xx = distances.pairwise_distances(x, exponent=exponent) distance_yy = distances.pairwise_distances(y, exponent=exponent) distance_xy = distances.pairwise_distances(x, y, exponent=exponent) return _energy_distance_from_distance_matrices(distance_xx=distance_xx, distance_yy=distance_yy, distance_xy=distance_xy)
[ "def", "_energy_distance_imp", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "x", "=", "_transform_to_2d", "(", "x", ")", "y", "=", "_transform_to_2d", "(", "y", ")", "_check_valid_energy_exponent", "(", "exponent", ")", "distance_xx", "=", "distances", ".", "pairwise_distances", "(", "x", ",", "exponent", "=", "exponent", ")", "distance_yy", "=", "distances", ".", "pairwise_distances", "(", "y", ",", "exponent", "=", "exponent", ")", "distance_xy", "=", "distances", ".", "pairwise_distances", "(", "x", ",", "y", ",", "exponent", "=", "exponent", ")", "return", "_energy_distance_from_distance_matrices", "(", "distance_xx", "=", "distance_xx", ",", "distance_yy", "=", "distance_yy", ",", "distance_xy", "=", "distance_xy", ")" ]
Real implementation of :func:`energy_distance`. This function is used to make parameter ``exponent`` keyword-only in Python 2.
[ "Real", "implementation", "of", ":", "func", ":", "energy_distance", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_energy.py#L31-L50
vnmabus/dcor
dcor/_dcor.py
_distance_covariance_sqr_naive
def _distance_covariance_sqr_naive(x, y, exponent=1):
    """
    Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    a = _distance_matrix(x, exponent=exponent)
    b = _distance_matrix(y, exponent=exponent)

    return mean_product(a, b)
python
def _distance_covariance_sqr_naive(x, y, exponent=1):
    """
    Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    a = _distance_matrix(x, exponent=exponent)
    b = _distance_matrix(y, exponent=exponent)

    return mean_product(a, b)
[ "def", "_distance_covariance_sqr_naive", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "a", "=", "_distance_matrix", "(", "x", ",", "exponent", "=", "exponent", ")", "b", "=", "_distance_matrix", "(", "y", ",", "exponent", "=", "exponent", ")", "return", "mean_product", "(", "a", ",", "b", ")" ]
Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
[ "Naive", "biased", "estimator", "for", "distance", "covariance", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L34-L44
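Putting the pieces together, the biased estimator is just the mean of the element-wise product of the two double-centered distance matrices. The sketch below assumes ``mean_product`` is that plain element-wise mean (the doctest values elsewhere in this file are consistent with this) and checks the result against the public entry point ``dcor.distance_covariance_sqr``, whose doctests appear further down:

import numpy as np
from scipy.spatial.distance import cdist
import dcor

def double_center(d):
    return d - d.mean(axis=1, keepdims=True) - d.mean(axis=0, keepdims=True) + d.mean()

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 3))
y = rng.normal(size=(50, 2))

a = double_center(cdist(x, x))
b = double_center(cdist(y, y))
dcov_sqr = np.mean(a * b)  # the role played by mean_product above

print(dcov_sqr, dcor.distance_covariance_sqr(x, y))  # the two values should match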
vnmabus/dcor
dcor/_dcor.py
_u_distance_covariance_sqr_naive
def _u_distance_covariance_sqr_naive(x, y, exponent=1): """ Naive unbiased estimator for distance covariance. Computes the unbiased estimator for distance covariance between two matrices, using an :math:`O(N^2)` algorithm. """ a = _u_distance_matrix(x, exponent=exponent) b = _u_distance_matrix(y, exponent=exponent) return u_product(a, b)
python
def _u_distance_covariance_sqr_naive(x, y, exponent=1): """ Naive unbiased estimator for distance covariance. Computes the unbiased estimator for distance covariance between two matrices, using an :math:`O(N^2)` algorithm. """ a = _u_distance_matrix(x, exponent=exponent) b = _u_distance_matrix(y, exponent=exponent) return u_product(a, b)
[ "def", "_u_distance_covariance_sqr_naive", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "a", "=", "_u_distance_matrix", "(", "x", ",", "exponent", "=", "exponent", ")", "b", "=", "_u_distance_matrix", "(", "y", ",", "exponent", "=", "exponent", ")", "return", "u_product", "(", "a", ",", "b", ")" ]
Naive unbiased estimator for distance covariance. Computes the unbiased estimator for distance covariance between two matrices, using an :math:`O(N^2)` algorithm.
[ "Naive", "unbiased", "estimator", "for", "distance", "covariance", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L47-L57
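The unbiased variant swaps double centering for U-centering, which divides the row and column sums by n - 2 and the grand sum by (n - 1)(n - 2) and zeroes the diagonal, and it swaps the plain mean for a U-statistic inner product with a 1/(n(n-3)) factor. Both formulas below are my reconstruction, but they reproduce the doctest values that appear in this file (the ``u_centered`` example earlier and the ``u_distance_covariance_sqr(b, b)`` value later):

import numpy as np

def u_center(d):
    # Off-diagonal: d_ij - r_i/(n-2) - c_j/(n-2) + total/((n-1)(n-2)); diagonal set to zero.
    n = d.shape[0]
    out = (d - d.sum(axis=1, keepdims=True) / (n - 2)
             - d.sum(axis=0, keepdims=True) / (n - 2)
             + d.sum() / ((n - 1) * (n - 2)))
    np.fill_diagonal(out, 0.0)
    return out

def u_inner(a, b):
    n = a.shape[0]
    return np.sum(a * b) / (n * (n - 3))

a = np.array([[0., 3., 11., 6.],
              [3., 0., 8., 3.],
              [11., 8., 0., 5.],
              [6., 3., 5., 0.]])
print(u_center(a))  # matches the u_a matrix in the u_complementary_projection doctest

d_b = np.array([[0., 1., 1., 0.],
                [1., 0., 0., 1.],
                [1., 0., 0., 1.],
                [0., 1., 1., 0.]])  # pairwise distances of the sample [1, 0, 0, 1]
print(u_inner(u_center(d_b), u_center(d_b)))  # ~0.6667, as in the u_distance_covariance_sqr doctest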
vnmabus/dcor
dcor/_dcor.py
_distance_sqr_stats_naive_generic
def _distance_sqr_stats_naive_generic(x, y, matrix_centered, product, exponent=1): """Compute generic squared stats.""" a = matrix_centered(x, exponent=exponent) b = matrix_centered(y, exponent=exponent) covariance_xy_sqr = product(a, b) variance_x_sqr = product(a, a) variance_y_sqr = product(b, b) denominator_sqr = np.absolute(variance_x_sqr * variance_y_sqr) denominator = _sqrt(denominator_sqr) # Comparisons using a tolerance can change results if the # covariance has a similar order of magnitude if denominator == 0.0: correlation_xy_sqr = 0.0 else: correlation_xy_sqr = covariance_xy_sqr / denominator return Stats(covariance_xy=covariance_xy_sqr, correlation_xy=correlation_xy_sqr, variance_x=variance_x_sqr, variance_y=variance_y_sqr)
python
def _distance_sqr_stats_naive_generic(x, y, matrix_centered, product, exponent=1): """Compute generic squared stats.""" a = matrix_centered(x, exponent=exponent) b = matrix_centered(y, exponent=exponent) covariance_xy_sqr = product(a, b) variance_x_sqr = product(a, a) variance_y_sqr = product(b, b) denominator_sqr = np.absolute(variance_x_sqr * variance_y_sqr) denominator = _sqrt(denominator_sqr) # Comparisons using a tolerance can change results if the # covariance has a similar order of magnitude if denominator == 0.0: correlation_xy_sqr = 0.0 else: correlation_xy_sqr = covariance_xy_sqr / denominator return Stats(covariance_xy=covariance_xy_sqr, correlation_xy=correlation_xy_sqr, variance_x=variance_x_sqr, variance_y=variance_y_sqr)
[ "def", "_distance_sqr_stats_naive_generic", "(", "x", ",", "y", ",", "matrix_centered", ",", "product", ",", "exponent", "=", "1", ")", ":", "a", "=", "matrix_centered", "(", "x", ",", "exponent", "=", "exponent", ")", "b", "=", "matrix_centered", "(", "y", ",", "exponent", "=", "exponent", ")", "covariance_xy_sqr", "=", "product", "(", "a", ",", "b", ")", "variance_x_sqr", "=", "product", "(", "a", ",", "a", ")", "variance_y_sqr", "=", "product", "(", "b", ",", "b", ")", "denominator_sqr", "=", "np", ".", "absolute", "(", "variance_x_sqr", "*", "variance_y_sqr", ")", "denominator", "=", "_sqrt", "(", "denominator_sqr", ")", "# Comparisons using a tolerance can change results if the", "# covariance has a similar order of magnitude", "if", "denominator", "==", "0.0", ":", "correlation_xy_sqr", "=", "0.0", "else", ":", "correlation_xy_sqr", "=", "covariance_xy_sqr", "/", "denominator", "return", "Stats", "(", "covariance_xy", "=", "covariance_xy_sqr", ",", "correlation_xy", "=", "correlation_xy_sqr", ",", "variance_x", "=", "variance_x_sqr", ",", "variance_y", "=", "variance_y_sqr", ")" ]
Compute generic squared stats.
[ "Compute", "generic", "squared", "stats", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L60-L83
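The only step this helper adds on top of the covariance computations is the normalisation by the geometric mean of the two variance terms, with a guard for the zero-variance case. Assuming the returned ``Stats`` object is the namedtuple its doctest repr suggests, that relationship can be checked directly through the public ``dcor.distance_stats_sqr`` shown later in this file:

import numpy as np
import dcor

rng = np.random.default_rng(0)
x = rng.normal(size=(40, 2))
y = x[:, :1] + rng.normal(scale=0.1, size=(40, 1))

stats = dcor.distance_stats_sqr(x, y)
check = stats.covariance_xy / np.sqrt(stats.variance_x * stats.variance_y)
print(np.isclose(stats.correlation_xy, check))  # True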
vnmabus/dcor
dcor/_dcor.py
_distance_correlation_sqr_naive
def _distance_correlation_sqr_naive(x, y, exponent=1): """Biased distance correlation estimator between two matrices.""" return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_distance_matrix, product=mean_product, exponent=exponent).correlation_xy
python
def _distance_correlation_sqr_naive(x, y, exponent=1): """Biased distance correlation estimator between two matrices.""" return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_distance_matrix, product=mean_product, exponent=exponent).correlation_xy
[ "def", "_distance_correlation_sqr_naive", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "return", "_distance_sqr_stats_naive_generic", "(", "x", ",", "y", ",", "matrix_centered", "=", "_distance_matrix", ",", "product", "=", "mean_product", ",", "exponent", "=", "exponent", ")", ".", "correlation_xy" ]
Biased distance correlation estimator between two matrices.
[ "Biased", "distance", "correlation", "estimator", "between", "two", "matrices", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L86-L92
vnmabus/dcor
dcor/_dcor.py
_u_distance_correlation_sqr_naive
def _u_distance_correlation_sqr_naive(x, y, exponent=1): """Bias-corrected distance correlation estimator between two matrices.""" return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_u_distance_matrix, product=u_product, exponent=exponent).correlation_xy
python
def _u_distance_correlation_sqr_naive(x, y, exponent=1): """Bias-corrected distance correlation estimator between two matrices.""" return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_u_distance_matrix, product=u_product, exponent=exponent).correlation_xy
[ "def", "_u_distance_correlation_sqr_naive", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "return", "_distance_sqr_stats_naive_generic", "(", "x", ",", "y", ",", "matrix_centered", "=", "_u_distance_matrix", ",", "product", "=", "u_product", ",", "exponent", "=", "exponent", ")", ".", "correlation_xy" ]
Bias-corrected distance correlation estimator between two matrices.
[ "Bias", "-", "corrected", "distance", "correlation", "estimator", "between", "two", "matrices", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L95-L101
vnmabus/dcor
dcor/_dcor.py
_can_use_fast_algorithm
def _can_use_fast_algorithm(x, y, exponent=1): """ Check if the fast algorithm for distance stats can be used. The fast algorithm has complexity :math:`O(NlogN)`, better than the complexity of the naive algorithm (:math:`O(N^2)`). The algorithm can only be used for random variables (not vectors) where the number of instances is greater than 3. Also, the exponent must be 1. """ return (_is_random_variable(x) and _is_random_variable(y) and x.shape[0] > 3 and y.shape[0] > 3 and exponent == 1)
python
def _can_use_fast_algorithm(x, y, exponent=1): """ Check if the fast algorithm for distance stats can be used. The fast algorithm has complexity :math:`O(NlogN)`, better than the complexity of the naive algorithm (:math:`O(N^2)`). The algorithm can only be used for random variables (not vectors) where the number of instances is greater than 3. Also, the exponent must be 1. """ return (_is_random_variable(x) and _is_random_variable(y) and x.shape[0] > 3 and y.shape[0] > 3 and exponent == 1)
[ "def", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "return", "(", "_is_random_variable", "(", "x", ")", "and", "_is_random_variable", "(", "y", ")", "and", "x", ".", "shape", "[", "0", "]", ">", "3", "and", "y", ".", "shape", "[", "0", "]", ">", "3", "and", "exponent", "==", "1", ")" ]
Check if the fast algorithm for distance stats can be used. The fast algorithm has complexity :math:`O(NlogN)`, better than the complexity of the naive algorithm (:math:`O(N^2)`). The algorithm can only be used for random variables (not vectors) where the number of instances is greater than 3. Also, the exponent must be 1.
[ "Check", "if", "the", "fast", "algorithm", "for", "distance", "stats", "can", "be", "used", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L115-L127
vnmabus/dcor
dcor/_dcor.py
_dyad_update
def _dyad_update(y, c): # pylint:disable=too-many-locals # This function has many locals so it can be compared # with the original algorithm. """ Inner function of the fast distance covariance. This function is compiled because otherwise it would become a bottleneck. """ n = y.shape[0] gamma = np.zeros(n, dtype=c.dtype) # Step 1: get the smallest l such that n <= 2^l l_max = int(math.ceil(np.log2(n))) # Step 2: assign s(l, k) = 0 s_len = 2 ** (l_max + 1) s = np.zeros(s_len, dtype=c.dtype) pos_sums = np.arange(l_max) pos_sums[:] = 2 ** (l_max - pos_sums) pos_sums = np.cumsum(pos_sums) # Step 3: iteration for i in range(1, n): # Step 3.a: update s(l, k) for l in range(l_max): k = int(math.ceil(y[i - 1] / 2 ** l)) pos = k - 1 if l > 0: pos += pos_sums[l - 1] s[pos] += c[i - 1] # Steps 3.b and 3.c for l in range(l_max): k = int(math.floor((y[i] - 1) / 2 ** l)) if k / 2 > math.floor(k / 2): pos = k - 1 if l > 0: pos += pos_sums[l - 1] gamma[i] = gamma[i] + s[pos] return gamma
python
def _dyad_update(y, c): # pylint:disable=too-many-locals # This function has many locals so it can be compared # with the original algorithm. """ Inner function of the fast distance covariance. This function is compiled because otherwise it would become a bottleneck. """ n = y.shape[0] gamma = np.zeros(n, dtype=c.dtype) # Step 1: get the smallest l such that n <= 2^l l_max = int(math.ceil(np.log2(n))) # Step 2: assign s(l, k) = 0 s_len = 2 ** (l_max + 1) s = np.zeros(s_len, dtype=c.dtype) pos_sums = np.arange(l_max) pos_sums[:] = 2 ** (l_max - pos_sums) pos_sums = np.cumsum(pos_sums) # Step 3: iteration for i in range(1, n): # Step 3.a: update s(l, k) for l in range(l_max): k = int(math.ceil(y[i - 1] / 2 ** l)) pos = k - 1 if l > 0: pos += pos_sums[l - 1] s[pos] += c[i - 1] # Steps 3.b and 3.c for l in range(l_max): k = int(math.floor((y[i] - 1) / 2 ** l)) if k / 2 > math.floor(k / 2): pos = k - 1 if l > 0: pos += pos_sums[l - 1] gamma[i] = gamma[i] + s[pos] return gamma
[ "def", "_dyad_update", "(", "y", ",", "c", ")", ":", "# pylint:disable=too-many-locals", "# This function has many locals so it can be compared", "# with the original algorithm.", "n", "=", "y", ".", "shape", "[", "0", "]", "gamma", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "c", ".", "dtype", ")", "# Step 1: get the smallest l such that n <= 2^l", "l_max", "=", "int", "(", "math", ".", "ceil", "(", "np", ".", "log2", "(", "n", ")", ")", ")", "# Step 2: assign s(l, k) = 0", "s_len", "=", "2", "**", "(", "l_max", "+", "1", ")", "s", "=", "np", ".", "zeros", "(", "s_len", ",", "dtype", "=", "c", ".", "dtype", ")", "pos_sums", "=", "np", ".", "arange", "(", "l_max", ")", "pos_sums", "[", ":", "]", "=", "2", "**", "(", "l_max", "-", "pos_sums", ")", "pos_sums", "=", "np", ".", "cumsum", "(", "pos_sums", ")", "# Step 3: iteration", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "# Step 3.a: update s(l, k)", "for", "l", "in", "range", "(", "l_max", ")", ":", "k", "=", "int", "(", "math", ".", "ceil", "(", "y", "[", "i", "-", "1", "]", "/", "2", "**", "l", ")", ")", "pos", "=", "k", "-", "1", "if", "l", ">", "0", ":", "pos", "+=", "pos_sums", "[", "l", "-", "1", "]", "s", "[", "pos", "]", "+=", "c", "[", "i", "-", "1", "]", "# Steps 3.b and 3.c", "for", "l", "in", "range", "(", "l_max", ")", ":", "k", "=", "int", "(", "math", ".", "floor", "(", "(", "y", "[", "i", "]", "-", "1", ")", "/", "2", "**", "l", ")", ")", "if", "k", "/", "2", ">", "math", ".", "floor", "(", "k", "/", "2", ")", ":", "pos", "=", "k", "-", "1", "if", "l", ">", "0", ":", "pos", "+=", "pos_sums", "[", "l", "-", "1", "]", "gamma", "[", "i", "]", "=", "gamma", "[", "i", "]", "+", "s", "[", "pos", "]", "return", "gamma" ]
Inner function of the fast distance covariance. This function is compiled because otherwise it would become a bottleneck.
[ "Inner", "function", "of", "the", "fast", "distance", "covariance", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L131-L178
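As I read it, the quantity this routine accumulates is, for each index i, the sum of the weights c[j] over the earlier points j < i whose y value is strictly smaller than y[i]; the dyadic tree of partial sums s only serves to answer that query in O(log n) per point. A brute-force O(n^2) reference, useful for testing and purely my interpretation of the routine, would be:

import numpy as np

def dyad_update_naive(y, c):
    # gamma[i] = sum of c[j] for j < i with y[j] < y[i].
    n = len(y)
    gamma = np.zeros(n, dtype=np.asarray(c).dtype)
    for i in range(1, n):
        for j in range(i):
            if y[j] < y[i]:
                gamma[i] += c[j]
    return gamma

print(dyad_update_naive(np.array([1, 3, 2, 4]), np.ones(4)))  # [0. 1. 1. 3.]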
vnmabus/dcor
dcor/_dcor.py
_distance_covariance_sqr_fast_generic
def _distance_covariance_sqr_fast_generic( x, y, unbiased=False): # pylint:disable=too-many-locals # This function has many locals so it can be compared # with the original algorithm. """Fast algorithm for the squared distance covariance.""" x = np.asarray(x) y = np.asarray(y) x = np.ravel(x) y = np.ravel(y) n = x.shape[0] assert n > 3 assert n == y.shape[0] temp = range(n) # Step 1 ix0 = np.argsort(x) vx = x[ix0] ix = np.zeros(n, dtype=int) ix[ix0] = temp iy0 = np.argsort(y) vy = y[iy0] iy = np.zeros(n, dtype=int) iy[iy0] = temp # Step 2 sx = np.cumsum(vx) sy = np.cumsum(vy) # Step 3 alpha_x = ix alpha_y = iy beta_x = sx[ix] - vx[ix] beta_y = sy[iy] - vy[iy] # Step 4 x_dot = np.sum(x) y_dot = np.sum(y) # Step 5 a_i_dot = x_dot + (2 * alpha_x - n) * x - 2 * beta_x b_i_dot = y_dot + (2 * alpha_y - n) * y - 2 * beta_y sum_ab = np.sum(a_i_dot * b_i_dot) # Step 6 a_dot_dot = 2 * np.sum(alpha_x * x) - 2 * np.sum(beta_x) b_dot_dot = 2 * np.sum(alpha_y * y) - 2 * np.sum(beta_y) # Step 7 gamma_1 = _partial_sum_2d(x, y, np.ones(n, dtype=x.dtype)) gamma_x = _partial_sum_2d(x, y, x) gamma_y = _partial_sum_2d(x, y, y) gamma_xy = _partial_sum_2d(x, y, x * y) # Step 8 aijbij = np.sum(x * y * gamma_1 + gamma_xy - x * gamma_y - y * gamma_x) if unbiased: d3 = (n - 3) d2 = (n - 2) d1 = (n - 1) else: d3 = d2 = d1 = n # Step 9 d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 + a_dot_dot / n * b_dot_dot / d1 / d2 / d3) return d_cov
python
def _distance_covariance_sqr_fast_generic( x, y, unbiased=False): # pylint:disable=too-many-locals # This function has many locals so it can be compared # with the original algorithm. """Fast algorithm for the squared distance covariance.""" x = np.asarray(x) y = np.asarray(y) x = np.ravel(x) y = np.ravel(y) n = x.shape[0] assert n > 3 assert n == y.shape[0] temp = range(n) # Step 1 ix0 = np.argsort(x) vx = x[ix0] ix = np.zeros(n, dtype=int) ix[ix0] = temp iy0 = np.argsort(y) vy = y[iy0] iy = np.zeros(n, dtype=int) iy[iy0] = temp # Step 2 sx = np.cumsum(vx) sy = np.cumsum(vy) # Step 3 alpha_x = ix alpha_y = iy beta_x = sx[ix] - vx[ix] beta_y = sy[iy] - vy[iy] # Step 4 x_dot = np.sum(x) y_dot = np.sum(y) # Step 5 a_i_dot = x_dot + (2 * alpha_x - n) * x - 2 * beta_x b_i_dot = y_dot + (2 * alpha_y - n) * y - 2 * beta_y sum_ab = np.sum(a_i_dot * b_i_dot) # Step 6 a_dot_dot = 2 * np.sum(alpha_x * x) - 2 * np.sum(beta_x) b_dot_dot = 2 * np.sum(alpha_y * y) - 2 * np.sum(beta_y) # Step 7 gamma_1 = _partial_sum_2d(x, y, np.ones(n, dtype=x.dtype)) gamma_x = _partial_sum_2d(x, y, x) gamma_y = _partial_sum_2d(x, y, y) gamma_xy = _partial_sum_2d(x, y, x * y) # Step 8 aijbij = np.sum(x * y * gamma_1 + gamma_xy - x * gamma_y - y * gamma_x) if unbiased: d3 = (n - 3) d2 = (n - 2) d1 = (n - 1) else: d3 = d2 = d1 = n # Step 9 d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 + a_dot_dot / n * b_dot_dot / d1 / d2 / d3) return d_cov
[ "def", "_distance_covariance_sqr_fast_generic", "(", "x", ",", "y", ",", "unbiased", "=", "False", ")", ":", "# pylint:disable=too-many-locals", "# This function has many locals so it can be compared", "# with the original algorithm.", "x", "=", "np", ".", "asarray", "(", "x", ")", "y", "=", "np", ".", "asarray", "(", "y", ")", "x", "=", "np", ".", "ravel", "(", "x", ")", "y", "=", "np", ".", "ravel", "(", "y", ")", "n", "=", "x", ".", "shape", "[", "0", "]", "assert", "n", ">", "3", "assert", "n", "==", "y", ".", "shape", "[", "0", "]", "temp", "=", "range", "(", "n", ")", "# Step 1", "ix0", "=", "np", ".", "argsort", "(", "x", ")", "vx", "=", "x", "[", "ix0", "]", "ix", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "int", ")", "ix", "[", "ix0", "]", "=", "temp", "iy0", "=", "np", ".", "argsort", "(", "y", ")", "vy", "=", "y", "[", "iy0", "]", "iy", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "int", ")", "iy", "[", "iy0", "]", "=", "temp", "# Step 2", "sx", "=", "np", ".", "cumsum", "(", "vx", ")", "sy", "=", "np", ".", "cumsum", "(", "vy", ")", "# Step 3", "alpha_x", "=", "ix", "alpha_y", "=", "iy", "beta_x", "=", "sx", "[", "ix", "]", "-", "vx", "[", "ix", "]", "beta_y", "=", "sy", "[", "iy", "]", "-", "vy", "[", "iy", "]", "# Step 4", "x_dot", "=", "np", ".", "sum", "(", "x", ")", "y_dot", "=", "np", ".", "sum", "(", "y", ")", "# Step 5", "a_i_dot", "=", "x_dot", "+", "(", "2", "*", "alpha_x", "-", "n", ")", "*", "x", "-", "2", "*", "beta_x", "b_i_dot", "=", "y_dot", "+", "(", "2", "*", "alpha_y", "-", "n", ")", "*", "y", "-", "2", "*", "beta_y", "sum_ab", "=", "np", ".", "sum", "(", "a_i_dot", "*", "b_i_dot", ")", "# Step 6", "a_dot_dot", "=", "2", "*", "np", ".", "sum", "(", "alpha_x", "*", "x", ")", "-", "2", "*", "np", ".", "sum", "(", "beta_x", ")", "b_dot_dot", "=", "2", "*", "np", ".", "sum", "(", "alpha_y", "*", "y", ")", "-", "2", "*", "np", ".", "sum", "(", "beta_y", ")", "# Step 7", "gamma_1", "=", "_partial_sum_2d", "(", "x", ",", "y", ",", "np", ".", "ones", "(", "n", ",", "dtype", "=", "x", ".", "dtype", ")", ")", "gamma_x", "=", "_partial_sum_2d", "(", "x", ",", "y", ",", "x", ")", "gamma_y", "=", "_partial_sum_2d", "(", "x", ",", "y", ",", "y", ")", "gamma_xy", "=", "_partial_sum_2d", "(", "x", ",", "y", ",", "x", "*", "y", ")", "# Step 8", "aijbij", "=", "np", ".", "sum", "(", "x", "*", "y", "*", "gamma_1", "+", "gamma_xy", "-", "x", "*", "gamma_y", "-", "y", "*", "gamma_x", ")", "if", "unbiased", ":", "d3", "=", "(", "n", "-", "3", ")", "d2", "=", "(", "n", "-", "2", ")", "d1", "=", "(", "n", "-", "1", ")", "else", ":", "d3", "=", "d2", "=", "d1", "=", "n", "# Step 9", "d_cov", "=", "(", "aijbij", "/", "n", "/", "d3", "-", "2", "*", "sum_ab", "/", "n", "/", "d2", "/", "d3", "+", "a_dot_dot", "/", "n", "*", "b_dot_dot", "/", "d1", "/", "d2", "/", "d3", ")", "return", "d_cov" ]
Fast algorithm for the squared distance covariance.
[ "Fast", "algorithm", "for", "the", "squared", "distance", "covariance", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L229-L303
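For one-dimensional samples with more than three observations this O(n log n) path should agree, up to floating point, with the naive O(n^2) estimator. A quick consistency check through the public ``dcor.distance_covariance_sqr`` (assuming, as the ravel above suggests, that plain one-dimensional arrays are accepted):

import numpy as np
from scipy.spatial.distance import cdist
import dcor

def double_center(d):
    return d - d.mean(axis=1, keepdims=True) - d.mean(axis=0, keepdims=True) + d.mean()

rng = np.random.default_rng(42)
x = rng.normal(size=100)                       # 1-d and n > 3, so the fast branch applies
y = x ** 2 + rng.normal(scale=0.1, size=100)

a = double_center(cdist(x[:, None], x[:, None]))
b = double_center(cdist(y[:, None], y[:, None]))
naive = np.mean(a * b)

print(np.isclose(naive, dcor.distance_covariance_sqr(x, y)))  # True if both code paths agree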
vnmabus/dcor
dcor/_dcor.py
_distance_stats_sqr_fast_generic
def _distance_stats_sqr_fast_generic(x, y, dcov_function): """Compute the distance stats using the fast algorithm.""" covariance_xy_sqr = dcov_function(x, y) variance_x_sqr = dcov_function(x, x) variance_y_sqr = dcov_function(y, y) denominator_sqr_signed = variance_x_sqr * variance_y_sqr denominator_sqr = np.absolute(denominator_sqr_signed) denominator = _sqrt(denominator_sqr) # Comparisons using a tolerance can change results if the # covariance has a similar order of magnitude if denominator == 0.0: correlation_xy_sqr = denominator.dtype.type(0) else: correlation_xy_sqr = covariance_xy_sqr / denominator return Stats(covariance_xy=covariance_xy_sqr, correlation_xy=correlation_xy_sqr, variance_x=variance_x_sqr, variance_y=variance_y_sqr)
python
def _distance_stats_sqr_fast_generic(x, y, dcov_function): """Compute the distance stats using the fast algorithm.""" covariance_xy_sqr = dcov_function(x, y) variance_x_sqr = dcov_function(x, x) variance_y_sqr = dcov_function(y, y) denominator_sqr_signed = variance_x_sqr * variance_y_sqr denominator_sqr = np.absolute(denominator_sqr_signed) denominator = _sqrt(denominator_sqr) # Comparisons using a tolerance can change results if the # covariance has a similar order of magnitude if denominator == 0.0: correlation_xy_sqr = denominator.dtype.type(0) else: correlation_xy_sqr = covariance_xy_sqr / denominator return Stats(covariance_xy=covariance_xy_sqr, correlation_xy=correlation_xy_sqr, variance_x=variance_x_sqr, variance_y=variance_y_sqr)
[ "def", "_distance_stats_sqr_fast_generic", "(", "x", ",", "y", ",", "dcov_function", ")", ":", "covariance_xy_sqr", "=", "dcov_function", "(", "x", ",", "y", ")", "variance_x_sqr", "=", "dcov_function", "(", "x", ",", "x", ")", "variance_y_sqr", "=", "dcov_function", "(", "y", ",", "y", ")", "denominator_sqr_signed", "=", "variance_x_sqr", "*", "variance_y_sqr", "denominator_sqr", "=", "np", ".", "absolute", "(", "denominator_sqr_signed", ")", "denominator", "=", "_sqrt", "(", "denominator_sqr", ")", "# Comparisons using a tolerance can change results if the", "# covariance has a similar order of magnitude", "if", "denominator", "==", "0.0", ":", "correlation_xy_sqr", "=", "denominator", ".", "dtype", ".", "type", "(", "0", ")", "else", ":", "correlation_xy_sqr", "=", "covariance_xy_sqr", "/", "denominator", "return", "Stats", "(", "covariance_xy", "=", "covariance_xy_sqr", ",", "correlation_xy", "=", "correlation_xy_sqr", ",", "variance_x", "=", "variance_x_sqr", ",", "variance_y", "=", "variance_y_sqr", ")" ]
Compute the distance stats using the fast algorithm.
[ "Compute", "the", "distance", "stats", "using", "the", "fast", "algorithm", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L316-L335
vnmabus/dcor
dcor/_dcor.py
distance_covariance_sqr
def distance_covariance_sqr(x, y, **kwargs): """ distance_covariance_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Biased estimator of the squared distance covariance. See Also -------- distance_covariance u_distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_covariance_sqr(a, a) 52.0 >>> dcor.distance_covariance_sqr(a, b) 1.0 >>> dcor.distance_covariance_sqr(b, b) 0.25 >>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.3705904... """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_covariance_sqr_fast(x, y) else: return _distance_covariance_sqr_naive(x, y, **kwargs)
python
def distance_covariance_sqr(x, y, **kwargs): """ distance_covariance_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Biased estimator of the squared distance covariance. See Also -------- distance_covariance u_distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_covariance_sqr(a, a) 52.0 >>> dcor.distance_covariance_sqr(a, b) 1.0 >>> dcor.distance_covariance_sqr(b, b) 0.25 >>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.3705904... """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_covariance_sqr_fast(x, y) else: return _distance_covariance_sqr_naive(x, y, **kwargs)
[ "def", "distance_covariance_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_distance_covariance_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_distance_covariance_sqr_naive", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
distance_covariance_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Biased estimator of the squared distance covariance. See Also -------- distance_covariance u_distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_covariance_sqr(a, a) 52.0 >>> dcor.distance_covariance_sqr(a, b) 1.0 >>> dcor.distance_covariance_sqr(b, b) 0.25 >>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.3705904...
[ "distance_covariance_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L360-L417
vnmabus/dcor
dcor/_dcor.py
u_distance_covariance_sqr
def u_distance_covariance_sqr(x, y, **kwargs): """ u_distance_covariance_sqr(x, y, *, exponent=1) Computes the unbiased estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the unbiased estimator of the squared distance covariance. See Also -------- distance_covariance distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_covariance_sqr(a, a) # doctest: +ELLIPSIS 42.6666666... >>> dcor.u_distance_covariance_sqr(a, b) # doctest: +ELLIPSIS -2.6666666... >>> dcor.u_distance_covariance_sqr(b, b) # doctest: +ELLIPSIS 0.6666666... >>> dcor.u_distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS -0.2996598... """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_covariance_sqr_fast(x, y) else: return _u_distance_covariance_sqr_naive(x, y, **kwargs)
python
def u_distance_covariance_sqr(x, y, **kwargs): """ u_distance_covariance_sqr(x, y, *, exponent=1) Computes the unbiased estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the unbiased estimator of the squared distance covariance. See Also -------- distance_covariance distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_covariance_sqr(a, a) # doctest: +ELLIPSIS 42.6666666... >>> dcor.u_distance_covariance_sqr(a, b) # doctest: +ELLIPSIS -2.6666666... >>> dcor.u_distance_covariance_sqr(b, b) # doctest: +ELLIPSIS 0.6666666... >>> dcor.u_distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS -0.2996598... """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_covariance_sqr_fast(x, y) else: return _u_distance_covariance_sqr_naive(x, y, **kwargs)
[ "def", "u_distance_covariance_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_u_distance_covariance_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_u_distance_covariance_sqr_naive", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
u_distance_covariance_sqr(x, y, *, exponent=1) Computes the unbiased estimator for the squared distance covariance between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the unbiased estimator of the squared distance covariance. See Also -------- distance_covariance distance_covariance_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_covariance_sqr(a, a) # doctest: +ELLIPSIS 42.6666666... >>> dcor.u_distance_covariance_sqr(a, b) # doctest: +ELLIPSIS -2.6666666... >>> dcor.u_distance_covariance_sqr(b, b) # doctest: +ELLIPSIS 0.6666666... >>> dcor.u_distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS -0.2996598...
[ "u_distance_covariance_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L420-L477
vnmabus/dcor
dcor/_dcor.py
distance_stats_sqr
def distance_stats_sqr(x, y, **kwargs): """ distance_stats_sqr(x, y, *, exponent=1) Computes the usual (biased) estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- distance_covariance_sqr distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0, variance_y=52.0) >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.2773500..., variance_x=52.0, variance_y=0.25) >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25, variance_y=0.25) >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308..., variance_x=2.7209220..., variance_y=0.25) """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_stats_sqr_fast(x, y) else: return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_distance_matrix, product=mean_product, **kwargs)
python
def distance_stats_sqr(x, y, **kwargs): """ distance_stats_sqr(x, y, *, exponent=1) Computes the usual (biased) estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- distance_covariance_sqr distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0, variance_y=52.0) >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.2773500..., variance_x=52.0, variance_y=0.25) >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25, variance_y=0.25) >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308..., variance_x=2.7209220..., variance_y=0.25) """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_stats_sqr_fast(x, y) else: return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_distance_matrix, product=mean_product, **kwargs)
[ "def", "distance_stats_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_distance_stats_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_distance_sqr_stats_naive_generic", "(", "x", ",", "y", ",", "matrix_centered", "=", "_distance_matrix", ",", "product", "=", "mean_product", ",", "*", "*", "kwargs", ")" ]
distance_stats_sqr(x, y, *, exponent=1) Computes the usual (biased) estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- distance_covariance_sqr distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0, variance_y=52.0) >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.2773500..., variance_x=52.0, variance_y=0.25) >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25, variance_y=0.25) >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308..., variance_x=2.7209220..., variance_y=0.25)
[ "distance_stats_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L537-L609
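The doctest values above can be reproduced with the naive biased estimator: double-center each pairwise distance matrix and average the entrywise products. The sketch below is an independent re-derivation under that textbook definition, not dcor's internal code path (which may take the fast algorithm instead), and the helper name is made up.

import numpy as np
from scipy.spatial import distance

def naive_distance_stats_sqr(x, y):
    # Pairwise Euclidean distance matrices of the two samples.
    a = distance.squareform(distance.pdist(x))
    b = distance.squareform(distance.pdist(y))
    # Double centering: subtract row and column means, add back the grand mean.
    A = a - a.mean(axis=0) - a.mean(axis=1)[:, None] + a.mean()
    B = b - b.mean(axis=0) - b.mean(axis=1)[:, None] + b.mean()
    cov_xy = (A * B).mean()    # biased squared distance covariance
    var_x = (A * A).mean()     # biased squared distance variances
    var_y = (B * B).mean()
    denom = np.sqrt(var_x * var_y)
    corr_xy = cov_xy / denom if denom > 0 else 0.0
    return cov_xy, corr_xy, var_x, var_y

a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.array([[1], [0], [0], [1]])
print(naive_distance_stats_sqr(a, b))   # approximately (1.0, 0.27735, 52.0, 0.25)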
vnmabus/dcor
dcor/_dcor.py
u_distance_stats_sqr
def u_distance_stats_sqr(x, y, **kwargs): """ u_distance_stats_sqr(x, y, *, exponent=1) Computes the unbiased estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- u_distance_covariance_sqr u_distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=42.6666666..., correlation_xy=1.0, variance_x=42.6666666..., variance_y=42.6666666...) >>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5, variance_x=42.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6666666..., correlation_xy=1.0, variance_x=0.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479..., variance_x=0.8209855..., variance_y=0.6666666...) """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_stats_sqr_fast(x, y) else: return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_u_distance_matrix, product=u_product, **kwargs)
python
def u_distance_stats_sqr(x, y, **kwargs): """ u_distance_stats_sqr(x, y, *, exponent=1) Computes the unbiased estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- u_distance_covariance_sqr u_distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=42.6666666..., correlation_xy=1.0, variance_x=42.6666666..., variance_y=42.6666666...) >>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5, variance_x=42.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6666666..., correlation_xy=1.0, variance_x=0.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479..., variance_x=0.8209855..., variance_y=0.6666666...) """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_stats_sqr_fast(x, y) else: return _distance_sqr_stats_naive_generic( x, y, matrix_centered=_u_distance_matrix, product=u_product, **kwargs)
[ "def", "u_distance_stats_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_u_distance_stats_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_distance_sqr_stats_naive_generic", "(", "x", ",", "y", ",", "matrix_centered", "=", "_u_distance_matrix", ",", "product", "=", "u_product", ",", "*", "*", "kwargs", ")" ]
u_distance_stats_sqr(x, y, *, exponent=1) Computes the unbiased estimators for the squared distance covariance and squared distance correlation between two random vectors, and the individual squared distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Squared distance covariance, squared distance correlation, squared distance variance of the first random vector and squared distance variance of the second random vector. See Also -------- u_distance_covariance_sqr u_distance_correlation_sqr Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=42.6666666..., correlation_xy=1.0, variance_x=42.6666666..., variance_y=42.6666666...) >>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5, variance_x=42.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6666666..., correlation_xy=1.0, variance_x=0.6666666..., variance_y=0.6666666...) >>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479..., variance_x=0.8209855..., variance_y=0.6666666...)
[ "u_distance_stats_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L612-L687
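The unbiased statistics replace double centering with U-centering. Assuming the usual Székely-Rizzo definition (off-diagonal entries corrected with 1/(n-2) row and column terms, a zero diagonal, and a 1/(n(n-3)) scaling of the entrywise product), a rough sketch looks as follows; both helper names are invented and this is not the fast path dcor actually uses.

import numpy as np
from scipy.spatial import distance

def u_centered(d):
    # U-centering of a pairwise distance matrix; the diagonal is set to zero.
    n = d.shape[0]
    row = d.sum(axis=1, keepdims=True) / (n - 2)
    col = d.sum(axis=0, keepdims=True) / (n - 2)
    total = d.sum() / ((n - 1) * (n - 2))
    u = d - row - col + total
    np.fill_diagonal(u, 0)
    return u

def naive_u_distance_cov_sqr(x, y):
    n = x.shape[0]
    A = u_centered(distance.squareform(distance.pdist(x)))
    B = u_centered(distance.squareform(distance.pdist(y)))
    # U-statistic estimator: scaled sum of the entrywise products.
    return (A * B).sum() / (n * (n - 3))

b = np.array([[1], [0], [0], [1]])
print(naive_u_distance_cov_sqr(b, b))   # approximately 0.6666..., as in the doctest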
vnmabus/dcor
dcor/_dcor.py
distance_stats
def distance_stats(x, y, **kwargs): """ distance_stats(x, y, *, exponent=1) Computes the usual (biased) estimators for the distance covariance and distance correlation between two random vectors, and the individual distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Distance covariance, distance correlation, distance variance of the first random vector and distance variance of the second random vector. See Also -------- distance_covariance distance_correlation Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=7.2111025..., correlation_xy=1.0, variance_x=7.2111025..., variance_y=7.2111025...) >>> dcor.distance_stats(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.5266403..., variance_x=7.2111025..., variance_y=0.5) >>> dcor.distance_stats(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.5, correlation_xy=1.0, variance_x=0.5, variance_y=0.5) >>> dcor.distance_stats(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6087614..., correlation_xy=0.6703214..., variance_x=1.6495217..., variance_y=0.5) """ return Stats(*[_sqrt(s) for s in distance_stats_sqr(x, y, **kwargs)])
python
def distance_stats(x, y, **kwargs): """ distance_stats(x, y, *, exponent=1) Computes the usual (biased) estimators for the distance covariance and distance correlation between two random vectors, and the individual distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Distance covariance, distance correlation, distance variance of the first random vector and distance variance of the second random vector. See Also -------- distance_covariance distance_correlation Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=7.2111025..., correlation_xy=1.0, variance_x=7.2111025..., variance_y=7.2111025...) >>> dcor.distance_stats(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.5266403..., variance_x=7.2111025..., variance_y=0.5) >>> dcor.distance_stats(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.5, correlation_xy=1.0, variance_x=0.5, variance_y=0.5) >>> dcor.distance_stats(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6087614..., correlation_xy=0.6703214..., variance_x=1.6495217..., variance_y=0.5) """ return Stats(*[_sqrt(s) for s in distance_stats_sqr(x, y, **kwargs)])
[ "def", "distance_stats", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "Stats", "(", "*", "[", "_sqrt", "(", "s", ")", "for", "s", "in", "distance_stats_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", "]", ")" ]
distance_stats(x, y, *, exponent=1) Computes the usual (biased) estimators for the distance covariance and distance correlation between two random vectors, and the individual distance variances. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- Stats Distance covariance, distance correlation, distance variance of the first random vector and distance variance of the second random vector. See Also -------- distance_covariance distance_correlation Notes ----- It is less efficient to compute the statistics separately, rather than using this function, because some computations can be shared. The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_stats(a, a) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=7.2111025..., correlation_xy=1.0, variance_x=7.2111025..., variance_y=7.2111025...) >>> dcor.distance_stats(a, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=1.0, correlation_xy=0.5266403..., variance_x=7.2111025..., variance_y=0.5) >>> dcor.distance_stats(b, b) # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.5, correlation_xy=1.0, variance_x=0.5, variance_y=0.5) >>> dcor.distance_stats(a, b, exponent=0.5) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE Stats(covariance_xy=0.6087614..., correlation_xy=0.6703214..., variance_x=1.6495217..., variance_y=0.5)
[ "distance_stats", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L690-L755
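Since the body above just takes elementwise square roots of distance_stats_sqr, the two entry points can be cross-checked directly; a short usage sketch:

import numpy as np
import dcor

a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.array([[1], [0], [0], [1]])

sqr = dcor.distance_stats_sqr(a, b)
plain = dcor.distance_stats(a, b)
# Each non-squared statistic is the square root of its squared counterpart.
assert np.isclose(plain.covariance_xy, np.sqrt(sqr.covariance_xy))
assert np.isclose(plain.variance_x, np.sqrt(sqr.variance_x))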
vnmabus/dcor
dcor/_dcor.py
distance_correlation_sqr
def distance_correlation_sqr(x, y, **kwargs): """ distance_correlation_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the biased estimator of the squared distance correlation. See Also -------- distance_correlation u_distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_sqr(a, a) 1.0 >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS 0.2773500... >>> dcor.distance_correlation_sqr(b, b) 1.0 >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.4493308... """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_correlation_sqr_fast(x, y) else: return _distance_correlation_sqr_naive(x, y, **kwargs)
python
def distance_correlation_sqr(x, y, **kwargs): """ distance_correlation_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the biased estimator of the squared distance correlation. See Also -------- distance_correlation u_distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_sqr(a, a) 1.0 >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS 0.2773500... >>> dcor.distance_correlation_sqr(b, b) 1.0 >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.4493308... """ if _can_use_fast_algorithm(x, y, **kwargs): return _distance_correlation_sqr_fast(x, y) else: return _distance_correlation_sqr_naive(x, y, **kwargs)
[ "def", "distance_correlation_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_distance_correlation_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_distance_correlation_sqr_naive", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
distance_correlation_sqr(x, y, *, exponent=1) Computes the usual (biased) estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the biased estimator of the squared distance correlation. See Also -------- distance_correlation u_distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_sqr(a, a) 1.0 >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS 0.2773500... >>> dcor.distance_correlation_sqr(b, b) 1.0 >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS 0.4493308...
[ "distance_correlation_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L758-L815
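A quick consistency check, using the standard definition of distance correlation: the squared distance correlation equals the squared distance covariance normalised by the geometric mean of the squared distance variances, all of which distance_stats_sqr returns in one call.

import numpy as np
import dcor

a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.array([[1], [0], [0], [1]])

stats = dcor.distance_stats_sqr(a, b)
expected = stats.covariance_xy / np.sqrt(stats.variance_x * stats.variance_y)
# 1.0 / sqrt(52.0 * 0.25) is roughly 0.27735, matching the doctest above.
assert np.isclose(dcor.distance_correlation_sqr(a, b), expected)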
vnmabus/dcor
dcor/_dcor.py
u_distance_correlation_sqr
def u_distance_correlation_sqr(x, y, **kwargs): """ u_distance_correlation_sqr(x, y, *, exponent=1) Computes the bias-corrected estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the bias-corrected estimator of the squared distance correlation. See Also -------- distance_correlation distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_correlation_sqr(a, a) 1.0 >>> dcor.u_distance_correlation_sqr(a, b) -0.5 >>> dcor.u_distance_correlation_sqr(b, b) 1.0 >>> dcor.u_distance_correlation_sqr(a, b, exponent=0.5) ... # doctest: +ELLIPSIS -0.4050479... """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_correlation_sqr_fast(x, y) else: return _u_distance_correlation_sqr_naive(x, y, **kwargs)
python
def u_distance_correlation_sqr(x, y, **kwargs): """ u_distance_correlation_sqr(x, y, *, exponent=1) Computes the bias-corrected estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the bias-corrected estimator of the squared distance correlation. See Also -------- distance_correlation distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_correlation_sqr(a, a) 1.0 >>> dcor.u_distance_correlation_sqr(a, b) -0.5 >>> dcor.u_distance_correlation_sqr(b, b) 1.0 >>> dcor.u_distance_correlation_sqr(a, b, exponent=0.5) ... # doctest: +ELLIPSIS -0.4050479... """ if _can_use_fast_algorithm(x, y, **kwargs): return _u_distance_correlation_sqr_fast(x, y) else: return _u_distance_correlation_sqr_naive(x, y, **kwargs)
[ "def", "u_distance_correlation_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_u_distance_correlation_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_u_distance_correlation_sqr_naive", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
u_distance_correlation_sqr(x, y, *, exponent=1) Computes the bias-corrected estimator for the squared distance correlation between two random vectors. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. Returns ------- numpy scalar Value of the bias-corrected estimator of the squared distance correlation. See Also -------- distance_correlation distance_correlation_sqr Notes ----- The algorithm uses the fast distance covariance algorithm proposed in :cite:`b-fast_distance_correlation` when possible. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.u_distance_correlation_sqr(a, a) 1.0 >>> dcor.u_distance_correlation_sqr(a, b) -0.5 >>> dcor.u_distance_correlation_sqr(b, b) 1.0 >>> dcor.u_distance_correlation_sqr(a, b, exponent=0.5) ... # doctest: +ELLIPSIS -0.4050479...
[ "u_distance_correlation_sqr", "(", "x", "y", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L818-L877
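One practical difference worth remembering: the biased estimator is non-negative by construction, while the bias-corrected one can go negative on small samples, as the doctest shows. A short comparison with the same arrays:

import numpy as np
import dcor

a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.array([[1], [0], [0], [1]])

print(dcor.distance_correlation_sqr(a, b))    # about 0.277, never below zero
print(dcor.u_distance_correlation_sqr(a, b))  # -0.5 for this sample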
vnmabus/dcor
dcor/_dcor.py
distance_correlation_af_inv_sqr
def distance_correlation_af_inv_sqr(x, y): """ Square of the affinely invariant distance correlation. Computes the estimator for the square of the affinely invariant distance correlation between two random vectors. .. warning:: The return value of this function is undefined when the covariance matrix of :math:`x` or :math:`y` is singular. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the squared affinely invariant distance correlation. See Also -------- distance_correlation u_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 3, 2, 5], ... [5, 7, 6, 8], ... [9, 10, 11, 12], ... [13, 15, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_af_inv_sqr(a, a) 1.0 >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS 0.5773502... >>> dcor.distance_correlation_af_inv_sqr(b, b) 1.0 """ x = _af_inv_scaled(x) y = _af_inv_scaled(y) correlation = distance_correlation_sqr(x, y) return 0 if np.isnan(correlation) else correlation
python
def distance_correlation_af_inv_sqr(x, y): """ Square of the affinely invariant distance correlation. Computes the estimator for the square of the affinely invariant distance correlation between two random vectors. .. warning:: The return value of this function is undefined when the covariance matrix of :math:`x` or :math:`y` is singular. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the squared affinely invariant distance correlation. See Also -------- distance_correlation u_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 3, 2, 5], ... [5, 7, 6, 8], ... [9, 10, 11, 12], ... [13, 15, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_af_inv_sqr(a, a) 1.0 >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS 0.5773502... >>> dcor.distance_correlation_af_inv_sqr(b, b) 1.0 """ x = _af_inv_scaled(x) y = _af_inv_scaled(y) correlation = distance_correlation_sqr(x, y) return 0 if np.isnan(correlation) else correlation
[ "def", "distance_correlation_af_inv_sqr", "(", "x", ",", "y", ")", ":", "x", "=", "_af_inv_scaled", "(", "x", ")", "y", "=", "_af_inv_scaled", "(", "y", ")", "correlation", "=", "distance_correlation_sqr", "(", "x", ",", "y", ")", "return", "0", "if", "np", ".", "isnan", "(", "correlation", ")", "else", "correlation" ]
Square of the affinely invariant distance correlation. Computes the estimator for the square of the affinely invariant distance correlation between two random vectors. .. warning:: The return value of this function is undefined when the covariance matrix of :math:`x` or :math:`y` is singular. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the squared affinely invariant distance correlation. See Also -------- distance_correlation u_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 3, 2, 5], ... [5, 7, 6, 8], ... [9, 10, 11, 12], ... [13, 15, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_af_inv_sqr(a, a) 1.0 >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS 0.5773502... >>> dcor.distance_correlation_af_inv_sqr(b, b) 1.0
[ "Square", "of", "the", "affinely", "invariant", "distance", "correlation", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L937-L988
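The rescaling above happens inside a private helper (_af_inv_scaled) that is not shown in this excerpt. Assuming it follows the textbook definition of the affinely invariant distance correlation, where each sample is multiplied by the inverse square root of its sample covariance matrix, an equivalent computation can be sketched as below; whiten and inv_sqrt are invented names and only approximate what the helper may do.

import numpy as np
import dcor

def inv_sqrt(cov):
    # Inverse square root of a symmetric positive-definite matrix.
    w, v = np.linalg.eigh(cov)
    return v @ np.diag(1.0 / np.sqrt(w)) @ v.T

def whiten(x):
    x = np.asarray(x, dtype=float)
    cov = np.atleast_2d(np.cov(x, rowvar=False))
    return x @ inv_sqrt(cov)

rng = np.random.RandomState(0)
x = rng.normal(size=(50, 3))
y = x[:, :1] + rng.normal(size=(50, 1))

# If the assumption holds, both numbers should agree up to numerical error.
print(dcor.distance_correlation_sqr(whiten(x), whiten(y)))
print(dcor.distance_correlation_af_inv_sqr(x, y))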
vnmabus/dcor
dcor/_pairwise.py
pairwise
def pairwise(function, x, y=None, **kwargs): """ pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) """ return _pairwise_imp(function, x, y, **kwargs)
python
def pairwise(function, x, y=None, **kwargs): """ pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) """ return _pairwise_imp(function, x, y, **kwargs)
[ "def", "pairwise", "(", "function", ",", "x", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_pairwise_imp", "(", "function", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
pairwise(function, x, y=None, *, pool=None, is_symmetric=None, **kwargs) Computes a dependency measure between each pair of elements. Parameters ---------- function: Dependency measure function. x: iterable of array_like First list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second list of random vectors. The columns of each vector correspond with the individual random variables while the rows are individual instances of the random vector. If None, the :math:`x` array is used. pool: object implementing multiprocessing.Pool interface Pool of processes/threads used to delegate computations. is_symmetric: bool or None If True, the dependency function is assumed to be symmetric. If False, it is assumed non-symmetric. If None (the default value), the attribute :code:`is_symmetric` of the function object is inspected to determine if the function is symmetric. If this attribute is absent, the function is assumed to not be symmetric. kwargs: dictionary Additional options necessary. Returns ------- numpy ndarray A :math:`n \times m` matrix where the :math:`(i, j)`-th entry is the dependency between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = [np.array([[1, 1], ... [2, 4], ... [3, 8], ... [4, 16]]), ... np.array([[9, 10], ... [11, 12], ... [13, 14], ... [15, 16]]) ... ] >>> b = [np.array([[0, 1], ... [3, 1], ... [6, 2], ... [9, 3]]), ... np.array([[5, 1], ... [8, 1], ... [13, 1], ... [21, 1]]) ... ] >>> dcor.pairwise(dcor.distance_covariance, a) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_correlation, a, b) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) A pool object can be used to improve performance for a large number of computations: >>> import multiprocessing >>> pool = multiprocessing.Pool() >>> dcor.pairwise(dcor.distance_correlation, a, b, pool=pool) array([[0.98182263, 0.99901855], [0.99989466, 0.98320103]]) It is possible to force to consider that the function is symmetric or not (useful only if :math:`y` is :code:`None`): >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=True) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]]) >>> dcor.pairwise(dcor.distance_covariance, a, is_symmetric=False) array([[4.61229635, 3.35991482], [3.35991482, 2.54950976]])
[ "pairwise", "(", "function", "x", "y", "=", "None", "*", "pool", "=", "None", "is_symmetric", "=", "None", "**", "kwargs", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_pairwise.py#L10-L94
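The function argument is not restricted to dcor's own estimators: per the docstring, any dependency measure mapping two samples to a number can be plugged in (forcing is_symmetric=True is only useful when y is omitted). A small sketch with a hand-rolled measure; abs_pearson is a made-up example, not part of dcor.

import numpy as np
import dcor

def abs_pearson(x, y):
    # Absolute Pearson correlation of the flattened samples.
    return abs(np.corrcoef(np.ravel(x), np.ravel(y))[0, 1])

samples = [
    np.array([[1.0], [2.0], [3.0], [4.0]]),
    np.array([[1.0], [4.0], [9.0], [16.0]]),
    np.array([[0.0], [1.0], [0.0], [1.0]]),
]

print(dcor.pairwise(abs_pearson, samples))   # 3 x 3 dependency matrix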
vnmabus/dcor
dcor/_pairwise.py
_pairwise_imp
def _pairwise_imp(function, x, y=None, pool=None, is_symmetric=None, **kwargs): """ Real implementation of :func:`pairwise`. This function is used to make several parameters keyword-only in Python 2. """ map_function = pool.map if pool else map if is_symmetric is None: is_symmetric = getattr(function, 'is_symmetric', False) pairwise_function = getattr(function, 'pairwise_function', None) if pairwise_function: return pairwise_function(x, y, pool=pool, is_symmetric=is_symmetric, **kwargs) if y is None and is_symmetric: partial = functools.partial(_map_aux_func_symmetric, x=x, function=function) dependencies = np.array(list(map_function(partial, enumerate(x)))) for i in range(len(x)): for j in range(i, len(x)): dependencies[j, i] = dependencies[i, j] return dependencies else: if y is None: y = x partial = functools.partial(_map_aux_func, y=y, function=function) return np.array(list(map_function(partial, x)))
python
def _pairwise_imp(function, x, y=None, pool=None, is_symmetric=None, **kwargs): """ Real implementation of :func:`pairwise`. This function is used to make several parameters keyword-only in Python 2. """ map_function = pool.map if pool else map if is_symmetric is None: is_symmetric = getattr(function, 'is_symmetric', False) pairwise_function = getattr(function, 'pairwise_function', None) if pairwise_function: return pairwise_function(x, y, pool=pool, is_symmetric=is_symmetric, **kwargs) if y is None and is_symmetric: partial = functools.partial(_map_aux_func_symmetric, x=x, function=function) dependencies = np.array(list(map_function(partial, enumerate(x)))) for i in range(len(x)): for j in range(i, len(x)): dependencies[j, i] = dependencies[i, j] return dependencies else: if y is None: y = x partial = functools.partial(_map_aux_func, y=y, function=function) return np.array(list(map_function(partial, x)))
[ "def", "_pairwise_imp", "(", "function", ",", "x", ",", "y", "=", "None", ",", "pool", "=", "None", ",", "is_symmetric", "=", "None", ",", "*", "*", "kwargs", ")", ":", "map_function", "=", "pool", ".", "map", "if", "pool", "else", "map", "if", "is_symmetric", "is", "None", ":", "is_symmetric", "=", "getattr", "(", "function", ",", "'is_symmetric'", ",", "False", ")", "pairwise_function", "=", "getattr", "(", "function", ",", "'pairwise_function'", ",", "None", ")", "if", "pairwise_function", ":", "return", "pairwise_function", "(", "x", ",", "y", ",", "pool", "=", "pool", ",", "is_symmetric", "=", "is_symmetric", ",", "*", "*", "kwargs", ")", "if", "y", "is", "None", "and", "is_symmetric", ":", "partial", "=", "functools", ".", "partial", "(", "_map_aux_func_symmetric", ",", "x", "=", "x", ",", "function", "=", "function", ")", "dependencies", "=", "np", ".", "array", "(", "list", "(", "map_function", "(", "partial", ",", "enumerate", "(", "x", ")", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "x", ")", ")", ":", "for", "j", "in", "range", "(", "i", ",", "len", "(", "x", ")", ")", ":", "dependencies", "[", "j", ",", "i", "]", "=", "dependencies", "[", "i", ",", "j", "]", "return", "dependencies", "else", ":", "if", "y", "is", "None", ":", "y", "=", "x", "partial", "=", "functools", ".", "partial", "(", "_map_aux_func", ",", "y", "=", "y", ",", "function", "=", "function", ")", "return", "np", ".", "array", "(", "list", "(", "map_function", "(", "partial", ",", "x", ")", ")", ")" ]
Real implementation of :func:`pairwise`. This function is used to make several parameters keyword-only in Python 2.
[ "Real", "implementation", "of", ":", "func", ":", "pairwise", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_pairwise.py#L97-L134
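For a symmetric measure with y omitted, the implementation above computes only the upper triangle through the map step and mirrors it afterwards. The same idea in a stripped-down serial form (an illustration of the pattern, not dcor's actual helper):

import numpy as np

def symmetric_pairwise(function, samples):
    n = len(samples)
    out = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):          # upper triangle only
            out[i, j] = function(samples[i], samples[j])
            out[j, i] = out[i, j]      # mirror across the diagonal
    return out

samples = [np.arange(4.0), np.arange(4.0) ** 2, np.ones(4)]
print(symmetric_pairwise(lambda u, v: float(np.dot(u, v)), samples))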
vnmabus/dcor
dcor/_utils.py
_jit
def _jit(function): """ Compile a function using a jit compiler. The function is always compiled to check errors, but is only used outside tests, so that code coverage analysis can be performed in jitted functions. The tests set sys._called_from_test in conftest.py. """ import sys compiled = numba.jit(function) if hasattr(sys, '_called_from_test'): return function else: # pragma: no cover return compiled
python
def _jit(function): """ Compile a function using a jit compiler. The function is always compiled to check errors, but is only used outside tests, so that code coverage analysis can be performed in jitted functions. The tests set sys._called_from_test in conftest.py. """ import sys compiled = numba.jit(function) if hasattr(sys, '_called_from_test'): return function else: # pragma: no cover return compiled
[ "def", "_jit", "(", "function", ")", ":", "import", "sys", "compiled", "=", "numba", ".", "jit", "(", "function", ")", "if", "hasattr", "(", "sys", ",", "'_called_from_test'", ")", ":", "return", "function", "else", ":", "# pragma: no cover", "return", "compiled" ]
Compile a function using a jit compiler. The function is always compiled to check errors, but is only used outside tests, so that code coverage analysis can be performed in jitted functions. The tests set sys._called_from_test in conftest.py.
[ "Compile", "a", "function", "using", "a", "jit", "compiler", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_utils.py#L10-L27
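The pattern is small enough to reuse elsewhere: build the numba dispatcher unconditionally (the docstring's stated rationale is that errors still get caught), but hand back the plain Python function while tests run so coverage tools can trace it. A sketch of the same decorator outside dcor; the flag name mirrors the one the docstring says conftest.py sets.

import sys
import numba

def jit_unless_testing(function):
    compiled = numba.jit(function)
    # Under the test suite, conftest.py sets sys._called_from_test, so the
    # uncompiled function is returned and stays visible to coverage.
    return function if hasattr(sys, '_called_from_test') else compiled

@jit_unless_testing
def dot(u, v):
    total = 0.0
    for i in range(len(u)):
        total += u[i] * v[i]
    return total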
vnmabus/dcor
dcor/_utils.py
_sqrt
def _sqrt(x): """ Return square root of an ndarray. This sqrt function for ndarrays tries to use the exponentiation operator if the objects stored do not supply a sqrt method. """ x = np.clip(x, a_min=0, a_max=None) try: return np.sqrt(x) except AttributeError: exponent = 0.5 try: exponent = np.take(x, 0).from_float(exponent) except AttributeError: pass return x ** exponent
python
def _sqrt(x): """ Return square root of an ndarray. This sqrt function for ndarrays tries to use the exponentiation operator if the objects stored do not supply a sqrt method. """ x = np.clip(x, a_min=0, a_max=None) try: return np.sqrt(x) except AttributeError: exponent = 0.5 try: exponent = np.take(x, 0).from_float(exponent) except AttributeError: pass return x ** exponent
[ "def", "_sqrt", "(", "x", ")", ":", "x", "=", "np", ".", "clip", "(", "x", ",", "a_min", "=", "0", ",", "a_max", "=", "None", ")", "try", ":", "return", "np", ".", "sqrt", "(", "x", ")", "except", "AttributeError", ":", "exponent", "=", "0.5", "try", ":", "exponent", "=", "np", ".", "take", "(", "x", ",", "0", ")", ".", "from_float", "(", "exponent", ")", "except", "AttributeError", ":", "pass", "return", "x", "**", "exponent" ]
Return square root of an ndarray. This sqrt function for ndarrays tries to use the exponentiation operator if the objects stored do not supply a sqrt method.
[ "Return", "square", "root", "of", "an", "ndarray", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_utils.py#L30-L50
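Two details above are easy to miss: inputs are clipped at zero first (which keeps tiny negative values, for example from rounding error in the squared estimators, from turning into NaN), and objects without numpy sqrt support fall back to the ** 0.5 route. The clipping on its own:

import numpy as np

values = np.array([4.0, 0.0, -1e-16])                  # tiny negative from rounding
print(np.sqrt(np.clip(values, a_min=0, a_max=None)))   # [2. 0. 0.], no NaN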
vnmabus/dcor
dcor/_utils.py
_transform_to_2d
def _transform_to_2d(t): """Convert vectors to column matrices, to always have a 2d shape.""" t = np.asarray(t) dim = len(t.shape) assert dim <= 2 if dim < 2: t = np.atleast_2d(t).T return t
python
def _transform_to_2d(t): """Convert vectors to column matrices, to always have a 2d shape.""" t = np.asarray(t) dim = len(t.shape) assert dim <= 2 if dim < 2: t = np.atleast_2d(t).T return t
[ "def", "_transform_to_2d", "(", "t", ")", ":", "t", "=", "np", ".", "asarray", "(", "t", ")", "dim", "=", "len", "(", "t", ".", "shape", ")", "assert", "dim", "<=", "2", "if", "dim", "<", "2", ":", "t", "=", "np", ".", "atleast_2d", "(", "t", ")", ".", "T", "return", "t" ]
Convert vectors to column matrices, to always have a 2d shape.
[ "Convert", "vectors", "to", "column", "matrices", "to", "always", "have", "a", "2d", "shape", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_utils.py#L53-L63
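This helper is what lets later code assume a 2-d sample: a plain 1-d vector becomes a single-column matrix with one observation per row. The promotion is a one-liner:

import numpy as np

t = np.array([1.0, 0.0, 0.0, 1.0])    # 1-d sample of a scalar variable
column = np.atleast_2d(t).T           # shape (4, 1)
print(column.shape)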
vnmabus/dcor
dcor/_utils.py
_can_be_double
def _can_be_double(x): """ Return if the array can be safely converted to double. That happens when the dtype is a float with the same size of a double or narrower, or when is an integer that can be safely converted to double (if the roundtrip conversion works). """ return ((np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize <= np.dtype(float).itemsize) or (np.issubdtype(x.dtype, np.signedinteger) and np.can_cast(x, float)))
python
def _can_be_double(x): """ Return if the array can be safely converted to double. That happens when the dtype is a float with the same size of a double or narrower, or when is an integer that can be safely converted to double (if the roundtrip conversion works). """ return ((np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize <= np.dtype(float).itemsize) or (np.issubdtype(x.dtype, np.signedinteger) and np.can_cast(x, float)))
[ "def", "_can_be_double", "(", "x", ")", ":", "return", "(", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "floating", ")", "and", "x", ".", "dtype", ".", "itemsize", "<=", "np", ".", "dtype", "(", "float", ")", ".", "itemsize", ")", "or", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "signedinteger", ")", "and", "np", ".", "can_cast", "(", "x", ",", "float", ")", ")", ")" ]
Return if the array can be safely converted to double. That happens when the dtype is a float with the same size of a double or narrower, or when is an integer that can be safely converted to double (if the roundtrip conversion works).
[ "Return", "if", "the", "array", "can", "be", "safely", "converted", "to", "double", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_utils.py#L66-L78
vnmabus/dcor
dcor/distances.py
_cdist_naive
def _cdist_naive(x, y, exponent=1): """Pairwise distance, custom implementation.""" squared_norms = ((x[_np.newaxis, :, :] - y[:, _np.newaxis, :]) ** 2).sum(2) exponent = exponent / 2 try: exponent = squared_norms.take(0).from_float(exponent) except AttributeError: pass return squared_norms ** exponent
python
def _cdist_naive(x, y, exponent=1): """Pairwise distance, custom implementation.""" squared_norms = ((x[_np.newaxis, :, :] - y[:, _np.newaxis, :]) ** 2).sum(2) exponent = exponent / 2 try: exponent = squared_norms.take(0).from_float(exponent) except AttributeError: pass return squared_norms ** exponent
[ "def", "_cdist_naive", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "squared_norms", "=", "(", "(", "x", "[", "_np", ".", "newaxis", ",", ":", ",", ":", "]", "-", "y", "[", ":", ",", "_np", ".", "newaxis", ",", ":", "]", ")", "**", "2", ")", ".", "sum", "(", "2", ")", "exponent", "=", "exponent", "/", "2", "try", ":", "exponent", "=", "squared_norms", ".", "take", "(", "0", ")", ".", "from_float", "(", "exponent", ")", "except", "AttributeError", ":", "pass", "return", "squared_norms", "**", "exponent" ]
Pairwise distance, custom implementation.
[ "Pairwise", "distance", "custom", "implementation", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L18-L28
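The naive path leans on broadcasting: inserting axes so the difference array holds every pair of points, then summing squared components along the feature axis. Walking through the shapes makes it concrete (sketch with exponent 1):

import numpy as np

x = np.array([[1, 2], [3, 4], [5, 6]])   # 3 points in R^2
y = np.array([[0, 0], [1, 1]])           # 2 points in R^2

diff = x[np.newaxis, :, :] - y[:, np.newaxis, :]   # shape (2, 3, 2): all pairs
squared_norms = (diff ** 2).sum(axis=2)            # shape (2, 3)
print(squared_norms ** 0.5)                        # Euclidean distances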
vnmabus/dcor
dcor/distances.py
_pdist_scipy
def _pdist_scipy(x, exponent=1): """Pairwise distance between points in a set.""" metric = 'euclidean' if exponent != 1: metric = 'sqeuclidean' distances = _spatial.distance.pdist(x, metric=metric) distances = _spatial.distance.squareform(distances) if exponent != 1: distances **= exponent / 2 return distances
python
def _pdist_scipy(x, exponent=1): """Pairwise distance between points in a set.""" metric = 'euclidean' if exponent != 1: metric = 'sqeuclidean' distances = _spatial.distance.pdist(x, metric=metric) distances = _spatial.distance.squareform(distances) if exponent != 1: distances **= exponent / 2 return distances
[ "def", "_pdist_scipy", "(", "x", ",", "exponent", "=", "1", ")", ":", "metric", "=", "'euclidean'", "if", "exponent", "!=", "1", ":", "metric", "=", "'sqeuclidean'", "distances", "=", "_spatial", ".", "distance", ".", "pdist", "(", "x", ",", "metric", "=", "metric", ")", "distances", "=", "_spatial", ".", "distance", ".", "squareform", "(", "distances", ")", "if", "exponent", "!=", "1", ":", "distances", "**=", "exponent", "/", "2", "return", "distances" ]
Pairwise distance between points in a set.
[ "Pairwise", "distance", "between", "points", "in", "a", "set", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L31-L44
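The exponent handling avoids computing a root and then a power: for a non-unit exponent the squared Euclidean distances are requested from scipy and raised to exponent / 2 in one go. A quick equivalence check:

import numpy as np
from scipy.spatial import distance

x = np.array([[1.0, 2.0], [3.0, 4.0], [6.0, 8.0]])
exponent = 0.5

via_sq = distance.squareform(distance.pdist(x, metric='sqeuclidean')) ** (exponent / 2)
via_eu = distance.squareform(distance.pdist(x, metric='euclidean')) ** exponent
print(np.allclose(via_sq, via_eu))   # True: both give |x_i - x_j| ** exponent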
vnmabus/dcor
dcor/distances.py
_cdist_scipy
def _cdist_scipy(x, y, exponent=1): """Pairwise distance between the points in two sets.""" metric = 'euclidean' if exponent != 1: metric = 'sqeuclidean' distances = _spatial.distance.cdist(x, y, metric=metric) if exponent != 1: distances **= exponent / 2 return distances
python
def _cdist_scipy(x, y, exponent=1): """Pairwise distance between the points in two sets.""" metric = 'euclidean' if exponent != 1: metric = 'sqeuclidean' distances = _spatial.distance.cdist(x, y, metric=metric) if exponent != 1: distances **= exponent / 2 return distances
[ "def", "_cdist_scipy", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "metric", "=", "'euclidean'", "if", "exponent", "!=", "1", ":", "metric", "=", "'sqeuclidean'", "distances", "=", "_spatial", ".", "distance", ".", "cdist", "(", "x", ",", "y", ",", "metric", "=", "metric", ")", "if", "exponent", "!=", "1", ":", "distances", "**=", "exponent", "/", "2", "return", "distances" ]
Pairwise distance between the points in two sets.
[ "Pairwise", "distance", "between", "the", "points", "in", "two", "sets", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L47-L59
vnmabus/dcor
dcor/distances.py
_pdist
def _pdist(x, exponent=1): """ Pairwise distance between points in a set. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double. """ if _can_be_double(x): return _pdist_scipy(x, exponent) else: return _cdist_naive(x, x, exponent)
python
def _pdist(x, exponent=1): """ Pairwise distance between points in a set. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double. """ if _can_be_double(x): return _pdist_scipy(x, exponent) else: return _cdist_naive(x, x, exponent)
[ "def", "_pdist", "(", "x", ",", "exponent", "=", "1", ")", ":", "if", "_can_be_double", "(", "x", ")", ":", "return", "_pdist_scipy", "(", "x", ",", "exponent", ")", "else", ":", "return", "_cdist_naive", "(", "x", ",", "x", ",", "exponent", ")" ]
Pairwise distance between points in a set. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double.
[ "Pairwise", "distance", "between", "points", "in", "a", "set", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L62-L74
vnmabus/dcor
dcor/distances.py
_cdist
def _cdist(x, y, exponent=1): """ Pairwise distance between points in two sets. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double. """ if _can_be_double(x) and _can_be_double(y): return _cdist_scipy(x, y, exponent) else: return _cdist_naive(x, y, exponent)
python
def _cdist(x, y, exponent=1): """ Pairwise distance between points in two sets. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double. """ if _can_be_double(x) and _can_be_double(y): return _cdist_scipy(x, y, exponent) else: return _cdist_naive(x, y, exponent)
[ "def", "_cdist", "(", "x", ",", "y", ",", "exponent", "=", "1", ")", ":", "if", "_can_be_double", "(", "x", ")", "and", "_can_be_double", "(", "y", ")", ":", "return", "_cdist_scipy", "(", "x", ",", "y", ",", "exponent", ")", "else", ":", "return", "_cdist_naive", "(", "x", ",", "y", ",", "exponent", ")" ]
Pairwise distance between points in two sets. As Scipy converts every value to double, this wrapper uses a less efficient implementation if the original dtype can not be converted to double.
[ "Pairwise", "distance", "between", "points", "in", "two", "sets", "." ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L77-L89
vnmabus/dcor
dcor/distances.py
pairwise_distances
def pairwise_distances(x, y=None, **kwargs): r""" pairwise_distances(x, y=None, *, exponent=1) Pairwise distance between points. Return the pairwise distance between points in two sets, or in the same set if only one set is passed. Parameters ---------- x: array_like An :math:`n \times m` array of :math:`n` observations in a :math:`m`-dimensional space. y: array_like An :math:`l \times m` array of :math:`l` observations in a :math:`m`-dimensional space. If None, the distances will be computed between the points in :math:`x`. exponent: float Exponent of the Euclidean distance. Returns ------- numpy ndarray A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the distance between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[16, 15, 14, 13], ... [12, 11, 10, 9], ... [8, 7, 6, 5], ... [4, 3, 2, 1]]) >>> dcor.distances.pairwise_distances(a) array([[ 0., 8., 16., 24.], [ 8., 0., 8., 16.], [16., 8., 0., 8.], [24., 16., 8., 0.]]) >>> dcor.distances.pairwise_distances(a, b) array([[24.41311123, 16.61324773, 9.16515139, 4.47213595], [16.61324773, 9.16515139, 4.47213595, 9.16515139], [ 9.16515139, 4.47213595, 9.16515139, 16.61324773], [ 4.47213595, 9.16515139, 16.61324773, 24.41311123]]) """ x = _transform_to_2d(x) if y is None or y is x: return _pdist(x, **kwargs) else: y = _transform_to_2d(y) return _cdist(x, y, **kwargs)
python
def pairwise_distances(x, y=None, **kwargs): r""" pairwise_distances(x, y=None, *, exponent=1) Pairwise distance between points. Return the pairwise distance between points in two sets, or in the same set if only one set is passed. Parameters ---------- x: array_like An :math:`n \times m` array of :math:`n` observations in a :math:`m`-dimensional space. y: array_like An :math:`l \times m` array of :math:`l` observations in a :math:`m`-dimensional space. If None, the distances will be computed between the points in :math:`x`. exponent: float Exponent of the Euclidean distance. Returns ------- numpy ndarray A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the distance between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[16, 15, 14, 13], ... [12, 11, 10, 9], ... [8, 7, 6, 5], ... [4, 3, 2, 1]]) >>> dcor.distances.pairwise_distances(a) array([[ 0., 8., 16., 24.], [ 8., 0., 8., 16.], [16., 8., 0., 8.], [24., 16., 8., 0.]]) >>> dcor.distances.pairwise_distances(a, b) array([[24.41311123, 16.61324773, 9.16515139, 4.47213595], [16.61324773, 9.16515139, 4.47213595, 9.16515139], [ 9.16515139, 4.47213595, 9.16515139, 16.61324773], [ 4.47213595, 9.16515139, 16.61324773, 24.41311123]]) """ x = _transform_to_2d(x) if y is None or y is x: return _pdist(x, **kwargs) else: y = _transform_to_2d(y) return _cdist(x, y, **kwargs)
[ "def", "pairwise_distances", "(", "x", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "x", "=", "_transform_to_2d", "(", "x", ")", "if", "y", "is", "None", "or", "y", "is", "x", ":", "return", "_pdist", "(", "x", ",", "*", "*", "kwargs", ")", "else", ":", "y", "=", "_transform_to_2d", "(", "y", ")", "return", "_cdist", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
r""" pairwise_distances(x, y=None, *, exponent=1) Pairwise distance between points. Return the pairwise distance between points in two sets, or in the same set if only one set is passed. Parameters ---------- x: array_like An :math:`n \times m` array of :math:`n` observations in a :math:`m`-dimensional space. y: array_like An :math:`l \times m` array of :math:`l` observations in a :math:`m`-dimensional space. If None, the distances will be computed between the points in :math:`x`. exponent: float Exponent of the Euclidean distance. Returns ------- numpy ndarray A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the distance between :math:`x[i]` and :math:`y[j]`. Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[16, 15, 14, 13], ... [12, 11, 10, 9], ... [8, 7, 6, 5], ... [4, 3, 2, 1]]) >>> dcor.distances.pairwise_distances(a) array([[ 0., 8., 16., 24.], [ 8., 0., 8., 16.], [16., 8., 0., 8.], [24., 16., 8., 0.]]) >>> dcor.distances.pairwise_distances(a, b) array([[24.41311123, 16.61324773, 9.16515139, 4.47213595], [16.61324773, 9.16515139, 4.47213595, 9.16515139], [ 9.16515139, 4.47213595, 9.16515139, 16.61324773], [ 4.47213595, 9.16515139, 16.61324773, 24.41311123]])
[ "r", "pairwise_distances", "(", "x", "y", "=", "None", "*", "exponent", "=", "1", ")" ]
train
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/distances.py#L92-L149
kumar303/mohawk
mohawk/receiver.py
Receiver.respond
def respond(self, content=EmptyValue, content_type=EmptyValue, always_hash_content=True, ext=None): """ Respond to the request. This generates the :attr:`mohawk.Receiver.response_header` attribute. :param content=EmptyValue: Byte string of response body that will be sent. :type content=EmptyValue: str :param content_type=EmptyValue: content-type header value for response. :type content_type=EmptyValue: str :param always_hash_content=True: When True, ``content`` and ``content_type`` must be provided. Read :ref:`skipping-content-checks` to learn more. :type always_hash_content=True: bool :param ext=None: An external `Hawk`_ string. If not None, this value will be signed so that the sender can trust it. :type ext=None: str .. _`Hawk`: https://github.com/hueniverse/hawk """ log.debug('generating response header') resource = Resource(url=self.resource.url, credentials=self.resource.credentials, ext=ext, app=self.parsed_header.get('app', None), dlg=self.parsed_header.get('dlg', None), method=self.resource.method, content=content, content_type=content_type, always_hash_content=always_hash_content, nonce=self.parsed_header['nonce'], timestamp=self.parsed_header['ts']) mac = calculate_mac('response', resource, resource.gen_content_hash()) self.response_header = self._make_header(resource, mac, additional_keys=['ext']) return self.response_header
python
def respond(self, content=EmptyValue, content_type=EmptyValue, always_hash_content=True, ext=None): """ Respond to the request. This generates the :attr:`mohawk.Receiver.response_header` attribute. :param content=EmptyValue: Byte string of response body that will be sent. :type content=EmptyValue: str :param content_type=EmptyValue: content-type header value for response. :type content_type=EmptyValue: str :param always_hash_content=True: When True, ``content`` and ``content_type`` must be provided. Read :ref:`skipping-content-checks` to learn more. :type always_hash_content=True: bool :param ext=None: An external `Hawk`_ string. If not None, this value will be signed so that the sender can trust it. :type ext=None: str .. _`Hawk`: https://github.com/hueniverse/hawk """ log.debug('generating response header') resource = Resource(url=self.resource.url, credentials=self.resource.credentials, ext=ext, app=self.parsed_header.get('app', None), dlg=self.parsed_header.get('dlg', None), method=self.resource.method, content=content, content_type=content_type, always_hash_content=always_hash_content, nonce=self.parsed_header['nonce'], timestamp=self.parsed_header['ts']) mac = calculate_mac('response', resource, resource.gen_content_hash()) self.response_header = self._make_header(resource, mac, additional_keys=['ext']) return self.response_header
[ "def", "respond", "(", "self", ",", "content", "=", "EmptyValue", ",", "content_type", "=", "EmptyValue", ",", "always_hash_content", "=", "True", ",", "ext", "=", "None", ")", ":", "log", ".", "debug", "(", "'generating response header'", ")", "resource", "=", "Resource", "(", "url", "=", "self", ".", "resource", ".", "url", ",", "credentials", "=", "self", ".", "resource", ".", "credentials", ",", "ext", "=", "ext", ",", "app", "=", "self", ".", "parsed_header", ".", "get", "(", "'app'", ",", "None", ")", ",", "dlg", "=", "self", ".", "parsed_header", ".", "get", "(", "'dlg'", ",", "None", ")", ",", "method", "=", "self", ".", "resource", ".", "method", ",", "content", "=", "content", ",", "content_type", "=", "content_type", ",", "always_hash_content", "=", "always_hash_content", ",", "nonce", "=", "self", ".", "parsed_header", "[", "'nonce'", "]", ",", "timestamp", "=", "self", ".", "parsed_header", "[", "'ts'", "]", ")", "mac", "=", "calculate_mac", "(", "'response'", ",", "resource", ",", "resource", ".", "gen_content_hash", "(", ")", ")", "self", ".", "response_header", "=", "self", ".", "_make_header", "(", "resource", ",", "mac", ",", "additional_keys", "=", "[", "'ext'", "]", ")", "return", "self", ".", "response_header" ]
Respond to the request. This generates the :attr:`mohawk.Receiver.response_header` attribute. :param content=EmptyValue: Byte string of response body that will be sent. :type content=EmptyValue: str :param content_type=EmptyValue: content-type header value for response. :type content_type=EmptyValue: str :param always_hash_content=True: When True, ``content`` and ``content_type`` must be provided. Read :ref:`skipping-content-checks` to learn more. :type always_hash_content=True: bool :param ext=None: An external `Hawk`_ string. If not None, this value will be signed so that the sender can trust it. :type ext=None: str .. _`Hawk`: https://github.com/hueniverse/hawk
[ "Respond", "to", "the", "request", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/receiver.py#L123-L171
kumar303/mohawk
mohawk/util.py
calculate_payload_hash
def calculate_payload_hash(payload, algorithm, content_type): """Calculates a hash for a given payload.""" p_hash = hashlib.new(algorithm) parts = [] parts.append('hawk.' + str(HAWK_VER) + '.payload\n') parts.append(parse_content_type(content_type) + '\n') parts.append(payload or '') parts.append('\n') for i, p in enumerate(parts): # Make sure we are about to hash binary strings. if not isinstance(p, six.binary_type): p = p.encode('utf8') p_hash.update(p) parts[i] = p log.debug('calculating payload hash from:\n{parts}' .format(parts=pprint.pformat(parts))) return b64encode(p_hash.digest())
python
def calculate_payload_hash(payload, algorithm, content_type): """Calculates a hash for a given payload.""" p_hash = hashlib.new(algorithm) parts = [] parts.append('hawk.' + str(HAWK_VER) + '.payload\n') parts.append(parse_content_type(content_type) + '\n') parts.append(payload or '') parts.append('\n') for i, p in enumerate(parts): # Make sure we are about to hash binary strings. if not isinstance(p, six.binary_type): p = p.encode('utf8') p_hash.update(p) parts[i] = p log.debug('calculating payload hash from:\n{parts}' .format(parts=pprint.pformat(parts))) return b64encode(p_hash.digest())
[ "def", "calculate_payload_hash", "(", "payload", ",", "algorithm", ",", "content_type", ")", ":", "p_hash", "=", "hashlib", ".", "new", "(", "algorithm", ")", "parts", "=", "[", "]", "parts", ".", "append", "(", "'hawk.'", "+", "str", "(", "HAWK_VER", ")", "+", "'.payload\\n'", ")", "parts", ".", "append", "(", "parse_content_type", "(", "content_type", ")", "+", "'\\n'", ")", "parts", ".", "append", "(", "payload", "or", "''", ")", "parts", ".", "append", "(", "'\\n'", ")", "for", "i", ",", "p", "in", "enumerate", "(", "parts", ")", ":", "# Make sure we are about to hash binary strings.", "if", "not", "isinstance", "(", "p", ",", "six", ".", "binary_type", ")", ":", "p", "=", "p", ".", "encode", "(", "'utf8'", ")", "p_hash", ".", "update", "(", "p", ")", "parts", "[", "i", "]", "=", "p", "log", ".", "debug", "(", "'calculating payload hash from:\\n{parts}'", ".", "format", "(", "parts", "=", "pprint", ".", "pformat", "(", "parts", ")", ")", ")", "return", "b64encode", "(", "p_hash", ".", "digest", "(", ")", ")" ]
Calculates a hash for a given payload.
[ "Calculates", "a", "hash", "for", "a", "given", "payload", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L49-L69
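As an illustration of the normalization above, here is a self-contained sketch that builds the same three newline-terminated parts (header line, simplified content type, payload bytes) before hashing. It assumes the Hawk version string is '1' and replaces parse_content_type with a crude split on ';', so treat it as a sketch rather than the library's exact behaviour.

import hashlib
from base64 import b64encode

def payload_hash_sketch(payload, algorithm, content_type):
    # Normalized form mirrors the record: 'hawk.1.payload\n', the bare content
    # type plus '\n', then the payload (bytes) followed by a final '\n'.
    parts = [
        b"hawk.1.payload\n",
        content_type.split(";")[0].strip().lower().encode("utf8") + b"\n",
        (payload or b"") + b"\n",
    ]
    h = hashlib.new(algorithm)
    for part in parts:
        h.update(part)
    return b64encode(h.digest())

# payload_hash_sketch(b'{"x": 1}', "sha256", "application/json; charset=utf-8")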
kumar303/mohawk
mohawk/util.py
calculate_mac
def calculate_mac(mac_type, resource, content_hash): """Calculates a message authorization code (MAC).""" normalized = normalize_string(mac_type, resource, content_hash) log.debug(u'normalized resource for mac calc: {norm}' .format(norm=normalized)) digestmod = getattr(hashlib, resource.credentials['algorithm']) # Make sure we are about to hash binary strings. if not isinstance(normalized, six.binary_type): normalized = normalized.encode('utf8') key = resource.credentials['key'] if not isinstance(key, six.binary_type): key = key.encode('ascii') result = hmac.new(key, normalized, digestmod) return b64encode(result.digest())
python
def calculate_mac(mac_type, resource, content_hash): """Calculates a message authorization code (MAC).""" normalized = normalize_string(mac_type, resource, content_hash) log.debug(u'normalized resource for mac calc: {norm}' .format(norm=normalized)) digestmod = getattr(hashlib, resource.credentials['algorithm']) # Make sure we are about to hash binary strings. if not isinstance(normalized, six.binary_type): normalized = normalized.encode('utf8') key = resource.credentials['key'] if not isinstance(key, six.binary_type): key = key.encode('ascii') result = hmac.new(key, normalized, digestmod) return b64encode(result.digest())
[ "def", "calculate_mac", "(", "mac_type", ",", "resource", ",", "content_hash", ")", ":", "normalized", "=", "normalize_string", "(", "mac_type", ",", "resource", ",", "content_hash", ")", "log", ".", "debug", "(", "u'normalized resource for mac calc: {norm}'", ".", "format", "(", "norm", "=", "normalized", ")", ")", "digestmod", "=", "getattr", "(", "hashlib", ",", "resource", ".", "credentials", "[", "'algorithm'", "]", ")", "# Make sure we are about to hash binary strings.", "if", "not", "isinstance", "(", "normalized", ",", "six", ".", "binary_type", ")", ":", "normalized", "=", "normalized", ".", "encode", "(", "'utf8'", ")", "key", "=", "resource", ".", "credentials", "[", "'key'", "]", "if", "not", "isinstance", "(", "key", ",", "six", ".", "binary_type", ")", ":", "key", "=", "key", ".", "encode", "(", "'ascii'", ")", "result", "=", "hmac", ".", "new", "(", "key", ",", "normalized", ",", "digestmod", ")", "return", "b64encode", "(", "result", ".", "digest", "(", ")", ")" ]
Calculates a message authorization code (MAC).
[ "Calculates", "a", "message", "authorization", "code", "(", "MAC", ")", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L72-L88
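The HMAC step in the record, reduced to a standalone sketch over an already-normalized string; the key and algorithm handling follow the code above.

import hashlib
import hmac
from base64 import b64encode

def mac_sketch(key, normalized, algorithm="sha256"):
    # HMAC the normalized string with the shared key and base64-encode the digest.
    if not isinstance(normalized, bytes):
        normalized = normalized.encode("utf8")
    if not isinstance(key, bytes):
        key = key.encode("ascii")
    digestmod = getattr(hashlib, algorithm)
    return b64encode(hmac.new(key, normalized, digestmod).digest())

# mac_sketch("a long, random secret", "hawk.1.header\n1367076201\n...")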
kumar303/mohawk
mohawk/util.py
calculate_ts_mac
def calculate_ts_mac(ts, credentials): """Calculates a message authorization code (MAC) for a timestamp.""" normalized = ('hawk.{hawk_ver}.ts\n{ts}\n' .format(hawk_ver=HAWK_VER, ts=ts)) log.debug(u'normalized resource for ts mac calc: {norm}' .format(norm=normalized)) digestmod = getattr(hashlib, credentials['algorithm']) if not isinstance(normalized, six.binary_type): normalized = normalized.encode('utf8') key = credentials['key'] if not isinstance(key, six.binary_type): key = key.encode('ascii') result = hmac.new(key, normalized, digestmod) return b64encode(result.digest())
python
def calculate_ts_mac(ts, credentials): """Calculates a message authorization code (MAC) for a timestamp.""" normalized = ('hawk.{hawk_ver}.ts\n{ts}\n' .format(hawk_ver=HAWK_VER, ts=ts)) log.debug(u'normalized resource for ts mac calc: {norm}' .format(norm=normalized)) digestmod = getattr(hashlib, credentials['algorithm']) if not isinstance(normalized, six.binary_type): normalized = normalized.encode('utf8') key = credentials['key'] if not isinstance(key, six.binary_type): key = key.encode('ascii') result = hmac.new(key, normalized, digestmod) return b64encode(result.digest())
[ "def", "calculate_ts_mac", "(", "ts", ",", "credentials", ")", ":", "normalized", "=", "(", "'hawk.{hawk_ver}.ts\\n{ts}\\n'", ".", "format", "(", "hawk_ver", "=", "HAWK_VER", ",", "ts", "=", "ts", ")", ")", "log", ".", "debug", "(", "u'normalized resource for ts mac calc: {norm}'", ".", "format", "(", "norm", "=", "normalized", ")", ")", "digestmod", "=", "getattr", "(", "hashlib", ",", "credentials", "[", "'algorithm'", "]", ")", "if", "not", "isinstance", "(", "normalized", ",", "six", ".", "binary_type", ")", ":", "normalized", "=", "normalized", ".", "encode", "(", "'utf8'", ")", "key", "=", "credentials", "[", "'key'", "]", "if", "not", "isinstance", "(", "key", ",", "six", ".", "binary_type", ")", ":", "key", "=", "key", ".", "encode", "(", "'ascii'", ")", "result", "=", "hmac", ".", "new", "(", "key", ",", "normalized", ",", "digestmod", ")", "return", "b64encode", "(", "result", ".", "digest", "(", ")", ")" ]
Calculates a message authorization code (MAC) for a timestamp.
[ "Calculates", "a", "message", "authorization", "code", "(", "MAC", ")", "for", "a", "timestamp", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L91-L106
kumar303/mohawk
mohawk/util.py
normalize_string
def normalize_string(mac_type, resource, content_hash): """Serializes mac_type and resource into a HAWK string.""" normalized = [ 'hawk.' + str(HAWK_VER) + '.' + mac_type, normalize_header_attr(resource.timestamp), normalize_header_attr(resource.nonce), normalize_header_attr(resource.method or ''), normalize_header_attr(resource.name or ''), normalize_header_attr(resource.host), normalize_header_attr(resource.port), normalize_header_attr(content_hash or '') ] # The blank lines are important. They follow what the Node Hawk lib does. normalized.append(normalize_header_attr(resource.ext or '')) if resource.app: normalized.append(normalize_header_attr(resource.app)) normalized.append(normalize_header_attr(resource.dlg or '')) # Add trailing new line. normalized.append('') normalized = '\n'.join(normalized) return normalized
python
def normalize_string(mac_type, resource, content_hash): """Serializes mac_type and resource into a HAWK string.""" normalized = [ 'hawk.' + str(HAWK_VER) + '.' + mac_type, normalize_header_attr(resource.timestamp), normalize_header_attr(resource.nonce), normalize_header_attr(resource.method or ''), normalize_header_attr(resource.name or ''), normalize_header_attr(resource.host), normalize_header_attr(resource.port), normalize_header_attr(content_hash or '') ] # The blank lines are important. They follow what the Node Hawk lib does. normalized.append(normalize_header_attr(resource.ext or '')) if resource.app: normalized.append(normalize_header_attr(resource.app)) normalized.append(normalize_header_attr(resource.dlg or '')) # Add trailing new line. normalized.append('') normalized = '\n'.join(normalized) return normalized
[ "def", "normalize_string", "(", "mac_type", ",", "resource", ",", "content_hash", ")", ":", "normalized", "=", "[", "'hawk.'", "+", "str", "(", "HAWK_VER", ")", "+", "'.'", "+", "mac_type", ",", "normalize_header_attr", "(", "resource", ".", "timestamp", ")", ",", "normalize_header_attr", "(", "resource", ".", "nonce", ")", ",", "normalize_header_attr", "(", "resource", ".", "method", "or", "''", ")", ",", "normalize_header_attr", "(", "resource", ".", "name", "or", "''", ")", ",", "normalize_header_attr", "(", "resource", ".", "host", ")", ",", "normalize_header_attr", "(", "resource", ".", "port", ")", ",", "normalize_header_attr", "(", "content_hash", "or", "''", ")", "]", "# The blank lines are important. They follow what the Node Hawk lib does.", "normalized", ".", "append", "(", "normalize_header_attr", "(", "resource", ".", "ext", "or", "''", ")", ")", "if", "resource", ".", "app", ":", "normalized", ".", "append", "(", "normalize_header_attr", "(", "resource", ".", "app", ")", ")", "normalized", ".", "append", "(", "normalize_header_attr", "(", "resource", ".", "dlg", "or", "''", ")", ")", "# Add trailing new line.", "normalized", ".", "append", "(", "''", ")", "normalized", "=", "'\\n'", ".", "join", "(", "normalized", ")", "return", "normalized" ]
Serializes mac_type and resource into a HAWK string.
[ "Serializes", "mac_type", "and", "resource", "into", "a", "HAWK", "string", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L109-L136
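To make the field order concrete, a sketch that renders a normalized request string for sample values. The app/dlg fields and the escaping done by normalize_header_attr are left out, and the 'hawk.1.header' prefix assumes mac_type 'header' and Hawk version 1.

def normalized_string_sketch(ts, nonce, method, path, host, port,
                             content_hash="", ext=""):
    # Field order mirrors the normalize_string record above, ending with a
    # blank entry so the joined string gets its trailing newline.
    lines = [
        "hawk.1.header",
        str(ts),
        nonce,
        method,
        path,
        host,
        str(port),
        content_hash,
        ext,
        "",
    ]
    return "\n".join(lines)

# print(normalized_string_sketch(1367076201, "NPHgnG", "GET",
#                                "/resource/1?b=1&a=2", "example.com", 8000))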
kumar303/mohawk
mohawk/util.py
parse_authorization_header
def parse_authorization_header(auth_header): """ Example Authorization header: 'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="' """ if len(auth_header) > MAX_LENGTH: raise BadHeaderValue('Header exceeds maximum length of {max_length}'.format( max_length=MAX_LENGTH)) # Make sure we have a unicode object for consistency. if isinstance(auth_header, six.binary_type): auth_header = auth_header.decode('utf8') scheme, attributes_string = auth_header.split(' ', 1) if scheme.lower() != 'hawk': raise HawkFail("Unknown scheme '{scheme}' when parsing header" .format(scheme=scheme)) attributes = {} def replace_attribute(match): """Extract the next key="value"-pair in the header.""" key = match.group('key') value = match.group('value') if key not in allowable_header_keys: raise HawkFail("Unknown Hawk key '{key}' when parsing header" .format(key=key)) validate_header_attr(value, name=key) if key in attributes: raise BadHeaderValue('Duplicate key in header: {key}'.format(key=key)) attributes[key] = value # Iterate over all the key="value"-pairs in the header, replace them with # an empty string, and store the extracted attribute in the attributes # dict. Correctly formed headers will then leave nothing unparsed (''). unparsed_header = HAWK_HEADER_RE.sub(replace_attribute, attributes_string) if unparsed_header != '': raise BadHeaderValue("Couldn't parse Hawk header", unparsed_header) log.debug('parsed Hawk header: {header} into: \n{parsed}' .format(header=auth_header, parsed=pprint.pformat(attributes))) return attributes
python
def parse_authorization_header(auth_header): """ Example Authorization header: 'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="' """ if len(auth_header) > MAX_LENGTH: raise BadHeaderValue('Header exceeds maximum length of {max_length}'.format( max_length=MAX_LENGTH)) # Make sure we have a unicode object for consistency. if isinstance(auth_header, six.binary_type): auth_header = auth_header.decode('utf8') scheme, attributes_string = auth_header.split(' ', 1) if scheme.lower() != 'hawk': raise HawkFail("Unknown scheme '{scheme}' when parsing header" .format(scheme=scheme)) attributes = {} def replace_attribute(match): """Extract the next key="value"-pair in the header.""" key = match.group('key') value = match.group('value') if key not in allowable_header_keys: raise HawkFail("Unknown Hawk key '{key}' when parsing header" .format(key=key)) validate_header_attr(value, name=key) if key in attributes: raise BadHeaderValue('Duplicate key in header: {key}'.format(key=key)) attributes[key] = value # Iterate over all the key="value"-pairs in the header, replace them with # an empty string, and store the extracted attribute in the attributes # dict. Correctly formed headers will then leave nothing unparsed (''). unparsed_header = HAWK_HEADER_RE.sub(replace_attribute, attributes_string) if unparsed_header != '': raise BadHeaderValue("Couldn't parse Hawk header", unparsed_header) log.debug('parsed Hawk header: {header} into: \n{parsed}' .format(header=auth_header, parsed=pprint.pformat(attributes))) return attributes
[ "def", "parse_authorization_header", "(", "auth_header", ")", ":", "if", "len", "(", "auth_header", ")", ">", "MAX_LENGTH", ":", "raise", "BadHeaderValue", "(", "'Header exceeds maximum length of {max_length}'", ".", "format", "(", "max_length", "=", "MAX_LENGTH", ")", ")", "# Make sure we have a unicode object for consistency.", "if", "isinstance", "(", "auth_header", ",", "six", ".", "binary_type", ")", ":", "auth_header", "=", "auth_header", ".", "decode", "(", "'utf8'", ")", "scheme", ",", "attributes_string", "=", "auth_header", ".", "split", "(", "' '", ",", "1", ")", "if", "scheme", ".", "lower", "(", ")", "!=", "'hawk'", ":", "raise", "HawkFail", "(", "\"Unknown scheme '{scheme}' when parsing header\"", ".", "format", "(", "scheme", "=", "scheme", ")", ")", "attributes", "=", "{", "}", "def", "replace_attribute", "(", "match", ")", ":", "\"\"\"Extract the next key=\"value\"-pair in the header.\"\"\"", "key", "=", "match", ".", "group", "(", "'key'", ")", "value", "=", "match", ".", "group", "(", "'value'", ")", "if", "key", "not", "in", "allowable_header_keys", ":", "raise", "HawkFail", "(", "\"Unknown Hawk key '{key}' when parsing header\"", ".", "format", "(", "key", "=", "key", ")", ")", "validate_header_attr", "(", "value", ",", "name", "=", "key", ")", "if", "key", "in", "attributes", ":", "raise", "BadHeaderValue", "(", "'Duplicate key in header: {key}'", ".", "format", "(", "key", "=", "key", ")", ")", "attributes", "[", "key", "]", "=", "value", "# Iterate over all the key=\"value\"-pairs in the header, replace them with", "# an empty string, and store the extracted attribute in the attributes", "# dict. Correctly formed headers will then leave nothing unparsed ('').", "unparsed_header", "=", "HAWK_HEADER_RE", ".", "sub", "(", "replace_attribute", ",", "attributes_string", ")", "if", "unparsed_header", "!=", "''", ":", "raise", "BadHeaderValue", "(", "\"Couldn't parse Hawk header\"", ",", "unparsed_header", ")", "log", ".", "debug", "(", "'parsed Hawk header: {header} into: \\n{parsed}'", ".", "format", "(", "header", "=", "auth_header", ",", "parsed", "=", "pprint", ".", "pformat", "(", "attributes", ")", ")", ")", "return", "attributes" ]
Example Authorization header: 'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="'
[ "Example", "Authorization", "header", ":" ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L147-L192
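A rough standalone sketch of the same idea: pull key="value" pairs out of a Hawk Authorization header with a regex. The real parser additionally enforces a length limit, an allow-list of keys, value validation, duplicate detection, and a check that nothing is left unparsed, as the record shows.

import re

HAWK_ATTR_RE_SKETCH = re.compile(r'(?P<key>\w+)="(?P<value>[^"\\]*)"')

def parse_hawk_header_sketch(auth_header):
    # Split off the scheme, then collect every key="value" attribute.
    scheme, _, attributes = auth_header.partition(" ")
    if scheme.lower() != "hawk":
        raise ValueError("not a Hawk header")
    return {m.group("key"): m.group("value")
            for m in HAWK_ATTR_RE_SKETCH.finditer(attributes)}

# parse_hawk_header_sketch('Hawk id="dh37fgj492je", ts="1367076201", '
#                          'nonce="NPHgnG", ext="and welcome!", mac="CeWH..."')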
kumar303/mohawk
mohawk/bewit.py
get_bewit
def get_bewit(resource): """ Returns a bewit identifier for the resource as a string. :param resource: Resource to generate a bewit for :type resource: `mohawk.base.Resource` """ if resource.method != 'GET': raise ValueError('bewits can only be generated for GET requests') if resource.nonce != '': raise ValueError('bewits must use an empty nonce') mac = calculate_mac( 'bewit', resource, None, ) if isinstance(mac, six.binary_type): mac = mac.decode('ascii') if resource.ext is None: ext = '' else: validate_header_attr(resource.ext, name='ext') ext = resource.ext # b64encode works only with bytes in python3, but all of our parameters are # in unicode, so we need to encode them. The cleanest way to do this that # works in both python 2 and 3 is to use string formatting to get a # unicode string, and then explicitly encode it to bytes. inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format( id=resource.credentials['id'], exp=resource.timestamp, mac=mac, ext=ext, ) inner_bewit_bytes = inner_bewit.encode('ascii') bewit_bytes = urlsafe_b64encode(inner_bewit_bytes) # Now decode the resulting bytes back to a unicode string return bewit_bytes.decode('ascii')
python
def get_bewit(resource): """ Returns a bewit identifier for the resource as a string. :param resource: Resource to generate a bewit for :type resource: `mohawk.base.Resource` """ if resource.method != 'GET': raise ValueError('bewits can only be generated for GET requests') if resource.nonce != '': raise ValueError('bewits must use an empty nonce') mac = calculate_mac( 'bewit', resource, None, ) if isinstance(mac, six.binary_type): mac = mac.decode('ascii') if resource.ext is None: ext = '' else: validate_header_attr(resource.ext, name='ext') ext = resource.ext # b64encode works only with bytes in python3, but all of our parameters are # in unicode, so we need to encode them. The cleanest way to do this that # works in both python 2 and 3 is to use string formatting to get a # unicode string, and then explicitly encode it to bytes. inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format( id=resource.credentials['id'], exp=resource.timestamp, mac=mac, ext=ext, ) inner_bewit_bytes = inner_bewit.encode('ascii') bewit_bytes = urlsafe_b64encode(inner_bewit_bytes) # Now decode the resulting bytes back to a unicode string return bewit_bytes.decode('ascii')
[ "def", "get_bewit", "(", "resource", ")", ":", "if", "resource", ".", "method", "!=", "'GET'", ":", "raise", "ValueError", "(", "'bewits can only be generated for GET requests'", ")", "if", "resource", ".", "nonce", "!=", "''", ":", "raise", "ValueError", "(", "'bewits must use an empty nonce'", ")", "mac", "=", "calculate_mac", "(", "'bewit'", ",", "resource", ",", "None", ",", ")", "if", "isinstance", "(", "mac", ",", "six", ".", "binary_type", ")", ":", "mac", "=", "mac", ".", "decode", "(", "'ascii'", ")", "if", "resource", ".", "ext", "is", "None", ":", "ext", "=", "''", "else", ":", "validate_header_attr", "(", "resource", ".", "ext", ",", "name", "=", "'ext'", ")", "ext", "=", "resource", ".", "ext", "# b64encode works only with bytes in python3, but all of our parameters are", "# in unicode, so we need to encode them. The cleanest way to do this that", "# works in both python 2 and 3 is to use string formatting to get a", "# unicode string, and then explicitly encode it to bytes.", "inner_bewit", "=", "u\"{id}\\\\{exp}\\\\{mac}\\\\{ext}\"", ".", "format", "(", "id", "=", "resource", ".", "credentials", "[", "'id'", "]", ",", "exp", "=", "resource", ".", "timestamp", ",", "mac", "=", "mac", ",", "ext", "=", "ext", ",", ")", "inner_bewit_bytes", "=", "inner_bewit", ".", "encode", "(", "'ascii'", ")", "bewit_bytes", "=", "urlsafe_b64encode", "(", "inner_bewit_bytes", ")", "# Now decode the resulting bytes back to a unicode string", "return", "bewit_bytes", ".", "decode", "(", "'ascii'", ")" ]
Returns a bewit identifier for the resource as a string. :param resource: Resource to generate a bewit for :type resource: `mohawk.base.Resource`
[ "Returns", "a", "bewit", "identifier", "for", "the", "resource", "as", "a", "string", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/bewit.py#L21-L61
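The bewit layout in isolation, assuming the MAC has already been computed as a base64 string: the four fields are backslash-joined and then URL-safe base64 encoded, as in the record.

from base64 import urlsafe_b64encode

def bewit_sketch(credential_id, expiration_ts, mac, ext=""):
    # "<id>\<expiration>\<mac>\<ext>" encoded with the URL-safe base64 alphabet.
    inner = "{id}\\{exp}\\{mac}\\{ext}".format(
        id=credential_id, exp=expiration_ts, mac=mac, ext=ext)
    return urlsafe_b64encode(inner.encode("ascii")).decode("ascii")

# bewit_sketch("dh37fgj492je", 1367076201,
#              "CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY=")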
kumar303/mohawk
mohawk/bewit.py
parse_bewit
def parse_bewit(bewit): """ Returns a `bewittuple` representing the parts of an encoded bewit string. This has the following named attributes: (id, expiration, mac, ext) :param bewit: A base64 encoded bewit string :type bewit: str """ decoded_bewit = b64decode(bewit).decode('ascii') bewit_parts = decoded_bewit.split("\\") if len(bewit_parts) != 4: raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit) return bewittuple(*bewit_parts)
python
def parse_bewit(bewit): """ Returns a `bewittuple` representing the parts of an encoded bewit string. This has the following named attributes: (id, expiration, mac, ext) :param bewit: A base64 encoded bewit string :type bewit: str """ decoded_bewit = b64decode(bewit).decode('ascii') bewit_parts = decoded_bewit.split("\\") if len(bewit_parts) != 4: raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit) return bewittuple(*bewit_parts)
[ "def", "parse_bewit", "(", "bewit", ")", ":", "decoded_bewit", "=", "b64decode", "(", "bewit", ")", ".", "decode", "(", "'ascii'", ")", "bewit_parts", "=", "decoded_bewit", ".", "split", "(", "\"\\\\\"", ")", "if", "len", "(", "bewit_parts", ")", "!=", "4", ":", "raise", "InvalidBewit", "(", "'Expected 4 parts to bewit: %s'", "%", "decoded_bewit", ")", "return", "bewittuple", "(", "*", "bewit_parts", ")" ]
Returns a `bewittuple` representing the parts of an encoded bewit string. This has the following named attributes: (id, expiration, mac, ext) :param bewit: A base64 encoded bewit string :type bewit: str
[ "Returns", "a", "bewittuple", "representing", "the", "parts", "of", "an", "encoded", "bewit", "string", ".", "This", "has", "the", "following", "named", "attributes", ":", "(", "id", "expiration", "mac", "ext", ")" ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/bewit.py#L67-L81
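And the reverse direction for the sketch above. Note the record's parse_bewit uses b64decode on the standard alphabet; the sketch pairs urlsafe_b64decode with the URL-safe encoder from the get_bewit sketch so the round trip stays self-consistent.

from base64 import urlsafe_b64decode

def parse_bewit_sketch(bewit):
    # Decode and split back into (id, expiration, mac, ext).
    decoded = urlsafe_b64decode(bewit).decode("ascii")
    parts = decoded.split("\\")
    if len(parts) != 4:
        raise ValueError("expected 4 parts, got %d" % len(parts))
    return tuple(parts)

# parse_bewit_sketch(bewit_sketch("dh37fgj492je", 1367076201, "CeWH...", "ext-data"))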
kumar303/mohawk
mohawk/bewit.py
strip_bewit
def strip_bewit(url): """ Strips the bewit parameter out of a url. Returns (encoded_bewit, stripped_url) Raises InvalidBewit if no bewit found. :param url: The url containing a bewit parameter :type url: str """ m = re.search('[?&]bewit=([^&]+)', url) if not m: raise InvalidBewit('no bewit data found') bewit = m.group(1) stripped_url = url[:m.start()] + url[m.end():] return bewit, stripped_url
python
def strip_bewit(url): """ Strips the bewit parameter out of a url. Returns (encoded_bewit, stripped_url) Raises InvalidBewit if no bewit found. :param url: The url containing a bewit parameter :type url: str """ m = re.search('[?&]bewit=([^&]+)', url) if not m: raise InvalidBewit('no bewit data found') bewit = m.group(1) stripped_url = url[:m.start()] + url[m.end():] return bewit, stripped_url
[ "def", "strip_bewit", "(", "url", ")", ":", "m", "=", "re", ".", "search", "(", "'[?&]bewit=([^&]+)'", ",", "url", ")", "if", "not", "m", ":", "raise", "InvalidBewit", "(", "'no bewit data found'", ")", "bewit", "=", "m", ".", "group", "(", "1", ")", "stripped_url", "=", "url", "[", ":", "m", ".", "start", "(", ")", "]", "+", "url", "[", "m", ".", "end", "(", ")", ":", "]", "return", "bewit", ",", "stripped_url" ]
Strips the bewit parameter out of a url. Returns (encoded_bewit, stripped_url) Raises InvalidBewit if no bewit found. :param url: The url containing a bewit parameter :type url: str
[ "Strips", "the", "bewit", "parameter", "out", "of", "a", "url", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/bewit.py#L84-L101
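The same regex approach in isolation, showing what gets returned for a URL that carries a bewit among other query parameters.

import re

def strip_bewit_sketch(url):
    # Find the bewit query parameter; return it and the URL with it removed.
    m = re.search(r"[?&]bewit=([^&]+)", url)
    if not m:
        raise ValueError("no bewit data found")
    return m.group(1), url[:m.start()] + url[m.end():]

# strip_bewit_sketch("https://example.com/res/1?a=1&bewit=ZGgzN2ZnajQ5MmplXDEz&b=2")
# -> ("ZGgzN2ZnajQ5MmplXDEz", "https://example.com/res/1?a=1&b=2")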
kumar303/mohawk
mohawk/bewit.py
check_bewit
def check_bewit(url, credential_lookup, now=None): """ Validates the given bewit. Returns True if the resource has a valid bewit parameter attached, or raises a subclass of HawkFail otherwise. :param credential_lookup: Callable to look up the credentials dict by sender ID. The credentials dict must have the keys: ``id``, ``key``, and ``algorithm``. See :ref:`receiving-request` for an example. :type credential_lookup: callable :param now=None: Unix epoch time for the current time to determine if bewit has expired. If None, then the current time as given by utc_now() is used. :type now=None: integer """ raw_bewit, stripped_url = strip_bewit(url) bewit = parse_bewit(raw_bewit) try: credentials = credential_lookup(bewit.id) except LookupError: raise CredentialsLookupError('Could not find credentials for ID {0}' .format(bewit.id)) res = Resource(url=stripped_url, method='GET', credentials=credentials, timestamp=bewit.expiration, nonce='', ext=bewit.ext, ) mac = calculate_mac('bewit', res, None) mac = mac.decode('ascii') if not strings_match(mac, bewit.mac): raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}' .format(bewit_mac=bewit.mac, expected_mac=mac)) # Check that the timestamp isn't expired if now is None: # TODO: Add offset/skew now = utc_now() if int(bewit.expiration) < now: # TODO: Refactor TokenExpired to handle this better raise TokenExpired('bewit with UTC timestamp {ts} has expired; ' 'it was compared to {now}' .format(ts=bewit.expiration, now=now), localtime_in_seconds=now, www_authenticate='' ) return True
python
def check_bewit(url, credential_lookup, now=None): """ Validates the given bewit. Returns True if the resource has a valid bewit parameter attached, or raises a subclass of HawkFail otherwise. :param credential_lookup: Callable to look up the credentials dict by sender ID. The credentials dict must have the keys: ``id``, ``key``, and ``algorithm``. See :ref:`receiving-request` for an example. :type credential_lookup: callable :param now=None: Unix epoch time for the current time to determine if bewit has expired. If None, then the current time as given by utc_now() is used. :type now=None: integer """ raw_bewit, stripped_url = strip_bewit(url) bewit = parse_bewit(raw_bewit) try: credentials = credential_lookup(bewit.id) except LookupError: raise CredentialsLookupError('Could not find credentials for ID {0}' .format(bewit.id)) res = Resource(url=stripped_url, method='GET', credentials=credentials, timestamp=bewit.expiration, nonce='', ext=bewit.ext, ) mac = calculate_mac('bewit', res, None) mac = mac.decode('ascii') if not strings_match(mac, bewit.mac): raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}' .format(bewit_mac=bewit.mac, expected_mac=mac)) # Check that the timestamp isn't expired if now is None: # TODO: Add offset/skew now = utc_now() if int(bewit.expiration) < now: # TODO: Refactor TokenExpired to handle this better raise TokenExpired('bewit with UTC timestamp {ts} has expired; ' 'it was compared to {now}' .format(ts=bewit.expiration, now=now), localtime_in_seconds=now, www_authenticate='' ) return True
[ "def", "check_bewit", "(", "url", ",", "credential_lookup", ",", "now", "=", "None", ")", ":", "raw_bewit", ",", "stripped_url", "=", "strip_bewit", "(", "url", ")", "bewit", "=", "parse_bewit", "(", "raw_bewit", ")", "try", ":", "credentials", "=", "credential_lookup", "(", "bewit", ".", "id", ")", "except", "LookupError", ":", "raise", "CredentialsLookupError", "(", "'Could not find credentials for ID {0}'", ".", "format", "(", "bewit", ".", "id", ")", ")", "res", "=", "Resource", "(", "url", "=", "stripped_url", ",", "method", "=", "'GET'", ",", "credentials", "=", "credentials", ",", "timestamp", "=", "bewit", ".", "expiration", ",", "nonce", "=", "''", ",", "ext", "=", "bewit", ".", "ext", ",", ")", "mac", "=", "calculate_mac", "(", "'bewit'", ",", "res", ",", "None", ")", "mac", "=", "mac", ".", "decode", "(", "'ascii'", ")", "if", "not", "strings_match", "(", "mac", ",", "bewit", ".", "mac", ")", ":", "raise", "MacMismatch", "(", "'bewit with mac {bewit_mac} did not match expected mac {expected_mac}'", ".", "format", "(", "bewit_mac", "=", "bewit", ".", "mac", ",", "expected_mac", "=", "mac", ")", ")", "# Check that the timestamp isn't expired", "if", "now", "is", "None", ":", "# TODO: Add offset/skew", "now", "=", "utc_now", "(", ")", "if", "int", "(", "bewit", ".", "expiration", ")", "<", "now", ":", "# TODO: Refactor TokenExpired to handle this better", "raise", "TokenExpired", "(", "'bewit with UTC timestamp {ts} has expired; '", "'it was compared to {now}'", ".", "format", "(", "ts", "=", "bewit", ".", "expiration", ",", "now", "=", "now", ")", ",", "localtime_in_seconds", "=", "now", ",", "www_authenticate", "=", "''", ")", "return", "True" ]
Validates the given bewit. Returns True if the resource has a valid bewit parameter attached, or raises a subclass of HawkFail otherwise. :param credential_lookup: Callable to look up the credentials dict by sender ID. The credentials dict must have the keys: ``id``, ``key``, and ``algorithm``. See :ref:`receiving-request` for an example. :type credential_lookup: callable :param now=None: Unix epoch time for the current time to determine if bewit has expired. If None, then the current time as given by utc_now() is used. :type now=None: integer
[ "Validates", "the", "given", "bewit", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/bewit.py#L104-L159
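A hedged usage sketch for the function above. The import paths follow the record's file layout (mohawk/bewit.py) and mohawk's exceptions module, but treat them as assumptions; credential_lookup only needs to raise a LookupError for unknown IDs.

from mohawk.bewit import check_bewit
from mohawk.exc import HawkFail

CREDENTIALS = {
    "some-sender-id": {"id": "some-sender-id",
                       "key": "a long, random secret",
                       "algorithm": "sha256"},
}

def credential_lookup(sender_id):
    # KeyError is a LookupError, which check_bewit converts to
    # CredentialsLookupError for unknown IDs.
    return CREDENTIALS[sender_id]

def bewit_url_is_valid(url):
    try:
        return check_bewit(url, credential_lookup)
    except HawkFail:
        return False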
kumar303/mohawk
mohawk/sender.py
Sender.accept_response
def accept_response(self, response_header, content=EmptyValue, content_type=EmptyValue, accept_untrusted_content=False, localtime_offset_in_seconds=0, timestamp_skew_in_seconds=default_ts_skew_in_seconds, **auth_kw): """ Accept a response to this request. :param response_header: A `Hawk`_ ``Server-Authorization`` header such as one created by :class:`mohawk.Receiver`. :type response_header: str :param content=EmptyValue: Byte string of the response body received. :type content=EmptyValue: str :param content_type=EmptyValue: Content-Type header value of the response received. :type content_type=EmptyValue: str :param accept_untrusted_content=False: When True, allow responses that do not hash their content. Read :ref:`skipping-content-checks` to learn more. :type accept_untrusted_content=False: bool :param localtime_offset_in_seconds=0: Seconds to add to local time in case it's out of sync. :type localtime_offset_in_seconds=0: float :param timestamp_skew_in_seconds=60: Max seconds until a message expires. Upon expiry, :class:`mohawk.exc.TokenExpired` is raised. :type timestamp_skew_in_seconds=60: float .. _`Hawk`: https://github.com/hueniverse/hawk """ log.debug('accepting response {header}' .format(header=response_header)) parsed_header = parse_authorization_header(response_header) resource = Resource(ext=parsed_header.get('ext', None), content=content, content_type=content_type, # The following response attributes are # in reference to the original request, # not to the reponse header: timestamp=self.req_resource.timestamp, nonce=self.req_resource.nonce, url=self.req_resource.url, method=self.req_resource.method, app=self.req_resource.app, dlg=self.req_resource.dlg, credentials=self.credentials, seen_nonce=self.seen_nonce) self._authorize( 'response', parsed_header, resource, # Per Node lib, a responder macs the *sender's* timestamp. # It does not create its own timestamp. # I suppose a slow response could time out here. Maybe only check # mac failures, not timeouts? their_timestamp=resource.timestamp, timestamp_skew_in_seconds=timestamp_skew_in_seconds, localtime_offset_in_seconds=localtime_offset_in_seconds, accept_untrusted_content=accept_untrusted_content, **auth_kw)
python
def accept_response(self, response_header, content=EmptyValue, content_type=EmptyValue, accept_untrusted_content=False, localtime_offset_in_seconds=0, timestamp_skew_in_seconds=default_ts_skew_in_seconds, **auth_kw): """ Accept a response to this request. :param response_header: A `Hawk`_ ``Server-Authorization`` header such as one created by :class:`mohawk.Receiver`. :type response_header: str :param content=EmptyValue: Byte string of the response body received. :type content=EmptyValue: str :param content_type=EmptyValue: Content-Type header value of the response received. :type content_type=EmptyValue: str :param accept_untrusted_content=False: When True, allow responses that do not hash their content. Read :ref:`skipping-content-checks` to learn more. :type accept_untrusted_content=False: bool :param localtime_offset_in_seconds=0: Seconds to add to local time in case it's out of sync. :type localtime_offset_in_seconds=0: float :param timestamp_skew_in_seconds=60: Max seconds until a message expires. Upon expiry, :class:`mohawk.exc.TokenExpired` is raised. :type timestamp_skew_in_seconds=60: float .. _`Hawk`: https://github.com/hueniverse/hawk """ log.debug('accepting response {header}' .format(header=response_header)) parsed_header = parse_authorization_header(response_header) resource = Resource(ext=parsed_header.get('ext', None), content=content, content_type=content_type, # The following response attributes are # in reference to the original request, # not to the reponse header: timestamp=self.req_resource.timestamp, nonce=self.req_resource.nonce, url=self.req_resource.url, method=self.req_resource.method, app=self.req_resource.app, dlg=self.req_resource.dlg, credentials=self.credentials, seen_nonce=self.seen_nonce) self._authorize( 'response', parsed_header, resource, # Per Node lib, a responder macs the *sender's* timestamp. # It does not create its own timestamp. # I suppose a slow response could time out here. Maybe only check # mac failures, not timeouts? their_timestamp=resource.timestamp, timestamp_skew_in_seconds=timestamp_skew_in_seconds, localtime_offset_in_seconds=localtime_offset_in_seconds, accept_untrusted_content=accept_untrusted_content, **auth_kw)
[ "def", "accept_response", "(", "self", ",", "response_header", ",", "content", "=", "EmptyValue", ",", "content_type", "=", "EmptyValue", ",", "accept_untrusted_content", "=", "False", ",", "localtime_offset_in_seconds", "=", "0", ",", "timestamp_skew_in_seconds", "=", "default_ts_skew_in_seconds", ",", "*", "*", "auth_kw", ")", ":", "log", ".", "debug", "(", "'accepting response {header}'", ".", "format", "(", "header", "=", "response_header", ")", ")", "parsed_header", "=", "parse_authorization_header", "(", "response_header", ")", "resource", "=", "Resource", "(", "ext", "=", "parsed_header", ".", "get", "(", "'ext'", ",", "None", ")", ",", "content", "=", "content", ",", "content_type", "=", "content_type", ",", "# The following response attributes are", "# in reference to the original request,", "# not to the reponse header:", "timestamp", "=", "self", ".", "req_resource", ".", "timestamp", ",", "nonce", "=", "self", ".", "req_resource", ".", "nonce", ",", "url", "=", "self", ".", "req_resource", ".", "url", ",", "method", "=", "self", ".", "req_resource", ".", "method", ",", "app", "=", "self", ".", "req_resource", ".", "app", ",", "dlg", "=", "self", ".", "req_resource", ".", "dlg", ",", "credentials", "=", "self", ".", "credentials", ",", "seen_nonce", "=", "self", ".", "seen_nonce", ")", "self", ".", "_authorize", "(", "'response'", ",", "parsed_header", ",", "resource", ",", "# Per Node lib, a responder macs the *sender's* timestamp.", "# It does not create its own timestamp.", "# I suppose a slow response could time out here. Maybe only check", "# mac failures, not timeouts?", "their_timestamp", "=", "resource", ".", "timestamp", ",", "timestamp_skew_in_seconds", "=", "timestamp_skew_in_seconds", ",", "localtime_offset_in_seconds", "=", "localtime_offset_in_seconds", ",", "accept_untrusted_content", "=", "accept_untrusted_content", ",", "*", "*", "auth_kw", ")" ]
Accept a response to this request. :param response_header: A `Hawk`_ ``Server-Authorization`` header such as one created by :class:`mohawk.Receiver`. :type response_header: str :param content=EmptyValue: Byte string of the response body received. :type content=EmptyValue: str :param content_type=EmptyValue: Content-Type header value of the response received. :type content_type=EmptyValue: str :param accept_untrusted_content=False: When True, allow responses that do not hash their content. Read :ref:`skipping-content-checks` to learn more. :type accept_untrusted_content=False: bool :param localtime_offset_in_seconds=0: Seconds to add to local time in case it's out of sync. :type localtime_offset_in_seconds=0: float :param timestamp_skew_in_seconds=60: Max seconds until a message expires. Upon expiry, :class:`mohawk.exc.TokenExpired` is raised. :type timestamp_skew_in_seconds=60: float .. _`Hawk`: https://github.com/hueniverse/hawk
[ "Accept", "a", "response", "to", "this", "request", "." ]
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/sender.py#L106-L175
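A hedged round-trip sketch showing where accept_response fits on the client side. The Sender constructor arguments follow mohawk's documented API, referenced by the docstrings above, so the exact signature should be treated as an assumption; the HTTP call itself is left as a comment.

from mohawk import Sender

credentials = {"id": "some-sender-id",
               "key": "a long, random secret",
               "algorithm": "sha256"}

content = '{"x": 1}'
content_type = "application/json"

# Sign the outgoing request.
sender = Sender(credentials, "https://example.com/api/thing", "POST",
                content=content, content_type=content_type)
headers = {"Authorization": sender.request_header,
           "Content-Type": content_type}

# ... send the request with the HTTP client of your choice, then verify the
# Server-Authorization header that came back:
# sender.accept_response(response.headers["Server-Authorization"],
#                        content=response.content,
#                        content_type=response.headers["Content-Type"])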
kajic/django-model-changes
django_model_changes/changes.py
ChangesMixin.current_state
def current_state(self): """ Returns a ``field -> value`` dict of the current state of the instance. """ field_names = set() [field_names.add(f.name) for f in self._meta.local_fields] [field_names.add(f.attname) for f in self._meta.local_fields] return dict([(field_name, getattr(self, field_name)) for field_name in field_names])
python
def current_state(self): """ Returns a ``field -> value`` dict of the current state of the instance. """ field_names = set() [field_names.add(f.name) for f in self._meta.local_fields] [field_names.add(f.attname) for f in self._meta.local_fields] return dict([(field_name, getattr(self, field_name)) for field_name in field_names])
[ "def", "current_state", "(", "self", ")", ":", "field_names", "=", "set", "(", ")", "[", "field_names", ".", "add", "(", "f", ".", "name", ")", "for", "f", "in", "self", ".", "_meta", ".", "local_fields", "]", "[", "field_names", ".", "add", "(", "f", ".", "attname", ")", "for", "f", "in", "self", ".", "_meta", ".", "local_fields", "]", "return", "dict", "(", "[", "(", "field_name", ",", "getattr", "(", "self", ",", "field_name", ")", ")", "for", "field_name", "in", "field_names", "]", ")" ]
Returns a ``field -> value`` dict of the current state of the instance.
[ "Returns", "a", "field", "-", ">", "value", "dict", "of", "the", "current", "state", "of", "the", "instance", "." ]
train
https://github.com/kajic/django-model-changes/blob/92124ebdf29cba930eb1ced00135823b961041d3/django_model_changes/changes.py#L98-L105
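The same snapshot written with set and dict comprehensions; behaviourally equivalent to the record above and shown only as a readability sketch.

def current_state_sketch(instance):
    # Collect both name and attname for every local field, then snapshot the
    # current attribute values, as in the record above.
    field_names = {f.name for f in instance._meta.local_fields}
    field_names |= {f.attname for f in instance._meta.local_fields}
    return {name: getattr(instance, name) for name in field_names}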
kajic/django-model-changes
django_model_changes/changes.py
ChangesMixin.was_persisted
def was_persisted(self): """ Returns true if the instance was persisted (saved) in its old state. Examples:: >>> user = User() >>> user.save() >>> user.was_persisted() False >>> user = User.objects.get(pk=1) >>> user.delete() >>> user.was_persisted() True """ pk_name = self._meta.pk.name return bool(self.old_state()[pk_name])
python
def was_persisted(self): """ Returns true if the instance was persisted (saved) in its old state. Examples:: >>> user = User() >>> user.save() >>> user.was_persisted() False >>> user = User.objects.get(pk=1) >>> user.delete() >>> user.was_persisted() True """ pk_name = self._meta.pk.name return bool(self.old_state()[pk_name])
[ "def", "was_persisted", "(", "self", ")", ":", "pk_name", "=", "self", ".", "_meta", ".", "pk", ".", "name", "return", "bool", "(", "self", ".", "old_state", "(", ")", "[", "pk_name", "]", ")" ]
Returns true if the instance was persisted (saved) in its old state. Examples:: >>> user = User() >>> user.save() >>> user.was_persisted() False >>> user = User.objects.get(pk=1) >>> user.delete() >>> user.was_persisted() True
[ "Returns", "true", "if", "the", "instance", "was", "persisted", "(", "saved", ")", "in", "its", "old", "state", "." ]
train
https://github.com/kajic/django-model-changes/blob/92124ebdf29cba930eb1ced00135823b961041d3/django_model_changes/changes.py#L149-L167
nilp0inter/cpe
cpe/cpe.py
CPE._trim
def _trim(cls, s): """ Remove trailing colons from the URI back to the first non-colon. :param string s: input URI string :returns: URI string with trailing colons removed :rtype: string TEST: trailing colons necessary >>> s = '1:2::::' >>> CPE._trim(s) '1:2' TEST: trailing colons not necessary >>> s = '1:2:3:4:5:6' >>> CPE._trim(s) '1:2:3:4:5:6' """ reverse = s[::-1] idx = 0 for i in range(0, len(reverse)): if reverse[i] == ":": idx += 1 else: break # Return the substring after all trailing colons, # reversed back to its original character order. new_s = reverse[idx: len(reverse)] return new_s[::-1]
python
def _trim(cls, s): """ Remove trailing colons from the URI back to the first non-colon. :param string s: input URI string :returns: URI string with trailing colons removed :rtype: string TEST: trailing colons necessary >>> s = '1:2::::' >>> CPE._trim(s) '1:2' TEST: trailing colons not necessary >>> s = '1:2:3:4:5:6' >>> CPE._trim(s) '1:2:3:4:5:6' """ reverse = s[::-1] idx = 0 for i in range(0, len(reverse)): if reverse[i] == ":": idx += 1 else: break # Return the substring after all trailing colons, # reversed back to its original character order. new_s = reverse[idx: len(reverse)] return new_s[::-1]
[ "def", "_trim", "(", "cls", ",", "s", ")", ":", "reverse", "=", "s", "[", ":", ":", "-", "1", "]", "idx", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "reverse", ")", ")", ":", "if", "reverse", "[", "i", "]", "==", "\":\"", ":", "idx", "+=", "1", "else", ":", "break", "# Return the substring after all trailing colons,", "# reversed back to its original character order.", "new_s", "=", "reverse", "[", "idx", ":", "len", "(", "reverse", ")", "]", "return", "new_s", "[", ":", ":", "-", "1", "]" ]
Remove trailing colons from the URI back to the first non-colon. :param string s: input URI string :returns: URI string with trailing colons removed :rtype: string TEST: trailing colons necessary >>> s = '1:2::::' >>> CPE._trim(s) '1:2' TEST: trailing colons not necessary >>> s = '1:2:3:4:5:6' >>> CPE._trim(s) '1:2:3:4:5:6'
[ "Remove", "trailing", "colons", "from", "the", "URI", "back", "to", "the", "first", "non", "-", "colon", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L115-L146
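For a plain string the same result can be had with str.rstrip, shown here against the doctest values from the record; the class method keeps its explicit loop, so this is only an equivalence note.

def trim_sketch(s):
    # Drop any run of trailing ':' characters.
    return s.rstrip(":")

assert trim_sketch("1:2::::") == "1:2"
assert trim_sketch("1:2:3:4:5:6") == "1:2:3:4:5:6"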
nilp0inter/cpe
cpe/cpe.py
CPE._create_cpe_parts
def _create_cpe_parts(self, system, components): """ Create the structure to store the input type of system associated with components of CPE Name (hardware, operating system and software). :param string system: type of system associated with CPE Name :param dict components: CPE Name components to store :returns: None :exception: KeyError - incorrect system """ if system not in CPEComponent.SYSTEM_VALUES: errmsg = "Key '{0}' is not exist".format(system) raise ValueError(errmsg) elements = [] elements.append(components) pk = CPE._system_and_parts[system] self[pk] = elements
python
def _create_cpe_parts(self, system, components): """ Create the structure to store the input type of system associated with components of CPE Name (hardware, operating system and software). :param string system: type of system associated with CPE Name :param dict components: CPE Name components to store :returns: None :exception: KeyError - incorrect system """ if system not in CPEComponent.SYSTEM_VALUES: errmsg = "Key '{0}' is not exist".format(system) raise ValueError(errmsg) elements = [] elements.append(components) pk = CPE._system_and_parts[system] self[pk] = elements
[ "def", "_create_cpe_parts", "(", "self", ",", "system", ",", "components", ")", ":", "if", "system", "not", "in", "CPEComponent", ".", "SYSTEM_VALUES", ":", "errmsg", "=", "\"Key '{0}' is not exist\"", ".", "format", "(", "system", ")", "raise", "ValueError", "(", "errmsg", ")", "elements", "=", "[", "]", "elements", ".", "append", "(", "components", ")", "pk", "=", "CPE", ".", "_system_and_parts", "[", "system", "]", "self", "[", "pk", "]", "=", "elements" ]
Create the structure to store the input type of system associated with components of CPE Name (hardware, operating system and software). :param string system: type of system associated with CPE Name :param dict components: CPE Name components to store :returns: None :exception: KeyError - incorrect system
[ "Create", "the", "structure", "to", "store", "the", "input", "type", "of", "system", "associated", "with", "components", "of", "CPE", "Name", "(", "hardware", "operating", "system", "and", "software", ")", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L389-L408
nilp0inter/cpe
cpe/cpe.py
CPE._get_attribute_components
def _get_attribute_components(self, att): """ Returns the component list of input attribute. :param string att: Attribute name to get :returns: List of Component objects of the attribute in CPE Name :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att): errmsg = "Invalid attribute name '{0}' is not exist".format(att) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: lc.append(elem.get(att)) return lc
python
def _get_attribute_components(self, att): """ Returns the component list of input attribute. :param string att: Attribute name to get :returns: List of Component objects of the attribute in CPE Name :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att): errmsg = "Invalid attribute name '{0}' is not exist".format(att) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: lc.append(elem.get(att)) return lc
[ "def", "_get_attribute_components", "(", "self", ",", "att", ")", ":", "lc", "=", "[", "]", "if", "not", "CPEComponent", ".", "is_valid_attribute", "(", "att", ")", ":", "errmsg", "=", "\"Invalid attribute name '{0}' is not exist\"", ".", "format", "(", "att", ")", "raise", "ValueError", "(", "errmsg", ")", "for", "pk", "in", "CPE", ".", "CPE_PART_KEYS", ":", "elements", "=", "self", ".", "get", "(", "pk", ")", "for", "elem", "in", "elements", ":", "lc", ".", "append", "(", "elem", ".", "get", "(", "att", ")", ")", "return", "lc" ]
Returns the component list of input attribute. :param string att: Attribute name to get :returns: List of Component objects of the attribute in CPE Name :rtype: list :exception: ValueError - invalid attribute name
[ "Returns", "the", "component", "list", "of", "input", "attribute", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L410-L431
nilp0inter/cpe
cpe/cpe.py
CPE._pack_edition
def _pack_edition(self): """ Pack the values of the five arguments into the simple edition component. If all the values are blank, just return a blank. :returns: "edition", "sw_edition", "target_sw", "target_hw" and "other" attributes packed in a only value :rtype: string :exception: TypeError - incompatible version with pack operation """ COMP_KEYS = (CPEComponent.ATT_EDITION, CPEComponent.ATT_SW_EDITION, CPEComponent.ATT_TARGET_SW, CPEComponent.ATT_TARGET_HW, CPEComponent.ATT_OTHER) separator = CPEComponent2_3_URI_edpacked.SEPARATOR_COMP packed_ed = [] packed_ed.append(separator) for ck in COMP_KEYS: lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with URI".format( self.VERSION) raise TypeError(errmsg) comp = lc[0] if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): value = "" elif (isinstance(comp, CPEComponentNotApplicable)): value = CPEComponent2_3_URI.VALUE_NA else: # Component has some value; transform this original value # in URI value value = comp.as_uri_2_3() # Save the value of edition attribute if ck == CPEComponent.ATT_EDITION: ed = value # Packed the value of component packed_ed.append(value) packed_ed.append(separator) # Del the last separator packed_ed_str = "".join(packed_ed[:-1]) only_ed = [] only_ed.append(separator) only_ed.append(ed) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed_str = "".join(only_ed) if (packed_ed_str == only_ed_str): # All the extended attributes are blank, # so don't do any packing, just return ed return ed else: # Otherwise, pack the five values into a simple string # prefixed and internally delimited with the tilde return packed_ed_str
python
def _pack_edition(self): """ Pack the values of the five arguments into the simple edition component. If all the values are blank, just return a blank. :returns: "edition", "sw_edition", "target_sw", "target_hw" and "other" attributes packed in a only value :rtype: string :exception: TypeError - incompatible version with pack operation """ COMP_KEYS = (CPEComponent.ATT_EDITION, CPEComponent.ATT_SW_EDITION, CPEComponent.ATT_TARGET_SW, CPEComponent.ATT_TARGET_HW, CPEComponent.ATT_OTHER) separator = CPEComponent2_3_URI_edpacked.SEPARATOR_COMP packed_ed = [] packed_ed.append(separator) for ck in COMP_KEYS: lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with URI".format( self.VERSION) raise TypeError(errmsg) comp = lc[0] if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): value = "" elif (isinstance(comp, CPEComponentNotApplicable)): value = CPEComponent2_3_URI.VALUE_NA else: # Component has some value; transform this original value # in URI value value = comp.as_uri_2_3() # Save the value of edition attribute if ck == CPEComponent.ATT_EDITION: ed = value # Packed the value of component packed_ed.append(value) packed_ed.append(separator) # Del the last separator packed_ed_str = "".join(packed_ed[:-1]) only_ed = [] only_ed.append(separator) only_ed.append(ed) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed_str = "".join(only_ed) if (packed_ed_str == only_ed_str): # All the extended attributes are blank, # so don't do any packing, just return ed return ed else: # Otherwise, pack the five values into a simple string # prefixed and internally delimited with the tilde return packed_ed_str
[ "def", "_pack_edition", "(", "self", ")", ":", "COMP_KEYS", "=", "(", "CPEComponent", ".", "ATT_EDITION", ",", "CPEComponent", ".", "ATT_SW_EDITION", ",", "CPEComponent", ".", "ATT_TARGET_SW", ",", "CPEComponent", ".", "ATT_TARGET_HW", ",", "CPEComponent", ".", "ATT_OTHER", ")", "separator", "=", "CPEComponent2_3_URI_edpacked", ".", "SEPARATOR_COMP", "packed_ed", "=", "[", "]", "packed_ed", ".", "append", "(", "separator", ")", "for", "ck", "in", "COMP_KEYS", ":", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with URI\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "comp", "=", "lc", "[", "0", "]", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ")", ":", "value", "=", "\"\"", "elif", "(", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ")", ":", "value", "=", "CPEComponent2_3_URI", ".", "VALUE_NA", "else", ":", "# Component has some value; transform this original value", "# in URI value", "value", "=", "comp", ".", "as_uri_2_3", "(", ")", "# Save the value of edition attribute", "if", "ck", "==", "CPEComponent", ".", "ATT_EDITION", ":", "ed", "=", "value", "# Packed the value of component", "packed_ed", ".", "append", "(", "value", ")", "packed_ed", ".", "append", "(", "separator", ")", "# Del the last separator", "packed_ed_str", "=", "\"\"", ".", "join", "(", "packed_ed", "[", ":", "-", "1", "]", ")", "only_ed", "=", "[", "]", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "ed", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed_str", "=", "\"\"", ".", "join", "(", "only_ed", ")", "if", "(", "packed_ed_str", "==", "only_ed_str", ")", ":", "# All the extended attributes are blank,", "# so don't do any packing, just return ed", "return", "ed", "else", ":", "# Otherwise, pack the five values into a simple string", "# prefixed and internally delimited with the tilde", "return", "packed_ed_str" ]
Pack the values of the five arguments into the simple edition component. If all the values are blank, just return a blank. :returns: "edition", "sw_edition", "target_sw", "target_hw" and "other" attributes packed into a single value :rtype: string :exception: TypeError - incompatible version with pack operation
[ "Pack", "the", "values", "of", "the", "five", "arguments", "into", "the", "simple", "edition", "component", ".", "If", "all", "the", "values", "are", "blank", "just", "return", "a", "blank", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L433-L505
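The packing rule above can be illustrated with a small, self-contained sketch. It assumes the packed-edition separator is the tilde ("~"), as the closing comment of _pack_edition states; pack_edition below is an illustrative helper name, not part of the cpe package.

# Standalone sketch of the edition-packing rule, assuming "~" as the
# separator (per the comment in _pack_edition above). Not the library API.
def pack_edition(ed, sw_ed="", t_sw="", t_hw="", other=""):
    packed = "~".join(["", ed, sw_ed, t_sw, t_hw, other])
    only_ed = "~{0}~~~~".format(ed)
    # If every extended attribute is blank, return the plain edition value.
    if packed == only_ed:
        return ed
    return packed

print(pack_edition("pro"))                    # pro
print(pack_edition("pro", t_sw="windows_7"))  # ~pro~~windows_7~~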
nilp0inter/cpe
cpe/cpe.py
CPE.as_uri_2_3
def as_uri_2_3(self): """ Returns the CPE Name as URI string of version 2.3. :returns: CPE Name as URI string of version 2.3 :rtype: string :exception: TypeError - incompatible version """ uri = [] uri.append("cpe:/") ordered_comp_parts = { 0: CPEComponent.ATT_PART, 1: CPEComponent.ATT_VENDOR, 2: CPEComponent.ATT_PRODUCT, 3: CPEComponent.ATT_VERSION, 4: CPEComponent.ATT_UPDATE, 5: CPEComponent.ATT_EDITION, 6: CPEComponent.ATT_LANGUAGE} # Indicates if the previous component must be set depending on the # value of current component set_prev_comp = False prev_comp_list = [] for i in range(0, len(ordered_comp_parts)): ck = ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with URI".format( self.VERSION) raise TypeError(errmsg) if ck == CPEComponent.ATT_EDITION: # Call the pack() helper function to compute the proper # binding for the edition element v = self._pack_edition() if not v: set_prev_comp = True prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY) continue else: comp = lc[0] if (isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): # Logical value any v = CPEComponent2_3_URI.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v = CPEComponent2_3_URI.VALUE_NA elif isinstance(comp, CPEComponentUndefined): set_prev_comp = True prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY) continue else: # Get the value of component encoded in URI v = comp.as_uri_2_3() # Append v to the URI and add a separator uri.append(v) uri.append(CPEComponent2_3_URI.SEPARATOR_COMP) if set_prev_comp: # Set the previous attribute as logical value any v = CPEComponent2_3_URI.VALUE_ANY pos_ini = max(len(uri) - len(prev_comp_list) - 1, 1) increment = 2 # Count of inserted values for p, val in enumerate(prev_comp_list): pos = pos_ini + (p * increment) uri.insert(pos, v) uri.insert(pos + 1, CPEComponent2_3_URI.SEPARATOR_COMP) set_prev_comp = False prev_comp_list = [] # Return the URI string, with trailing separator trimmed return CPE._trim("".join(uri[:-1]))
python
def as_uri_2_3(self): """ Returns the CPE Name as URI string of version 2.3. :returns: CPE Name as URI string of version 2.3 :rtype: string :exception: TypeError - incompatible version """ uri = [] uri.append("cpe:/") ordered_comp_parts = { 0: CPEComponent.ATT_PART, 1: CPEComponent.ATT_VENDOR, 2: CPEComponent.ATT_PRODUCT, 3: CPEComponent.ATT_VERSION, 4: CPEComponent.ATT_UPDATE, 5: CPEComponent.ATT_EDITION, 6: CPEComponent.ATT_LANGUAGE} # Indicates if the previous component must be set depending on the # value of current component set_prev_comp = False prev_comp_list = [] for i in range(0, len(ordered_comp_parts)): ck = ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with URI".format( self.VERSION) raise TypeError(errmsg) if ck == CPEComponent.ATT_EDITION: # Call the pack() helper function to compute the proper # binding for the edition element v = self._pack_edition() if not v: set_prev_comp = True prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY) continue else: comp = lc[0] if (isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): # Logical value any v = CPEComponent2_3_URI.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v = CPEComponent2_3_URI.VALUE_NA elif isinstance(comp, CPEComponentUndefined): set_prev_comp = True prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY) continue else: # Get the value of component encoded in URI v = comp.as_uri_2_3() # Append v to the URI and add a separator uri.append(v) uri.append(CPEComponent2_3_URI.SEPARATOR_COMP) if set_prev_comp: # Set the previous attribute as logical value any v = CPEComponent2_3_URI.VALUE_ANY pos_ini = max(len(uri) - len(prev_comp_list) - 1, 1) increment = 2 # Count of inserted values for p, val in enumerate(prev_comp_list): pos = pos_ini + (p * increment) uri.insert(pos, v) uri.insert(pos + 1, CPEComponent2_3_URI.SEPARATOR_COMP) set_prev_comp = False prev_comp_list = [] # Return the URI string, with trailing separator trimmed return CPE._trim("".join(uri[:-1]))
[ "def", "as_uri_2_3", "(", "self", ")", ":", "uri", "=", "[", "]", "uri", ".", "append", "(", "\"cpe:/\"", ")", "ordered_comp_parts", "=", "{", "0", ":", "CPEComponent", ".", "ATT_PART", ",", "1", ":", "CPEComponent", ".", "ATT_VENDOR", ",", "2", ":", "CPEComponent", ".", "ATT_PRODUCT", ",", "3", ":", "CPEComponent", ".", "ATT_VERSION", ",", "4", ":", "CPEComponent", ".", "ATT_UPDATE", ",", "5", ":", "CPEComponent", ".", "ATT_EDITION", ",", "6", ":", "CPEComponent", ".", "ATT_LANGUAGE", "}", "# Indicates if the previous component must be set depending on the", "# value of current component", "set_prev_comp", "=", "False", "prev_comp_list", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "ordered_comp_parts", ")", ")", ":", "ck", "=", "ordered_comp_parts", "[", "i", "]", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with URI\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "if", "ck", "==", "CPEComponent", ".", "ATT_EDITION", ":", "# Call the pack() helper function to compute the proper", "# binding for the edition element", "v", "=", "self", ".", "_pack_edition", "(", ")", "if", "not", "v", ":", "set_prev_comp", "=", "True", "prev_comp_list", ".", "append", "(", "CPEComponent2_3_URI", ".", "VALUE_ANY", ")", "continue", "else", ":", "comp", "=", "lc", "[", "0", "]", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ")", ":", "# Logical value any", "v", "=", "CPEComponent2_3_URI", ".", "VALUE_ANY", "elif", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ":", "# Logical value not applicable", "v", "=", "CPEComponent2_3_URI", ".", "VALUE_NA", "elif", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", ":", "set_prev_comp", "=", "True", "prev_comp_list", ".", "append", "(", "CPEComponent2_3_URI", ".", "VALUE_ANY", ")", "continue", "else", ":", "# Get the value of component encoded in URI", "v", "=", "comp", ".", "as_uri_2_3", "(", ")", "# Append v to the URI and add a separator", "uri", ".", "append", "(", "v", ")", "uri", ".", "append", "(", "CPEComponent2_3_URI", ".", "SEPARATOR_COMP", ")", "if", "set_prev_comp", ":", "# Set the previous attribute as logical value any", "v", "=", "CPEComponent2_3_URI", ".", "VALUE_ANY", "pos_ini", "=", "max", "(", "len", "(", "uri", ")", "-", "len", "(", "prev_comp_list", ")", "-", "1", ",", "1", ")", "increment", "=", "2", "# Count of inserted values", "for", "p", ",", "val", "in", "enumerate", "(", "prev_comp_list", ")", ":", "pos", "=", "pos_ini", "+", "(", "p", "*", "increment", ")", "uri", ".", "insert", "(", "pos", ",", "v", ")", "uri", ".", "insert", "(", "pos", "+", "1", ",", "CPEComponent2_3_URI", ".", "SEPARATOR_COMP", ")", "set_prev_comp", "=", "False", "prev_comp_list", "=", "[", "]", "# Return the URI string, with trailing separator trimmed", "return", "CPE", ".", "_trim", "(", "\"\"", ".", "join", "(", "uri", "[", ":", "-", "1", "]", ")", ")" ]
Returns the CPE Name as URI string of version 2.3. :returns: CPE Name as URI string of version 2.3 :rtype: string :exception: TypeError - incompatible version
[ "Returns", "the", "CPE", "Name", "as", "URI", "string", "of", "version", "2", ".", "3", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L517-L602
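A minimal usage sketch for the URI binding; it assumes the cpe package is installed and that its top-level CPE factory auto-detects the version of the input name. The sample name and the printed result are illustrative, not taken from the library's test suite.

# Hedged usage sketch; the sample CPE Name is made up for illustration.
from cpe import CPE

c = CPE('cpe:2.3:a:acme:web_browser:1.0:*:*:*:*:*:*:*')
print(c.as_uri_2_3())   # expected shape: cpe:/a:acme:web_browser:1.0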
nilp0inter/cpe
cpe/cpe.py
CPE.as_wfn
def as_wfn(self): """ Returns the CPE Name as Well-Formed Name string of version 2.3. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version """ from .cpe2_3_wfn import CPE2_3_WFN wfn = [] wfn.append(CPE2_3_WFN.CPE_PREFIX) for i in range(0, len(CPEComponent.ordered_comp_parts)): ck = CPEComponent.ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with WFN".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] v = [] v.append(ck) v.append("=") if isinstance(comp, CPEComponentAnyValue): # Logical value any v.append(CPEComponent2_3_WFN.VALUE_ANY) elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v.append(CPEComponent2_3_WFN.VALUE_NA) elif (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty)): # Do not set the attribute continue else: # Get the simple value of WFN of component v.append('"') v.append(comp.as_wfn()) v.append('"') # Append v to the WFN and add a separator wfn.append("".join(v)) wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP) # Del the last separator wfn = wfn[:-1] # Return the WFN string wfn.append(CPE2_3_WFN.CPE_SUFFIX) return "".join(wfn)
python
def as_wfn(self): """ Returns the CPE Name as Well-Formed Name string of version 2.3. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version """ from .cpe2_3_wfn import CPE2_3_WFN wfn = [] wfn.append(CPE2_3_WFN.CPE_PREFIX) for i in range(0, len(CPEComponent.ordered_comp_parts)): ck = CPEComponent.ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with WFN".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] v = [] v.append(ck) v.append("=") if isinstance(comp, CPEComponentAnyValue): # Logical value any v.append(CPEComponent2_3_WFN.VALUE_ANY) elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v.append(CPEComponent2_3_WFN.VALUE_NA) elif (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty)): # Do not set the attribute continue else: # Get the simple value of WFN of component v.append('"') v.append(comp.as_wfn()) v.append('"') # Append v to the WFN and add a separator wfn.append("".join(v)) wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP) # Del the last separator wfn = wfn[:-1] # Return the WFN string wfn.append(CPE2_3_WFN.CPE_SUFFIX) return "".join(wfn)
[ "def", "as_wfn", "(", "self", ")", ":", "from", ".", "cpe2_3_wfn", "import", "CPE2_3_WFN", "wfn", "=", "[", "]", "wfn", ".", "append", "(", "CPE2_3_WFN", ".", "CPE_PREFIX", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "CPEComponent", ".", "ordered_comp_parts", ")", ")", ":", "ck", "=", "CPEComponent", ".", "ordered_comp_parts", "[", "i", "]", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with WFN\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "else", ":", "comp", "=", "lc", "[", "0", "]", "v", "=", "[", "]", "v", ".", "append", "(", "ck", ")", "v", ".", "append", "(", "\"=\"", ")", "if", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ":", "# Logical value any", "v", ".", "append", "(", "CPEComponent2_3_WFN", ".", "VALUE_ANY", ")", "elif", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ":", "# Logical value not applicable", "v", ".", "append", "(", "CPEComponent2_3_WFN", ".", "VALUE_NA", ")", "elif", "(", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", ")", ":", "# Do not set the attribute", "continue", "else", ":", "# Get the simple value of WFN of component", "v", ".", "append", "(", "'\"'", ")", "v", ".", "append", "(", "comp", ".", "as_wfn", "(", ")", ")", "v", ".", "append", "(", "'\"'", ")", "# Append v to the WFN and add a separator", "wfn", ".", "append", "(", "\"\"", ".", "join", "(", "v", ")", ")", "wfn", ".", "append", "(", "CPEComponent2_3_WFN", ".", "SEPARATOR_COMP", ")", "# Del the last separator", "wfn", "=", "wfn", "[", ":", "-", "1", "]", "# Return the WFN string", "wfn", ".", "append", "(", "CPE2_3_WFN", ".", "CPE_SUFFIX", ")", "return", "\"\"", ".", "join", "(", "wfn", ")" ]
Returns the CPE Name as Well-Formed Name string of version 2.3. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version
[ "Returns", "the", "CPE", "Name", "as", "Well", "-", "Formed", "Name", "string", "of", "version", "2", ".", "3", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L604-L666
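The WFN binding produced by as_wfn() is the "wfn:[...]" form of CPE 2.3: attribute=value pairs with quoted string values inside a bracketed prefix. As a rough, self-contained sketch of the string shape being assembled (the comma-space separator and quoting are assumed from the CPE 2.3 naming conventions, not read from the library constants):

# Sketch of the WFN string shape only; not the library's component classes.
def to_wfn(attrs):
    pairs = ['{0}="{1}"'.format(k, v) for k, v in attrs]
    return "wfn:[" + ", ".join(pairs) + "]"

print(to_wfn([("part", "a"), ("vendor", "acme"), ("product", "web_browser")]))
# wfn:[part="a", vendor="acme", product="web_browser"]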
nilp0inter/cpe
cpe/cpe.py
CPE.as_fs
def as_fs(self): """ Returns the CPE Name as formatted string of version 2.3. :returns: CPE Name as formatted string :rtype: string :exception: TypeError - incompatible version """ fs = [] fs.append("cpe:2.3:") for i in range(0, len(CPEComponent.ordered_comp_parts)): ck = CPEComponent.ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with formatted string".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): # Logical value any v = CPEComponent2_3_FS.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v = CPEComponent2_3_FS.VALUE_NA else: # Get the value of component encoded in formatted string v = comp.as_fs() # Append v to the formatted string then add a separator. fs.append(v) fs.append(CPEComponent2_3_FS.SEPARATOR_COMP) # Return the formatted string return CPE._trim("".join(fs[:-1]))
python
def as_fs(self): """ Returns the CPE Name as formatted string of version 2.3. :returns: CPE Name as formatted string :rtype: string :exception: TypeError - incompatible version """ fs = [] fs.append("cpe:2.3:") for i in range(0, len(CPEComponent.ordered_comp_parts)): ck = CPEComponent.ordered_comp_parts[i] lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with formatted string".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): # Logical value any v = CPEComponent2_3_FS.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v = CPEComponent2_3_FS.VALUE_NA else: # Get the value of component encoded in formatted string v = comp.as_fs() # Append v to the formatted string then add a separator. fs.append(v) fs.append(CPEComponent2_3_FS.SEPARATOR_COMP) # Return the formatted string return CPE._trim("".join(fs[:-1]))
[ "def", "as_fs", "(", "self", ")", ":", "fs", "=", "[", "]", "fs", ".", "append", "(", "\"cpe:2.3:\"", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "CPEComponent", ".", "ordered_comp_parts", ")", ")", ":", "ck", "=", "CPEComponent", ".", "ordered_comp_parts", "[", "i", "]", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with formatted string\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "else", ":", "comp", "=", "lc", "[", "0", "]", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ")", ":", "# Logical value any", "v", "=", "CPEComponent2_3_FS", ".", "VALUE_ANY", "elif", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ":", "# Logical value not applicable", "v", "=", "CPEComponent2_3_FS", ".", "VALUE_NA", "else", ":", "# Get the value of component encoded in formatted string", "v", "=", "comp", ".", "as_fs", "(", ")", "# Append v to the formatted string then add a separator.", "fs", ".", "append", "(", "v", ")", "fs", ".", "append", "(", "CPEComponent2_3_FS", ".", "SEPARATOR_COMP", ")", "# Return the formatted string", "return", "CPE", ".", "_trim", "(", "\"\"", ".", "join", "(", "fs", "[", ":", "-", "1", "]", ")", ")" ]
Returns the CPE Name as formatted string of version 2.3. :returns: CPE Name as formatted string :rtype: string :exception: TypeError - incompatible version
[ "Returns", "the", "CPE", "Name", "as", "formatted", "string", "of", "version", "2", ".", "3", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L668-L714
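A formatted string always carries the full, ordered set of eleven attributes separated by colons, with "*" standing for the logical value ANY. A self-contained sketch of that layout, independent of the library's component classes:

# Sketch of the 2.3 formatted-string layout; attribute order follows the
# CPE 2.3 specification, values default to the ANY wildcard "*".
FS_ATTS = ("part", "vendor", "product", "version", "update", "edition",
           "language", "sw_edition", "target_sw", "target_hw", "other")

def to_fs(**kwargs):
    return "cpe:2.3:" + ":".join(kwargs.get(a, "*") for a in FS_ATTS)

print(to_fs(part="a", vendor="acme", product="web_browser", version="1.0"))
# cpe:2.3:a:acme:web_browser:1.0:*:*:*:*:*:*:*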
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple._is_alphanum
def _is_alphanum(cls, c): """ Returns True if c is an uppercase letter, a lowercase letter, a digit or an underscore, otherwise False. :param string c: Character to check :returns: True if char is alphanumeric or an underscore, False otherwise :rtype: boolean TEST: a wrong character >>> c = "#" >>> CPEComponentSimple._is_alphanum(c) False """ alphanum_rxc = re.compile(CPEComponentSimple._ALPHANUM_PATTERN) return (alphanum_rxc.match(c) is not None)
python
def _is_alphanum(cls, c): """ Returns True if c is an uppercase letter, a lowercase letter, a digit or an underscore, otherwise False. :param string c: Character to check :returns: True if char is alphanumeric or an underscore, False otherwise :rtype: boolean TEST: a wrong character >>> c = "#" >>> CPEComponentSimple._is_alphanum(c) False """ alphanum_rxc = re.compile(CPEComponentSimple._ALPHANUM_PATTERN) return (alphanum_rxc.match(c) is not None)
[ "def", "_is_alphanum", "(", "cls", ",", "c", ")", ":", "alphanum_rxc", "=", "re", ".", "compile", "(", "CPEComponentSimple", ".", "_ALPHANUM_PATTERN", ")", "return", "(", "alphanum_rxc", ".", "match", "(", "c", ")", "is", "not", "None", ")" ]
Returns True if c is an uppercase letter, a lowercase letter, a digit or an underscore, otherwise False. :param string c: Character to check :returns: True if char is alphanumeric or an underscore, False otherwise :rtype: boolean TEST: a wrong character >>> c = "#" >>> CPEComponentSimple._is_alphanum(c) False
[ "Returns", "True", "if", "c", "is", "an", "uppercase", "letter", "a", "lowercase", "letter", "a", "digit", "or", "an", "underscore", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L98-L115
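The alphanumeric test above reduces to a one-character regular-expression match. A self-contained equivalent (the exact _ALPHANUM_PATTERN constant is not shown in this record, so the pattern below is assumed from the docstring: letters, digits or underscore):

import re

# Assumed pattern, derived from the docstring wording.
_ALPHANUM = re.compile(r"[A-Za-z0-9_]")

def is_alphanum(c):
    return _ALPHANUM.match(c) is not None

print(is_alphanum("a"), is_alphanum("_"), is_alphanum("#"))  # True True False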
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple._pct_encode_uri
def _pct_encode_uri(cls, c): """ Return the appropriate percent-encoding of character c (URI string). Certain characters are returned without encoding. :param string c: Character to check :returns: Encoded character as URI :rtype: string TEST: >>> c = '.' >>> CPEComponentSimple._pct_encode_uri(c) '.' TEST: >>> c = '@' >>> CPEComponentSimple._pct_encode_uri(c) '%40' """ CPEComponentSimple.spechar_to_pce['-'] = c # bound without encoding CPEComponentSimple.spechar_to_pce['.'] = c # bound without encoding return CPEComponentSimple.spechar_to_pce[c]
python
def _pct_encode_uri(cls, c): """ Return the appropriate percent-encoding of character c (URI string). Certain characters are returned without encoding. :param string c: Character to check :returns: Encoded character as URI :rtype: string TEST: >>> c = '.' >>> CPEComponentSimple._pct_encode_uri(c) '.' TEST: >>> c = '@' >>> CPEComponentSimple._pct_encode_uri(c) '%40' """ CPEComponentSimple.spechar_to_pce['-'] = c # bound without encoding CPEComponentSimple.spechar_to_pce['.'] = c # bound without encoding return CPEComponentSimple.spechar_to_pce[c]
[ "def", "_pct_encode_uri", "(", "cls", ",", "c", ")", ":", "CPEComponentSimple", ".", "spechar_to_pce", "[", "'-'", "]", "=", "c", "# bound without encoding", "CPEComponentSimple", ".", "spechar_to_pce", "[", "'.'", "]", "=", "c", "# bound without encoding", "return", "CPEComponentSimple", ".", "spechar_to_pce", "[", "c", "]" ]
Return the appropriate percent-encoding of character c (URI string). Certain characters are returned without encoding. :param string c: Character to check :returns: Encoded character as URI :rtype: string TEST: >>> c = '.' >>> CPEComponentSimple._pct_encode_uri(c) '.' TEST: >>> c = '@' >>> CPEComponentSimple._pct_encode_uri(c) '%40'
[ "Return", "the", "appropriate", "percent", "-", "encoding", "of", "character", "c", "(", "URI", "string", ")", ".", "Certain", "characters", "are", "returned", "without", "encoding", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L118-L143
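The percent-encoding table behind _pct_encode_uri is keyed by the quoted special characters, with the period and hyphen bound without encoding. A hedged stand-in for the same behaviour, computing the escape from the character's code point (the real spechar_to_pce table may differ in coverage and letter case):

# Sketch only: '.' and '-' pass through, everything else is percent-encoded.
def pct_encode(c):
    if c in ".-":
        return c
    return "%{0:02x}".format(ord(c))

print(pct_encode("."))  # .
print(pct_encode("@"))  # %40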
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple._is_valid_language
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() lang_rxc = re.compile(CPEComponentSimple._LANGTAG_PATTERN) return lang_rxc.match(comp_str) is not None
python
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() lang_rxc = re.compile(CPEComponentSimple._LANGTAG_PATTERN) return lang_rxc.match(comp_str) is not None
[ "def", "_is_valid_language", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_encoded_value", ".", "lower", "(", ")", "lang_rxc", "=", "re", ".", "compile", "(", "CPEComponentSimple", ".", "_LANGTAG_PATTERN", ")", "return", "lang_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "value", "of", "component", "in", "attribute", "language", "is", "valid", "and", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L184-L195
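The language check compiles a language-tag pattern against the lower-cased value. The exact _LANGTAG_PATTERN is not reproduced in this record; a simplified stand-in covering a primary subtag plus an optional region, in the spirit of RFC 5646, could look like:

import re

# Simplified, assumed language-tag pattern (primary subtag + optional region);
# the library's real _LANGTAG_PATTERN may be stricter.
_LANGTAG = re.compile(r"^[a-z]{2,3}(-([a-z]{2}|\d{3}))?$")

def is_valid_language(value):
    return _LANGTAG.match(value.lower()) is not None

print(is_valid_language("es-ES"), is_valid_language("en"), is_valid_language("x!"))
# True True False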
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple._is_valid_part
def _is_valid_part(self): """ Return True if the value of component in attribute "part" is valid, and otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() part_rxc = re.compile(CPEComponentSimple._PART_PATTERN) return part_rxc.match(comp_str) is not None
python
def _is_valid_part(self): """ Return True if the value of component in attribute "part" is valid, and otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() part_rxc = re.compile(CPEComponentSimple._PART_PATTERN) return part_rxc.match(comp_str) is not None
[ "def", "_is_valid_part", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_encoded_value", ".", "lower", "(", ")", "part_rxc", "=", "re", ".", "compile", "(", "CPEComponentSimple", ".", "_PART_PATTERN", ")", "return", "part_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the value of component in attribute "part" is valid, and otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "value", "of", "component", "in", "attribute", "part", "is", "valid", "and", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L197-L208
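For the part attribute the CPE specification defines three system types: "h" (hardware), "o" (operating system) and "a" (application). The stand-in check below assumes only that restriction; the library's _PART_PATTERN may additionally admit logical values.

import re

# Assumed pattern: one of the three CPE system types.
_PART = re.compile(r"^[hoa]$")

def is_valid_part(value):
    return _PART.match(value.lower()) is not None

print(is_valid_part("a"), is_valid_part("O"), is_valid_part("x"))  # True True False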
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple._parse
def _parse(self, comp_att): """ Check if the value of component is correct in the attribute "comp_att". :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component """ errmsg = "Invalid attribute '{0}'".format(comp_att) if not CPEComponent.is_valid_attribute(comp_att): raise ValueError(errmsg) comp_str = self._encoded_value errmsg = "Invalid value of attribute '{0}': {1}".format( comp_att, comp_str) # Check part (system type) value if comp_att == CPEComponentSimple.ATT_PART: if not self._is_valid_part(): raise ValueError(errmsg) # Check language value elif comp_att == CPEComponentSimple.ATT_LANGUAGE: if not self._is_valid_language(): raise ValueError(errmsg) # Check edition value elif comp_att == CPEComponentSimple.ATT_EDITION: if not self._is_valid_edition(): raise ValueError(errmsg) # Check other type of component value elif not self._is_valid_value(): raise ValueError(errmsg)
python
def _parse(self, comp_att): """ Check if the value of component is correct in the attribute "comp_att". :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component """ errmsg = "Invalid attribute '{0}'".format(comp_att) if not CPEComponent.is_valid_attribute(comp_att): raise ValueError(errmsg) comp_str = self._encoded_value errmsg = "Invalid value of attribute '{0}': {1}".format( comp_att, comp_str) # Check part (system type) value if comp_att == CPEComponentSimple.ATT_PART: if not self._is_valid_part(): raise ValueError(errmsg) # Check language value elif comp_att == CPEComponentSimple.ATT_LANGUAGE: if not self._is_valid_language(): raise ValueError(errmsg) # Check edition value elif comp_att == CPEComponentSimple.ATT_EDITION: if not self._is_valid_edition(): raise ValueError(errmsg) # Check other type of component value elif not self._is_valid_value(): raise ValueError(errmsg)
[ "def", "_parse", "(", "self", ",", "comp_att", ")", ":", "errmsg", "=", "\"Invalid attribute '{0}'\"", ".", "format", "(", "comp_att", ")", "if", "not", "CPEComponent", ".", "is_valid_attribute", "(", "comp_att", ")", ":", "raise", "ValueError", "(", "errmsg", ")", "comp_str", "=", "self", ".", "_encoded_value", "errmsg", "=", "\"Invalid value of attribute '{0}': {1}\"", ".", "format", "(", "comp_att", ",", "comp_str", ")", "# Check part (system type) value", "if", "comp_att", "==", "CPEComponentSimple", ".", "ATT_PART", ":", "if", "not", "self", ".", "_is_valid_part", "(", ")", ":", "raise", "ValueError", "(", "errmsg", ")", "# Check language value", "elif", "comp_att", "==", "CPEComponentSimple", ".", "ATT_LANGUAGE", ":", "if", "not", "self", ".", "_is_valid_language", "(", ")", ":", "raise", "ValueError", "(", "errmsg", ")", "# Check edition value", "elif", "comp_att", "==", "CPEComponentSimple", ".", "ATT_EDITION", ":", "if", "not", "self", ".", "_is_valid_edition", "(", ")", ":", "raise", "ValueError", "(", "errmsg", ")", "# Check other type of component value", "elif", "not", "self", ".", "_is_valid_value", "(", ")", ":", "raise", "ValueError", "(", "errmsg", ")" ]
Check if the value of component is correct in the attribute "comp_att". :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component
[ "Check", "if", "the", "value", "of", "component", "is", "correct", "in", "the", "attribute", "comp_att", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L223-L259
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple.as_fs
def as_fs(self): """ Returns the value of component encoded as formatted string. Inspect each character in value of component. Certain nonalpha characters pass thru without escaping into the result, but most retain escaping. :returns: Formatted string associated with component :rtype: string """ s = self._standard_value result = [] idx = 0 while (idx < len(s)): c = s[idx] # get the idx'th character of s if c != "\\": # unquoted characters pass thru unharmed result.append(c) else: # Escaped characters are examined nextchr = s[idx + 1] if (nextchr == ".") or (nextchr == "-") or (nextchr == "_"): # the period, hyphen and underscore pass unharmed result.append(nextchr) idx += 1 else: # all others retain escaping result.append("\\") result.append(nextchr) idx += 2 continue idx += 1 return "".join(result)
python
def as_fs(self): """ Returns the value of component encoded as formatted string. Inspect each character in value of component. Certain nonalpha characters pass thru without escaping into the result, but most retain escaping. :returns: Formatted string associated with component :rtype: string """ s = self._standard_value result = [] idx = 0 while (idx < len(s)): c = s[idx] # get the idx'th character of s if c != "\\": # unquoted characters pass thru unharmed result.append(c) else: # Escaped characters are examined nextchr = s[idx + 1] if (nextchr == ".") or (nextchr == "-") or (nextchr == "_"): # the period, hyphen and underscore pass unharmed result.append(nextchr) idx += 1 else: # all others retain escaping result.append("\\") result.append(nextchr) idx += 2 continue idx += 1 return "".join(result)
[ "def", "as_fs", "(", "self", ")", ":", "s", "=", "self", ".", "_standard_value", "result", "=", "[", "]", "idx", "=", "0", "while", "(", "idx", "<", "len", "(", "s", ")", ")", ":", "c", "=", "s", "[", "idx", "]", "# get the idx'th character of s", "if", "c", "!=", "\"\\\\\"", ":", "# unquoted characters pass thru unharmed", "result", ".", "append", "(", "c", ")", "else", ":", "# Escaped characters are examined", "nextchr", "=", "s", "[", "idx", "+", "1", "]", "if", "(", "nextchr", "==", "\".\"", ")", "or", "(", "nextchr", "==", "\"-\"", ")", "or", "(", "nextchr", "==", "\"_\"", ")", ":", "# the period, hyphen and underscore pass unharmed", "result", ".", "append", "(", "nextchr", ")", "idx", "+=", "1", "else", ":", "# all others retain escaping", "result", ".", "append", "(", "\"\\\\\"", ")", "result", ".", "append", "(", "nextchr", ")", "idx", "+=", "2", "continue", "idx", "+=", "1", "return", "\"\"", ".", "join", "(", "result", ")" ]
Returns the value of component encoded as formatted string. Inspect each character in value of component. Certain nonalpha characters pass thru without escaping into the result, but most retain escaping. :returns: Formatted string associated with component :rtype: string
[ "Returns", "the", "value", "of", "component", "encoded", "as", "formatted", "string", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L261-L298
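The formatted-string binding of a component only drops the escaping in front of the period, hyphen and underscore; every other quoted character keeps its backslash. A self-contained rendering of that rule (unescape_for_fs is an illustrative name, not the library API):

# Sketch mirroring the unescaping rule described above.
def unescape_for_fs(s):
    result, idx = [], 0
    while idx < len(s):
        c = s[idx]
        if c == "\\" and idx + 1 < len(s):
            nxt = s[idx + 1]
            # Period, hyphen and underscore lose their escaping; others keep it.
            result.append(nxt if nxt in "._-" else "\\" + nxt)
            idx += 2
        else:
            result.append(c)
            idx += 1
    return "".join(result)

print(unescape_for_fs(r"8\.0"))    # 8.0
print(unescape_for_fs(r"big\$"))   # big\$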
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple.as_uri_2_3
def as_uri_2_3(self): """ Returns the value of component encoded as URI string. Scans an input string s and applies the following transformations: - Pass alphanumeric characters thru untouched - Percent-encode quoted non-alphanumerics as needed - Unquoted special characters are mapped to their special forms. :returns: URI string associated with component :rtype: string """ s = self._standard_value result = [] idx = 0 while (idx < len(s)): thischar = s[idx] # get the idx'th character of s # alphanumerics (incl. underscore) pass untouched if (CPEComponentSimple._is_alphanum(thischar)): result.append(thischar) idx += 1 continue # escape character if (thischar == "\\"): idx += 1 nxtchar = s[idx] result.append(CPEComponentSimple._pct_encode_uri(nxtchar)) idx += 1 continue # Bind the unquoted '?' special character to "%01". if (thischar == "?"): result.append("%01") # Bind the unquoted '*' special character to "%02". if (thischar == "*"): result.append("%02") idx += 1 return "".join(result)
python
def as_uri_2_3(self): """ Returns the value of component encoded as URI string. Scans an input string s and applies the following transformations: - Pass alphanumeric characters thru untouched - Percent-encode quoted non-alphanumerics as needed - Unquoted special characters are mapped to their special forms. :returns: URI string associated with component :rtype: string """ s = self._standard_value result = [] idx = 0 while (idx < len(s)): thischar = s[idx] # get the idx'th character of s # alphanumerics (incl. underscore) pass untouched if (CPEComponentSimple._is_alphanum(thischar)): result.append(thischar) idx += 1 continue # escape character if (thischar == "\\"): idx += 1 nxtchar = s[idx] result.append(CPEComponentSimple._pct_encode_uri(nxtchar)) idx += 1 continue # Bind the unquoted '?' special character to "%01". if (thischar == "?"): result.append("%01") # Bind the unquoted '*' special character to "%02". if (thischar == "*"): result.append("%02") idx += 1 return "".join(result)
[ "def", "as_uri_2_3", "(", "self", ")", ":", "s", "=", "self", ".", "_standard_value", "result", "=", "[", "]", "idx", "=", "0", "while", "(", "idx", "<", "len", "(", "s", ")", ")", ":", "thischar", "=", "s", "[", "idx", "]", "# get the idx'th character of s", "# alphanumerics (incl. underscore) pass untouched", "if", "(", "CPEComponentSimple", ".", "_is_alphanum", "(", "thischar", ")", ")", ":", "result", ".", "append", "(", "thischar", ")", "idx", "+=", "1", "continue", "# escape character", "if", "(", "thischar", "==", "\"\\\\\"", ")", ":", "idx", "+=", "1", "nxtchar", "=", "s", "[", "idx", "]", "result", ".", "append", "(", "CPEComponentSimple", ".", "_pct_encode_uri", "(", "nxtchar", ")", ")", "idx", "+=", "1", "continue", "# Bind the unquoted '?' special character to \"%01\".", "if", "(", "thischar", "==", "\"?\"", ")", ":", "result", ".", "append", "(", "\"%01\"", ")", "# Bind the unquoted '*' special character to \"%02\".", "if", "(", "thischar", "==", "\"*\"", ")", ":", "result", ".", "append", "(", "\"%02\"", ")", "idx", "+=", "1", "return", "\"\"", ".", "join", "(", "result", ")" ]
Returns the value of component encoded as URI string. Scans an input string s and applies the following transformations: - Pass alphanumeric characters thru untouched - Percent-encode quoted non-alphanumerics as needed - Unquoted special characters are mapped to their special forms. :returns: URI string associated with component :rtype: string
[ "Returns", "the", "value", "of", "component", "encoded", "as", "URI", "string", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L300-L344
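In the URI binding the unquoted wildcards are mapped to the reserved sequences "%01" (single-character match) and "%02" (multi-character match), while quoted characters go through the percent-encoding table. A reduced sketch of just the wildcard mapping, assuming the input contains no quoted wildcards:

# Sketch of the wildcard mapping used by the URI binding (unquoted input only).
def bind_wildcards(s):
    return s.replace("?", "%01").replace("*", "%02")

print(bind_wildcards("openssl*"))   # openssl%02
print(bind_wildcards("??rc1"))      # %01%01rc1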
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
CPEComponentSimple.set_value
def set_value(self, comp_str, comp_att): """ Set the value of component. By default, the component has a simple value. :param string comp_str: new value of component :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component """ old_value = self._encoded_value self._encoded_value = comp_str # Check the value of component try: self._parse(comp_att) except ValueError: # Restore old value of component self._encoded_value = old_value raise # Convert encoding value to standard value (WFN) self._decode()
python
def set_value(self, comp_str, comp_att): """ Set the value of component. By default, the component has a simple value. :param string comp_str: new value of component :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component """ old_value = self._encoded_value self._encoded_value = comp_str # Check the value of component try: self._parse(comp_att) except ValueError: # Restore old value of component self._encoded_value = old_value raise # Convert encoding value to standard value (WFN) self._decode()
[ "def", "set_value", "(", "self", ",", "comp_str", ",", "comp_att", ")", ":", "old_value", "=", "self", ".", "_encoded_value", "self", ".", "_encoded_value", "=", "comp_str", "# Check the value of component", "try", ":", "self", ".", "_parse", "(", "comp_att", ")", "except", "ValueError", ":", "# Restore old value of component", "self", ".", "_encoded_value", "=", "old_value", "raise", "# Convert encoding value to standard value (WFN)", "self", ".", "_decode", "(", ")" ]
Set the value of component. By default, the component has a simple value. :param string comp_str: new value of component :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component
[ "Set", "the", "value", "of", "component", ".", "By", "default", "the", "component", "has", "a", "simple", "value", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L367-L390
nilp0inter/cpe
cpe/comp/cpecomp2_3_fs.py
CPEComponent2_3_FS._decode
def _decode(self): """ Convert the characters of string s to standard value (WFN value). Inspect each character in value of component. Copy quoted characters, with their escaping, into the result. Look for unquoted non alphanumerics and if not "*" or "?", add escaping. :exception: ValueError - invalid character in value of component """ result = [] idx = 0 s = self._encoded_value embedded = False errmsg = [] errmsg.append("Invalid character '") while (idx < len(s)): c = s[idx] # get the idx'th character of s errmsg.append(c) errmsg.append("'") errmsg_str = "".join(errmsg) if (CPEComponentSimple._is_alphanum(c)): # Alphanumeric characters pass untouched result.append(c) idx += 1 embedded = True continue if c == "\\": # Anything quoted in the bound string stays quoted # in the unbound string. result.append(s[idx: idx + 2]) idx += 2 embedded = True continue if (c == CPEComponent2_3_FS.WILDCARD_MULTI): # An unquoted asterisk must appear at the beginning or # end of the string. if (idx == 0) or (idx == (len(s) - 1)): result.append(c) idx += 1 embedded = True continue else: raise ValueError(errmsg_str) if (c == CPEComponent2_3_FS.WILDCARD_ONE): # An unquoted question mark must appear at the beginning or # end of the string, or in a leading or trailing sequence: # - ? legal at beginning or end # - embedded is false, so must be preceded by ? # - embedded is true, so must be followed by ? if (((idx == 0) or (idx == (len(s) - 1))) or ((not embedded) and (s[idx - 1] == CPEComponent2_3_FS.WILDCARD_ONE)) or (embedded and (s[idx + 1] == CPEComponent2_3_FS.WILDCARD_ONE))): result.append(c) idx += 1 embedded = False continue else: raise ValueError(errmsg_str) # all other characters must be quoted result.append("\\") result.append(c) idx += 1 embedded = True self._standard_value = "".join(result)
python
def _decode(self): """ Convert the characters of string s to standard value (WFN value). Inspect each character in value of component. Copy quoted characters, with their escaping, into the result. Look for unquoted non alphanumerics and if not "*" or "?", add escaping. :exception: ValueError - invalid character in value of component """ result = [] idx = 0 s = self._encoded_value embedded = False errmsg = [] errmsg.append("Invalid character '") while (idx < len(s)): c = s[idx] # get the idx'th character of s errmsg.append(c) errmsg.append("'") errmsg_str = "".join(errmsg) if (CPEComponentSimple._is_alphanum(c)): # Alphanumeric characters pass untouched result.append(c) idx += 1 embedded = True continue if c == "\\": # Anything quoted in the bound string stays quoted # in the unbound string. result.append(s[idx: idx + 2]) idx += 2 embedded = True continue if (c == CPEComponent2_3_FS.WILDCARD_MULTI): # An unquoted asterisk must appear at the beginning or # end of the string. if (idx == 0) or (idx == (len(s) - 1)): result.append(c) idx += 1 embedded = True continue else: raise ValueError(errmsg_str) if (c == CPEComponent2_3_FS.WILDCARD_ONE): # An unquoted question mark must appear at the beginning or # end of the string, or in a leading or trailing sequence: # - ? legal at beginning or end # - embedded is false, so must be preceded by ? # - embedded is true, so must be followed by ? if (((idx == 0) or (idx == (len(s) - 1))) or ((not embedded) and (s[idx - 1] == CPEComponent2_3_FS.WILDCARD_ONE)) or (embedded and (s[idx + 1] == CPEComponent2_3_FS.WILDCARD_ONE))): result.append(c) idx += 1 embedded = False continue else: raise ValueError(errmsg_str) # all other characters must be quoted result.append("\\") result.append(c) idx += 1 embedded = True self._standard_value = "".join(result)
[ "def", "_decode", "(", "self", ")", ":", "result", "=", "[", "]", "idx", "=", "0", "s", "=", "self", ".", "_encoded_value", "embedded", "=", "False", "errmsg", "=", "[", "]", "errmsg", ".", "append", "(", "\"Invalid character '\"", ")", "while", "(", "idx", "<", "len", "(", "s", ")", ")", ":", "c", "=", "s", "[", "idx", "]", "# get the idx'th character of s", "errmsg", ".", "append", "(", "c", ")", "errmsg", ".", "append", "(", "\"'\"", ")", "errmsg_str", "=", "\"\"", ".", "join", "(", "errmsg", ")", "if", "(", "CPEComponentSimple", ".", "_is_alphanum", "(", "c", ")", ")", ":", "# Alphanumeric characters pass untouched", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "True", "continue", "if", "c", "==", "\"\\\\\"", ":", "# Anything quoted in the bound string stays quoted", "# in the unbound string.", "result", ".", "append", "(", "s", "[", "idx", ":", "idx", "+", "2", "]", ")", "idx", "+=", "2", "embedded", "=", "True", "continue", "if", "(", "c", "==", "CPEComponent2_3_FS", ".", "WILDCARD_MULTI", ")", ":", "# An unquoted asterisk must appear at the beginning or", "# end of the string.", "if", "(", "idx", "==", "0", ")", "or", "(", "idx", "==", "(", "len", "(", "s", ")", "-", "1", ")", ")", ":", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "True", "continue", "else", ":", "raise", "ValueError", "(", "errmsg_str", ")", "if", "(", "c", "==", "CPEComponent2_3_FS", ".", "WILDCARD_ONE", ")", ":", "# An unquoted question mark must appear at the beginning or", "# end of the string, or in a leading or trailing sequence:", "# - ? legal at beginning or end", "# - embedded is false, so must be preceded by ?", "# - embedded is true, so must be followed by ?", "if", "(", "(", "(", "idx", "==", "0", ")", "or", "(", "idx", "==", "(", "len", "(", "s", ")", "-", "1", ")", ")", ")", "or", "(", "(", "not", "embedded", ")", "and", "(", "s", "[", "idx", "-", "1", "]", "==", "CPEComponent2_3_FS", ".", "WILDCARD_ONE", ")", ")", "or", "(", "embedded", "and", "(", "s", "[", "idx", "+", "1", "]", "==", "CPEComponent2_3_FS", ".", "WILDCARD_ONE", ")", ")", ")", ":", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "False", "continue", "else", ":", "raise", "ValueError", "(", "errmsg_str", ")", "# all other characters must be quoted", "result", ".", "append", "(", "\"\\\\\"", ")", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "True", "self", ".", "_standard_value", "=", "\"\"", ".", "join", "(", "result", ")" ]
Convert the characters of string s to standard value (WFN value). Inspect each character in value of component. Copy quoted characters, with their escaping, into the result. Look for unquoted non alphanumerics and if not "*" or "?", add escaping. :exception: ValueError - invalid character in value of component
[ "Convert", "the", "characters", "of", "string", "s", "to", "standard", "value", "(", "WFN", "value", ")", ".", "Inspect", "each", "character", "in", "value", "of", "component", ".", "Copy", "quoted", "characters", "with", "their", "escaping", "into", "the", "result", ".", "Look", "for", "unquoted", "non", "alphanumerics", "and", "if", "not", "*", "or", "?", "add", "escaping", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3_fs.py#L93-L165
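The placement rules enforced while unbinding a formatted string are: an unquoted asterisk may only open or close the value, and an unquoted question mark may only appear at either end or as part of a leading or trailing run. A reduced, self-contained check for the asterisk rule alone (quoting is ignored here for brevity):

# Sketch of the asterisk placement rule only.
def asterisk_ok(s):
    for idx, c in enumerate(s):
        if c == "*" and idx not in (0, len(s) - 1):
            return False
    return True

print(asterisk_ok("*win*"), asterisk_ok("wi*n"))  # True False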
nilp0inter/cpe
cpe/cpe1_1.py
CPE1_1._parse
def _parse(self): """ Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name """ # CPE Name must not have whitespaces if (self.cpe_str.find(" ") != -1): errmsg = "Bad-formed CPE Name: it must not have whitespaces" raise ValueError(errmsg) # Partitioning of CPE Name in parts parts_match = CPE1_1._parts_rxc.match(self.cpe_str) # ################################ # Validation of CPE Name parts # # ################################ if (parts_match is None): errmsg = "Bad-formed CPE Name: not correct definition of CPE Name parts" raise ValueError(errmsg) CPE_PART_KEYS = (CPE.KEY_HW, CPE.KEY_OS, CPE.KEY_APP) for pk in CPE_PART_KEYS: # Get part content part = parts_match.group(pk) elements = [] if (part is not None): # Part of CPE Name defined # ############################### # Validation of part elements # # ############################### # semicolon (;) is used to separate the part elements for part_elem in part.split(CPE1_1.ELEMENT_SEPARATOR): j = 1 # #################################### # Validation of element components # # #################################### components = dict() # colon (:) is used to separate the element components for elem_comp in part_elem.split(CPEComponent1_1.SEPARATOR_COMP): comp_att = CPEComponent.ordered_comp_parts[j] if elem_comp == CPEComponent1_1.VALUE_EMPTY: comp = CPEComponentEmpty() else: try: comp = CPEComponent1_1(elem_comp, comp_att) except ValueError: errmsg = "Bad-formed CPE Name: not correct value: {0}".format( elem_comp) raise ValueError(errmsg) # Identification of component name components[comp_att] = comp j += 1 # Adds the components of version 2.3 of CPE not defined # in version 1.1 for idx in range(j, len(CPEComponent.ordered_comp_parts)): comp_att = CPEComponent.ordered_comp_parts[idx] components[comp_att] = CPEComponentUndefined() # Get the type of system associated with CPE Name and # store it in element as component if (pk == CPE.KEY_HW): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_HW, CPEComponent.ATT_PART) elif (pk == CPE.KEY_OS): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_OS, CPEComponent.ATT_PART) elif (pk == CPE.KEY_APP): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_APP, CPEComponent.ATT_PART) # Store the element identified elements.append(components) # Store the part identified self[pk] = elements self[CPE.KEY_UNDEFINED] = []
python
def _parse(self): """ Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name """ # CPE Name must not have whitespaces if (self.cpe_str.find(" ") != -1): errmsg = "Bad-formed CPE Name: it must not have whitespaces" raise ValueError(errmsg) # Partitioning of CPE Name in parts parts_match = CPE1_1._parts_rxc.match(self.cpe_str) # ################################ # Validation of CPE Name parts # # ################################ if (parts_match is None): errmsg = "Bad-formed CPE Name: not correct definition of CPE Name parts" raise ValueError(errmsg) CPE_PART_KEYS = (CPE.KEY_HW, CPE.KEY_OS, CPE.KEY_APP) for pk in CPE_PART_KEYS: # Get part content part = parts_match.group(pk) elements = [] if (part is not None): # Part of CPE Name defined # ############################### # Validation of part elements # # ############################### # semicolon (;) is used to separate the part elements for part_elem in part.split(CPE1_1.ELEMENT_SEPARATOR): j = 1 # #################################### # Validation of element components # # #################################### components = dict() # colon (:) is used to separate the element components for elem_comp in part_elem.split(CPEComponent1_1.SEPARATOR_COMP): comp_att = CPEComponent.ordered_comp_parts[j] if elem_comp == CPEComponent1_1.VALUE_EMPTY: comp = CPEComponentEmpty() else: try: comp = CPEComponent1_1(elem_comp, comp_att) except ValueError: errmsg = "Bad-formed CPE Name: not correct value: {0}".format( elem_comp) raise ValueError(errmsg) # Identification of component name components[comp_att] = comp j += 1 # Adds the components of version 2.3 of CPE not defined # in version 1.1 for idx in range(j, len(CPEComponent.ordered_comp_parts)): comp_att = CPEComponent.ordered_comp_parts[idx] components[comp_att] = CPEComponentUndefined() # Get the type of system associated with CPE Name and # store it in element as component if (pk == CPE.KEY_HW): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_HW, CPEComponent.ATT_PART) elif (pk == CPE.KEY_OS): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_OS, CPEComponent.ATT_PART) elif (pk == CPE.KEY_APP): components[CPEComponent.ATT_PART] = CPEComponent1_1( CPEComponent.VALUE_PART_APP, CPEComponent.ATT_PART) # Store the element identified elements.append(components) # Store the part identified self[pk] = elements self[CPE.KEY_UNDEFINED] = []
[ "def", "_parse", "(", "self", ")", ":", "# CPE Name must not have whitespaces", "if", "(", "self", ".", "cpe_str", ".", "find", "(", "\" \"", ")", "!=", "-", "1", ")", ":", "errmsg", "=", "\"Bad-formed CPE Name: it must not have whitespaces\"", "raise", "ValueError", "(", "errmsg", ")", "# Partitioning of CPE Name in parts", "parts_match", "=", "CPE1_1", ".", "_parts_rxc", ".", "match", "(", "self", ".", "cpe_str", ")", "# ################################", "# Validation of CPE Name parts #", "# ################################", "if", "(", "parts_match", "is", "None", ")", ":", "errmsg", "=", "\"Bad-formed CPE Name: not correct definition of CPE Name parts\"", "raise", "ValueError", "(", "errmsg", ")", "CPE_PART_KEYS", "=", "(", "CPE", ".", "KEY_HW", ",", "CPE", ".", "KEY_OS", ",", "CPE", ".", "KEY_APP", ")", "for", "pk", "in", "CPE_PART_KEYS", ":", "# Get part content", "part", "=", "parts_match", ".", "group", "(", "pk", ")", "elements", "=", "[", "]", "if", "(", "part", "is", "not", "None", ")", ":", "# Part of CPE Name defined", "# ###############################", "# Validation of part elements #", "# ###############################", "# semicolon (;) is used to separate the part elements", "for", "part_elem", "in", "part", ".", "split", "(", "CPE1_1", ".", "ELEMENT_SEPARATOR", ")", ":", "j", "=", "1", "# ####################################", "# Validation of element components #", "# ####################################", "components", "=", "dict", "(", ")", "# colon (:) is used to separate the element components", "for", "elem_comp", "in", "part_elem", ".", "split", "(", "CPEComponent1_1", ".", "SEPARATOR_COMP", ")", ":", "comp_att", "=", "CPEComponent", ".", "ordered_comp_parts", "[", "j", "]", "if", "elem_comp", "==", "CPEComponent1_1", ".", "VALUE_EMPTY", ":", "comp", "=", "CPEComponentEmpty", "(", ")", "else", ":", "try", ":", "comp", "=", "CPEComponent1_1", "(", "elem_comp", ",", "comp_att", ")", "except", "ValueError", ":", "errmsg", "=", "\"Bad-formed CPE Name: not correct value: {0}\"", ".", "format", "(", "elem_comp", ")", "raise", "ValueError", "(", "errmsg", ")", "# Identification of component name", "components", "[", "comp_att", "]", "=", "comp", "j", "+=", "1", "# Adds the components of version 2.3 of CPE not defined", "# in version 1.1", "for", "idx", "in", "range", "(", "j", ",", "len", "(", "CPEComponent", ".", "ordered_comp_parts", ")", ")", ":", "comp_att", "=", "CPEComponent", ".", "ordered_comp_parts", "[", "idx", "]", "components", "[", "comp_att", "]", "=", "CPEComponentUndefined", "(", ")", "# Get the type of system associated with CPE Name and", "# store it in element as component", "if", "(", "pk", "==", "CPE", ".", "KEY_HW", ")", ":", "components", "[", "CPEComponent", ".", "ATT_PART", "]", "=", "CPEComponent1_1", "(", "CPEComponent", ".", "VALUE_PART_HW", ",", "CPEComponent", ".", "ATT_PART", ")", "elif", "(", "pk", "==", "CPE", ".", "KEY_OS", ")", ":", "components", "[", "CPEComponent", ".", "ATT_PART", "]", "=", "CPEComponent1_1", "(", "CPEComponent", ".", "VALUE_PART_OS", ",", "CPEComponent", ".", "ATT_PART", ")", "elif", "(", "pk", "==", "CPE", ".", "KEY_APP", ")", ":", "components", "[", "CPEComponent", ".", "ATT_PART", "]", "=", "CPEComponent1_1", "(", "CPEComponent", ".", "VALUE_PART_APP", ",", "CPEComponent", ".", "ATT_PART", ")", "# Store the element identified", "elements", ".", "append", "(", "components", ")", "# Store the part identified", "self", "[", "pk", "]", "=", "elements", "self", "[", "CPE", ".", "KEY_UNDEFINED", "]", "=", "[", 
"]" ]
Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name
[ "Checks", "if", "the", "CPE", "Name", "is", "valid", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe1_1.py#L173-L264
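Within each 1.1 part the parser splits elements on the semicolon and components on the colon, padding the remaining 2.3 attributes as undefined. A stripped-down sketch of that inner split (the surrounding part regex is not reproduced in this record, so only the element/component stage is shown):

# Sketch of the element/component split used for a single 1.1 part.
def split_part(part):
    elements = []
    for element in part.split(";"):
        elements.append(element.split(":"))
    return elements

print(split_part("sun:sunos:5.9;bea:weblogic:8.1"))
# [['sun', 'sunos', '5.9'], ['bea', 'weblogic', '8.1']]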
nilp0inter/cpe
cpe/cpe1_1.py
CPE1_1.get_attribute_values
def get_attribute_values(self, att_name): """ Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att_name): errmsg = "Invalid attribute name: {0}".format(att_name) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: comp = elem.get(att_name) if (isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentUndefined)): value = CPEComponent1_1.VALUE_EMPTY else: value = comp.get_value() lc.append(value) return lc
python
def get_attribute_values(self, att_name): """ Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att_name): errmsg = "Invalid attribute name: {0}".format(att_name) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: comp = elem.get(att_name) if (isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentUndefined)): value = CPEComponent1_1.VALUE_EMPTY else: value = comp.get_value() lc.append(value) return lc
[ "def", "get_attribute_values", "(", "self", ",", "att_name", ")", ":", "lc", "=", "[", "]", "if", "not", "CPEComponent", ".", "is_valid_attribute", "(", "att_name", ")", ":", "errmsg", "=", "\"Invalid attribute name: {0}\"", ".", "format", "(", "att_name", ")", "raise", "ValueError", "(", "errmsg", ")", "for", "pk", "in", "CPE", ".", "CPE_PART_KEYS", ":", "elements", "=", "self", ".", "get", "(", "pk", ")", "for", "elem", "in", "elements", ":", "comp", "=", "elem", ".", "get", "(", "att_name", ")", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", ")", ":", "value", "=", "CPEComponent1_1", ".", "VALUE_EMPTY", "else", ":", "value", "=", "comp", ".", "get_value", "(", ")", "lc", ".", "append", "(", "value", ")", "return", "lc" ]
Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name
[ "Returns", "the", "values", "of", "attribute", "att_name", "of", "CPE", "Name", ".", "By", "default", "a", "only", "element", "in", "each", "part", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe1_1.py#L318-L348
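A short sketch of the accessor above, again with an assumed example name: get_attribute_values() collects one value per element of the parsed name and raises ValueError for an attribute name the CPE model does not define.

from cpe.cpe1_1 import CPE1_1

c = CPE1_1("cpe://redhat:enterprise_linux:3:as/apache:httpd:2.0.52")  # assumed example
print(c.get_attribute_values("vendor"))    # one vendor value per element
print(c.get_attribute_values("version"))   # one version value per element

try:
    c.get_attribute_values("flavour")      # not a CPE attribute name
except ValueError as exc:
    print("rejected:", exc)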
nilp0inter/cpe
cpe/comp/cpecomp2_3_uri_edpacked.py
CPEComponent2_3_URI_edpacked.set_value
def set_value(self, comp_str): """ Set the value of component. :param string comp_str: value of component :returns: None :exception: ValueError - incorrect value of component """ self._is_negated = False self._encoded_value = comp_str self._standard_value = super( CPEComponent2_3_URI_edpacked, self)._decode()
python
def set_value(self, comp_str): """ Set the value of component. :param string comp_str: value of component :returns: None :exception: ValueError - incorrect value of component """ self._is_negated = False self._encoded_value = comp_str self._standard_value = super( CPEComponent2_3_URI_edpacked, self)._decode()
[ "def", "set_value", "(", "self", ",", "comp_str", ")", ":", "self", ".", "_is_negated", "=", "False", "self", ".", "_encoded_value", "=", "comp_str", "self", ".", "_standard_value", "=", "super", "(", "CPEComponent2_3_URI_edpacked", ",", "self", ")", ".", "_decode", "(", ")" ]
Set the value of component. :param string comp_str: value of component :returns: None :exception: ValueError - incorrect value of component
[ "Set", "the", "value", "of", "component", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3_uri_edpacked.py#L91-L103
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
CPEComponent1_1._decode
def _decode(self): """ Convert the encoded value of component to standard value (WFN value). """ s = self._encoded_value elements = s.replace('~', '').split('!') dec_elements = [] for elem in elements: result = [] idx = 0 while (idx < len(elem)): # Get the idx'th character of s c = elem[idx] if (c in CPEComponent1_1.NON_STANDARD_VALUES): # Escape character result.append("\\") result.append(c) else: # Do nothing result.append(c) idx += 1 dec_elements.append("".join(result)) self._standard_value = dec_elements
python
def _decode(self): """ Convert the encoded value of component to standard value (WFN value). """ s = self._encoded_value elements = s.replace('~', '').split('!') dec_elements = [] for elem in elements: result = [] idx = 0 while (idx < len(elem)): # Get the idx'th character of s c = elem[idx] if (c in CPEComponent1_1.NON_STANDARD_VALUES): # Escape character result.append("\\") result.append(c) else: # Do nothing result.append(c) idx += 1 dec_elements.append("".join(result)) self._standard_value = dec_elements
[ "def", "_decode", "(", "self", ")", ":", "s", "=", "self", ".", "_encoded_value", "elements", "=", "s", ".", "replace", "(", "'~'", ",", "''", ")", ".", "split", "(", "'!'", ")", "dec_elements", "=", "[", "]", "for", "elem", "in", "elements", ":", "result", "=", "[", "]", "idx", "=", "0", "while", "(", "idx", "<", "len", "(", "elem", ")", ")", ":", "# Get the idx'th character of s", "c", "=", "elem", "[", "idx", "]", "if", "(", "c", "in", "CPEComponent1_1", ".", "NON_STANDARD_VALUES", ")", ":", "# Escape character", "result", ".", "append", "(", "\"\\\\\"", ")", "result", ".", "append", "(", "c", ")", "else", ":", "# Do nothing", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "dec_elements", ".", "append", "(", "\"\"", ".", "join", "(", "result", ")", ")", "self", ".", "_standard_value", "=", "dec_elements" ]
Convert the encoded value of component to standard value (WFN value).
[ "Convert", "the", "encoded", "value", "of", "component", "to", "standard", "value", "(", "WFN", "value", ")", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L156-L183
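A simplified restatement of the decoding idea above, not the library's exact code: a CPE 1.1 component may bundle several values with '!' and negate with a leading '~'; decoding drops the '~', splits on '!', and backslash-escapes the characters the WFN grammar treats as special. The escaped-character set below is an assumption standing in for CPEComponent1_1.NON_STANDARD_VALUES.

NON_STANDARD = set("-.~%")  # assumption; the real set is NON_STANDARD_VALUES

def decode_1_1(encoded):
    values = encoded.replace("~", "").split("!")
    return ["".join("\\" + ch if ch in NON_STANDARD else ch for ch in value)
            for value in values]

print(decode_1_1("xp!vista"))  # ['xp', 'vista']
print(decode_1_1("8.0"))       # ['8\\.0']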
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
CPEComponent1_1._is_valid_value
def _is_valid_value(self): """ Return True if the value of component in generic attribute is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value value_pattern = [] value_pattern.append("^((") value_pattern.append("~[") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+") value_pattern.append(")|(") value_pattern.append("[") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+(![") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+)*") value_pattern.append("))$") value_rxc = re.compile("".join(value_pattern)) return value_rxc.match(comp_str) is not None
python
def _is_valid_value(self): """ Return True if the value of component in generic attribute is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value value_pattern = [] value_pattern.append("^((") value_pattern.append("~[") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+") value_pattern.append(")|(") value_pattern.append("[") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+(![") value_pattern.append(CPEComponent1_1._STRING) value_pattern.append("]+)*") value_pattern.append("))$") value_rxc = re.compile("".join(value_pattern)) return value_rxc.match(comp_str) is not None
[ "def", "_is_valid_value", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_encoded_value", "value_pattern", "=", "[", "]", "value_pattern", ".", "append", "(", "\"^((\"", ")", "value_pattern", ".", "append", "(", "\"~[\"", ")", "value_pattern", ".", "append", "(", "CPEComponent1_1", ".", "_STRING", ")", "value_pattern", ".", "append", "(", "\"]+\"", ")", "value_pattern", ".", "append", "(", "\")|(\"", ")", "value_pattern", ".", "append", "(", "\"[\"", ")", "value_pattern", ".", "append", "(", "CPEComponent1_1", ".", "_STRING", ")", "value_pattern", ".", "append", "(", "\"]+(![\"", ")", "value_pattern", ".", "append", "(", "CPEComponent1_1", ".", "_STRING", ")", "value_pattern", ".", "append", "(", "\"]+)*\"", ")", "value_pattern", ".", "append", "(", "\"))$\"", ")", "value_rxc", "=", "re", ".", "compile", "(", "\"\"", ".", "join", "(", "value_pattern", ")", ")", "return", "value_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the value of component in generic attribute is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "value", "of", "component", "in", "generic", "attribute", "is", "valid", "and", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L185-L210
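A sketch of the value grammar that the validator above encodes: either a single value negated with a leading '~', or one or more values joined by '!'. The character class is an assumption standing in for CPEComponent1_1._STRING.

import re

STRING = r"[a-z0-9._\-]"  # assumed stand-in for CPEComponent1_1._STRING
VALUE_RX = re.compile(r"^((~{0}+)|({0}+(!{0}+)*))$".format(STRING))

for value in ("xp", "xp!vista", "~vista", "xp!~vista", "xp vista"):
    print(value, "->", VALUE_RX.match(value) is not None)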
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
CPEComponent1_1.as_wfn
def as_wfn(self): r""" Returns the value of compoment encoded as Well-Formed Name (WFN) string. :returns: WFN string :rtype: string TEST: >>> val = 'xp!vista' >>> comp1 = CPEComponent1_1(val, CPEComponentSimple.ATT_VERSION) >>> comp1.as_wfn() 'xp\\!vista' """ result = [] for s in self._standard_value: result.append(s) result.append(CPEComponent1_1._ESCAPE_SEPARATOR) return "".join(result[0:-1])
python
def as_wfn(self): r""" Returns the value of compoment encoded as Well-Formed Name (WFN) string. :returns: WFN string :rtype: string TEST: >>> val = 'xp!vista' >>> comp1 = CPEComponent1_1(val, CPEComponentSimple.ATT_VERSION) >>> comp1.as_wfn() 'xp\\!vista' """ result = [] for s in self._standard_value: result.append(s) result.append(CPEComponent1_1._ESCAPE_SEPARATOR) return "".join(result[0:-1])
[ "def", "as_wfn", "(", "self", ")", ":", "result", "=", "[", "]", "for", "s", "in", "self", ".", "_standard_value", ":", "result", ".", "append", "(", "s", ")", "result", ".", "append", "(", "CPEComponent1_1", ".", "_ESCAPE_SEPARATOR", ")", "return", "\"\"", ".", "join", "(", "result", "[", "0", ":", "-", "1", "]", ")" ]
r""" Returns the value of compoment encoded as Well-Formed Name (WFN) string. :returns: WFN string :rtype: string TEST: >>> val = 'xp!vista' >>> comp1 = CPEComponent1_1(val, CPEComponentSimple.ATT_VERSION) >>> comp1.as_wfn() 'xp\\!vista'
[ "r", "Returns", "the", "value", "of", "compoment", "encoded", "as", "Well", "-", "Formed", "Name", "(", "WFN", ")", "string", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L309-L331
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
CPEComponent1_1.set_value
def set_value(self, comp_str, comp_att): """ Set the value of component. By default, the component has a simple value. :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component TEST: >>> val = 'xp!vista' >>> val2 = 'sp2' >>> att = CPEComponentSimple.ATT_VERSION >>> comp1 = CPEComponent1_1(val, att) >>> comp1.set_value(val2, att) >>> comp1.get_value() 'sp2' """ super(CPEComponent1_1, self).set_value(comp_str, comp_att) self._is_negated = comp_str.startswith('~')
python
def set_value(self, comp_str, comp_att): """ Set the value of component. By default, the component has a simple value. :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component TEST: >>> val = 'xp!vista' >>> val2 = 'sp2' >>> att = CPEComponentSimple.ATT_VERSION >>> comp1 = CPEComponent1_1(val, att) >>> comp1.set_value(val2, att) >>> comp1.get_value() 'sp2' """ super(CPEComponent1_1, self).set_value(comp_str, comp_att) self._is_negated = comp_str.startswith('~')
[ "def", "set_value", "(", "self", ",", "comp_str", ",", "comp_att", ")", ":", "super", "(", "CPEComponent1_1", ",", "self", ")", ".", "set_value", "(", "comp_str", ",", "comp_att", ")", "self", ".", "_is_negated", "=", "comp_str", ".", "startswith", "(", "'~'", ")" ]
Set the value of component. By default, the component has a simple value. :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component TEST: >>> val = 'xp!vista' >>> val2 = 'sp2' >>> att = CPEComponentSimple.ATT_VERSION >>> comp1 = CPEComponent1_1(val, att) >>> comp1.set_value(val2, att) >>> comp1.get_value() 'sp2'
[ "Set", "the", "value", "of", "component", ".", "By", "default", "the", "component", "has", "a", "simple", "value", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L333-L354
nilp0inter/cpe
cpe/cpe2_3_uri.py
CPE2_3_URI._create_component
def _create_component(cls, att, value): """ Returns a component with value "value". :param string att: Attribute name :param string value: Attribute value :returns: Component object created :rtype: CPEComponent :exception: ValueError - invalid value of attribute """ if value == CPEComponent2_3_URI.VALUE_UNDEFINED: comp = CPEComponentUndefined() elif (value == CPEComponent2_3_URI.VALUE_ANY or value == CPEComponent2_3_URI.VALUE_EMPTY): comp = CPEComponentAnyValue() elif (value == CPEComponent2_3_URI.VALUE_NA): comp = CPEComponentNotApplicable() else: comp = CPEComponentNotApplicable() try: comp = CPEComponent2_3_URI(value, att) except ValueError: errmsg = "Invalid value of attribute '{0}': {1} ".format(att, value) raise ValueError(errmsg) return comp
python
def _create_component(cls, att, value): """ Returns a component with value "value". :param string att: Attribute name :param string value: Attribute value :returns: Component object created :rtype: CPEComponent :exception: ValueError - invalid value of attribute """ if value == CPEComponent2_3_URI.VALUE_UNDEFINED: comp = CPEComponentUndefined() elif (value == CPEComponent2_3_URI.VALUE_ANY or value == CPEComponent2_3_URI.VALUE_EMPTY): comp = CPEComponentAnyValue() elif (value == CPEComponent2_3_URI.VALUE_NA): comp = CPEComponentNotApplicable() else: comp = CPEComponentNotApplicable() try: comp = CPEComponent2_3_URI(value, att) except ValueError: errmsg = "Invalid value of attribute '{0}': {1} ".format(att, value) raise ValueError(errmsg) return comp
[ "def", "_create_component", "(", "cls", ",", "att", ",", "value", ")", ":", "if", "value", "==", "CPEComponent2_3_URI", ".", "VALUE_UNDEFINED", ":", "comp", "=", "CPEComponentUndefined", "(", ")", "elif", "(", "value", "==", "CPEComponent2_3_URI", ".", "VALUE_ANY", "or", "value", "==", "CPEComponent2_3_URI", ".", "VALUE_EMPTY", ")", ":", "comp", "=", "CPEComponentAnyValue", "(", ")", "elif", "(", "value", "==", "CPEComponent2_3_URI", ".", "VALUE_NA", ")", ":", "comp", "=", "CPEComponentNotApplicable", "(", ")", "else", ":", "comp", "=", "CPEComponentNotApplicable", "(", ")", "try", ":", "comp", "=", "CPEComponent2_3_URI", "(", "value", ",", "att", ")", "except", "ValueError", ":", "errmsg", "=", "\"Invalid value of attribute '{0}': {1} \"", ".", "format", "(", "att", ",", "value", ")", "raise", "ValueError", "(", "errmsg", ")", "return", "comp" ]
Returns a component with value "value". :param string att: Attribute name :param string value: Attribute value :returns: Component object created :rtype: CPEComponent :exception: ValueError - invalid value of attribute
[ "Returns", "a", "component", "with", "value", "value", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_uri.py#L104-L131
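A simplified restatement of the dispatch in _create_component above, not the library's code; the sentinel constants (VALUE_UNDEFINED, VALUE_ANY, VALUE_EMPTY, VALUE_NA) are kept as placeholder strings because their literal encodings are not shown in the record.

UNDEFINED, ANY, EMPTY, NA = "undefined", "any", "", "na"  # placeholders only

def classify(value):
    # Mirrors the branch order of _create_component.
    if value == UNDEFINED:
        return "CPEComponentUndefined"
    if value in (ANY, EMPTY):
        return "CPEComponentAnyValue"
    if value == NA:
        return "CPEComponentNotApplicable"
    return "CPEComponent2_3_URI"  # simple value; may raise ValueError if invalid

for v in (UNDEFINED, ANY, NA, "internet_explorer"):
    print(repr(v), "->", classify(v))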
nilp0inter/cpe
cpe/cpe2_3_uri.py
CPE2_3_URI._unpack_edition
def _unpack_edition(cls, value): """ Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute """ components = value.split(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) d = dict() ed = components[1] sw_ed = components[2] t_sw = components[3] t_hw = components[4] oth = components[5] ck = CPEComponent.ATT_EDITION d[ck] = CPE2_3_URI._create_component(ck, ed) ck = CPEComponent.ATT_SW_EDITION d[ck] = CPE2_3_URI._create_component(ck, sw_ed) ck = CPEComponent.ATT_TARGET_SW d[ck] = CPE2_3_URI._create_component(ck, t_sw) ck = CPEComponent.ATT_TARGET_HW d[ck] = CPE2_3_URI._create_component(ck, t_hw) ck = CPEComponent.ATT_OTHER d[ck] = CPE2_3_URI._create_component(ck, oth) return d
python
def _unpack_edition(cls, value): """ Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute """ components = value.split(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) d = dict() ed = components[1] sw_ed = components[2] t_sw = components[3] t_hw = components[4] oth = components[5] ck = CPEComponent.ATT_EDITION d[ck] = CPE2_3_URI._create_component(ck, ed) ck = CPEComponent.ATT_SW_EDITION d[ck] = CPE2_3_URI._create_component(ck, sw_ed) ck = CPEComponent.ATT_TARGET_SW d[ck] = CPE2_3_URI._create_component(ck, t_sw) ck = CPEComponent.ATT_TARGET_HW d[ck] = CPE2_3_URI._create_component(ck, t_hw) ck = CPEComponent.ATT_OTHER d[ck] = CPE2_3_URI._create_component(ck, oth) return d
[ "def", "_unpack_edition", "(", "cls", ",", "value", ")", ":", "components", "=", "value", ".", "split", "(", "CPEComponent2_3_URI", ".", "SEPARATOR_PACKED_EDITION", ")", "d", "=", "dict", "(", ")", "ed", "=", "components", "[", "1", "]", "sw_ed", "=", "components", "[", "2", "]", "t_sw", "=", "components", "[", "3", "]", "t_hw", "=", "components", "[", "4", "]", "oth", "=", "components", "[", "5", "]", "ck", "=", "CPEComponent", ".", "ATT_EDITION", "d", "[", "ck", "]", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "ed", ")", "ck", "=", "CPEComponent", ".", "ATT_SW_EDITION", "d", "[", "ck", "]", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "sw_ed", ")", "ck", "=", "CPEComponent", ".", "ATT_TARGET_SW", "d", "[", "ck", "]", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "t_sw", ")", "ck", "=", "CPEComponent", ".", "ATT_TARGET_HW", "d", "[", "ck", "]", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "t_hw", ")", "ck", "=", "CPEComponent", ".", "ATT_OTHER", "d", "[", "ck", "]", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "oth", ")", "return", "d" ]
Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute
[ "Unpack", "its", "elements", "and", "set", "the", "attributes", "in", "wfn", "accordingly", ".", "Parse", "out", "the", "five", "elements", ":" ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_uri.py#L134-L166
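A sketch of how the unpacking above is exercised through the public interface; the example URI is modeled on the packed-edition names in the CPE 2.3 naming specification and is an assumption here. The edition field carries ~edition~sw_edition~target_sw~target_hw~other, which _unpack_edition splits into separate attributes.

from cpe.cpe2_3_uri import CPE2_3_URI

# Packed edition: five '~'-separated sub-values become the edition,
# sw_edition, target_sw, target_hw and other attributes of the name.
c = CPE2_3_URI("cpe:/a:hp:insight_diagnostics:7.4.0.1570::~~online~win2003~x64~")
print(c.as_wfn())  # the WFN form shows the unpacked attributes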
nilp0inter/cpe
cpe/cpe2_3_uri.py
CPE2_3_URI._parse
def _parse(self): """ Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name """ # CPE Name must not have whitespaces if (self._str.find(" ") != -1): msg = "Bad-formed CPE Name: it must not have whitespaces" raise ValueError(msg) # Partitioning of CPE Name parts_match = CPE2_3_URI._parts_rxc.match(self._str) # Validation of CPE Name parts if (parts_match is None): msg = "Bad-formed CPE Name: validation of parts failed" raise ValueError(msg) components = dict() edition_parts = dict() for ck in CPEComponent.CPE_COMP_KEYS: value = parts_match.group(ck) try: if (ck == CPEComponent.ATT_EDITION and value is not None): if value[0] == CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION: # Unpack the edition part edition_parts = CPE2_3_URI._unpack_edition(value) else: comp = CPE2_3_URI._create_component(ck, value) else: comp = CPE2_3_URI._create_component(ck, value) except ValueError: errmsg = "Bad-formed CPE Name: not correct value '{0}'".format( value) raise ValueError(errmsg) else: components[ck] = comp components = dict(components, **edition_parts) # Adds the components of version 2.3 of CPE not defined in version 2.2 for ck2 in CPEComponent.CPE_COMP_KEYS_EXTENDED: if ck2 not in components.keys(): components[ck2] = CPEComponentUndefined() # Exchange the undefined values in middle attributes of CPE Name for # logical value ANY check_change = True # Start in the last attribute specififed in CPE Name for ck in CPEComponent.CPE_COMP_KEYS[::-1]: if ck in components: comp = components[ck] if check_change: check_change = ((ck != CPEComponent.ATT_EDITION) and (comp == CPEComponentUndefined()) or (ck == CPEComponent.ATT_EDITION and (len(edition_parts) == 0))) elif comp == CPEComponentUndefined(): comp = CPEComponentAnyValue() components[ck] = comp # Storage of CPE Name part_comp = components[CPEComponent.ATT_PART] if isinstance(part_comp, CPEComponentLogical): elements = [] elements.append(components) self[CPE.KEY_UNDEFINED] = elements else: # Create internal structure of CPE Name in parts: # one of them is filled with identified components, # the rest are empty system = parts_match.group(CPEComponent.ATT_PART) if system in CPEComponent.SYSTEM_VALUES: self._create_cpe_parts(system, components) else: self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED, components) # Fills the empty parts of internal structure of CPE Name for pk in CPE.CPE_PART_KEYS: if pk not in self.keys(): # Empty part self[pk] = []
python
def _parse(self): """ Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name """ # CPE Name must not have whitespaces if (self._str.find(" ") != -1): msg = "Bad-formed CPE Name: it must not have whitespaces" raise ValueError(msg) # Partitioning of CPE Name parts_match = CPE2_3_URI._parts_rxc.match(self._str) # Validation of CPE Name parts if (parts_match is None): msg = "Bad-formed CPE Name: validation of parts failed" raise ValueError(msg) components = dict() edition_parts = dict() for ck in CPEComponent.CPE_COMP_KEYS: value = parts_match.group(ck) try: if (ck == CPEComponent.ATT_EDITION and value is not None): if value[0] == CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION: # Unpack the edition part edition_parts = CPE2_3_URI._unpack_edition(value) else: comp = CPE2_3_URI._create_component(ck, value) else: comp = CPE2_3_URI._create_component(ck, value) except ValueError: errmsg = "Bad-formed CPE Name: not correct value '{0}'".format( value) raise ValueError(errmsg) else: components[ck] = comp components = dict(components, **edition_parts) # Adds the components of version 2.3 of CPE not defined in version 2.2 for ck2 in CPEComponent.CPE_COMP_KEYS_EXTENDED: if ck2 not in components.keys(): components[ck2] = CPEComponentUndefined() # Exchange the undefined values in middle attributes of CPE Name for # logical value ANY check_change = True # Start in the last attribute specififed in CPE Name for ck in CPEComponent.CPE_COMP_KEYS[::-1]: if ck in components: comp = components[ck] if check_change: check_change = ((ck != CPEComponent.ATT_EDITION) and (comp == CPEComponentUndefined()) or (ck == CPEComponent.ATT_EDITION and (len(edition_parts) == 0))) elif comp == CPEComponentUndefined(): comp = CPEComponentAnyValue() components[ck] = comp # Storage of CPE Name part_comp = components[CPEComponent.ATT_PART] if isinstance(part_comp, CPEComponentLogical): elements = [] elements.append(components) self[CPE.KEY_UNDEFINED] = elements else: # Create internal structure of CPE Name in parts: # one of them is filled with identified components, # the rest are empty system = parts_match.group(CPEComponent.ATT_PART) if system in CPEComponent.SYSTEM_VALUES: self._create_cpe_parts(system, components) else: self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED, components) # Fills the empty parts of internal structure of CPE Name for pk in CPE.CPE_PART_KEYS: if pk not in self.keys(): # Empty part self[pk] = []
[ "def", "_parse", "(", "self", ")", ":", "# CPE Name must not have whitespaces", "if", "(", "self", ".", "_str", ".", "find", "(", "\" \"", ")", "!=", "-", "1", ")", ":", "msg", "=", "\"Bad-formed CPE Name: it must not have whitespaces\"", "raise", "ValueError", "(", "msg", ")", "# Partitioning of CPE Name", "parts_match", "=", "CPE2_3_URI", ".", "_parts_rxc", ".", "match", "(", "self", ".", "_str", ")", "# Validation of CPE Name parts", "if", "(", "parts_match", "is", "None", ")", ":", "msg", "=", "\"Bad-formed CPE Name: validation of parts failed\"", "raise", "ValueError", "(", "msg", ")", "components", "=", "dict", "(", ")", "edition_parts", "=", "dict", "(", ")", "for", "ck", "in", "CPEComponent", ".", "CPE_COMP_KEYS", ":", "value", "=", "parts_match", ".", "group", "(", "ck", ")", "try", ":", "if", "(", "ck", "==", "CPEComponent", ".", "ATT_EDITION", "and", "value", "is", "not", "None", ")", ":", "if", "value", "[", "0", "]", "==", "CPEComponent2_3_URI", ".", "SEPARATOR_PACKED_EDITION", ":", "# Unpack the edition part", "edition_parts", "=", "CPE2_3_URI", ".", "_unpack_edition", "(", "value", ")", "else", ":", "comp", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "value", ")", "else", ":", "comp", "=", "CPE2_3_URI", ".", "_create_component", "(", "ck", ",", "value", ")", "except", "ValueError", ":", "errmsg", "=", "\"Bad-formed CPE Name: not correct value '{0}'\"", ".", "format", "(", "value", ")", "raise", "ValueError", "(", "errmsg", ")", "else", ":", "components", "[", "ck", "]", "=", "comp", "components", "=", "dict", "(", "components", ",", "*", "*", "edition_parts", ")", "# Adds the components of version 2.3 of CPE not defined in version 2.2", "for", "ck2", "in", "CPEComponent", ".", "CPE_COMP_KEYS_EXTENDED", ":", "if", "ck2", "not", "in", "components", ".", "keys", "(", ")", ":", "components", "[", "ck2", "]", "=", "CPEComponentUndefined", "(", ")", "# Exchange the undefined values in middle attributes of CPE Name for", "# logical value ANY", "check_change", "=", "True", "# Start in the last attribute specififed in CPE Name", "for", "ck", "in", "CPEComponent", ".", "CPE_COMP_KEYS", "[", ":", ":", "-", "1", "]", ":", "if", "ck", "in", "components", ":", "comp", "=", "components", "[", "ck", "]", "if", "check_change", ":", "check_change", "=", "(", "(", "ck", "!=", "CPEComponent", ".", "ATT_EDITION", ")", "and", "(", "comp", "==", "CPEComponentUndefined", "(", ")", ")", "or", "(", "ck", "==", "CPEComponent", ".", "ATT_EDITION", "and", "(", "len", "(", "edition_parts", ")", "==", "0", ")", ")", ")", "elif", "comp", "==", "CPEComponentUndefined", "(", ")", ":", "comp", "=", "CPEComponentAnyValue", "(", ")", "components", "[", "ck", "]", "=", "comp", "# Storage of CPE Name", "part_comp", "=", "components", "[", "CPEComponent", ".", "ATT_PART", "]", "if", "isinstance", "(", "part_comp", ",", "CPEComponentLogical", ")", ":", "elements", "=", "[", "]", "elements", ".", "append", "(", "components", ")", "self", "[", "CPE", ".", "KEY_UNDEFINED", "]", "=", "elements", "else", ":", "# Create internal structure of CPE Name in parts:", "# one of them is filled with identified components,", "# the rest are empty", "system", "=", "parts_match", ".", "group", "(", "CPEComponent", ".", "ATT_PART", ")", "if", "system", "in", "CPEComponent", ".", "SYSTEM_VALUES", ":", "self", ".", "_create_cpe_parts", "(", "system", ",", "components", ")", "else", ":", "self", ".", "_create_cpe_parts", "(", "CPEComponent", ".", "VALUE_PART_UNDEFINED", ",", "components", ")", "# Fills the empty 
parts of internal structure of CPE Name", "for", "pk", "in", "CPE", ".", "CPE_PART_KEYS", ":", "if", "pk", "not", "in", "self", ".", "keys", "(", ")", ":", "# Empty part", "self", "[", "pk", "]", "=", "[", "]" ]
Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name
[ "Checks", "if", "the", "CPE", "Name", "is", "valid", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_uri.py#L254-L343
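A small sketch of the parsing behaviour described above, with assumed example names: construction validates the URI, and a name containing whitespace is rejected with ValueError.

from cpe.cpe2_3_uri import CPE2_3_URI

good = CPE2_3_URI("cpe:/a:microsoft:internet_explorer:8.0.6001:beta")  # assumed example
print(good.as_wfn())

try:
    CPE2_3_URI("cpe:/a:microsoft:internet explorer:8.0")  # embedded whitespace
except ValueError as exc:
    print("rejected:", exc)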
nilp0inter/cpe
cpe/cpe2_3_uri.py
CPE2_3_URI.as_wfn
def as_wfn(self): """ Returns the CPE Name as Well-Formed Name string of version 2.3. If edition component is not packed, only shows the first seven components, otherwise shows all. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version """ if self._str.find(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) == -1: # Edition unpacked, only show the first seven components wfn = [] wfn.append(CPE2_3_WFN.CPE_PREFIX) for ck in CPEComponent.CPE_COMP_KEYS: lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with WFN".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] v = [] v.append(ck) v.append("=") if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty)): # Do not set the attribute continue elif isinstance(comp, CPEComponentAnyValue): # Logical value any v.append(CPEComponent2_3_WFN.VALUE_ANY) elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v.append(CPEComponent2_3_WFN.VALUE_NA) else: # Get the value of WFN of component v.append('"') v.append(comp.as_wfn()) v.append('"') # Append v to the WFN and add a separator wfn.append("".join(v)) wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP) # Del the last separator wfn = wfn[:-1] # Return the WFN string wfn.append(CPE2_3_WFN.CPE_SUFFIX) return "".join(wfn) else: # Shows all components return super(CPE2_3_URI, self).as_wfn()
python
def as_wfn(self): """ Returns the CPE Name as Well-Formed Name string of version 2.3. If edition component is not packed, only shows the first seven components, otherwise shows all. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version """ if self._str.find(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) == -1: # Edition unpacked, only show the first seven components wfn = [] wfn.append(CPE2_3_WFN.CPE_PREFIX) for ck in CPEComponent.CPE_COMP_KEYS: lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with WFN".format( self.VERSION) raise TypeError(errmsg) else: comp = lc[0] v = [] v.append(ck) v.append("=") if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty)): # Do not set the attribute continue elif isinstance(comp, CPEComponentAnyValue): # Logical value any v.append(CPEComponent2_3_WFN.VALUE_ANY) elif isinstance(comp, CPEComponentNotApplicable): # Logical value not applicable v.append(CPEComponent2_3_WFN.VALUE_NA) else: # Get the value of WFN of component v.append('"') v.append(comp.as_wfn()) v.append('"') # Append v to the WFN and add a separator wfn.append("".join(v)) wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP) # Del the last separator wfn = wfn[:-1] # Return the WFN string wfn.append(CPE2_3_WFN.CPE_SUFFIX) return "".join(wfn) else: # Shows all components return super(CPE2_3_URI, self).as_wfn()
[ "def", "as_wfn", "(", "self", ")", ":", "if", "self", ".", "_str", ".", "find", "(", "CPEComponent2_3_URI", ".", "SEPARATOR_PACKED_EDITION", ")", "==", "-", "1", ":", "# Edition unpacked, only show the first seven components", "wfn", "=", "[", "]", "wfn", ".", "append", "(", "CPE2_3_WFN", ".", "CPE_PREFIX", ")", "for", "ck", "in", "CPEComponent", ".", "CPE_COMP_KEYS", ":", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with WFN\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "else", ":", "comp", "=", "lc", "[", "0", "]", "v", "=", "[", "]", "v", ".", "append", "(", "ck", ")", "v", ".", "append", "(", "\"=\"", ")", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", ")", ":", "# Do not set the attribute", "continue", "elif", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ":", "# Logical value any", "v", ".", "append", "(", "CPEComponent2_3_WFN", ".", "VALUE_ANY", ")", "elif", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ":", "# Logical value not applicable", "v", ".", "append", "(", "CPEComponent2_3_WFN", ".", "VALUE_NA", ")", "else", ":", "# Get the value of WFN of component", "v", ".", "append", "(", "'\"'", ")", "v", ".", "append", "(", "comp", ".", "as_wfn", "(", ")", ")", "v", ".", "append", "(", "'\"'", ")", "# Append v to the WFN and add a separator", "wfn", ".", "append", "(", "\"\"", ".", "join", "(", "v", ")", ")", "wfn", ".", "append", "(", "CPEComponent2_3_WFN", ".", "SEPARATOR_COMP", ")", "# Del the last separator", "wfn", "=", "wfn", "[", ":", "-", "1", "]", "# Return the WFN string", "wfn", ".", "append", "(", "CPE2_3_WFN", ".", "CPE_SUFFIX", ")", "return", "\"\"", ".", "join", "(", "wfn", ")", "else", ":", "# Shows all components", "return", "super", "(", "CPE2_3_URI", ",", "self", ")", ".", "as_wfn", "(", ")" ]
Returns the CPE Name as Well-Formed Name string of version 2.3. If edition component is not packed, only shows the first seven components, otherwise shows all. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version
[ "Returns", "the", "CPE", "Name", "as", "Well", "-", "Formed", "Name", "string", "of", "version", "2", ".", "3", ".", "If", "edition", "component", "is", "not", "packed", "only", "shows", "the", "first", "seven", "components", "otherwise", "shows", "all", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_uri.py#L345-L415
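A sketch of the unbinding above for a name whose edition is not packed, assuming the example URI is well formed: only the first seven attributes appear in the WFN, a '-' value should surface as the logical NA, and quoted values are escaped (dots become \.).

from cpe.cpe2_3_uri import CPE2_3_URI

c = CPE2_3_URI("cpe:/a:microsoft:internet_explorer:8.0.6001:-")  # update is "not applicable"
print(c.as_wfn())  # expect something like ... version="8\.0\.6001", update=NA ...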
nilp0inter/cpe
cpe/comp/cpecomp2_3_uri.py
CPEComponent2_3_URI._decode
def _decode(self): """ Convert the characters of character in value of component to standard value (WFN value). This function scans the value of component and returns a copy with all percent-encoded characters decoded. :exception: ValueError - invalid character in value of component """ result = [] idx = 0 s = self._encoded_value embedded = False errmsg = [] errmsg.append("Invalid value: ") while (idx < len(s)): errmsg.append(s) errmsg_str = "".join(errmsg) # Get the idx'th character of s c = s[idx] # Deal with dot, hyphen and tilde: decode with quoting if ((c == '.') or (c == '-') or (c == '~')): result.append("\\") result.append(c) idx += 1 embedded = True # a non-%01 encountered continue if (c != '%'): result.append(c) idx += 1 embedded = True # a non-%01 encountered continue # we get here if we have a substring starting w/ '%' form = s[idx: idx + 3] # get the three-char sequence if form == CPEComponent2_3_URI.WILDCARD_ONE: # If %01 legal at beginning or end # embedded is false, so must be preceded by %01 # embedded is true, so must be followed by %01 if (((idx == 0) or (idx == (len(s)-3))) or ((not embedded) and (s[idx - 3:idx] == CPEComponent2_3_URI.WILDCARD_ONE)) or (embedded and (len(s) >= idx + 6) and (s[idx + 3:idx + 6] == CPEComponent2_3_URI.WILDCARD_ONE))): # A percent-encoded question mark is found # at the beginning or the end of the string, # or embedded in sequence as required. # Decode to unquoted form. result.append(CPEComponent2_3_WFN.WILDCARD_ONE) idx += 3 continue else: raise ValueError(errmsg_str) elif form == CPEComponent2_3_URI.WILDCARD_MULTI: if ((idx == 0) or (idx == (len(s) - 3))): # Percent-encoded asterisk is at the beginning # or the end of the string, as required. # Decode to unquoted form. result.append(CPEComponent2_3_WFN.WILDCARD_MULTI) else: raise ValueError(errmsg_str) elif form in CPEComponent2_3_URI.pce_char_to_decode.keys(): value = CPEComponent2_3_URI.pce_char_to_decode[form] result.append(value) else: errmsg.append("Invalid percent-encoded character: ") errmsg.append(s) raise ValueError("".join(errmsg)) idx += 3 embedded = True # a non-%01 encountered. self._standard_value = "".join(result)
python
def _decode(self): """ Convert the characters of character in value of component to standard value (WFN value). This function scans the value of component and returns a copy with all percent-encoded characters decoded. :exception: ValueError - invalid character in value of component """ result = [] idx = 0 s = self._encoded_value embedded = False errmsg = [] errmsg.append("Invalid value: ") while (idx < len(s)): errmsg.append(s) errmsg_str = "".join(errmsg) # Get the idx'th character of s c = s[idx] # Deal with dot, hyphen and tilde: decode with quoting if ((c == '.') or (c == '-') or (c == '~')): result.append("\\") result.append(c) idx += 1 embedded = True # a non-%01 encountered continue if (c != '%'): result.append(c) idx += 1 embedded = True # a non-%01 encountered continue # we get here if we have a substring starting w/ '%' form = s[idx: idx + 3] # get the three-char sequence if form == CPEComponent2_3_URI.WILDCARD_ONE: # If %01 legal at beginning or end # embedded is false, so must be preceded by %01 # embedded is true, so must be followed by %01 if (((idx == 0) or (idx == (len(s)-3))) or ((not embedded) and (s[idx - 3:idx] == CPEComponent2_3_URI.WILDCARD_ONE)) or (embedded and (len(s) >= idx + 6) and (s[idx + 3:idx + 6] == CPEComponent2_3_URI.WILDCARD_ONE))): # A percent-encoded question mark is found # at the beginning or the end of the string, # or embedded in sequence as required. # Decode to unquoted form. result.append(CPEComponent2_3_WFN.WILDCARD_ONE) idx += 3 continue else: raise ValueError(errmsg_str) elif form == CPEComponent2_3_URI.WILDCARD_MULTI: if ((idx == 0) or (idx == (len(s) - 3))): # Percent-encoded asterisk is at the beginning # or the end of the string, as required. # Decode to unquoted form. result.append(CPEComponent2_3_WFN.WILDCARD_MULTI) else: raise ValueError(errmsg_str) elif form in CPEComponent2_3_URI.pce_char_to_decode.keys(): value = CPEComponent2_3_URI.pce_char_to_decode[form] result.append(value) else: errmsg.append("Invalid percent-encoded character: ") errmsg.append(s) raise ValueError("".join(errmsg)) idx += 3 embedded = True # a non-%01 encountered. self._standard_value = "".join(result)
[ "def", "_decode", "(", "self", ")", ":", "result", "=", "[", "]", "idx", "=", "0", "s", "=", "self", ".", "_encoded_value", "embedded", "=", "False", "errmsg", "=", "[", "]", "errmsg", ".", "append", "(", "\"Invalid value: \"", ")", "while", "(", "idx", "<", "len", "(", "s", ")", ")", ":", "errmsg", ".", "append", "(", "s", ")", "errmsg_str", "=", "\"\"", ".", "join", "(", "errmsg", ")", "# Get the idx'th character of s", "c", "=", "s", "[", "idx", "]", "# Deal with dot, hyphen and tilde: decode with quoting", "if", "(", "(", "c", "==", "'.'", ")", "or", "(", "c", "==", "'-'", ")", "or", "(", "c", "==", "'~'", ")", ")", ":", "result", ".", "append", "(", "\"\\\\\"", ")", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "True", "# a non-%01 encountered", "continue", "if", "(", "c", "!=", "'%'", ")", ":", "result", ".", "append", "(", "c", ")", "idx", "+=", "1", "embedded", "=", "True", "# a non-%01 encountered", "continue", "# we get here if we have a substring starting w/ '%'", "form", "=", "s", "[", "idx", ":", "idx", "+", "3", "]", "# get the three-char sequence", "if", "form", "==", "CPEComponent2_3_URI", ".", "WILDCARD_ONE", ":", "# If %01 legal at beginning or end", "# embedded is false, so must be preceded by %01", "# embedded is true, so must be followed by %01", "if", "(", "(", "(", "idx", "==", "0", ")", "or", "(", "idx", "==", "(", "len", "(", "s", ")", "-", "3", ")", ")", ")", "or", "(", "(", "not", "embedded", ")", "and", "(", "s", "[", "idx", "-", "3", ":", "idx", "]", "==", "CPEComponent2_3_URI", ".", "WILDCARD_ONE", ")", ")", "or", "(", "embedded", "and", "(", "len", "(", "s", ")", ">=", "idx", "+", "6", ")", "and", "(", "s", "[", "idx", "+", "3", ":", "idx", "+", "6", "]", "==", "CPEComponent2_3_URI", ".", "WILDCARD_ONE", ")", ")", ")", ":", "# A percent-encoded question mark is found", "# at the beginning or the end of the string,", "# or embedded in sequence as required.", "# Decode to unquoted form.", "result", ".", "append", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_ONE", ")", "idx", "+=", "3", "continue", "else", ":", "raise", "ValueError", "(", "errmsg_str", ")", "elif", "form", "==", "CPEComponent2_3_URI", ".", "WILDCARD_MULTI", ":", "if", "(", "(", "idx", "==", "0", ")", "or", "(", "idx", "==", "(", "len", "(", "s", ")", "-", "3", ")", ")", ")", ":", "# Percent-encoded asterisk is at the beginning", "# or the end of the string, as required.", "# Decode to unquoted form.", "result", ".", "append", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_MULTI", ")", "else", ":", "raise", "ValueError", "(", "errmsg_str", ")", "elif", "form", "in", "CPEComponent2_3_URI", ".", "pce_char_to_decode", ".", "keys", "(", ")", ":", "value", "=", "CPEComponent2_3_URI", ".", "pce_char_to_decode", "[", "form", "]", "result", ".", "append", "(", "value", ")", "else", ":", "errmsg", ".", "append", "(", "\"Invalid percent-encoded character: \"", ")", "errmsg", ".", "append", "(", "s", ")", "raise", "ValueError", "(", "\"\"", ".", "join", "(", "errmsg", ")", ")", "idx", "+=", "3", "embedded", "=", "True", "# a non-%01 encountered.", "self", ".", "_standard_value", "=", "\"\"", ".", "join", "(", "result", ")" ]
Convert the characters of character in value of component to standard value (WFN value). This function scans the value of component and returns a copy with all percent-encoded characters decoded. :exception: ValueError - invalid character in value of component
[ "Convert", "the", "characters", "of", "character", "in", "value", "of", "component", "to", "standard", "value", "(", "WFN", "value", ")", ".", "This", "function", "scans", "the", "value", "of", "component", "and", "returns", "a", "copy", "with", "all", "percent", "-", "encoded", "characters", "decoded", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3_uri.py#L163-L244
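A sketch of the wildcard handling above, driven through the URI class so the decoder runs on each component; the example is adapted from the 2.3 specification's percent-encoding examples and is an assumption here. %01 is the encoding of the single-character wildcard '?' and %02 of the multi-character wildcard '*', and both are only accepted at the positions the method checks (here, the end of their values).

from cpe.cpe2_3_uri import CPE2_3_URI

# "%02" at the end of "8.%02" and "%01" at the end of "sp%01" are legal
# positions, so they should decode to '*' and '?' in the WFN form.
c = CPE2_3_URI("cpe:/a:microsoft:internet_explorer:8.%02:sp%01")
print(c.as_wfn())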
nilp0inter/cpe
cpe/comp/cpecomp2_3_uri.py
CPEComponent2_3_URI._is_valid_edition
def _is_valid_edition(self): """ Return True if the input value of attribute "edition" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._standard_value[0] packed = [] packed.append("(") packed.append(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) packed.append(CPEComponent2_3_URI._string) packed.append("){5}") value_pattern = [] value_pattern.append("^(") value_pattern.append(CPEComponent2_3_URI._string) value_pattern.append("|") value_pattern.append("".join(packed)) value_pattern.append(")$") value_rxc = re.compile("".join(value_pattern)) return value_rxc.match(comp_str) is not None
python
def _is_valid_edition(self): """ Return True if the input value of attribute "edition" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._standard_value[0] packed = [] packed.append("(") packed.append(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) packed.append(CPEComponent2_3_URI._string) packed.append("){5}") value_pattern = [] value_pattern.append("^(") value_pattern.append(CPEComponent2_3_URI._string) value_pattern.append("|") value_pattern.append("".join(packed)) value_pattern.append(")$") value_rxc = re.compile("".join(value_pattern)) return value_rxc.match(comp_str) is not None
[ "def", "_is_valid_edition", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_standard_value", "[", "0", "]", "packed", "=", "[", "]", "packed", ".", "append", "(", "\"(\"", ")", "packed", ".", "append", "(", "CPEComponent2_3_URI", ".", "SEPARATOR_PACKED_EDITION", ")", "packed", ".", "append", "(", "CPEComponent2_3_URI", ".", "_string", ")", "packed", ".", "append", "(", "\"){5}\"", ")", "value_pattern", "=", "[", "]", "value_pattern", ".", "append", "(", "\"^(\"", ")", "value_pattern", ".", "append", "(", "CPEComponent2_3_URI", ".", "_string", ")", "value_pattern", ".", "append", "(", "\"|\"", ")", "value_pattern", ".", "append", "(", "\"\"", ".", "join", "(", "packed", ")", ")", "value_pattern", ".", "append", "(", "\")$\"", ")", "value_rxc", "=", "re", ".", "compile", "(", "\"\"", ".", "join", "(", "value_pattern", ")", ")", "return", "value_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the input value of attribute "edition" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "input", "value", "of", "attribute", "edition", "is", "valid", "and", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3_uri.py#L246-L271
nilp0inter/cpe
cpe/comp/cpecomp2_3.py
CPEComponent2_3._is_valid_language
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean CASE 1: Language part with/without region part CASE 2: Language part without region part CASE 3: Region part with language part CASE 4: Region part without language part """ def check_generic_language(self, value): """ Check possible values in language part when region part exists or not in language value. Possible values of language attribute: a=letter | *a | *aa | aa | aaa | ?a | ?aa | ?? | ??a | ??? """ lang_pattern = [] lang_pattern.append("^(\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("[a-z]{1,2}") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(([a-z][a-z]?)|(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("|[a-z])?))") lang_pattern.append("|([a-z]{2,3}))$") lang_rxc = re.compile("".join(lang_pattern)) return lang_rxc.match(value) def check_language_without_region(self, value): """ Check possible values in language part when region part not exist in language value. Possible values of language attribute: a=letter | a? | aa? | a?? | a* | aa* | aaa* | *a* | *a? | ?a* | ?a? """ lang_pattern = [] lang_pattern.append("^([a-z]") lang_pattern.append("([a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("|") lang_pattern.append("([a-z]\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("))") lang_pattern.append("|") lang_pattern.append("\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")?") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append(")|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("[a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("[a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")") lang_pattern.append(")$") lang_rxc = re.compile("".join(lang_pattern)) return lang_rxc.match(value) def check_region_with_language(self, value): """ Check possible values in region part when language part exists. Possible values of language attribute: a=letter, 1=digit | * | a* | a? | aa | ?? | 1* | 1?? | 11* | 11? | 111 | ??? 
""" region_pattern = [] region_pattern.append("^(") region_pattern.append("(\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append(")|((\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("){2,3})|([a-z]([a-z]|\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("|\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("))|([0-9](\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("|\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("(\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append(")?|[0-9][0-9\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("])))$") region_rxc = re.compile("".join(region_pattern)) return region_rxc.match(region) def check_region_without_language(self, value): """ Check possible values in region part when language part not exist. Possible values of language attribute: 1=digit | *111 | *11 | *1 """ region_pattern = [] region_pattern.append("^(") region_pattern.append("(\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("[0-9])") region_pattern.append("([0-9]([0-9])?)?") region_pattern.append(")$") region_rxc = re.compile("".join(region_pattern)) return region_rxc.match(region) comp_str = self._encoded_value.lower() # Value with wildcards; separate language and region of value parts = comp_str.split(self.SEPARATOR_LANG) language = parts[0] region_exists = len(parts) == 2 # Check the language part if check_generic_language(self, language) is not None: # Valid language, check region part if region_exists: # Region part exists; check it region = parts[1] return (check_region_with_language(self, region) is not None) else: # Not region part return True elif check_language_without_region(self, language) is not None: # Language without region; region part should not exist return not region_exists else: # Language part not exist; check region part region = parts[0] return check_region_without_language(self, region) is not None
python
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean CASE 1: Language part with/without region part CASE 2: Language part without region part CASE 3: Region part with language part CASE 4: Region part without language part """ def check_generic_language(self, value): """ Check possible values in language part when region part exists or not in language value. Possible values of language attribute: a=letter | *a | *aa | aa | aaa | ?a | ?aa | ?? | ??a | ??? """ lang_pattern = [] lang_pattern.append("^(\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("[a-z]{1,2}") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(([a-z][a-z]?)|(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("|[a-z])?))") lang_pattern.append("|([a-z]{2,3}))$") lang_rxc = re.compile("".join(lang_pattern)) return lang_rxc.match(value) def check_language_without_region(self, value): """ Check possible values in language part when region part not exist in language value. Possible values of language attribute: a=letter | a? | aa? | a?? | a* | aa* | aaa* | *a* | *a? | ?a* | ?a? """ lang_pattern = [] lang_pattern.append("^([a-z]") lang_pattern.append("([a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("|") lang_pattern.append("([a-z]\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("))") lang_pattern.append("|") lang_pattern.append("\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("(\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")?") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append(")|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append("[a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")") lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("[a-z](\\") lang_pattern.append(self.WILDCARD_MULTI) lang_pattern.append("|\\") lang_pattern.append(self.WILDCARD_ONE) lang_pattern.append(")") lang_pattern.append(")$") lang_rxc = re.compile("".join(lang_pattern)) return lang_rxc.match(value) def check_region_with_language(self, value): """ Check possible values in region part when language part exists. Possible values of language attribute: a=letter, 1=digit | * | a* | a? | aa | ?? | 1* | 1?? | 11* | 11? | 111 | ??? 
""" region_pattern = [] region_pattern.append("^(") region_pattern.append("(\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append(")|((\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("){2,3})|([a-z]([a-z]|\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("|\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("))|([0-9](\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("|\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("(\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append(")?|[0-9][0-9\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("\\") region_pattern.append(self.WILDCARD_ONE) region_pattern.append("])))$") region_rxc = re.compile("".join(region_pattern)) return region_rxc.match(region) def check_region_without_language(self, value): """ Check possible values in region part when language part not exist. Possible values of language attribute: 1=digit | *111 | *11 | *1 """ region_pattern = [] region_pattern.append("^(") region_pattern.append("(\\") region_pattern.append(self.WILDCARD_MULTI) region_pattern.append("[0-9])") region_pattern.append("([0-9]([0-9])?)?") region_pattern.append(")$") region_rxc = re.compile("".join(region_pattern)) return region_rxc.match(region) comp_str = self._encoded_value.lower() # Value with wildcards; separate language and region of value parts = comp_str.split(self.SEPARATOR_LANG) language = parts[0] region_exists = len(parts) == 2 # Check the language part if check_generic_language(self, language) is not None: # Valid language, check region part if region_exists: # Region part exists; check it region = parts[1] return (check_region_with_language(self, region) is not None) else: # Not region part return True elif check_language_without_region(self, language) is not None: # Language without region; region part should not exist return not region_exists else: # Language part not exist; check region part region = parts[0] return check_region_without_language(self, region) is not None
[ "def", "_is_valid_language", "(", "self", ")", ":", "def", "check_generic_language", "(", "self", ",", "value", ")", ":", "\"\"\"\n Check possible values in language part\n when region part exists or not in language value.\n\n Possible values of language attribute: a=letter\n | *a\n | *aa\n | aa\n | aaa\n | ?a\n | ?aa\n | ??\n | ??a\n | ???\n \"\"\"", "lang_pattern", "=", "[", "]", "lang_pattern", ".", "append", "(", "\"^(\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"[a-z]{1,2}\"", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"(([a-z][a-z]?)|(\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"(\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"|[a-z])?))\"", ")", "lang_pattern", ".", "append", "(", "\"|([a-z]{2,3}))$\"", ")", "lang_rxc", "=", "re", ".", "compile", "(", "\"\"", ".", "join", "(", "lang_pattern", ")", ")", "return", "lang_rxc", ".", "match", "(", "value", ")", "def", "check_language_without_region", "(", "self", ",", "value", ")", ":", "\"\"\"\n Check possible values in language part\n when region part not exist in language value.\n\n Possible values of language attribute: a=letter\n | a?\n | aa?\n | a??\n | a*\n | aa*\n | aaa*\n | *a*\n | *a?\n | ?a*\n | ?a?\n \"\"\"", "lang_pattern", "=", "[", "]", "lang_pattern", ".", "append", "(", "\"^([a-z]\"", ")", "lang_pattern", ".", "append", "(", "\"([a-z](\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"|\"", ")", "lang_pattern", ".", "append", "(", "\"([a-z]\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"))\"", ")", "lang_pattern", ".", "append", "(", "\"|\"", ")", "lang_pattern", ".", "append", "(", "\"\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"(\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\")?\"", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\")|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\"[a-z](\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\")\"", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"[a-z](\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "lang_pattern", ".", "append", "(", "\"|\\\\\"", ")", "lang_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "lang_pattern", ".", "append", "(", "\")\"", ")", "lang_pattern", ".", "append", "(", "\")$\"", ")", "lang_rxc", "=", "re", ".", "compile", "(", 
"\"\"", ".", "join", "(", "lang_pattern", ")", ")", "return", "lang_rxc", ".", "match", "(", "value", ")", "def", "check_region_with_language", "(", "self", ",", "value", ")", ":", "\"\"\"\n Check possible values in region part when language part exists.\n\n Possible values of language attribute: a=letter, 1=digit\n | *\n | a*\n | a?\n | aa\n | ??\n | 1*\n | 1??\n | 11*\n | 11?\n | 111\n | ???\n \"\"\"", "region_pattern", "=", "[", "]", "region_pattern", ".", "append", "(", "\"^(\"", ")", "region_pattern", ".", "append", "(", "\"(\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "region_pattern", ".", "append", "(", "\")|((\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "region_pattern", ".", "append", "(", "\"){2,3})|([a-z]([a-z]|\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "region_pattern", ".", "append", "(", "\"|\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "region_pattern", ".", "append", "(", "\"))|([0-9](\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "region_pattern", ".", "append", "(", "\"|\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "region_pattern", ".", "append", "(", "\"(\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "region_pattern", ".", "append", "(", "\")?|[0-9][0-9\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "region_pattern", ".", "append", "(", "\"\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_ONE", ")", "region_pattern", ".", "append", "(", "\"])))$\"", ")", "region_rxc", "=", "re", ".", "compile", "(", "\"\"", ".", "join", "(", "region_pattern", ")", ")", "return", "region_rxc", ".", "match", "(", "region", ")", "def", "check_region_without_language", "(", "self", ",", "value", ")", ":", "\"\"\"\n Check possible values in region part when language part not exist.\n\n Possible values of language attribute: 1=digit\n | *111\n | *11\n | *1\n \"\"\"", "region_pattern", "=", "[", "]", "region_pattern", ".", "append", "(", "\"^(\"", ")", "region_pattern", ".", "append", "(", "\"(\\\\\"", ")", "region_pattern", ".", "append", "(", "self", ".", "WILDCARD_MULTI", ")", "region_pattern", ".", "append", "(", "\"[0-9])\"", ")", "region_pattern", ".", "append", "(", "\"([0-9]([0-9])?)?\"", ")", "region_pattern", ".", "append", "(", "\")$\"", ")", "region_rxc", "=", "re", ".", "compile", "(", "\"\"", ".", "join", "(", "region_pattern", ")", ")", "return", "region_rxc", ".", "match", "(", "region", ")", "comp_str", "=", "self", ".", "_encoded_value", ".", "lower", "(", ")", "# Value with wildcards; separate language and region of value", "parts", "=", "comp_str", ".", "split", "(", "self", ".", "SEPARATOR_LANG", ")", "language", "=", "parts", "[", "0", "]", "region_exists", "=", "len", "(", "parts", ")", "==", "2", "# Check the language part", "if", "check_generic_language", "(", "self", ",", "language", ")", "is", "not", "None", ":", "# Valid language, check region part", "if", "region_exists", ":", "# Region part exists; check it", "region", "=", "parts", "[", "1", "]", "return", "(", "check_region_with_language", "(", "self", ",", "region", ")", "is", "not", "None", ")", "else", ":", "# Not region part", "return", "True", "elif", "check_language_without_region", "(", "self", ",", "language", ")", "is", "not", "None", ":", "# Language 
without region; region part should not exist", "return", "not", "region_exists", "else", ":", "# Language part not exist; check region part", "region", "=", "parts", "[", "0", "]", "return", "check_region_without_language", "(", "self", ",", "region", ")", "is", "not", "None" ]
Return True if the value of the component in attribute "language" is valid, otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean CASE 1: Language part with/without region part CASE 2: Language part without region part CASE 3: Region part with language part CASE 4: Region part without language part
[ "Return", "True", "if", "the", "value", "of", "the", "component", "in", "attribute", "language", "is", "valid", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3.py#L58-L245
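The wildcard handling in _is_valid_language is easier to follow with the assembled pattern written out. The sketch below rebuilds the "generic language" regular expression with literal '?' and '*' standing in for WILDCARD_ONE and WILDCARD_MULTI (assumed values, since the constants are defined outside this excerpt) and probes it against sample values from the docstring's table; it is an illustration of the pattern, not library code.

import re

# "Generic language" pattern from _is_valid_language, with '?' / '*'
# substituted for WILDCARD_ONE / WILDCARD_MULTI (assumed values).
lang_rxc = re.compile(r"^(\*[a-z]{1,2}|\?(([a-z][a-z]?)|(\?(\?|[a-z])?))|([a-z]{2,3}))$")

for value in ("en", "spa", "*en", "?a", "??", "???", "e", "english"):
    print(value, bool(lang_rxc.match(value)))
# Per the docstring's table, the first six values match; "e" and "english" do not.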
nilp0inter/cpe
cpe/comp/cpecomp2_3.py
CPEComponent2_3._is_valid_part
def _is_valid_part(self): """ Return True if the value of component in attribute "part" is valid, and otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value # Check if value of component do not have wildcard if ((comp_str.find(self.WILDCARD_ONE) == -1) and (comp_str.find(self.WILDCARD_MULTI) == -1)): return super(CPEComponent2_3, self)._is_valid_part() # Compilation of regular expression associated with value of part part_pattern = "^(\{0}|\{1})$".format(self.WILDCARD_ONE, self.WILDCARD_MULTI) part_rxc = re.compile(part_pattern) return part_rxc.match(comp_str) is not None
python
def _is_valid_part(self): """ Return True if the value of component in attribute "part" is valid, and otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value # Check if value of component do not have wildcard if ((comp_str.find(self.WILDCARD_ONE) == -1) and (comp_str.find(self.WILDCARD_MULTI) == -1)): return super(CPEComponent2_3, self)._is_valid_part() # Compilation of regular expression associated with value of part part_pattern = "^(\{0}|\{1})$".format(self.WILDCARD_ONE, self.WILDCARD_MULTI) part_rxc = re.compile(part_pattern) return part_rxc.match(comp_str) is not None
[ "def", "_is_valid_part", "(", "self", ")", ":", "comp_str", "=", "self", ".", "_encoded_value", "# Check if value of component do not have wildcard", "if", "(", "(", "comp_str", ".", "find", "(", "self", ".", "WILDCARD_ONE", ")", "==", "-", "1", ")", "and", "(", "comp_str", ".", "find", "(", "self", ".", "WILDCARD_MULTI", ")", "==", "-", "1", ")", ")", ":", "return", "super", "(", "CPEComponent2_3", ",", "self", ")", ".", "_is_valid_part", "(", ")", "# Compilation of regular expression associated with value of part", "part_pattern", "=", "\"^(\\{0}|\\{1})$\"", ".", "format", "(", "self", ".", "WILDCARD_ONE", ",", "self", ".", "WILDCARD_MULTI", ")", "part_rxc", "=", "re", ".", "compile", "(", "part_pattern", ")", "return", "part_rxc", ".", "match", "(", "comp_str", ")", "is", "not", "None" ]
Return True if the value of the component in attribute "part" is valid, otherwise False. :returns: True if value of component is valid, False otherwise :rtype: boolean
[ "Return", "True", "if", "the", "value", "of", "the", "component", "in", "attribute", "part", "is", "valid", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3.py#L247-L269
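For the "part" attribute the wildcard case collapses to a single anchored alternation: when the value contains a wildcard at all, it must be exactly one '?' or one '*'. A minimal standalone sketch, using the same assumed wildcard characters as above:

import re

# Pattern built by _is_valid_part for values that contain a wildcard.
part_rxc = re.compile(r"^(\?|\*)$")

print(bool(part_rxc.match("*")))   # True
print(bool(part_rxc.match("?")))   # True
print(bool(part_rxc.match("a")))   # False; non-wildcard values go to the superclass check instead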
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3._compare
def _compare(cls, source, target): """ Compares two values associated with a attribute of two WFNs, which may be logical values (ANY or NA) or string values. :param string source: First attribute value :param string target: Second attribute value :returns: The attribute comparison relation. :rtype: int This function is a support function for compare_WFNs. """ if (CPESet2_3._is_string(source)): source = source.lower() if (CPESet2_3._is_string(target)): target = target.lower() # In this specification, unquoted wildcard characters in the target # yield an undefined result if (CPESet2_3._is_string(target) and CPESet2_3._contains_wildcards(target)): return CPESet2_3.LOGICAL_VALUE_UNDEFINED # If source and target attribute values are equal, # then the result is EQUAL if (source == target): return CPESet2_3.LOGICAL_VALUE_EQUAL # If source attribute value is ANY, then the result is SUPERSET if (source == CPEComponent2_3_WFN.VALUE_ANY): return CPESet2_3.LOGICAL_VALUE_SUPERSET # If target attribute value is ANY, then the result is SUBSET if (target == CPEComponent2_3_WFN.VALUE_ANY): return CPESet2_3.LOGICAL_VALUE_SUBSET # If either source or target attribute value is NA # then the result is DISJOINT isSourceNA = source == CPEComponent2_3_WFN.VALUE_NA isTargetNA = target == CPEComponent2_3_WFN.VALUE_NA if (isSourceNA or isTargetNA): return CPESet2_3.LOGICAL_VALUE_DISJOINT # If we get to this point, we are comparing two strings return CPESet2_3._compare_strings(source, target)
python
def _compare(cls, source, target): """ Compares two values associated with a attribute of two WFNs, which may be logical values (ANY or NA) or string values. :param string source: First attribute value :param string target: Second attribute value :returns: The attribute comparison relation. :rtype: int This function is a support function for compare_WFNs. """ if (CPESet2_3._is_string(source)): source = source.lower() if (CPESet2_3._is_string(target)): target = target.lower() # In this specification, unquoted wildcard characters in the target # yield an undefined result if (CPESet2_3._is_string(target) and CPESet2_3._contains_wildcards(target)): return CPESet2_3.LOGICAL_VALUE_UNDEFINED # If source and target attribute values are equal, # then the result is EQUAL if (source == target): return CPESet2_3.LOGICAL_VALUE_EQUAL # If source attribute value is ANY, then the result is SUPERSET if (source == CPEComponent2_3_WFN.VALUE_ANY): return CPESet2_3.LOGICAL_VALUE_SUPERSET # If target attribute value is ANY, then the result is SUBSET if (target == CPEComponent2_3_WFN.VALUE_ANY): return CPESet2_3.LOGICAL_VALUE_SUBSET # If either source or target attribute value is NA # then the result is DISJOINT isSourceNA = source == CPEComponent2_3_WFN.VALUE_NA isTargetNA = target == CPEComponent2_3_WFN.VALUE_NA if (isSourceNA or isTargetNA): return CPESet2_3.LOGICAL_VALUE_DISJOINT # If we get to this point, we are comparing two strings return CPESet2_3._compare_strings(source, target)
[ "def", "_compare", "(", "cls", ",", "source", ",", "target", ")", ":", "if", "(", "CPESet2_3", ".", "_is_string", "(", "source", ")", ")", ":", "source", "=", "source", ".", "lower", "(", ")", "if", "(", "CPESet2_3", ".", "_is_string", "(", "target", ")", ")", ":", "target", "=", "target", ".", "lower", "(", ")", "# In this specification, unquoted wildcard characters in the target", "# yield an undefined result", "if", "(", "CPESet2_3", ".", "_is_string", "(", "target", ")", "and", "CPESet2_3", ".", "_contains_wildcards", "(", "target", ")", ")", ":", "return", "CPESet2_3", ".", "LOGICAL_VALUE_UNDEFINED", "# If source and target attribute values are equal,", "# then the result is EQUAL", "if", "(", "source", "==", "target", ")", ":", "return", "CPESet2_3", ".", "LOGICAL_VALUE_EQUAL", "# If source attribute value is ANY, then the result is SUPERSET", "if", "(", "source", "==", "CPEComponent2_3_WFN", ".", "VALUE_ANY", ")", ":", "return", "CPESet2_3", ".", "LOGICAL_VALUE_SUPERSET", "# If target attribute value is ANY, then the result is SUBSET", "if", "(", "target", "==", "CPEComponent2_3_WFN", ".", "VALUE_ANY", ")", ":", "return", "CPESet2_3", ".", "LOGICAL_VALUE_SUBSET", "# If either source or target attribute value is NA", "# then the result is DISJOINT", "isSourceNA", "=", "source", "==", "CPEComponent2_3_WFN", ".", "VALUE_NA", "isTargetNA", "=", "target", "==", "CPEComponent2_3_WFN", ".", "VALUE_NA", "if", "(", "isSourceNA", "or", "isTargetNA", ")", ":", "return", "CPESet2_3", ".", "LOGICAL_VALUE_DISJOINT", "# If we get to this point, we are comparing two strings", "return", "CPESet2_3", ".", "_compare_strings", "(", "source", ",", "target", ")" ]
Compares two values associated with an attribute of two WFNs, which may be logical values (ANY or NA) or string values. :param string source: First attribute value :param string target: Second attribute value :returns: The attribute comparison relation. :rtype: int This function is a support function for compare_WFNs.
[ "Compares", "two", "values", "associated", "with", "an", "attribute", "of", "two", "WFNs", "which", "may", "be", "logical", "values", "(", "ANY", "or", "NA", ")", "or", "string", "values", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L74-L121
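The order of the checks in _compare fixes the semantics: an unquoted wildcard in the target wins over everything else, equality wins over ANY, ANY beats NA, and NA only forces DISJOINT once the earlier rules have failed. The probe below exercises that order by calling the helper directly; it is a sketch that assumes the module path cpe.cpeset2_3 shown in the record URLs, assumes the helpers are exposed as classmethods (as their cls signatures suggest), and assumes the WFN logical values are the plain strings "ANY" and "NA".

from cpe.cpeset2_3 import CPESet2_3  # module path taken from the record URL

cases = [
    ("ANY", "firefox"),      # expected SUPERSET: source matches anything
    ("firefox", "ANY"),      # expected SUBSET: target is broader
    ("NA", "firefox"),       # expected DISJOINT: NA never matches a string
    ("firefox", "firefox"),  # expected EQUAL
    ("firefox", "fire*"),    # unquoted wildcard in the target: expected UNDEFINED
]
for source, target in cases:
    print(source, target, CPESet2_3._compare(source, target))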
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3._compare_strings
def _compare_strings(cls, source, target): """ Compares a source string to a target string, and addresses the condition in which the source string includes unquoted special characters. It performs a simple regular expression match, with the assumption that (as required) unquoted special characters appear only at the beginning and/or the end of the source string. It also properly differentiates between unquoted and quoted special characters. :param string source: First string value :param string target: Second string value :returns: The comparison relation among input strings. :rtype: int """ start = 0 end = len(source) begins = 0 ends = 0 # Reading of initial wildcard in source if source.startswith(CPEComponent2_3_WFN.WILDCARD_MULTI): # Source starts with "*" start = 1 begins = -1 else: while ((start < len(source)) and source.startswith(CPEComponent2_3_WFN.WILDCARD_ONE, start, start)): # Source starts with one or more "?" start += 1 begins += 1 # Reading of final wildcard in source if (source.endswith(CPEComponent2_3_WFN.WILDCARD_MULTI) and CPESet2_3._is_even_wildcards(source, end - 1)): # Source ends in "*" end -= 1 ends = -1 else: while ((end > 0) and source.endswith(CPEComponent2_3_WFN.WILDCARD_ONE, end - 1, end) and CPESet2_3._is_even_wildcards(source, end - 1)): # Source ends in "?" end -= 1 ends += 1 source = source[start: end] index = -1 leftover = len(target) while (leftover > 0): index = target.find(source, index + 1) if (index == -1): break escapes = target.count("\\", 0, index) if ((index > 0) and (begins != -1) and (begins < (index - escapes))): break escapes = target.count("\\", index + 1, len(target)) leftover = len(target) - index - escapes - len(source) if ((leftover > 0) and ((ends != -1) and (leftover > ends))): continue return CPESet2_3.LOGICAL_VALUE_SUPERSET return CPESet2_3.LOGICAL_VALUE_DISJOINT
python
def _compare_strings(cls, source, target): """ Compares a source string to a target string, and addresses the condition in which the source string includes unquoted special characters. It performs a simple regular expression match, with the assumption that (as required) unquoted special characters appear only at the beginning and/or the end of the source string. It also properly differentiates between unquoted and quoted special characters. :param string source: First string value :param string target: Second string value :returns: The comparison relation among input strings. :rtype: int """ start = 0 end = len(source) begins = 0 ends = 0 # Reading of initial wildcard in source if source.startswith(CPEComponent2_3_WFN.WILDCARD_MULTI): # Source starts with "*" start = 1 begins = -1 else: while ((start < len(source)) and source.startswith(CPEComponent2_3_WFN.WILDCARD_ONE, start, start)): # Source starts with one or more "?" start += 1 begins += 1 # Reading of final wildcard in source if (source.endswith(CPEComponent2_3_WFN.WILDCARD_MULTI) and CPESet2_3._is_even_wildcards(source, end - 1)): # Source ends in "*" end -= 1 ends = -1 else: while ((end > 0) and source.endswith(CPEComponent2_3_WFN.WILDCARD_ONE, end - 1, end) and CPESet2_3._is_even_wildcards(source, end - 1)): # Source ends in "?" end -= 1 ends += 1 source = source[start: end] index = -1 leftover = len(target) while (leftover > 0): index = target.find(source, index + 1) if (index == -1): break escapes = target.count("\\", 0, index) if ((index > 0) and (begins != -1) and (begins < (index - escapes))): break escapes = target.count("\\", index + 1, len(target)) leftover = len(target) - index - escapes - len(source) if ((leftover > 0) and ((ends != -1) and (leftover > ends))): continue return CPESet2_3.LOGICAL_VALUE_SUPERSET return CPESet2_3.LOGICAL_VALUE_DISJOINT
[ "def", "_compare_strings", "(", "cls", ",", "source", ",", "target", ")", ":", "start", "=", "0", "end", "=", "len", "(", "source", ")", "begins", "=", "0", "ends", "=", "0", "# Reading of initial wildcard in source", "if", "source", ".", "startswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_MULTI", ")", ":", "# Source starts with \"*\"", "start", "=", "1", "begins", "=", "-", "1", "else", ":", "while", "(", "(", "start", "<", "len", "(", "source", ")", ")", "and", "source", ".", "startswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_ONE", ",", "start", ",", "start", ")", ")", ":", "# Source starts with one or more \"?\"", "start", "+=", "1", "begins", "+=", "1", "# Reading of final wildcard in source", "if", "(", "source", ".", "endswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_MULTI", ")", "and", "CPESet2_3", ".", "_is_even_wildcards", "(", "source", ",", "end", "-", "1", ")", ")", ":", "# Source ends in \"*\"", "end", "-=", "1", "ends", "=", "-", "1", "else", ":", "while", "(", "(", "end", ">", "0", ")", "and", "source", ".", "endswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_ONE", ",", "end", "-", "1", ",", "end", ")", "and", "CPESet2_3", ".", "_is_even_wildcards", "(", "source", ",", "end", "-", "1", ")", ")", ":", "# Source ends in \"?\"", "end", "-=", "1", "ends", "+=", "1", "source", "=", "source", "[", "start", ":", "end", "]", "index", "=", "-", "1", "leftover", "=", "len", "(", "target", ")", "while", "(", "leftover", ">", "0", ")", ":", "index", "=", "target", ".", "find", "(", "source", ",", "index", "+", "1", ")", "if", "(", "index", "==", "-", "1", ")", ":", "break", "escapes", "=", "target", ".", "count", "(", "\"\\\\\"", ",", "0", ",", "index", ")", "if", "(", "(", "index", ">", "0", ")", "and", "(", "begins", "!=", "-", "1", ")", "and", "(", "begins", "<", "(", "index", "-", "escapes", ")", ")", ")", ":", "break", "escapes", "=", "target", ".", "count", "(", "\"\\\\\"", ",", "index", "+", "1", ",", "len", "(", "target", ")", ")", "leftover", "=", "len", "(", "target", ")", "-", "index", "-", "escapes", "-", "len", "(", "source", ")", "if", "(", "(", "leftover", ">", "0", ")", "and", "(", "(", "ends", "!=", "-", "1", ")", "and", "(", "leftover", ">", "ends", ")", ")", ")", ":", "continue", "return", "CPESet2_3", ".", "LOGICAL_VALUE_SUPERSET", "return", "CPESet2_3", ".", "LOGICAL_VALUE_DISJOINT" ]
Compares a source string to a target string, and addresses the condition in which the source string includes unquoted special characters. It performs a simple regular expression match, with the assumption that (as required) unquoted special characters appear only at the beginning and/or the end of the source string. It also properly differentiates between unquoted and quoted special characters. :param string source: First string value :param string target: Second string value :returns: The comparison relation among input strings. :rtype: int
[ "Compares", "a", "source", "string", "to", "a", "target", "string", "and", "addresses", "the", "condition", "in", "which", "the", "source", "string", "includes", "unquoted", "special", "characters", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L124-L198
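_compare_strings only ever answers SUPERSET or DISJOINT: leading and trailing wildcards are stripped from the source and counted, and the remaining literal must occur in the target within those bounds, with backslash escapes discounted. A small probe, under the same classmethod and import-path assumptions as above:

from cpe.cpeset2_3 import CPESet2_3

SUPERSET = CPESet2_3.LOGICAL_VALUE_SUPERSET

probes = [
    ("foo*", "foobar"),  # trailing '*' absorbs the rest of the target
    ("*bar", "foobar"),  # leading '*' absorbs the prefix
    ("foo", "foobar"),   # no wildcards, leftover characters remain -> DISJOINT
]
for source, target in probes:
    print(source, target, CPESet2_3._compare_strings(source, target) == SUPERSET)
# Expected output: True, True, False for the three probes.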
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3._contains_wildcards
def _contains_wildcards(cls, s): """ Return True if the string contains any unquoted special characters (question-mark or asterisk), otherwise False. Ex: _contains_wildcards("foo") => FALSE Ex: _contains_wildcards("foo\?") => FALSE Ex: _contains_wildcards("foo?") => TRUE Ex: _contains_wildcards("\*bar") => FALSE Ex: _contains_wildcards("*bar") => TRUE :param string s: string to check :returns: True if string contains any unquoted special characters, False otherwise. :rtype: boolean This function is a support function for _compare(). """ idx = s.find("*") if idx != -1: if idx == 0: return True else: if s[idx - 1] != "\\": return True idx = s.find("?") if idx != -1: if idx == 0: return True else: if s[idx - 1] != "\\": return True return False
python
def _contains_wildcards(cls, s): """ Return True if the string contains any unquoted special characters (question-mark or asterisk), otherwise False. Ex: _contains_wildcards("foo") => FALSE Ex: _contains_wildcards("foo\?") => FALSE Ex: _contains_wildcards("foo?") => TRUE Ex: _contains_wildcards("\*bar") => FALSE Ex: _contains_wildcards("*bar") => TRUE :param string s: string to check :returns: True if string contains any unquoted special characters, False otherwise. :rtype: boolean This function is a support function for _compare(). """ idx = s.find("*") if idx != -1: if idx == 0: return True else: if s[idx - 1] != "\\": return True idx = s.find("?") if idx != -1: if idx == 0: return True else: if s[idx - 1] != "\\": return True return False
[ "def", "_contains_wildcards", "(", "cls", ",", "s", ")", ":", "idx", "=", "s", ".", "find", "(", "\"*\"", ")", "if", "idx", "!=", "-", "1", ":", "if", "idx", "==", "0", ":", "return", "True", "else", ":", "if", "s", "[", "idx", "-", "1", "]", "!=", "\"\\\\\"", ":", "return", "True", "idx", "=", "s", ".", "find", "(", "\"?\"", ")", "if", "idx", "!=", "-", "1", ":", "if", "idx", "==", "0", ":", "return", "True", "else", ":", "if", "s", "[", "idx", "-", "1", "]", "!=", "\"\\\\\"", ":", "return", "True", "return", "False" ]
Return True if the string contains any unquoted special characters (question-mark or asterisk), otherwise False. Ex: _contains_wildcards("foo") => FALSE Ex: _contains_wildcards("foo\?") => FALSE Ex: _contains_wildcards("foo?") => TRUE Ex: _contains_wildcards("\*bar") => FALSE Ex: _contains_wildcards("*bar") => TRUE :param string s: string to check :returns: True if string contains any unquoted special characters, False otherwise. :rtype: boolean This function is a support function for _compare().
[ "Return", "True", "if", "the", "string", "contains", "any", "unquoted", "special", "characters", "(", "question", "-", "mark", "or", "asterisk", ")", "otherwise", "False", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L201-L235
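The Ex: lines in the docstring translate directly into an executable check; raw strings keep the backslash that marks a quoted wildcard (same classmethod-call assumption as above):

from cpe.cpeset2_3 import CPESet2_3

print(CPESet2_3._contains_wildcards("foo"))      # False: no wildcard at all
print(CPESet2_3._contains_wildcards(r"foo\?"))   # False: the '?' is quoted
print(CPESet2_3._contains_wildcards("foo?"))     # True
print(CPESet2_3._contains_wildcards(r"\*bar"))   # False: the '*' is quoted
print(CPESet2_3._contains_wildcards("*bar"))     # True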
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3._is_even_wildcards
def _is_even_wildcards(cls, str, idx): """ Returns True if an even number of escape (backslash) characters precede the character at index idx in string str. :param string str: string to check :returns: True if an even number of escape characters precede the character at index idx in string str, False otherwise. :rtype: boolean """ result = 0 while ((idx > 0) and (str[idx - 1] == "\\")): idx -= 1 result += 1 isEvenNumber = (result % 2) == 0 return isEvenNumber
python
def _is_even_wildcards(cls, str, idx): """ Returns True if an even number of escape (backslash) characters precede the character at index idx in string str. :param string str: string to check :returns: True if an even number of escape characters precede the character at index idx in string str, False otherwise. :rtype: boolean """ result = 0 while ((idx > 0) and (str[idx - 1] == "\\")): idx -= 1 result += 1 isEvenNumber = (result % 2) == 0 return isEvenNumber
[ "def", "_is_even_wildcards", "(", "cls", ",", "str", ",", "idx", ")", ":", "result", "=", "0", "while", "(", "(", "idx", ">", "0", ")", "and", "(", "str", "[", "idx", "-", "1", "]", "==", "\"\\\\\"", ")", ")", ":", "idx", "-=", "1", "result", "+=", "1", "isEvenNumber", "=", "(", "result", "%", "2", ")", "==", "0", "return", "isEvenNumber" ]
Returns True if an even number of escape (backslash) characters precede the character at index idx in string str. :param string str: string to check :returns: True if an even number of escape characters precede the character at index idx in string str, False otherwise. :rtype: boolean
[ "Returns", "True", "if", "an", "even", "number", "of", "escape", "(", "backslash", ")", "characters", "precede", "the", "character", "at", "index", "idx", "in", "string", "str", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L238-L255
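The parity test is what separates a quoted wildcard from an escaped backslash followed by a live wildcard: an even number of preceding backslashes means the wildcard itself is unquoted. A two-case illustration, same assumptions as above:

from cpe.cpeset2_3 import CPESet2_3

s1 = r"foo\*"   # one backslash before '*': the '*' is quoted
s2 = r"foo\\*"  # two backslashes: the '*' itself is live
print(CPESet2_3._is_even_wildcards(s1, s1.index("*")))  # False
print(CPESet2_3._is_even_wildcards(s2, s2.index("*")))  # True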
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3._is_string
def _is_string(cls, arg): """ Return True if arg is a string value, and False if arg is a logical value (ANY or NA). :param string arg: string to check :returns: True if value is a string, False if it is a logical value. :rtype: boolean This function is a support function for _compare(). """ isAny = arg == CPEComponent2_3_WFN.VALUE_ANY isNa = arg == CPEComponent2_3_WFN.VALUE_NA return not (isAny or isNa)
python
def _is_string(cls, arg): """ Return True if arg is a string value, and False if arg is a logical value (ANY or NA). :param string arg: string to check :returns: True if value is a string, False if it is a logical value. :rtype: boolean This function is a support function for _compare(). """ isAny = arg == CPEComponent2_3_WFN.VALUE_ANY isNa = arg == CPEComponent2_3_WFN.VALUE_NA return not (isAny or isNa)
[ "def", "_is_string", "(", "cls", ",", "arg", ")", ":", "isAny", "=", "arg", "==", "CPEComponent2_3_WFN", ".", "VALUE_ANY", "isNa", "=", "arg", "==", "CPEComponent2_3_WFN", ".", "VALUE_NA", "return", "not", "(", "isAny", "or", "isNa", ")" ]
Return True if arg is a string value, and False if arg is a logical value (ANY or NA). :param string arg: string to check :returns: True if value is a string, False if it is a logical value. :rtype: boolean This function is a support function for _compare().
[ "Return", "True", "if", "arg", "is", "a", "string", "value", "and", "False", "if", "arg", "is", "a", "logical", "value", "(", "ANY", "or", "NA", ")", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L258-L273
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3.compare_wfns
def compare_wfns(cls, source, target): """ Compares two WFNs and returns a generator of pairwise attribute-value comparison results. It provides full access to the individual comparison results to enable use-case specific implementations of novel name-comparison algorithms. Compare each attribute of the Source WFN to the Target WFN: :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: generator of pairwise attribute comparison results :rtype: generator """ # Compare results using the get() function in WFN for att in CPEComponent.CPE_COMP_KEYS_EXTENDED: value_src = source.get_attribute_values(att)[0] if value_src.find('"') > -1: # Not a logical value: del double quotes value_src = value_src[1:-1] value_tar = target.get_attribute_values(att)[0] if value_tar.find('"') > -1: # Not a logical value: del double quotes value_tar = value_tar[1:-1] yield (att, CPESet2_3._compare(value_src, value_tar))
python
def compare_wfns(cls, source, target): """ Compares two WFNs and returns a generator of pairwise attribute-value comparison results. It provides full access to the individual comparison results to enable use-case specific implementations of novel name-comparison algorithms. Compare each attribute of the Source WFN to the Target WFN: :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: generator of pairwise attribute comparison results :rtype: generator """ # Compare results using the get() function in WFN for att in CPEComponent.CPE_COMP_KEYS_EXTENDED: value_src = source.get_attribute_values(att)[0] if value_src.find('"') > -1: # Not a logical value: del double quotes value_src = value_src[1:-1] value_tar = target.get_attribute_values(att)[0] if value_tar.find('"') > -1: # Not a logical value: del double quotes value_tar = value_tar[1:-1] yield (att, CPESet2_3._compare(value_src, value_tar))
[ "def", "compare_wfns", "(", "cls", ",", "source", ",", "target", ")", ":", "# Compare results using the get() function in WFN", "for", "att", "in", "CPEComponent", ".", "CPE_COMP_KEYS_EXTENDED", ":", "value_src", "=", "source", ".", "get_attribute_values", "(", "att", ")", "[", "0", "]", "if", "value_src", ".", "find", "(", "'\"'", ")", ">", "-", "1", ":", "# Not a logical value: del double quotes", "value_src", "=", "value_src", "[", "1", ":", "-", "1", "]", "value_tar", "=", "target", ".", "get_attribute_values", "(", "att", ")", "[", "0", "]", "if", "value_tar", ".", "find", "(", "'\"'", ")", ">", "-", "1", ":", "# Not a logical value: del double quotes", "value_tar", "=", "value_tar", "[", "1", ":", "-", "1", "]", "yield", "(", "att", ",", "CPESet2_3", ".", "_compare", "(", "value_src", ",", "value_tar", ")", ")" ]
Compares two WFNs and returns a generator of pairwise attribute-value comparison results. It provides full access to the individual comparison results to enable use-case specific implementations of novel name-comparison algorithms. Compare each attribute of the Source WFN to the Target WFN: :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: second WFN CPE Name :returns: generator of pairwise attribute comparison results :rtype: generator
[ "Compares", "two", "WFNs", "and", "returns", "a", "generator", "of", "pairwise", "attribute", "-", "value", "comparison", "results", ".", "It", "provides", "full", "access", "to", "the", "individual", "comparison", "results", "to", "enable", "use", "-", "case", "specific", "implementations", "of", "novel", "name", "-", "comparison", "algorithms", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L276-L303
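compare_wfns is where the pieces above meet: it yields one (attribute, relation) pair per WFN attribute and leaves the interpretation to the caller. A usage sketch; the CPE2_3_WFN import path and the example WFN strings are assumptions based on the package layout and standard WFN syntax, not something given in this excerpt.

from cpe.cpe2_3_wfn import CPE2_3_WFN   # assumed module path
from cpe.cpeset2_3 import CPESet2_3

# Hypothetical names: the source is broader than the target (version=ANY).
source = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version=ANY]')
target = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version="60\\.0"]')

for att, rel in CPESet2_3.compare_wfns(source, target):
    print(att, rel)
# Expected: version compares as SUPERSET, the attributes given in both names as EQUAL.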
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3.cpe_disjoint
def cpe_disjoint(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is DISJOINT. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is DISJOINT, otherwise False. :rtype: boolean """ # If any pairwise comparison returned DISJOINT then # the overall name relationship is DISJOINT for att, result in CPESet2_3.compare_wfns(source, target): isDisjoint = result == CPESet2_3.LOGICAL_VALUE_DISJOINT if isDisjoint: return True return False
python
def cpe_disjoint(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is DISJOINT. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is DISJOINT, otherwise False. :rtype: boolean """ # If any pairwise comparison returned DISJOINT then # the overall name relationship is DISJOINT for att, result in CPESet2_3.compare_wfns(source, target): isDisjoint = result == CPESet2_3.LOGICAL_VALUE_DISJOINT if isDisjoint: return True return False
[ "def", "cpe_disjoint", "(", "cls", ",", "source", ",", "target", ")", ":", "# If any pairwise comparison returned DISJOINT then", "# the overall name relationship is DISJOINT", "for", "att", ",", "result", "in", "CPESet2_3", ".", "compare_wfns", "(", "source", ",", "target", ")", ":", "isDisjoint", "=", "result", "==", "CPESet2_3", ".", "LOGICAL_VALUE_DISJOINT", "if", "isDisjoint", ":", "return", "True", "return", "False" ]
Compares two WFNs and returns True if the set-theoretic relation between the names is DISJOINT. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: second WFN CPE Name :returns: True if the set relation between source and target is DISJOINT, otherwise False. :rtype: boolean
[ "Compares", "two", "WFNs", "and", "returns", "True", "if", "the", "set", "-", "theoretic", "relation", "between", "the", "names", "is", "DISJOINT", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L306-L324
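cpe_disjoint only needs a single DISJOINT attribute to answer True. A short sketch with two names that can never refer to the same product, under the same assumptions as the previous example:

from cpe.cpe2_3_wfn import CPE2_3_WFN
from cpe.cpeset2_3 import CPESet2_3

a = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox"]')
b = CPE2_3_WFN('wfn:[part="a", vendor="google", product="chrome"]')
print(CPESet2_3.cpe_disjoint(a, b))  # expected True: vendor (and product) compare as DISJOINT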
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3.cpe_equal
def cpe_equal(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is EQUAL. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is EQUAL, otherwise False. :rtype: boolean """ # If any pairwise comparison returned EQUAL then # the overall name relationship is EQUAL for att, result in CPESet2_3.compare_wfns(source, target): isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL if not isEqual: return False return True
python
def cpe_equal(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is EQUAL. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is EQUAL, otherwise False. :rtype: boolean """ # If any pairwise comparison returned EQUAL then # the overall name relationship is EQUAL for att, result in CPESet2_3.compare_wfns(source, target): isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL if not isEqual: return False return True
[ "def", "cpe_equal", "(", "cls", ",", "source", ",", "target", ")", ":", "# If any pairwise comparison returned EQUAL then", "# the overall name relationship is EQUAL", "for", "att", ",", "result", "in", "CPESet2_3", ".", "compare_wfns", "(", "source", ",", "target", ")", ":", "isEqual", "=", "result", "==", "CPESet2_3", ".", "LOGICAL_VALUE_EQUAL", "if", "not", "isEqual", ":", "return", "False", "return", "True" ]
Compares two WFNs and returns True if the set-theoretic relation between the names is EQUAL. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: second WFN CPE Name :returns: True if the set relation between source and target is EQUAL, otherwise False. :rtype: boolean
[ "Compares", "two", "WFNs", "and", "returns", "True", "if", "the", "set", "-", "theoretic", "relation", "between", "the", "names", "is", "EQUAL", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L327-L345
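cpe_equal, by contrast, requires every attribute to compare EQUAL, which in practice means the two WFNs carry the same values (attributes left unset default to ANY on both sides). Sketch, same assumptions:

from cpe.cpe2_3_wfn import CPE2_3_WFN
from cpe.cpeset2_3 import CPESet2_3

a = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version="60\\.0"]')
b = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version="60\\.0"]')
print(CPESet2_3.cpe_equal(a, b))  # expected True: every attribute compares EQUAL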
nilp0inter/cpe
cpe/cpeset2_3.py
CPESet2_3.cpe_subset
def cpe_subset(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is (non-proper) SUBSET. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is SUBSET, otherwise False. :rtype: boolean """ # If any pairwise comparison returned something other than SUBSET # or EQUAL, then SUBSET is False. for att, result in CPESet2_3.compare_wfns(source, target): isSubset = result == CPESet2_3.LOGICAL_VALUE_SUBSET isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL if (not isSubset) and (not isEqual): return False return True
python
def cpe_subset(cls, source, target): """ Compares two WFNs and returns True if the set-theoretic relation between the names is (non-proper) SUBSET. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: seconds WFN CPE Name :returns: True if the set relation between source and target is SUBSET, otherwise False. :rtype: boolean """ # If any pairwise comparison returned something other than SUBSET # or EQUAL, then SUBSET is False. for att, result in CPESet2_3.compare_wfns(source, target): isSubset = result == CPESet2_3.LOGICAL_VALUE_SUBSET isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL if (not isSubset) and (not isEqual): return False return True
[ "def", "cpe_subset", "(", "cls", ",", "source", ",", "target", ")", ":", "# If any pairwise comparison returned something other than SUBSET", "# or EQUAL, then SUBSET is False.", "for", "att", ",", "result", "in", "CPESet2_3", ".", "compare_wfns", "(", "source", ",", "target", ")", ":", "isSubset", "=", "result", "==", "CPESet2_3", ".", "LOGICAL_VALUE_SUBSET", "isEqual", "=", "result", "==", "CPESet2_3", ".", "LOGICAL_VALUE_EQUAL", "if", "(", "not", "isSubset", ")", "and", "(", "not", "isEqual", ")", ":", "return", "False", "return", "True" ]
Compares two WFNs and returns True if the set-theoretic relation between the names is (non-proper) SUBSET. :param CPE2_3_WFN source: first WFN CPE Name :param CPE2_3_WFN target: second WFN CPE Name :returns: True if the set relation between source and target is SUBSET, otherwise False. :rtype: boolean
[ "Compares", "two", "WFNs", "and", "returns", "True", "if", "the", "set", "-", "theoretic", "relation", "between", "the", "names", "is", "(", "non", "-", "proper", ")", "SUBSET", "." ]
train
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L348-L367
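cpe_subset completes the set: every attribute must compare SUBSET or EQUAL. The asymmetry with ANY is the typical use case, for example checking whether a fully specified name falls under a broader one (same assumptions as the previous sketches):

from cpe.cpe2_3_wfn import CPE2_3_WFN
from cpe.cpeset2_3 import CPESet2_3

narrow = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version="60\\.0"]')
broad = CPE2_3_WFN('wfn:[part="a", vendor="mozilla", product="firefox", version=ANY]')

print(CPESet2_3.cpe_subset(narrow, broad))  # expected True: "60\.0" is a subset of ANY
print(CPESet2_3.cpe_subset(broad, narrow))  # expected False: ANY is a superset here, not a subset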