id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
26,400
gunthercox/ChatterBot
chatterbot/logic/logic_adapter.py
LogicAdapter.get_default_response
def get_default_response(self, input_statement): """ This method is called when a logic adapter is unable to generate any other meaningful response. """ from random import choice if self.default_responses: response = choice(self.default_responses) else: try: response = self.chatbot.storage.get_random() except StorageAdapter.EmptyDatabaseException: response = input_statement self.chatbot.logger.info( 'No known response to the input was found. Selecting a random response.' ) # Set confidence to zero because a random response is selected response.confidence = 0 return response
python
def get_default_response(self, input_statement):
    """
    This method is called when a logic adapter is unable to generate any
    other meaningful response.

    Prefers a configured default response; otherwise falls back to a random
    stored statement, or echoes the input when the database is empty.
    """
    from random import choice

    if not self.default_responses:
        try:
            fallback = self.chatbot.storage.get_random()
        except StorageAdapter.EmptyDatabaseException:
            fallback = input_statement
    else:
        fallback = choice(self.default_responses)

    self.chatbot.logger.info(
        'No known response to the input was found. Selecting a random response.'
    )

    # A randomly chosen reply carries no certainty, so report zero confidence.
    fallback.confidence = 0

    return fallback
[ "def", "get_default_response", "(", "self", ",", "input_statement", ")", ":", "from", "random", "import", "choice", "if", "self", ".", "default_responses", ":", "response", "=", "choice", "(", "self", ".", "default_responses", ")", "else", ":", "try", ":", "response", "=", "self", ".", "chatbot", ".", "storage", ".", "get_random", "(", ")", "except", "StorageAdapter", ".", "EmptyDatabaseException", ":", "response", "=", "input_statement", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'No known response to the input was found. Selecting a random response.'", ")", "# Set confidence to zero because a random response is selected", "response", ".", "confidence", "=", "0", "return", "response" ]
This method is called when a logic adapter is unable to generate any other meaningful response.
[ "This", "method", "is", "called", "when", "a", "logic", "adapter", "is", "unable", "to", "generate", "any", "other", "meaningful", "response", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/logic_adapter.py#L103-L125
26,401
gunthercox/ChatterBot
chatterbot/logic/time_adapter.py
TimeLogicAdapter.time_question_features
def time_question_features(self, text): """ Provide an analysis of significant features in the string. """ features = {} # A list of all words from the known sentences all_words = " ".join(self.positive + self.negative).split() # A list of the first word in each of the known sentence all_first_words = [] for sentence in self.positive + self.negative: all_first_words.append( sentence.split(' ', 1)[0] ) for word in text.split(): features['first_word({})'.format(word)] = (word in all_first_words) for word in text.split(): features['contains({})'.format(word)] = (word in all_words) for letter in 'abcdefghijklmnopqrstuvwxyz': features['count({})'.format(letter)] = text.lower().count(letter) features['has({})'.format(letter)] = (letter in text.lower()) return features
python
def time_question_features(self, text):
    """
    Provide an analysis of significant features in the string.

    Builds a feature dictionary with first-word membership, word
    membership, and per-letter count/presence entries.
    """
    features = {}

    known_sentences = self.positive + self.negative

    # Every word occurring anywhere in the known sentences
    all_words = " ".join(known_sentences).split()

    # The leading word of each known sentence
    all_first_words = [
        sentence.split(' ', 1)[0] for sentence in known_sentences
    ]

    for word in text.split():
        features['first_word({})'.format(word)] = (word in all_first_words)

    for word in text.split():
        features['contains({})'.format(word)] = (word in all_words)

    lowered = text.lower()
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        features['count({})'.format(letter)] = lowered.count(letter)
        features['has({})'.format(letter)] = (letter in lowered)

    return features
[ "def", "time_question_features", "(", "self", ",", "text", ")", ":", "features", "=", "{", "}", "# A list of all words from the known sentences", "all_words", "=", "\" \"", ".", "join", "(", "self", ".", "positive", "+", "self", ".", "negative", ")", ".", "split", "(", ")", "# A list of the first word in each of the known sentence", "all_first_words", "=", "[", "]", "for", "sentence", "in", "self", ".", "positive", "+", "self", ".", "negative", ":", "all_first_words", ".", "append", "(", "sentence", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", ")", "for", "word", "in", "text", ".", "split", "(", ")", ":", "features", "[", "'first_word({})'", ".", "format", "(", "word", ")", "]", "=", "(", "word", "in", "all_first_words", ")", "for", "word", "in", "text", ".", "split", "(", ")", ":", "features", "[", "'contains({})'", ".", "format", "(", "word", ")", "]", "=", "(", "word", "in", "all_words", ")", "for", "letter", "in", "'abcdefghijklmnopqrstuvwxyz'", ":", "features", "[", "'count({})'", ".", "format", "(", "letter", ")", "]", "=", "text", ".", "lower", "(", ")", ".", "count", "(", "letter", ")", "features", "[", "'has({})'", ".", "format", "(", "letter", ")", "]", "=", "(", "letter", "in", "text", ".", "lower", "(", ")", ")", "return", "features" ]
Provide an analysis of significant features in the string.
[ "Provide", "an", "analysis", "of", "significant", "features", "in", "the", "string", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/time_adapter.py#L56-L82
26,402
gunthercox/ChatterBot
chatterbot/logic/mathematical_evaluation.py
MathematicalEvaluation.can_process
def can_process(self, statement): """ Determines whether it is appropriate for this adapter to respond to the user input. """ response = self.process(statement) self.cache[statement.text] = response return response.confidence == 1
python
def can_process(self, statement):
    """
    Determines whether it is appropriate for this
    adapter to respond to the user input.
    """
    # Run the full evaluation once and remember the outcome so that
    # process() can reuse it instead of recomputing.
    result = self.process(statement)
    self.cache[statement.text] = result

    return result.confidence == 1
[ "def", "can_process", "(", "self", ",", "statement", ")", ":", "response", "=", "self", ".", "process", "(", "statement", ")", "self", ".", "cache", "[", "statement", ".", "text", "]", "=", "response", "return", "response", ".", "confidence", "==", "1" ]
Determines whether it is appropriate for this adapter to respond to the user input.
[ "Determines", "whether", "it", "is", "appropriate", "for", "this", "adapter", "to", "respond", "to", "the", "user", "input", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/mathematical_evaluation.py#L28-L35
26,403
gunthercox/ChatterBot
chatterbot/logic/mathematical_evaluation.py
MathematicalEvaluation.process
def process(self, statement, additional_response_selection_parameters=None): """ Takes a statement string. Returns the equation from the statement with the mathematical terms solved. """ from mathparse import mathparse input_text = statement.text # Use the result cached by the process method if it exists if input_text in self.cache: cached_result = self.cache[input_text] self.cache = {} return cached_result # Getting the mathematical terms within the input statement expression = mathparse.extract_expression(input_text, language=self.language.ISO_639.upper()) response = Statement(text=expression) try: response.text += ' = ' + str( mathparse.parse(expression, language=self.language.ISO_639.upper()) ) # The confidence is 1 if the expression could be evaluated response.confidence = 1 except mathparse.PostfixTokenEvaluationException: response.confidence = 0 return response
python
def process(self, statement, additional_response_selection_parameters=None):
    """
    Takes a statement string.
    Returns the equation from the statement with the
    mathematical terms solved.
    """
    from mathparse import mathparse

    input_text = statement.text

    # Use the result cached by the process method if it exists
    if input_text in self.cache:
        cached_result = self.cache[input_text]
        self.cache = {}
        return cached_result

    language_code = self.language.ISO_639.upper()

    # Getting the mathematical terms within the input statement
    expression = mathparse.extract_expression(input_text, language=language_code)

    response = Statement(text=expression)

    try:
        solution = mathparse.parse(expression, language=language_code)
        response.text += ' = ' + str(solution)

        # The confidence is 1 if the expression could be evaluated
        response.confidence = 1
    except mathparse.PostfixTokenEvaluationException:
        response.confidence = 0

    return response
[ "def", "process", "(", "self", ",", "statement", ",", "additional_response_selection_parameters", "=", "None", ")", ":", "from", "mathparse", "import", "mathparse", "input_text", "=", "statement", ".", "text", "# Use the result cached by the process method if it exists", "if", "input_text", "in", "self", ".", "cache", ":", "cached_result", "=", "self", ".", "cache", "[", "input_text", "]", "self", ".", "cache", "=", "{", "}", "return", "cached_result", "# Getting the mathematical terms within the input statement", "expression", "=", "mathparse", ".", "extract_expression", "(", "input_text", ",", "language", "=", "self", ".", "language", ".", "ISO_639", ".", "upper", "(", ")", ")", "response", "=", "Statement", "(", "text", "=", "expression", ")", "try", ":", "response", ".", "text", "+=", "' = '", "+", "str", "(", "mathparse", ".", "parse", "(", "expression", ",", "language", "=", "self", ".", "language", ".", "ISO_639", ".", "upper", "(", ")", ")", ")", "# The confidence is 1 if the expression could be evaluated", "response", ".", "confidence", "=", "1", "except", "mathparse", ".", "PostfixTokenEvaluationException", ":", "response", ".", "confidence", "=", "0", "return", "response" ]
Takes a statement string. Returns the equation from the statement with the mathematical terms solved.
[ "Takes", "a", "statement", "string", ".", "Returns", "the", "equation", "from", "the", "statement", "with", "the", "mathematical", "terms", "solved", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/mathematical_evaluation.py#L37-L67
26,404
gunthercox/ChatterBot
chatterbot/filters.py
get_recent_repeated_responses
def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3): """ A filter that eliminates possibly repetitive responses to prevent a chat bot from repeating statements that it has recently said. """ from collections import Counter # Get the most recent statements from the conversation conversation_statements = list(chatbot.storage.filter( conversation=conversation, order_by=['id'] ))[sample * -1:] text_of_recent_responses = [ statement.text for statement in conversation_statements ] counter = Counter(text_of_recent_responses) # Find the n most common responses from the conversation most_common = counter.most_common(quantity) return [ counted[0] for counted in most_common if counted[1] >= threshold ]
python
def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3):
    """
    A filter that eliminates possibly repetitive responses to prevent
    a chat bot from repeating statements that it has recently said.
    """
    from collections import Counter

    # Get the most recent statements from the conversation
    recent_statements = list(chatbot.storage.filter(
        conversation=conversation,
        order_by=['id']
    ))[-sample:]

    recent_texts = [statement.text for statement in recent_statements]

    # Find the n most common responses from the conversation
    frequent = Counter(recent_texts).most_common(quantity)

    # Keep only the responses that repeat often enough to matter
    return [text for text, count in frequent if count >= threshold]
[ "def", "get_recent_repeated_responses", "(", "chatbot", ",", "conversation", ",", "sample", "=", "10", ",", "threshold", "=", "3", ",", "quantity", "=", "3", ")", ":", "from", "collections", "import", "Counter", "# Get the most recent statements from the conversation", "conversation_statements", "=", "list", "(", "chatbot", ".", "storage", ".", "filter", "(", "conversation", "=", "conversation", ",", "order_by", "=", "[", "'id'", "]", ")", ")", "[", "sample", "*", "-", "1", ":", "]", "text_of_recent_responses", "=", "[", "statement", ".", "text", "for", "statement", "in", "conversation_statements", "]", "counter", "=", "Counter", "(", "text_of_recent_responses", ")", "# Find the n most common responses from the conversation", "most_common", "=", "counter", ".", "most_common", "(", "quantity", ")", "return", "[", "counted", "[", "0", "]", "for", "counted", "in", "most_common", "if", "counted", "[", "1", "]", ">=", "threshold", "]" ]
A filter that eliminates possibly repetitive responses to prevent a chat bot from repeating statements that it has recently said.
[ "A", "filter", "that", "eliminates", "possibly", "repetitive", "responses", "to", "prevent", "a", "chat", "bot", "from", "repeating", "statements", "that", "it", "has", "recently", "said", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/filters.py#L1-L26
26,405
gunthercox/ChatterBot
chatterbot/comparisons.py
JaccardSimilarity.compare
def compare(self, statement_a, statement_b): """ Return the calculated similarity of two statements based on the Jaccard index. """ # Make both strings lowercase document_a = self.nlp(statement_a.text.lower()) document_b = self.nlp(statement_b.text.lower()) statement_a_lemmas = set([ token.lemma_ for token in document_a if not token.is_stop ]) statement_b_lemmas = set([ token.lemma_ for token in document_b if not token.is_stop ]) # Calculate Jaccard similarity numerator = len(statement_a_lemmas.intersection(statement_b_lemmas)) denominator = float(len(statement_a_lemmas.union(statement_b_lemmas))) ratio = numerator / denominator return ratio
python
def compare(self, statement_a, statement_b):
    """
    Return the calculated similarity of two statements
    based on the Jaccard index.

    :param statement_a: First statement; its ``text`` attribute is compared.
    :param statement_b: Second statement.
    :return: A ratio between 0 and 1, where 1 means identical lemma sets.
    """
    # Make both strings lowercase before lemmatization
    document_a = self.nlp(statement_a.text.lower())
    document_b = self.nlp(statement_b.text.lower())

    statement_a_lemmas = {
        token.lemma_ for token in document_a if not token.is_stop
    }
    statement_b_lemmas = {
        token.lemma_ for token in document_b if not token.is_stop
    }

    # Calculate Jaccard similarity: |A intersect B| / |A union B|
    numerator = len(statement_a_lemmas.intersection(statement_b_lemmas))
    denominator = len(statement_a_lemmas.union(statement_b_lemmas))

    # Guard against division by zero when both statements contain only
    # stop words (or are empty): previously this raised ZeroDivisionError.
    if denominator == 0:
        return 0

    return numerator / denominator
[ "def", "compare", "(", "self", ",", "statement_a", ",", "statement_b", ")", ":", "# Make both strings lowercase", "document_a", "=", "self", ".", "nlp", "(", "statement_a", ".", "text", ".", "lower", "(", ")", ")", "document_b", "=", "self", ".", "nlp", "(", "statement_b", ".", "text", ".", "lower", "(", ")", ")", "statement_a_lemmas", "=", "set", "(", "[", "token", ".", "lemma_", "for", "token", "in", "document_a", "if", "not", "token", ".", "is_stop", "]", ")", "statement_b_lemmas", "=", "set", "(", "[", "token", ".", "lemma_", "for", "token", "in", "document_b", "if", "not", "token", ".", "is_stop", "]", ")", "# Calculate Jaccard similarity", "numerator", "=", "len", "(", "statement_a_lemmas", ".", "intersection", "(", "statement_b_lemmas", ")", ")", "denominator", "=", "float", "(", "len", "(", "statement_a_lemmas", ".", "union", "(", "statement_b_lemmas", ")", ")", ")", "ratio", "=", "numerator", "/", "denominator", "return", "ratio" ]
Return the calculated similarity of two statements based on the Jaccard index.
[ "Return", "the", "calculated", "similarity", "of", "two", "statements", "based", "on", "the", "Jaccard", "index", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/comparisons.py#L114-L135
26,406
gunthercox/ChatterBot
chatterbot/storage/mongodb.py
MongoDatabaseAdapter.get_statement_model
def get_statement_model(self): """ Return the class for the statement model. """ from chatterbot.conversation import Statement # Create a storage-aware statement statement = Statement statement.storage = self return statement
python
def get_statement_model(self):
    """
    Return the class for the statement model.
    """
    from chatterbot.conversation import Statement

    # Create a storage-aware statement
    # NOTE(review): no subclass or copy is made here — this assigns an
    # attribute on the shared Statement class itself, so every consumer
    # of chatterbot.conversation.Statement sees this adapter as `storage`
    # after the call. Confirm this global side effect is intended.
    statement = Statement
    statement.storage = self

    return statement
[ "def", "get_statement_model", "(", "self", ")", ":", "from", "chatterbot", ".", "conversation", "import", "Statement", "# Create a storage-aware statement", "statement", "=", "Statement", "statement", ".", "storage", "=", "self", "return", "statement" ]
Return the class for the statement model.
[ "Return", "the", "class", "for", "the", "statement", "model", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/mongodb.py#L44-L54
26,407
gunthercox/ChatterBot
chatterbot/storage/mongodb.py
MongoDatabaseAdapter.mongo_to_object
def mongo_to_object(self, statement_data): """ Return Statement object when given data returned from Mongo DB. """ Statement = self.get_model('statement') statement_data['id'] = statement_data['_id'] return Statement(**statement_data)
python
def mongo_to_object(self, statement_data):
    """
    Return Statement object when given data returned from Mongo DB.
    """
    statement_model = self.get_model('statement')

    # Mongo keeps the primary key under '_id'; expose it as 'id' as well
    # so the statement constructor receives the attribute name it expects.
    # Note that this mutates the caller's dictionary in place.
    statement_data['id'] = statement_data['_id']

    return statement_model(**statement_data)
[ "def", "mongo_to_object", "(", "self", ",", "statement_data", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "statement_data", "[", "'id'", "]", "=", "statement_data", "[", "'_id'", "]", "return", "Statement", "(", "*", "*", "statement_data", ")" ]
Return Statement object when given data returned from Mongo DB.
[ "Return", "Statement", "object", "when", "given", "data", "returned", "from", "Mongo", "DB", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/mongodb.py#L59-L68
26,408
gunthercox/ChatterBot
chatterbot/ext/sqlalchemy_app/models.py
Statement.add_tags
def add_tags(self, *tags): """ Add a list of strings to the statement as tags. """ self.tags.extend([ Tag(name=tag) for tag in tags ])
python
def add_tags(self, *tags):
    """
    Add a list of strings to the statement as tags.
    """
    # Wrap each name in a Tag record and attach it to this statement
    for tag_name in tags:
        self.tags.append(Tag(name=tag_name))
[ "def", "add_tags", "(", "self", ",", "*", "tags", ")", ":", "self", ".", "tags", ".", "extend", "(", "[", "Tag", "(", "name", "=", "tag", ")", "for", "tag", "in", "tags", "]", ")" ]
Add a list of strings to the statement as tags.
[ "Add", "a", "list", "of", "strings", "to", "the", "statement", "as", "tags", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/ext/sqlalchemy_app/models.py#L108-L114
26,409
gunthercox/ChatterBot
chatterbot/trainers.py
Trainer.get_preprocessed_statement
def get_preprocessed_statement(self, input_statement): """ Preprocess the input statement. """ for preprocessor in self.chatbot.preprocessors: input_statement = preprocessor(input_statement) return input_statement
python
def get_preprocessed_statement(self, input_statement):
    """
    Preprocess the input statement.

    Each configured preprocessor is applied in order, feeding the
    result of one into the next.
    """
    statement = input_statement
    for preprocessor in self.chatbot.preprocessors:
        statement = preprocessor(statement)

    return statement
[ "def", "get_preprocessed_statement", "(", "self", ",", "input_statement", ")", ":", "for", "preprocessor", "in", "self", ".", "chatbot", ".", "preprocessors", ":", "input_statement", "=", "preprocessor", "(", "input_statement", ")", "return", "input_statement" ]
Preprocess the input statement.
[ "Preprocess", "the", "input", "statement", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L30-L37
26,410
gunthercox/ChatterBot
chatterbot/trainers.py
Trainer.export_for_training
def export_for_training(self, file_path='./export.json'): """ Create a file from the database that can be used to train other chat bots. """ import json export = {'conversations': self._generate_export_data()} with open(file_path, 'w+') as jsonfile: json.dump(export, jsonfile, ensure_ascii=False)
python
def export_for_training(self, file_path='./export.json'):
    """
    Create a file from the database that can be used to
    train other chat bots.
    """
    import json

    export = {
        'conversations': self._generate_export_data()
    }

    # ensure_ascii=False keeps non-ASCII text readable in the export file
    with open(file_path, 'w+') as jsonfile:
        json.dump(export, jsonfile, ensure_ascii=False)
[ "def", "export_for_training", "(", "self", ",", "file_path", "=", "'./export.json'", ")", ":", "import", "json", "export", "=", "{", "'conversations'", ":", "self", ".", "_generate_export_data", "(", ")", "}", "with", "open", "(", "file_path", ",", "'w+'", ")", "as", "jsonfile", ":", "json", ".", "dump", "(", "export", ",", "jsonfile", ",", "ensure_ascii", "=", "False", ")" ]
Create a file from the database that can be used to train other chat bots.
[ "Create", "a", "file", "from", "the", "database", "that", "can", "be", "used", "to", "train", "other", "chat", "bots", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L66-L74
26,411
gunthercox/ChatterBot
chatterbot/trainers.py
ListTrainer.train
def train(self, conversation): """ Train the chat bot based on the provided list of statements that represents a single conversation. """ previous_statement_text = None previous_statement_search_text = '' statements_to_create = [] for conversation_count, text in enumerate(conversation): if self.show_training_progress: utils.print_progress_bar( 'List Trainer', conversation_count + 1, len(conversation) ) statement_search_text = self.chatbot.storage.tagger.get_bigram_pair_string(text) statement = self.get_preprocessed_statement( Statement( text=text, search_text=statement_search_text, in_response_to=previous_statement_text, search_in_response_to=previous_statement_search_text, conversation='training' ) ) previous_statement_text = statement.text previous_statement_search_text = statement_search_text statements_to_create.append(statement) self.chatbot.storage.create_many(statements_to_create)
python
def train(self, conversation):
    """
    Train the chat bot based on the provided list of
    statements that represents a single conversation.
    """
    previous_text = None
    previous_search_text = ''
    total = len(conversation)

    pending_statements = []

    for index, text in enumerate(conversation):
        if self.show_training_progress:
            utils.print_progress_bar(
                'List Trainer',
                index + 1, total
            )

        search_text = self.chatbot.storage.tagger.get_bigram_pair_string(text)

        # Link each statement to the statement that preceded it so the
        # conversation order is preserved in storage.
        statement = self.get_preprocessed_statement(
            Statement(
                text=text,
                search_text=search_text,
                in_response_to=previous_text,
                search_in_response_to=previous_search_text,
                conversation='training'
            )
        )

        previous_text = statement.text
        previous_search_text = search_text

        pending_statements.append(statement)

    self.chatbot.storage.create_many(pending_statements)
[ "def", "train", "(", "self", ",", "conversation", ")", ":", "previous_statement_text", "=", "None", "previous_statement_search_text", "=", "''", "statements_to_create", "=", "[", "]", "for", "conversation_count", ",", "text", "in", "enumerate", "(", "conversation", ")", ":", "if", "self", ".", "show_training_progress", ":", "utils", ".", "print_progress_bar", "(", "'List Trainer'", ",", "conversation_count", "+", "1", ",", "len", "(", "conversation", ")", ")", "statement_search_text", "=", "self", ".", "chatbot", ".", "storage", ".", "tagger", ".", "get_bigram_pair_string", "(", "text", ")", "statement", "=", "self", ".", "get_preprocessed_statement", "(", "Statement", "(", "text", "=", "text", ",", "search_text", "=", "statement_search_text", ",", "in_response_to", "=", "previous_statement_text", ",", "search_in_response_to", "=", "previous_statement_search_text", ",", "conversation", "=", "'training'", ")", ")", "previous_statement_text", "=", "statement", ".", "text", "previous_statement_search_text", "=", "statement_search_text", "statements_to_create", ".", "append", "(", "statement", ")", "self", ".", "chatbot", ".", "storage", ".", "create_many", "(", "statements_to_create", ")" ]
Train the chat bot based on the provided list of statements that represents a single conversation.
[ "Train", "the", "chat", "bot", "based", "on", "the", "provided", "list", "of", "statements", "that", "represents", "a", "single", "conversation", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L83-L117
26,412
gunthercox/ChatterBot
chatterbot/trainers.py
UbuntuCorpusTrainer.is_downloaded
def is_downloaded(self, file_path): """ Check if the data file is already downloaded. """ if os.path.exists(file_path): self.chatbot.logger.info('File is already downloaded') return True return False
python
def is_downloaded(self, file_path):
    """
    Check if the data file is already downloaded.
    """
    if not os.path.exists(file_path):
        return False

    self.chatbot.logger.info('File is already downloaded')
    return True
[ "def", "is_downloaded", "(", "self", ",", "file_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'File is already downloaded'", ")", "return", "True", "return", "False" ]
Check if the data file is already downloaded.
[ "Check", "if", "the", "data", "file", "is", "already", "downloaded", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L203-L211
26,413
gunthercox/ChatterBot
chatterbot/trainers.py
UbuntuCorpusTrainer.is_extracted
def is_extracted(self, file_path): """ Check if the data file is already extracted. """ if os.path.isdir(file_path): self.chatbot.logger.info('File is already extracted') return True return False
python
def is_extracted(self, file_path):
    """
    Check if the data file is already extracted.
    """
    if not os.path.isdir(file_path):
        return False

    self.chatbot.logger.info('File is already extracted')
    return True
[ "def", "is_extracted", "(", "self", ",", "file_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "file_path", ")", ":", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'File is already extracted'", ")", "return", "True", "return", "False" ]
Check if the data file is already extracted.
[ "Check", "if", "the", "data", "file", "is", "already", "extracted", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L213-L221
26,414
gunthercox/ChatterBot
chatterbot/trainers.py
UbuntuCorpusTrainer.extract
def extract(self, file_path): """ Extract a tar file at the specified file path. """ import tarfile print('Extracting {}'.format(file_path)) if not os.path.exists(self.extracted_data_directory): os.makedirs(self.extracted_data_directory) def track_progress(members): sys.stdout.write('.') for member in members: # This will be the current file being extracted yield member with tarfile.open(file_path) as tar: tar.extractall(path=self.extracted_data_directory, members=track_progress(tar)) self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory)) return True
python
def extract(self, file_path):
    """
    Extract a tar file at the specified file path.
    """
    import tarfile

    print('Extracting {}'.format(file_path))

    if not os.path.exists(self.extracted_data_directory):
        os.makedirs(self.extracted_data_directory)

    def track_progress(members):
        # Runs once, when extractall first pulls from this generator —
        # so a single dot is printed per archive, not one per member.
        sys.stdout.write('.')
        for member in members:
            # This will be the current file being extracted
            yield member

    # NOTE(review): extractall is called without a member filter, so a
    # maliciously crafted archive could write outside
    # extracted_data_directory (path traversal). Consider tarfile's
    # filter='data' where the Python version supports it.
    with tarfile.open(file_path) as tar:
        tar.extractall(path=self.extracted_data_directory, members=track_progress(tar))

    self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory))

    return True
[ "def", "extract", "(", "self", ",", "file_path", ")", ":", "import", "tarfile", "print", "(", "'Extracting {}'", ".", "format", "(", "file_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "extracted_data_directory", ")", ":", "os", ".", "makedirs", "(", "self", ".", "extracted_data_directory", ")", "def", "track_progress", "(", "members", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'.'", ")", "for", "member", "in", "members", ":", "# This will be the current file being extracted", "yield", "member", "with", "tarfile", ".", "open", "(", "file_path", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "path", "=", "self", ".", "extracted_data_directory", ",", "members", "=", "track_progress", "(", "tar", ")", ")", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'File extracted to {}'", ".", "format", "(", "self", ".", "extracted_data_directory", ")", ")", "return", "True" ]
Extract a tar file at the specified file path.
[ "Extract", "a", "tar", "file", "at", "the", "specified", "file", "path", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L263-L285
26,415
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.count
def count(self): """ Return the number of entries in the database. """ Statement = self.get_model('statement') session = self.Session() statement_count = session.query(Statement).count() session.close() return statement_count
python
def count(self):
    """
    Return the number of entries in the database.

    :return: The total number of statements stored.
    :rtype: int
    """
    Statement = self.get_model('statement')

    session = self.Session()
    try:
        # Count on the query itself so no rows are materialized
        statement_count = session.query(Statement).count()
    finally:
        # Close the session even if the query raises, so the
        # connection is always returned to the pool.
        session.close()

    return statement_count
[ "def", "count", "(", "self", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "session", "=", "self", ".", "Session", "(", ")", "statement_count", "=", "session", ".", "query", "(", "Statement", ")", ".", "count", "(", ")", "session", ".", "close", "(", ")", "return", "statement_count" ]
Return the number of entries in the database.
[ "Return", "the", "number", "of", "entries", "in", "the", "database", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L70-L79
26,416
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.remove
def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements where the response text matches the input text. """ Statement = self.get_model('statement') session = self.Session() query = session.query(Statement).filter_by(text=statement_text) record = query.first() session.delete(record) self._session_finish(session)
python
def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements where the response text matches the input text. """ Statement = self.get_model('statement') session = self.Session() query = session.query(Statement).filter_by(text=statement_text) record = query.first() session.delete(record) self._session_finish(session)
[ "def", "remove", "(", "self", ",", "statement_text", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "session", "=", "self", ".", "Session", "(", ")", "query", "=", "session", ".", "query", "(", "Statement", ")", ".", "filter_by", "(", "text", "=", "statement_text", ")", "record", "=", "query", ".", "first", "(", ")", "session", ".", "delete", "(", "record", ")", "self", ".", "_session_finish", "(", "session", ")" ]
Removes the statement that matches the input text. Removes any responses from statements where the response text matches the input text.
[ "Removes", "the", "statement", "that", "matches", "the", "input", "text", ".", "Removes", "any", "responses", "from", "statements", "where", "the", "response", "text", "matches", "the", "input", "text", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L81-L95
26,417
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.filter
def filter(self, **kwargs): """ Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned. """ from sqlalchemy import or_ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() page_size = kwargs.pop('page_size', 1000) order_by = kwargs.pop('order_by', None) tags = kwargs.pop('tags', []) exclude_text = kwargs.pop('exclude_text', None) exclude_text_words = kwargs.pop('exclude_text_words', []) persona_not_startswith = kwargs.pop('persona_not_startswith', None) search_text_contains = kwargs.pop('search_text_contains', None) # Convert a single sting into a list if only one tag is provided if type(tags) == str: tags = [tags] if len(kwargs) == 0: statements = session.query(Statement).filter() else: statements = session.query(Statement).filter_by(**kwargs) if tags: statements = statements.join(Statement.tags).filter( Tag.name.in_(tags) ) if exclude_text: statements = statements.filter( ~Statement.text.in_(exclude_text) ) if exclude_text_words: or_word_query = [ Statement.text.ilike('%' + word + '%') for word in exclude_text_words ] statements = statements.filter( ~or_(*or_word_query) ) if persona_not_startswith: statements = statements.filter( ~Statement.persona.startswith('bot:') ) if search_text_contains: or_query = [ Statement.search_text.contains(word) for word in search_text_contains.split(' ') ] statements = statements.filter( or_(*or_query) ) if order_by: if 'created_at' in order_by: index = order_by.index('created_at') order_by[index] = Statement.created_at.asc() statements = statements.order_by(*order_by) total_statements = statements.count() for start_index in range(0, total_statements, page_size): for statement in statements.slice(start_index, start_index + page_size): yield self.model_to_object(statement) session.close()
python
def filter(self, **kwargs): """ Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned. """ from sqlalchemy import or_ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() page_size = kwargs.pop('page_size', 1000) order_by = kwargs.pop('order_by', None) tags = kwargs.pop('tags', []) exclude_text = kwargs.pop('exclude_text', None) exclude_text_words = kwargs.pop('exclude_text_words', []) persona_not_startswith = kwargs.pop('persona_not_startswith', None) search_text_contains = kwargs.pop('search_text_contains', None) # Convert a single sting into a list if only one tag is provided if type(tags) == str: tags = [tags] if len(kwargs) == 0: statements = session.query(Statement).filter() else: statements = session.query(Statement).filter_by(**kwargs) if tags: statements = statements.join(Statement.tags).filter( Tag.name.in_(tags) ) if exclude_text: statements = statements.filter( ~Statement.text.in_(exclude_text) ) if exclude_text_words: or_word_query = [ Statement.text.ilike('%' + word + '%') for word in exclude_text_words ] statements = statements.filter( ~or_(*or_word_query) ) if persona_not_startswith: statements = statements.filter( ~Statement.persona.startswith('bot:') ) if search_text_contains: or_query = [ Statement.search_text.contains(word) for word in search_text_contains.split(' ') ] statements = statements.filter( or_(*or_query) ) if order_by: if 'created_at' in order_by: index = order_by.index('created_at') order_by[index] = Statement.created_at.asc() statements = statements.order_by(*order_by) total_statements = statements.count() for start_index in range(0, total_statements, page_size): for statement in statements.slice(start_index, start_index + page_size): yield self.model_to_object(statement) session.close()
[ "def", "filter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "sqlalchemy", "import", "or_", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "session", "=", "self", ".", "Session", "(", ")", "page_size", "=", "kwargs", ".", "pop", "(", "'page_size'", ",", "1000", ")", "order_by", "=", "kwargs", ".", "pop", "(", "'order_by'", ",", "None", ")", "tags", "=", "kwargs", ".", "pop", "(", "'tags'", ",", "[", "]", ")", "exclude_text", "=", "kwargs", ".", "pop", "(", "'exclude_text'", ",", "None", ")", "exclude_text_words", "=", "kwargs", ".", "pop", "(", "'exclude_text_words'", ",", "[", "]", ")", "persona_not_startswith", "=", "kwargs", ".", "pop", "(", "'persona_not_startswith'", ",", "None", ")", "search_text_contains", "=", "kwargs", ".", "pop", "(", "'search_text_contains'", ",", "None", ")", "# Convert a single sting into a list if only one tag is provided", "if", "type", "(", "tags", ")", "==", "str", ":", "tags", "=", "[", "tags", "]", "if", "len", "(", "kwargs", ")", "==", "0", ":", "statements", "=", "session", ".", "query", "(", "Statement", ")", ".", "filter", "(", ")", "else", ":", "statements", "=", "session", ".", "query", "(", "Statement", ")", ".", "filter_by", "(", "*", "*", "kwargs", ")", "if", "tags", ":", "statements", "=", "statements", ".", "join", "(", "Statement", ".", "tags", ")", ".", "filter", "(", "Tag", ".", "name", ".", "in_", "(", "tags", ")", ")", "if", "exclude_text", ":", "statements", "=", "statements", ".", "filter", "(", "~", "Statement", ".", "text", ".", "in_", "(", "exclude_text", ")", ")", "if", "exclude_text_words", ":", "or_word_query", "=", "[", "Statement", ".", "text", ".", "ilike", "(", "'%'", "+", "word", "+", "'%'", ")", "for", "word", "in", "exclude_text_words", "]", "statements", "=", "statements", ".", "filter", "(", "~", "or_", "(", "*", "or_word_query", ")", ")", "if", "persona_not_startswith", ":", "statements", "=", 
"statements", ".", "filter", "(", "~", "Statement", ".", "persona", ".", "startswith", "(", "'bot:'", ")", ")", "if", "search_text_contains", ":", "or_query", "=", "[", "Statement", ".", "search_text", ".", "contains", "(", "word", ")", "for", "word", "in", "search_text_contains", ".", "split", "(", "' '", ")", "]", "statements", "=", "statements", ".", "filter", "(", "or_", "(", "*", "or_query", ")", ")", "if", "order_by", ":", "if", "'created_at'", "in", "order_by", ":", "index", "=", "order_by", ".", "index", "(", "'created_at'", ")", "order_by", "[", "index", "]", "=", "Statement", ".", "created_at", ".", "asc", "(", ")", "statements", "=", "statements", ".", "order_by", "(", "*", "order_by", ")", "total_statements", "=", "statements", ".", "count", "(", ")", "for", "start_index", "in", "range", "(", "0", ",", "total_statements", ",", "page_size", ")", ":", "for", "statement", "in", "statements", ".", "slice", "(", "start_index", ",", "start_index", "+", "page_size", ")", ":", "yield", "self", ".", "model_to_object", "(", "statement", ")", "session", ".", "close", "(", ")" ]
Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned.
[ "Returns", "a", "list", "of", "objects", "from", "the", "database", ".", "The", "kwargs", "parameter", "can", "contain", "any", "number", "of", "attributes", ".", "Only", "objects", "which", "contain", "all", "listed", "attributes", "and", "in", "which", "all", "values", "match", "for", "all", "listed", "attributes", "will", "be", "returned", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L97-L174
26,418
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.update
def update(self, statement): """ Modifies an entry in the database. Creates an entry if one does not exist. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if statement is not None: session = self.Session() record = None if hasattr(statement, 'id') and statement.id is not None: record = session.query(Statement).get(statement.id) else: record = session.query(Statement).filter( Statement.text == statement.text, Statement.conversation == statement.conversation, ).first() # Create a new statement entry if one does not already exist if not record: record = Statement( text=statement.text, conversation=statement.conversation, persona=statement.persona ) # Update the response value record.in_response_to = statement.in_response_to record.created_at = statement.created_at record.search_text = self.tagger.get_bigram_pair_string(statement.text) if statement.in_response_to: record.search_in_response_to = self.tagger.get_bigram_pair_string(statement.in_response_to) for tag_name in statement.get_tags(): tag = session.query(Tag).filter_by(name=tag_name).first() if not tag: # Create the record tag = Tag(name=tag_name) record.tags.append(tag) session.add(record) self._session_finish(session)
python
def update(self, statement): """ Modifies an entry in the database. Creates an entry if one does not exist. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if statement is not None: session = self.Session() record = None if hasattr(statement, 'id') and statement.id is not None: record = session.query(Statement).get(statement.id) else: record = session.query(Statement).filter( Statement.text == statement.text, Statement.conversation == statement.conversation, ).first() # Create a new statement entry if one does not already exist if not record: record = Statement( text=statement.text, conversation=statement.conversation, persona=statement.persona ) # Update the response value record.in_response_to = statement.in_response_to record.created_at = statement.created_at record.search_text = self.tagger.get_bigram_pair_string(statement.text) if statement.in_response_to: record.search_in_response_to = self.tagger.get_bigram_pair_string(statement.in_response_to) for tag_name in statement.get_tags(): tag = session.query(Tag).filter_by(name=tag_name).first() if not tag: # Create the record tag = Tag(name=tag_name) record.tags.append(tag) session.add(record) self._session_finish(session)
[ "def", "update", "(", "self", ",", "statement", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "if", "statement", "is", "not", "None", ":", "session", "=", "self", ".", "Session", "(", ")", "record", "=", "None", "if", "hasattr", "(", "statement", ",", "'id'", ")", "and", "statement", ".", "id", "is", "not", "None", ":", "record", "=", "session", ".", "query", "(", "Statement", ")", ".", "get", "(", "statement", ".", "id", ")", "else", ":", "record", "=", "session", ".", "query", "(", "Statement", ")", ".", "filter", "(", "Statement", ".", "text", "==", "statement", ".", "text", ",", "Statement", ".", "conversation", "==", "statement", ".", "conversation", ",", ")", ".", "first", "(", ")", "# Create a new statement entry if one does not already exist", "if", "not", "record", ":", "record", "=", "Statement", "(", "text", "=", "statement", ".", "text", ",", "conversation", "=", "statement", ".", "conversation", ",", "persona", "=", "statement", ".", "persona", ")", "# Update the response value", "record", ".", "in_response_to", "=", "statement", ".", "in_response_to", "record", ".", "created_at", "=", "statement", ".", "created_at", "record", ".", "search_text", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "statement", ".", "text", ")", "if", "statement", ".", "in_response_to", ":", "record", ".", "search_in_response_to", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "statement", ".", "in_response_to", ")", "for", "tag_name", "in", "statement", ".", "get_tags", "(", ")", ":", "tag", "=", "session", ".", "query", "(", "Tag", ")", ".", "filter_by", "(", "name", "=", "tag_name", ")", ".", "first", "(", ")", "if", "not", "tag", ":", "# Create the record", "tag", "=", "Tag", "(", "name", "=", "tag_name", ")", "record", ".", "tags", ".", "append", "(", "tag", ")", "session", ".", "add", "(", "record", ")", "self", ".", "_session_finish", "(", 
"session", ")" ]
Modifies an entry in the database. Creates an entry if one does not exist.
[ "Modifies", "an", "entry", "in", "the", "database", ".", "Creates", "an", "entry", "if", "one", "does", "not", "exist", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L269-L318
26,419
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.get_random
def get_random(self): """ Returns a random statement from the database. """ import random Statement = self.get_model('statement') session = self.Session() count = self.count() if count < 1: raise self.EmptyDatabaseException() random_index = random.randrange(0, count) random_statement = session.query(Statement)[random_index] statement = self.model_to_object(random_statement) session.close() return statement
python
def get_random(self): """ Returns a random statement from the database. """ import random Statement = self.get_model('statement') session = self.Session() count = self.count() if count < 1: raise self.EmptyDatabaseException() random_index = random.randrange(0, count) random_statement = session.query(Statement)[random_index] statement = self.model_to_object(random_statement) session.close() return statement
[ "def", "get_random", "(", "self", ")", ":", "import", "random", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "session", "=", "self", ".", "Session", "(", ")", "count", "=", "self", ".", "count", "(", ")", "if", "count", "<", "1", ":", "raise", "self", ".", "EmptyDatabaseException", "(", ")", "random_index", "=", "random", ".", "randrange", "(", "0", ",", "count", ")", "random_statement", "=", "session", ".", "query", "(", "Statement", ")", "[", "random_index", "]", "statement", "=", "self", ".", "model_to_object", "(", "random_statement", ")", "session", ".", "close", "(", ")", "return", "statement" ]
Returns a random statement from the database.
[ "Returns", "a", "random", "statement", "from", "the", "database", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L320-L339
26,420
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.drop
def drop(self): """ Drop the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() session.query(Statement).delete() session.query(Tag).delete() session.commit() session.close()
python
def drop(self): """ Drop the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() session.query(Statement).delete() session.query(Tag).delete() session.commit() session.close()
[ "def", "drop", "(", "self", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "session", "=", "self", ".", "Session", "(", ")", "session", ".", "query", "(", "Statement", ")", ".", "delete", "(", ")", "session", ".", "query", "(", "Tag", ")", ".", "delete", "(", ")", "session", ".", "commit", "(", ")", "session", ".", "close", "(", ")" ]
Drop the database.
[ "Drop", "the", "database", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L341-L354
26,421
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
SQLStorageAdapter.create_database
def create_database(self): """ Populate the database with the tables. """ from chatterbot.ext.sqlalchemy_app.models import Base Base.metadata.create_all(self.engine)
python
def create_database(self): """ Populate the database with the tables. """ from chatterbot.ext.sqlalchemy_app.models import Base Base.metadata.create_all(self.engine)
[ "def", "create_database", "(", "self", ")", ":", "from", "chatterbot", ".", "ext", ".", "sqlalchemy_app", ".", "models", "import", "Base", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "engine", ")" ]
Populate the database with the tables.
[ "Populate", "the", "database", "with", "the", "tables", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L356-L361
26,422
gunthercox/ChatterBot
examples/django_app/example_app/views.py
ChatterBotApiView.post
def post(self, request, *args, **kwargs): """ Return a response to the statement in the posted data. * The JSON data should contain a 'text' attribute. """ input_data = json.loads(request.body.decode('utf-8')) if 'text' not in input_data: return JsonResponse({ 'text': [ 'The attribute "text" is required.' ] }, status=400) response = self.chatterbot.get_response(input_data) response_data = response.serialize() return JsonResponse(response_data, status=200)
python
def post(self, request, *args, **kwargs): """ Return a response to the statement in the posted data. * The JSON data should contain a 'text' attribute. """ input_data = json.loads(request.body.decode('utf-8')) if 'text' not in input_data: return JsonResponse({ 'text': [ 'The attribute "text" is required.' ] }, status=400) response = self.chatterbot.get_response(input_data) response_data = response.serialize() return JsonResponse(response_data, status=200)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "input_data", "=", "json", ".", "loads", "(", "request", ".", "body", ".", "decode", "(", "'utf-8'", ")", ")", "if", "'text'", "not", "in", "input_data", ":", "return", "JsonResponse", "(", "{", "'text'", ":", "[", "'The attribute \"text\" is required.'", "]", "}", ",", "status", "=", "400", ")", "response", "=", "self", ".", "chatterbot", ".", "get_response", "(", "input_data", ")", "response_data", "=", "response", ".", "serialize", "(", ")", "return", "JsonResponse", "(", "response_data", ",", "status", "=", "200", ")" ]
Return a response to the statement in the posted data. * The JSON data should contain a 'text' attribute.
[ "Return", "a", "response", "to", "the", "statement", "in", "the", "posted", "data", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/django_app/example_app/views.py#L20-L39
26,423
gunthercox/ChatterBot
chatterbot/corpus.py
get_file_path
def get_file_path(dotted_path, extension='json'): """ Reads a dotted file path and returns the file path. """ # If the operating system's file path seperator character is in the string if os.sep in dotted_path or '/' in dotted_path: # Assume the path is a valid file path return dotted_path parts = dotted_path.split('.') if parts[0] == 'chatterbot': parts.pop(0) parts[0] = DATA_DIRECTORY corpus_path = os.path.join(*parts) if os.path.exists(corpus_path + '.{}'.format(extension)): corpus_path += '.{}'.format(extension) return corpus_path
python
def get_file_path(dotted_path, extension='json'): """ Reads a dotted file path and returns the file path. """ # If the operating system's file path seperator character is in the string if os.sep in dotted_path or '/' in dotted_path: # Assume the path is a valid file path return dotted_path parts = dotted_path.split('.') if parts[0] == 'chatterbot': parts.pop(0) parts[0] = DATA_DIRECTORY corpus_path = os.path.join(*parts) if os.path.exists(corpus_path + '.{}'.format(extension)): corpus_path += '.{}'.format(extension) return corpus_path
[ "def", "get_file_path", "(", "dotted_path", ",", "extension", "=", "'json'", ")", ":", "# If the operating system's file path seperator character is in the string", "if", "os", ".", "sep", "in", "dotted_path", "or", "'/'", "in", "dotted_path", ":", "# Assume the path is a valid file path", "return", "dotted_path", "parts", "=", "dotted_path", ".", "split", "(", "'.'", ")", "if", "parts", "[", "0", "]", "==", "'chatterbot'", ":", "parts", ".", "pop", "(", "0", ")", "parts", "[", "0", "]", "=", "DATA_DIRECTORY", "corpus_path", "=", "os", ".", "path", ".", "join", "(", "*", "parts", ")", "if", "os", ".", "path", ".", "exists", "(", "corpus_path", "+", "'.{}'", ".", "format", "(", "extension", ")", ")", ":", "corpus_path", "+=", "'.{}'", ".", "format", "(", "extension", ")", "return", "corpus_path" ]
Reads a dotted file path and returns the file path.
[ "Reads", "a", "dotted", "file", "path", "and", "returns", "the", "file", "path", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/corpus.py#L11-L30
26,424
gunthercox/ChatterBot
chatterbot/corpus.py
read_corpus
def read_corpus(file_name): """ Read and return the data from a corpus json file. """ with io.open(file_name, encoding='utf-8') as data_file: return yaml.load(data_file)
python
def read_corpus(file_name): """ Read and return the data from a corpus json file. """ with io.open(file_name, encoding='utf-8') as data_file: return yaml.load(data_file)
[ "def", "read_corpus", "(", "file_name", ")", ":", "with", "io", ".", "open", "(", "file_name", ",", "encoding", "=", "'utf-8'", ")", "as", "data_file", ":", "return", "yaml", ".", "load", "(", "data_file", ")" ]
Read and return the data from a corpus json file.
[ "Read", "and", "return", "the", "data", "from", "a", "corpus", "json", "file", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/corpus.py#L33-L38
26,425
gunthercox/ChatterBot
chatterbot/corpus.py
list_corpus_files
def list_corpus_files(dotted_path): """ Return a list of file paths to each data file in the specified corpus. """ corpus_path = get_file_path(dotted_path, extension=CORPUS_EXTENSION) paths = [] if os.path.isdir(corpus_path): paths = glob.glob(corpus_path + '/**/*.' + CORPUS_EXTENSION, recursive=True) else: paths.append(corpus_path) paths.sort() return paths
python
def list_corpus_files(dotted_path): """ Return a list of file paths to each data file in the specified corpus. """ corpus_path = get_file_path(dotted_path, extension=CORPUS_EXTENSION) paths = [] if os.path.isdir(corpus_path): paths = glob.glob(corpus_path + '/**/*.' + CORPUS_EXTENSION, recursive=True) else: paths.append(corpus_path) paths.sort() return paths
[ "def", "list_corpus_files", "(", "dotted_path", ")", ":", "corpus_path", "=", "get_file_path", "(", "dotted_path", ",", "extension", "=", "CORPUS_EXTENSION", ")", "paths", "=", "[", "]", "if", "os", ".", "path", ".", "isdir", "(", "corpus_path", ")", ":", "paths", "=", "glob", ".", "glob", "(", "corpus_path", "+", "'/**/*.'", "+", "CORPUS_EXTENSION", ",", "recursive", "=", "True", ")", "else", ":", "paths", ".", "append", "(", "corpus_path", ")", "paths", ".", "sort", "(", ")", "return", "paths" ]
Return a list of file paths to each data file in the specified corpus.
[ "Return", "a", "list", "of", "file", "paths", "to", "each", "data", "file", "in", "the", "specified", "corpus", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/corpus.py#L41-L54
26,426
gunthercox/ChatterBot
chatterbot/corpus.py
load_corpus
def load_corpus(*data_file_paths): """ Return the data contained within a specified corpus. """ for file_path in data_file_paths: corpus = [] corpus_data = read_corpus(file_path) conversations = corpus_data.get('conversations', []) corpus.extend(conversations) categories = corpus_data.get('categories', []) yield corpus, categories, file_path
python
def load_corpus(*data_file_paths): """ Return the data contained within a specified corpus. """ for file_path in data_file_paths: corpus = [] corpus_data = read_corpus(file_path) conversations = corpus_data.get('conversations', []) corpus.extend(conversations) categories = corpus_data.get('categories', []) yield corpus, categories, file_path
[ "def", "load_corpus", "(", "*", "data_file_paths", ")", ":", "for", "file_path", "in", "data_file_paths", ":", "corpus", "=", "[", "]", "corpus_data", "=", "read_corpus", "(", "file_path", ")", "conversations", "=", "corpus_data", ".", "get", "(", "'conversations'", ",", "[", "]", ")", "corpus", ".", "extend", "(", "conversations", ")", "categories", "=", "corpus_data", ".", "get", "(", "'categories'", ",", "[", "]", ")", "yield", "corpus", ",", "categories", ",", "file_path" ]
Return the data contained within a specified corpus.
[ "Return", "the", "data", "contained", "within", "a", "specified", "corpus", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/corpus.py#L57-L70
26,427
gunthercox/ChatterBot
chatterbot/tagging.py
PosLemmaTagger.get_bigram_pair_string
def get_bigram_pair_string(self, text): """ Return a string of text containing part-of-speech, lemma pairs. """ bigram_pairs = [] if len(text) <= 2: text_without_punctuation = text.translate(self.punctuation_table) if len(text_without_punctuation) >= 1: text = text_without_punctuation document = self.nlp(text) if len(text) <= 2: bigram_pairs = [ token.lemma_.lower() for token in document ] else: tokens = [ token for token in document if token.is_alpha and not token.is_stop ] if len(tokens) < 2: tokens = [ token for token in document if token.is_alpha ] for index in range(1, len(tokens)): bigram_pairs.append('{}:{}'.format( tokens[index - 1].pos_, tokens[index].lemma_.lower() )) if not bigram_pairs: bigram_pairs = [ token.lemma_.lower() for token in document ] return ' '.join(bigram_pairs)
python
def get_bigram_pair_string(self, text): """ Return a string of text containing part-of-speech, lemma pairs. """ bigram_pairs = [] if len(text) <= 2: text_without_punctuation = text.translate(self.punctuation_table) if len(text_without_punctuation) >= 1: text = text_without_punctuation document = self.nlp(text) if len(text) <= 2: bigram_pairs = [ token.lemma_.lower() for token in document ] else: tokens = [ token for token in document if token.is_alpha and not token.is_stop ] if len(tokens) < 2: tokens = [ token for token in document if token.is_alpha ] for index in range(1, len(tokens)): bigram_pairs.append('{}:{}'.format( tokens[index - 1].pos_, tokens[index].lemma_.lower() )) if not bigram_pairs: bigram_pairs = [ token.lemma_.lower() for token in document ] return ' '.join(bigram_pairs)
[ "def", "get_bigram_pair_string", "(", "self", ",", "text", ")", ":", "bigram_pairs", "=", "[", "]", "if", "len", "(", "text", ")", "<=", "2", ":", "text_without_punctuation", "=", "text", ".", "translate", "(", "self", ".", "punctuation_table", ")", "if", "len", "(", "text_without_punctuation", ")", ">=", "1", ":", "text", "=", "text_without_punctuation", "document", "=", "self", ".", "nlp", "(", "text", ")", "if", "len", "(", "text", ")", "<=", "2", ":", "bigram_pairs", "=", "[", "token", ".", "lemma_", ".", "lower", "(", ")", "for", "token", "in", "document", "]", "else", ":", "tokens", "=", "[", "token", "for", "token", "in", "document", "if", "token", ".", "is_alpha", "and", "not", "token", ".", "is_stop", "]", "if", "len", "(", "tokens", ")", "<", "2", ":", "tokens", "=", "[", "token", "for", "token", "in", "document", "if", "token", ".", "is_alpha", "]", "for", "index", "in", "range", "(", "1", ",", "len", "(", "tokens", ")", ")", ":", "bigram_pairs", ".", "append", "(", "'{}:{}'", ".", "format", "(", "tokens", "[", "index", "-", "1", "]", ".", "pos_", ",", "tokens", "[", "index", "]", ".", "lemma_", ".", "lower", "(", ")", ")", ")", "if", "not", "bigram_pairs", ":", "bigram_pairs", "=", "[", "token", ".", "lemma_", ".", "lower", "(", ")", "for", "token", "in", "document", "]", "return", "' '", ".", "join", "(", "bigram_pairs", ")" ]
Return a string of text containing part-of-speech, lemma pairs.
[ "Return", "a", "string", "of", "text", "containing", "part", "-", "of", "-", "speech", "lemma", "pairs", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/tagging.py#L15-L53
26,428
gunthercox/ChatterBot
chatterbot/storage/django_storage.py
DjangoStorageAdapter.update
def update(self, statement): """ Update the provided statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if hasattr(statement, 'id'): statement.save() else: statement = Statement.objects.create( text=statement.text, search_text=self.tagger.get_bigram_pair_string(statement.text), conversation=statement.conversation, in_response_to=statement.in_response_to, search_in_response_to=self.tagger.get_bigram_pair_string(statement.in_response_to), created_at=statement.created_at ) for _tag in statement.tags.all(): tag, _ = Tag.objects.get_or_create(name=_tag) statement.tags.add(tag) return statement
python
def update(self, statement): """ Update the provided statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if hasattr(statement, 'id'): statement.save() else: statement = Statement.objects.create( text=statement.text, search_text=self.tagger.get_bigram_pair_string(statement.text), conversation=statement.conversation, in_response_to=statement.in_response_to, search_in_response_to=self.tagger.get_bigram_pair_string(statement.in_response_to), created_at=statement.created_at ) for _tag in statement.tags.all(): tag, _ = Tag.objects.get_or_create(name=_tag) statement.tags.add(tag) return statement
[ "def", "update", "(", "self", ",", "statement", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "if", "hasattr", "(", "statement", ",", "'id'", ")", ":", "statement", ".", "save", "(", ")", "else", ":", "statement", "=", "Statement", ".", "objects", ".", "create", "(", "text", "=", "statement", ".", "text", ",", "search_text", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "statement", ".", "text", ")", ",", "conversation", "=", "statement", ".", "conversation", ",", "in_response_to", "=", "statement", ".", "in_response_to", ",", "search_in_response_to", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "statement", ".", "in_response_to", ")", ",", "created_at", "=", "statement", ".", "created_at", ")", "for", "_tag", "in", "statement", ".", "tags", ".", "all", "(", ")", ":", "tag", ",", "_", "=", "Tag", ".", "objects", ".", "get_or_create", "(", "name", "=", "_tag", ")", "statement", ".", "tags", ".", "add", "(", "tag", ")", "return", "statement" ]
Update the provided statement.
[ "Update", "the", "provided", "statement", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L159-L183
26,429
gunthercox/ChatterBot
chatterbot/storage/django_storage.py
DjangoStorageAdapter.remove
def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements if the response text matches the input text. """ Statement = self.get_model('statement') statements = Statement.objects.filter(text=statement_text) statements.delete()
python
def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements if the response text matches the input text. """ Statement = self.get_model('statement') statements = Statement.objects.filter(text=statement_text) statements.delete()
[ "def", "remove", "(", "self", ",", "statement_text", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "statements", "=", "Statement", ".", "objects", ".", "filter", "(", "text", "=", "statement_text", ")", "statements", ".", "delete", "(", ")" ]
Removes the statement that matches the input text. Removes any responses from statements if the response text matches the input text.
[ "Removes", "the", "statement", "that", "matches", "the", "input", "text", ".", "Removes", "any", "responses", "from", "statements", "if", "the", "response", "text", "matches", "the", "input", "text", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L198-L208
26,430
gunthercox/ChatterBot
chatterbot/storage/django_storage.py
DjangoStorageAdapter.drop
def drop(self): """ Remove all data from the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') Statement.objects.all().delete() Tag.objects.all().delete()
python
def drop(self): """ Remove all data from the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') Statement.objects.all().delete() Tag.objects.all().delete()
[ "def", "drop", "(", "self", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "Statement", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "Tag", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")" ]
Remove all data from the database.
[ "Remove", "all", "data", "from", "the", "database", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L210-L218
26,431
gunthercox/ChatterBot
chatterbot/preprocessors.py
clean_whitespace
def clean_whitespace(statement): """ Remove any consecutive whitespace characters from the statement text. """ import re # Replace linebreaks and tabs with spaces statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') # Remove any leeding or trailing whitespace statement.text = statement.text.strip() # Remove consecutive spaces statement.text = re.sub(' +', ' ', statement.text) return statement
python
def clean_whitespace(statement): """ Remove any consecutive whitespace characters from the statement text. """ import re # Replace linebreaks and tabs with spaces statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') # Remove any leeding or trailing whitespace statement.text = statement.text.strip() # Remove consecutive spaces statement.text = re.sub(' +', ' ', statement.text) return statement
[ "def", "clean_whitespace", "(", "statement", ")", ":", "import", "re", "# Replace linebreaks and tabs with spaces", "statement", ".", "text", "=", "statement", ".", "text", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "replace", "(", "'\\r'", ",", "' '", ")", ".", "replace", "(", "'\\t'", ",", "' '", ")", "# Remove any leeding or trailing whitespace", "statement", ".", "text", "=", "statement", ".", "text", ".", "strip", "(", ")", "# Remove consecutive spaces", "statement", ".", "text", "=", "re", ".", "sub", "(", "' +'", ",", "' '", ",", "statement", ".", "text", ")", "return", "statement" ]
Remove any consecutive whitespace characters from the statement text.
[ "Remove", "any", "consecutive", "whitespace", "characters", "from", "the", "statement", "text", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/preprocessors.py#L6-L21
26,432
gunthercox/ChatterBot
chatterbot/parsing.py
convert_string_to_number
def convert_string_to_number(value): """ Convert strings to numbers """ if value is None: return 1 if isinstance(value, int): return value if value.isdigit(): return int(value) num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower())) return sum(num_list)
python
def convert_string_to_number(value): """ Convert strings to numbers """ if value is None: return 1 if isinstance(value, int): return value if value.isdigit(): return int(value) num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower())) return sum(num_list)
[ "def", "convert_string_to_number", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "1", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "value", "if", "value", ".", "isdigit", "(", ")", ":", "return", "int", "(", "value", ")", "num_list", "=", "map", "(", "lambda", "s", ":", "NUMBERS", "[", "s", "]", ",", "re", ".", "findall", "(", "numbers", "+", "'+'", ",", "value", ".", "lower", "(", ")", ")", ")", "return", "sum", "(", "num_list", ")" ]
Convert strings to numbers
[ "Convert", "strings", "to", "numbers" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L506-L517
26,433
gunthercox/ChatterBot
chatterbot/parsing.py
convert_time_to_hour_minute
def convert_time_to_hour_minute(hour, minute, convention): """ Convert time to hour, minute """ if hour is None: hour = 0 if minute is None: minute = 0 if convention is None: convention = 'am' hour = int(hour) minute = int(minute) if convention.lower() == 'pm': hour += 12 return {'hours': hour, 'minutes': minute}
python
def convert_time_to_hour_minute(hour, minute, convention): """ Convert time to hour, minute """ if hour is None: hour = 0 if minute is None: minute = 0 if convention is None: convention = 'am' hour = int(hour) minute = int(minute) if convention.lower() == 'pm': hour += 12 return {'hours': hour, 'minutes': minute}
[ "def", "convert_time_to_hour_minute", "(", "hour", ",", "minute", ",", "convention", ")", ":", "if", "hour", "is", "None", ":", "hour", "=", "0", "if", "minute", "is", "None", ":", "minute", "=", "0", "if", "convention", "is", "None", ":", "convention", "=", "'am'", "hour", "=", "int", "(", "hour", ")", "minute", "=", "int", "(", "minute", ")", "if", "convention", ".", "lower", "(", ")", "==", "'pm'", ":", "hour", "+=", "12", "return", "{", "'hours'", ":", "hour", ",", "'minutes'", ":", "minute", "}" ]
Convert time to hour, minute
[ "Convert", "time", "to", "hour", "minute" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L520-L537
26,434
gunthercox/ChatterBot
chatterbot/parsing.py
date_from_quarter
def date_from_quarter(base_date, ordinal, year): """ Extract date from quarter of a year """ interval = 3 month_start = interval * (ordinal - 1) if month_start < 0: month_start = 9 month_end = month_start + interval if month_start == 0: month_start = 1 return [ datetime(year, month_start, 1), datetime(year, month_end, calendar.monthrange(year, month_end)[1]) ]
python
def date_from_quarter(base_date, ordinal, year): """ Extract date from quarter of a year """ interval = 3 month_start = interval * (ordinal - 1) if month_start < 0: month_start = 9 month_end = month_start + interval if month_start == 0: month_start = 1 return [ datetime(year, month_start, 1), datetime(year, month_end, calendar.monthrange(year, month_end)[1]) ]
[ "def", "date_from_quarter", "(", "base_date", ",", "ordinal", ",", "year", ")", ":", "interval", "=", "3", "month_start", "=", "interval", "*", "(", "ordinal", "-", "1", ")", "if", "month_start", "<", "0", ":", "month_start", "=", "9", "month_end", "=", "month_start", "+", "interval", "if", "month_start", "==", "0", ":", "month_start", "=", "1", "return", "[", "datetime", "(", "year", ",", "month_start", ",", "1", ")", ",", "datetime", "(", "year", ",", "month_end", ",", "calendar", ".", "monthrange", "(", "year", ",", "month_end", ")", "[", "1", "]", ")", "]" ]
Extract date from quarter of a year
[ "Extract", "date", "from", "quarter", "of", "a", "year" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L540-L554
26,435
gunthercox/ChatterBot
chatterbot/parsing.py
date_from_relative_week_year
def date_from_relative_week_year(base_date, time, dow, ordinal=1): """ Converts relative day to time Eg. this tuesday, last tuesday """ # If there is an ordinal (next 3 weeks) => return a start and end range # Reset date to start of the day relative_date = datetime(base_date.year, base_date.month, base_date.day) ord = convert_string_to_number(ordinal) if dow in year_variations: if time == 'this' or time == 'coming': return datetime(relative_date.year, 1, 1) elif time == 'last' or time == 'previous': return datetime(relative_date.year - 1, relative_date.month, 1) elif time == 'next' or time == 'following': return relative_date + timedelta(ord * 365) elif time == 'end of the': return datetime(relative_date.year, 12, 31) elif dow in month_variations: if time == 'this': return datetime(relative_date.year, relative_date.month, relative_date.day) elif time == 'last' or time == 'previous': return datetime(relative_date.year, relative_date.month - 1, relative_date.day) elif time == 'next' or time == 'following': if relative_date.month + ord >= 12: month = relative_date.month - 1 + ord year = relative_date.year + month // 12 month = month % 12 + 1 day = min(relative_date.day, calendar.monthrange(year, month)[1]) return datetime(year, month, day) else: return datetime(relative_date.year, relative_date.month + ord, relative_date.day) elif time == 'end of the': return datetime( relative_date.year, relative_date.month, calendar.monthrange(relative_date.year, relative_date.month)[1] ) elif dow in week_variations: if time == 'this': return relative_date - timedelta(days=relative_date.weekday()) elif time == 'last' or time == 'previous': return relative_date - timedelta(weeks=1) elif time == 'next' or time == 'following': return relative_date + timedelta(weeks=ord) elif time == 'end of the': day_of_week = base_date.weekday() return day_of_week + timedelta(days=6 - relative_date.weekday()) elif dow in day_variations: if time == 'this': return relative_date elif time == 'last' or 
time == 'previous': return relative_date - timedelta(days=1) elif time == 'next' or time == 'following': return relative_date + timedelta(days=ord) elif time == 'end of the': return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59)
python
def date_from_relative_week_year(base_date, time, dow, ordinal=1): """ Converts relative day to time Eg. this tuesday, last tuesday """ # If there is an ordinal (next 3 weeks) => return a start and end range # Reset date to start of the day relative_date = datetime(base_date.year, base_date.month, base_date.day) ord = convert_string_to_number(ordinal) if dow in year_variations: if time == 'this' or time == 'coming': return datetime(relative_date.year, 1, 1) elif time == 'last' or time == 'previous': return datetime(relative_date.year - 1, relative_date.month, 1) elif time == 'next' or time == 'following': return relative_date + timedelta(ord * 365) elif time == 'end of the': return datetime(relative_date.year, 12, 31) elif dow in month_variations: if time == 'this': return datetime(relative_date.year, relative_date.month, relative_date.day) elif time == 'last' or time == 'previous': return datetime(relative_date.year, relative_date.month - 1, relative_date.day) elif time == 'next' or time == 'following': if relative_date.month + ord >= 12: month = relative_date.month - 1 + ord year = relative_date.year + month // 12 month = month % 12 + 1 day = min(relative_date.day, calendar.monthrange(year, month)[1]) return datetime(year, month, day) else: return datetime(relative_date.year, relative_date.month + ord, relative_date.day) elif time == 'end of the': return datetime( relative_date.year, relative_date.month, calendar.monthrange(relative_date.year, relative_date.month)[1] ) elif dow in week_variations: if time == 'this': return relative_date - timedelta(days=relative_date.weekday()) elif time == 'last' or time == 'previous': return relative_date - timedelta(weeks=1) elif time == 'next' or time == 'following': return relative_date + timedelta(weeks=ord) elif time == 'end of the': day_of_week = base_date.weekday() return day_of_week + timedelta(days=6 - relative_date.weekday()) elif dow in day_variations: if time == 'this': return relative_date elif time == 'last' or 
time == 'previous': return relative_date - timedelta(days=1) elif time == 'next' or time == 'following': return relative_date + timedelta(days=ord) elif time == 'end of the': return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59)
[ "def", "date_from_relative_week_year", "(", "base_date", ",", "time", ",", "dow", ",", "ordinal", "=", "1", ")", ":", "# If there is an ordinal (next 3 weeks) => return a start and end range", "# Reset date to start of the day", "relative_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "ord", "=", "convert_string_to_number", "(", "ordinal", ")", "if", "dow", "in", "year_variations", ":", "if", "time", "==", "'this'", "or", "time", "==", "'coming'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "1", ",", "1", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "datetime", "(", "relative_date", ".", "year", "-", "1", ",", "relative_date", ".", "month", ",", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "ord", "*", "365", ")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "12", ",", "31", ")", "elif", "dow", "in", "month_variations", ":", "if", "time", "==", "'this'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", "-", "1", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "if", "relative_date", ".", "month", "+", "ord", ">=", "12", ":", "month", "=", "relative_date", ".", "month", "-", "1", "+", "ord", "year", "=", "relative_date", ".", "year", "+", "month", "//", "12", "month", "=", "month", "%", "12", "+", "1", "day", "=", "min", "(", "relative_date", ".", "day", ",", "calendar", ".", "monthrange", "(", "year", ",", "month", ")", "[", "1", "]", ")", "return", "datetime", "(", "year", 
",", "month", ",", "day", ")", "else", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", "+", "ord", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "calendar", ".", "monthrange", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ")", "[", "1", "]", ")", "elif", "dow", "in", "week_variations", ":", "if", "time", "==", "'this'", ":", "return", "relative_date", "-", "timedelta", "(", "days", "=", "relative_date", ".", "weekday", "(", ")", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "relative_date", "-", "timedelta", "(", "weeks", "=", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "weeks", "=", "ord", ")", "elif", "time", "==", "'end of the'", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "return", "day_of_week", "+", "timedelta", "(", "days", "=", "6", "-", "relative_date", ".", "weekday", "(", ")", ")", "elif", "dow", "in", "day_variations", ":", "if", "time", "==", "'this'", ":", "return", "relative_date", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "relative_date", "-", "timedelta", "(", "days", "=", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "days", "=", "ord", ")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "relative_date", ".", "day", ",", "23", ",", "59", ",", "59", ")" ]
Converts relative day to time Eg. this tuesday, last tuesday
[ "Converts", "relative", "day", "to", "time", "Eg", ".", "this", "tuesday", "last", "tuesday" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L580-L636
26,436
gunthercox/ChatterBot
chatterbot/parsing.py
date_from_adverb
def date_from_adverb(base_date, name): """ Convert Day adverbs to dates Tomorrow => Date Today => Date """ # Reset date to start of the day adverb_date = datetime(base_date.year, base_date.month, base_date.day) if name == 'today' or name == 'tonite' or name == 'tonight': return adverb_date.today() elif name == 'yesterday': return adverb_date - timedelta(days=1) elif name == 'tomorrow' or name == 'tom': return adverb_date + timedelta(days=1)
python
def date_from_adverb(base_date, name): """ Convert Day adverbs to dates Tomorrow => Date Today => Date """ # Reset date to start of the day adverb_date = datetime(base_date.year, base_date.month, base_date.day) if name == 'today' or name == 'tonite' or name == 'tonight': return adverb_date.today() elif name == 'yesterday': return adverb_date - timedelta(days=1) elif name == 'tomorrow' or name == 'tom': return adverb_date + timedelta(days=1)
[ "def", "date_from_adverb", "(", "base_date", ",", "name", ")", ":", "# Reset date to start of the day", "adverb_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "if", "name", "==", "'today'", "or", "name", "==", "'tonite'", "or", "name", "==", "'tonight'", ":", "return", "adverb_date", ".", "today", "(", ")", "elif", "name", "==", "'yesterday'", ":", "return", "adverb_date", "-", "timedelta", "(", "days", "=", "1", ")", "elif", "name", "==", "'tomorrow'", "or", "name", "==", "'tom'", ":", "return", "adverb_date", "+", "timedelta", "(", "days", "=", "1", ")" ]
Convert Day adverbs to dates Tomorrow => Date Today => Date
[ "Convert", "Day", "adverbs", "to", "dates", "Tomorrow", "=", ">", "Date", "Today", "=", ">", "Date" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L639-L652
26,437
gunthercox/ChatterBot
chatterbot/parsing.py
this_week_day
def this_week_day(base_date, weekday): """ Finds coming weekday """ day_of_week = base_date.weekday() # If today is Tuesday and the query is `this monday` # We should output the next_week monday if day_of_week > weekday: return next_week_day(base_date, weekday) start_of_this_week = base_date - timedelta(days=day_of_week + 1) day = start_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day
python
def this_week_day(base_date, weekday): """ Finds coming weekday """ day_of_week = base_date.weekday() # If today is Tuesday and the query is `this monday` # We should output the next_week monday if day_of_week > weekday: return next_week_day(base_date, weekday) start_of_this_week = base_date - timedelta(days=day_of_week + 1) day = start_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day
[ "def", "this_week_day", "(", "base_date", ",", "weekday", ")", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "# If today is Tuesday and the query is `this monday`", "# We should output the next_week monday", "if", "day_of_week", ">", "weekday", ":", "return", "next_week_day", "(", "base_date", ",", "weekday", ")", "start_of_this_week", "=", "base_date", "-", "timedelta", "(", "days", "=", "day_of_week", "+", "1", ")", "day", "=", "start_of_this_week", "+", "timedelta", "(", "days", "=", "1", ")", "while", "day", ".", "weekday", "(", ")", "!=", "weekday", ":", "day", "=", "day", "+", "timedelta", "(", "days", "=", "1", ")", "return", "day" ]
Finds coming weekday
[ "Finds", "coming", "weekday" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L685-L698
26,438
gunthercox/ChatterBot
chatterbot/parsing.py
previous_week_day
def previous_week_day(base_date, weekday): """ Finds previous weekday """ day = base_date - timedelta(days=1) while day.weekday() != weekday: day = day - timedelta(days=1) return day
python
def previous_week_day(base_date, weekday): """ Finds previous weekday """ day = base_date - timedelta(days=1) while day.weekday() != weekday: day = day - timedelta(days=1) return day
[ "def", "previous_week_day", "(", "base_date", ",", "weekday", ")", ":", "day", "=", "base_date", "-", "timedelta", "(", "days", "=", "1", ")", "while", "day", ".", "weekday", "(", ")", "!=", "weekday", ":", "day", "=", "day", "-", "timedelta", "(", "days", "=", "1", ")", "return", "day" ]
Finds previous weekday
[ "Finds", "previous", "weekday" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L701-L708
26,439
gunthercox/ChatterBot
chatterbot/parsing.py
next_week_day
def next_week_day(base_date, weekday): """ Finds next weekday """ day_of_week = base_date.weekday() end_of_this_week = base_date + timedelta(days=6 - day_of_week) day = end_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day
python
def next_week_day(base_date, weekday): """ Finds next weekday """ day_of_week = base_date.weekday() end_of_this_week = base_date + timedelta(days=6 - day_of_week) day = end_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day
[ "def", "next_week_day", "(", "base_date", ",", "weekday", ")", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "end_of_this_week", "=", "base_date", "+", "timedelta", "(", "days", "=", "6", "-", "day_of_week", ")", "day", "=", "end_of_this_week", "+", "timedelta", "(", "days", "=", "1", ")", "while", "day", ".", "weekday", "(", ")", "!=", "weekday", ":", "day", "=", "day", "+", "timedelta", "(", "days", "=", "1", ")", "return", "day" ]
Finds next weekday
[ "Finds", "next", "weekday" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L711-L720
26,440
gunthercox/ChatterBot
chatterbot/parsing.py
datetime_parsing
def datetime_parsing(text, base_date=datetime.now()): """ Extract datetime objects from a string of text. """ matches = [] found_array = [] # Find the position in the string for expression, function in regex: for match in expression.finditer(text): matches.append((match.group(), function(match, base_date), match.span())) # Wrap the matched text with TAG element to prevent nested selections for match, value, spans in matches: subn = re.subn( '(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)', '<TAG>' + match + '</TAG>', text ) text = subn[0] is_substituted = subn[1] if is_substituted != 0: found_array.append((match, value, spans)) # To preserve order of the match, sort based on the start position return sorted(found_array, key=lambda match: match and match[2][0])
python
def datetime_parsing(text, base_date=datetime.now()): """ Extract datetime objects from a string of text. """ matches = [] found_array = [] # Find the position in the string for expression, function in regex: for match in expression.finditer(text): matches.append((match.group(), function(match, base_date), match.span())) # Wrap the matched text with TAG element to prevent nested selections for match, value, spans in matches: subn = re.subn( '(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)', '<TAG>' + match + '</TAG>', text ) text = subn[0] is_substituted = subn[1] if is_substituted != 0: found_array.append((match, value, spans)) # To preserve order of the match, sort based on the start position return sorted(found_array, key=lambda match: match and match[2][0])
[ "def", "datetime_parsing", "(", "text", ",", "base_date", "=", "datetime", ".", "now", "(", ")", ")", ":", "matches", "=", "[", "]", "found_array", "=", "[", "]", "# Find the position in the string", "for", "expression", ",", "function", "in", "regex", ":", "for", "match", "in", "expression", ".", "finditer", "(", "text", ")", ":", "matches", ".", "append", "(", "(", "match", ".", "group", "(", ")", ",", "function", "(", "match", ",", "base_date", ")", ",", "match", ".", "span", "(", ")", ")", ")", "# Wrap the matched text with TAG element to prevent nested selections", "for", "match", ",", "value", ",", "spans", "in", "matches", ":", "subn", "=", "re", ".", "subn", "(", "'(?!<TAG[^>]*?>)'", "+", "match", "+", "'(?![^<]*?</TAG>)'", ",", "'<TAG>'", "+", "match", "+", "'</TAG>'", ",", "text", ")", "text", "=", "subn", "[", "0", "]", "is_substituted", "=", "subn", "[", "1", "]", "if", "is_substituted", "!=", "0", ":", "found_array", ".", "append", "(", "(", "match", ",", "value", ",", "spans", ")", ")", "# To preserve order of the match, sort based on the start position", "return", "sorted", "(", "found_array", ",", "key", "=", "lambda", "match", ":", "match", "and", "match", "[", "2", "]", "[", "0", "]", ")" ]
Extract datetime objects from a string of text.
[ "Extract", "datetime", "objects", "from", "a", "string", "of", "text", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L723-L746
26,441
gunthercox/ChatterBot
chatterbot/search.py
IndexedTextSearch.search
def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. """ self.chatbot.logger.info('Beginning search for close text match') input_search_text = input_statement.search_text if not input_statement.search_text: self.chatbot.logger.warn( 'No value for search_text was available on the provided input' ) input_search_text = self.chatbot.storage.tagger.get_bigram_pair_string( input_statement.text ) search_parameters = { 'search_text_contains': input_search_text, 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) closest_match = Statement(text='') closest_match.confidence = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > closest_match.confidence: statement.confidence = confidence closest_match = statement self.chatbot.logger.info('Similar text found: {} {}'.format( closest_match.text, confidence )) yield closest_match
python
def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. """ self.chatbot.logger.info('Beginning search for close text match') input_search_text = input_statement.search_text if not input_statement.search_text: self.chatbot.logger.warn( 'No value for search_text was available on the provided input' ) input_search_text = self.chatbot.storage.tagger.get_bigram_pair_string( input_statement.text ) search_parameters = { 'search_text_contains': input_search_text, 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) closest_match = Statement(text='') closest_match.confidence = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > closest_match.confidence: statement.confidence = confidence closest_match = statement self.chatbot.logger.info('Similar text found: {} {}'.format( closest_match.text, confidence )) yield closest_match
[ "def", "search", "(", "self", ",", "input_statement", ",", "*", "*", "additional_parameters", ")", ":", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'Beginning search for close text match'", ")", "input_search_text", "=", "input_statement", ".", "search_text", "if", "not", "input_statement", ".", "search_text", ":", "self", ".", "chatbot", ".", "logger", ".", "warn", "(", "'No value for search_text was available on the provided input'", ")", "input_search_text", "=", "self", ".", "chatbot", ".", "storage", ".", "tagger", ".", "get_bigram_pair_string", "(", "input_statement", ".", "text", ")", "search_parameters", "=", "{", "'search_text_contains'", ":", "input_search_text", ",", "'persona_not_startswith'", ":", "'bot:'", ",", "'page_size'", ":", "self", ".", "search_page_size", "}", "if", "additional_parameters", ":", "search_parameters", ".", "update", "(", "additional_parameters", ")", "statement_list", "=", "self", ".", "chatbot", ".", "storage", ".", "filter", "(", "*", "*", "search_parameters", ")", "closest_match", "=", "Statement", "(", "text", "=", "''", ")", "closest_match", ".", "confidence", "=", "0", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'Processing search results'", ")", "# Find the closest matching known statement", "for", "statement", "in", "statement_list", ":", "confidence", "=", "self", ".", "compare_statements", "(", "input_statement", ",", "statement", ")", "if", "confidence", ">", "closest_match", ".", "confidence", ":", "statement", ".", "confidence", "=", "confidence", "closest_match", "=", "statement", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'Similar text found: {} {}'", ".", "format", "(", "closest_match", ".", "text", ",", "confidence", ")", ")", "yield", "closest_match" ]
Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time.
[ "Search", "for", "close", "matches", "to", "the", "input", ".", "Confidence", "scores", "for", "subsequent", "results", "will", "order", "of", "increasing", "value", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/search.py#L35-L89
26,442
gunthercox/ChatterBot
examples/tkinter_gui.py
TkinterGUIExample.initialize
def initialize(self): """ Set window layout. """ self.grid() self.respond = ttk.Button(self, text='Get Response', command=self.get_response) self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3) self.usr_input = ttk.Entry(self, state='normal') self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3) self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:') self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3) self.conversation = ScrolledText.ScrolledText(self, state='disabled') self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3)
python
def initialize(self): """ Set window layout. """ self.grid() self.respond = ttk.Button(self, text='Get Response', command=self.get_response) self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3) self.usr_input = ttk.Entry(self, state='normal') self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3) self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:') self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3) self.conversation = ScrolledText.ScrolledText(self, state='disabled') self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3)
[ "def", "initialize", "(", "self", ")", ":", "self", ".", "grid", "(", ")", "self", ".", "respond", "=", "ttk", ".", "Button", "(", "self", ",", "text", "=", "'Get Response'", ",", "command", "=", "self", ".", "get_response", ")", "self", ".", "respond", ".", "grid", "(", "column", "=", "0", ",", "row", "=", "0", ",", "sticky", "=", "'nesw'", ",", "padx", "=", "3", ",", "pady", "=", "3", ")", "self", ".", "usr_input", "=", "ttk", ".", "Entry", "(", "self", ",", "state", "=", "'normal'", ")", "self", ".", "usr_input", ".", "grid", "(", "column", "=", "1", ",", "row", "=", "0", ",", "sticky", "=", "'nesw'", ",", "padx", "=", "3", ",", "pady", "=", "3", ")", "self", ".", "conversation_lbl", "=", "ttk", ".", "Label", "(", "self", ",", "anchor", "=", "tk", ".", "E", ",", "text", "=", "'Conversation:'", ")", "self", ".", "conversation_lbl", ".", "grid", "(", "column", "=", "0", ",", "row", "=", "1", ",", "sticky", "=", "'nesw'", ",", "padx", "=", "3", ",", "pady", "=", "3", ")", "self", ".", "conversation", "=", "ScrolledText", ".", "ScrolledText", "(", "self", ",", "state", "=", "'disabled'", ")", "self", ".", "conversation", ".", "grid", "(", "column", "=", "0", ",", "row", "=", "2", ",", "columnspan", "=", "2", ",", "sticky", "=", "'nesw'", ",", "padx", "=", "3", ",", "pady", "=", "3", ")" ]
Set window layout.
[ "Set", "window", "layout", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/tkinter_gui.py#L33-L49
26,443
gunthercox/ChatterBot
examples/tkinter_gui.py
TkinterGUIExample.get_response
def get_response(self): """ Get a response from the chatbot and display it. """ user_input = self.usr_input.get() self.usr_input.delete(0, tk.END) response = self.chatbot.get_response(user_input) self.conversation['state'] = 'normal' self.conversation.insert( tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n" ) self.conversation['state'] = 'disabled' time.sleep(0.5)
python
def get_response(self): """ Get a response from the chatbot and display it. """ user_input = self.usr_input.get() self.usr_input.delete(0, tk.END) response = self.chatbot.get_response(user_input) self.conversation['state'] = 'normal' self.conversation.insert( tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n" ) self.conversation['state'] = 'disabled' time.sleep(0.5)
[ "def", "get_response", "(", "self", ")", ":", "user_input", "=", "self", ".", "usr_input", ".", "get", "(", ")", "self", ".", "usr_input", ".", "delete", "(", "0", ",", "tk", ".", "END", ")", "response", "=", "self", ".", "chatbot", ".", "get_response", "(", "user_input", ")", "self", ".", "conversation", "[", "'state'", "]", "=", "'normal'", "self", ".", "conversation", ".", "insert", "(", "tk", ".", "END", ",", "\"Human: \"", "+", "user_input", "+", "\"\\n\"", "+", "\"ChatBot: \"", "+", "str", "(", "response", ".", "text", ")", "+", "\"\\n\"", ")", "self", ".", "conversation", "[", "'state'", "]", "=", "'disabled'", "time", ".", "sleep", "(", "0.5", ")" ]
Get a response from the chatbot and display it.
[ "Get", "a", "response", "from", "the", "chatbot", "and", "display", "it", "." ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/tkinter_gui.py#L51-L66
26,444
tensorflow/lucid
lucid/scratch/web/svelte.py
SvelteComponent
def SvelteComponent(name, path): """Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython. """ if path[-3:] == ".js": js_path = path elif path[-5:] == ".html": print("Trying to build svelte component from html...") js_path = build_svelte(path) js_content = read(js_path, mode='r') def inner(data): id_str = js_id(name) html = _template \ .replace("$js", js_content) \ .replace("$name", name) \ .replace("$data", json.dumps(data)) \ .replace("$id", id_str) _display_html(html) return inner
python
def SvelteComponent(name, path): """Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython. """ if path[-3:] == ".js": js_path = path elif path[-5:] == ".html": print("Trying to build svelte component from html...") js_path = build_svelte(path) js_content = read(js_path, mode='r') def inner(data): id_str = js_id(name) html = _template \ .replace("$js", js_content) \ .replace("$name", name) \ .replace("$data", json.dumps(data)) \ .replace("$id", id_str) _display_html(html) return inner
[ "def", "SvelteComponent", "(", "name", ",", "path", ")", ":", "if", "path", "[", "-", "3", ":", "]", "==", "\".js\"", ":", "js_path", "=", "path", "elif", "path", "[", "-", "5", ":", "]", "==", "\".html\"", ":", "print", "(", "\"Trying to build svelte component from html...\"", ")", "js_path", "=", "build_svelte", "(", "path", ")", "js_content", "=", "read", "(", "js_path", ",", "mode", "=", "'r'", ")", "def", "inner", "(", "data", ")", ":", "id_str", "=", "js_id", "(", "name", ")", "html", "=", "_template", ".", "replace", "(", "\"$js\"", ",", "js_content", ")", ".", "replace", "(", "\"$name\"", ",", "name", ")", ".", "replace", "(", "\"$data\"", ",", "json", ".", "dumps", "(", "data", ")", ")", ".", "replace", "(", "\"$id\"", ",", "id_str", ")", "_display_html", "(", "html", ")", "return", "inner" ]
Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython.
[ "Display", "svelte", "components", "in", "iPython", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/web/svelte.py#L43-L68
26,445
tensorflow/lucid
lucid/misc/io/saving.py
save_json
def save_json(object, handle, indent=2): """Save object as json on CNS.""" obj_json = json.dumps(object, indent=indent, cls=NumpyJSONEncoder) handle.write(obj_json)
python
def save_json(object, handle, indent=2): """Save object as json on CNS.""" obj_json = json.dumps(object, indent=indent, cls=NumpyJSONEncoder) handle.write(obj_json)
[ "def", "save_json", "(", "object", ",", "handle", ",", "indent", "=", "2", ")", ":", "obj_json", "=", "json", ".", "dumps", "(", "object", ",", "indent", "=", "indent", ",", "cls", "=", "NumpyJSONEncoder", ")", "handle", ".", "write", "(", "obj_json", ")" ]
Save object as json on CNS.
[ "Save", "object", "as", "json", "on", "CNS", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L58-L61
26,446
tensorflow/lucid
lucid/misc/io/saving.py
save_npz
def save_npz(object, handle): """Save dict of numpy array as npz file.""" # there is a bug where savez doesn't actually accept a file handle. log.warning("Saving npz files currently only works locally. :/") path = handle.name handle.close() if type(object) is dict: np.savez(path, **object) elif type(object) is list: np.savez(path, *object) else: log.warning("Saving non dict or list as npz file, did you maybe want npy?") np.savez(path, object)
python
def save_npz(object, handle): """Save dict of numpy array as npz file.""" # there is a bug where savez doesn't actually accept a file handle. log.warning("Saving npz files currently only works locally. :/") path = handle.name handle.close() if type(object) is dict: np.savez(path, **object) elif type(object) is list: np.savez(path, *object) else: log.warning("Saving non dict or list as npz file, did you maybe want npy?") np.savez(path, object)
[ "def", "save_npz", "(", "object", ",", "handle", ")", ":", "# there is a bug where savez doesn't actually accept a file handle.", "log", ".", "warning", "(", "\"Saving npz files currently only works locally. :/\"", ")", "path", "=", "handle", ".", "name", "handle", ".", "close", "(", ")", "if", "type", "(", "object", ")", "is", "dict", ":", "np", ".", "savez", "(", "path", ",", "*", "*", "object", ")", "elif", "type", "(", "object", ")", "is", "list", ":", "np", ".", "savez", "(", "path", ",", "*", "object", ")", "else", ":", "log", ".", "warning", "(", "\"Saving non dict or list as npz file, did you maybe want npy?\"", ")", "np", ".", "savez", "(", "path", ",", "object", ")" ]
Save dict of numpy array as npz file.
[ "Save", "dict", "of", "numpy", "array", "as", "npz", "file", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L69-L81
26,447
tensorflow/lucid
lucid/misc/io/saving.py
save_img
def save_img(object, handle, **kwargs): """Save numpy array as image file on CNS.""" if isinstance(object, np.ndarray): normalized = _normalize_array(object) object = PIL.Image.fromarray(normalized) if isinstance(object, PIL.Image.Image): object.save(handle, **kwargs) # will infer format from handle's url ext. else: raise ValueError("Can only save_img for numpy arrays or PIL.Images!")
python
def save_img(object, handle, **kwargs): """Save numpy array as image file on CNS.""" if isinstance(object, np.ndarray): normalized = _normalize_array(object) object = PIL.Image.fromarray(normalized) if isinstance(object, PIL.Image.Image): object.save(handle, **kwargs) # will infer format from handle's url ext. else: raise ValueError("Can only save_img for numpy arrays or PIL.Images!")
[ "def", "save_img", "(", "object", ",", "handle", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "object", ",", "np", ".", "ndarray", ")", ":", "normalized", "=", "_normalize_array", "(", "object", ")", "object", "=", "PIL", ".", "Image", ".", "fromarray", "(", "normalized", ")", "if", "isinstance", "(", "object", ",", "PIL", ".", "Image", ".", "Image", ")", ":", "object", ".", "save", "(", "handle", ",", "*", "*", "kwargs", ")", "# will infer format from handle's url ext.", "else", ":", "raise", "ValueError", "(", "\"Can only save_img for numpy arrays or PIL.Images!\"", ")" ]
Save numpy array as image file on CNS.
[ "Save", "numpy", "array", "as", "image", "file", "on", "CNS", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L84-L94
26,448
tensorflow/lucid
lucid/misc/io/saving.py
save
def save(thing, url_or_handle, **kwargs): """Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported. """ is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name") if is_handle: _, ext = os.path.splitext(url_or_handle.name) else: _, ext = os.path.splitext(url_or_handle) if not ext: raise RuntimeError("No extension in URL: " + url_or_handle) if ext in savers: saver = savers[ext] if is_handle: saver(thing, url_or_handle, **kwargs) else: with write_handle(url_or_handle) as handle: saver(thing, handle, **kwargs) else: saver_names = [(key, fn.__name__) for (key, fn) in savers.items()] message = "Unknown extension '{}', supports {}." raise ValueError(message.format(ext, saver_names))
python
def save(thing, url_or_handle, **kwargs): """Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported. """ is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name") if is_handle: _, ext = os.path.splitext(url_or_handle.name) else: _, ext = os.path.splitext(url_or_handle) if not ext: raise RuntimeError("No extension in URL: " + url_or_handle) if ext in savers: saver = savers[ext] if is_handle: saver(thing, url_or_handle, **kwargs) else: with write_handle(url_or_handle) as handle: saver(thing, handle, **kwargs) else: saver_names = [(key, fn.__name__) for (key, fn) in savers.items()] message = "Unknown extension '{}', supports {}." raise ValueError(message.format(ext, saver_names))
[ "def", "save", "(", "thing", ",", "url_or_handle", ",", "*", "*", "kwargs", ")", ":", "is_handle", "=", "hasattr", "(", "url_or_handle", ",", "\"write\"", ")", "and", "hasattr", "(", "url_or_handle", ",", "\"name\"", ")", "if", "is_handle", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "url_or_handle", ".", "name", ")", "else", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "url_or_handle", ")", "if", "not", "ext", ":", "raise", "RuntimeError", "(", "\"No extension in URL: \"", "+", "url_or_handle", ")", "if", "ext", "in", "savers", ":", "saver", "=", "savers", "[", "ext", "]", "if", "is_handle", ":", "saver", "(", "thing", ",", "url_or_handle", ",", "*", "*", "kwargs", ")", "else", ":", "with", "write_handle", "(", "url_or_handle", ")", "as", "handle", ":", "saver", "(", "thing", ",", "handle", ",", "*", "*", "kwargs", ")", "else", ":", "saver_names", "=", "[", "(", "key", ",", "fn", ".", "__name__", ")", "for", "(", "key", ",", "fn", ")", "in", "savers", ".", "items", "(", ")", "]", "message", "=", "\"Unknown extension '{}', supports {}.\"", "raise", "ValueError", "(", "message", ".", "format", "(", "ext", ",", "saver_names", ")", ")" ]
Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported.
[ "Save", "object", "to", "file", "on", "CNS", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L135-L166
26,449
tensorflow/lucid
lucid/misc/gl/meshutil.py
frustum
def frustum(left, right, bottom, top, znear, zfar): """Create view frustum matrix.""" assert right != left assert bottom != top assert znear != zfar M = np.zeros((4, 4), dtype=np.float32) M[0, 0] = +2.0 * znear / (right - left) M[2, 0] = (right + left) / (right - left) M[1, 1] = +2.0 * znear / (top - bottom) M[3, 1] = (top + bottom) / (top - bottom) M[2, 2] = -(zfar + znear) / (zfar - znear) M[3, 2] = -2.0 * znear * zfar / (zfar - znear) M[2, 3] = -1.0 return M
python
def frustum(left, right, bottom, top, znear, zfar): """Create view frustum matrix.""" assert right != left assert bottom != top assert znear != zfar M = np.zeros((4, 4), dtype=np.float32) M[0, 0] = +2.0 * znear / (right - left) M[2, 0] = (right + left) / (right - left) M[1, 1] = +2.0 * znear / (top - bottom) M[3, 1] = (top + bottom) / (top - bottom) M[2, 2] = -(zfar + znear) / (zfar - znear) M[3, 2] = -2.0 * znear * zfar / (zfar - znear) M[2, 3] = -1.0 return M
[ "def", "frustum", "(", "left", ",", "right", ",", "bottom", ",", "top", ",", "znear", ",", "zfar", ")", ":", "assert", "right", "!=", "left", "assert", "bottom", "!=", "top", "assert", "znear", "!=", "zfar", "M", "=", "np", ".", "zeros", "(", "(", "4", ",", "4", ")", ",", "dtype", "=", "np", ".", "float32", ")", "M", "[", "0", ",", "0", "]", "=", "+", "2.0", "*", "znear", "/", "(", "right", "-", "left", ")", "M", "[", "2", ",", "0", "]", "=", "(", "right", "+", "left", ")", "/", "(", "right", "-", "left", ")", "M", "[", "1", ",", "1", "]", "=", "+", "2.0", "*", "znear", "/", "(", "top", "-", "bottom", ")", "M", "[", "3", ",", "1", "]", "=", "(", "top", "+", "bottom", ")", "/", "(", "top", "-", "bottom", ")", "M", "[", "2", ",", "2", "]", "=", "-", "(", "zfar", "+", "znear", ")", "/", "(", "zfar", "-", "znear", ")", "M", "[", "3", ",", "2", "]", "=", "-", "2.0", "*", "znear", "*", "zfar", "/", "(", "zfar", "-", "znear", ")", "M", "[", "2", ",", "3", "]", "=", "-", "1.0", "return", "M" ]
Create view frustum matrix.
[ "Create", "view", "frustum", "matrix", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L8-L22
26,450
tensorflow/lucid
lucid/misc/gl/meshutil.py
anorm
def anorm(x, axis=None, keepdims=False): """Compute L2 norms alogn specified axes.""" return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims))
python
def anorm(x, axis=None, keepdims=False): """Compute L2 norms alogn specified axes.""" return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims))
[ "def", "anorm", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "np", ".", "sqrt", "(", "(", "x", "*", "x", ")", ".", "sum", "(", "axis", "=", "axis", ",", "keepdims", "=", "keepdims", ")", ")" ]
Compute L2 norms alogn specified axes.
[ "Compute", "L2", "norms", "alogn", "specified", "axes", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L33-L35
26,451
tensorflow/lucid
lucid/misc/gl/meshutil.py
normalize
def normalize(v, axis=None, eps=1e-10): """L2 Normalize along specified axes.""" return v / max(anorm(v, axis=axis, keepdims=True), eps)
python
def normalize(v, axis=None, eps=1e-10): """L2 Normalize along specified axes.""" return v / max(anorm(v, axis=axis, keepdims=True), eps)
[ "def", "normalize", "(", "v", ",", "axis", "=", "None", ",", "eps", "=", "1e-10", ")", ":", "return", "v", "/", "max", "(", "anorm", "(", "v", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", ",", "eps", ")" ]
L2 Normalize along specified axes.
[ "L2", "Normalize", "along", "specified", "axes", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L38-L40
26,452
tensorflow/lucid
lucid/misc/gl/meshutil.py
lookat
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]): """Generate LookAt modelview matrix.""" eye = np.float32(eye) forward = normalize(target - eye) side = normalize(np.cross(forward, up)) up = np.cross(side, forward) M = np.eye(4, dtype=np.float32) R = M[:3, :3] R[:] = [side, up, -forward] M[:3, 3] = -R.dot(eye) return M
python
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]): """Generate LookAt modelview matrix.""" eye = np.float32(eye) forward = normalize(target - eye) side = normalize(np.cross(forward, up)) up = np.cross(side, forward) M = np.eye(4, dtype=np.float32) R = M[:3, :3] R[:] = [side, up, -forward] M[:3, 3] = -R.dot(eye) return M
[ "def", "lookat", "(", "eye", ",", "target", "=", "[", "0", ",", "0", ",", "0", "]", ",", "up", "=", "[", "0", ",", "1", ",", "0", "]", ")", ":", "eye", "=", "np", ".", "float32", "(", "eye", ")", "forward", "=", "normalize", "(", "target", "-", "eye", ")", "side", "=", "normalize", "(", "np", ".", "cross", "(", "forward", ",", "up", ")", ")", "up", "=", "np", ".", "cross", "(", "side", ",", "forward", ")", "M", "=", "np", ".", "eye", "(", "4", ",", "dtype", "=", "np", ".", "float32", ")", "R", "=", "M", "[", ":", "3", ",", ":", "3", "]", "R", "[", ":", "]", "=", "[", "side", ",", "up", ",", "-", "forward", "]", "M", "[", ":", "3", ",", "3", "]", "=", "-", "R", ".", "dot", "(", "eye", ")", "return", "M" ]
Generate LookAt modelview matrix.
[ "Generate", "LookAt", "modelview", "matrix", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L43-L53
26,453
tensorflow/lucid
lucid/misc/gl/meshutil.py
sample_view
def sample_view(min_dist, max_dist=None): '''Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned. ''' if max_dist is None: max_dist = min_dist dist = np.random.uniform(min_dist, max_dist) eye = np.random.normal(size=3) eye = normalize(eye)*dist return lookat(eye)
python
def sample_view(min_dist, max_dist=None): '''Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned. ''' if max_dist is None: max_dist = min_dist dist = np.random.uniform(min_dist, max_dist) eye = np.random.normal(size=3) eye = normalize(eye)*dist return lookat(eye)
[ "def", "sample_view", "(", "min_dist", ",", "max_dist", "=", "None", ")", ":", "if", "max_dist", "is", "None", ":", "max_dist", "=", "min_dist", "dist", "=", "np", ".", "random", ".", "uniform", "(", "min_dist", ",", "max_dist", ")", "eye", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "3", ")", "eye", "=", "normalize", "(", "eye", ")", "*", "dist", "return", "lookat", "(", "eye", ")" ]
Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned.
[ "Sample", "random", "camera", "position", ".", "Sample", "origin", "directed", "camera", "position", "in", "given", "distance", "range", "from", "the", "origin", ".", "ModelView", "matrix", "is", "returned", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L56-L67
26,454
tensorflow/lucid
lucid/misc/gl/meshutil.py
_unify_rows
def _unify_rows(a): """Unify lengths of each row of a.""" lens = np.fromiter(map(len, a), np.int32) if not (lens[0] == lens).all(): out = np.zeros((len(a), lens.max()), np.float32) for i, row in enumerate(a): out[i, :lens[i]] = row else: out = np.float32(a) return out
python
def _unify_rows(a): """Unify lengths of each row of a.""" lens = np.fromiter(map(len, a), np.int32) if not (lens[0] == lens).all(): out = np.zeros((len(a), lens.max()), np.float32) for i, row in enumerate(a): out[i, :lens[i]] = row else: out = np.float32(a) return out
[ "def", "_unify_rows", "(", "a", ")", ":", "lens", "=", "np", ".", "fromiter", "(", "map", "(", "len", ",", "a", ")", ",", "np", ".", "int32", ")", "if", "not", "(", "lens", "[", "0", "]", "==", "lens", ")", ".", "all", "(", ")", ":", "out", "=", "np", ".", "zeros", "(", "(", "len", "(", "a", ")", ",", "lens", ".", "max", "(", ")", ")", ",", "np", ".", "float32", ")", "for", "i", ",", "row", "in", "enumerate", "(", "a", ")", ":", "out", "[", "i", ",", ":", "lens", "[", "i", "]", "]", "=", "row", "else", ":", "out", "=", "np", ".", "float32", "(", "a", ")", "return", "out" ]
Unify lengths of each row of a.
[ "Unify", "lengths", "of", "each", "row", "of", "a", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L87-L96
26,455
tensorflow/lucid
lucid/misc/gl/meshutil.py
normalize_mesh
def normalize_mesh(mesh): '''Scale mesh to fit into -1..1 cube''' mesh = dict(mesh) pos = mesh['position'][:,:3].copy() pos -= (pos.max(0)+pos.min(0)) / 2.0 pos /= np.abs(pos).max() mesh['position'] = pos return mesh
python
def normalize_mesh(mesh): '''Scale mesh to fit into -1..1 cube''' mesh = dict(mesh) pos = mesh['position'][:,:3].copy() pos -= (pos.max(0)+pos.min(0)) / 2.0 pos /= np.abs(pos).max() mesh['position'] = pos return mesh
[ "def", "normalize_mesh", "(", "mesh", ")", ":", "mesh", "=", "dict", "(", "mesh", ")", "pos", "=", "mesh", "[", "'position'", "]", "[", ":", ",", ":", "3", "]", ".", "copy", "(", ")", "pos", "-=", "(", "pos", ".", "max", "(", "0", ")", "+", "pos", ".", "min", "(", "0", ")", ")", "/", "2.0", "pos", "/=", "np", ".", "abs", "(", "pos", ")", ".", "max", "(", ")", "mesh", "[", "'position'", "]", "=", "pos", "return", "mesh" ]
Scale mesh to fit into -1..1 cube
[ "Scale", "mesh", "to", "fit", "into", "-", "1", "..", "1", "cube" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L161-L168
26,456
tensorflow/lucid
lucid/modelzoo/vision_base.py
Layer.activations
def activations(self): """Loads sampled activations, which requires network access.""" if self._activations is None: self._activations = _get_aligned_activations(self) return self._activations
python
def activations(self): """Loads sampled activations, which requires network access.""" if self._activations is None: self._activations = _get_aligned_activations(self) return self._activations
[ "def", "activations", "(", "self", ")", ":", "if", "self", ".", "_activations", "is", "None", ":", "self", ".", "_activations", "=", "_get_aligned_activations", "(", "self", ")", "return", "self", ".", "_activations" ]
Loads sampled activations, which requires network access.
[ "Loads", "sampled", "activations", "which", "requires", "network", "access", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L71-L75
26,457
tensorflow/lucid
lucid/modelzoo/vision_base.py
Model.create_input
def create_input(self, t_input=None, forget_xy_shape=True): """Create input tensor.""" if t_input is None: t_input = tf.placeholder(tf.float32, self.image_shape) t_prep_input = t_input if len(t_prep_input.shape) == 3: t_prep_input = tf.expand_dims(t_prep_input, 0) if forget_xy_shape: t_prep_input = model_util.forget_xy(t_prep_input) if hasattr(self, "is_BGR") and self.is_BGR is True: t_prep_input = tf.reverse(t_prep_input, [-1]) lo, hi = self.image_value_range t_prep_input = lo + t_prep_input * (hi - lo) return t_input, t_prep_input
python
def create_input(self, t_input=None, forget_xy_shape=True): """Create input tensor.""" if t_input is None: t_input = tf.placeholder(tf.float32, self.image_shape) t_prep_input = t_input if len(t_prep_input.shape) == 3: t_prep_input = tf.expand_dims(t_prep_input, 0) if forget_xy_shape: t_prep_input = model_util.forget_xy(t_prep_input) if hasattr(self, "is_BGR") and self.is_BGR is True: t_prep_input = tf.reverse(t_prep_input, [-1]) lo, hi = self.image_value_range t_prep_input = lo + t_prep_input * (hi - lo) return t_input, t_prep_input
[ "def", "create_input", "(", "self", ",", "t_input", "=", "None", ",", "forget_xy_shape", "=", "True", ")", ":", "if", "t_input", "is", "None", ":", "t_input", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "self", ".", "image_shape", ")", "t_prep_input", "=", "t_input", "if", "len", "(", "t_prep_input", ".", "shape", ")", "==", "3", ":", "t_prep_input", "=", "tf", ".", "expand_dims", "(", "t_prep_input", ",", "0", ")", "if", "forget_xy_shape", ":", "t_prep_input", "=", "model_util", ".", "forget_xy", "(", "t_prep_input", ")", "if", "hasattr", "(", "self", ",", "\"is_BGR\"", ")", "and", "self", ".", "is_BGR", "is", "True", ":", "t_prep_input", "=", "tf", ".", "reverse", "(", "t_prep_input", ",", "[", "-", "1", "]", ")", "lo", ",", "hi", "=", "self", ".", "image_value_range", "t_prep_input", "=", "lo", "+", "t_prep_input", "*", "(", "hi", "-", "lo", ")", "return", "t_input", ",", "t_prep_input" ]
Create input tensor.
[ "Create", "input", "tensor", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L161-L174
26,458
tensorflow/lucid
lucid/modelzoo/vision_base.py
Model.import_graph
def import_graph(self, t_input=None, scope='import', forget_xy_shape=True): """Import model GraphDef into the current graph.""" graph = tf.get_default_graph() assert graph.unique_name(scope, False) == scope, ( 'Scope "%s" already exists. Provide explicit scope names when ' 'importing multiple instances of the model.') % scope t_input, t_prep_input = self.create_input(t_input, forget_xy_shape) tf.import_graph_def( self.graph_def, {self.input_name: t_prep_input}, name=scope) self.post_import(scope)
python
def import_graph(self, t_input=None, scope='import', forget_xy_shape=True): """Import model GraphDef into the current graph.""" graph = tf.get_default_graph() assert graph.unique_name(scope, False) == scope, ( 'Scope "%s" already exists. Provide explicit scope names when ' 'importing multiple instances of the model.') % scope t_input, t_prep_input = self.create_input(t_input, forget_xy_shape) tf.import_graph_def( self.graph_def, {self.input_name: t_prep_input}, name=scope) self.post_import(scope)
[ "def", "import_graph", "(", "self", ",", "t_input", "=", "None", ",", "scope", "=", "'import'", ",", "forget_xy_shape", "=", "True", ")", ":", "graph", "=", "tf", ".", "get_default_graph", "(", ")", "assert", "graph", ".", "unique_name", "(", "scope", ",", "False", ")", "==", "scope", ",", "(", "'Scope \"%s\" already exists. Provide explicit scope names when '", "'importing multiple instances of the model.'", ")", "%", "scope", "t_input", ",", "t_prep_input", "=", "self", ".", "create_input", "(", "t_input", ",", "forget_xy_shape", ")", "tf", ".", "import_graph_def", "(", "self", ".", "graph_def", ",", "{", "self", ".", "input_name", ":", "t_prep_input", "}", ",", "name", "=", "scope", ")", "self", ".", "post_import", "(", "scope", ")" ]
Import model GraphDef into the current graph.
[ "Import", "model", "GraphDef", "into", "the", "current", "graph", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L176-L185
26,459
tensorflow/lucid
lucid/recipes/activation_atlas/layout.py
aligned_umap
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False): """`activations` can be a list of ndarrays. In that case a list of layouts is returned.""" umap_defaults = dict( n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine" ) umap_defaults.update(umap_options) # if passed a list of activations, we combine them and later split the layouts if type(activations) is list or type(activations) is tuple: num_activation_groups = len(activations) combined_activations = np.concatenate(activations) else: num_activation_groups = 1 combined_activations = activations try: layout = UMAP(**umap_defaults).fit_transform(combined_activations) except (RecursionError, SystemError) as exception: log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.") raise ValueError("UMAP failed to fit activations: %s", exception) if normalize: layout = normalize_layout(layout) if num_activation_groups > 1: layouts = np.split(layout, num_activation_groups, axis=0) return layouts else: return layout
python
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False): """`activations` can be a list of ndarrays. In that case a list of layouts is returned.""" umap_defaults = dict( n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine" ) umap_defaults.update(umap_options) # if passed a list of activations, we combine them and later split the layouts if type(activations) is list or type(activations) is tuple: num_activation_groups = len(activations) combined_activations = np.concatenate(activations) else: num_activation_groups = 1 combined_activations = activations try: layout = UMAP(**umap_defaults).fit_transform(combined_activations) except (RecursionError, SystemError) as exception: log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.") raise ValueError("UMAP failed to fit activations: %s", exception) if normalize: layout = normalize_layout(layout) if num_activation_groups > 1: layouts = np.split(layout, num_activation_groups, axis=0) return layouts else: return layout
[ "def", "aligned_umap", "(", "activations", ",", "umap_options", "=", "{", "}", ",", "normalize", "=", "True", ",", "verbose", "=", "False", ")", ":", "umap_defaults", "=", "dict", "(", "n_components", "=", "2", ",", "n_neighbors", "=", "50", ",", "min_dist", "=", "0.05", ",", "verbose", "=", "verbose", ",", "metric", "=", "\"cosine\"", ")", "umap_defaults", ".", "update", "(", "umap_options", ")", "# if passed a list of activations, we combine them and later split the layouts", "if", "type", "(", "activations", ")", "is", "list", "or", "type", "(", "activations", ")", "is", "tuple", ":", "num_activation_groups", "=", "len", "(", "activations", ")", "combined_activations", "=", "np", ".", "concatenate", "(", "activations", ")", "else", ":", "num_activation_groups", "=", "1", "combined_activations", "=", "activations", "try", ":", "layout", "=", "UMAP", "(", "*", "*", "umap_defaults", ")", ".", "fit_transform", "(", "combined_activations", ")", "except", "(", "RecursionError", ",", "SystemError", ")", "as", "exception", ":", "log", ".", "error", "(", "\"UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.\"", ")", "raise", "ValueError", "(", "\"UMAP failed to fit activations: %s\"", ",", "exception", ")", "if", "normalize", ":", "layout", "=", "normalize_layout", "(", "layout", ")", "if", "num_activation_groups", ">", "1", ":", "layouts", "=", "np", ".", "split", "(", "layout", ",", "num_activation_groups", ",", "axis", "=", "0", ")", "return", "layouts", "else", ":", "return", "layout" ]
`activations` can be a list of ndarrays. In that case a list of layouts is returned.
[ "activations", "can", "be", "a", "list", "of", "ndarrays", ".", "In", "that", "case", "a", "list", "of", "layouts", "is", "returned", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/recipes/activation_atlas/layout.py#L46-L74
26,460
tensorflow/lucid
lucid/scratch/atlas_pipeline/render_tile.py
render_tile
def render_tile(cells, ti, tj, render, params, metadata, layout, summary): """ Render each cell in the tile and stitch it into a single image """ image_size = params["cell_size"] * params["n_tile"] tile = Image.new("RGB", (image_size, image_size), (255,255,255)) keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_image = render(cells[key], params, metadata, layout, summary) # stitch this rendering into the tile image ci = key[0] % params["n_tile"] cj = key[1] % params["n_tile"] xmin = ci*params["cell_size"] ymin = cj*params["cell_size"] xmax = (ci+1)*params["cell_size"] ymax = (cj+1)*params["cell_size"] if params.get("scale_density", False): density = len(cells[key]["gi"]) # scale = density/summary["max_density"] scale = math.log(density)/(math.log(summary["max_density"]) or 1) owidth = xmax - xmin width = int(round(owidth * scale)) if(width < 1): width = 1 offsetL = int(round((owidth - width)/2)) offsetR = owidth - width - offsetL # handle odd numbers # print("\n") # print("width", width, offsetL, offsetR) box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR] resample = params.get("scale_type", Image.NEAREST) cell_image = cell_image.resize(size=(width,width), resample=resample) # print(cell_image) else: box = [xmin, ymin, xmax, ymax] # print("box", box) tile.paste(cell_image, box) print("\n") return tile
python
def render_tile(cells, ti, tj, render, params, metadata, layout, summary): """ Render each cell in the tile and stitch it into a single image """ image_size = params["cell_size"] * params["n_tile"] tile = Image.new("RGB", (image_size, image_size), (255,255,255)) keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_image = render(cells[key], params, metadata, layout, summary) # stitch this rendering into the tile image ci = key[0] % params["n_tile"] cj = key[1] % params["n_tile"] xmin = ci*params["cell_size"] ymin = cj*params["cell_size"] xmax = (ci+1)*params["cell_size"] ymax = (cj+1)*params["cell_size"] if params.get("scale_density", False): density = len(cells[key]["gi"]) # scale = density/summary["max_density"] scale = math.log(density)/(math.log(summary["max_density"]) or 1) owidth = xmax - xmin width = int(round(owidth * scale)) if(width < 1): width = 1 offsetL = int(round((owidth - width)/2)) offsetR = owidth - width - offsetL # handle odd numbers # print("\n") # print("width", width, offsetL, offsetR) box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR] resample = params.get("scale_type", Image.NEAREST) cell_image = cell_image.resize(size=(width,width), resample=resample) # print(cell_image) else: box = [xmin, ymin, xmax, ymax] # print("box", box) tile.paste(cell_image, box) print("\n") return tile
[ "def", "render_tile", "(", "cells", ",", "ti", ",", "tj", ",", "render", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", ":", "image_size", "=", "params", "[", "\"cell_size\"", "]", "*", "params", "[", "\"n_tile\"", "]", "tile", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "(", "image_size", ",", "image_size", ")", ",", "(", "255", ",", "255", ",", "255", ")", ")", "keys", "=", "cells", ".", "keys", "(", ")", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "print", "(", "\"cell\"", ",", "i", "+", "1", ",", "\"/\"", ",", "len", "(", "keys", ")", ",", "end", "=", "'\\r'", ")", "cell_image", "=", "render", "(", "cells", "[", "key", "]", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", "# stitch this rendering into the tile image", "ci", "=", "key", "[", "0", "]", "%", "params", "[", "\"n_tile\"", "]", "cj", "=", "key", "[", "1", "]", "%", "params", "[", "\"n_tile\"", "]", "xmin", "=", "ci", "*", "params", "[", "\"cell_size\"", "]", "ymin", "=", "cj", "*", "params", "[", "\"cell_size\"", "]", "xmax", "=", "(", "ci", "+", "1", ")", "*", "params", "[", "\"cell_size\"", "]", "ymax", "=", "(", "cj", "+", "1", ")", "*", "params", "[", "\"cell_size\"", "]", "if", "params", ".", "get", "(", "\"scale_density\"", ",", "False", ")", ":", "density", "=", "len", "(", "cells", "[", "key", "]", "[", "\"gi\"", "]", ")", "# scale = density/summary[\"max_density\"]", "scale", "=", "math", ".", "log", "(", "density", ")", "/", "(", "math", ".", "log", "(", "summary", "[", "\"max_density\"", "]", ")", "or", "1", ")", "owidth", "=", "xmax", "-", "xmin", "width", "=", "int", "(", "round", "(", "owidth", "*", "scale", ")", ")", "if", "(", "width", "<", "1", ")", ":", "width", "=", "1", "offsetL", "=", "int", "(", "round", "(", "(", "owidth", "-", "width", ")", "/", "2", ")", ")", "offsetR", "=", "owidth", "-", "width", "-", "offsetL", "# handle odd numbers", "# print(\"\\n\")", "# print(\"width\", width, offsetL, offsetR)", "box", 
"=", "[", "xmin", "+", "offsetL", ",", "ymin", "+", "offsetL", ",", "xmax", "-", "offsetR", ",", "ymax", "-", "offsetR", "]", "resample", "=", "params", ".", "get", "(", "\"scale_type\"", ",", "Image", ".", "NEAREST", ")", "cell_image", "=", "cell_image", ".", "resize", "(", "size", "=", "(", "width", ",", "width", ")", ",", "resample", "=", "resample", ")", "# print(cell_image)", "else", ":", "box", "=", "[", "xmin", ",", "ymin", ",", "xmax", ",", "ymax", "]", "# print(\"box\", box)", "tile", ".", "paste", "(", "cell_image", ",", "box", ")", "print", "(", "\"\\n\"", ")", "return", "tile" ]
Render each cell in the tile and stitch it into a single image
[ "Render", "each", "cell", "in", "the", "tile", "and", "stitch", "it", "into", "a", "single", "image" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/render_tile.py#L11-L51
26,461
tensorflow/lucid
lucid/scratch/atlas_pipeline/render_tile.py
aggregate_tile
def aggregate_tile(cells, ti, tj, aggregate, params, metadata, layout, summary): """ Call the user defined aggregation function on each cell and combine into a single json object """ tile = [] keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_json = aggregate(cells[key], params, metadata, layout, summary) tile.append({"aggregate":cell_json, "i":int(key[0]), "j":int(key[1])}) return tile
python
def aggregate_tile(cells, ti, tj, aggregate, params, metadata, layout, summary): """ Call the user defined aggregation function on each cell and combine into a single json object """ tile = [] keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_json = aggregate(cells[key], params, metadata, layout, summary) tile.append({"aggregate":cell_json, "i":int(key[0]), "j":int(key[1])}) return tile
[ "def", "aggregate_tile", "(", "cells", ",", "ti", ",", "tj", ",", "aggregate", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", ":", "tile", "=", "[", "]", "keys", "=", "cells", ".", "keys", "(", ")", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "print", "(", "\"cell\"", ",", "i", "+", "1", ",", "\"/\"", ",", "len", "(", "keys", ")", ",", "end", "=", "'\\r'", ")", "cell_json", "=", "aggregate", "(", "cells", "[", "key", "]", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", "tile", ".", "append", "(", "{", "\"aggregate\"", ":", "cell_json", ",", "\"i\"", ":", "int", "(", "key", "[", "0", "]", ")", ",", "\"j\"", ":", "int", "(", "key", "[", "1", "]", ")", "}", ")", "return", "tile" ]
Call the user defined aggregation function on each cell and combine into a single json object
[ "Call", "the", "user", "defined", "aggregation", "function", "on", "each", "cell", "and", "combine", "into", "a", "single", "json", "object" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/render_tile.py#L54-L64
26,462
tensorflow/lucid
lucid/misc/gl/glcontext.py
create_opengl_context
def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
python
def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
[ "def", "create_opengl_context", "(", "surface_size", "=", "(", "640", ",", "480", ")", ")", ":", "egl_display", "=", "egl", ".", "eglGetDisplay", "(", "egl", ".", "EGL_DEFAULT_DISPLAY", ")", "major", ",", "minor", "=", "egl", ".", "EGLint", "(", ")", ",", "egl", ".", "EGLint", "(", ")", "egl", ".", "eglInitialize", "(", "egl_display", ",", "pointer", "(", "major", ")", ",", "pointer", "(", "minor", ")", ")", "config_attribs", "=", "[", "egl", ".", "EGL_SURFACE_TYPE", ",", "egl", ".", "EGL_PBUFFER_BIT", ",", "egl", ".", "EGL_BLUE_SIZE", ",", "8", ",", "egl", ".", "EGL_GREEN_SIZE", ",", "8", ",", "egl", ".", "EGL_RED_SIZE", ",", "8", ",", "egl", ".", "EGL_DEPTH_SIZE", ",", "24", ",", "egl", ".", "EGL_RENDERABLE_TYPE", ",", "egl", ".", "EGL_OPENGL_BIT", ",", "egl", ".", "EGL_NONE", "]", "config_attribs", "=", "(", "egl", ".", "EGLint", "*", "len", "(", "config_attribs", ")", ")", "(", "*", "config_attribs", ")", "num_configs", "=", "egl", ".", "EGLint", "(", ")", "egl_cfg", "=", "egl", ".", "EGLConfig", "(", ")", "egl", ".", "eglChooseConfig", "(", "egl_display", ",", "config_attribs", ",", "pointer", "(", "egl_cfg", ")", ",", "1", ",", "pointer", "(", "num_configs", ")", ")", "width", ",", "height", "=", "surface_size", "pbuffer_attribs", "=", "[", "egl", ".", "EGL_WIDTH", ",", "width", ",", "egl", ".", "EGL_HEIGHT", ",", "height", ",", "egl", ".", "EGL_NONE", ",", "]", "pbuffer_attribs", "=", "(", "egl", ".", "EGLint", "*", "len", "(", "pbuffer_attribs", ")", ")", "(", "*", "pbuffer_attribs", ")", "egl_surf", "=", "egl", ".", "eglCreatePbufferSurface", "(", "egl_display", ",", "egl_cfg", ",", "pbuffer_attribs", ")", "egl", ".", "eglBindAPI", "(", "egl", ".", "EGL_OPENGL_API", ")", "egl_context", "=", "egl", ".", "eglCreateContext", "(", "egl_display", ",", "egl_cfg", ",", "egl", ".", "EGL_NO_CONTEXT", ",", "None", ")", "egl", ".", "eglMakeCurrent", "(", "egl_display", ",", "egl_surf", ",", "egl_surf", ",", "egl_context", ")" ]
Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface.
[ "Create", "offscreen", "OpenGL", "context", "and", "make", "it", "current", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/glcontext.py#L79-L120
26,463
tensorflow/lucid
lucid/optvis/param/resize_bilinear_nd.py
resize_bilinear_nd
def resize_bilinear_nd(t, target_shape): """Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor """ shape = t.get_shape().as_list() target_shape = list(target_shape) assert len(shape) == len(target_shape) # We progressively move through the shape, resizing dimensions... d = 0 while d < len(shape): # If we don't need to deal with the next dimesnion, step over it if shape[d] == target_shape[d]: d += 1 continue # Otherwise, we'll resize the next two dimensions... # If d+2 doesn't need to be resized, this will just be a null op for it new_shape = shape[:] new_shape[d : d+2] = target_shape[d : d+2] # The helper collapse_shape() makes our shapes 4-dimensional with # the two dimesnions we want to deal with in the middle. shape_ = collapse_shape(shape, d, d+2) new_shape_ = collapse_shape(new_shape, d, d+2) # We can then reshape and use the 2d tf.image.resize_bilinear() on the # inner two dimesions. t_ = tf.reshape(t, shape_) t_ = tf.image.resize_bilinear(t_, new_shape_[1:3]) # And then reshape back to our uncollapsed version, having finished resizing # two more dimensions in our shape. t = tf.reshape(t_, new_shape) shape = new_shape d += 2 return t
python
def resize_bilinear_nd(t, target_shape): """Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor """ shape = t.get_shape().as_list() target_shape = list(target_shape) assert len(shape) == len(target_shape) # We progressively move through the shape, resizing dimensions... d = 0 while d < len(shape): # If we don't need to deal with the next dimesnion, step over it if shape[d] == target_shape[d]: d += 1 continue # Otherwise, we'll resize the next two dimensions... # If d+2 doesn't need to be resized, this will just be a null op for it new_shape = shape[:] new_shape[d : d+2] = target_shape[d : d+2] # The helper collapse_shape() makes our shapes 4-dimensional with # the two dimesnions we want to deal with in the middle. shape_ = collapse_shape(shape, d, d+2) new_shape_ = collapse_shape(new_shape, d, d+2) # We can then reshape and use the 2d tf.image.resize_bilinear() on the # inner two dimesions. t_ = tf.reshape(t, shape_) t_ = tf.image.resize_bilinear(t_, new_shape_[1:3]) # And then reshape back to our uncollapsed version, having finished resizing # two more dimensions in our shape. t = tf.reshape(t_, new_shape) shape = new_shape d += 2 return t
[ "def", "resize_bilinear_nd", "(", "t", ",", "target_shape", ")", ":", "shape", "=", "t", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "target_shape", "=", "list", "(", "target_shape", ")", "assert", "len", "(", "shape", ")", "==", "len", "(", "target_shape", ")", "# We progressively move through the shape, resizing dimensions...", "d", "=", "0", "while", "d", "<", "len", "(", "shape", ")", ":", "# If we don't need to deal with the next dimesnion, step over it", "if", "shape", "[", "d", "]", "==", "target_shape", "[", "d", "]", ":", "d", "+=", "1", "continue", "# Otherwise, we'll resize the next two dimensions...", "# If d+2 doesn't need to be resized, this will just be a null op for it", "new_shape", "=", "shape", "[", ":", "]", "new_shape", "[", "d", ":", "d", "+", "2", "]", "=", "target_shape", "[", "d", ":", "d", "+", "2", "]", "# The helper collapse_shape() makes our shapes 4-dimensional with", "# the two dimesnions we want to deal with in the middle.", "shape_", "=", "collapse_shape", "(", "shape", ",", "d", ",", "d", "+", "2", ")", "new_shape_", "=", "collapse_shape", "(", "new_shape", ",", "d", ",", "d", "+", "2", ")", "# We can then reshape and use the 2d tf.image.resize_bilinear() on the", "# inner two dimesions.", "t_", "=", "tf", ".", "reshape", "(", "t", ",", "shape_", ")", "t_", "=", "tf", ".", "image", ".", "resize_bilinear", "(", "t_", ",", "new_shape_", "[", "1", ":", "3", "]", ")", "# And then reshape back to our uncollapsed version, having finished resizing", "# two more dimensions in our shape.", "t", "=", "tf", ".", "reshape", "(", "t_", ",", "new_shape", ")", "shape", "=", "new_shape", "d", "+=", "2", "return", "t" ]
Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor
[ "Bilinear", "resizes", "a", "tensor", "t", "to", "have", "shape", "target_shape", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/resize_bilinear_nd.py#L68-L116
26,464
tensorflow/lucid
lucid/modelzoo/aligned_activations.py
get_aligned_activations
def get_aligned_activations(layer): """Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.""" activation_paths = [ PATH_TEMPLATE.format( sanitize(layer.model_class.name), sanitize(layer.name), page ) for page in range(NUMBER_OF_PAGES) ] activations = np.vstack([load(path) for path in activation_paths]) assert np.all(np.isfinite(activations)) return activations
python
def get_aligned_activations(layer): """Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.""" activation_paths = [ PATH_TEMPLATE.format( sanitize(layer.model_class.name), sanitize(layer.name), page ) for page in range(NUMBER_OF_PAGES) ] activations = np.vstack([load(path) for path in activation_paths]) assert np.all(np.isfinite(activations)) return activations
[ "def", "get_aligned_activations", "(", "layer", ")", ":", "activation_paths", "=", "[", "PATH_TEMPLATE", ".", "format", "(", "sanitize", "(", "layer", ".", "model_class", ".", "name", ")", ",", "sanitize", "(", "layer", ".", "name", ")", ",", "page", ")", "for", "page", "in", "range", "(", "NUMBER_OF_PAGES", ")", "]", "activations", "=", "np", ".", "vstack", "(", "[", "load", "(", "path", ")", "for", "path", "in", "activation_paths", "]", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "activations", ")", ")", "return", "activations" ]
Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.
[ "Downloads", "100k", "activations", "of", "the", "specified", "layer", "sampled", "from", "iterating", "over", "ImageNet", ".", "Activations", "of", "all", "layers", "where", "sampled", "at", "the", "same", "spatial", "positions", "for", "each", "image", "allowing", "the", "calculation", "of", "correlations", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L35-L47
26,465
tensorflow/lucid
lucid/modelzoo/aligned_activations.py
layer_covariance
def layer_covariance(layer1, layer2=None): """Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.""" layer2 = layer2 or layer1 act1, act2 = layer1.activations, layer2.activations num_datapoints = act1.shape[0] # cast to avoid numpy type promotion during division return np.matmul(act1.T, act2) / float(num_datapoints)
python
def layer_covariance(layer1, layer2=None): """Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.""" layer2 = layer2 or layer1 act1, act2 = layer1.activations, layer2.activations num_datapoints = act1.shape[0] # cast to avoid numpy type promotion during division return np.matmul(act1.T, act2) / float(num_datapoints)
[ "def", "layer_covariance", "(", "layer1", ",", "layer2", "=", "None", ")", ":", "layer2", "=", "layer2", "or", "layer1", "act1", ",", "act2", "=", "layer1", ".", "activations", ",", "layer2", ".", "activations", "num_datapoints", "=", "act1", ".", "shape", "[", "0", "]", "# cast to avoid numpy type promotion during division", "return", "np", ".", "matmul", "(", "act1", ".", "T", ",", "act2", ")", "/", "float", "(", "num_datapoints", ")" ]
Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.
[ "Computes", "the", "covariance", "matrix", "between", "the", "neurons", "of", "two", "layers", ".", "If", "only", "one", "layer", "is", "passed", "computes", "the", "symmetric", "covariance", "matrix", "of", "that", "layer", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L51-L57
26,466
tensorflow/lucid
lucid/modelzoo/aligned_activations.py
push_activations
def push_activations(activations, from_layer, to_layer): """Push activations from one model to another using prerecorded correlations""" inverse_covariance_matrix = layer_inverse_covariance(from_layer) activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T covariance_matrix = layer_covariance(from_layer, to_layer) activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix) return activation_recorrelated
python
def push_activations(activations, from_layer, to_layer): """Push activations from one model to another using prerecorded correlations""" inverse_covariance_matrix = layer_inverse_covariance(from_layer) activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T covariance_matrix = layer_covariance(from_layer, to_layer) activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix) return activation_recorrelated
[ "def", "push_activations", "(", "activations", ",", "from_layer", ",", "to_layer", ")", ":", "inverse_covariance_matrix", "=", "layer_inverse_covariance", "(", "from_layer", ")", "activations_decorrelated", "=", "np", ".", "dot", "(", "inverse_covariance_matrix", ",", "activations", ".", "T", ")", ".", "T", "covariance_matrix", "=", "layer_covariance", "(", "from_layer", ",", "to_layer", ")", "activation_recorrelated", "=", "np", ".", "dot", "(", "activations_decorrelated", ",", "covariance_matrix", ")", "return", "activation_recorrelated" ]
Push activations from one model to another using prerecorded correlations
[ "Push", "activations", "from", "one", "model", "to", "another", "using", "prerecorded", "correlations" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L66-L72
26,467
tensorflow/lucid
lucid/recipes/image_interpolation_params.py
multi_interpolation_basis
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3): """A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i """ N, M, W, Ch = n_objectives, n_interp_steps, width, channels const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch]) example_interps = [ sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)] example_basis = [] for n in range(N): col = [] for m in range(N): interp = example_interps[n] + example_interps[m][::-1] col.append(interp) example_basis.append(col) interp_basis = [] for n in range(N): col = [interp_basis[m][N-n][::-1] for m in range(n)] col.append(tf.zeros([M, W, W, 3])) for m in range(n+1, N): interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch]) for k in [1, 2]]) col.append(interp) interp_basis.append(col) basis = [] for n in range(N): col_ex = tf.stack(example_basis[n]) col_in = tf.stack(interp_basis[n]) basis.append(col_ex + col_in) basis = tf.stack(basis) return basis + const_term
python
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3): """A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i """ N, M, W, Ch = n_objectives, n_interp_steps, width, channels const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch]) example_interps = [ sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)] example_basis = [] for n in range(N): col = [] for m in range(N): interp = example_interps[n] + example_interps[m][::-1] col.append(interp) example_basis.append(col) interp_basis = [] for n in range(N): col = [interp_basis[m][N-n][::-1] for m in range(n)] col.append(tf.zeros([M, W, W, 3])) for m in range(n+1, N): interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch]) for k in [1, 2]]) col.append(interp) interp_basis.append(col) basis = [] for n in range(N): col_ex = tf.stack(example_basis[n]) col_in = tf.stack(interp_basis[n]) basis.append(col_ex + col_in) basis = tf.stack(basis) return basis + const_term
[ "def", "multi_interpolation_basis", "(", "n_objectives", "=", "6", ",", "n_interp_steps", "=", "5", ",", "width", "=", "128", ",", "channels", "=", "3", ")", ":", "N", ",", "M", ",", "W", ",", "Ch", "=", "n_objectives", ",", "n_interp_steps", ",", "width", ",", "channels", "const_term", "=", "sum", "(", "[", "lowres_tensor", "(", "[", "W", ",", "W", ",", "Ch", "]", ",", "[", "W", "//", "k", ",", "W", "//", "k", ",", "Ch", "]", ")", "for", "k", "in", "[", "1", ",", "2", ",", "4", ",", "8", "]", "]", ")", "const_term", "=", "tf", ".", "reshape", "(", "const_term", ",", "[", "1", ",", "1", ",", "1", ",", "W", ",", "W", ",", "Ch", "]", ")", "example_interps", "=", "[", "sum", "(", "[", "lowres_tensor", "(", "[", "M", ",", "W", ",", "W", ",", "Ch", "]", ",", "[", "2", ",", "W", "//", "k", ",", "W", "//", "k", ",", "Ch", "]", ")", "for", "k", "in", "[", "1", ",", "2", ",", "4", ",", "8", "]", "]", ")", "for", "_", "in", "range", "(", "N", ")", "]", "example_basis", "=", "[", "]", "for", "n", "in", "range", "(", "N", ")", ":", "col", "=", "[", "]", "for", "m", "in", "range", "(", "N", ")", ":", "interp", "=", "example_interps", "[", "n", "]", "+", "example_interps", "[", "m", "]", "[", ":", ":", "-", "1", "]", "col", ".", "append", "(", "interp", ")", "example_basis", ".", "append", "(", "col", ")", "interp_basis", "=", "[", "]", "for", "n", "in", "range", "(", "N", ")", ":", "col", "=", "[", "interp_basis", "[", "m", "]", "[", "N", "-", "n", "]", "[", ":", ":", "-", "1", "]", "for", "m", "in", "range", "(", "n", ")", "]", "col", ".", "append", "(", "tf", ".", "zeros", "(", "[", "M", ",", "W", ",", "W", ",", "3", "]", ")", ")", "for", "m", "in", "range", "(", "n", "+", "1", ",", "N", ")", ":", "interp", "=", "sum", "(", "[", "lowres_tensor", "(", "[", "M", ",", "W", ",", "W", ",", "Ch", "]", ",", "[", "M", ",", "W", "//", "k", ",", "W", "//", "k", ",", "Ch", "]", ")", "for", "k", "in", "[", "1", ",", "2", "]", "]", ")", "col", ".", "append", "(", "interp", ")", 
"interp_basis", ".", "append", "(", "col", ")", "basis", "=", "[", "]", "for", "n", "in", "range", "(", "N", ")", ":", "col_ex", "=", "tf", ".", "stack", "(", "example_basis", "[", "n", "]", ")", "col_in", "=", "tf", ".", "stack", "(", "interp_basis", "[", "n", "]", ")", "basis", ".", "append", "(", "col_ex", "+", "col_in", ")", "basis", "=", "tf", ".", "stack", "(", "basis", ")", "return", "basis", "+", "const_term" ]
A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i
[ "A", "paramaterization", "for", "interpolating", "between", "each", "pair", "of", "N", "objectives", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/recipes/image_interpolation_params.py#L22-L82
26,468
tensorflow/lucid
lucid/optvis/overrides/gradient_override.py
register_to_random_name
def register_to_random_name(grad_f): """Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to. """ grad_f_name = grad_f.__name__ + "_" + str(uuid.uuid4()) tf.RegisterGradient(grad_f_name)(grad_f) return grad_f_name
python
def register_to_random_name(grad_f): """Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to. """ grad_f_name = grad_f.__name__ + "_" + str(uuid.uuid4()) tf.RegisterGradient(grad_f_name)(grad_f) return grad_f_name
[ "def", "register_to_random_name", "(", "grad_f", ")", ":", "grad_f_name", "=", "grad_f", ".", "__name__", "+", "\"_\"", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "tf", ".", "RegisterGradient", "(", "grad_f_name", ")", "(", "grad_f", ")", "return", "grad_f_name" ]
Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to.
[ "Register", "a", "gradient", "function", "to", "a", "random", "string", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/overrides/gradient_override.py#L50-L73
26,469
tensorflow/lucid
lucid/optvis/overrides/gradient_override.py
use_gradient
def use_gradient(grad_f): """Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of. """ grad_f_name = register_to_random_name(grad_f) def function_wrapper(f): def inner(*inputs): # TensorFlow only supports (as of writing) overriding the gradient of # individual ops. In order to override the gardient of `f`, we need to # somehow make it appear to be an individual TensorFlow op. # # Our solution is to create a PyFunc that mimics `f`. # # In particular, we construct a graph for `f` and run it, then use a # stateful PyFunc to stash it's results in Python. Then we have another # PyFunc mimic it by taking all the same inputs and returning the stashed # output. # # I wish we could do this without PyFunc, but I don't see a way to have # it be fully general. state = {"out_value": None} # First, we need to run `f` and store it's output. out = f(*inputs) def store_out(out_value): """Store the value of out to a python variable.""" state["out_value"] = out_value store_name = "store_" + f.__name__ store = tf.py_func(store_out, [out], (), stateful=True, name=store_name) # Next, we create the mock function, with an overriden gradient. # Note that we need to make sure store gets evaluated before the mock # runs. def mock_f(*inputs): """Mimic f by retrieving the stored value of out.""" return state["out_value"] with tf.control_dependencies([store]): with gradient_override_map({"PyFunc": grad_f_name}): mock_name = "mock_" + f.__name__ mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name) mock_out.set_shape(out.get_shape()) # Finally, we can return the mock. 
return mock_out return inner return function_wrapper
python
def use_gradient(grad_f): """Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of. """ grad_f_name = register_to_random_name(grad_f) def function_wrapper(f): def inner(*inputs): # TensorFlow only supports (as of writing) overriding the gradient of # individual ops. In order to override the gardient of `f`, we need to # somehow make it appear to be an individual TensorFlow op. # # Our solution is to create a PyFunc that mimics `f`. # # In particular, we construct a graph for `f` and run it, then use a # stateful PyFunc to stash it's results in Python. Then we have another # PyFunc mimic it by taking all the same inputs and returning the stashed # output. # # I wish we could do this without PyFunc, but I don't see a way to have # it be fully general. state = {"out_value": None} # First, we need to run `f` and store it's output. out = f(*inputs) def store_out(out_value): """Store the value of out to a python variable.""" state["out_value"] = out_value store_name = "store_" + f.__name__ store = tf.py_func(store_out, [out], (), stateful=True, name=store_name) # Next, we create the mock function, with an overriden gradient. # Note that we need to make sure store gets evaluated before the mock # runs. def mock_f(*inputs): """Mimic f by retrieving the stored value of out.""" return state["out_value"] with tf.control_dependencies([store]): with gradient_override_map({"PyFunc": grad_f_name}): mock_name = "mock_" + f.__name__ mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name) mock_out.set_shape(out.get_shape()) # Finally, we can return the mock. 
return mock_out return inner return function_wrapper
[ "def", "use_gradient", "(", "grad_f", ")", ":", "grad_f_name", "=", "register_to_random_name", "(", "grad_f", ")", "def", "function_wrapper", "(", "f", ")", ":", "def", "inner", "(", "*", "inputs", ")", ":", "# TensorFlow only supports (as of writing) overriding the gradient of", "# individual ops. In order to override the gardient of `f`, we need to", "# somehow make it appear to be an individual TensorFlow op.", "#", "# Our solution is to create a PyFunc that mimics `f`.", "#", "# In particular, we construct a graph for `f` and run it, then use a", "# stateful PyFunc to stash it's results in Python. Then we have another", "# PyFunc mimic it by taking all the same inputs and returning the stashed", "# output.", "#", "# I wish we could do this without PyFunc, but I don't see a way to have", "# it be fully general.", "state", "=", "{", "\"out_value\"", ":", "None", "}", "# First, we need to run `f` and store it's output.", "out", "=", "f", "(", "*", "inputs", ")", "def", "store_out", "(", "out_value", ")", ":", "\"\"\"Store the value of out to a python variable.\"\"\"", "state", "[", "\"out_value\"", "]", "=", "out_value", "store_name", "=", "\"store_\"", "+", "f", ".", "__name__", "store", "=", "tf", ".", "py_func", "(", "store_out", ",", "[", "out", "]", ",", "(", ")", ",", "stateful", "=", "True", ",", "name", "=", "store_name", ")", "# Next, we create the mock function, with an overriden gradient.", "# Note that we need to make sure store gets evaluated before the mock", "# runs.", "def", "mock_f", "(", "*", "inputs", ")", ":", "\"\"\"Mimic f by retrieving the stored value of out.\"\"\"", "return", "state", "[", "\"out_value\"", "]", "with", "tf", ".", "control_dependencies", "(", "[", "store", "]", ")", ":", "with", "gradient_override_map", "(", "{", "\"PyFunc\"", ":", "grad_f_name", "}", ")", ":", "mock_name", "=", "\"mock_\"", "+", "f", ".", "__name__", "mock_out", "=", "tf", ".", "py_func", "(", "mock_f", ",", "inputs", ",", "out", ".", "dtype", ",", 
"stateful", "=", "True", ",", "name", "=", "mock_name", ")", "mock_out", ".", "set_shape", "(", "out", ".", "get_shape", "(", ")", ")", "# Finally, we can return the mock.", "return", "mock_out", "return", "inner", "return", "function_wrapper" ]
Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of.
[ "Decorator", "for", "easily", "setting", "custom", "gradients", "for", "TensorFlow", "functions", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/overrides/gradient_override.py#L107-L178
26,470
tensorflow/lucid
lucid/optvis/param/spatial.py
pixel_image
def pixel_image(shape, sd=None, init_val=None): """A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument. """ if sd is not None and init_val is not None: warnings.warn( "`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value." ) sd = sd or 0.01 init_val = init_val or np.random.normal(size=shape, scale=sd).astype(np.float32) return tf.Variable(init_val)
python
def pixel_image(shape, sd=None, init_val=None): """A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument. """ if sd is not None and init_val is not None: warnings.warn( "`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value." ) sd = sd or 0.01 init_val = init_val or np.random.normal(size=shape, scale=sd).astype(np.float32) return tf.Variable(init_val)
[ "def", "pixel_image", "(", "shape", ",", "sd", "=", "None", ",", "init_val", "=", "None", ")", ":", "if", "sd", "is", "not", "None", "and", "init_val", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value.\"", ")", "sd", "=", "sd", "or", "0.01", "init_val", "=", "init_val", "or", "np", ".", "random", ".", "normal", "(", "size", "=", "shape", ",", "scale", "=", "sd", ")", ".", "astype", "(", "np", ".", "float32", ")", "return", "tf", ".", "Variable", "(", "init_val", ")" ]
A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument.
[ "A", "naive", "pixel", "-", "based", "image", "parameterization", ".", "Defaults", "to", "a", "random", "initialization", "but", "can", "take", "a", "supplied", "init_val", "argument", "instead", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L24-L45
26,471
tensorflow/lucid
lucid/optvis/param/spatial.py
rfft2d_freqs
def rfft2d_freqs(h, w): """Computes 2D spectrum frequencies.""" fy = np.fft.fftfreq(h)[:, None] # when we have an odd input dimension we need to keep one additional # frequency and later cut off 1 pixel if w % 2 == 1: fx = np.fft.fftfreq(w)[: w // 2 + 2] else: fx = np.fft.fftfreq(w)[: w // 2 + 1] return np.sqrt(fx * fx + fy * fy)
python
def rfft2d_freqs(h, w): """Computes 2D spectrum frequencies.""" fy = np.fft.fftfreq(h)[:, None] # when we have an odd input dimension we need to keep one additional # frequency and later cut off 1 pixel if w % 2 == 1: fx = np.fft.fftfreq(w)[: w // 2 + 2] else: fx = np.fft.fftfreq(w)[: w // 2 + 1] return np.sqrt(fx * fx + fy * fy)
[ "def", "rfft2d_freqs", "(", "h", ",", "w", ")", ":", "fy", "=", "np", ".", "fft", ".", "fftfreq", "(", "h", ")", "[", ":", ",", "None", "]", "# when we have an odd input dimension we need to keep one additional", "# frequency and later cut off 1 pixel", "if", "w", "%", "2", "==", "1", ":", "fx", "=", "np", ".", "fft", ".", "fftfreq", "(", "w", ")", "[", ":", "w", "//", "2", "+", "2", "]", "else", ":", "fx", "=", "np", ".", "fft", ".", "fftfreq", "(", "w", ")", "[", ":", "w", "//", "2", "+", "1", "]", "return", "np", ".", "sqrt", "(", "fx", "*", "fx", "+", "fy", "*", "fy", ")" ]
Computes 2D spectrum frequencies.
[ "Computes", "2D", "spectrum", "frequencies", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L48-L58
26,472
tensorflow/lucid
lucid/optvis/param/spatial.py
fft_image
def fft_image(shape, sd=None, decay_power=1): """An image paramaterization using 2D Fourier coefficients.""" sd = sd or 0.01 batch, h, w, ch = shape freqs = rfft2d_freqs(h, w) init_val_size = (2, ch) + freqs.shape images = [] for _ in range(batch): # Create a random variable holding the actual 2D fourier coefficients init_val = np.random.normal(size=init_val_size, scale=sd).astype(np.float32) spectrum_real_imag_t = tf.Variable(init_val) spectrum_t = tf.complex(spectrum_real_imag_t[0], spectrum_real_imag_t[1]) # Scale the spectrum. First normalize energy, then scale by the square-root # of the number of pixels to get a unitary transformation. # This allows to use similar leanring rates to pixel-wise optimisation. scale = 1.0 / np.maximum(freqs, 1.0 / max(w, h)) ** decay_power scale *= np.sqrt(w * h) scaled_spectrum_t = scale * spectrum_t # convert complex scaled spectrum to shape (h, w, ch) image tensor # needs to transpose because irfft2d returns channels first image_t = tf.transpose(tf.spectral.irfft2d(scaled_spectrum_t), (1, 2, 0)) # in case of odd spatial input dimensions we need to crop image_t = image_t[:h, :w, :ch] images.append(image_t) batched_image_t = tf.stack(images) / 4.0 # TODO: is that a magic constant? return batched_image_t
python
def fft_image(shape, sd=None, decay_power=1): """An image paramaterization using 2D Fourier coefficients.""" sd = sd or 0.01 batch, h, w, ch = shape freqs = rfft2d_freqs(h, w) init_val_size = (2, ch) + freqs.shape images = [] for _ in range(batch): # Create a random variable holding the actual 2D fourier coefficients init_val = np.random.normal(size=init_val_size, scale=sd).astype(np.float32) spectrum_real_imag_t = tf.Variable(init_val) spectrum_t = tf.complex(spectrum_real_imag_t[0], spectrum_real_imag_t[1]) # Scale the spectrum. First normalize energy, then scale by the square-root # of the number of pixels to get a unitary transformation. # This allows to use similar leanring rates to pixel-wise optimisation. scale = 1.0 / np.maximum(freqs, 1.0 / max(w, h)) ** decay_power scale *= np.sqrt(w * h) scaled_spectrum_t = scale * spectrum_t # convert complex scaled spectrum to shape (h, w, ch) image tensor # needs to transpose because irfft2d returns channels first image_t = tf.transpose(tf.spectral.irfft2d(scaled_spectrum_t), (1, 2, 0)) # in case of odd spatial input dimensions we need to crop image_t = image_t[:h, :w, :ch] images.append(image_t) batched_image_t = tf.stack(images) / 4.0 # TODO: is that a magic constant? return batched_image_t
[ "def", "fft_image", "(", "shape", ",", "sd", "=", "None", ",", "decay_power", "=", "1", ")", ":", "sd", "=", "sd", "or", "0.01", "batch", ",", "h", ",", "w", ",", "ch", "=", "shape", "freqs", "=", "rfft2d_freqs", "(", "h", ",", "w", ")", "init_val_size", "=", "(", "2", ",", "ch", ")", "+", "freqs", ".", "shape", "images", "=", "[", "]", "for", "_", "in", "range", "(", "batch", ")", ":", "# Create a random variable holding the actual 2D fourier coefficients", "init_val", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "init_val_size", ",", "scale", "=", "sd", ")", ".", "astype", "(", "np", ".", "float32", ")", "spectrum_real_imag_t", "=", "tf", ".", "Variable", "(", "init_val", ")", "spectrum_t", "=", "tf", ".", "complex", "(", "spectrum_real_imag_t", "[", "0", "]", ",", "spectrum_real_imag_t", "[", "1", "]", ")", "# Scale the spectrum. First normalize energy, then scale by the square-root", "# of the number of pixels to get a unitary transformation.", "# This allows to use similar leanring rates to pixel-wise optimisation.", "scale", "=", "1.0", "/", "np", ".", "maximum", "(", "freqs", ",", "1.0", "/", "max", "(", "w", ",", "h", ")", ")", "**", "decay_power", "scale", "*=", "np", ".", "sqrt", "(", "w", "*", "h", ")", "scaled_spectrum_t", "=", "scale", "*", "spectrum_t", "# convert complex scaled spectrum to shape (h, w, ch) image tensor", "# needs to transpose because irfft2d returns channels first", "image_t", "=", "tf", ".", "transpose", "(", "tf", ".", "spectral", ".", "irfft2d", "(", "scaled_spectrum_t", ")", ",", "(", "1", ",", "2", ",", "0", ")", ")", "# in case of odd spatial input dimensions we need to crop", "image_t", "=", "image_t", "[", ":", "h", ",", ":", "w", ",", ":", "ch", "]", "images", ".", "append", "(", "image_t", ")", "batched_image_t", "=", "tf", ".", "stack", "(", "images", ")", "/", "4.0", "# TODO: is that a magic constant?", "return", "batched_image_t" ]
An image paramaterization using 2D Fourier coefficients.
[ "An", "image", "paramaterization", "using", "2D", "Fourier", "coefficients", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L61-L93
26,473
tensorflow/lucid
lucid/optvis/param/spatial.py
laplacian_pyramid_image
def laplacian_pyramid_image(shape, n_levels=4, sd=None): """Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument. """ batch_dims = shape[:-3] w, h, ch = shape[-3:] pyramid = 0 for n in range(n_levels): k = 2 ** n pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd) return pyramid
python
def laplacian_pyramid_image(shape, n_levels=4, sd=None): """Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument. """ batch_dims = shape[:-3] w, h, ch = shape[-3:] pyramid = 0 for n in range(n_levels): k = 2 ** n pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd) return pyramid
[ "def", "laplacian_pyramid_image", "(", "shape", ",", "n_levels", "=", "4", ",", "sd", "=", "None", ")", ":", "batch_dims", "=", "shape", "[", ":", "-", "3", "]", "w", ",", "h", ",", "ch", "=", "shape", "[", "-", "3", ":", "]", "pyramid", "=", "0", "for", "n", "in", "range", "(", "n_levels", ")", ":", "k", "=", "2", "**", "n", "pyramid", "+=", "lowres_tensor", "(", "shape", ",", "batch_dims", "+", "(", "w", "//", "k", ",", "h", "//", "k", ",", "ch", ")", ",", "sd", "=", "sd", ")", "return", "pyramid" ]
Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument.
[ "Simple", "laplacian", "pyramid", "paramaterization", "of", "an", "image", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L96-L115
26,474
tensorflow/lucid
lucid/optvis/param/spatial.py
bilinearly_sampled_image
def bilinearly_sampled_image(texture, uv): """Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values. """ h, w = tf.unstack(tf.shape(texture)[:2]) u, v = tf.split(uv, 2, axis=-1) v = 1.0 - v # vertical flip to match GL convention u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5 u0, u1 = tf.floor(u), tf.ceil(u) v0, v1 = tf.floor(v), tf.ceil(v) uf, vf = u - u0, v - v0 u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1]) def sample(u, v): vu = tf.concat([v % h, u % w], axis=-1) return tf.gather_nd(texture, vu) s00, s01 = sample(u0, v0), sample(u0, v1) s10, s11 = sample(u1, v0), sample(u1, v1) s0 = s00 * (1.0 - vf) + s01 * vf s1 = s10 * (1.0 - vf) + s11 * vf s = s0 * (1.0 - uf) + s1 * uf return s
python
def bilinearly_sampled_image(texture, uv): """Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values. """ h, w = tf.unstack(tf.shape(texture)[:2]) u, v = tf.split(uv, 2, axis=-1) v = 1.0 - v # vertical flip to match GL convention u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5 u0, u1 = tf.floor(u), tf.ceil(u) v0, v1 = tf.floor(v), tf.ceil(v) uf, vf = u - u0, v - v0 u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1]) def sample(u, v): vu = tf.concat([v % h, u % w], axis=-1) return tf.gather_nd(texture, vu) s00, s01 = sample(u0, v0), sample(u0, v1) s10, s11 = sample(u1, v0), sample(u1, v1) s0 = s00 * (1.0 - vf) + s01 * vf s1 = s10 * (1.0 - vf) + s11 * vf s = s0 * (1.0 - uf) + s1 * uf return s
[ "def", "bilinearly_sampled_image", "(", "texture", ",", "uv", ")", ":", "h", ",", "w", "=", "tf", ".", "unstack", "(", "tf", ".", "shape", "(", "texture", ")", "[", ":", "2", "]", ")", "u", ",", "v", "=", "tf", ".", "split", "(", "uv", ",", "2", ",", "axis", "=", "-", "1", ")", "v", "=", "1.0", "-", "v", "# vertical flip to match GL convention", "u", ",", "v", "=", "u", "*", "tf", ".", "to_float", "(", "w", ")", "-", "0.5", ",", "v", "*", "tf", ".", "to_float", "(", "h", ")", "-", "0.5", "u0", ",", "u1", "=", "tf", ".", "floor", "(", "u", ")", ",", "tf", ".", "ceil", "(", "u", ")", "v0", ",", "v1", "=", "tf", ".", "floor", "(", "v", ")", ",", "tf", ".", "ceil", "(", "v", ")", "uf", ",", "vf", "=", "u", "-", "u0", ",", "v", "-", "v0", "u0", ",", "u1", ",", "v0", ",", "v1", "=", "map", "(", "tf", ".", "to_int32", ",", "[", "u0", ",", "u1", ",", "v0", ",", "v1", "]", ")", "def", "sample", "(", "u", ",", "v", ")", ":", "vu", "=", "tf", ".", "concat", "(", "[", "v", "%", "h", ",", "u", "%", "w", "]", ",", "axis", "=", "-", "1", ")", "return", "tf", ".", "gather_nd", "(", "texture", ",", "vu", ")", "s00", ",", "s01", "=", "sample", "(", "u0", ",", "v0", ")", ",", "sample", "(", "u0", ",", "v1", ")", "s10", ",", "s11", "=", "sample", "(", "u1", ",", "v0", ")", ",", "sample", "(", "u1", ",", "v1", ")", "s0", "=", "s00", "*", "(", "1.0", "-", "vf", ")", "+", "s01", "*", "vf", "s1", "=", "s10", "*", "(", "1.0", "-", "vf", ")", "+", "s11", "*", "vf", "s", "=", "s0", "*", "(", "1.0", "-", "uf", ")", "+", "s1", "*", "uf", "return", "s" ]
Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values.
[ "Build", "bilinear", "texture", "sampling", "graph", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L118-L149
26,475
tensorflow/lucid
lucid/modelzoo/other_models/InceptionV1.py
_populate_inception_bottlenecks
def _populate_inception_bottlenecks(scope): """Add Inception bottlenecks and their pre-Relu versions to the graph.""" graph = tf.get_default_graph() for op in graph.get_operations(): if op.name.startswith(scope+'/') and 'Concat' in op.type: name = op.name.split('/')[1] pre_relus = [] for tower in op.inputs[1:]: if tower.op.type == 'Relu': tower = tower.op.inputs[0] pre_relus.append(tower) concat_name = scope + '/' + name + '_pre_relu' _ = tf.concat(pre_relus, -1, name=concat_name)
python
def _populate_inception_bottlenecks(scope): """Add Inception bottlenecks and their pre-Relu versions to the graph.""" graph = tf.get_default_graph() for op in graph.get_operations(): if op.name.startswith(scope+'/') and 'Concat' in op.type: name = op.name.split('/')[1] pre_relus = [] for tower in op.inputs[1:]: if tower.op.type == 'Relu': tower = tower.op.inputs[0] pre_relus.append(tower) concat_name = scope + '/' + name + '_pre_relu' _ = tf.concat(pre_relus, -1, name=concat_name)
[ "def", "_populate_inception_bottlenecks", "(", "scope", ")", ":", "graph", "=", "tf", ".", "get_default_graph", "(", ")", "for", "op", "in", "graph", ".", "get_operations", "(", ")", ":", "if", "op", ".", "name", ".", "startswith", "(", "scope", "+", "'/'", ")", "and", "'Concat'", "in", "op", ".", "type", ":", "name", "=", "op", ".", "name", ".", "split", "(", "'/'", ")", "[", "1", "]", "pre_relus", "=", "[", "]", "for", "tower", "in", "op", ".", "inputs", "[", "1", ":", "]", ":", "if", "tower", ".", "op", ".", "type", "==", "'Relu'", ":", "tower", "=", "tower", ".", "op", ".", "inputs", "[", "0", "]", "pre_relus", ".", "append", "(", "tower", ")", "concat_name", "=", "scope", "+", "'/'", "+", "name", "+", "'_pre_relu'", "_", "=", "tf", ".", "concat", "(", "pre_relus", ",", "-", "1", ",", "name", "=", "concat_name", ")" ]
Add Inception bottlenecks and their pre-Relu versions to the graph.
[ "Add", "Inception", "bottlenecks", "and", "their", "pre", "-", "Relu", "versions", "to", "the", "graph", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/other_models/InceptionV1.py#L22-L34
26,476
tensorflow/lucid
lucid/optvis/objectives.py
wrap_objective
def wrap_objective(f, *args, **kwds): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python. """ objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str return Objective(objective_func, objective_name, description)
python
def wrap_objective(f, *args, **kwds): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python. """ objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str return Objective(objective_func, objective_name, description)
[ "def", "wrap_objective", "(", "f", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "objective_func", "=", "f", "(", "*", "args", ",", "*", "*", "kwds", ")", "objective_name", "=", "f", ".", "__name__", "args_str", "=", "\" [\"", "+", "\", \"", ".", "join", "(", "[", "_make_arg_str", "(", "arg", ")", "for", "arg", "in", "args", "]", ")", "+", "\"]\"", "description", "=", "objective_name", ".", "title", "(", ")", "+", "args_str", "return", "Objective", "(", "objective_func", ",", "objective_name", ",", "description", ")" ]
Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python.
[ "Decorator", "for", "creating", "Objective", "factories", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L117-L129
26,477
tensorflow/lucid
lucid/optvis/objectives.py
neuron
def neuron(layer_name, channel_n, x=None, y=None, batch=None): """Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+ """ def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return layer[:, x_, y_, channel_n] else: return layer[batch, x_, y_, channel_n] return inner
python
def neuron(layer_name, channel_n, x=None, y=None, batch=None): """Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+ """ def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return layer[:, x_, y_, channel_n] else: return layer[batch, x_, y_, channel_n] return inner
[ "def", "neuron", "(", "layer_name", ",", "channel_n", ",", "x", "=", "None", ",", "y", "=", "None", ",", "batch", "=", "None", ")", ":", "def", "inner", "(", "T", ")", ":", "layer", "=", "T", "(", "layer_name", ")", "shape", "=", "tf", ".", "shape", "(", "layer", ")", "x_", "=", "shape", "[", "1", "]", "//", "2", "if", "x", "is", "None", "else", "x", "y_", "=", "shape", "[", "2", "]", "//", "2", "if", "y", "is", "None", "else", "y", "if", "batch", "is", "None", ":", "return", "layer", "[", ":", ",", "x_", ",", "y_", ",", "channel_n", "]", "else", ":", "return", "layer", "[", "batch", ",", "x_", ",", "y_", ",", "channel_n", "]", "return", "inner" ]
Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+
[ "Visualize", "a", "single", "neuron", "of", "a", "single", "channel", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L133-L161
26,478
tensorflow/lucid
lucid/optvis/objectives.py
channel
def channel(layer, n_channel, batch=None): """Visualize a single channel""" if batch is None: return lambda T: tf.reduce_mean(T(layer)[..., n_channel]) else: return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
python
def channel(layer, n_channel, batch=None): """Visualize a single channel""" if batch is None: return lambda T: tf.reduce_mean(T(layer)[..., n_channel]) else: return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
[ "def", "channel", "(", "layer", ",", "n_channel", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "...", ",", "n_channel", "]", ")", "else", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "batch", ",", "...", ",", "n_channel", "]", ")" ]
Visualize a single channel
[ "Visualize", "a", "single", "channel" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L165-L170
26,479
tensorflow/lucid
lucid/optvis/objectives.py
direction
def direction(layer, vec, batch=None, cossim_pow=0): """Visualize a direction""" if batch is None: vec = vec[None, None, None] return lambda T: _dot_cossim(T(layer), vec) else: vec = vec[None, None] return lambda T: _dot_cossim(T(layer)[batch], vec)
python
def direction(layer, vec, batch=None, cossim_pow=0): """Visualize a direction""" if batch is None: vec = vec[None, None, None] return lambda T: _dot_cossim(T(layer), vec) else: vec = vec[None, None] return lambda T: _dot_cossim(T(layer)[batch], vec)
[ "def", "direction", "(", "layer", ",", "vec", ",", "batch", "=", "None", ",", "cossim_pow", "=", "0", ")", ":", "if", "batch", "is", "None", ":", "vec", "=", "vec", "[", "None", ",", "None", ",", "None", "]", "return", "lambda", "T", ":", "_dot_cossim", "(", "T", "(", "layer", ")", ",", "vec", ")", "else", ":", "vec", "=", "vec", "[", "None", ",", "None", "]", "return", "lambda", "T", ":", "_dot_cossim", "(", "T", "(", "layer", ")", "[", "batch", "]", ",", "vec", ")" ]
Visualize a direction
[ "Visualize", "a", "direction" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L189-L196
26,480
tensorflow/lucid
lucid/optvis/objectives.py
L1
def L1(layer="input", constant=0, batch=None): """L1 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant)) else: return lambda T: tf.reduce_sum(tf.abs(T(layer)[batch] - constant))
python
def L1(layer="input", constant=0, batch=None): """L1 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant)) else: return lambda T: tf.reduce_sum(tf.abs(T(layer)[batch] - constant))
[ "def", "L1", "(", "layer", "=", "\"input\"", ",", "constant", "=", "0", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_sum", "(", "tf", ".", "abs", "(", "T", "(", "layer", ")", "-", "constant", ")", ")", "else", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_sum", "(", "tf", ".", "abs", "(", "T", "(", "layer", ")", "[", "batch", "]", "-", "constant", ")", ")" ]
L1 norm of layer. Generally used as penalty.
[ "L1", "norm", "of", "layer", ".", "Generally", "used", "as", "penalty", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L247-L252
26,481
tensorflow/lucid
lucid/optvis/objectives.py
L2
def L2(layer="input", constant=0, epsilon=1e-6, batch=None): """L2 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2)) else: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer)[batch] - constant) ** 2))
python
def L2(layer="input", constant=0, epsilon=1e-6, batch=None): """L2 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2)) else: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer)[batch] - constant) ** 2))
[ "def", "L2", "(", "layer", "=", "\"input\"", ",", "constant", "=", "0", ",", "epsilon", "=", "1e-6", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "sqrt", "(", "epsilon", "+", "tf", ".", "reduce_sum", "(", "(", "T", "(", "layer", ")", "-", "constant", ")", "**", "2", ")", ")", "else", ":", "return", "lambda", "T", ":", "tf", ".", "sqrt", "(", "epsilon", "+", "tf", ".", "reduce_sum", "(", "(", "T", "(", "layer", ")", "[", "batch", "]", "-", "constant", ")", "**", "2", ")", ")" ]
L2 norm of layer. Generally used as penalty.
[ "L2", "norm", "of", "layer", ".", "Generally", "used", "as", "penalty", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L256-L261
26,482
tensorflow/lucid
lucid/optvis/objectives.py
blur_input_each_step
def blur_input_each_step(): """Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015. """ def inner(T): t_input = T("input") t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner
python
def blur_input_each_step(): """Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015. """ def inner(T): t_input = T("input") t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner
[ "def", "blur_input_each_step", "(", ")", ":", "def", "inner", "(", "T", ")", ":", "t_input", "=", "T", "(", "\"input\"", ")", "t_input_blurred", "=", "tf", ".", "stop_gradient", "(", "_tf_blur", "(", "t_input", ")", ")", "return", "0.5", "*", "tf", ".", "reduce_sum", "(", "(", "t_input", "-", "t_input_blurred", ")", "**", "2", ")", "return", "inner" ]
Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015.
[ "Minimizing", "this", "objective", "is", "equivelant", "to", "blurring", "input", "each", "step", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L277-L291
26,483
tensorflow/lucid
lucid/optvis/objectives.py
channel_interpolate
def channel_interpolate(layer1, n_channel1, layer2, n_channel2): """Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective """ def inner(T): batch_n = T(layer1).get_shape().as_list()[0] arr1 = T(layer1)[..., n_channel1] arr2 = T(layer2)[..., n_channel2] weights = (np.arange(batch_n)/float(batch_n-1)) S = 0 for n in range(batch_n): S += (1-weights[n]) * tf.reduce_mean(arr1[n]) S += weights[n] * tf.reduce_mean(arr2[n]) return S return inner
python
def channel_interpolate(layer1, n_channel1, layer2, n_channel2): """Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective """ def inner(T): batch_n = T(layer1).get_shape().as_list()[0] arr1 = T(layer1)[..., n_channel1] arr2 = T(layer2)[..., n_channel2] weights = (np.arange(batch_n)/float(batch_n-1)) S = 0 for n in range(batch_n): S += (1-weights[n]) * tf.reduce_mean(arr1[n]) S += weights[n] * tf.reduce_mean(arr2[n]) return S return inner
[ "def", "channel_interpolate", "(", "layer1", ",", "n_channel1", ",", "layer2", ",", "n_channel2", ")", ":", "def", "inner", "(", "T", ")", ":", "batch_n", "=", "T", "(", "layer1", ")", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "arr1", "=", "T", "(", "layer1", ")", "[", "...", ",", "n_channel1", "]", "arr2", "=", "T", "(", "layer2", ")", "[", "...", ",", "n_channel2", "]", "weights", "=", "(", "np", ".", "arange", "(", "batch_n", ")", "/", "float", "(", "batch_n", "-", "1", ")", ")", "S", "=", "0", "for", "n", "in", "range", "(", "batch_n", ")", ":", "S", "+=", "(", "1", "-", "weights", "[", "n", "]", ")", "*", "tf", ".", "reduce_mean", "(", "arr1", "[", "n", "]", ")", "S", "+=", "weights", "[", "n", "]", "*", "tf", ".", "reduce_mean", "(", "arr2", "[", "n", "]", ")", "return", "S", "return", "inner" ]
Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective
[ "Interpolate", "between", "layer1", "n_channel1", "and", "layer2", "n_channel2", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L303-L328
26,484
tensorflow/lucid
lucid/optvis/objectives.py
penalize_boundary_complexity
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5): """Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective. """ def inner(T): arr = T("input") # print shp if mask is None: mask_ = np.ones(shp) mask_[:, w:-w, w:-w] = 0 else: mask_ = mask blur = _tf_blur(arr, w=5) diffs = (blur-arr)**2 diffs += 0.8*(arr-C)**2 return -tf.reduce_sum(diffs*mask_) return inner
python
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5): """Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective. """ def inner(T): arr = T("input") # print shp if mask is None: mask_ = np.ones(shp) mask_[:, w:-w, w:-w] = 0 else: mask_ = mask blur = _tf_blur(arr, w=5) diffs = (blur-arr)**2 diffs += 0.8*(arr-C)**2 return -tf.reduce_sum(diffs*mask_) return inner
[ "def", "penalize_boundary_complexity", "(", "shp", ",", "w", "=", "20", ",", "mask", "=", "None", ",", "C", "=", "0.5", ")", ":", "def", "inner", "(", "T", ")", ":", "arr", "=", "T", "(", "\"input\"", ")", "# print shp", "if", "mask", "is", "None", ":", "mask_", "=", "np", ".", "ones", "(", "shp", ")", "mask_", "[", ":", ",", "w", ":", "-", "w", ",", "w", ":", "-", "w", "]", "=", "0", "else", ":", "mask_", "=", "mask", "blur", "=", "_tf_blur", "(", "arr", ",", "w", "=", "5", ")", "diffs", "=", "(", "blur", "-", "arr", ")", "**", "2", "diffs", "+=", "0.8", "*", "(", "arr", "-", "C", ")", "**", "2", "return", "-", "tf", ".", "reduce_sum", "(", "diffs", "*", "mask_", ")", "return", "inner" ]
Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective.
[ "Encourage", "the", "boundaries", "of", "an", "image", "to", "have", "less", "variation", "and", "of", "color", "C", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L332-L358
26,485
tensorflow/lucid
lucid/optvis/objectives.py
alignment
def alignment(layer, decay_ratio=2): """Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective. """ def inner(T): batch_n = T(layer).get_shape().as_list()[0] arr = T(layer) accum = 0 for d in [1, 2, 3, 4]: for i in range(batch_n - d): a, b = i, i+d arr1, arr2 = arr[a], arr[b] accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d) return -accum return inner
python
def alignment(layer, decay_ratio=2): """Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective. """ def inner(T): batch_n = T(layer).get_shape().as_list()[0] arr = T(layer) accum = 0 for d in [1, 2, 3, 4]: for i in range(batch_n - d): a, b = i, i+d arr1, arr2 = arr[a], arr[b] accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d) return -accum return inner
[ "def", "alignment", "(", "layer", ",", "decay_ratio", "=", "2", ")", ":", "def", "inner", "(", "T", ")", ":", "batch_n", "=", "T", "(", "layer", ")", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "arr", "=", "T", "(", "layer", ")", "accum", "=", "0", "for", "d", "in", "[", "1", ",", "2", ",", "3", ",", "4", "]", ":", "for", "i", "in", "range", "(", "batch_n", "-", "d", ")", ":", "a", ",", "b", "=", "i", ",", "i", "+", "d", "arr1", ",", "arr2", "=", "arr", "[", "a", "]", ",", "arr", "[", "b", "]", "accum", "+=", "tf", ".", "reduce_mean", "(", "(", "arr1", "-", "arr2", ")", "**", "2", ")", "/", "decay_ratio", "**", "float", "(", "d", ")", "return", "-", "accum", "return", "inner" ]
Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective.
[ "Encourage", "neighboring", "images", "to", "be", "similar", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L362-L393
26,486
tensorflow/lucid
lucid/optvis/objectives.py
diversity
def diversity(layer): """Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective. """ def inner(T): layer_t = T(layer) batch_n, _, _, channels = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, -1, channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10) return sum([ sum([ tf.reduce_sum(grams[i]*grams[j]) for j in range(batch_n) if j != i]) for i in range(batch_n)]) / batch_n return inner
python
def diversity(layer): """Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective. """ def inner(T): layer_t = T(layer) batch_n, _, _, channels = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, -1, channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10) return sum([ sum([ tf.reduce_sum(grams[i]*grams[j]) for j in range(batch_n) if j != i]) for i in range(batch_n)]) / batch_n return inner
[ "def", "diversity", "(", "layer", ")", ":", "def", "inner", "(", "T", ")", ":", "layer_t", "=", "T", "(", "layer", ")", "batch_n", ",", "_", ",", "_", ",", "channels", "=", "layer_t", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "flattened", "=", "tf", ".", "reshape", "(", "layer_t", ",", "[", "batch_n", ",", "-", "1", ",", "channels", "]", ")", "grams", "=", "tf", ".", "matmul", "(", "flattened", ",", "flattened", ",", "transpose_a", "=", "True", ")", "grams", "=", "tf", ".", "nn", ".", "l2_normalize", "(", "grams", ",", "axis", "=", "[", "1", ",", "2", "]", ",", "epsilon", "=", "1e-10", ")", "return", "sum", "(", "[", "sum", "(", "[", "tf", ".", "reduce_sum", "(", "grams", "[", "i", "]", "*", "grams", "[", "j", "]", ")", "for", "j", "in", "range", "(", "batch_n", ")", "if", "j", "!=", "i", "]", ")", "for", "i", "in", "range", "(", "batch_n", ")", "]", ")", "/", "batch_n", "return", "inner" ]
Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective.
[ "Encourage", "diversity", "between", "each", "batch", "element", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L396-L425
26,487
tensorflow/lucid
lucid/optvis/objectives.py
input_diff
def input_diff(orig_img): """Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples. """ def inner(T): diff = T("input") - orig_img return tf.sqrt(tf.reduce_mean(diff**2)) return inner
python
def input_diff(orig_img): """Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples. """ def inner(T): diff = T("input") - orig_img return tf.sqrt(tf.reduce_mean(diff**2)) return inner
[ "def", "input_diff", "(", "orig_img", ")", ":", "def", "inner", "(", "T", ")", ":", "diff", "=", "T", "(", "\"input\"", ")", "-", "orig_img", "return", "tf", ".", "sqrt", "(", "tf", ".", "reduce_mean", "(", "diff", "**", "2", ")", ")", "return", "inner" ]
Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples.
[ "Average", "L2", "difference", "between", "optimized", "image", "and", "orig_img", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L429-L438
26,488
tensorflow/lucid
lucid/optvis/objectives.py
class_logit
def class_logit(layer, label): """Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit. """ def inner(T): if isinstance(label, int): class_n = label else: class_n = T("labels").index(label) logits = T(layer) logit = tf.reduce_sum(logits[:, class_n]) return logit return inner
python
def class_logit(layer, label): """Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit. """ def inner(T): if isinstance(label, int): class_n = label else: class_n = T("labels").index(label) logits = T(layer) logit = tf.reduce_sum(logits[:, class_n]) return logit return inner
[ "def", "class_logit", "(", "layer", ",", "label", ")", ":", "def", "inner", "(", "T", ")", ":", "if", "isinstance", "(", "label", ",", "int", ")", ":", "class_n", "=", "label", "else", ":", "class_n", "=", "T", "(", "\"labels\"", ")", ".", "index", "(", "label", ")", "logits", "=", "T", "(", "layer", ")", "logit", "=", "tf", ".", "reduce_sum", "(", "logits", "[", ":", ",", "class_n", "]", ")", "return", "logit", "return", "inner" ]
Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit.
[ "Like", "channel", "but", "for", "softmax", "layers", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L442-L461
26,489
tensorflow/lucid
lucid/optvis/objectives.py
as_objective
def as_objective(obj): """Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective """ if isinstance(obj, Objective): return obj elif callable(obj): return obj elif isinstance(obj, str): layer, n = obj.split(":") layer, n = layer.strip(), int(n) return channel(layer, n)
python
def as_objective(obj): """Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective """ if isinstance(obj, Objective): return obj elif callable(obj): return obj elif isinstance(obj, str): layer, n = obj.split(":") layer, n = layer.strip(), int(n) return channel(layer, n)
[ "def", "as_objective", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Objective", ")", ":", "return", "obj", "elif", "callable", "(", "obj", ")", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "layer", ",", "n", "=", "obj", ".", "split", "(", "\":\"", ")", "layer", ",", "n", "=", "layer", ".", "strip", "(", ")", ",", "int", "(", "n", ")", "return", "channel", "(", "layer", ",", "n", ")" ]
Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective
[ "Convert", "obj", "into", "Objective", "class", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L464-L483
26,490
tensorflow/lucid
lucid/optvis/param/unit_balls.py
_constrain_L2_grad
def _constrain_L2_grad(op, grad): """Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient. """ inp = op.inputs[0] inp_norm = tf.norm(inp) unit_inp = inp / inp_norm grad_projection = dot(unit_inp, grad) parallel_grad = unit_inp * grad_projection is_in_ball = tf.less_equal(inp_norm, 1) is_pointed_inward = tf.less(grad_projection, 0) allow_grad = tf.logical_or(is_in_ball, is_pointed_inward) clip_grad = tf.logical_not(allow_grad) clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad) return clipped_grad
python
def _constrain_L2_grad(op, grad): """Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient. """ inp = op.inputs[0] inp_norm = tf.norm(inp) unit_inp = inp / inp_norm grad_projection = dot(unit_inp, grad) parallel_grad = unit_inp * grad_projection is_in_ball = tf.less_equal(inp_norm, 1) is_pointed_inward = tf.less(grad_projection, 0) allow_grad = tf.logical_or(is_in_ball, is_pointed_inward) clip_grad = tf.logical_not(allow_grad) clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad) return clipped_grad
[ "def", "_constrain_L2_grad", "(", "op", ",", "grad", ")", ":", "inp", "=", "op", ".", "inputs", "[", "0", "]", "inp_norm", "=", "tf", ".", "norm", "(", "inp", ")", "unit_inp", "=", "inp", "/", "inp_norm", "grad_projection", "=", "dot", "(", "unit_inp", ",", "grad", ")", "parallel_grad", "=", "unit_inp", "*", "grad_projection", "is_in_ball", "=", "tf", ".", "less_equal", "(", "inp_norm", ",", "1", ")", "is_pointed_inward", "=", "tf", ".", "less", "(", "grad_projection", ",", "0", ")", "allow_grad", "=", "tf", ".", "logical_or", "(", "is_in_ball", ",", "is_pointed_inward", ")", "clip_grad", "=", "tf", ".", "logical_not", "(", "allow_grad", ")", "clipped_grad", "=", "tf", ".", "cond", "(", "clip_grad", ",", "lambda", ":", "grad", "-", "parallel_grad", ",", "lambda", ":", "grad", ")", "return", "clipped_grad" ]
Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient.
[ "Gradient", "for", "constrained", "optimization", "on", "an", "L2", "unit", "ball", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L20-L47
26,491
tensorflow/lucid
lucid/optvis/param/unit_balls.py
unit_ball_L2
def unit_ball_L2(shape): """A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) return constrain_L2(x)
python
def unit_ball_L2(shape): """A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) return constrain_L2(x)
[ "def", "unit_ball_L2", "(", "shape", ")", ":", "x", "=", "tf", ".", "Variable", "(", "tf", ".", "zeros", "(", "shape", ")", ")", "return", "constrain_L2", "(", "x", ")" ]
A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code.
[ "A", "tensorflow", "variable", "tranfomed", "to", "be", "constrained", "in", "a", "L2", "unit", "ball", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L55-L62
26,492
tensorflow/lucid
lucid/optvis/param/unit_balls.py
unit_ball_L_inf
def unit_ball_L_inf(shape, precondition=True): """A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) if precondition: return constrain_L_inf_precondition(x) else: return constrain_L_inf(x)
python
def unit_ball_L_inf(shape, precondition=True): """A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) if precondition: return constrain_L_inf_precondition(x) else: return constrain_L_inf(x)
[ "def", "unit_ball_L_inf", "(", "shape", ",", "precondition", "=", "True", ")", ":", "x", "=", "tf", ".", "Variable", "(", "tf", ".", "zeros", "(", "shape", ")", ")", "if", "precondition", ":", "return", "constrain_L_inf_precondition", "(", "x", ")", "else", ":", "return", "constrain_L_inf", "(", "x", ")" ]
A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code.
[ "A", "tensorflow", "variable", "tranfomed", "to", "be", "constrained", "in", "a", "L_inf", "unit", "ball", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L106-L119
26,493
tensorflow/lucid
lucid/optvis/render.py
render_vis
def render_vis(model, objective_f, param_f=None, optimizer=None, transforms=None, thresholds=(512,), print_objectives=None, verbose=True, relu_gradient_override=True, use_fixed_seed=False): """Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. 
As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. Usually that will mean one or multiple channel visualizations stacked on top of each other. """ with tf.Graph().as_default() as graph, tf.Session() as sess: if use_fixed_seed: # does not mean results are reproducible, see Args doc tf.set_random_seed(0) T = make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override) print_objective_func = make_print_objective_func(print_objectives, T) loss, vis_op, t_image = T("loss"), T("vis_op"), T("input") tf.global_variables_initializer().run() images = [] try: for i in range(max(thresholds)+1): loss_, _ = sess.run([loss, vis_op]) if i in thresholds: vis = t_image.eval() images.append(vis) if verbose: print(i, loss_) print_objective_func(sess) show(np.hstack(vis)) except KeyboardInterrupt: log.warning("Interrupted optimization at step {:d}.".format(i+1)) vis = t_image.eval() show(np.hstack(vis)) return images
python
def render_vis(model, objective_f, param_f=None, optimizer=None, transforms=None, thresholds=(512,), print_objectives=None, verbose=True, relu_gradient_override=True, use_fixed_seed=False): """Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. 
As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. Usually that will mean one or multiple channel visualizations stacked on top of each other. """ with tf.Graph().as_default() as graph, tf.Session() as sess: if use_fixed_seed: # does not mean results are reproducible, see Args doc tf.set_random_seed(0) T = make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override) print_objective_func = make_print_objective_func(print_objectives, T) loss, vis_op, t_image = T("loss"), T("vis_op"), T("input") tf.global_variables_initializer().run() images = [] try: for i in range(max(thresholds)+1): loss_, _ = sess.run([loss, vis_op]) if i in thresholds: vis = t_image.eval() images.append(vis) if verbose: print(i, loss_) print_objective_func(sess) show(np.hstack(vis)) except KeyboardInterrupt: log.warning("Interrupted optimization at step {:d}.".format(i+1)) vis = t_image.eval() show(np.hstack(vis)) return images
[ "def", "render_vis", "(", "model", ",", "objective_f", ",", "param_f", "=", "None", ",", "optimizer", "=", "None", ",", "transforms", "=", "None", ",", "thresholds", "=", "(", "512", ",", ")", ",", "print_objectives", "=", "None", ",", "verbose", "=", "True", ",", "relu_gradient_override", "=", "True", ",", "use_fixed_seed", "=", "False", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", "as", "graph", ",", "tf", ".", "Session", "(", ")", "as", "sess", ":", "if", "use_fixed_seed", ":", "# does not mean results are reproducible, see Args doc", "tf", ".", "set_random_seed", "(", "0", ")", "T", "=", "make_vis_T", "(", "model", ",", "objective_f", ",", "param_f", ",", "optimizer", ",", "transforms", ",", "relu_gradient_override", ")", "print_objective_func", "=", "make_print_objective_func", "(", "print_objectives", ",", "T", ")", "loss", ",", "vis_op", ",", "t_image", "=", "T", "(", "\"loss\"", ")", ",", "T", "(", "\"vis_op\"", ")", ",", "T", "(", "\"input\"", ")", "tf", ".", "global_variables_initializer", "(", ")", ".", "run", "(", ")", "images", "=", "[", "]", "try", ":", "for", "i", "in", "range", "(", "max", "(", "thresholds", ")", "+", "1", ")", ":", "loss_", ",", "_", "=", "sess", ".", "run", "(", "[", "loss", ",", "vis_op", "]", ")", "if", "i", "in", "thresholds", ":", "vis", "=", "t_image", ".", "eval", "(", ")", "images", ".", "append", "(", "vis", ")", "if", "verbose", ":", "print", "(", "i", ",", "loss_", ")", "print_objective_func", "(", "sess", ")", "show", "(", "np", ".", "hstack", "(", "vis", ")", ")", "except", "KeyboardInterrupt", ":", "log", ".", "warning", "(", "\"Interrupted optimization at step {:d}.\"", ".", "format", "(", "i", "+", "1", ")", ")", "vis", "=", "t_image", ".", "eval", "(", ")", "show", "(", "np", ".", "hstack", "(", "vis", ")", ")", "return", "images" ]
Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. 
Usually that will mean one or multiple channel visualizations stacked on top of each other.
[ "Flexible", "optimization", "-", "base", "feature", "vis", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/render.py#L44-L115
26,494
tensorflow/lucid
lucid/optvis/render.py
make_vis_T
def make_vis_T(model, objective_f, param_f=None, optimizer=None, transforms=None, relu_gradient_override=False): """Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. 
Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network """ # pylint: disable=unused-variable t_image = make_t_image(param_f) objective_f = objectives.as_objective(objective_f) transform_f = make_transform_f(transforms) optimizer = make_optimizer(optimizer, []) global_step = tf.train.get_or_create_global_step() init_global_step = tf.variables_initializer([global_step]) init_global_step.run() if relu_gradient_override: with gradient_override_map({'Relu': redirected_relu_grad, 'Relu6': redirected_relu6_grad}): T = import_model(model, transform_f(t_image), t_image) else: T = import_model(model, transform_f(t_image), t_image) loss = objective_f(T) vis_op = optimizer.minimize(-loss, global_step=global_step) local_vars = locals() # pylint: enable=unused-variable def T2(name): if name in local_vars: return local_vars[name] else: return T(name) return T2
python
def make_vis_T(model, objective_f, param_f=None, optimizer=None, transforms=None, relu_gradient_override=False): """Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. 
Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network """ # pylint: disable=unused-variable t_image = make_t_image(param_f) objective_f = objectives.as_objective(objective_f) transform_f = make_transform_f(transforms) optimizer = make_optimizer(optimizer, []) global_step = tf.train.get_or_create_global_step() init_global_step = tf.variables_initializer([global_step]) init_global_step.run() if relu_gradient_override: with gradient_override_map({'Relu': redirected_relu_grad, 'Relu6': redirected_relu6_grad}): T = import_model(model, transform_f(t_image), t_image) else: T = import_model(model, transform_f(t_image), t_image) loss = objective_f(T) vis_op = optimizer.minimize(-loss, global_step=global_step) local_vars = locals() # pylint: enable=unused-variable def T2(name): if name in local_vars: return local_vars[name] else: return T(name) return T2
[ "def", "make_vis_T", "(", "model", ",", "objective_f", ",", "param_f", "=", "None", ",", "optimizer", "=", "None", ",", "transforms", "=", "None", ",", "relu_gradient_override", "=", "False", ")", ":", "# pylint: disable=unused-variable", "t_image", "=", "make_t_image", "(", "param_f", ")", "objective_f", "=", "objectives", ".", "as_objective", "(", "objective_f", ")", "transform_f", "=", "make_transform_f", "(", "transforms", ")", "optimizer", "=", "make_optimizer", "(", "optimizer", ",", "[", "]", ")", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", "init_global_step", "=", "tf", ".", "variables_initializer", "(", "[", "global_step", "]", ")", "init_global_step", ".", "run", "(", ")", "if", "relu_gradient_override", ":", "with", "gradient_override_map", "(", "{", "'Relu'", ":", "redirected_relu_grad", ",", "'Relu6'", ":", "redirected_relu6_grad", "}", ")", ":", "T", "=", "import_model", "(", "model", ",", "transform_f", "(", "t_image", ")", ",", "t_image", ")", "else", ":", "T", "=", "import_model", "(", "model", ",", "transform_f", "(", "t_image", ")", ",", "t_image", ")", "loss", "=", "objective_f", "(", "T", ")", "vis_op", "=", "optimizer", ".", "minimize", "(", "-", "loss", ",", "global_step", "=", "global_step", ")", "local_vars", "=", "locals", "(", ")", "# pylint: enable=unused-variable", "def", "T2", "(", "name", ")", ":", "if", "name", "in", "local_vars", ":", "return", "local_vars", "[", "name", "]", "else", ":", "return", "T", "(", "name", ")", "return", "T2" ]
Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network
[ "Even", "more", "flexible", "optimization", "-", "base", "feature", "vis", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/render.py#L118-L192
26,495
tensorflow/lucid
lucid/scratch/atlas_pipeline/grid.py
write_grid_local
def write_grid_local(tiles, params): """ Write a file for each tile """ # TODO: this isn't being used right now, will need to be # ported to gfile if we want to keep it for ti,tj,tile in enumerate_tiles(tiles): filename = "{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}".format(ti=ti, tj=tj, **params) #directory=directory, name=name, n_layer=n_layer, n_tile=n_tile, # write out the tile as a npz print("saving", filename + ".npz") np.savez_compressed(filename + ".npz", **tile) # write out the tile as a csv print("saving", filename + ".csv") df = pd.DataFrame(tile) df.to_csv(filename + ".csv", index=False)
python
def write_grid_local(tiles, params): """ Write a file for each tile """ # TODO: this isn't being used right now, will need to be # ported to gfile if we want to keep it for ti,tj,tile in enumerate_tiles(tiles): filename = "{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}".format(ti=ti, tj=tj, **params) #directory=directory, name=name, n_layer=n_layer, n_tile=n_tile, # write out the tile as a npz print("saving", filename + ".npz") np.savez_compressed(filename + ".npz", **tile) # write out the tile as a csv print("saving", filename + ".csv") df = pd.DataFrame(tile) df.to_csv(filename + ".csv", index=False)
[ "def", "write_grid_local", "(", "tiles", ",", "params", ")", ":", "# TODO: this isn't being used right now, will need to be", "# ported to gfile if we want to keep it", "for", "ti", ",", "tj", ",", "tile", "in", "enumerate_tiles", "(", "tiles", ")", ":", "filename", "=", "\"{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}\"", ".", "format", "(", "ti", "=", "ti", ",", "tj", "=", "tj", ",", "*", "*", "params", ")", "#directory=directory, name=name, n_layer=n_layer, n_tile=n_tile, ", "# write out the tile as a npz", "print", "(", "\"saving\"", ",", "filename", "+", "\".npz\"", ")", "np", ".", "savez_compressed", "(", "filename", "+", "\".npz\"", ",", "*", "*", "tile", ")", "# write out the tile as a csv", "print", "(", "\"saving\"", ",", "filename", "+", "\".csv\"", ")", "df", "=", "pd", ".", "DataFrame", "(", "tile", ")", "df", ".", "to_csv", "(", "filename", "+", "\".csv\"", ",", "index", "=", "False", ")" ]
Write a file for each tile
[ "Write", "a", "file", "for", "each", "tile" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/grid.py#L70-L84
26,496
tensorflow/lucid
lucid/misc/io/loading.py
_load_img
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs): """Load image file as numpy array.""" image_pil = PIL.Image.open(handle, **kwargs) # resize the image to the requested size, if one was specified if size is not None: if len(size) > 2: size = size[:2] log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size)) image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS) image_array = np.asarray(image_pil) # remove alpha channel if it contains no information # if image_array.shape[-1] > 3 and 'A' not in image_pil.mode: # image_array = image_array[..., :-1] image_dtype = image_array.dtype image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc. # using np.divide should avoid an extra copy compared to doing division first ndimage = np.divide(image_array, image_max_value, dtype=target_dtype) rank = len(ndimage.shape) if rank == 3: return ndimage elif rank == 2: return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2) else: message = "Loaded image has more dimensions than expected: {}".format(rank) raise NotImplementedError(message)
python
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs): """Load image file as numpy array.""" image_pil = PIL.Image.open(handle, **kwargs) # resize the image to the requested size, if one was specified if size is not None: if len(size) > 2: size = size[:2] log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size)) image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS) image_array = np.asarray(image_pil) # remove alpha channel if it contains no information # if image_array.shape[-1] > 3 and 'A' not in image_pil.mode: # image_array = image_array[..., :-1] image_dtype = image_array.dtype image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc. # using np.divide should avoid an extra copy compared to doing division first ndimage = np.divide(image_array, image_max_value, dtype=target_dtype) rank = len(ndimage.shape) if rank == 3: return ndimage elif rank == 2: return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2) else: message = "Loaded image has more dimensions than expected: {}".format(rank) raise NotImplementedError(message)
[ "def", "_load_img", "(", "handle", ",", "target_dtype", "=", "np", ".", "float32", ",", "size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "image_pil", "=", "PIL", ".", "Image", ".", "open", "(", "handle", ",", "*", "*", "kwargs", ")", "# resize the image to the requested size, if one was specified", "if", "size", "is", "not", "None", ":", "if", "len", "(", "size", ")", ">", "2", ":", "size", "=", "size", "[", ":", "2", "]", "log", ".", "warning", "(", "\"`_load_img()` received size: {}, trimming to first two dims!\"", ".", "format", "(", "size", ")", ")", "image_pil", "=", "image_pil", ".", "resize", "(", "size", ",", "resample", "=", "PIL", ".", "Image", ".", "LANCZOS", ")", "image_array", "=", "np", ".", "asarray", "(", "image_pil", ")", "# remove alpha channel if it contains no information", "# if image_array.shape[-1] > 3 and 'A' not in image_pil.mode:", "# image_array = image_array[..., :-1]", "image_dtype", "=", "image_array", ".", "dtype", "image_max_value", "=", "np", ".", "iinfo", "(", "image_dtype", ")", ".", "max", "# ...for uint8 that's 255, etc.", "# using np.divide should avoid an extra copy compared to doing division first", "ndimage", "=", "np", ".", "divide", "(", "image_array", ",", "image_max_value", ",", "dtype", "=", "target_dtype", ")", "rank", "=", "len", "(", "ndimage", ".", "shape", ")", "if", "rank", "==", "3", ":", "return", "ndimage", "elif", "rank", "==", "2", ":", "return", "np", ".", "repeat", "(", "np", ".", "expand_dims", "(", "ndimage", ",", "axis", "=", "2", ")", ",", "3", ",", "axis", "=", "2", ")", "else", ":", "message", "=", "\"Loaded image has more dimensions than expected: {}\"", ".", "format", "(", "rank", ")", "raise", "NotImplementedError", "(", "message", ")" ]
Load image file as numpy array.
[ "Load", "image", "file", "as", "numpy", "array", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L47-L78
26,497
tensorflow/lucid
lucid/misc/io/loading.py
_load_text
def _load_text(handle, split=False, encoding="utf-8"): """Load and decode a string.""" string = handle.read().decode(encoding) return string.splitlines() if split else string
python
def _load_text(handle, split=False, encoding="utf-8"): """Load and decode a string.""" string = handle.read().decode(encoding) return string.splitlines() if split else string
[ "def", "_load_text", "(", "handle", ",", "split", "=", "False", ",", "encoding", "=", "\"utf-8\"", ")", ":", "string", "=", "handle", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")", "return", "string", ".", "splitlines", "(", ")", "if", "split", "else", "string" ]
Load and decode a string.
[ "Load", "and", "decode", "a", "string", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L86-L89
26,498
tensorflow/lucid
lucid/misc/io/loading.py
load
def load(url_or_handle, cache=None, **kwargs): """Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported. """ ext = get_extension(url_or_handle) try: loader = loaders[ext.lower()] message = "Using inferred loader '%s' due to passed file extension '%s'." log.debug(message, loader.__name__[6:], ext) return load_using_loader(url_or_handle, loader, cache, **kwargs) except KeyError: log.warning("Unknown extension '%s', attempting to load as image.", ext) try: with read_handle(url_or_handle, cache=cache) as handle: result = _load_img(handle) except Exception as e: message = "Could not load resource %s as image. Supported extensions: %s" log.error(message, url_or_handle, list(loaders)) raise RuntimeError(message.format(url_or_handle, list(loaders))) else: log.info("Unknown extension '%s' successfully loaded as image.", ext) return result
python
def load(url_or_handle, cache=None, **kwargs): """Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported. """ ext = get_extension(url_or_handle) try: loader = loaders[ext.lower()] message = "Using inferred loader '%s' due to passed file extension '%s'." log.debug(message, loader.__name__[6:], ext) return load_using_loader(url_or_handle, loader, cache, **kwargs) except KeyError: log.warning("Unknown extension '%s', attempting to load as image.", ext) try: with read_handle(url_or_handle, cache=cache) as handle: result = _load_img(handle) except Exception as e: message = "Could not load resource %s as image. Supported extensions: %s" log.error(message, url_or_handle, list(loaders)) raise RuntimeError(message.format(url_or_handle, list(loaders))) else: log.info("Unknown extension '%s' successfully loaded as image.", ext) return result
[ "def", "load", "(", "url_or_handle", ",", "cache", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ext", "=", "get_extension", "(", "url_or_handle", ")", "try", ":", "loader", "=", "loaders", "[", "ext", ".", "lower", "(", ")", "]", "message", "=", "\"Using inferred loader '%s' due to passed file extension '%s'.\"", "log", ".", "debug", "(", "message", ",", "loader", ".", "__name__", "[", "6", ":", "]", ",", "ext", ")", "return", "load_using_loader", "(", "url_or_handle", ",", "loader", ",", "cache", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "log", ".", "warning", "(", "\"Unknown extension '%s', attempting to load as image.\"", ",", "ext", ")", "try", ":", "with", "read_handle", "(", "url_or_handle", ",", "cache", "=", "cache", ")", "as", "handle", ":", "result", "=", "_load_img", "(", "handle", ")", "except", "Exception", "as", "e", ":", "message", "=", "\"Could not load resource %s as image. Supported extensions: %s\"", "log", ".", "error", "(", "message", ",", "url_or_handle", ",", "list", "(", "loaders", ")", ")", "raise", "RuntimeError", "(", "message", ".", "format", "(", "url_or_handle", ",", "list", "(", "loaders", ")", ")", ")", "else", ":", "log", ".", "info", "(", "\"Unknown extension '%s' successfully loaded as image.\"", ",", "ext", ")", "return", "result" ]
Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported.
[ "Load", "a", "file", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L120-L152
26,499
tensorflow/lucid
lucid/optvis/transform.py
crop_or_pad_to
def crop_or_pad_to(height, width): """Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs. """ def inner(t_image): return tf.image.resize_image_with_crop_or_pad(t_image, height, width) return inner
python
def crop_or_pad_to(height, width): """Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs. """ def inner(t_image): return tf.image.resize_image_with_crop_or_pad(t_image, height, width) return inner
[ "def", "crop_or_pad_to", "(", "height", ",", "width", ")", ":", "def", "inner", "(", "t_image", ")", ":", "return", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "t_image", ",", "height", ",", "width", ")", "return", "inner" ]
Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs.
[ "Ensures", "the", "specified", "spatial", "shape", "by", "either", "padding", "or", "cropping", ".", "Meant", "to", "be", "used", "as", "a", "last", "transform", "for", "architectures", "insisting", "on", "a", "specific", "spatial", "shape", "of", "their", "inputs", "." ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/transform.py#L154-L161