partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
train
DjangoStorageAdapter.create_many
Creates multiple statement entries.
chatterbot/storage/django_storage.py
def create_many(self, statements):
    """
    Creates multiple statement entries.
    """
    Statement = self.get_model('statement')
    Tag = self.get_model('tag')

    # Cache Tag rows by name so each distinct tag costs at most one
    # get_or_create query for the whole batch.
    seen_tags = {}

    for statement in statements:
        data = statement.serialize()
        tag_names = data.pop('tags', [])

        record = Statement(**data)

        # Populate search fields only when the caller left them empty.
        if not statement.search_text:
            record.search_text = self.tagger.get_bigram_pair_string(statement.text)

        if not statement.search_in_response_to and statement.in_response_to:
            record.search_in_response_to = self.tagger.get_bigram_pair_string(
                statement.in_response_to
            )

        record.save()

        tags = []
        for name in tag_names:
            tag = seen_tags.get(name)
            if tag is None:
                tag, _ = Tag.objects.get_or_create(name=name)
                seen_tags[name] = tag
            tags.append(tag)

        record.tags.add(*tags)
[ "Creates", "multiple", "statement", "entries", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L123-L157
[ "def", "create_many", "(", "self", ",", "statements", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "tag_cache", "=", "{", "}", "for", "statement", "in", "statement...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
DjangoStorageAdapter.update
Update the provided statement.
chatterbot/storage/django_storage.py
def update(self, statement):
    """
    Update the provided statement, creating a new database entry when
    the statement has not been saved yet.

    :param statement: The statement to persist.
    :returns: The saved Django ``Statement`` model instance.
    """
    Statement = self.get_model('statement')
    Tag = self.get_model('tag')

    # BUG FIX: ``hasattr(statement, 'id')`` is also true for *unsaved*
    # Django model instances (their ``id`` is simply None), which routed
    # new statements through ``save()`` without populating the search
    # fields. Check the value, not just the attribute's presence.
    if hasattr(statement, 'id') and statement.id is not None:
        statement.save()
    else:
        statement = Statement.objects.create(
            text=statement.text,
            search_text=self.tagger.get_bigram_pair_string(statement.text),
            conversation=statement.conversation,
            in_response_to=statement.in_response_to,
            search_in_response_to=self.tagger.get_bigram_pair_string(statement.in_response_to),
            created_at=statement.created_at
        )

    for _tag in statement.tags.all():
        # NOTE(review): ``_tag`` is a Tag model instance here; passing it
        # as ``name=`` relies on its string coercion — confirm intended.
        tag, _ = Tag.objects.get_or_create(name=_tag)
        statement.tags.add(tag)

    return statement
[ "Update", "the", "provided", "statement", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L159-L183
[ "def", "update", "(", "self", ",", "statement", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "if", "hasattr", "(", "statement", ",", "'id'", ")", ":", "statement...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
DjangoStorageAdapter.get_random
Returns a random statement from the database
chatterbot/storage/django_storage.py
def get_random(self):
    """
    Returns a random statement from the database
    """
    Statement = self.get_model('statement')

    # ``order_by('?')`` asks the database for a random ordering;
    # ``first()`` yields None when the table is empty.
    random_statement = Statement.objects.order_by('?').first()

    if random_statement is None:
        raise self.EmptyDatabaseException()

    return random_statement
[ "Returns", "a", "random", "statement", "from", "the", "database" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L185-L196
[ "def", "get_random", "(", "self", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "statement", "=", "Statement", ".", "objects", ".", "order_by", "(", "'?'", ")", ".", "first", "(", ")", "if", "statement", "is", "None", "...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
DjangoStorageAdapter.remove
Removes the statement that matches the input text. Removes any responses from statements if the response text matches the input text.
chatterbot/storage/django_storage.py
def remove(self, statement_text):
    """
    Removes the statement that matches the input text.
    Removes any responses from statements if the response text matches
    the input text.
    """
    Statement = self.get_model('statement')

    # Delete every row whose text matches exactly; the queryset delete
    # issues a single bulk DELETE.
    Statement.objects.filter(text=statement_text).delete()
[ "Removes", "the", "statement", "that", "matches", "the", "input", "text", ".", "Removes", "any", "responses", "from", "statements", "if", "the", "response", "text", "matches", "the", "input", "text", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L198-L208
[ "def", "remove", "(", "self", ",", "statement_text", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "statements", "=", "Statement", ".", "objects", ".", "filter", "(", "text", "=", "statement_text", ")", "statements", ".", "...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
DjangoStorageAdapter.drop
Remove all data from the database.
chatterbot/storage/django_storage.py
def drop(self):
    """
    Remove all data from the database.
    """
    # Clear statements first, then tags (same order as before).
    for model_name in ('statement', 'tag'):
        self.get_model(model_name).objects.all().delete()
[ "Remove", "all", "data", "from", "the", "database", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/django_storage.py#L210-L218
[ "def", "drop", "(", "self", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "Statement", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "Tag", "...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
clean_whitespace
Remove any consecutive whitespace characters from the statement text.
chatterbot/preprocessors.py
def clean_whitespace(statement):
    """
    Remove any consecutive whitespace characters from the statement text.

    :param statement: An object with a mutable ``text`` attribute.
    :returns: The same statement, with its ``text`` normalized in place.
    """
    import re

    # Replace linebreaks and tabs with spaces
    statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    # Remove any leading or trailing whitespace (comment typo "leeding" fixed)
    statement.text = statement.text.strip()

    # Collapse runs of consecutive spaces into a single space
    statement.text = re.sub(r' +', ' ', statement.text)

    return statement
[ "Remove", "any", "consecutive", "whitespace", "characters", "from", "the", "statement", "text", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/preprocessors.py#L6-L21
[ "def", "clean_whitespace", "(", "statement", ")", ":", "import", "re", "# Replace linebreaks and tabs with spaces", "statement", ".", "text", "=", "statement", ".", "text", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "replace", "(", "'\\r'", ",", "' '"...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
unescape_html
Convert escaped html characters into unescaped html characters. For example: "&lt;b&gt;" becomes "<b>".
chatterbot/preprocessors.py
def unescape_html(statement):
    """
    Convert escaped html characters into unescaped html characters.
    For example: "&lt;b&gt;" becomes "<b>".
    """
    from html import unescape

    statement.text = unescape(statement.text)
    return statement
[ "Convert", "escaped", "html", "characters", "into", "unescaped", "html", "characters", ".", "For", "example", ":", "&lt", ";", "b&gt", ";", "becomes", "<b", ">", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/preprocessors.py#L24-L33
[ "def", "unescape_html", "(", "statement", ")", ":", "import", "html", "statement", ".", "text", "=", "html", ".", "unescape", "(", "statement", ".", "text", ")", "return", "statement" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
convert_to_ascii
Converts unicode characters to ASCII character equivalents. For example: "på fédéral" becomes "pa federal".
chatterbot/preprocessors.py
def convert_to_ascii(statement):
    """
    Converts unicode characters to ASCII character equivalents.
    For example: "på fédéral" becomes "pa federal".
    """
    import unicodedata

    # Decompose accented characters, then drop the non-ASCII
    # combining marks left behind by the decomposition.
    decomposed = unicodedata.normalize('NFKD', statement.text)
    statement.text = decomposed.encode('ascii', 'ignore').decode('utf-8')
    return statement
[ "Converts", "unicode", "characters", "to", "ASCII", "character", "equivalents", ".", "For", "example", ":", "på", "fédéral", "becomes", "pa", "federal", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/preprocessors.py#L36-L47
[ "def", "convert_to_ascii", "(", "statement", ")", ":", "import", "unicodedata", "text", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "statement", ".", "text", ")", "text", "=", "text", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", ...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
convert_string_to_number
Convert strings to numbers
chatterbot/parsing.py
def convert_string_to_number(value):
    """
    Convert strings to numbers
    """
    # Missing values count as one ("a day ago" => 1 day)
    if value is None:
        return 1

    # Already numeric: pass through untouched
    if isinstance(value, int):
        return value

    # Plain digit strings parse directly
    if value.isdigit():
        return int(value)

    # Spelled-out numbers: look up each number word and sum the parts
    words = re.findall(numbers + '+', value.lower())
    return sum(NUMBERS[word] for word in words)
[ "Convert", "strings", "to", "numbers" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L506-L517
[ "def", "convert_string_to_number", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "1", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "value", "if", "value", ".", "isdigit", "(", ")", ":", "return", "int", "(", ...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
convert_time_to_hour_minute
Convert time to hour, minute
chatterbot/parsing.py
def convert_time_to_hour_minute(hour, minute, convention):
    """
    Convert time to hour, minute

    :param hour: hour on a 12-hour clock, or None (treated as 0)
    :param minute: minute, or None (treated as 0)
    :param convention: 'am' or 'pm' (case-insensitive), or None (treated as 'am')
    :returns: dict with 24-hour 'hours' and 'minutes' as ints
    """
    hour = int(hour) if hour is not None else 0
    minute = int(minute) if minute is not None else 0

    if convention is None:
        convention = 'am'

    convention = convention.lower()

    # BUG FIX: on a 12-hour clock, 12 pm is 12:00 and 12 am is 00:00.
    # The previous unconditional ``hour += 12`` produced 24 for '12 pm'
    # and left '12 am' as 12.
    if convention == 'pm' and hour != 12:
        hour += 12
    elif convention == 'am' and hour == 12:
        hour = 0

    return {'hours': hour, 'minutes': minute}
[ "Convert", "time", "to", "hour", "minute" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L520-L537
[ "def", "convert_time_to_hour_minute", "(", "hour", ",", "minute", ",", "convention", ")", ":", "if", "hour", "is", "None", ":", "hour", "=", "0", "if", "minute", "is", "None", ":", "minute", "=", "0", "if", "convention", "is", "None", ":", "convention", ...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
date_from_quarter
Extract date from quarter of a year
chatterbot/parsing.py
def date_from_quarter(base_date, ordinal, year):
    """
    Extract date from quarter of a year

    :param ordinal: quarter number (1-4)
    :returns: [start_datetime, end_datetime] covering the quarter
    """
    interval = 3

    # BUG FIX: the previous computation (``interval * (ordinal - 1)``
    # with a later ``0 -> 1`` patch-up) started quarters 2-4 one month
    # early, e.g. Q2 spanned March-June. Quarter n starts at month
    # 3*(n-1)+1 and spans three months.
    month_start = interval * (ordinal - 1) + 1

    # Defensive fallback mirroring the original: out-of-range ordinals
    # below 1 map to the last quarter of the year.
    if month_start < 1:
        month_start = 10

    month_end = month_start + interval - 1

    return [
        datetime(year, month_start, 1),
        datetime(year, month_end, calendar.monthrange(year, month_end)[1])
    ]
[ "Extract", "date", "from", "quarter", "of", "a", "year" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L540-L554
[ "def", "date_from_quarter", "(", "base_date", ",", "ordinal", ",", "year", ")", ":", "interval", "=", "3", "month_start", "=", "interval", "*", "(", "ordinal", "-", "1", ")", "if", "month_start", "<", "0", ":", "month_start", "=", "9", "month_end", "=", ...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
date_from_relative_day
Converts relative day to time Ex: this tuesday, last tuesday
chatterbot/parsing.py
def date_from_relative_day(base_date, time, dow):
    """
    Converts relative day to time
    Ex: this tuesday, last tuesday
    """
    # Truncate to midnight of the base date
    base_date = datetime(base_date.year, base_date.month, base_date.day)

    time = time.lower()
    dow = dow.lower()

    if time in ('this', 'coming'):
        # Day-of-week lookup stays inside each branch so unmatched
        # qualifiers never touch HASHWEEKDAYS (returns None instead).
        return this_week_day(base_date, HASHWEEKDAYS[dow])
    if time in ('last', 'previous'):
        return previous_week_day(base_date, HASHWEEKDAYS[dow])
    if time in ('next', 'following'):
        return next_week_day(base_date, HASHWEEKDAYS[dow])
[ "Converts", "relative", "day", "to", "time", "Ex", ":", "this", "tuesday", "last", "tuesday" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L557-L577
[ "def", "date_from_relative_day", "(", "base_date", ",", "time", ",", "dow", ")", ":", "# Reset date to start of the day", "base_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "time", "=...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
date_from_relative_week_year
Converts relative day to time Eg. this tuesday, last tuesday
chatterbot/parsing.py
def date_from_relative_week_year(base_date, time, dow, ordinal=1):
    """
    Converts relative day to time
    Eg. this tuesday, last tuesday

    :param ordinal: multiplier for 'next' ranges (e.g. "next 3 weeks")
    """
    # If there is an ordinal (next 3 weeks) => return a start and end range
    # Reset date to start of the day
    relative_date = datetime(base_date.year, base_date.month, base_date.day)

    # Renamed from ``ord`` to stop shadowing the builtin.
    count = convert_string_to_number(ordinal)

    if dow in year_variations:
        if time == 'this' or time == 'coming':
            return datetime(relative_date.year, 1, 1)
        elif time == 'last' or time == 'previous':
            return datetime(relative_date.year - 1, relative_date.month, 1)
        elif time == 'next' or time == 'following':
            # NOTE(review): approximates a year as 365 days (ignores leap years)
            return relative_date + timedelta(count * 365)
        elif time == 'end of the':
            return datetime(relative_date.year, 12, 31)
    elif dow in month_variations:
        if time == 'this':
            return datetime(relative_date.year, relative_date.month, relative_date.day)
        elif time == 'last' or time == 'previous':
            return datetime(relative_date.year, relative_date.month - 1, relative_date.day)
        elif time == 'next' or time == 'following':
            if relative_date.month + count >= 12:
                # Month arithmetic with year carry; clamp the day to the
                # target month's length.
                month = relative_date.month - 1 + count
                year = relative_date.year + month // 12
                month = month % 12 + 1
                day = min(relative_date.day, calendar.monthrange(year, month)[1])
                return datetime(year, month, day)
            else:
                return datetime(relative_date.year, relative_date.month + count, relative_date.day)
        elif time == 'end of the':
            return datetime(
                relative_date.year,
                relative_date.month,
                calendar.monthrange(relative_date.year, relative_date.month)[1]
            )
    elif dow in week_variations:
        if time == 'this':
            return relative_date - timedelta(days=relative_date.weekday())
        elif time == 'last' or time == 'previous':
            return relative_date - timedelta(weeks=1)
        elif time == 'next' or time == 'following':
            return relative_date + timedelta(weeks=count)
        elif time == 'end of the':
            # BUG FIX: the original returned ``base_date.weekday() +
            # timedelta(...)`` — an int plus a timedelta, which raises
            # TypeError. Return the date of the upcoming Sunday instead.
            return relative_date + timedelta(days=6 - relative_date.weekday())
    elif dow in day_variations:
        if time == 'this':
            return relative_date
        elif time == 'last' or time == 'previous':
            return relative_date - timedelta(days=1)
        elif time == 'next' or time == 'following':
            return relative_date + timedelta(days=count)
        elif time == 'end of the':
            return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59)
[ "Converts", "relative", "day", "to", "time", "Eg", ".", "this", "tuesday", "last", "tuesday" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L580-L636
[ "def", "date_from_relative_week_year", "(", "base_date", ",", "time", ",", "dow", ",", "ordinal", "=", "1", ")", ":", "# If there is an ordinal (next 3 weeks) => return a start and end range", "# Reset date to start of the day", "relative_date", "=", "datetime", "(", "base_da...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
date_from_adverb
Convert Day adverbs to dates Tomorrow => Date Today => Date
chatterbot/parsing.py
def date_from_adverb(base_date, name):
    """
    Convert Day adverbs to dates
    Tomorrow => Date
    Today => Date

    :returns: a datetime at midnight relative to ``base_date``, or None
        for unrecognized adverbs.
    """
    # Reset date to start of the day
    adverb_date = datetime(base_date.year, base_date.month, base_date.day)

    if name in ('today', 'tonite', 'tonight'):
        # BUG FIX: ``adverb_date.today()`` invoked the classmethod
        # ``datetime.today()``, discarding ``base_date`` and returning
        # the current wall-clock datetime. 'today' relative to the base
        # date is simply the truncated base date itself.
        return adverb_date
    elif name == 'yesterday':
        return adverb_date - timedelta(days=1)
    elif name == 'tomorrow' or name == 'tom':
        return adverb_date + timedelta(days=1)
[ "Convert", "Day", "adverbs", "to", "dates", "Tomorrow", "=", ">", "Date", "Today", "=", ">", "Date" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L639-L652
[ "def", "date_from_adverb", "(", "base_date", ",", "name", ")", ":", "# Reset date to start of the day", "adverb_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "if", "name", "==", "'toda...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
date_from_duration
Find dates from duration Eg: 20 days from now Currently does not support strings like "20 days from last monday".
chatterbot/parsing.py
def date_from_duration(base_date, number_as_string, unit, duration, base_time=None):
    """
    Find dates from duration
    Eg: 20 days from now
    Currently does not support strings like "20 days from last monday".
    """
    # Check if query is `2 days before yesterday` or `day before yesterday`
    if base_time is not None:
        base_date = date_from_adverb(base_date, base_time)

    amount = convert_string_to_number(number_as_string)

    # Map the unit word onto timedelta keyword arguments.
    # NOTE(review): if ``unit`` matches none of the variation sets,
    # ``args`` stays unbound and the code below raises NameError —
    # preserved from the original; confirm callers always pass a
    # recognized unit.
    if unit in day_variations:
        args = {'days': amount}
    elif unit in minute_variations:
        args = {'minutes': amount}
    elif unit in week_variations:
        args = {'weeks': amount}
    elif unit in month_variations:
        # Approximates a month as one twelfth of a 365-day year
        args = {'days': 365 * amount / 12}
    elif unit in year_variations:
        args = {'years': amount}

    if duration in ('ago', 'before', 'earlier'):
        # timedelta has no 'years' keyword, so whole years adjust the
        # year field directly.
        if 'years' in args:
            return datetime(base_date.year - args['years'], base_date.month, base_date.day)
        return base_date - timedelta(**args)
    elif duration in ('after', 'later', 'from now'):
        if 'years' in args:
            return datetime(base_date.year + args['years'], base_date.month, base_date.day)
        return base_date + timedelta(**args)
[ "Find", "dates", "from", "duration", "Eg", ":", "20", "days", "from", "now", "Currently", "does", "not", "support", "strings", "like", "20", "days", "from", "last", "monday", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L655-L682
[ "def", "date_from_duration", "(", "base_date", ",", "number_as_string", ",", "unit", ",", "duration", ",", "base_time", "=", "None", ")", ":", "# Check if query is `2 days before yesterday` or `day before yesterday`", "if", "base_time", "is", "not", "None", ":", "base_d...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
this_week_day
Finds coming weekday
chatterbot/parsing.py
def this_week_day(base_date, weekday):
    """
    Finds coming weekday
    """
    current_weekday = base_date.weekday()

    # If today is Tuesday and the query is `this monday`
    # we should output next week's monday
    if current_weekday > weekday:
        return next_week_day(base_date, weekday)

    # Start from Monday of the current week and walk forward
    # until the target weekday is reached.
    day = base_date - timedelta(days=current_weekday)
    while day.weekday() != weekday:
        day += timedelta(days=1)
    return day
[ "Finds", "coming", "weekday" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L685-L698
[ "def", "this_week_day", "(", "base_date", ",", "weekday", ")", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "# If today is Tuesday and the query is `this monday`", "# We should output the next_week monday", "if", "day_of_week", ">", "weekday", ":", "ret...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
previous_week_day
Finds previous weekday
chatterbot/parsing.py
def previous_week_day(base_date, weekday):
    """
    Finds previous weekday
    """
    # Step backwards one day at a time until the weekday matches;
    # starting at yesterday guarantees a strictly earlier date.
    candidate = base_date - timedelta(days=1)
    while candidate.weekday() != weekday:
        candidate -= timedelta(days=1)
    return candidate
[ "Finds", "previous", "weekday" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L701-L708
[ "def", "previous_week_day", "(", "base_date", ",", "weekday", ")", ":", "day", "=", "base_date", "-", "timedelta", "(", "days", "=", "1", ")", "while", "day", ".", "weekday", "(", ")", "!=", "weekday", ":", "day", "=", "day", "-", "timedelta", "(", "...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
next_week_day
Finds next weekday
chatterbot/parsing.py
def next_week_day(base_date, weekday):
    """
    Finds next weekday
    """
    # Jump to the Monday of next week, then walk forward to the
    # target weekday.
    candidate = base_date + timedelta(days=7 - base_date.weekday())
    while candidate.weekday() != weekday:
        candidate += timedelta(days=1)
    return candidate
[ "Finds", "next", "weekday" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L711-L720
[ "def", "next_week_day", "(", "base_date", ",", "weekday", ")", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "end_of_this_week", "=", "base_date", "+", "timedelta", "(", "days", "=", "6", "-", "day_of_week", ")", "day", "=", "end_of_this_we...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
datetime_parsing
Extract datetime objects from a string of text.
chatterbot/parsing.py
def datetime_parsing(text, base_date=None):
    """
    Extract datetime objects from a string of text.

    :param text: The text to scan for date/time expressions.
    :param base_date: Reference datetime for relative expressions;
        defaults to the current time at call time.
    :returns: list of (matched_text, value, span) tuples sorted by
        start position.
    """
    # BUG FIX: the default was ``base_date=datetime.now()``, which is
    # evaluated once at import time, so long-running processes kept
    # resolving relative dates against when the module was first loaded.
    if base_date is None:
        base_date = datetime.now()

    matches = []
    found_array = []

    # Find the position in the string
    for expression, function in regex:
        for match in expression.finditer(text):
            matches.append((match.group(), function(match, base_date), match.span()))

    # Wrap the matched text with TAG element to prevent nested selections
    for match, value, spans in matches:
        # NOTE(review): ``match`` is interpolated into the pattern without
        # re.escape(); regex metacharacters in matched text could break
        # the substitution — confirm matched text is always regex-safe.
        subn = re.subn(
            '(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)',
            '<TAG>' + match + '</TAG>',
            text
        )
        text = subn[0]
        if subn[1] != 0:
            found_array.append((match, value, spans))

    # To preserve order of the match, sort based on the start position
    return sorted(found_array, key=lambda found: found and found[2][0])
[ "Extract", "datetime", "objects", "from", "a", "string", "of", "text", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L723-L746
[ "def", "datetime_parsing", "(", "text", ",", "base_date", "=", "datetime", ".", "now", "(", ")", ")", ":", "matches", "=", "[", "]", "found_array", "=", "[", "]", "# Find the position in the string", "for", "expression", ",", "function", "in", "regex", ":", ...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
IndexedTextSearch.search
Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time.
chatterbot/search.py
def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. """ self.chatbot.logger.info('Beginning search for close text match') input_search_text = input_statement.search_text if not input_statement.search_text: self.chatbot.logger.warn( 'No value for search_text was available on the provided input' ) input_search_text = self.chatbot.storage.tagger.get_bigram_pair_string( input_statement.text ) search_parameters = { 'search_text_contains': input_search_text, 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) closest_match = Statement(text='') closest_match.confidence = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > closest_match.confidence: statement.confidence = confidence closest_match = statement self.chatbot.logger.info('Similar text found: {} {}'.format( closest_match.text, confidence )) yield closest_match
def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. """ self.chatbot.logger.info('Beginning search for close text match') input_search_text = input_statement.search_text if not input_statement.search_text: self.chatbot.logger.warn( 'No value for search_text was available on the provided input' ) input_search_text = self.chatbot.storage.tagger.get_bigram_pair_string( input_statement.text ) search_parameters = { 'search_text_contains': input_search_text, 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) closest_match = Statement(text='') closest_match.confidence = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > closest_match.confidence: statement.confidence = confidence closest_match = statement self.chatbot.logger.info('Similar text found: {} {}'.format( closest_match.text, confidence )) yield closest_match
[ "Search", "for", "close", "matches", "to", "the", "input", ".", "Confidence", "scores", "for", "subsequent", "results", "will", "order", "of", "increasing", "value", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/search.py#L35-L89
[ "def", "search", "(", "self", ",", "input_statement", ",", "*", "*", "additional_parameters", ")", ":", "self", ".", "chatbot", ".", "logger", ".", "info", "(", "'Beginning search for close text match'", ")", "input_search_text", "=", "input_statement", ".", "sear...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
TkinterGUIExample.initialize
Set window layout.
examples/tkinter_gui.py
def initialize(self): """ Set window layout. """ self.grid() self.respond = ttk.Button(self, text='Get Response', command=self.get_response) self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3) self.usr_input = ttk.Entry(self, state='normal') self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3) self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:') self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3) self.conversation = ScrolledText.ScrolledText(self, state='disabled') self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3)
def initialize(self): """ Set window layout. """ self.grid() self.respond = ttk.Button(self, text='Get Response', command=self.get_response) self.respond.grid(column=0, row=0, sticky='nesw', padx=3, pady=3) self.usr_input = ttk.Entry(self, state='normal') self.usr_input.grid(column=1, row=0, sticky='nesw', padx=3, pady=3) self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:') self.conversation_lbl.grid(column=0, row=1, sticky='nesw', padx=3, pady=3) self.conversation = ScrolledText.ScrolledText(self, state='disabled') self.conversation.grid(column=0, row=2, columnspan=2, sticky='nesw', padx=3, pady=3)
[ "Set", "window", "layout", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/tkinter_gui.py#L33-L49
[ "def", "initialize", "(", "self", ")", ":", "self", ".", "grid", "(", ")", "self", ".", "respond", "=", "ttk", ".", "Button", "(", "self", ",", "text", "=", "'Get Response'", ",", "command", "=", "self", ".", "get_response", ")", "self", ".", "respon...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
TkinterGUIExample.get_response
Get a response from the chatbot and display it.
examples/tkinter_gui.py
def get_response(self): """ Get a response from the chatbot and display it. """ user_input = self.usr_input.get() self.usr_input.delete(0, tk.END) response = self.chatbot.get_response(user_input) self.conversation['state'] = 'normal' self.conversation.insert( tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n" ) self.conversation['state'] = 'disabled' time.sleep(0.5)
def get_response(self): """ Get a response from the chatbot and display it. """ user_input = self.usr_input.get() self.usr_input.delete(0, tk.END) response = self.chatbot.get_response(user_input) self.conversation['state'] = 'normal' self.conversation.insert( tk.END, "Human: " + user_input + "\n" + "ChatBot: " + str(response.text) + "\n" ) self.conversation['state'] = 'disabled' time.sleep(0.5)
[ "Get", "a", "response", "from", "the", "chatbot", "and", "display", "it", "." ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/examples/tkinter_gui.py#L51-L66
[ "def", "get_response", "(", "self", ")", ":", "user_input", "=", "self", ".", "usr_input", ".", "get", "(", ")", "self", ".", "usr_input", ".", "delete", "(", "0", ",", "tk", ".", "END", ")", "response", "=", "self", ".", "chatbot", ".", "get_respons...
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
AbstractBaseStatement.add_tags
Add a list of strings to the statement as tags. (Overrides the method from StatementMixin)
chatterbot/ext/django_chatterbot/abstract_models.py
def add_tags(self, *tags): """ Add a list of strings to the statement as tags. (Overrides the method from StatementMixin) """ for _tag in tags: self.tags.get_or_create(name=_tag)
def add_tags(self, *tags): """ Add a list of strings to the statement as tags. (Overrides the method from StatementMixin) """ for _tag in tags: self.tags.get_or_create(name=_tag)
[ "Add", "a", "list", "of", "strings", "to", "the", "statement", "as", "tags", ".", "(", "Overrides", "the", "method", "from", "StatementMixin", ")" ]
gunthercox/ChatterBot
python
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/ext/django_chatterbot/abstract_models.py#L110-L116
[ "def", "add_tags", "(", "self", ",", "*", "tags", ")", ":", "for", "_tag", "in", "tags", ":", "self", ".", "tags", ".", "get_or_create", "(", "name", "=", "_tag", ")" ]
1a03dcb45cba7bdc24d3db5e750582e0cb1518e2
train
SvelteComponent
Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython.
lucid/scratch/web/svelte.py
def SvelteComponent(name, path): """Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython. """ if path[-3:] == ".js": js_path = path elif path[-5:] == ".html": print("Trying to build svelte component from html...") js_path = build_svelte(path) js_content = read(js_path, mode='r') def inner(data): id_str = js_id(name) html = _template \ .replace("$js", js_content) \ .replace("$name", name) \ .replace("$data", json.dumps(data)) \ .replace("$id", id_str) _display_html(html) return inner
def SvelteComponent(name, path): """Display svelte components in iPython. Args: name: name of svelte component (must match component filename when built) path: path to compile svelte .js file or source svelte .html file. (If html file, we try to call svelte and build the file.) Returns: A function mapping data to a rendered svelte component in ipython. """ if path[-3:] == ".js": js_path = path elif path[-5:] == ".html": print("Trying to build svelte component from html...") js_path = build_svelte(path) js_content = read(js_path, mode='r') def inner(data): id_str = js_id(name) html = _template \ .replace("$js", js_content) \ .replace("$name", name) \ .replace("$data", json.dumps(data)) \ .replace("$id", id_str) _display_html(html) return inner
[ "Display", "svelte", "components", "in", "iPython", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/web/svelte.py#L43-L68
[ "def", "SvelteComponent", "(", "name", ",", "path", ")", ":", "if", "path", "[", "-", "3", ":", "]", "==", "\".js\"", ":", "js_path", "=", "path", "elif", "path", "[", "-", "5", ":", "]", "==", "\".html\"", ":", "print", "(", "\"Trying to build svelt...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
save_json
Save object as json on CNS.
lucid/misc/io/saving.py
def save_json(object, handle, indent=2): """Save object as json on CNS.""" obj_json = json.dumps(object, indent=indent, cls=NumpyJSONEncoder) handle.write(obj_json)
def save_json(object, handle, indent=2): """Save object as json on CNS.""" obj_json = json.dumps(object, indent=indent, cls=NumpyJSONEncoder) handle.write(obj_json)
[ "Save", "object", "as", "json", "on", "CNS", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L58-L61
[ "def", "save_json", "(", "object", ",", "handle", ",", "indent", "=", "2", ")", ":", "obj_json", "=", "json", ".", "dumps", "(", "object", ",", "indent", "=", "indent", ",", "cls", "=", "NumpyJSONEncoder", ")", "handle", ".", "write", "(", "obj_json", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
save_npz
Save dict of numpy array as npz file.
lucid/misc/io/saving.py
def save_npz(object, handle): """Save dict of numpy array as npz file.""" # there is a bug where savez doesn't actually accept a file handle. log.warning("Saving npz files currently only works locally. :/") path = handle.name handle.close() if type(object) is dict: np.savez(path, **object) elif type(object) is list: np.savez(path, *object) else: log.warning("Saving non dict or list as npz file, did you maybe want npy?") np.savez(path, object)
def save_npz(object, handle): """Save dict of numpy array as npz file.""" # there is a bug where savez doesn't actually accept a file handle. log.warning("Saving npz files currently only works locally. :/") path = handle.name handle.close() if type(object) is dict: np.savez(path, **object) elif type(object) is list: np.savez(path, *object) else: log.warning("Saving non dict or list as npz file, did you maybe want npy?") np.savez(path, object)
[ "Save", "dict", "of", "numpy", "array", "as", "npz", "file", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L69-L81
[ "def", "save_npz", "(", "object", ",", "handle", ")", ":", "# there is a bug where savez doesn't actually accept a file handle.", "log", ".", "warning", "(", "\"Saving npz files currently only works locally. :/\"", ")", "path", "=", "handle", ".", "name", "handle", ".", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
save_img
Save numpy array as image file on CNS.
lucid/misc/io/saving.py
def save_img(object, handle, **kwargs): """Save numpy array as image file on CNS.""" if isinstance(object, np.ndarray): normalized = _normalize_array(object) object = PIL.Image.fromarray(normalized) if isinstance(object, PIL.Image.Image): object.save(handle, **kwargs) # will infer format from handle's url ext. else: raise ValueError("Can only save_img for numpy arrays or PIL.Images!")
def save_img(object, handle, **kwargs): """Save numpy array as image file on CNS.""" if isinstance(object, np.ndarray): normalized = _normalize_array(object) object = PIL.Image.fromarray(normalized) if isinstance(object, PIL.Image.Image): object.save(handle, **kwargs) # will infer format from handle's url ext. else: raise ValueError("Can only save_img for numpy arrays or PIL.Images!")
[ "Save", "numpy", "array", "as", "image", "file", "on", "CNS", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L84-L94
[ "def", "save_img", "(", "object", ",", "handle", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "object", ",", "np", ".", "ndarray", ")", ":", "normalized", "=", "_normalize_array", "(", "object", ")", "object", "=", "PIL", ".", "Image", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
save
Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported.
lucid/misc/io/saving.py
def save(thing, url_or_handle, **kwargs): """Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported. """ is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name") if is_handle: _, ext = os.path.splitext(url_or_handle.name) else: _, ext = os.path.splitext(url_or_handle) if not ext: raise RuntimeError("No extension in URL: " + url_or_handle) if ext in savers: saver = savers[ext] if is_handle: saver(thing, url_or_handle, **kwargs) else: with write_handle(url_or_handle) as handle: saver(thing, handle, **kwargs) else: saver_names = [(key, fn.__name__) for (key, fn) in savers.items()] message = "Unknown extension '{}', supports {}." raise ValueError(message.format(ext, saver_names))
def save(thing, url_or_handle, **kwargs): """Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: obj: object to save. path: CNS path. Raises: RuntimeError: If file extension not supported. """ is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name") if is_handle: _, ext = os.path.splitext(url_or_handle.name) else: _, ext = os.path.splitext(url_or_handle) if not ext: raise RuntimeError("No extension in URL: " + url_or_handle) if ext in savers: saver = savers[ext] if is_handle: saver(thing, url_or_handle, **kwargs) else: with write_handle(url_or_handle) as handle: saver(thing, handle, **kwargs) else: saver_names = [(key, fn.__name__) for (key, fn) in savers.items()] message = "Unknown extension '{}', supports {}." raise ValueError(message.format(ext, saver_names))
[ "Save", "object", "to", "file", "on", "CNS", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/saving.py#L135-L166
[ "def", "save", "(", "thing", ",", "url_or_handle", ",", "*", "*", "kwargs", ")", ":", "is_handle", "=", "hasattr", "(", "url_or_handle", ",", "\"write\"", ")", "and", "hasattr", "(", "url_or_handle", ",", "\"name\"", ")", "if", "is_handle", ":", "_", ","...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
frustum
Create view frustum matrix.
lucid/misc/gl/meshutil.py
def frustum(left, right, bottom, top, znear, zfar): """Create view frustum matrix.""" assert right != left assert bottom != top assert znear != zfar M = np.zeros((4, 4), dtype=np.float32) M[0, 0] = +2.0 * znear / (right - left) M[2, 0] = (right + left) / (right - left) M[1, 1] = +2.0 * znear / (top - bottom) M[3, 1] = (top + bottom) / (top - bottom) M[2, 2] = -(zfar + znear) / (zfar - znear) M[3, 2] = -2.0 * znear * zfar / (zfar - znear) M[2, 3] = -1.0 return M
def frustum(left, right, bottom, top, znear, zfar): """Create view frustum matrix.""" assert right != left assert bottom != top assert znear != zfar M = np.zeros((4, 4), dtype=np.float32) M[0, 0] = +2.0 * znear / (right - left) M[2, 0] = (right + left) / (right - left) M[1, 1] = +2.0 * znear / (top - bottom) M[3, 1] = (top + bottom) / (top - bottom) M[2, 2] = -(zfar + znear) / (zfar - znear) M[3, 2] = -2.0 * znear * zfar / (zfar - znear) M[2, 3] = -1.0 return M
[ "Create", "view", "frustum", "matrix", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L8-L22
[ "def", "frustum", "(", "left", ",", "right", ",", "bottom", ",", "top", ",", "znear", ",", "zfar", ")", ":", "assert", "right", "!=", "left", "assert", "bottom", "!=", "top", "assert", "znear", "!=", "zfar", "M", "=", "np", ".", "zeros", "(", "(", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
anorm
Compute L2 norms alogn specified axes.
lucid/misc/gl/meshutil.py
def anorm(x, axis=None, keepdims=False): """Compute L2 norms alogn specified axes.""" return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims))
def anorm(x, axis=None, keepdims=False): """Compute L2 norms alogn specified axes.""" return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims))
[ "Compute", "L2", "norms", "alogn", "specified", "axes", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L33-L35
[ "def", "anorm", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "np", ".", "sqrt", "(", "(", "x", "*", "x", ")", ".", "sum", "(", "axis", "=", "axis", ",", "keepdims", "=", "keepdims", ")", ")" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
normalize
L2 Normalize along specified axes.
lucid/misc/gl/meshutil.py
def normalize(v, axis=None, eps=1e-10): """L2 Normalize along specified axes.""" return v / max(anorm(v, axis=axis, keepdims=True), eps)
def normalize(v, axis=None, eps=1e-10): """L2 Normalize along specified axes.""" return v / max(anorm(v, axis=axis, keepdims=True), eps)
[ "L2", "Normalize", "along", "specified", "axes", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L38-L40
[ "def", "normalize", "(", "v", ",", "axis", "=", "None", ",", "eps", "=", "1e-10", ")", ":", "return", "v", "/", "max", "(", "anorm", "(", "v", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", ",", "eps", ")" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
lookat
Generate LookAt modelview matrix.
lucid/misc/gl/meshutil.py
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]): """Generate LookAt modelview matrix.""" eye = np.float32(eye) forward = normalize(target - eye) side = normalize(np.cross(forward, up)) up = np.cross(side, forward) M = np.eye(4, dtype=np.float32) R = M[:3, :3] R[:] = [side, up, -forward] M[:3, 3] = -R.dot(eye) return M
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]): """Generate LookAt modelview matrix.""" eye = np.float32(eye) forward = normalize(target - eye) side = normalize(np.cross(forward, up)) up = np.cross(side, forward) M = np.eye(4, dtype=np.float32) R = M[:3, :3] R[:] = [side, up, -forward] M[:3, 3] = -R.dot(eye) return M
[ "Generate", "LookAt", "modelview", "matrix", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L43-L53
[ "def", "lookat", "(", "eye", ",", "target", "=", "[", "0", ",", "0", ",", "0", "]", ",", "up", "=", "[", "0", ",", "1", ",", "0", "]", ")", ":", "eye", "=", "np", ".", "float32", "(", "eye", ")", "forward", "=", "normalize", "(", "target", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
sample_view
Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned.
lucid/misc/gl/meshutil.py
def sample_view(min_dist, max_dist=None): '''Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned. ''' if max_dist is None: max_dist = min_dist dist = np.random.uniform(min_dist, max_dist) eye = np.random.normal(size=3) eye = normalize(eye)*dist return lookat(eye)
def sample_view(min_dist, max_dist=None): '''Sample random camera position. Sample origin directed camera position in given distance range from the origin. ModelView matrix is returned. ''' if max_dist is None: max_dist = min_dist dist = np.random.uniform(min_dist, max_dist) eye = np.random.normal(size=3) eye = normalize(eye)*dist return lookat(eye)
[ "Sample", "random", "camera", "position", ".", "Sample", "origin", "directed", "camera", "position", "in", "given", "distance", "range", "from", "the", "origin", ".", "ModelView", "matrix", "is", "returned", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L56-L67
[ "def", "sample_view", "(", "min_dist", ",", "max_dist", "=", "None", ")", ":", "if", "max_dist", "is", "None", ":", "max_dist", "=", "min_dist", "dist", "=", "np", ".", "random", ".", "uniform", "(", "min_dist", ",", "max_dist", ")", "eye", "=", "np", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_parse_vertex_tuple
Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...).
lucid/misc/gl/meshutil.py
def _parse_vertex_tuple(s): """Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...).""" vt = [0, 0, 0] for i, c in enumerate(s.split('/')): if c: vt[i] = int(c) return tuple(vt)
def _parse_vertex_tuple(s): """Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...).""" vt = [0, 0, 0] for i, c in enumerate(s.split('/')): if c: vt[i] = int(c) return tuple(vt)
[ "Parse", "vertex", "indices", "in", "/", "separated", "form", "(", "like", "i", "/", "j", "/", "k", "i", "//", "k", "...", ")", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L78-L84
[ "def", "_parse_vertex_tuple", "(", "s", ")", ":", "vt", "=", "[", "0", ",", "0", ",", "0", "]", "for", "i", ",", "c", "in", "enumerate", "(", "s", ".", "split", "(", "'/'", ")", ")", ":", "if", "c", ":", "vt", "[", "i", "]", "=", "int", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_unify_rows
Unify lengths of each row of a.
lucid/misc/gl/meshutil.py
def _unify_rows(a): """Unify lengths of each row of a.""" lens = np.fromiter(map(len, a), np.int32) if not (lens[0] == lens).all(): out = np.zeros((len(a), lens.max()), np.float32) for i, row in enumerate(a): out[i, :lens[i]] = row else: out = np.float32(a) return out
def _unify_rows(a): """Unify lengths of each row of a.""" lens = np.fromiter(map(len, a), np.int32) if not (lens[0] == lens).all(): out = np.zeros((len(a), lens.max()), np.float32) for i, row in enumerate(a): out[i, :lens[i]] = row else: out = np.float32(a) return out
[ "Unify", "lengths", "of", "each", "row", "of", "a", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L87-L96
[ "def", "_unify_rows", "(", "a", ")", ":", "lens", "=", "np", ".", "fromiter", "(", "map", "(", "len", ",", "a", ")", ",", "np", ".", "int32", ")", "if", "not", "(", "lens", "[", "0", "]", "==", "lens", ")", ".", "all", "(", ")", ":", "out",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
load_obj
Load 3d mesh form .obj' file. Args: fn: Input file name or file-like object. Returns: dictionary with the following keys (some of which may be missing): position: np.float32, (n, 3) array, vertex positions uv: np.float32, (n, 2) array, vertex uv coordinates normal: np.float32, (n, 3) array, vertex uv normals face: np.int32, (k*3,) traingular face indices
lucid/misc/gl/meshutil.py
def load_obj(fn): """Load 3d mesh form .obj' file. Args: fn: Input file name or file-like object. Returns: dictionary with the following keys (some of which may be missing): position: np.float32, (n, 3) array, vertex positions uv: np.float32, (n, 2) array, vertex uv coordinates normal: np.float32, (n, 3) array, vertex uv normals face: np.int32, (k*3,) traingular face indices """ position = [np.zeros(3, dtype=np.float32)] normal = [np.zeros(3, dtype=np.float32)] uv = [np.zeros(2, dtype=np.float32)] tuple2idx = OrderedDict() trinagle_indices = [] input_file = open(fn) if isinstance(fn, str) else fn for line in input_file: line = line.strip() if not line or line[0] == '#': continue line = line.split(' ', 1) tag = line[0] if len(line) > 1: line = line[1] else: line = '' if tag == 'v': position.append(np.fromstring(line, sep=' ')) elif tag == 'vt': uv.append(np.fromstring(line, sep=' ')) elif tag == 'vn': normal.append(np.fromstring(line, sep=' ')) elif tag == 'f': output_face_indices = [] for chunk in line.split(): # tuple order: pos_idx, uv_idx, normal_idx vt = _parse_vertex_tuple(chunk) if vt not in tuple2idx: # create a new output vertex? tuple2idx[vt] = len(tuple2idx) output_face_indices.append(tuple2idx[vt]) # generate face triangles for i in range(1, len(output_face_indices)-1): for vi in [0, i, i+1]: trinagle_indices.append(output_face_indices[vi]) outputs = {} outputs['face'] = np.int32(trinagle_indices) pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T if np.any(pos_idx): outputs['position'] = _unify_rows(position)[pos_idx] if np.any(uv_idx): outputs['uv'] = _unify_rows(uv)[uv_idx] if np.any(normal_idx): outputs['normal'] = _unify_rows(normal)[normal_idx] return outputs
def load_obj(fn): """Load 3d mesh form .obj' file. Args: fn: Input file name or file-like object. Returns: dictionary with the following keys (some of which may be missing): position: np.float32, (n, 3) array, vertex positions uv: np.float32, (n, 2) array, vertex uv coordinates normal: np.float32, (n, 3) array, vertex uv normals face: np.int32, (k*3,) traingular face indices """ position = [np.zeros(3, dtype=np.float32)] normal = [np.zeros(3, dtype=np.float32)] uv = [np.zeros(2, dtype=np.float32)] tuple2idx = OrderedDict() trinagle_indices = [] input_file = open(fn) if isinstance(fn, str) else fn for line in input_file: line = line.strip() if not line or line[0] == '#': continue line = line.split(' ', 1) tag = line[0] if len(line) > 1: line = line[1] else: line = '' if tag == 'v': position.append(np.fromstring(line, sep=' ')) elif tag == 'vt': uv.append(np.fromstring(line, sep=' ')) elif tag == 'vn': normal.append(np.fromstring(line, sep=' ')) elif tag == 'f': output_face_indices = [] for chunk in line.split(): # tuple order: pos_idx, uv_idx, normal_idx vt = _parse_vertex_tuple(chunk) if vt not in tuple2idx: # create a new output vertex? tuple2idx[vt] = len(tuple2idx) output_face_indices.append(tuple2idx[vt]) # generate face triangles for i in range(1, len(output_face_indices)-1): for vi in [0, i, i+1]: trinagle_indices.append(output_face_indices[vi]) outputs = {} outputs['face'] = np.int32(trinagle_indices) pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T if np.any(pos_idx): outputs['position'] = _unify_rows(position)[pos_idx] if np.any(uv_idx): outputs['uv'] = _unify_rows(uv)[uv_idx] if np.any(normal_idx): outputs['normal'] = _unify_rows(normal)[normal_idx] return outputs
[ "Load", "3d", "mesh", "form", ".", "obj", "file", ".", "Args", ":", "fn", ":", "Input", "file", "name", "or", "file", "-", "like", "object", ".", "Returns", ":", "dictionary", "with", "the", "following", "keys", "(", "some", "of", "which", "may", "be...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L99-L158
[ "def", "load_obj", "(", "fn", ")", ":", "position", "=", "[", "np", ".", "zeros", "(", "3", ",", "dtype", "=", "np", ".", "float32", ")", "]", "normal", "=", "[", "np", ".", "zeros", "(", "3", ",", "dtype", "=", "np", ".", "float32", ")", "]"...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
normalize_mesh
Scale mesh to fit into -1..1 cube
lucid/misc/gl/meshutil.py
def normalize_mesh(mesh): '''Scale mesh to fit into -1..1 cube''' mesh = dict(mesh) pos = mesh['position'][:,:3].copy() pos -= (pos.max(0)+pos.min(0)) / 2.0 pos /= np.abs(pos).max() mesh['position'] = pos return mesh
def normalize_mesh(mesh): '''Scale mesh to fit into -1..1 cube''' mesh = dict(mesh) pos = mesh['position'][:,:3].copy() pos -= (pos.max(0)+pos.min(0)) / 2.0 pos /= np.abs(pos).max() mesh['position'] = pos return mesh
[ "Scale", "mesh", "to", "fit", "into", "-", "1", "..", "1", "cube" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L161-L168
[ "def", "normalize_mesh", "(", "mesh", ")", ":", "mesh", "=", "dict", "(", "mesh", ")", "pos", "=", "mesh", "[", "'position'", "]", "[", ":", ",", ":", "3", "]", ".", "copy", "(", ")", "pos", "-=", "(", "pos", ".", "max", "(", "0", ")", "+", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
Layer.activations
Loads sampled activations, which requires network access.
lucid/modelzoo/vision_base.py
def activations(self): """Loads sampled activations, which requires network access.""" if self._activations is None: self._activations = _get_aligned_activations(self) return self._activations
def activations(self): """Loads sampled activations, which requires network access.""" if self._activations is None: self._activations = _get_aligned_activations(self) return self._activations
[ "Loads", "sampled", "activations", "which", "requires", "network", "access", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L71-L75
[ "def", "activations", "(", "self", ")", ":", "if", "self", ".", "_activations", "is", "None", ":", "self", ".", "_activations", "=", "_get_aligned_activations", "(", "self", ")", "return", "self", ".", "_activations" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
Model.create_input
Create input tensor.
lucid/modelzoo/vision_base.py
def create_input(self, t_input=None, forget_xy_shape=True): """Create input tensor.""" if t_input is None: t_input = tf.placeholder(tf.float32, self.image_shape) t_prep_input = t_input if len(t_prep_input.shape) == 3: t_prep_input = tf.expand_dims(t_prep_input, 0) if forget_xy_shape: t_prep_input = model_util.forget_xy(t_prep_input) if hasattr(self, "is_BGR") and self.is_BGR is True: t_prep_input = tf.reverse(t_prep_input, [-1]) lo, hi = self.image_value_range t_prep_input = lo + t_prep_input * (hi - lo) return t_input, t_prep_input
def create_input(self, t_input=None, forget_xy_shape=True): """Create input tensor.""" if t_input is None: t_input = tf.placeholder(tf.float32, self.image_shape) t_prep_input = t_input if len(t_prep_input.shape) == 3: t_prep_input = tf.expand_dims(t_prep_input, 0) if forget_xy_shape: t_prep_input = model_util.forget_xy(t_prep_input) if hasattr(self, "is_BGR") and self.is_BGR is True: t_prep_input = tf.reverse(t_prep_input, [-1]) lo, hi = self.image_value_range t_prep_input = lo + t_prep_input * (hi - lo) return t_input, t_prep_input
[ "Create", "input", "tensor", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L161-L174
[ "def", "create_input", "(", "self", ",", "t_input", "=", "None", ",", "forget_xy_shape", "=", "True", ")", ":", "if", "t_input", "is", "None", ":", "t_input", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "self", ".", "image_shape", ")...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
Model.import_graph
Import model GraphDef into the current graph.
lucid/modelzoo/vision_base.py
def import_graph(self, t_input=None, scope='import', forget_xy_shape=True): """Import model GraphDef into the current graph.""" graph = tf.get_default_graph() assert graph.unique_name(scope, False) == scope, ( 'Scope "%s" already exists. Provide explicit scope names when ' 'importing multiple instances of the model.') % scope t_input, t_prep_input = self.create_input(t_input, forget_xy_shape) tf.import_graph_def( self.graph_def, {self.input_name: t_prep_input}, name=scope) self.post_import(scope)
def import_graph(self, t_input=None, scope='import', forget_xy_shape=True): """Import model GraphDef into the current graph.""" graph = tf.get_default_graph() assert graph.unique_name(scope, False) == scope, ( 'Scope "%s" already exists. Provide explicit scope names when ' 'importing multiple instances of the model.') % scope t_input, t_prep_input = self.create_input(t_input, forget_xy_shape) tf.import_graph_def( self.graph_def, {self.input_name: t_prep_input}, name=scope) self.post_import(scope)
[ "Import", "model", "GraphDef", "into", "the", "current", "graph", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/vision_base.py#L176-L185
[ "def", "import_graph", "(", "self", ",", "t_input", "=", "None", ",", "scope", "=", "'import'", ",", "forget_xy_shape", "=", "True", ")", ":", "graph", "=", "tf", ".", "get_default_graph", "(", ")", "assert", "graph", ".", "unique_name", "(", "scope", ",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
normalize_layout
Removes outliers and scales layout to between [0,1].
lucid/recipes/activation_atlas/layout.py
def normalize_layout(layout, min_percentile=1, max_percentile=99, relative_margin=0.1): """Removes outliers and scales layout to between [0,1].""" # compute percentiles mins = np.percentile(layout, min_percentile, axis=(0)) maxs = np.percentile(layout, max_percentile, axis=(0)) # add margins mins -= relative_margin * (maxs - mins) maxs += relative_margin * (maxs - mins) # `clip` broadcasts, `[None]`s added only for readability clipped = np.clip(layout, mins, maxs) # embed within [0,1] along both axes clipped -= clipped.min(axis=0) clipped /= clipped.max(axis=0) return clipped
def normalize_layout(layout, min_percentile=1, max_percentile=99, relative_margin=0.1): """Removes outliers and scales layout to between [0,1].""" # compute percentiles mins = np.percentile(layout, min_percentile, axis=(0)) maxs = np.percentile(layout, max_percentile, axis=(0)) # add margins mins -= relative_margin * (maxs - mins) maxs += relative_margin * (maxs - mins) # `clip` broadcasts, `[None]`s added only for readability clipped = np.clip(layout, mins, maxs) # embed within [0,1] along both axes clipped -= clipped.min(axis=0) clipped /= clipped.max(axis=0) return clipped
[ "Removes", "outliers", "and", "scales", "layout", "to", "between", "[", "0", "1", "]", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/recipes/activation_atlas/layout.py#L25-L43
[ "def", "normalize_layout", "(", "layout", ",", "min_percentile", "=", "1", ",", "max_percentile", "=", "99", ",", "relative_margin", "=", "0.1", ")", ":", "# compute percentiles", "mins", "=", "np", ".", "percentile", "(", "layout", ",", "min_percentile", ",",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
aligned_umap
`activations` can be a list of ndarrays. In that case a list of layouts is returned.
lucid/recipes/activation_atlas/layout.py
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False): """`activations` can be a list of ndarrays. In that case a list of layouts is returned.""" umap_defaults = dict( n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine" ) umap_defaults.update(umap_options) # if passed a list of activations, we combine them and later split the layouts if type(activations) is list or type(activations) is tuple: num_activation_groups = len(activations) combined_activations = np.concatenate(activations) else: num_activation_groups = 1 combined_activations = activations try: layout = UMAP(**umap_defaults).fit_transform(combined_activations) except (RecursionError, SystemError) as exception: log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.") raise ValueError("UMAP failed to fit activations: %s", exception) if normalize: layout = normalize_layout(layout) if num_activation_groups > 1: layouts = np.split(layout, num_activation_groups, axis=0) return layouts else: return layout
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False): """`activations` can be a list of ndarrays. In that case a list of layouts is returned.""" umap_defaults = dict( n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine" ) umap_defaults.update(umap_options) # if passed a list of activations, we combine them and later split the layouts if type(activations) is list or type(activations) is tuple: num_activation_groups = len(activations) combined_activations = np.concatenate(activations) else: num_activation_groups = 1 combined_activations = activations try: layout = UMAP(**umap_defaults).fit_transform(combined_activations) except (RecursionError, SystemError) as exception: log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.") raise ValueError("UMAP failed to fit activations: %s", exception) if normalize: layout = normalize_layout(layout) if num_activation_groups > 1: layouts = np.split(layout, num_activation_groups, axis=0) return layouts else: return layout
[ "activations", "can", "be", "a", "list", "of", "ndarrays", ".", "In", "that", "case", "a", "list", "of", "layouts", "is", "returned", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/recipes/activation_atlas/layout.py#L46-L74
[ "def", "aligned_umap", "(", "activations", ",", "umap_options", "=", "{", "}", ",", "normalize", "=", "True", ",", "verbose", "=", "False", ")", ":", "umap_defaults", "=", "dict", "(", "n_components", "=", "2", ",", "n_neighbors", "=", "50", ",", "min_di...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
render_tile
Render each cell in the tile and stitch it into a single image
lucid/scratch/atlas_pipeline/render_tile.py
def render_tile(cells, ti, tj, render, params, metadata, layout, summary): """ Render each cell in the tile and stitch it into a single image """ image_size = params["cell_size"] * params["n_tile"] tile = Image.new("RGB", (image_size, image_size), (255,255,255)) keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_image = render(cells[key], params, metadata, layout, summary) # stitch this rendering into the tile image ci = key[0] % params["n_tile"] cj = key[1] % params["n_tile"] xmin = ci*params["cell_size"] ymin = cj*params["cell_size"] xmax = (ci+1)*params["cell_size"] ymax = (cj+1)*params["cell_size"] if params.get("scale_density", False): density = len(cells[key]["gi"]) # scale = density/summary["max_density"] scale = math.log(density)/(math.log(summary["max_density"]) or 1) owidth = xmax - xmin width = int(round(owidth * scale)) if(width < 1): width = 1 offsetL = int(round((owidth - width)/2)) offsetR = owidth - width - offsetL # handle odd numbers # print("\n") # print("width", width, offsetL, offsetR) box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR] resample = params.get("scale_type", Image.NEAREST) cell_image = cell_image.resize(size=(width,width), resample=resample) # print(cell_image) else: box = [xmin, ymin, xmax, ymax] # print("box", box) tile.paste(cell_image, box) print("\n") return tile
def render_tile(cells, ti, tj, render, params, metadata, layout, summary): """ Render each cell in the tile and stitch it into a single image """ image_size = params["cell_size"] * params["n_tile"] tile = Image.new("RGB", (image_size, image_size), (255,255,255)) keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_image = render(cells[key], params, metadata, layout, summary) # stitch this rendering into the tile image ci = key[0] % params["n_tile"] cj = key[1] % params["n_tile"] xmin = ci*params["cell_size"] ymin = cj*params["cell_size"] xmax = (ci+1)*params["cell_size"] ymax = (cj+1)*params["cell_size"] if params.get("scale_density", False): density = len(cells[key]["gi"]) # scale = density/summary["max_density"] scale = math.log(density)/(math.log(summary["max_density"]) or 1) owidth = xmax - xmin width = int(round(owidth * scale)) if(width < 1): width = 1 offsetL = int(round((owidth - width)/2)) offsetR = owidth - width - offsetL # handle odd numbers # print("\n") # print("width", width, offsetL, offsetR) box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR] resample = params.get("scale_type", Image.NEAREST) cell_image = cell_image.resize(size=(width,width), resample=resample) # print(cell_image) else: box = [xmin, ymin, xmax, ymax] # print("box", box) tile.paste(cell_image, box) print("\n") return tile
[ "Render", "each", "cell", "in", "the", "tile", "and", "stitch", "it", "into", "a", "single", "image" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/render_tile.py#L11-L51
[ "def", "render_tile", "(", "cells", ",", "ti", ",", "tj", ",", "render", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", ":", "image_size", "=", "params", "[", "\"cell_size\"", "]", "*", "params", "[", "\"n_tile\"", "]", "tile", "="...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
aggregate_tile
Call the user defined aggregation function on each cell and combine into a single json object
lucid/scratch/atlas_pipeline/render_tile.py
def aggregate_tile(cells, ti, tj, aggregate, params, metadata, layout, summary): """ Call the user defined aggregation function on each cell and combine into a single json object """ tile = [] keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_json = aggregate(cells[key], params, metadata, layout, summary) tile.append({"aggregate":cell_json, "i":int(key[0]), "j":int(key[1])}) return tile
def aggregate_tile(cells, ti, tj, aggregate, params, metadata, layout, summary): """ Call the user defined aggregation function on each cell and combine into a single json object """ tile = [] keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end='\r') cell_json = aggregate(cells[key], params, metadata, layout, summary) tile.append({"aggregate":cell_json, "i":int(key[0]), "j":int(key[1])}) return tile
[ "Call", "the", "user", "defined", "aggregation", "function", "on", "each", "cell", "and", "combine", "into", "a", "single", "json", "object" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/render_tile.py#L54-L64
[ "def", "aggregate_tile", "(", "cells", ",", "ti", ",", "tj", ",", "aggregate", ",", "params", ",", "metadata", ",", "layout", ",", "summary", ")", ":", "tile", "=", "[", "]", "keys", "=", "cells", ".", "keys", "(", ")", "for", "i", ",", "key", "i...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
create_opengl_context
Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface.
lucid/misc/gl/glcontext.py
def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
[ "Create", "offscreen", "OpenGL", "context", "and", "make", "it", "current", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/glcontext.py#L79-L120
[ "def", "create_opengl_context", "(", "surface_size", "=", "(", "640", ",", "480", ")", ")", ":", "egl_display", "=", "egl", ".", "eglGetDisplay", "(", "egl", ".", "EGL_DEFAULT_DISPLAY", ")", "major", ",", "minor", "=", "egl", ".", "EGLint", "(", ")", ","...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
collapse_shape
Collapse `shape` outside the interval (`a`,`b`). This function collapses `shape` outside the interval (`a`,`b`) by multiplying the dimensions before `a` into a single dimension, and mutliplying the dimensions after `b` into a single dimension. Args: shape: a tensor shape a: integer, position in shape b: integer, position in shape Returns: The collapsed shape, represented as a list. Examples: [1, 2, 3, 4, 5], (a=0, b=2) => [1, 1, 2, 60] [1, 2, 3, 4, 5], (a=1, b=3) => [1, 2, 3, 20] [1, 2, 3, 4, 5], (a=2, b=4) => [2, 3, 4, 5 ] [1, 2, 3, 4, 5], (a=3, b=5) => [6, 4, 5, 1 ]
lucid/optvis/param/resize_bilinear_nd.py
def collapse_shape(shape, a, b): """Collapse `shape` outside the interval (`a`,`b`). This function collapses `shape` outside the interval (`a`,`b`) by multiplying the dimensions before `a` into a single dimension, and mutliplying the dimensions after `b` into a single dimension. Args: shape: a tensor shape a: integer, position in shape b: integer, position in shape Returns: The collapsed shape, represented as a list. Examples: [1, 2, 3, 4, 5], (a=0, b=2) => [1, 1, 2, 60] [1, 2, 3, 4, 5], (a=1, b=3) => [1, 2, 3, 20] [1, 2, 3, 4, 5], (a=2, b=4) => [2, 3, 4, 5 ] [1, 2, 3, 4, 5], (a=3, b=5) => [6, 4, 5, 1 ] """ shape = list(shape) if a < 0: n_pad = -a pad = n_pad * [1] return collapse_shape(pad + shape, a + n_pad, b + n_pad) if b > len(shape): n_pad = b - len(shape) pad = n_pad * [1] return collapse_shape(shape + pad, a, b) return [product(shape[:a])] + shape[a:b] + [product(shape[b:])]
def collapse_shape(shape, a, b): """Collapse `shape` outside the interval (`a`,`b`). This function collapses `shape` outside the interval (`a`,`b`) by multiplying the dimensions before `a` into a single dimension, and mutliplying the dimensions after `b` into a single dimension. Args: shape: a tensor shape a: integer, position in shape b: integer, position in shape Returns: The collapsed shape, represented as a list. Examples: [1, 2, 3, 4, 5], (a=0, b=2) => [1, 1, 2, 60] [1, 2, 3, 4, 5], (a=1, b=3) => [1, 2, 3, 20] [1, 2, 3, 4, 5], (a=2, b=4) => [2, 3, 4, 5 ] [1, 2, 3, 4, 5], (a=3, b=5) => [6, 4, 5, 1 ] """ shape = list(shape) if a < 0: n_pad = -a pad = n_pad * [1] return collapse_shape(pad + shape, a + n_pad, b + n_pad) if b > len(shape): n_pad = b - len(shape) pad = n_pad * [1] return collapse_shape(shape + pad, a, b) return [product(shape[:a])] + shape[a:b] + [product(shape[b:])]
[ "Collapse", "shape", "outside", "the", "interval", "(", "a", "b", ")", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/resize_bilinear_nd.py#L35-L65
[ "def", "collapse_shape", "(", "shape", ",", "a", ",", "b", ")", ":", "shape", "=", "list", "(", "shape", ")", "if", "a", "<", "0", ":", "n_pad", "=", "-", "a", "pad", "=", "n_pad", "*", "[", "1", "]", "return", "collapse_shape", "(", "pad", "+"...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
resize_bilinear_nd
Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor
lucid/optvis/param/resize_bilinear_nd.py
def resize_bilinear_nd(t, target_shape): """Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor """ shape = t.get_shape().as_list() target_shape = list(target_shape) assert len(shape) == len(target_shape) # We progressively move through the shape, resizing dimensions... d = 0 while d < len(shape): # If we don't need to deal with the next dimesnion, step over it if shape[d] == target_shape[d]: d += 1 continue # Otherwise, we'll resize the next two dimensions... # If d+2 doesn't need to be resized, this will just be a null op for it new_shape = shape[:] new_shape[d : d+2] = target_shape[d : d+2] # The helper collapse_shape() makes our shapes 4-dimensional with # the two dimesnions we want to deal with in the middle. shape_ = collapse_shape(shape, d, d+2) new_shape_ = collapse_shape(new_shape, d, d+2) # We can then reshape and use the 2d tf.image.resize_bilinear() on the # inner two dimesions. t_ = tf.reshape(t, shape_) t_ = tf.image.resize_bilinear(t_, new_shape_[1:3]) # And then reshape back to our uncollapsed version, having finished resizing # two more dimensions in our shape. t = tf.reshape(t_, new_shape) shape = new_shape d += 2 return t
def resize_bilinear_nd(t, target_shape): """Bilinear resizes a tensor t to have shape target_shape. This function bilinearly resizes a n-dimensional tensor by iteratively applying tf.image.resize_bilinear (which can only resize 2 dimensions). For bilinear interpolation, the order in which it is applied does not matter. Args: t: tensor to be resized target_shape: the desired shape of the new tensor. Returns: The resized tensor """ shape = t.get_shape().as_list() target_shape = list(target_shape) assert len(shape) == len(target_shape) # We progressively move through the shape, resizing dimensions... d = 0 while d < len(shape): # If we don't need to deal with the next dimesnion, step over it if shape[d] == target_shape[d]: d += 1 continue # Otherwise, we'll resize the next two dimensions... # If d+2 doesn't need to be resized, this will just be a null op for it new_shape = shape[:] new_shape[d : d+2] = target_shape[d : d+2] # The helper collapse_shape() makes our shapes 4-dimensional with # the two dimesnions we want to deal with in the middle. shape_ = collapse_shape(shape, d, d+2) new_shape_ = collapse_shape(new_shape, d, d+2) # We can then reshape and use the 2d tf.image.resize_bilinear() on the # inner two dimesions. t_ = tf.reshape(t, shape_) t_ = tf.image.resize_bilinear(t_, new_shape_[1:3]) # And then reshape back to our uncollapsed version, having finished resizing # two more dimensions in our shape. t = tf.reshape(t_, new_shape) shape = new_shape d += 2 return t
[ "Bilinear", "resizes", "a", "tensor", "t", "to", "have", "shape", "target_shape", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/resize_bilinear_nd.py#L68-L116
[ "def", "resize_bilinear_nd", "(", "t", ",", "target_shape", ")", ":", "shape", "=", "t", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "target_shape", "=", "list", "(", "target_shape", ")", "assert", "len", "(", "shape", ")", "==", "len", "(", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
get_aligned_activations
Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.
lucid/modelzoo/aligned_activations.py
def get_aligned_activations(layer): """Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.""" activation_paths = [ PATH_TEMPLATE.format( sanitize(layer.model_class.name), sanitize(layer.name), page ) for page in range(NUMBER_OF_PAGES) ] activations = np.vstack([load(path) for path in activation_paths]) assert np.all(np.isfinite(activations)) return activations
def get_aligned_activations(layer): """Downloads 100k activations of the specified layer sampled from iterating over ImageNet. Activations of all layers where sampled at the same spatial positions for each image, allowing the calculation of correlations.""" activation_paths = [ PATH_TEMPLATE.format( sanitize(layer.model_class.name), sanitize(layer.name), page ) for page in range(NUMBER_OF_PAGES) ] activations = np.vstack([load(path) for path in activation_paths]) assert np.all(np.isfinite(activations)) return activations
[ "Downloads", "100k", "activations", "of", "the", "specified", "layer", "sampled", "from", "iterating", "over", "ImageNet", ".", "Activations", "of", "all", "layers", "where", "sampled", "at", "the", "same", "spatial", "positions", "for", "each", "image", "allowi...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L35-L47
[ "def", "get_aligned_activations", "(", "layer", ")", ":", "activation_paths", "=", "[", "PATH_TEMPLATE", ".", "format", "(", "sanitize", "(", "layer", ".", "model_class", ".", "name", ")", ",", "sanitize", "(", "layer", ".", "name", ")", ",", "page", ")", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
layer_covariance
Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.
lucid/modelzoo/aligned_activations.py
def layer_covariance(layer1, layer2=None): """Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.""" layer2 = layer2 or layer1 act1, act2 = layer1.activations, layer2.activations num_datapoints = act1.shape[0] # cast to avoid numpy type promotion during division return np.matmul(act1.T, act2) / float(num_datapoints)
def layer_covariance(layer1, layer2=None): """Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.""" layer2 = layer2 or layer1 act1, act2 = layer1.activations, layer2.activations num_datapoints = act1.shape[0] # cast to avoid numpy type promotion during division return np.matmul(act1.T, act2) / float(num_datapoints)
[ "Computes", "the", "covariance", "matrix", "between", "the", "neurons", "of", "two", "layers", ".", "If", "only", "one", "layer", "is", "passed", "computes", "the", "symmetric", "covariance", "matrix", "of", "that", "layer", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L51-L57
[ "def", "layer_covariance", "(", "layer1", ",", "layer2", "=", "None", ")", ":", "layer2", "=", "layer2", "or", "layer1", "act1", ",", "act2", "=", "layer1", ".", "activations", ",", "layer2", ".", "activations", "num_datapoints", "=", "act1", ".", "shape",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
push_activations
Push activations from one model to another using prerecorded correlations
lucid/modelzoo/aligned_activations.py
def push_activations(activations, from_layer, to_layer): """Push activations from one model to another using prerecorded correlations""" inverse_covariance_matrix = layer_inverse_covariance(from_layer) activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T covariance_matrix = layer_covariance(from_layer, to_layer) activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix) return activation_recorrelated
def push_activations(activations, from_layer, to_layer): """Push activations from one model to another using prerecorded correlations""" inverse_covariance_matrix = layer_inverse_covariance(from_layer) activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T covariance_matrix = layer_covariance(from_layer, to_layer) activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix) return activation_recorrelated
[ "Push", "activations", "from", "one", "model", "to", "another", "using", "prerecorded", "correlations" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/aligned_activations.py#L66-L72
[ "def", "push_activations", "(", "activations", ",", "from_layer", ",", "to_layer", ")", ":", "inverse_covariance_matrix", "=", "layer_inverse_covariance", "(", "from_layer", ")", "activations_decorrelated", "=", "np", ".", "dot", "(", "inverse_covariance_matrix", ",", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
multi_interpolation_basis
A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i
lucid/recipes/image_interpolation_params.py
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3): """A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i """ N, M, W, Ch = n_objectives, n_interp_steps, width, channels const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch]) example_interps = [ sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)] example_basis = [] for n in range(N): col = [] for m in range(N): interp = example_interps[n] + example_interps[m][::-1] col.append(interp) example_basis.append(col) interp_basis = [] for n in range(N): col = [interp_basis[m][N-n][::-1] for m in range(n)] col.append(tf.zeros([M, W, W, 3])) for m in range(n+1, N): interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch]) for k in [1, 2]]) col.append(interp) interp_basis.append(col) basis = [] for n in range(N): col_ex = tf.stack(example_basis[n]) col_in = tf.stack(interp_basis[n]) basis.append(col_ex + col_in) basis = tf.stack(basis) return basis + const_term
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3): """A paramaterization for interpolating between each pair of N objectives. Sometimes you want to interpolate between optimizing a bunch of objectives, in a paramaterization that encourages images to align. Args: n_objectives: number of objectives you want interpolate between n_interp_steps: number of interpolation steps width: width of intepolated images channel Returns: A [n_objectives, n_objectives, n_interp_steps, width, width, channel] shaped tensor, t, where the final [width, width, channel] should be seen as images, such that the following properties hold: t[a, b] = t[b, a, ::-1] t[a, i, 0] = t[a, j, 0] for all i, j t[a, a, i] = t[a, a, j] for all i, j t[a, b, i] = t[b, a, -i] for all i """ N, M, W, Ch = n_objectives, n_interp_steps, width, channels const_term = sum([lowres_tensor([W, W, Ch], [W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch]) example_interps = [ sum([lowres_tensor([M, W, W, Ch], [2, W//k, W//k, Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)] example_basis = [] for n in range(N): col = [] for m in range(N): interp = example_interps[n] + example_interps[m][::-1] col.append(interp) example_basis.append(col) interp_basis = [] for n in range(N): col = [interp_basis[m][N-n][::-1] for m in range(n)] col.append(tf.zeros([M, W, W, 3])) for m in range(n+1, N): interp = sum([lowres_tensor([M, W, W, Ch], [M, W//k, W//k, Ch]) for k in [1, 2]]) col.append(interp) interp_basis.append(col) basis = [] for n in range(N): col_ex = tf.stack(example_basis[n]) col_in = tf.stack(interp_basis[n]) basis.append(col_ex + col_in) basis = tf.stack(basis) return basis + const_term
[ "A", "paramaterization", "for", "interpolating", "between", "each", "pair", "of", "N", "objectives", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/recipes/image_interpolation_params.py#L22-L82
[ "def", "multi_interpolation_basis", "(", "n_objectives", "=", "6", ",", "n_interp_steps", "=", "5", ",", "width", "=", "128", ",", "channels", "=", "3", ")", ":", "N", ",", "M", ",", "W", ",", "Ch", "=", "n_objectives", ",", "n_interp_steps", ",", "wid...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
register_to_random_name
Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to.
lucid/optvis/overrides/gradient_override.py
def register_to_random_name(grad_f): """Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to. """ grad_f_name = grad_f.__name__ + "_" + str(uuid.uuid4()) tf.RegisterGradient(grad_f_name)(grad_f) return grad_f_name
def register_to_random_name(grad_f): """Register a gradient function to a random string. In order to use a custom gradient in TensorFlow, it must be registered to a string. This is both a hassle, and -- because only one function can every be registered to a string -- annoying to iterate on in an interactive environemnt. This function registers a function to a unique random string of the form: {FUNCTION_NAME}_{RANDOM_SALT} And then returns the random string. This is a helper in creating more convenient gradient overrides. Args: grad_f: gradient function to register. Should map (op, grad) -> grad(s) Returns: String that gradient function was registered to. """ grad_f_name = grad_f.__name__ + "_" + str(uuid.uuid4()) tf.RegisterGradient(grad_f_name)(grad_f) return grad_f_name
[ "Register", "a", "gradient", "function", "to", "a", "random", "string", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/overrides/gradient_override.py#L50-L73
[ "def", "register_to_random_name", "(", "grad_f", ")", ":", "grad_f_name", "=", "grad_f", ".", "__name__", "+", "\"_\"", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "tf", ".", "RegisterGradient", "(", "grad_f_name", ")", "(", "grad_f", ")", "ret...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
gradient_override_map
Convenience wrapper for graph.gradient_override_map(). This functions provides two conveniences over normal tensorflow gradient overrides: it auomatically uses the default graph instead of you needing to find the graph, and it automatically Example: def _foo_grad_alt(op, grad): ... with gradient_override({"Foo": _foo_grad_alt}): Args: override_dict: A dictionary describing how to override the gradient. keys: strings correponding to the op type that should have their gradient overriden. values: functions or strings registered to gradient functions
lucid/optvis/overrides/gradient_override.py
def gradient_override_map(override_dict): """Convenience wrapper for graph.gradient_override_map(). This functions provides two conveniences over normal tensorflow gradient overrides: it auomatically uses the default graph instead of you needing to find the graph, and it automatically Example: def _foo_grad_alt(op, grad): ... with gradient_override({"Foo": _foo_grad_alt}): Args: override_dict: A dictionary describing how to override the gradient. keys: strings correponding to the op type that should have their gradient overriden. values: functions or strings registered to gradient functions """ override_dict_by_name = {} for (op_name, grad_f) in override_dict.items(): if isinstance(grad_f, str): override_dict_by_name[op_name] = grad_f else: override_dict_by_name[op_name] = register_to_random_name(grad_f) with tf.get_default_graph().gradient_override_map(override_dict_by_name): yield
def gradient_override_map(override_dict): """Convenience wrapper for graph.gradient_override_map(). This functions provides two conveniences over normal tensorflow gradient overrides: it auomatically uses the default graph instead of you needing to find the graph, and it automatically Example: def _foo_grad_alt(op, grad): ... with gradient_override({"Foo": _foo_grad_alt}): Args: override_dict: A dictionary describing how to override the gradient. keys: strings correponding to the op type that should have their gradient overriden. values: functions or strings registered to gradient functions """ override_dict_by_name = {} for (op_name, grad_f) in override_dict.items(): if isinstance(grad_f, str): override_dict_by_name[op_name] = grad_f else: override_dict_by_name[op_name] = register_to_random_name(grad_f) with tf.get_default_graph().gradient_override_map(override_dict_by_name): yield
[ "Convenience", "wrapper", "for", "graph", ".", "gradient_override_map", "()", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/overrides/gradient_override.py#L77-L104
[ "def", "gradient_override_map", "(", "override_dict", ")", ":", "override_dict_by_name", "=", "{", "}", "for", "(", "op_name", ",", "grad_f", ")", "in", "override_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "grad_f", ",", "str", ")", ":", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
use_gradient
Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of.
lucid/optvis/overrides/gradient_override.py
def use_gradient(grad_f): """Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of. """ grad_f_name = register_to_random_name(grad_f) def function_wrapper(f): def inner(*inputs): # TensorFlow only supports (as of writing) overriding the gradient of # individual ops. In order to override the gardient of `f`, we need to # somehow make it appear to be an individual TensorFlow op. # # Our solution is to create a PyFunc that mimics `f`. # # In particular, we construct a graph for `f` and run it, then use a # stateful PyFunc to stash it's results in Python. Then we have another # PyFunc mimic it by taking all the same inputs and returning the stashed # output. # # I wish we could do this without PyFunc, but I don't see a way to have # it be fully general. state = {"out_value": None} # First, we need to run `f` and store it's output. out = f(*inputs) def store_out(out_value): """Store the value of out to a python variable.""" state["out_value"] = out_value store_name = "store_" + f.__name__ store = tf.py_func(store_out, [out], (), stateful=True, name=store_name) # Next, we create the mock function, with an overriden gradient. # Note that we need to make sure store gets evaluated before the mock # runs. def mock_f(*inputs): """Mimic f by retrieving the stored value of out.""" return state["out_value"] with tf.control_dependencies([store]): with gradient_override_map({"PyFunc": grad_f_name}): mock_name = "mock_" + f.__name__ mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name) mock_out.set_shape(out.get_shape()) # Finally, we can return the mock. 
return mock_out return inner return function_wrapper
def use_gradient(grad_f): """Decorator for easily setting custom gradients for TensorFlow functions. * DO NOT use this function if you need to serialize your graph. * This function will cause the decorated function to run slower. Example: def _foo_grad(op, grad): ... @use_gradient(_foo_grad) def foo(x1, x2, x3): ... Args: grad_f: function to use as gradient. Returns: A decorator to apply to the function you wish to override the gradient of. """ grad_f_name = register_to_random_name(grad_f) def function_wrapper(f): def inner(*inputs): # TensorFlow only supports (as of writing) overriding the gradient of # individual ops. In order to override the gardient of `f`, we need to # somehow make it appear to be an individual TensorFlow op. # # Our solution is to create a PyFunc that mimics `f`. # # In particular, we construct a graph for `f` and run it, then use a # stateful PyFunc to stash it's results in Python. Then we have another # PyFunc mimic it by taking all the same inputs and returning the stashed # output. # # I wish we could do this without PyFunc, but I don't see a way to have # it be fully general. state = {"out_value": None} # First, we need to run `f` and store it's output. out = f(*inputs) def store_out(out_value): """Store the value of out to a python variable.""" state["out_value"] = out_value store_name = "store_" + f.__name__ store = tf.py_func(store_out, [out], (), stateful=True, name=store_name) # Next, we create the mock function, with an overriden gradient. # Note that we need to make sure store gets evaluated before the mock # runs. def mock_f(*inputs): """Mimic f by retrieving the stored value of out.""" return state["out_value"] with tf.control_dependencies([store]): with gradient_override_map({"PyFunc": grad_f_name}): mock_name = "mock_" + f.__name__ mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name) mock_out.set_shape(out.get_shape()) # Finally, we can return the mock. 
return mock_out return inner return function_wrapper
[ "Decorator", "for", "easily", "setting", "custom", "gradients", "for", "TensorFlow", "functions", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/overrides/gradient_override.py#L107-L178
[ "def", "use_gradient", "(", "grad_f", ")", ":", "grad_f_name", "=", "register_to_random_name", "(", "grad_f", ")", "def", "function_wrapper", "(", "f", ")", ":", "def", "inner", "(", "*", "inputs", ")", ":", "# TensorFlow only supports (as of writing) overriding the...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
pixel_image
A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument.
lucid/optvis/param/spatial.py
def pixel_image(shape, sd=None, init_val=None): """A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument. """ if sd is not None and init_val is not None: warnings.warn( "`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value." ) sd = sd or 0.01 init_val = init_val or np.random.normal(size=shape, scale=sd).astype(np.float32) return tf.Variable(init_val)
def pixel_image(shape, sd=None, init_val=None): """A naive, pixel-based image parameterization. Defaults to a random initialization, but can take a supplied init_val argument instead. Args: shape: shape of resulting image, [batch, width, height, channels]. sd: standard deviation of param initialization noise. init_val: an initial value to use instead of a random initialization. Needs to have the same shape as the supplied shape argument. Returns: tensor with shape from first argument. """ if sd is not None and init_val is not None: warnings.warn( "`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value." ) sd = sd or 0.01 init_val = init_val or np.random.normal(size=shape, scale=sd).astype(np.float32) return tf.Variable(init_val)
[ "A", "naive", "pixel", "-", "based", "image", "parameterization", ".", "Defaults", "to", "a", "random", "initialization", "but", "can", "take", "a", "supplied", "init_val", "argument", "instead", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L24-L45
[ "def", "pixel_image", "(", "shape", ",", "sd", "=", "None", ",", "init_val", "=", "None", ")", ":", "if", "sd", "is", "not", "None", "and", "init_val", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"`pixel_image` received both an initial value an...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
rfft2d_freqs
Computes 2D spectrum frequencies.
lucid/optvis/param/spatial.py
def rfft2d_freqs(h, w): """Computes 2D spectrum frequencies.""" fy = np.fft.fftfreq(h)[:, None] # when we have an odd input dimension we need to keep one additional # frequency and later cut off 1 pixel if w % 2 == 1: fx = np.fft.fftfreq(w)[: w // 2 + 2] else: fx = np.fft.fftfreq(w)[: w // 2 + 1] return np.sqrt(fx * fx + fy * fy)
def rfft2d_freqs(h, w): """Computes 2D spectrum frequencies.""" fy = np.fft.fftfreq(h)[:, None] # when we have an odd input dimension we need to keep one additional # frequency and later cut off 1 pixel if w % 2 == 1: fx = np.fft.fftfreq(w)[: w // 2 + 2] else: fx = np.fft.fftfreq(w)[: w // 2 + 1] return np.sqrt(fx * fx + fy * fy)
[ "Computes", "2D", "spectrum", "frequencies", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L48-L58
[ "def", "rfft2d_freqs", "(", "h", ",", "w", ")", ":", "fy", "=", "np", ".", "fft", ".", "fftfreq", "(", "h", ")", "[", ":", ",", "None", "]", "# when we have an odd input dimension we need to keep one additional", "# frequency and later cut off 1 pixel", "if", "w",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
fft_image
An image paramaterization using 2D Fourier coefficients.
lucid/optvis/param/spatial.py
def fft_image(shape, sd=None, decay_power=1): """An image paramaterization using 2D Fourier coefficients.""" sd = sd or 0.01 batch, h, w, ch = shape freqs = rfft2d_freqs(h, w) init_val_size = (2, ch) + freqs.shape images = [] for _ in range(batch): # Create a random variable holding the actual 2D fourier coefficients init_val = np.random.normal(size=init_val_size, scale=sd).astype(np.float32) spectrum_real_imag_t = tf.Variable(init_val) spectrum_t = tf.complex(spectrum_real_imag_t[0], spectrum_real_imag_t[1]) # Scale the spectrum. First normalize energy, then scale by the square-root # of the number of pixels to get a unitary transformation. # This allows to use similar leanring rates to pixel-wise optimisation. scale = 1.0 / np.maximum(freqs, 1.0 / max(w, h)) ** decay_power scale *= np.sqrt(w * h) scaled_spectrum_t = scale * spectrum_t # convert complex scaled spectrum to shape (h, w, ch) image tensor # needs to transpose because irfft2d returns channels first image_t = tf.transpose(tf.spectral.irfft2d(scaled_spectrum_t), (1, 2, 0)) # in case of odd spatial input dimensions we need to crop image_t = image_t[:h, :w, :ch] images.append(image_t) batched_image_t = tf.stack(images) / 4.0 # TODO: is that a magic constant? return batched_image_t
def fft_image(shape, sd=None, decay_power=1): """An image paramaterization using 2D Fourier coefficients.""" sd = sd or 0.01 batch, h, w, ch = shape freqs = rfft2d_freqs(h, w) init_val_size = (2, ch) + freqs.shape images = [] for _ in range(batch): # Create a random variable holding the actual 2D fourier coefficients init_val = np.random.normal(size=init_val_size, scale=sd).astype(np.float32) spectrum_real_imag_t = tf.Variable(init_val) spectrum_t = tf.complex(spectrum_real_imag_t[0], spectrum_real_imag_t[1]) # Scale the spectrum. First normalize energy, then scale by the square-root # of the number of pixels to get a unitary transformation. # This allows to use similar leanring rates to pixel-wise optimisation. scale = 1.0 / np.maximum(freqs, 1.0 / max(w, h)) ** decay_power scale *= np.sqrt(w * h) scaled_spectrum_t = scale * spectrum_t # convert complex scaled spectrum to shape (h, w, ch) image tensor # needs to transpose because irfft2d returns channels first image_t = tf.transpose(tf.spectral.irfft2d(scaled_spectrum_t), (1, 2, 0)) # in case of odd spatial input dimensions we need to crop image_t = image_t[:h, :w, :ch] images.append(image_t) batched_image_t = tf.stack(images) / 4.0 # TODO: is that a magic constant? return batched_image_t
[ "An", "image", "paramaterization", "using", "2D", "Fourier", "coefficients", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L61-L93
[ "def", "fft_image", "(", "shape", ",", "sd", "=", "None", ",", "decay_power", "=", "1", ")", ":", "sd", "=", "sd", "or", "0.01", "batch", ",", "h", ",", "w", ",", "ch", "=", "shape", "freqs", "=", "rfft2d_freqs", "(", "h", ",", "w", ")", "init_...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
laplacian_pyramid_image
Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument.
lucid/optvis/param/spatial.py
def laplacian_pyramid_image(shape, n_levels=4, sd=None): """Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument. """ batch_dims = shape[:-3] w, h, ch = shape[-3:] pyramid = 0 for n in range(n_levels): k = 2 ** n pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd) return pyramid
def laplacian_pyramid_image(shape, n_levels=4, sd=None): """Simple laplacian pyramid paramaterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyarmid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument. """ batch_dims = shape[:-3] w, h, ch = shape[-3:] pyramid = 0 for n in range(n_levels): k = 2 ** n pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd) return pyramid
[ "Simple", "laplacian", "pyramid", "paramaterization", "of", "an", "image", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L96-L115
[ "def", "laplacian_pyramid_image", "(", "shape", ",", "n_levels", "=", "4", ",", "sd", "=", "None", ")", ":", "batch_dims", "=", "shape", "[", ":", "-", "3", "]", "w", ",", "h", ",", "ch", "=", "shape", "[", "-", "3", ":", "]", "pyramid", "=", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
bilinearly_sampled_image
Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values.
lucid/optvis/param/spatial.py
def bilinearly_sampled_image(texture, uv): """Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values. """ h, w = tf.unstack(tf.shape(texture)[:2]) u, v = tf.split(uv, 2, axis=-1) v = 1.0 - v # vertical flip to match GL convention u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5 u0, u1 = tf.floor(u), tf.ceil(u) v0, v1 = tf.floor(v), tf.ceil(v) uf, vf = u - u0, v - v0 u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1]) def sample(u, v): vu = tf.concat([v % h, u % w], axis=-1) return tf.gather_nd(texture, vu) s00, s01 = sample(u0, v0), sample(u0, v1) s10, s11 = sample(u1, v0), sample(u1, v1) s0 = s00 * (1.0 - vf) + s01 * vf s1 = s10 * (1.0 - vf) + s11 * vf s = s0 * (1.0 - uf) + s1 * uf return s
def bilinearly_sampled_image(texture, uv): """Build bilinear texture sampling graph. Coordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR interpolation modes. Args: texture: [tex_h, tex_w, channel_n] tensor. uv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1] Returns: [frame_h, frame_h, channel_n] tensor with per-pixel sampled values. """ h, w = tf.unstack(tf.shape(texture)[:2]) u, v = tf.split(uv, 2, axis=-1) v = 1.0 - v # vertical flip to match GL convention u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5 u0, u1 = tf.floor(u), tf.ceil(u) v0, v1 = tf.floor(v), tf.ceil(v) uf, vf = u - u0, v - v0 u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1]) def sample(u, v): vu = tf.concat([v % h, u % w], axis=-1) return tf.gather_nd(texture, vu) s00, s01 = sample(u0, v0), sample(u0, v1) s10, s11 = sample(u1, v0), sample(u1, v1) s0 = s00 * (1.0 - vf) + s01 * vf s1 = s10 * (1.0 - vf) + s11 * vf s = s0 * (1.0 - uf) + s1 * uf return s
[ "Build", "bilinear", "texture", "sampling", "graph", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/spatial.py#L118-L149
[ "def", "bilinearly_sampled_image", "(", "texture", ",", "uv", ")", ":", "h", ",", "w", "=", "tf", ".", "unstack", "(", "tf", ".", "shape", "(", "texture", ")", "[", ":", "2", "]", ")", "u", ",", "v", "=", "tf", ".", "split", "(", "uv", ",", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_linear_decorelate_color
Multiply input by sqrt of emperical (ImageNet) color correlation matrix. If you interpret t's innermost dimension as describing colors in a decorrelated version of the color space (which is a very natural way to describe colors -- see discussion in Feature Visualization article) the way to map back to normal colors is multiply the square root of your color correlations.
lucid/optvis/param/color.py
def _linear_decorelate_color(t): """Multiply input by sqrt of emperical (ImageNet) color correlation matrix. If you interpret t's innermost dimension as describing colors in a decorrelated version of the color space (which is a very natural way to describe colors -- see discussion in Feature Visualization article) the way to map back to normal colors is multiply the square root of your color correlations. """ # check that inner dimension is 3? t_flat = tf.reshape(t, [-1, 3]) color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt t_flat = tf.matmul(t_flat, color_correlation_normalized.T) t = tf.reshape(t_flat, tf.shape(t)) return t
def _linear_decorelate_color(t): """Multiply input by sqrt of emperical (ImageNet) color correlation matrix. If you interpret t's innermost dimension as describing colors in a decorrelated version of the color space (which is a very natural way to describe colors -- see discussion in Feature Visualization article) the way to map back to normal colors is multiply the square root of your color correlations. """ # check that inner dimension is 3? t_flat = tf.reshape(t, [-1, 3]) color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt t_flat = tf.matmul(t_flat, color_correlation_normalized.T) t = tf.reshape(t_flat, tf.shape(t)) return t
[ "Multiply", "input", "by", "sqrt", "of", "emperical", "(", "ImageNet", ")", "color", "correlation", "matrix", ".", "If", "you", "interpret", "t", "s", "innermost", "dimension", "as", "describing", "colors", "in", "a", "decorrelated", "version", "of", "the", ...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/color.py#L32-L46
[ "def", "_linear_decorelate_color", "(", "t", ")", ":", "# check that inner dimension is 3?", "t_flat", "=", "tf", ".", "reshape", "(", "t", ",", "[", "-", "1", ",", "3", "]", ")", "color_correlation_normalized", "=", "color_correlation_svd_sqrt", "/", "max_norm_sv...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
to_valid_rgb
Transform inner dimension of t to valid rgb colors. In practice this consistes of two parts: (1) If requested, transform the colors from a decorrelated color space to RGB. (2) Constrain the color channels to be in [0,1], either using a sigmoid function or clipping. Args: t: input tensor, innermost dimension will be interpreted as colors and transformed/constrained. decorrelate: should the input tensor's colors be interpreted as coming from a whitened space or not? sigmoid: should the colors be constrained using sigmoid (if True) or clipping (if False). Returns: t with the innermost dimension transformed.
lucid/optvis/param/color.py
def to_valid_rgb(t, decorrelate=False, sigmoid=True): """Transform inner dimension of t to valid rgb colors. In practice this consistes of two parts: (1) If requested, transform the colors from a decorrelated color space to RGB. (2) Constrain the color channels to be in [0,1], either using a sigmoid function or clipping. Args: t: input tensor, innermost dimension will be interpreted as colors and transformed/constrained. decorrelate: should the input tensor's colors be interpreted as coming from a whitened space or not? sigmoid: should the colors be constrained using sigmoid (if True) or clipping (if False). Returns: t with the innermost dimension transformed. """ if decorrelate: t = _linear_decorelate_color(t) if decorrelate and not sigmoid: t += color_mean if sigmoid: return tf.nn.sigmoid(t) else: return constrain_L_inf(2*t-1)/2 + 0.5
def to_valid_rgb(t, decorrelate=False, sigmoid=True): """Transform inner dimension of t to valid rgb colors. In practice this consistes of two parts: (1) If requested, transform the colors from a decorrelated color space to RGB. (2) Constrain the color channels to be in [0,1], either using a sigmoid function or clipping. Args: t: input tensor, innermost dimension will be interpreted as colors and transformed/constrained. decorrelate: should the input tensor's colors be interpreted as coming from a whitened space or not? sigmoid: should the colors be constrained using sigmoid (if True) or clipping (if False). Returns: t with the innermost dimension transformed. """ if decorrelate: t = _linear_decorelate_color(t) if decorrelate and not sigmoid: t += color_mean if sigmoid: return tf.nn.sigmoid(t) else: return constrain_L_inf(2*t-1)/2 + 0.5
[ "Transform", "inner", "dimension", "of", "t", "to", "valid", "rgb", "colors", ".", "In", "practice", "this", "consistes", "of", "two", "parts", ":", "(", "1", ")", "If", "requested", "transform", "the", "colors", "from", "a", "decorrelated", "color", "spac...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/color.py#L49-L75
[ "def", "to_valid_rgb", "(", "t", ",", "decorrelate", "=", "False", ",", "sigmoid", "=", "True", ")", ":", "if", "decorrelate", ":", "t", "=", "_linear_decorelate_color", "(", "t", ")", "if", "decorrelate", "and", "not", "sigmoid", ":", "t", "+=", "color_...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_populate_inception_bottlenecks
Add Inception bottlenecks and their pre-Relu versions to the graph.
lucid/modelzoo/other_models/InceptionV1.py
def _populate_inception_bottlenecks(scope): """Add Inception bottlenecks and their pre-Relu versions to the graph.""" graph = tf.get_default_graph() for op in graph.get_operations(): if op.name.startswith(scope+'/') and 'Concat' in op.type: name = op.name.split('/')[1] pre_relus = [] for tower in op.inputs[1:]: if tower.op.type == 'Relu': tower = tower.op.inputs[0] pre_relus.append(tower) concat_name = scope + '/' + name + '_pre_relu' _ = tf.concat(pre_relus, -1, name=concat_name)
def _populate_inception_bottlenecks(scope): """Add Inception bottlenecks and their pre-Relu versions to the graph.""" graph = tf.get_default_graph() for op in graph.get_operations(): if op.name.startswith(scope+'/') and 'Concat' in op.type: name = op.name.split('/')[1] pre_relus = [] for tower in op.inputs[1:]: if tower.op.type == 'Relu': tower = tower.op.inputs[0] pre_relus.append(tower) concat_name = scope + '/' + name + '_pre_relu' _ = tf.concat(pre_relus, -1, name=concat_name)
[ "Add", "Inception", "bottlenecks", "and", "their", "pre", "-", "Relu", "versions", "to", "the", "graph", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/modelzoo/other_models/InceptionV1.py#L22-L34
[ "def", "_populate_inception_bottlenecks", "(", "scope", ")", ":", "graph", "=", "tf", ".", "get_default_graph", "(", ")", "for", "op", "in", "graph", ".", "get_operations", "(", ")", ":", "if", "op", ".", "name", ".", "startswith", "(", "scope", "+", "'/...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
wrap_objective
Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python.
lucid/optvis/objectives.py
def wrap_objective(f, *args, **kwds): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python. """ objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str return Objective(objective_func, objective_name, description)
def wrap_objective(f, *args, **kwds): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Obejective factory: (args) => Objective while perserving function name, arg info, docs... for interactive python. """ objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str return Objective(objective_func, objective_name, description)
[ "Decorator", "for", "creating", "Objective", "factories", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L117-L129
[ "def", "wrap_objective", "(", "f", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "objective_func", "=", "f", "(", "*", "args", ",", "*", "*", "kwds", ")", "objective_name", "=", "f", ".", "__name__", "args_str", "=", "\" [\"", "+", "\", \"", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
neuron
Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+
lucid/optvis/objectives.py
def neuron(layer_name, channel_n, x=None, y=None, batch=None): """Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+ """ def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return layer[:, x_, y_, channel_n] else: return layer[batch, x_, y_, channel_n] return inner
def neuron(layer_name, channel_n, x=None, y=None, batch=None): """Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+ """ def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return layer[:, x_, y_, channel_n] else: return layer[batch, x_, y_, channel_n] return inner
[ "Visualize", "a", "single", "neuron", "of", "a", "single", "channel", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L133-L161
[ "def", "neuron", "(", "layer_name", ",", "channel_n", ",", "x", "=", "None", ",", "y", "=", "None", ",", "batch", "=", "None", ")", ":", "def", "inner", "(", "T", ")", ":", "layer", "=", "T", "(", "layer_name", ")", "shape", "=", "tf", ".", "sh...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
channel
Visualize a single channel
lucid/optvis/objectives.py
def channel(layer, n_channel, batch=None): """Visualize a single channel""" if batch is None: return lambda T: tf.reduce_mean(T(layer)[..., n_channel]) else: return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
def channel(layer, n_channel, batch=None): """Visualize a single channel""" if batch is None: return lambda T: tf.reduce_mean(T(layer)[..., n_channel]) else: return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
[ "Visualize", "a", "single", "channel" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L165-L170
[ "def", "channel", "(", "layer", ",", "n_channel", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "...", ",", "n_channel", "]", ")", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
direction
Visualize a direction
lucid/optvis/objectives.py
def direction(layer, vec, batch=None, cossim_pow=0): """Visualize a direction""" if batch is None: vec = vec[None, None, None] return lambda T: _dot_cossim(T(layer), vec) else: vec = vec[None, None] return lambda T: _dot_cossim(T(layer)[batch], vec)
def direction(layer, vec, batch=None, cossim_pow=0): """Visualize a direction""" if batch is None: vec = vec[None, None, None] return lambda T: _dot_cossim(T(layer), vec) else: vec = vec[None, None] return lambda T: _dot_cossim(T(layer)[batch], vec)
[ "Visualize", "a", "direction" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L189-L196
[ "def", "direction", "(", "layer", ",", "vec", ",", "batch", "=", "None", ",", "cossim_pow", "=", "0", ")", ":", "if", "batch", "is", "None", ":", "vec", "=", "vec", "[", "None", ",", "None", ",", "None", "]", "return", "lambda", "T", ":", "_dot_c...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
direction_neuron
Visualize a single (x, y) position along the given direction
lucid/optvis/objectives.py
def direction_neuron(layer_name, vec, batch=None, x=None, y=None, cossim_pow=0): """Visualize a single (x, y) position along the given direction""" def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return _dot_cossim(layer[:, x_, y_], vec[None], cossim_pow=cossim_pow) else: return _dot_cossim(layer[batch, x_, y_], vec, cossim_pow=cossim_pow) return inner
def direction_neuron(layer_name, vec, batch=None, x=None, y=None, cossim_pow=0): """Visualize a single (x, y) position along the given direction""" def inner(T): layer = T(layer_name) shape = tf.shape(layer) x_ = shape[1] // 2 if x is None else x y_ = shape[2] // 2 if y is None else y if batch is None: return _dot_cossim(layer[:, x_, y_], vec[None], cossim_pow=cossim_pow) else: return _dot_cossim(layer[batch, x_, y_], vec, cossim_pow=cossim_pow) return inner
[ "Visualize", "a", "single", "(", "x", "y", ")", "position", "along", "the", "given", "direction" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L200-L211
[ "def", "direction_neuron", "(", "layer_name", ",", "vec", ",", "batch", "=", "None", ",", "x", "=", "None", ",", "y", "=", "None", ",", "cossim_pow", "=", "0", ")", ":", "def", "inner", "(", "T", ")", ":", "layer", "=", "T", "(", "layer_name", ")...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
direction_cossim
Visualize a direction (cossine similarity)
lucid/optvis/objectives.py
def direction_cossim(layer, vec, batch=None): """Visualize a direction (cossine similarity)""" def inner(T): act_mags = tf.sqrt(tf.reduce_sum(T(layer)**2, -1, keepdims=True)) vec_mag = tf.sqrt(tf.reduce_sum(vec**2)) mags = act_mags * vec_mag if batch is None: return tf.reduce_mean(T(layer) * vec.reshape([1, 1, 1, -1]) / mags) else: return tf.reduce_mean(T(layer)[batch] * vec.reshape([1, 1, -1]) / mags) return inner
def direction_cossim(layer, vec, batch=None): """Visualize a direction (cossine similarity)""" def inner(T): act_mags = tf.sqrt(tf.reduce_sum(T(layer)**2, -1, keepdims=True)) vec_mag = tf.sqrt(tf.reduce_sum(vec**2)) mags = act_mags * vec_mag if batch is None: return tf.reduce_mean(T(layer) * vec.reshape([1, 1, 1, -1]) / mags) else: return tf.reduce_mean(T(layer)[batch] * vec.reshape([1, 1, -1]) / mags) return inner
[ "Visualize", "a", "direction", "(", "cossine", "similarity", ")" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L214-L224
[ "def", "direction_cossim", "(", "layer", ",", "vec", ",", "batch", "=", "None", ")", ":", "def", "inner", "(", "T", ")", ":", "act_mags", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "T", "(", "layer", ")", "**", "2", ",", "-", "1...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
L1
L1 norm of layer. Generally used as penalty.
lucid/optvis/objectives.py
def L1(layer="input", constant=0, batch=None): """L1 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant)) else: return lambda T: tf.reduce_sum(tf.abs(T(layer)[batch] - constant))
def L1(layer="input", constant=0, batch=None): """L1 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.reduce_sum(tf.abs(T(layer) - constant)) else: return lambda T: tf.reduce_sum(tf.abs(T(layer)[batch] - constant))
[ "L1", "norm", "of", "layer", ".", "Generally", "used", "as", "penalty", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L247-L252
[ "def", "L1", "(", "layer", "=", "\"input\"", ",", "constant", "=", "0", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_sum", "(", "tf", ".", "abs", "(", "T", "(", "layer", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
L2
L2 norm of layer. Generally used as penalty.
lucid/optvis/objectives.py
def L2(layer="input", constant=0, epsilon=1e-6, batch=None): """L2 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2)) else: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer)[batch] - constant) ** 2))
def L2(layer="input", constant=0, epsilon=1e-6, batch=None): """L2 norm of layer. Generally used as penalty.""" if batch is None: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer) - constant) ** 2)) else: return lambda T: tf.sqrt(epsilon + tf.reduce_sum((T(layer)[batch] - constant) ** 2))
[ "L2", "norm", "of", "layer", ".", "Generally", "used", "as", "penalty", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L256-L261
[ "def", "L2", "(", "layer", "=", "\"input\"", ",", "constant", "=", "0", ",", "epsilon", "=", "1e-6", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "sqrt", "(", "epsilon", "+", "tf"...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
blur_input_each_step
Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015.
lucid/optvis/objectives.py
def blur_input_each_step(): """Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015. """ def inner(T): t_input = T("input") t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner
def blur_input_each_step(): """Minimizing this objective is equivelant to blurring input each step. Optimizing (-k)*blur_input_each_step() is equivelant to: input <- (1-k)*input + k*blur(input) An operation that was used in early feature visualization work. See Nguyen, et al., 2015. """ def inner(T): t_input = T("input") t_input_blurred = tf.stop_gradient(_tf_blur(t_input)) return 0.5*tf.reduce_sum((t_input - t_input_blurred)**2) return inner
[ "Minimizing", "this", "objective", "is", "equivelant", "to", "blurring", "input", "each", "step", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L277-L291
[ "def", "blur_input_each_step", "(", ")", ":", "def", "inner", "(", "T", ")", ":", "t_input", "=", "T", "(", "\"input\"", ")", "t_input_blurred", "=", "tf", ".", "stop_gradient", "(", "_tf_blur", "(", "t_input", ")", ")", "return", "0.5", "*", "tf", "."...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
channel_interpolate
Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective
lucid/optvis/objectives.py
def channel_interpolate(layer1, n_channel1, layer2, n_channel2): """Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective """ def inner(T): batch_n = T(layer1).get_shape().as_list()[0] arr1 = T(layer1)[..., n_channel1] arr2 = T(layer2)[..., n_channel2] weights = (np.arange(batch_n)/float(batch_n-1)) S = 0 for n in range(batch_n): S += (1-weights[n]) * tf.reduce_mean(arr1[n]) S += weights[n] * tf.reduce_mean(arr2[n]) return S return inner
def channel_interpolate(layer1, n_channel1, layer2, n_channel2): """Interpolate between layer1, n_channel1 and layer2, n_channel2. Optimize for a convex combination of layer1, n_channel1 and layer2, n_channel2, transitioning across the batch. Args: layer1: layer to optimize 100% at batch=0. n_channel1: neuron index to optimize 100% at batch=0. layer2: layer to optimize 100% at batch=N. n_channel2: neuron index to optimize 100% at batch=N. Returns: Objective """ def inner(T): batch_n = T(layer1).get_shape().as_list()[0] arr1 = T(layer1)[..., n_channel1] arr2 = T(layer2)[..., n_channel2] weights = (np.arange(batch_n)/float(batch_n-1)) S = 0 for n in range(batch_n): S += (1-weights[n]) * tf.reduce_mean(arr1[n]) S += weights[n] * tf.reduce_mean(arr2[n]) return S return inner
[ "Interpolate", "between", "layer1", "n_channel1", "and", "layer2", "n_channel2", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L303-L328
[ "def", "channel_interpolate", "(", "layer1", ",", "n_channel1", ",", "layer2", ",", "n_channel2", ")", ":", "def", "inner", "(", "T", ")", ":", "batch_n", "=", "T", "(", "layer1", ")", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
penalize_boundary_complexity
Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective.
lucid/optvis/objectives.py
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5): """Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective. """ def inner(T): arr = T("input") # print shp if mask is None: mask_ = np.ones(shp) mask_[:, w:-w, w:-w] = 0 else: mask_ = mask blur = _tf_blur(arr, w=5) diffs = (blur-arr)**2 diffs += 0.8*(arr-C)**2 return -tf.reduce_sum(diffs*mask_) return inner
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5): """Encourage the boundaries of an image to have less variation and of color C. Args: shp: shape of T("input") because this may not be known. w: width of boundary to penalize. Ignored if mask is set. mask: mask describing what area should be penalized. Returns: Objective. """ def inner(T): arr = T("input") # print shp if mask is None: mask_ = np.ones(shp) mask_[:, w:-w, w:-w] = 0 else: mask_ = mask blur = _tf_blur(arr, w=5) diffs = (blur-arr)**2 diffs += 0.8*(arr-C)**2 return -tf.reduce_sum(diffs*mask_) return inner
[ "Encourage", "the", "boundaries", "of", "an", "image", "to", "have", "less", "variation", "and", "of", "color", "C", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L332-L358
[ "def", "penalize_boundary_complexity", "(", "shp", ",", "w", "=", "20", ",", "mask", "=", "None", ",", "C", "=", "0.5", ")", ":", "def", "inner", "(", "T", ")", ":", "arr", "=", "T", "(", "\"input\"", ")", "# print shp", "if", "mask", "is", "None",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
alignment
Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective.
lucid/optvis/objectives.py
def alignment(layer, decay_ratio=2): """Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective. """ def inner(T): batch_n = T(layer).get_shape().as_list()[0] arr = T(layer) accum = 0 for d in [1, 2, 3, 4]: for i in range(batch_n - d): a, b = i, i+d arr1, arr2 = arr[a], arr[b] accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d) return -accum return inner
def alignment(layer, decay_ratio=2): """Encourage neighboring images to be similar. When visualizing the interpolation between two objectives, it's often desireable to encourage analagous boejcts to be drawn in the same position, to make them more comparable. This term penalizes L2 distance between neighboring images, as evaluated at layer. In general, we find this most effective if used with a paramaterization that shares across the batch. (In fact, that works quite well by iteself, so this function may just be obselete.) Args: layer: layer to penalize at. decay_ratio: how much to decay penalty as images move apart in batch. Returns: Objective. """ def inner(T): batch_n = T(layer).get_shape().as_list()[0] arr = T(layer) accum = 0 for d in [1, 2, 3, 4]: for i in range(batch_n - d): a, b = i, i+d arr1, arr2 = arr[a], arr[b] accum += tf.reduce_mean((arr1-arr2)**2) / decay_ratio**float(d) return -accum return inner
[ "Encourage", "neighboring", "images", "to", "be", "similar", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L362-L393
[ "def", "alignment", "(", "layer", ",", "decay_ratio", "=", "2", ")", ":", "def", "inner", "(", "T", ")", ":", "batch_n", "=", "T", "(", "layer", ")", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "arr", "=", "T", "(", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
diversity
Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective.
lucid/optvis/objectives.py
def diversity(layer): """Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective. """ def inner(T): layer_t = T(layer) batch_n, _, _, channels = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, -1, channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10) return sum([ sum([ tf.reduce_sum(grams[i]*grams[j]) for j in range(batch_n) if j != i]) for i in range(batch_n)]) / batch_n return inner
def diversity(layer): """Encourage diversity between each batch element. A neural net feature often responds to multiple things, but naive feature visualization often only shows us one. If you optimize a batch of images, this objective will encourage them all to be different. In particular, it caculuates the correlation matrix of activations at layer for each image, and then penalizes cossine similarity between them. This is very similar to ideas in style transfer, except we're *penalizing* style similarity instead of encouraging it. Args: layer: layer to evaluate activation correlations on. Returns: Objective. """ def inner(T): layer_t = T(layer) batch_n, _, _, channels = layer_t.get_shape().as_list() flattened = tf.reshape(layer_t, [batch_n, -1, channels]) grams = tf.matmul(flattened, flattened, transpose_a=True) grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10) return sum([ sum([ tf.reduce_sum(grams[i]*grams[j]) for j in range(batch_n) if j != i]) for i in range(batch_n)]) / batch_n return inner
[ "Encourage", "diversity", "between", "each", "batch", "element", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L396-L425
[ "def", "diversity", "(", "layer", ")", ":", "def", "inner", "(", "T", ")", ":", "layer_t", "=", "T", "(", "layer", ")", "batch_n", ",", "_", ",", "_", ",", "channels", "=", "layer_t", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "flatten...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
input_diff
Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples.
lucid/optvis/objectives.py
def input_diff(orig_img): """Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples. """ def inner(T): diff = T("input") - orig_img return tf.sqrt(tf.reduce_mean(diff**2)) return inner
def input_diff(orig_img): """Average L2 difference between optimized image and orig_img. This objective is usually mutliplied by a negative number and used as a penalty in making advarsarial counterexamples. """ def inner(T): diff = T("input") - orig_img return tf.sqrt(tf.reduce_mean(diff**2)) return inner
[ "Average", "L2", "difference", "between", "optimized", "image", "and", "orig_img", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L429-L438
[ "def", "input_diff", "(", "orig_img", ")", ":", "def", "inner", "(", "T", ")", ":", "diff", "=", "T", "(", "\"input\"", ")", "-", "orig_img", "return", "tf", ".", "sqrt", "(", "tf", ".", "reduce_mean", "(", "diff", "**", "2", ")", ")", "return", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
class_logit
Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit.
lucid/optvis/objectives.py
def class_logit(layer, label): """Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit. """ def inner(T): if isinstance(label, int): class_n = label else: class_n = T("labels").index(label) logits = T(layer) logit = tf.reduce_sum(logits[:, class_n]) return logit return inner
def class_logit(layer, label): """Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit. """ def inner(T): if isinstance(label, int): class_n = label else: class_n = T("labels").index(label) logits = T(layer) logit = tf.reduce_sum(logits[:, class_n]) return logit return inner
[ "Like", "channel", "but", "for", "softmax", "layers", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L442-L461
[ "def", "class_logit", "(", "layer", ",", "label", ")", ":", "def", "inner", "(", "T", ")", ":", "if", "isinstance", "(", "label", ",", "int", ")", ":", "class_n", "=", "label", "else", ":", "class_n", "=", "T", "(", "\"labels\"", ")", ".", "index",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
as_objective
Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective
lucid/optvis/objectives.py
def as_objective(obj): """Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective """ if isinstance(obj, Objective): return obj elif callable(obj): return obj elif isinstance(obj, str): layer, n = obj.split(":") layer, n = layer.strip(), int(n) return channel(layer, n)
def as_objective(obj): """Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective """ if isinstance(obj, Objective): return obj elif callable(obj): return obj elif isinstance(obj, str): layer, n = obj.split(":") layer, n = layer.strip(), int(n) return channel(layer, n)
[ "Convert", "obj", "into", "Objective", "class", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L464-L483
[ "def", "as_objective", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Objective", ")", ":", "return", "obj", "elif", "callable", "(", "obj", ")", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "layer", ",",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_constrain_L2_grad
Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient.
lucid/optvis/param/unit_balls.py
def _constrain_L2_grad(op, grad): """Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient. """ inp = op.inputs[0] inp_norm = tf.norm(inp) unit_inp = inp / inp_norm grad_projection = dot(unit_inp, grad) parallel_grad = unit_inp * grad_projection is_in_ball = tf.less_equal(inp_norm, 1) is_pointed_inward = tf.less(grad_projection, 0) allow_grad = tf.logical_or(is_in_ball, is_pointed_inward) clip_grad = tf.logical_not(allow_grad) clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad) return clipped_grad
def _constrain_L2_grad(op, grad): """Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient. """ inp = op.inputs[0] inp_norm = tf.norm(inp) unit_inp = inp / inp_norm grad_projection = dot(unit_inp, grad) parallel_grad = unit_inp * grad_projection is_in_ball = tf.less_equal(inp_norm, 1) is_pointed_inward = tf.less(grad_projection, 0) allow_grad = tf.logical_or(is_in_ball, is_pointed_inward) clip_grad = tf.logical_not(allow_grad) clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad) return clipped_grad
[ "Gradient", "for", "constrained", "optimization", "on", "an", "L2", "unit", "ball", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L20-L47
[ "def", "_constrain_L2_grad", "(", "op", ",", "grad", ")", ":", "inp", "=", "op", ".", "inputs", "[", "0", "]", "inp_norm", "=", "tf", ".", "norm", "(", "inp", ")", "unit_inp", "=", "inp", "/", "inp_norm", "grad_projection", "=", "dot", "(", "unit_inp...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
unit_ball_L2
A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code.
lucid/optvis/param/unit_balls.py
def unit_ball_L2(shape): """A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) return constrain_L2(x)
def unit_ball_L2(shape): """A tensorflow variable tranfomed to be constrained in a L2 unit ball. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) return constrain_L2(x)
[ "A", "tensorflow", "variable", "tranfomed", "to", "be", "constrained", "in", "a", "L2", "unit", "ball", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L55-L62
[ "def", "unit_ball_L2", "(", "shape", ")", ":", "x", "=", "tf", ".", "Variable", "(", "tf", ".", "zeros", "(", "shape", ")", ")", "return", "constrain_L2", "(", "x", ")" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
unit_ball_L_inf
A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code.
lucid/optvis/param/unit_balls.py
def unit_ball_L_inf(shape, precondition=True): """A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) if precondition: return constrain_L_inf_precondition(x) else: return constrain_L_inf(x)
def unit_ball_L_inf(shape, precondition=True): """A tensorflow variable tranfomed to be constrained in a L_inf unit ball. Note that this code also preconditions the gradient to go in the L_inf direction of steepest descent. EXPERIMENTAL: Do not use for adverserial examples if you need to be confident they are strong attacks. We are not yet confident in this code. """ x = tf.Variable(tf.zeros(shape)) if precondition: return constrain_L_inf_precondition(x) else: return constrain_L_inf(x)
[ "A", "tensorflow", "variable", "tranfomed", "to", "be", "constrained", "in", "a", "L_inf", "unit", "ball", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/param/unit_balls.py#L106-L119
[ "def", "unit_ball_L_inf", "(", "shape", ",", "precondition", "=", "True", ")", ":", "x", "=", "tf", ".", "Variable", "(", "tf", ".", "zeros", "(", "shape", ")", ")", "if", "precondition", ":", "return", "constrain_L_inf_precondition", "(", "x", ")", "els...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
render_vis
Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. 
Usually that will mean one or multiple channel visualizations stacked on top of each other.
lucid/optvis/render.py
def render_vis(model, objective_f, param_f=None, optimizer=None, transforms=None, thresholds=(512,), print_objectives=None, verbose=True, relu_gradient_override=True, use_fixed_seed=False): """Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. 
As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. Usually that will mean one or multiple channel visualizations stacked on top of each other. """ with tf.Graph().as_default() as graph, tf.Session() as sess: if use_fixed_seed: # does not mean results are reproducible, see Args doc tf.set_random_seed(0) T = make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override) print_objective_func = make_print_objective_func(print_objectives, T) loss, vis_op, t_image = T("loss"), T("vis_op"), T("input") tf.global_variables_initializer().run() images = [] try: for i in range(max(thresholds)+1): loss_, _ = sess.run([loss, vis_op]) if i in thresholds: vis = t_image.eval() images.append(vis) if verbose: print(i, loss_) print_objective_func(sess) show(np.hstack(vis)) except KeyboardInterrupt: log.warning("Interrupted optimization at step {:d}.".format(i+1)) vis = t_image.eval() show(np.hstack(vis)) return images
def render_vis(model, objective_f, param_f=None, optimizer=None, transforms=None, thresholds=(512,), print_objectives=None, verbose=True, relu_gradient_override=True, use_fixed_seed=False): """Flexible optimization-base feature vis. There's a lot of ways one might wish to customize otpimization-based feature visualization. It's hard to create an abstraction that stands up to all the things one might wish to try. This function probably can't do *everything* you want, but it's much more flexible than a naive attempt. The basic abstraction is to split the problem into several parts. Consider the rguments: Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. thresholds: A list of numbers of optimization steps, at which we should save (and display if verbose=True) the visualization. print_objectives: A list of objectives separate from those being optimized, whose values get logged during the optimization. verbose: Should we display the visualization when we hit a threshold? This should only be used in IPython. relu_gradient_override: Whether to use the gradient override scheme described in lucid/misc/redirected_relu_grad.py. On by default! use_fixed_seed: Seed the RNG with a fixed value so results are reproducible. Off by default. 
As of tf 1.8 this does not work as intended, see: https://github.com/tensorflow/tensorflow/issues/9171 Returns: 2D array of optimization results containing of evaluations of supplied param_f snapshotted at specified thresholds. Usually that will mean one or multiple channel visualizations stacked on top of each other. """ with tf.Graph().as_default() as graph, tf.Session() as sess: if use_fixed_seed: # does not mean results are reproducible, see Args doc tf.set_random_seed(0) T = make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override) print_objective_func = make_print_objective_func(print_objectives, T) loss, vis_op, t_image = T("loss"), T("vis_op"), T("input") tf.global_variables_initializer().run() images = [] try: for i in range(max(thresholds)+1): loss_, _ = sess.run([loss, vis_op]) if i in thresholds: vis = t_image.eval() images.append(vis) if verbose: print(i, loss_) print_objective_func(sess) show(np.hstack(vis)) except KeyboardInterrupt: log.warning("Interrupted optimization at step {:d}.".format(i+1)) vis = t_image.eval() show(np.hstack(vis)) return images
[ "Flexible", "optimization", "-", "base", "feature", "vis", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/render.py#L44-L115
[ "def", "render_vis", "(", "model", ",", "objective_f", ",", "param_f", "=", "None", ",", "optimizer", "=", "None", ",", "transforms", "=", "None", ",", "thresholds", "=", "(", "512", ",", ")", ",", "print_objectives", "=", "None", ",", "verbose", "=", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
make_vis_T
Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network
lucid/optvis/render.py
def make_vis_T(model, objective_f, param_f=None, optimizer=None, transforms=None, relu_gradient_override=False): """Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. 
Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network """ # pylint: disable=unused-variable t_image = make_t_image(param_f) objective_f = objectives.as_objective(objective_f) transform_f = make_transform_f(transforms) optimizer = make_optimizer(optimizer, []) global_step = tf.train.get_or_create_global_step() init_global_step = tf.variables_initializer([global_step]) init_global_step.run() if relu_gradient_override: with gradient_override_map({'Relu': redirected_relu_grad, 'Relu6': redirected_relu6_grad}): T = import_model(model, transform_f(t_image), t_image) else: T = import_model(model, transform_f(t_image), t_image) loss = objective_f(T) vis_op = optimizer.minimize(-loss, global_step=global_step) local_vars = locals() # pylint: enable=unused-variable def T2(name): if name in local_vars: return local_vars[name] else: return T(name) return T2
def make_vis_T(model, objective_f, param_f=None, optimizer=None, transforms=None, relu_gradient_override=False): """Even more flexible optimization-base feature vis. This function is the inner core of render_vis(), and can be used when render_vis() isn't flexible enough. Unfortunately, it's a bit more tedious to use: > with tf.Graph().as_default() as graph, tf.Session() as sess: > > T = make_vis_T(model, "mixed4a_pre_relu:0") > tf.initialize_all_variables().run() > > for i in range(10): > T("vis_op").run() > showarray(T("input").eval()[0]) This approach allows more control over how the visualizaiton is displayed as it renders. It also allows a lot more flexibility in constructing objectives / params because the session is already in scope. Args: model: The model to be visualized, from Alex' modelzoo. objective_f: The objective our visualization maximizes. See the objectives module for more details. param_f: Paramaterization of the image we're optimizing. See the paramaterization module for more details. Defaults to a naively paramaterized [1, 128, 128, 3] image. optimizer: Optimizer to optimize with. Either tf.train.Optimizer instance, or a function from (graph, sess) to such an instance. Defaults to Adam with lr .05. transforms: A list of stochastic transformations that get composed, which our visualization should robustly activate the network against. See the transform module for more details. Defaults to [transform.jitter(8)]. 
Returns: A function T, which allows access to: * T("vis_op") -- the operation for to optimize the visualization * T("input") -- the visualization itself * T("loss") -- the loss for the visualization * T(layer) -- any layer inside the network """ # pylint: disable=unused-variable t_image = make_t_image(param_f) objective_f = objectives.as_objective(objective_f) transform_f = make_transform_f(transforms) optimizer = make_optimizer(optimizer, []) global_step = tf.train.get_or_create_global_step() init_global_step = tf.variables_initializer([global_step]) init_global_step.run() if relu_gradient_override: with gradient_override_map({'Relu': redirected_relu_grad, 'Relu6': redirected_relu6_grad}): T = import_model(model, transform_f(t_image), t_image) else: T = import_model(model, transform_f(t_image), t_image) loss = objective_f(T) vis_op = optimizer.minimize(-loss, global_step=global_step) local_vars = locals() # pylint: enable=unused-variable def T2(name): if name in local_vars: return local_vars[name] else: return T(name) return T2
[ "Even", "more", "flexible", "optimization", "-", "base", "feature", "vis", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/render.py#L118-L192
[ "def", "make_vis_T", "(", "model", ",", "objective_f", ",", "param_f", "=", "None", ",", "optimizer", "=", "None", ",", "transforms", "=", "None", ",", "relu_gradient_override", "=", "False", ")", ":", "# pylint: disable=unused-variable", "t_image", "=", "make_t...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
grid
layout: numpy arrays x, y metadata: user-defined numpy arrays with metadata n_layer: number of cells in the layer (squared) n_tile: number of cells in the tile (squared)
lucid/scratch/atlas_pipeline/grid.py
def grid(metadata, layout, params): """ layout: numpy arrays x, y metadata: user-defined numpy arrays with metadata n_layer: number of cells in the layer (squared) n_tile: number of cells in the tile (squared) """ x = layout["x"] y = layout["y"] x_min = np.min(x) x_max = np.max(x) y_min = np.min(y) y_max = np.max(y) # this creates the grid bins = np.linspace(x_min, x_max, params["n_layer"] - 1) xd = np.digitize(x, bins) bins = np.linspace(y_min, y_max, params["n_layer"] - 1) yd = np.digitize(y, bins) # the number of tiles is the number of cells divided by the number of cells in each tile num_tiles = int(params["n_layer"]/params["n_tile"]) print("num tiles", num_tiles) # we will save the tiles in an array indexed by the tile coordinates tiles = {} for ti in range(num_tiles): for tj in range(num_tiles): tiles[(ti,tj)] = { "x": [], "y": [], "ci": [], # cell-space x coordinate "cj": [], # cell-space y coordinate "gi": [], # global index } for i,xi in enumerate(x): if(i % 1000 == 0 or i+1 == len(x)): print("point", i+1, "/", len(x), end="\r") # layout-space coordinates yi = y[i] # grid-space cell coordinates ci = xd[i] cj = yd[i] # tile coordinate ti = math.floor(ci / params["n_tile"]) tj = math.floor(cj / params["n_tile"]) # TODO: don't append a point if it doesn't match a filter function provided in params filter = params.get("filter", lambda i,metadata: True) if(filter(i, metadata=metadata)): tiles[(ti,tj)]["x"].append(xi) tiles[(ti,tj)]["y"].append(yi) tiles[(ti,tj)]["ci"].append(ci) tiles[(ti,tj)]["cj"].append(cj) tiles[(ti,tj)]["gi"].append(i) return tiles
def grid(metadata, layout, params): """ layout: numpy arrays x, y metadata: user-defined numpy arrays with metadata n_layer: number of cells in the layer (squared) n_tile: number of cells in the tile (squared) """ x = layout["x"] y = layout["y"] x_min = np.min(x) x_max = np.max(x) y_min = np.min(y) y_max = np.max(y) # this creates the grid bins = np.linspace(x_min, x_max, params["n_layer"] - 1) xd = np.digitize(x, bins) bins = np.linspace(y_min, y_max, params["n_layer"] - 1) yd = np.digitize(y, bins) # the number of tiles is the number of cells divided by the number of cells in each tile num_tiles = int(params["n_layer"]/params["n_tile"]) print("num tiles", num_tiles) # we will save the tiles in an array indexed by the tile coordinates tiles = {} for ti in range(num_tiles): for tj in range(num_tiles): tiles[(ti,tj)] = { "x": [], "y": [], "ci": [], # cell-space x coordinate "cj": [], # cell-space y coordinate "gi": [], # global index } for i,xi in enumerate(x): if(i % 1000 == 0 or i+1 == len(x)): print("point", i+1, "/", len(x), end="\r") # layout-space coordinates yi = y[i] # grid-space cell coordinates ci = xd[i] cj = yd[i] # tile coordinate ti = math.floor(ci / params["n_tile"]) tj = math.floor(cj / params["n_tile"]) # TODO: don't append a point if it doesn't match a filter function provided in params filter = params.get("filter", lambda i,metadata: True) if(filter(i, metadata=metadata)): tiles[(ti,tj)]["x"].append(xi) tiles[(ti,tj)]["y"].append(yi) tiles[(ti,tj)]["ci"].append(ci) tiles[(ti,tj)]["cj"].append(cj) tiles[(ti,tj)]["gi"].append(i) return tiles
[ "layout", ":", "numpy", "arrays", "x", "y", "metadata", ":", "user", "-", "defined", "numpy", "arrays", "with", "metadata", "n_layer", ":", "number", "of", "cells", "in", "the", "layer", "(", "squared", ")", "n_tile", ":", "number", "of", "cells", "in", ...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/grid.py#L12-L68
[ "def", "grid", "(", "metadata", ",", "layout", ",", "params", ")", ":", "x", "=", "layout", "[", "\"x\"", "]", "y", "=", "layout", "[", "\"y\"", "]", "x_min", "=", "np", ".", "min", "(", "x", ")", "x_max", "=", "np", ".", "max", "(", "x", ")"...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
write_grid_local
Write a file for each tile
lucid/scratch/atlas_pipeline/grid.py
def write_grid_local(tiles, params): """ Write a file for each tile """ # TODO: this isn't being used right now, will need to be # ported to gfile if we want to keep it for ti,tj,tile in enumerate_tiles(tiles): filename = "{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}".format(ti=ti, tj=tj, **params) #directory=directory, name=name, n_layer=n_layer, n_tile=n_tile, # write out the tile as a npz print("saving", filename + ".npz") np.savez_compressed(filename + ".npz", **tile) # write out the tile as a csv print("saving", filename + ".csv") df = pd.DataFrame(tile) df.to_csv(filename + ".csv", index=False)
def write_grid_local(tiles, params): """ Write a file for each tile """ # TODO: this isn't being used right now, will need to be # ported to gfile if we want to keep it for ti,tj,tile in enumerate_tiles(tiles): filename = "{directory}/{name}/tile_{n_layer}_{n_tile}_{ti}_{tj}".format(ti=ti, tj=tj, **params) #directory=directory, name=name, n_layer=n_layer, n_tile=n_tile, # write out the tile as a npz print("saving", filename + ".npz") np.savez_compressed(filename + ".npz", **tile) # write out the tile as a csv print("saving", filename + ".csv") df = pd.DataFrame(tile) df.to_csv(filename + ".csv", index=False)
[ "Write", "a", "file", "for", "each", "tile" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/grid.py#L70-L84
[ "def", "write_grid_local", "(", "tiles", ",", "params", ")", ":", "# TODO: this isn't being used right now, will need to be", "# ported to gfile if we want to keep it", "for", "ti", ",", "tj", ",", "tile", "in", "enumerate_tiles", "(", "tiles", ")", ":", "filename", "="...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
enumerate_tiles
Convenience
lucid/scratch/atlas_pipeline/grid.py
def enumerate_tiles(tiles): """ Convenience """ enumerated = [] for key in tiles.keys(): enumerated.append((key[0], key[1], tiles[key])) return enumerated
def enumerate_tiles(tiles): """ Convenience """ enumerated = [] for key in tiles.keys(): enumerated.append((key[0], key[1], tiles[key])) return enumerated
[ "Convenience" ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/scratch/atlas_pipeline/grid.py#L86-L93
[ "def", "enumerate_tiles", "(", "tiles", ")", ":", "enumerated", "=", "[", "]", "for", "key", "in", "tiles", ".", "keys", "(", ")", ":", "enumerated", ".", "append", "(", "(", "key", "[", "0", "]", ",", "key", "[", "1", "]", ",", "tiles", "[", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_load_img
Load image file as numpy array.
lucid/misc/io/loading.py
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs): """Load image file as numpy array.""" image_pil = PIL.Image.open(handle, **kwargs) # resize the image to the requested size, if one was specified if size is not None: if len(size) > 2: size = size[:2] log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size)) image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS) image_array = np.asarray(image_pil) # remove alpha channel if it contains no information # if image_array.shape[-1] > 3 and 'A' not in image_pil.mode: # image_array = image_array[..., :-1] image_dtype = image_array.dtype image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc. # using np.divide should avoid an extra copy compared to doing division first ndimage = np.divide(image_array, image_max_value, dtype=target_dtype) rank = len(ndimage.shape) if rank == 3: return ndimage elif rank == 2: return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2) else: message = "Loaded image has more dimensions than expected: {}".format(rank) raise NotImplementedError(message)
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs): """Load image file as numpy array.""" image_pil = PIL.Image.open(handle, **kwargs) # resize the image to the requested size, if one was specified if size is not None: if len(size) > 2: size = size[:2] log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size)) image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS) image_array = np.asarray(image_pil) # remove alpha channel if it contains no information # if image_array.shape[-1] > 3 and 'A' not in image_pil.mode: # image_array = image_array[..., :-1] image_dtype = image_array.dtype image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc. # using np.divide should avoid an extra copy compared to doing division first ndimage = np.divide(image_array, image_max_value, dtype=target_dtype) rank = len(ndimage.shape) if rank == 3: return ndimage elif rank == 2: return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2) else: message = "Loaded image has more dimensions than expected: {}".format(rank) raise NotImplementedError(message)
[ "Load", "image", "file", "as", "numpy", "array", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L47-L78
[ "def", "_load_img", "(", "handle", ",", "target_dtype", "=", "np", ".", "float32", ",", "size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "image_pil", "=", "PIL", ".", "Image", ".", "open", "(", "handle", ",", "*", "*", "kwargs", ")", "# resi...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_load_text
Load and decode a string.
lucid/misc/io/loading.py
def _load_text(handle, split=False, encoding="utf-8"): """Load and decode a string.""" string = handle.read().decode(encoding) return string.splitlines() if split else string
def _load_text(handle, split=False, encoding="utf-8"): """Load and decode a string.""" string = handle.read().decode(encoding) return string.splitlines() if split else string
[ "Load", "and", "decode", "a", "string", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L86-L89
[ "def", "_load_text", "(", "handle", ",", "split", "=", "False", ",", "encoding", "=", "\"utf-8\"", ")", ":", "string", "=", "handle", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")", "return", "string", ".", "splitlines", "(", ")", "if", "...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_load_graphdef_protobuf
Load GraphDef from a binary proto file.
lucid/misc/io/loading.py
def _load_graphdef_protobuf(handle, **kwargs): """Load GraphDef from a binary proto file.""" # as_graph_def graph_def = tf.GraphDef.FromString(handle.read()) # check if this is a lucid-saved model # metadata = modelzoo.util.extract_metadata(graph_def) # if metadata is not None: # url = handle.name # return modelzoo.vision_base.Model.load_from_metadata(url, metadata) # else return a normal graph_def return graph_def
def _load_graphdef_protobuf(handle, **kwargs): """Load GraphDef from a binary proto file.""" # as_graph_def graph_def = tf.GraphDef.FromString(handle.read()) # check if this is a lucid-saved model # metadata = modelzoo.util.extract_metadata(graph_def) # if metadata is not None: # url = handle.name # return modelzoo.vision_base.Model.load_from_metadata(url, metadata) # else return a normal graph_def return graph_def
[ "Load", "GraphDef", "from", "a", "binary", "proto", "file", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L92-L104
[ "def", "_load_graphdef_protobuf", "(", "handle", ",", "*", "*", "kwargs", ")", ":", "# as_graph_def", "graph_def", "=", "tf", ".", "GraphDef", ".", "FromString", "(", "handle", ".", "read", "(", ")", ")", "# check if this is a lucid-saved model", "# metadata = mod...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
load
Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported.
lucid/misc/io/loading.py
def load(url_or_handle, cache=None, **kwargs): """Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported. """ ext = get_extension(url_or_handle) try: loader = loaders[ext.lower()] message = "Using inferred loader '%s' due to passed file extension '%s'." log.debug(message, loader.__name__[6:], ext) return load_using_loader(url_or_handle, loader, cache, **kwargs) except KeyError: log.warning("Unknown extension '%s', attempting to load as image.", ext) try: with read_handle(url_or_handle, cache=cache) as handle: result = _load_img(handle) except Exception as e: message = "Could not load resource %s as image. Supported extensions: %s" log.error(message, url_or_handle, list(loaders)) raise RuntimeError(message.format(url_or_handle, list(loaders))) else: log.info("Unknown extension '%s' successfully loaded as image.", ext) return result
def load(url_or_handle, cache=None, **kwargs): """Load a file. File format is inferred from url. File retrieval strategy is inferred from URL. Returned object type is inferred from url extension. Args: url_or_handle: a (reachable) URL, or an already open file handle Raises: RuntimeError: If file extension or URL is not supported. """ ext = get_extension(url_or_handle) try: loader = loaders[ext.lower()] message = "Using inferred loader '%s' due to passed file extension '%s'." log.debug(message, loader.__name__[6:], ext) return load_using_loader(url_or_handle, loader, cache, **kwargs) except KeyError: log.warning("Unknown extension '%s', attempting to load as image.", ext) try: with read_handle(url_or_handle, cache=cache) as handle: result = _load_img(handle) except Exception as e: message = "Could not load resource %s as image. Supported extensions: %s" log.error(message, url_or_handle, list(loaders)) raise RuntimeError(message.format(url_or_handle, list(loaders))) else: log.info("Unknown extension '%s' successfully loaded as image.", ext) return result
[ "Load", "a", "file", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L120-L152
[ "def", "load", "(", "url_or_handle", ",", "cache", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ext", "=", "get_extension", "(", "url_or_handle", ")", "try", ":", "loader", "=", "loaders", "[", "ext", ".", "lower", "(", ")", "]", "message", "=", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
crop_or_pad_to
Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs.
lucid/optvis/transform.py
def crop_or_pad_to(height, width): """Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs. """ def inner(t_image): return tf.image.resize_image_with_crop_or_pad(t_image, height, width) return inner
def crop_or_pad_to(height, width): """Ensures the specified spatial shape by either padding or cropping. Meant to be used as a last transform for architectures insisting on a specific spatial shape of their inputs. """ def inner(t_image): return tf.image.resize_image_with_crop_or_pad(t_image, height, width) return inner
[ "Ensures", "the", "specified", "spatial", "shape", "by", "either", "padding", "or", "cropping", ".", "Meant", "to", "be", "used", "as", "a", "last", "transform", "for", "architectures", "insisting", "on", "a", "specific", "spatial", "shape", "of", "their", "...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/transform.py#L154-L161
[ "def", "crop_or_pad_to", "(", "height", ",", "width", ")", ":", "def", "inner", "(", "t_image", ")", ":", "return", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "t_image", ",", "height", ",", "width", ")", "return", "inner" ]
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_normalize_array
Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image
lucid/misc/io/serialize_array.py
def _normalize_array(array, domain=(0, 1)): """Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image """ # first copy the input so we're never mutating the user's data array = np.array(array) # squeeze helps both with batch=1 and B/W and PIL's mode inference array = np.squeeze(array) assert len(array.shape) <= 3 assert np.issubdtype(array.dtype, np.number) assert not np.isnan(array).any() low, high = np.min(array), np.max(array) if domain is None: message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)" log.debug(message, low, high) domain = (low, high) # clip values if domain was specified and array contains values outside of it if low < domain[0] or high > domain[1]: message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})." log.info(message.format(low, high, domain[0], domain[1])) array = array.clip(*domain) min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255 # convert signed to unsigned if needed if np.issubdtype(array.dtype, np.inexact): offset = domain[0] if offset != 0: array -= offset log.debug("Converting inexact array by subtracting -%.2f.", offset) scalar = max_value / (domain[1] - domain[0]) if scalar != 1: array *= scalar log.debug("Converting inexact array by scaling by %.2f.", scalar) return array.clip(min_value, max_value).astype(np.uint8)
def _normalize_array(array, domain=(0, 1)): """Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image """ # first copy the input so we're never mutating the user's data array = np.array(array) # squeeze helps both with batch=1 and B/W and PIL's mode inference array = np.squeeze(array) assert len(array.shape) <= 3 assert np.issubdtype(array.dtype, np.number) assert not np.isnan(array).any() low, high = np.min(array), np.max(array) if domain is None: message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)" log.debug(message, low, high) domain = (low, high) # clip values if domain was specified and array contains values outside of it if low < domain[0] or high > domain[1]: message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})." log.info(message.format(low, high, domain[0], domain[1])) array = array.clip(*domain) min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255 # convert signed to unsigned if needed if np.issubdtype(array.dtype, np.inexact): offset = domain[0] if offset != 0: array -= offset log.debug("Converting inexact array by subtracting -%.2f.", offset) scalar = max_value / (domain[1] - domain[0]) if scalar != 1: array *= scalar log.debug("Converting inexact array by scaling by %.2f.", scalar) return array.clip(min_value, max_value).astype(np.uint8)
[ "Given", "an", "arbitrary", "rank", "-", "3", "NumPy", "array", "produce", "one", "representing", "an", "image", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L31-L77
[ "def", "_normalize_array", "(", "array", ",", "domain", "=", "(", "0", ",", "1", ")", ")", ":", "# first copy the input so we're never mutating the user's data", "array", "=", "np", ".", "array", "(", "array", ")", "# squeeze helps both with batch=1 and B/W and PIL's mo...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_serialize_normalized_array
Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
lucid/misc/io/serialize_array.py
def _serialize_normalized_array(array, fmt='png', quality=70): """Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ dtype = array.dtype assert np.issubdtype(dtype, np.unsignedinteger) assert np.max(array) <= np.iinfo(dtype).max assert array.shape[-1] > 1 # array dims must have been squeezed image = PIL.Image.fromarray(array) image_bytes = BytesIO() image.save(image_bytes, fmt, quality=quality) # TODO: Python 3 could save a copy here by using `getbuffer()` instead. image_data = image_bytes.getvalue() return image_data
def _serialize_normalized_array(array, fmt='png', quality=70): """Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ dtype = array.dtype assert np.issubdtype(dtype, np.unsignedinteger) assert np.max(array) <= np.iinfo(dtype).max assert array.shape[-1] > 1 # array dims must have been squeezed image = PIL.Image.fromarray(array) image_bytes = BytesIO() image.save(image_bytes, fmt, quality=quality) # TODO: Python 3 could save a copy here by using `getbuffer()` instead. image_data = image_bytes.getvalue() return image_data
[ "Given", "a", "normalized", "array", "returns", "byte", "representation", "of", "image", "encoding", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L80-L101
[ "def", "_serialize_normalized_array", "(", "array", ",", "fmt", "=", "'png'", ",", "quality", "=", "70", ")", ":", "dtype", "=", "array", ".", "dtype", "assert", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "unsignedinteger", ")", "assert", "np...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
serialize_array
Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
lucid/misc/io/serialize_array.py
def serialize_array(array, domain=(0, 1), fmt='png', quality=70): """Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ normalized = _normalize_array(array, domain=domain) return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)
def serialize_array(array, domain=(0, 1), fmt='png', quality=70): """Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ normalized = _normalize_array(array, domain=domain) return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)
[ "Given", "an", "arbitrary", "rank", "-", "3", "NumPy", "array", "returns", "the", "byte", "representation", "of", "the", "encoded", "image", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L104-L118
[ "def", "serialize_array", "(", "array", ",", "domain", "=", "(", "0", ",", "1", ")", ",", "fmt", "=", "'png'", ",", "quality", "=", "70", ")", ":", "normalized", "=", "_normalize_array", "(", "array", ",", "domain", "=", "domain", ")", "return", "_se...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
array_to_jsbuffer
Serialize 1d NumPy array to JS TypedArray. Data is serialized to base64-encoded string, which is much faster and memory-efficient than json list serialization. Args: array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES. Returns: JS code that evaluates to a TypedArray as string. Raises: TypeError: if array dtype or shape not supported.
lucid/misc/io/serialize_array.py
def array_to_jsbuffer(array): """Serialize 1d NumPy array to JS TypedArray. Data is serialized to base64-encoded string, which is much faster and memory-efficient than json list serialization. Args: array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES. Returns: JS code that evaluates to a TypedArray as string. Raises: TypeError: if array dtype or shape not supported. """ if array.ndim != 1: raise TypeError('Only 1d arrays can be converted JS TypedArray.') if array.dtype.name not in JS_ARRAY_TYPES: raise TypeError('Array dtype not supported by JS TypedArray.') js_type_name = array.dtype.name.capitalize() + 'Array' data_base64 = base64.b64encode(array.tobytes()).decode('ascii') code = """ (function() { const data = atob("%s"); const buf = new Uint8Array(data.length); for (var i=0; i<data.length; ++i) { buf[i] = data.charCodeAt(i); } var array_type = %s; if (array_type == Uint8Array) { return buf; } return new array_type(buf.buffer); })() """ % (data_base64, js_type_name) return code
def array_to_jsbuffer(array): """Serialize 1d NumPy array to JS TypedArray. Data is serialized to base64-encoded string, which is much faster and memory-efficient than json list serialization. Args: array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES. Returns: JS code that evaluates to a TypedArray as string. Raises: TypeError: if array dtype or shape not supported. """ if array.ndim != 1: raise TypeError('Only 1d arrays can be converted JS TypedArray.') if array.dtype.name not in JS_ARRAY_TYPES: raise TypeError('Array dtype not supported by JS TypedArray.') js_type_name = array.dtype.name.capitalize() + 'Array' data_base64 = base64.b64encode(array.tobytes()).decode('ascii') code = """ (function() { const data = atob("%s"); const buf = new Uint8Array(data.length); for (var i=0; i<data.length; ++i) { buf[i] = data.charCodeAt(i); } var array_type = %s; if (array_type == Uint8Array) { return buf; } return new array_type(buf.buffer); })() """ % (data_base64, js_type_name) return code
[ "Serialize", "1d", "NumPy", "array", "to", "JS", "TypedArray", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L126-L161
[ "def", "array_to_jsbuffer", "(", "array", ")", ":", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "TypeError", "(", "'Only 1d arrays can be converted JS TypedArray.'", ")", "if", "array", ".", "dtype", ".", "name", "not", "in", "JS_ARRAY_TYPES", ":", "r...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
ChannelReducer._apply_flat
Utility for applying f to inner dimension of acts. Flattens acts into a 2D tensor, applies f, then unflattens so that all dimesnions except innermost are unchanged.
lucid/misc/channel_reducer.py
def _apply_flat(cls, f, acts): """Utility for applying f to inner dimension of acts. Flattens acts into a 2D tensor, applies f, then unflattens so that all dimesnions except innermost are unchanged. """ orig_shape = acts.shape acts_flat = acts.reshape([-1, acts.shape[-1]]) new_flat = f(acts_flat) if not isinstance(new_flat, np.ndarray): return new_flat shape = list(orig_shape[:-1]) + [-1] return new_flat.reshape(shape)
def _apply_flat(cls, f, acts): """Utility for applying f to inner dimension of acts. Flattens acts into a 2D tensor, applies f, then unflattens so that all dimesnions except innermost are unchanged. """ orig_shape = acts.shape acts_flat = acts.reshape([-1, acts.shape[-1]]) new_flat = f(acts_flat) if not isinstance(new_flat, np.ndarray): return new_flat shape = list(orig_shape[:-1]) + [-1] return new_flat.reshape(shape)
[ "Utility", "for", "applying", "f", "to", "inner", "dimension", "of", "acts", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/channel_reducer.py#L52-L64
[ "def", "_apply_flat", "(", "cls", ",", "f", ",", "acts", ")", ":", "orig_shape", "=", "acts", ".", "shape", "acts_flat", "=", "acts", ".", "reshape", "(", "[", "-", "1", ",", "acts", ".", "shape", "[", "-", "1", "]", "]", ")", "new_flat", "=", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
StyleLoss.set_style
Set target style variables. Expected usage: style_loss = StyleLoss(style_layers) ... init_op = tf.global_variables_initializer() init_op.run() feeds = {... session.run() 'feeds' argument that will make 'style_layers' tensors evaluate to activation values of style image...} style_loss.set_style(feeds) # this must be called after 'init_op.run()'
lucid/optvis/style.py
def set_style(self, input_feeds): """Set target style variables. Expected usage: style_loss = StyleLoss(style_layers) ... init_op = tf.global_variables_initializer() init_op.run() feeds = {... session.run() 'feeds' argument that will make 'style_layers' tensors evaluate to activation values of style image...} style_loss.set_style(feeds) # this must be called after 'init_op.run()' """ sess = tf.get_default_session() computed = sess.run(self.input_grams, input_feeds) for v, g in zip(self.target_vars, computed): v.load(g)
def set_style(self, input_feeds): """Set target style variables. Expected usage: style_loss = StyleLoss(style_layers) ... init_op = tf.global_variables_initializer() init_op.run() feeds = {... session.run() 'feeds' argument that will make 'style_layers' tensors evaluate to activation values of style image...} style_loss.set_style(feeds) # this must be called after 'init_op.run()' """ sess = tf.get_default_session() computed = sess.run(self.input_grams, input_feeds) for v, g in zip(self.target_vars, computed): v.load(g)
[ "Set", "target", "style", "variables", ".", "Expected", "usage", ":", "style_loss", "=", "StyleLoss", "(", "style_layers", ")", "...", "init_op", "=", "tf", ".", "global_variables_initializer", "()", "init_op", ".", "run", "()", "feeds", "=", "{", "...", "se...
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/style.py#L74-L90
[ "def", "set_style", "(", "self", ",", "input_feeds", ")", ":", "sess", "=", "tf", ".", "get_default_session", "(", ")", "computed", "=", "sess", ".", "run", "(", "self", ".", "input_grams", ",", "input_feeds", ")", "for", "v", ",", "g", "in", "zip", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
_image_url
Create a data URL representing an image from a PIL.Image. Args: image: a numpy mode: presently only supports "data" for data URL Returns: URL representing image
lucid/misc/io/showing.py
def _image_url(array, fmt='png', mode="data", quality=90, domain=None): """Create a data URL representing an image from a PIL.Image. Args: image: a numpy mode: presently only supports "data" for data URL Returns: URL representing image """ supported_modes = ("data") if mode not in supported_modes: message = "Unsupported mode '%s', should be one of '%s'." raise ValueError(message, mode, supported_modes) image_data = serialize_array(array, fmt=fmt, quality=quality) base64_byte_string = base64.b64encode(image_data).decode('ascii') return "data:image/" + fmt.upper() + ";base64," + base64_byte_string
def _image_url(array, fmt='png', mode="data", quality=90, domain=None): """Create a data URL representing an image from a PIL.Image. Args: image: a numpy mode: presently only supports "data" for data URL Returns: URL representing image """ supported_modes = ("data") if mode not in supported_modes: message = "Unsupported mode '%s', should be one of '%s'." raise ValueError(message, mode, supported_modes) image_data = serialize_array(array, fmt=fmt, quality=quality) base64_byte_string = base64.b64encode(image_data).decode('ascii') return "data:image/" + fmt.upper() + ";base64," + base64_byte_string
[ "Create", "a", "data", "URL", "representing", "an", "image", "from", "a", "PIL", ".", "Image", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/showing.py#L39-L56
[ "def", "_image_url", "(", "array", ",", "fmt", "=", "'png'", ",", "mode", "=", "\"data\"", ",", "quality", "=", "90", ",", "domain", "=", "None", ")", ":", "supported_modes", "=", "(", "\"data\"", ")", "if", "mode", "not", "in", "supported_modes", ":",...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
image
Display an image. Args: array: NumPy array representing the image fmt: Image format e.g. png, jpeg domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None
lucid/misc/io/showing.py
def image(array, domain=None, width=None, format='png', **kwargs): """Display an image. Args: array: NumPy array representing the image fmt: Image format e.g. png, jpeg domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None """ image_data = serialize_array(array, fmt=format, domain=domain) image = IPython.display.Image(data=image_data, format=format, width=width) IPython.display.display(image)
def image(array, domain=None, width=None, format='png', **kwargs): """Display an image. Args: array: NumPy array representing the image fmt: Image format e.g. png, jpeg domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None """ image_data = serialize_array(array, fmt=format, domain=domain) image = IPython.display.Image(data=image_data, format=format, width=width) IPython.display.display(image)
[ "Display", "an", "image", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/showing.py#L62-L75
[ "def", "image", "(", "array", ",", "domain", "=", "None", ",", "width", "=", "None", ",", "format", "=", "'png'", ",", "*", "*", "kwargs", ")", ":", "image_data", "=", "serialize_array", "(", "array", ",", "fmt", "=", "format", ",", "domain", "=", ...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e
train
images
Display a list of images with optional labels. Args: arrays: A list of NumPy arrays representing images labels: A list of strings to label each image. Defaults to show index if None domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None
lucid/misc/io/showing.py
def images(arrays, labels=None, domain=None, w=None): """Display a list of images with optional labels. Args: arrays: A list of NumPy arrays representing images labels: A list of strings to label each image. Defaults to show index if None domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None """ s = '<div style="display: flex; flex-direction: row;">' for i, array in enumerate(arrays): url = _image_url(array) label = labels[i] if labels is not None else i s += """<div style="margin-right:10px;"> {label}<br/> <img src="{url}" style="margin-top:4px;"> </div>""".format(label=label, url=url) s += "</div>" _display_html(s)
def images(arrays, labels=None, domain=None, w=None): """Display a list of images with optional labels. Args: arrays: A list of NumPy arrays representing images labels: A list of strings to label each image. Defaults to show index if None domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None """ s = '<div style="display: flex; flex-direction: row;">' for i, array in enumerate(arrays): url = _image_url(array) label = labels[i] if labels is not None else i s += """<div style="margin-right:10px;"> {label}<br/> <img src="{url}" style="margin-top:4px;"> </div>""".format(label=label, url=url) s += "</div>" _display_html(s)
[ "Display", "a", "list", "of", "images", "with", "optional", "labels", "." ]
tensorflow/lucid
python
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/showing.py#L78-L99
[ "def", "images", "(", "arrays", ",", "labels", "=", "None", ",", "domain", "=", "None", ",", "w", "=", "None", ")", ":", "s", "=", "'<div style=\"display: flex; flex-direction: row;\">'", "for", "i", ",", "array", "in", "enumerate", "(", "arrays", ")", ":"...
d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e