Dataset columns (dtype and observed length/value ranges):

id                 int32    values 0 to 252k
repo               string   lengths 7 to 55
path               string   lengths 4 to 127
func_name          string   lengths 1 to 88
original_string    string   lengths 75 to 19.8k
language           string   1 distinct value
code               string   lengths 75 to 19.8k
code_tokens        list
docstring          string   lengths 3 to 17.3k
docstring_tokens   list
sha                string   lengths 40 to 40
url                string   lengths 87 to 242
17,000
MisterY/gnucash-portfolio
gnucash_portfolio/lib/database.py
Database.open_book
python
def open_book(self, for_writing=False) -> piecash.Book:
    """
    Opens the database. Call this using 'with'.
    If database file is not found, an in-memory database will be created.
    """
    filename = None
    # check if the file path is already a URL.
    file_url = urllib.parse.urlparse(self.filename)
    if file_url.scheme == "file" or file_url.scheme == "sqlite":
        filename = file_url.path[1:]
    else:
        filename = self.filename

    if not os.path.isfile(filename):
        log(WARN, "Database %s requested but not found. Creating an in-memory book.", filename)
        return self.create_book()

    access_type = "read/write" if for_writing else "readonly"
    log(INFO, "Using %s in %s mode.", filename, access_type)
    # file_path = path.relpath(self.filename)
    file_path = path.abspath(filename)
    if not for_writing:
        book = piecash.open_book(file_path, open_if_lock=True)
    else:
        book = piecash.open_book(file_path, open_if_lock=True, readonly=False)
    # book = create_book()
    return book
Opens the database. Call this using 'with'. If database file is not found, an in-memory database will be created.
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/database.py#L37-L65
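A minimal usage sketch for the open_book record above, assuming the surrounding Database wrapper takes the book path in its constructor (the constructor is not part of this record, so that call is illustrative only):

# Hypothetical caller; Database(...) arguments are an assumption, not taken from this record.
db = Database("portfolio.gnucash")
# The docstring asks for a 'with' block; a piecash Book closes cleanly as a context manager.
with db.open_book(for_writing=False) as book:
    for commodity in book.commodities:
        print(commodity.mnemonic)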
17,001
MisterY/gnucash-portfolio
gnucash_portfolio/lib/fileutils.py
read_text_from_file
python
def read_text_from_file(path: str) -> str:
    """ Reads text file contents """
    with open(path) as text_file:
        content = text_file.read()
    return content
Reads text file contents
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/fileutils.py#L3-L8
17,002
MisterY/gnucash-portfolio
gnucash_portfolio/lib/fileutils.py
save_text_to_file
python
def save_text_to_file(content: str, path: str):
    """ Saves text to file """
    with open(path, mode='w') as text_file:
        text_file.write(content)
Saves text to file
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/fileutils.py#L10-L13
17,003
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.get_amount_in_base_currency
python
def get_amount_in_base_currency(self, currency: str, amount: Decimal) -> Decimal:
    """ Calculates the amount in base currency """
    assert isinstance(amount, Decimal)

    # If this is already the base currency, do nothing.
    if currency == self.get_default_currency().mnemonic:
        return amount

    agg = self.get_currency_aggregate_by_symbol(currency)
    if not agg:
        raise ValueError(f"Currency not found: {currency}!")

    # TODO use pricedb for the price.
    rate_to_base = agg.get_latest_price()
    if not rate_to_base:
        raise ValueError(f"Latest price not found for {currency}!")
    assert isinstance(rate_to_base.value, Decimal)

    result = amount * rate_to_base.value
    return result
Calculates the amount in base currency
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L70-L89
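A worked example of the conversion performed above, with illustrative numbers; the rate lookup itself needs an open book, so the latest price is inlined here:

from decimal import Decimal

amount = Decimal("100")          # 100 USD to convert
rate_to_base = Decimal("0.9")    # illustrative latest price: 0.9 EUR per USD
result = amount * rate_to_base   # Decimal('90.0') in the base currency (EUR)
print(result)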
17,004
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.get_default_currency
python
def get_default_currency(self) -> Commodity:
    """ returns the book default currency """
    result = None

    if self.default_currency:
        result = self.default_currency
    else:
        def_currency = self.__get_default_currency()
        self.default_currency = def_currency
        result = def_currency

    return result
returns the book default currency
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L91-L102
17,005
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.get_book_currencies
python
def get_book_currencies(self) -> List[Commodity]:
    """ Returns currencies used in the book """
    query = (
        self.currencies_query
        .order_by(Commodity.mnemonic)
    )
    return query.all()
Returns currencies used in the book
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L104-L110
17,006
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.get_currency_aggregate_by_symbol
python
def get_currency_aggregate_by_symbol(self, symbol: str) -> CurrencyAggregate:
    """ Creates currency aggregate for the given currency symbol """
    currency = self.get_by_symbol(symbol)
    result = self.get_currency_aggregate(currency)
    return result
Creates currency aggregate for the given currency symbol
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L116-L120
17,007
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.get_by_symbol
python
def get_by_symbol(self, symbol: str) -> Commodity:
    """ Loads currency by symbol """
    assert isinstance(symbol, str)

    query = (
        self.currencies_query
        .filter(Commodity.mnemonic == symbol)
    )
    return query.one()
Loads currency by symbol
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L122-L130
17,008
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.import_fx_rates
python
def import_fx_rates(self, rates: List[PriceModel]):
    """ Imports the given prices into database. Write operation! """
    have_new_rates = False

    base_currency = self.get_default_currency()

    for rate in rates:
        assert isinstance(rate, PriceModel)
        currency = self.get_by_symbol(rate.symbol)
        amount = rate.value

        # Do not import duplicate prices.
        # todo: if the price differs, update it!
        # exists_query = exists(rates_query)
        has_rate = currency.prices.filter(Price.date == rate.datetime.date()).first()
        # has_rate = (
        #     self.book.session.query(Price)
        #     .filter(Price.date == rate.date.date())
        #     .filter(Price.currency == currency)
        # )
        if not has_rate:
            log(INFO, "Creating entry for %s, %s, %s, %s",
                base_currency.mnemonic, currency.mnemonic, rate.datetime.date(), amount)
            # Save the price in the exchange currency, not the default.
            # Invert the rate in that case.
            inverted_rate = 1 / amount
            inverted_rate = inverted_rate.quantize(Decimal('.00000000'))

            price = Price(commodity=currency,
                          currency=base_currency,
                          date=rate.datetime.date(),
                          value=str(inverted_rate))
            have_new_rates = True

    # Save the book after the prices have been created.
    if have_new_rates:
        log(INFO, "Saving new prices...")
        self.book.flush()
        self.book.save()
    else:
        log(INFO, "No prices imported.")
Imports the given prices into database. Write operation!
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L132-L173
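The inversion step above stores the price in the exchange currency rather than the book default; with an illustrative rate of 1.25, the stored value works out as follows:

from decimal import Decimal

amount = Decimal("1.25")                # imported rate (illustrative)
inverted_rate = 1 / amount              # Decimal('0.8')
inverted_rate = inverted_rate.quantize(Decimal('.00000000'))
print(inverted_rate)                    # 0.80000000, the value passed to Price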
17,009
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.__get_default_currency
python
def __get_default_currency(self):
    """Read the default currency from GnuCash preferences"""
    # If we are on Windows, read from registry.
    if sys.platform == "win32":
        # read from registry
        def_curr = self.book["default-currency"] = self.__get_default_currency_windows()
    else:
        # return the currency from locale.
        # todo: Read the preferences on other operating systems.
        def_curr = self.book["default-currency"] = self.__get_locale_currency()

    return def_curr
Read the default currency from GnuCash preferences
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L178-L189
17,010
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
CurrenciesAggregate.__get_registry_key
python
def __get_registry_key(self, key):
    """ Read currency from windows registry """
    import winreg
    root = winreg.OpenKey(
        winreg.HKEY_CURRENT_USER,
        r'SOFTWARE\GSettings\org\gnucash\general',
        0, winreg.KEY_READ)
    [pathname, regtype] = (winreg.QueryValueEx(root, key))
    winreg.CloseKey(root)
    return pathname
Read currency from windows registry
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L209-L217
17,011
MisterY/gnucash-portfolio
gnucash_portfolio/splitsaggregate.py
SplitsAggregate.get_for_accounts
python
def get_for_accounts(self, accounts: List[Account]):
    ''' Get all splits for the given accounts '''
    account_ids = [acc.guid for acc in accounts]
    query = (
        self.query
        .filter(Split.account_guid.in_(account_ids))
    )
    splits = query.all()
    return splits
Get all splits for the given accounts
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/splitsaggregate.py#L39-L47
17,012
MisterY/gnucash-portfolio
gnucash_portfolio/reports/portfolio_value.py
__get_model_for_portfolio_value
python
def __get_model_for_portfolio_value(input_model: PortfolioValueInputModel
                                    ) -> PortfolioValueViewModel:
    """ loads the data for portfolio value """
    result = PortfolioValueViewModel()
    result.filter = input_model

    ref_datum = Datum()
    ref_datum.from_datetime(input_model.as_of_date)
    ref_date = ref_datum.end_of_day()

    result.stock_rows = []
    with BookAggregate() as svc:
        book = svc.book
        stocks_svc = svc.securities
        if input_model.stock:
            symbols = input_model.stock.split(",")
            stocks = stocks_svc.get_stocks(symbols)
        else:
            stocks = stocks_svc.get_all()

        for stock in stocks:
            row: StockViewModel = portfoliovalue.get_stock_model_from(
                book, stock, as_of_date=ref_date)
            if row and row.balance > 0:
                result.stock_rows.append(row)

    return result
loads the data for portfolio value
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/reports/portfolio_value.py#L17-L44
17,013
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.__load_settings
python
def __load_settings(self):
    """ Load settings from .json file """
    #file_path = path.relpath(settings_file_path)
    #file_path = path.abspath(settings_file_path)
    file_path = self.file_path

    try:
        self.data = json.load(open(file_path))
    except FileNotFoundError:
        print("Could not load", file_path)
Load settings from .json file
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L27-L36
17,014
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.file_exists
python
def file_exists(self) -> bool:
    """ Check if the settings file exists or not """
    cfg_path = self.file_path
    assert cfg_path

    return path.isfile(cfg_path)
Check if the settings file exists or not
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L46-L51
17,015
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.save
python
def save(self):
    """ Saves the settings contents """
    content = self.dumps()
    fileutils.save_text_to_file(content, self.file_path)
Saves the settings contents
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L53-L56
17,016
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.database_path
python
def database_path(self):
    """ Full database path. Includes the default location + the database filename. """
    filename = self.database_filename

    db_path = ":memory:" if filename == ":memory:" else (
        path.abspath(path.join(__file__, "../..", "..", "data", filename)))
    return db_path
Full database path. Includes the default location + the database filename.
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L67-L74
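A small sketch of how the join above resolves: os.path.join does not collapse '..' segments by itself, abspath does, so the expression walks up from the module file (the printed paths depend on where the module lives):

from os import path

here = path.abspath(path.join(__file__, ".."))   # directory containing this module
data_dir = path.abspath(path.join(__file__, "../..", "..", "data"))
print(here)
print(data_dir)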
17,017
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.file_path
python
def file_path(self) -> str:
    """ Settings file absolute path"""
    user_dir = self.__get_user_path()
    file_path = path.abspath(path.join(user_dir, self.FILENAME))
    return file_path
Settings file absolute path
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L89-L93
17,018
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.dumps
python
def dumps(self) -> str:
    """ Dumps the json content as a string """
    return json.dumps(self.data, sort_keys=True, indent=4)
Dumps the json content as a string
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L95-L97
17,019
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
Settings.__copy_template
python
def __copy_template(self):
    """ Copy the settings template into the user's directory """
    import shutil

    template_filename = "settings.json.template"
    template_path = path.abspath(
        path.join(__file__, "..", "..", "config", template_filename))
    settings_path = self.file_path

    shutil.copyfile(template_path, settings_path)

    self.__ensure_file_exists()
Copy the settings template into the user's directory
bfaad8345a5479d1cd111acee1939e25c2a638c2
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L117-L127
17,020
alephdata/memorious
memorious/logic/check.py
ContextCheck.is_not_empty
python
def is_not_empty(self, value, strict=False):
    """if value is not empty"""
    value = stringify(value)
    if value is not None:
        return
    self.shout('Value %r is empty', strict, value)
if value is not empty
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L18-L23
17,021
alephdata/memorious
memorious/logic/check.py
ContextCheck.is_numeric
python
def is_numeric(self, value, strict=False):
    """if value is numeric"""
    value = stringify(value)
    if value is not None:
        if value.isnumeric():
            return
    self.shout('value %r is not numeric', strict, value)
if value is numeric
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L25-L31
17,022
alephdata/memorious
memorious/logic/check.py
ContextCheck.is_integer
python
def is_integer(self, value, strict=False):
    """if value is an integer"""
    if value is not None:
        if isinstance(value, numbers.Number):
            return
        value = stringify(value)
        if value is not None and value.isnumeric():
            return
    self.shout('value %r is not an integer', strict, value)
if value is an integer
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L33-L41
17,023
alephdata/memorious
memorious/logic/check.py
ContextCheck.match_date
python
def match_date(self, value, strict=False):
    """if value is a date"""
    value = stringify(value)
    try:
        parse(value)
    except Exception:
        self.shout('Value %r is not a valid date', strict, value)
if value is a date
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L43-L49
17,024
alephdata/memorious
memorious/logic/check.py
ContextCheck.match_regexp
python
def match_regexp(self, value, q, strict=False):
    """if value matches a regexp q"""
    value = stringify(value)
    mr = re.compile(q)
    if value is not None:
        if mr.match(value):
            return
    self.shout('%r not matching the regexp %r', strict, value, q)
if value matches a regexp q
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L51-L58
17,025
alephdata/memorious
memorious/logic/check.py
ContextCheck.has_length
python
def has_length(self, value, q, strict=False):
    """if value has a length of q"""
    value = stringify(value)
    if value is not None:
        if len(value) == q:
            return
    self.shout('Value %r not matching length %r', strict, value, q)
if value has a length of q
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L60-L66
17,026
alephdata/memorious
memorious/logic/check.py
ContextCheck.must_contain
python
def must_contain(self, value, q, strict=False):
    """if value must contain q"""
    if value is not None:
        if value.find(q) != -1:
            return
    self.shout('Value %r does not contain %r', strict, value, q)
if value must contain q
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L68-L73
17,027
alephdata/memorious
memorious/operations/extract.py
extract
python
def extract(context, data):
    """Extract a compressed file"""
    with context.http.rehash(data) as result:
        file_path = result.file_path
        content_type = result.content_type
        extract_dir = random_filename(context.work_path)
        if content_type in ZIP_MIME_TYPES:
            extracted_files = extract_zip(file_path, extract_dir)
        elif content_type in TAR_MIME_TYPES:
            extracted_files = extract_tar(file_path, extract_dir, context)
        elif content_type in SEVENZIP_MIME_TYPES:
            extracted_files = extract_7zip(file_path, extract_dir, context)
        else:
            context.log.warning(
                "Unsupported archive content type: %s", content_type
            )
            return
        extracted_content_hashes = {}
        for path in extracted_files:
            relative_path = os.path.relpath(path, extract_dir)
            content_hash = context.store_file(path)
            extracted_content_hashes[relative_path] = content_hash
            data['content_hash'] = content_hash
            data['file_name'] = relative_path
            context.emit(data=data.copy())
Extract a compressed file
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/extract.py#L78-L102
17,028
alephdata/memorious
memorious/model/queue.py
Queue.size
python
def size(cls, crawler):
    """Total operations pending for this crawler"""
    key = make_key('queue_pending', crawler)
    return unpack_int(conn.get(key))
Total operations pending for this crawler
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/queue.py#L71-L74
17,029
alephdata/memorious
memorious/helpers/ocr.py
read_word
python
def read_word(image, whitelist=None, chars=None, spaces=False):
    """ OCR a single word from an image. Useful for captchas.
        Image should be pre-processed to remove noise etc. """
    from tesserocr import PyTessBaseAPI
    api = PyTessBaseAPI()
    api.SetPageSegMode(8)
    if whitelist is not None:
        api.SetVariable("tessedit_char_whitelist", whitelist)
    api.SetImage(image)
    api.Recognize()
    guess = api.GetUTF8Text()
    if not spaces:
        guess = ''.join([c for c in guess if c != " "])
        guess = guess.strip()
    if chars is not None and len(guess) != chars:
        return guess, None
    return guess, api.MeanTextConf()
OCR a single word from an image. Useful for captchas. Image should be pre-processed to remove noise etc.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/ocr.py#L3-L22
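A usage sketch for the OCR helper above, assuming Pillow and tesserocr are installed; the file name and whitelist are illustrative:

from PIL import Image

image = Image.open("captcha.png")   # pre-processed captcha image (illustrative path)
guess, confidence = read_word(image, whitelist="0123456789", chars=5)
if confidence is None:
    # read_word returns (guess, None) when the guess length does not match chars
    print("unexpected length:", guess)
else:
    print(guess, confidence)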
17,030
alephdata/memorious
memorious/helpers/ocr.py
read_char
python
def read_char(image, whitelist=None):
    """ OCR a single character from an image. Useful for captchas."""
    from tesserocr import PyTessBaseAPI
    api = PyTessBaseAPI()
    api.SetPageSegMode(10)
    if whitelist is not None:
        api.SetVariable("tessedit_char_whitelist", whitelist)
    api.SetImage(image)
    api.Recognize()
    return api.GetUTF8Text().strip()
OCR a single character from an image. Useful for captchas.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/ocr.py#L25-L34
17,031
alephdata/memorious
memorious/logic/context.py
Context.get
python
def get(self, name, default=None):
    """Get a configuration value and expand environment variables."""
    value = self.params.get(name, default)
    if isinstance(value, str):
        value = os.path.expandvars(value)
    return value
Get a configuration value and expand environment variables.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L34-L39
17,032
alephdata/memorious
memorious/logic/context.py
Context.emit
python
def emit(self, rule='pass', stage=None, data={}, delay=None, optional=False):
    """Invoke the next stage, either based on a handling rule, or by
    calling the `pass` rule by default."""
    if stage is None:
        stage = self.stage.handlers.get(rule)
    if optional and stage is None:
        return
    if stage is None or stage not in self.crawler.stages:
        self.log.info("No next stage: %s (%s)" % (stage, rule))
        return
    state = self.dump_state()
    delay = delay or self.crawler.delay
    Queue.queue(stage, state, data, delay)
Invoke the next stage, either based on a handling rule, or by calling the `pass` rule by default.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L41-L54
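A sketch of how a crawler stage method might hand work to the next stage through the method above; the stage function and URLs are made up for illustration, only the (context, data) signature and the default 'pass' rule come from this dump:

def seed_listing(context, data):
    # Each emitted payload is queued for whichever stage the crawler
    # configuration maps to the 'pass' rule.
    for url in ["https://example.com/a", "https://example.com/b"]:
        context.emit(rule='pass', data={'url': url})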
17,033
alephdata/memorious
memorious/logic/context.py
Context.recurse
python
def recurse(self, data={}, delay=None):
    """Have a stage invoke itself with a modified set of arguments."""
    return self.emit(stage=self.stage.name,
                     data=data,
                     delay=delay)
Have a stage invoke itself with a modified set of arguments.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L56-L60
17,034
alephdata/memorious
memorious/logic/context.py
Context.execute
python
def execute(self, data):
    """Execute the crawler and create a database record of having done so."""
    if Crawl.is_aborted(self.crawler, self.run_id):
        return

    try:
        Crawl.operation_start(self.crawler, self.stage, self.run_id)
        self.log.info('[%s->%s(%s)]: %s',
                      self.crawler.name,
                      self.stage.name,
                      self.stage.method_name,
                      self.run_id)
        return self.stage.method(self, data)
    except Exception as exc:
        self.emit_exception(exc)
    finally:
        Crawl.operation_end(self.crawler, self.run_id)
        shutil.rmtree(self.work_path)
Execute the crawler and create a database record of having done so.
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L62-L80
17,035
alephdata/memorious
memorious/logic/context.py
Context.skip_incremental
python
def skip_incremental(self, *criteria):
    """Perform an incremental check on a set of criteria.

    This can be used to execute a part of a crawler only once per an
    interval (which is specified by the ``expire`` setting). If the
    operation has already been performed (and should thus be skipped),
    this will return ``True``. If the operation needs to be executed,
    the returned value will be ``False``.
    """
    if not self.incremental:
        return False

    # this is pure convenience, and will probably backfire at some point.
    key = make_key(*criteria)
    if key is None:
        return False

    if self.check_tag(key):
        return True

    self.set_tag(key, None)
    return False
Perform an incremental check on a set of criteria. This can be used to execute a part of a crawler only once per an interval (which is specified by the ``expire`` setting). If the operation has already been performed (and should thus be skipped), this will return ``True``. If the operation needs to be executed, the returned value will be ``False``.
[ "Perform", "an", "incremental", "check", "on", "a", "set", "of", "criteria", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L115-L136
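The docstring above explains when skip_incremental returns True or False, but not how a stage would typically call it. A minimal sketch of a hypothetical downstream stage follows; the stage name parse_entry and the criteria passed in are invented, and the (context, data) signature is assumed from the other stage functions in this collection.

def parse_entry(context, data):
    # Hypothetical stage: skip documents already handled in a previous run
    # when the crawler is executing incrementally. The criteria are combined
    # into a single tag key inside skip_incremental().
    if context.skip_incremental(data.get('url'), 'parsed'):
        context.log.info('Skipping, already parsed: %s', data.get('url'))
        return
    # ... expensive parsing would happen here ...
    context.emit(data=data)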
17,036
alephdata/memorious
memorious/logic/context.py
Context.store_data
def store_data(self, data, encoding='utf-8'): """Put the given content into a file, possibly encoding it as UTF-8 in the process.""" path = random_filename(self.work_path) try: with open(path, 'wb') as fh: if isinstance(data, str): data = data.encode(encoding) if data is not None: fh.write(data) return self.store_file(path) finally: try: os.unlink(path) except OSError: pass
python
def store_data(self, data, encoding='utf-8'): """Put the given content into a file, possibly encoding it as UTF-8 in the process.""" path = random_filename(self.work_path) try: with open(path, 'wb') as fh: if isinstance(data, str): data = data.encode(encoding) if data is not None: fh.write(data) return self.store_file(path) finally: try: os.unlink(path) except OSError: pass
[ "def", "store_data", "(", "self", ",", "data", ",", "encoding", "=", "'utf-8'", ")", ":", "path", "=", "random_filename", "(", "self", ".", "work_path", ")", "try", ":", "with", "open", "(", "path", ",", "'wb'", ")", "as", "fh", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", "encoding", ")", "if", "data", "is", "not", "None", ":", "fh", ".", "write", "(", "data", ")", "return", "self", ".", "store_file", "(", "path", ")", "finally", ":", "try", ":", "os", ".", "unlink", "(", "path", ")", "except", "OSError", ":", "pass" ]
Put the given content into a file, possibly encoding it as UTF-8 in the process.
[ "Put", "the", "given", "content", "into", "a", "file", "possibly", "encoding", "it", "as", "UTF", "-", "8", "in", "the", "process", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L143-L158
17,037
alephdata/memorious
memorious/logic/crawler.py
Crawler.check_due
def check_due(self): """Check if the last execution of this crawler is older than the scheduled interval.""" if self.disabled: return False if self.is_running: return False if self.delta is None: return False last_run = self.last_run if last_run is None: return True now = datetime.utcnow() if now > last_run + self.delta: return True return False
python
def check_due(self): """Check if the last execution of this crawler is older than the scheduled interval.""" if self.disabled: return False if self.is_running: return False if self.delta is None: return False last_run = self.last_run if last_run is None: return True now = datetime.utcnow() if now > last_run + self.delta: return True return False
[ "def", "check_due", "(", "self", ")", ":", "if", "self", ".", "disabled", ":", "return", "False", "if", "self", ".", "is_running", ":", "return", "False", "if", "self", ".", "delta", "is", "None", ":", "return", "False", "last_run", "=", "self", ".", "last_run", "if", "last_run", "is", "None", ":", "return", "True", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "now", ">", "last_run", "+", "self", ".", "delta", ":", "return", "True", "return", "False" ]
Check if the last execution of this crawler is older than the scheduled interval.
[ "Check", "if", "the", "last", "execution", "of", "this", "crawler", "is", "older", "than", "the", "scheduled", "interval", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/crawler.py#L48-L63
17,038
alephdata/memorious
memorious/logic/crawler.py
Crawler.flush
def flush(self): """Delete all run-time data generated by this crawler.""" Queue.flush(self) Event.delete(self) Crawl.flush(self)
python
def flush(self): """Delete all run-time data generated by this crawler.""" Queue.flush(self) Event.delete(self) Crawl.flush(self)
[ "def", "flush", "(", "self", ")", ":", "Queue", ".", "flush", "(", "self", ")", "Event", ".", "delete", "(", "self", ")", "Crawl", ".", "flush", "(", "self", ")" ]
Delete all run-time data generated by this crawler.
[ "Delete", "all", "run", "-", "time", "data", "generated", "by", "this", "crawler", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/crawler.py#L83-L87
17,039
alephdata/memorious
memorious/logic/crawler.py
Crawler.run
def run(self, incremental=None, run_id=None): """Queue the execution of a particular crawler.""" state = { 'crawler': self.name, 'run_id': run_id, 'incremental': settings.INCREMENTAL } if incremental is not None: state['incremental'] = incremental # Cancel previous runs: self.cancel() # Flush out previous events: Event.delete(self) Queue.queue(self.init_stage, state, {})
python
def run(self, incremental=None, run_id=None): """Queue the execution of a particular crawler.""" state = { 'crawler': self.name, 'run_id': run_id, 'incremental': settings.INCREMENTAL } if incremental is not None: state['incremental'] = incremental # Cancel previous runs: self.cancel() # Flush out previous events: Event.delete(self) Queue.queue(self.init_stage, state, {})
[ "def", "run", "(", "self", ",", "incremental", "=", "None", ",", "run_id", "=", "None", ")", ":", "state", "=", "{", "'crawler'", ":", "self", ".", "name", ",", "'run_id'", ":", "run_id", ",", "'incremental'", ":", "settings", ".", "INCREMENTAL", "}", "if", "incremental", "is", "not", "None", ":", "state", "[", "'incremental'", "]", "=", "incremental", "# Cancel previous runs:", "self", ".", "cancel", "(", ")", "# Flush out previous events:", "Event", ".", "delete", "(", "self", ")", "Queue", ".", "queue", "(", "self", ".", "init_stage", ",", "state", ",", "{", "}", ")" ]
Queue the execution of a particular crawler.
[ "Queue", "the", "execution", "of", "a", "particular", "crawler", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/crawler.py#L96-L110
17,040
alephdata/memorious
memorious/operations/fetch.py
fetch
def fetch(context, data): """Do an HTTP GET on the ``url`` specified in the inbound data.""" url = data.get('url') attempt = data.pop('retry_attempt', 1) try: result = context.http.get(url, lazy=True) rules = context.get('rules', {'match_all': {}}) if not Rule.get_rule(rules).apply(result): context.log.info('Fetch skip: %r', result.url) return if not result.ok: err = (result.url, result.status_code) context.emit_warning("Fetch fail [%s]: HTTP %s" % err) if not context.params.get('emit_errors', False): return else: context.log.info("Fetched [%s]: %r", result.status_code, result.url) data.update(result.serialize()) if url != result.url: tag = make_key(context.run_id, url) context.set_tag(tag, None) context.emit(data=data) except RequestException as ce: retries = int(context.get('retry', 3)) if retries >= attempt: context.log.warn("Retry: %s (error: %s)", url, ce) data['retry_attempt'] = attempt + 1 context.recurse(data=data, delay=2 ** attempt) else: context.emit_warning("Fetch fail [%s]: %s" % (url, ce))
python
def fetch(context, data): """Do an HTTP GET on the ``url`` specified in the inbound data.""" url = data.get('url') attempt = data.pop('retry_attempt', 1) try: result = context.http.get(url, lazy=True) rules = context.get('rules', {'match_all': {}}) if not Rule.get_rule(rules).apply(result): context.log.info('Fetch skip: %r', result.url) return if not result.ok: err = (result.url, result.status_code) context.emit_warning("Fetch fail [%s]: HTTP %s" % err) if not context.params.get('emit_errors', False): return else: context.log.info("Fetched [%s]: %r", result.status_code, result.url) data.update(result.serialize()) if url != result.url: tag = make_key(context.run_id, url) context.set_tag(tag, None) context.emit(data=data) except RequestException as ce: retries = int(context.get('retry', 3)) if retries >= attempt: context.log.warn("Retry: %s (error: %s)", url, ce) data['retry_attempt'] = attempt + 1 context.recurse(data=data, delay=2 ** attempt) else: context.emit_warning("Fetch fail [%s]: %s" % (url, ce))
[ "def", "fetch", "(", "context", ",", "data", ")", ":", "url", "=", "data", ".", "get", "(", "'url'", ")", "attempt", "=", "data", ".", "pop", "(", "'retry_attempt'", ",", "1", ")", "try", ":", "result", "=", "context", ".", "http", ".", "get", "(", "url", ",", "lazy", "=", "True", ")", "rules", "=", "context", ".", "get", "(", "'rules'", ",", "{", "'match_all'", ":", "{", "}", "}", ")", "if", "not", "Rule", ".", "get_rule", "(", "rules", ")", ".", "apply", "(", "result", ")", ":", "context", ".", "log", ".", "info", "(", "'Fetch skip: %r'", ",", "result", ".", "url", ")", "return", "if", "not", "result", ".", "ok", ":", "err", "=", "(", "result", ".", "url", ",", "result", ".", "status_code", ")", "context", ".", "emit_warning", "(", "\"Fetch fail [%s]: HTTP %s\"", "%", "err", ")", "if", "not", "context", ".", "params", ".", "get", "(", "'emit_errors'", ",", "False", ")", ":", "return", "else", ":", "context", ".", "log", ".", "info", "(", "\"Fetched [%s]: %r\"", ",", "result", ".", "status_code", ",", "result", ".", "url", ")", "data", ".", "update", "(", "result", ".", "serialize", "(", ")", ")", "if", "url", "!=", "result", ".", "url", ":", "tag", "=", "make_key", "(", "context", ".", "run_id", ",", "url", ")", "context", ".", "set_tag", "(", "tag", ",", "None", ")", "context", ".", "emit", "(", "data", "=", "data", ")", "except", "RequestException", "as", "ce", ":", "retries", "=", "int", "(", "context", ".", "get", "(", "'retry'", ",", "3", ")", ")", "if", "retries", ">=", "attempt", ":", "context", ".", "log", ".", "warn", "(", "\"Retry: %s (error: %s)\"", ",", "url", ",", "ce", ")", "data", "[", "'retry_attempt'", "]", "=", "attempt", "+", "1", "context", ".", "recurse", "(", "data", "=", "data", ",", "delay", "=", "2", "**", "attempt", ")", "else", ":", "context", ".", "emit_warning", "(", "\"Fetch fail [%s]: %s\"", "%", "(", "url", ",", "ce", ")", ")" ]
Do an HTTP GET on the ``url`` specified in the inbound data.
[ "Do", "an", "HTTP", "GET", "on", "the", "url", "specified", "in", "the", "inbound", "data", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/fetch.py#L8-L41
17,041
alephdata/memorious
memorious/operations/fetch.py
dav_index
def dav_index(context, data): """List files in a WebDAV directory.""" # This is made to work with ownCloud/nextCloud, but some rumor has # it they are "standards compliant" and it should thus work for # other DAV servers. url = data.get('url') result = context.http.request('PROPFIND', url) for resp in result.xml.findall('./{DAV:}response'): href = resp.findtext('./{DAV:}href') if href is None: continue rurl = urljoin(url, href) rdata = data.copy() rdata['url'] = rurl rdata['foreign_id'] = rurl if rdata['url'] == url: continue if resp.find('.//{DAV:}collection') is not None: rdata['parent_foreign_id'] = rurl context.log.info("Fetching contents of folder: %s" % rurl) context.recurse(data=rdata) else: rdata['parent_foreign_id'] = url # Do GET requests on the urls fetch(context, rdata)
python
def dav_index(context, data): """List files in a WebDAV directory.""" # This is made to work with ownCloud/nextCloud, but some rumor has # it they are "standards compliant" and it should thus work for # other DAV servers. url = data.get('url') result = context.http.request('PROPFIND', url) for resp in result.xml.findall('./{DAV:}response'): href = resp.findtext('./{DAV:}href') if href is None: continue rurl = urljoin(url, href) rdata = data.copy() rdata['url'] = rurl rdata['foreign_id'] = rurl if rdata['url'] == url: continue if resp.find('.//{DAV:}collection') is not None: rdata['parent_foreign_id'] = rurl context.log.info("Fetching contents of folder: %s" % rurl) context.recurse(data=rdata) else: rdata['parent_foreign_id'] = url # Do GET requests on the urls fetch(context, rdata)
[ "def", "dav_index", "(", "context", ",", "data", ")", ":", "# This is made to work with ownCloud/nextCloud, but some rumor has", "# it they are \"standards compliant\" and it should thus work for", "# other DAV servers.", "url", "=", "data", ".", "get", "(", "'url'", ")", "result", "=", "context", ".", "http", ".", "request", "(", "'PROPFIND'", ",", "url", ")", "for", "resp", "in", "result", ".", "xml", ".", "findall", "(", "'./{DAV:}response'", ")", ":", "href", "=", "resp", ".", "findtext", "(", "'./{DAV:}href'", ")", "if", "href", "is", "None", ":", "continue", "rurl", "=", "urljoin", "(", "url", ",", "href", ")", "rdata", "=", "data", ".", "copy", "(", ")", "rdata", "[", "'url'", "]", "=", "rurl", "rdata", "[", "'foreign_id'", "]", "=", "rurl", "if", "rdata", "[", "'url'", "]", "==", "url", ":", "continue", "if", "resp", ".", "find", "(", "'.//{DAV:}collection'", ")", "is", "not", "None", ":", "rdata", "[", "'parent_foreign_id'", "]", "=", "rurl", "context", ".", "log", ".", "info", "(", "\"Fetching contents of folder: %s\"", "%", "rurl", ")", "context", ".", "recurse", "(", "data", "=", "rdata", ")", "else", ":", "rdata", "[", "'parent_foreign_id'", "]", "=", "url", "# Do GET requests on the urls", "fetch", "(", "context", ",", "rdata", ")" ]
List files in a WebDAV directory.
[ "List", "files", "in", "a", "WebDAV", "directory", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/fetch.py#L44-L71
17,042
alephdata/memorious
memorious/operations/fetch.py
session
def session(context, data): """Set some HTTP parameters for all subsequent requests. This includes ``user`` and ``password`` for HTTP basic authentication, and ``user_agent`` as a header. """ context.http.reset() user = context.get('user') password = context.get('password') if user is not None and password is not None: context.http.session.auth = (user, password) user_agent = context.get('user_agent') if user_agent is not None: context.http.session.headers['User-Agent'] = user_agent referer = context.get('url') if referer is not None: context.http.session.headers['Referer'] = referer proxy = context.get('proxy') if proxy is not None: proxies = {'http': proxy, 'https': proxy} context.http.session.proxies = proxies # Explicitly save the session because no actual HTTP requests were made. context.http.save() context.emit(data=data)
python
def session(context, data): """Set some HTTP parameters for all subsequent requests. This includes ``user`` and ``password`` for HTTP basic authentication, and ``user_agent`` as a header. """ context.http.reset() user = context.get('user') password = context.get('password') if user is not None and password is not None: context.http.session.auth = (user, password) user_agent = context.get('user_agent') if user_agent is not None: context.http.session.headers['User-Agent'] = user_agent referer = context.get('url') if referer is not None: context.http.session.headers['Referer'] = referer proxy = context.get('proxy') if proxy is not None: proxies = {'http': proxy, 'https': proxy} context.http.session.proxies = proxies # Explicitly save the session because no actual HTTP requests were made. context.http.save() context.emit(data=data)
[ "def", "session", "(", "context", ",", "data", ")", ":", "context", ".", "http", ".", "reset", "(", ")", "user", "=", "context", ".", "get", "(", "'user'", ")", "password", "=", "context", ".", "get", "(", "'password'", ")", "if", "user", "is", "not", "None", "and", "password", "is", "not", "None", ":", "context", ".", "http", ".", "session", ".", "auth", "=", "(", "user", ",", "password", ")", "user_agent", "=", "context", ".", "get", "(", "'user_agent'", ")", "if", "user_agent", "is", "not", "None", ":", "context", ".", "http", ".", "session", ".", "headers", "[", "'User-Agent'", "]", "=", "user_agent", "referer", "=", "context", ".", "get", "(", "'url'", ")", "if", "referer", "is", "not", "None", ":", "context", ".", "http", ".", "session", ".", "headers", "[", "'Referer'", "]", "=", "referer", "proxy", "=", "context", ".", "get", "(", "'proxy'", ")", "if", "proxy", "is", "not", "None", ":", "proxies", "=", "{", "'http'", ":", "proxy", ",", "'https'", ":", "proxy", "}", "context", ".", "http", ".", "session", ".", "proxies", "=", "proxies", "# Explicitly save the session because no actual HTTP requests were made.", "context", ".", "http", ".", "save", "(", ")", "context", ".", "emit", "(", "data", "=", "data", ")" ]
Set some HTTP parameters for all subsequent requests. This includes ``user`` and ``password`` for HTTP basic authentication, and ``user_agent`` as a header.
[ "Set", "some", "HTTP", "parameters", "for", "all", "subsequent", "requests", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/fetch.py#L74-L103
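For illustration, the optional parameters that the session operation reads via context.get() could be collected as follows; the concrete values are made up and would normally come from the crawler's stage configuration rather than a Python dict.

SESSION_PARAMS = {
    'user': 'demo',                          # together with 'password', enables HTTP basic auth
    'password': 'secret',
    'user_agent': 'memorious-example/1.0',   # sent as the User-Agent header
    'url': 'https://example.org/',           # sent as the Referer header
    'proxy': 'http://127.0.0.1:8080',        # applied to both http and https traffic
}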
17,043
alephdata/memorious
memorious/model/event.py
Event.save
def save(cls, crawler, stage, level, run_id, error=None, message=None): """Create an event, possibly based on an exception.""" event = { 'stage': stage.name, 'level': level, 'timestamp': pack_now(), 'error': error, 'message': message } data = dump_json(event) conn.lpush(make_key(crawler, "events"), data) conn.lpush(make_key(crawler, "events", level), data) conn.lpush(make_key(crawler, "events", stage), data) conn.lpush(make_key(crawler, "events", stage, level), data) conn.lpush(make_key(crawler, "events", run_id), data) conn.lpush(make_key(crawler, "events", run_id, level), data) return event
python
def save(cls, crawler, stage, level, run_id, error=None, message=None): """Create an event, possibly based on an exception.""" event = { 'stage': stage.name, 'level': level, 'timestamp': pack_now(), 'error': error, 'message': message } data = dump_json(event) conn.lpush(make_key(crawler, "events"), data) conn.lpush(make_key(crawler, "events", level), data) conn.lpush(make_key(crawler, "events", stage), data) conn.lpush(make_key(crawler, "events", stage, level), data) conn.lpush(make_key(crawler, "events", run_id), data) conn.lpush(make_key(crawler, "events", run_id, level), data) return event
[ "def", "save", "(", "cls", ",", "crawler", ",", "stage", ",", "level", ",", "run_id", ",", "error", "=", "None", ",", "message", "=", "None", ")", ":", "event", "=", "{", "'stage'", ":", "stage", ".", "name", ",", "'level'", ":", "level", ",", "'timestamp'", ":", "pack_now", "(", ")", ",", "'error'", ":", "error", ",", "'message'", ":", "message", "}", "data", "=", "dump_json", "(", "event", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ")", ",", "data", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ",", "level", ")", ",", "data", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ",", "stage", ")", ",", "data", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ",", "stage", ",", "level", ")", ",", "data", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ",", "run_id", ")", ",", "data", ")", "conn", ".", "lpush", "(", "make_key", "(", "crawler", ",", "\"events\"", ",", "run_id", ",", "level", ")", ",", "data", ")", "return", "event" ]
Create an event, possibly based on an exception.
[ "Create", "an", "event", "possibly", "based", "on", "an", "exception", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L19-L35
17,044
alephdata/memorious
memorious/model/event.py
Event.get_stage_events
def get_stage_events(cls, crawler, stage_name, start, end, level=None): """Events from a particular stage""" key = make_key(crawler, "events", stage_name, level) return cls.event_list(key, start, end)
python
def get_stage_events(cls, crawler, stage_name, start, end, level=None): """Events from a particular stage""" key = make_key(crawler, "events", stage_name, level) return cls.event_list(key, start, end)
[ "def", "get_stage_events", "(", "cls", ",", "crawler", ",", "stage_name", ",", "start", ",", "end", ",", "level", "=", "None", ")", ":", "key", "=", "make_key", "(", "crawler", ",", "\"events\"", ",", "stage_name", ",", "level", ")", "return", "cls", ".", "event_list", "(", "key", ",", "start", ",", "end", ")" ]
Events from a particular stage
[ "Events", "from", "a", "particular", "stage" ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L93-L96
17,045
alephdata/memorious
memorious/model/event.py
Event.get_run_events
def get_run_events(cls, crawler, run_id, start, end, level=None): """Events from a particular run""" key = make_key(crawler, "events", run_id, level) return cls.event_list(key, start, end)
python
def get_run_events(cls, crawler, run_id, start, end, level=None): """Events from a particular run""" key = make_key(crawler, "events", run_id, level) return cls.event_list(key, start, end)
[ "def", "get_run_events", "(", "cls", ",", "crawler", ",", "run_id", ",", "start", ",", "end", ",", "level", "=", "None", ")", ":", "key", "=", "make_key", "(", "crawler", ",", "\"events\"", ",", "run_id", ",", "level", ")", "return", "cls", ".", "event_list", "(", "key", ",", "start", ",", "end", ")" ]
Events from a particular run
[ "Events", "from", "a", "particular", "run" ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L99-L102
17,046
alephdata/memorious
memorious/helpers/__init__.py
soviet_checksum
def soviet_checksum(code): """Courtesy of Sir Vlad Lavrov.""" def sum_digits(code, offset=1): total = 0 for digit, index in zip(code[:7], count(offset)): total += int(digit) * index summed = (total // 11 * 11) return total - summed check = sum_digits(code, 1) if check == 10: check = sum_digits(code, 3) if check == 10: return code + '0' return code + str(check)
python
def soviet_checksum(code): """Courtesy of Sir Vlad Lavrov.""" def sum_digits(code, offset=1): total = 0 for digit, index in zip(code[:7], count(offset)): total += int(digit) * index summed = (total // 11 * 11) return total - summed check = sum_digits(code, 1) if check == 10: check = sum_digits(code, 3) if check == 10: return code + '0' return code + str(check)
[ "def", "soviet_checksum", "(", "code", ")", ":", "def", "sum_digits", "(", "code", ",", "offset", "=", "1", ")", ":", "total", "=", "0", "for", "digit", ",", "index", "in", "zip", "(", "code", "[", ":", "7", "]", ",", "count", "(", "offset", ")", ")", ":", "total", "+=", "int", "(", "digit", ")", "*", "index", "summed", "=", "(", "total", "//", "11", "*", "11", ")", "return", "total", "-", "summed", "check", "=", "sum_digits", "(", "code", ",", "1", ")", "if", "check", "==", "10", ":", "check", "=", "sum_digits", "(", "code", ",", "3", ")", "if", "check", "==", "10", ":", "return", "code", "+", "'0'", "return", "code", "+", "str", "(", "check", ")" ]
Courtesy of Sir Vlad Lavrov.
[ "Courtesy", "of", "Sir", "Vlad", "Lavrov", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L16-L30
17,047
alephdata/memorious
memorious/helpers/__init__.py
search_results_total
def search_results_total(html, xpath, check, delimiter): """ Get the total number of results from the DOM of a search index. """ for container in html.findall(xpath): if check in container.findtext('.'): text = container.findtext('.').split(delimiter) total = int(text[-1].strip()) return total
python
def search_results_total(html, xpath, check, delimiter): """ Get the total number of results from the DOM of a search index. """ for container in html.findall(xpath): if check in container.findtext('.'): text = container.findtext('.').split(delimiter) total = int(text[-1].strip()) return total
[ "def", "search_results_total", "(", "html", ",", "xpath", ",", "check", ",", "delimiter", ")", ":", "for", "container", "in", "html", ".", "findall", "(", "xpath", ")", ":", "if", "check", "in", "container", ".", "findtext", "(", "'.'", ")", ":", "text", "=", "container", ".", "findtext", "(", "'.'", ")", ".", "split", "(", "delimiter", ")", "total", "=", "int", "(", "text", "[", "-", "1", "]", ".", "strip", "(", ")", ")", "return", "total" ]
Get the total number of results from the DOM of a search index.
[ "Get", "the", "total", "number", "of", "results", "from", "the", "DOM", "of", "a", "search", "index", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L33-L39
17,048
alephdata/memorious
memorious/helpers/__init__.py
search_results_last_url
def search_results_last_url(html, xpath, label): """ Get the URL of the 'last' button in a search results listing. """ for container in html.findall(xpath): if container.text_content().strip() == label: return container.find('.//a').get('href')
python
def search_results_last_url(html, xpath, label): """ Get the URL of the 'last' button in a search results listing. """ for container in html.findall(xpath): if container.text_content().strip() == label: return container.find('.//a').get('href')
[ "def", "search_results_last_url", "(", "html", ",", "xpath", ",", "label", ")", ":", "for", "container", "in", "html", ".", "findall", "(", "xpath", ")", ":", "if", "container", ".", "text_content", "(", ")", ".", "strip", "(", ")", "==", "label", ":", "return", "container", ".", "find", "(", "'.//a'", ")", ".", "get", "(", "'href'", ")" ]
Get the URL of the 'last' button in a search results listing.
[ "Get", "the", "URL", "of", "the", "last", "button", "in", "a", "search", "results", "listing", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L42-L46
17,049
alephdata/memorious
memorious/model/crawl.py
Crawl.op_count
def op_count(cls, crawler, stage=None): """Total operations performed for this crawler""" if stage: total_ops = conn.get(make_key(crawler, stage)) else: total_ops = conn.get(make_key(crawler, "total_ops")) return unpack_int(total_ops)
python
def op_count(cls, crawler, stage=None): """Total operations performed for this crawler""" if stage: total_ops = conn.get(make_key(crawler, stage)) else: total_ops = conn.get(make_key(crawler, "total_ops")) return unpack_int(total_ops)
[ "def", "op_count", "(", "cls", ",", "crawler", ",", "stage", "=", "None", ")", ":", "if", "stage", ":", "total_ops", "=", "conn", ".", "get", "(", "make_key", "(", "crawler", ",", "stage", ")", ")", "else", ":", "total_ops", "=", "conn", ".", "get", "(", "make_key", "(", "crawler", ",", "\"total_ops\"", ")", ")", "return", "unpack_int", "(", "total_ops", ")" ]
Total operations performed for this crawler
[ "Total", "operations", "performed", "for", "this", "crawler" ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/crawl.py#L21-L27
17,050
alephdata/memorious
memorious/ui/views.py
index
def index(): """Generate a list of all crawlers, alphabetically, with op counts.""" crawlers = [] for crawler in manager: data = Event.get_counts(crawler) data['last_active'] = crawler.last_run data['total_ops'] = crawler.op_count data['running'] = crawler.is_running data['crawler'] = crawler crawlers.append(data) return render_template('index.html', crawlers=crawlers)
python
def index(): """Generate a list of all crawlers, alphabetically, with op counts.""" crawlers = [] for crawler in manager: data = Event.get_counts(crawler) data['last_active'] = crawler.last_run data['total_ops'] = crawler.op_count data['running'] = crawler.is_running data['crawler'] = crawler crawlers.append(data) return render_template('index.html', crawlers=crawlers)
[ "def", "index", "(", ")", ":", "crawlers", "=", "[", "]", "for", "crawler", "in", "manager", ":", "data", "=", "Event", ".", "get_counts", "(", "crawler", ")", "data", "[", "'last_active'", "]", "=", "crawler", ".", "last_run", "data", "[", "'total_ops'", "]", "=", "crawler", ".", "op_count", "data", "[", "'running'", "]", "=", "crawler", ".", "is_running", "data", "[", "'crawler'", "]", "=", "crawler", "crawlers", ".", "append", "(", "data", ")", "return", "render_template", "(", "'index.html'", ",", "crawlers", "=", "crawlers", ")" ]
Generate a list of all crawlers, alphabetically, with op counts.
[ "Generate", "a", "list", "of", "all", "crawlers", "alphabetically", "with", "op", "counts", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/ui/views.py#L67-L77
17,051
alephdata/memorious
memorious/operations/clean.py
clean_html
def clean_html(context, data): """Clean an HTML DOM and store the changed version.""" doc = _get_html_document(context, data) if doc is None: context.emit(data=data) return remove_paths = context.params.get('remove_paths') for path in ensure_list(remove_paths): for el in doc.findall(path): el.drop_tree() html_text = html.tostring(doc, pretty_print=True) content_hash = context.store_data(html_text) data['content_hash'] = content_hash context.emit(data=data)
python
def clean_html(context, data): """Clean an HTML DOM and store the changed version.""" doc = _get_html_document(context, data) if doc is None: context.emit(data=data) return remove_paths = context.params.get('remove_paths') for path in ensure_list(remove_paths): for el in doc.findall(path): el.drop_tree() html_text = html.tostring(doc, pretty_print=True) content_hash = context.store_data(html_text) data['content_hash'] = content_hash context.emit(data=data)
[ "def", "clean_html", "(", "context", ",", "data", ")", ":", "doc", "=", "_get_html_document", "(", "context", ",", "data", ")", "if", "doc", "is", "None", ":", "context", ".", "emit", "(", "data", "=", "data", ")", "return", "remove_paths", "=", "context", ".", "params", ".", "get", "(", "'remove_paths'", ")", "for", "path", "in", "ensure_list", "(", "remove_paths", ")", ":", "for", "el", "in", "doc", ".", "findall", "(", "path", ")", ":", "el", ".", "drop_tree", "(", ")", "html_text", "=", "html", ".", "tostring", "(", "doc", ",", "pretty_print", "=", "True", ")", "content_hash", "=", "context", ".", "store_data", "(", "html_text", ")", "data", "[", "'content_hash'", "]", "=", "content_hash", "context", ".", "emit", "(", "data", "=", "data", ")" ]
Clean an HTML DOM and store the changed version.
[ "Clean", "an", "HTML", "DOM", "and", "store", "the", "changed", "version", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/clean.py#L11-L26
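The only parameter clean_html consults is remove_paths, a list of element paths passed to doc.findall(); matching elements are dropped before the document is stored again. A hypothetical parameter block might look like this (the paths are examples, not taken from any real crawler configuration).

CLEAN_HTML_PARAMS = {
    # Each entry is an lxml find() path; every match is removed from the DOM
    # with drop_tree() before the cleaned HTML is hashed and stored.
    'remove_paths': ['.//script', './/style', './/nav'],
}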
17,052
alephdata/memorious
memorious/task_runner.py
TaskRunner.execute
def execute(cls, stage, state, data, next_allowed_exec_time=None): """Execute the operation, rate limiting allowing.""" try: context = Context.from_state(state, stage) now = datetime.utcnow() if next_allowed_exec_time and now < next_allowed_exec_time: # task not allowed to run yet; put it back in the queue Queue.queue(stage, state, data, delay=next_allowed_exec_time) elif context.crawler.disabled: pass elif context.stage.rate_limit: try: with rate_limiter(context): context.execute(data) except RateLimitException: delay = max(1, 1.0/context.stage.rate_limit) delay = random.randint(1, int(delay)) context.log.info( "Rate limit exceeded, delaying %d sec.", delay ) Queue.queue(stage, state, data, delay=delay) else: context.execute(data) except Exception: log.exception("Task failed to execute:") finally: # Decrease the pending task count after executing a task. Queue.decr_pending(context.crawler) # If we don't have any more tasks to execute, time to clean up. if not context.crawler.is_running: context.crawler.aggregate(context)
python
def execute(cls, stage, state, data, next_allowed_exec_time=None): """Execute the operation, rate limiting allowing.""" try: context = Context.from_state(state, stage) now = datetime.utcnow() if next_allowed_exec_time and now < next_allowed_exec_time: # task not allowed to run yet; put it back in the queue Queue.queue(stage, state, data, delay=next_allowed_exec_time) elif context.crawler.disabled: pass elif context.stage.rate_limit: try: with rate_limiter(context): context.execute(data) except RateLimitException: delay = max(1, 1.0/context.stage.rate_limit) delay = random.randint(1, int(delay)) context.log.info( "Rate limit exceeded, delaying %d sec.", delay ) Queue.queue(stage, state, data, delay=delay) else: context.execute(data) except Exception: log.exception("Task failed to execute:") finally: # Decrease the pending task count after executing a task. Queue.decr_pending(context.crawler) # If we don't have any more tasks to execute, time to clean up. if not context.crawler.is_running: context.crawler.aggregate(context)
[ "def", "execute", "(", "cls", ",", "stage", ",", "state", ",", "data", ",", "next_allowed_exec_time", "=", "None", ")", ":", "try", ":", "context", "=", "Context", ".", "from_state", "(", "state", ",", "stage", ")", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "next_allowed_exec_time", "and", "now", "<", "next_allowed_exec_time", ":", "# task not allowed to run yet; put it back in the queue", "Queue", ".", "queue", "(", "stage", ",", "state", ",", "data", ",", "delay", "=", "next_allowed_exec_time", ")", "elif", "context", ".", "crawler", ".", "disabled", ":", "pass", "elif", "context", ".", "stage", ".", "rate_limit", ":", "try", ":", "with", "rate_limiter", "(", "context", ")", ":", "context", ".", "execute", "(", "data", ")", "except", "RateLimitException", ":", "delay", "=", "max", "(", "1", ",", "1.0", "/", "context", ".", "stage", ".", "rate_limit", ")", "delay", "=", "random", ".", "randint", "(", "1", ",", "int", "(", "delay", ")", ")", "context", ".", "log", ".", "info", "(", "\"Rate limit exceeded, delaying %d sec.\"", ",", "delay", ")", "Queue", ".", "queue", "(", "stage", ",", "state", ",", "data", ",", "delay", "=", "delay", ")", "else", ":", "context", ".", "execute", "(", "data", ")", "except", "Exception", ":", "log", ".", "exception", "(", "\"Task failed to execute:\"", ")", "finally", ":", "# Decrease the pending task count after executing a task.", "Queue", ".", "decr_pending", "(", "context", ".", "crawler", ")", "# If we don't have any more tasks to execute, time to clean up.", "if", "not", "context", ".", "crawler", ".", "is_running", ":", "context", ".", "crawler", ".", "aggregate", "(", "context", ")" ]
Execute the operation, rate limiting allowing.
[ "Execute", "the", "operation", "rate", "limiting", "allowing", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/task_runner.py#L19-L49
17,053
alephdata/memorious
memorious/operations/db.py
_recursive_upsert
def _recursive_upsert(context, params, data): """Insert or update nested dicts recursively into db tables""" children = params.get("children", {}) nested_calls = [] for child_params in children: key = child_params.get("key") child_data_list = ensure_list(data.pop(key)) if isinstance(child_data_list, dict): child_data_list = [child_data_list] if not (isinstance(child_data_list, list) and all(isinstance(i, dict) for i in child_data_list)): context.log.warn( "Expecting a dict or a list of dicts as children for key", key ) continue if child_data_list: table_suffix = child_params.get("table_suffix", key) child_params["table"] = params.get("table") + "_" + table_suffix # copy some properties over from parent to child inherit = child_params.get("inherit", {}) for child_data in child_data_list: for dest, src in inherit.items(): child_data[dest] = data.get(src) nested_calls.append((child_params, child_data)) # Insert or update data _upsert(context, params, data) for child_params, child_data in nested_calls: _recursive_upsert(context, child_params, child_data)
python
def _recursive_upsert(context, params, data): """Insert or update nested dicts recursively into db tables""" children = params.get("children", {}) nested_calls = [] for child_params in children: key = child_params.get("key") child_data_list = ensure_list(data.pop(key)) if isinstance(child_data_list, dict): child_data_list = [child_data_list] if not (isinstance(child_data_list, list) and all(isinstance(i, dict) for i in child_data_list)): context.log.warn( "Expecting a dict or a list of dicts as children for key", key ) continue if child_data_list: table_suffix = child_params.get("table_suffix", key) child_params["table"] = params.get("table") + "_" + table_suffix # copy some properties over from parent to child inherit = child_params.get("inherit", {}) for child_data in child_data_list: for dest, src in inherit.items(): child_data[dest] = data.get(src) nested_calls.append((child_params, child_data)) # Insert or update data _upsert(context, params, data) for child_params, child_data in nested_calls: _recursive_upsert(context, child_params, child_data)
[ "def", "_recursive_upsert", "(", "context", ",", "params", ",", "data", ")", ":", "children", "=", "params", ".", "get", "(", "\"children\"", ",", "{", "}", ")", "nested_calls", "=", "[", "]", "for", "child_params", "in", "children", ":", "key", "=", "child_params", ".", "get", "(", "\"key\"", ")", "child_data_list", "=", "ensure_list", "(", "data", ".", "pop", "(", "key", ")", ")", "if", "isinstance", "(", "child_data_list", ",", "dict", ")", ":", "child_data_list", "=", "[", "child_data_list", "]", "if", "not", "(", "isinstance", "(", "child_data_list", ",", "list", ")", "and", "all", "(", "isinstance", "(", "i", ",", "dict", ")", "for", "i", "in", "child_data_list", ")", ")", ":", "context", ".", "log", ".", "warn", "(", "\"Expecting a dict or a list of dicts as children for key\"", ",", "key", ")", "continue", "if", "child_data_list", ":", "table_suffix", "=", "child_params", ".", "get", "(", "\"table_suffix\"", ",", "key", ")", "child_params", "[", "\"table\"", "]", "=", "params", ".", "get", "(", "\"table\"", ")", "+", "\"_\"", "+", "table_suffix", "# copy some properties over from parent to child", "inherit", "=", "child_params", ".", "get", "(", "\"inherit\"", ",", "{", "}", ")", "for", "child_data", "in", "child_data_list", ":", "for", "dest", ",", "src", "in", "inherit", ".", "items", "(", ")", ":", "child_data", "[", "dest", "]", "=", "data", ".", "get", "(", "src", ")", "nested_calls", ".", "append", "(", "(", "child_params", ",", "child_data", ")", ")", "# Insert or update data", "_upsert", "(", "context", ",", "params", ",", "data", ")", "for", "child_params", ",", "child_data", "in", "nested_calls", ":", "_recursive_upsert", "(", "context", ",", "child_params", ",", "child_data", ")" ]
Insert or update nested dicts recursively into db tables
[ "Insert", "or", "update", "nested", "dicts", "recursively", "into", "db", "tables" ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/db.py#L21-L48
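To make the recursion concrete, here is a hypothetical params/data pair built only from the keys the function reads ('children', 'key', 'table_suffix', 'inherit' and 'table'); the table and column names are invented for illustration.

params = {
    'table': 'companies',
    'children': [{
        'key': 'officers',                # data['officers'] holds the child rows
        'table_suffix': 'officers',       # child rows land in table 'companies_officers'
        'inherit': {'company_id': 'id'},  # copy the parent's data['id'] onto every child
    }],
}
data = {
    'id': 'c-1',
    'name': 'Example Holdings Ltd',
    'officers': [{'name': 'Jane Doe'}, {'name': 'John Roe'}],
}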
17,054
alephdata/memorious
memorious/operations/db.py
db
def db(context, data): """Insert or update `data` as a row into specified db table""" table = context.params.get("table", context.crawler.name) params = context.params params["table"] = table _recursive_upsert(context, params, data)
python
def db(context, data): """Insert or update `data` as a row into specified db table""" table = context.params.get("table", context.crawler.name) params = context.params params["table"] = table _recursive_upsert(context, params, data)
[ "def", "db", "(", "context", ",", "data", ")", ":", "table", "=", "context", ".", "params", ".", "get", "(", "\"table\"", ",", "context", ".", "crawler", ".", "name", ")", "params", "=", "context", ".", "params", "params", "[", "\"table\"", "]", "=", "table", "_recursive_upsert", "(", "context", ",", "params", ",", "data", ")" ]
Insert or update `data` as a row into specified db table
[ "Insert", "or", "update", "data", "as", "a", "row", "into", "specified", "db", "table" ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/db.py#L51-L56
17,055
alephdata/memorious
memorious/cli.py
cli
def cli(debug, cache, incremental): """Crawler framework for documents and structured scrapers.""" settings.HTTP_CACHE = cache settings.INCREMENTAL = incremental settings.DEBUG = debug if settings.DEBUG: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) init_memorious()
python
def cli(debug, cache, incremental): """Crawler framework for documents and structured scrapers.""" settings.HTTP_CACHE = cache settings.INCREMENTAL = incremental settings.DEBUG = debug if settings.DEBUG: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) init_memorious()
[ "def", "cli", "(", "debug", ",", "cache", ",", "incremental", ")", ":", "settings", ".", "HTTP_CACHE", "=", "cache", "settings", ".", "INCREMENTAL", "=", "incremental", "settings", ".", "DEBUG", "=", "debug", "if", "settings", ".", "DEBUG", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "init_memorious", "(", ")" ]
Crawler framework for documents and structured scrapers.
[ "Crawler", "framework", "for", "documents", "and", "structured", "scrapers", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L21-L30
17,056
alephdata/memorious
memorious/cli.py
run
def run(crawler): """Run a specified crawler.""" crawler = get_crawler(crawler) crawler.run() if is_sync_mode(): TaskRunner.run_sync()
python
def run(crawler): """Run a specified crawler.""" crawler = get_crawler(crawler) crawler.run() if is_sync_mode(): TaskRunner.run_sync()
[ "def", "run", "(", "crawler", ")", ":", "crawler", "=", "get_crawler", "(", "crawler", ")", "crawler", ".", "run", "(", ")", "if", "is_sync_mode", "(", ")", ":", "TaskRunner", ".", "run_sync", "(", ")" ]
Run a specified crawler.
[ "Run", "a", "specified", "crawler", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L43-L48
17,057
alephdata/memorious
memorious/cli.py
index
def index(): """List the available crawlers.""" crawler_list = [] for crawler in manager: is_due = 'yes' if crawler.check_due() else 'no' if crawler.disabled: is_due = 'off' crawler_list.append([crawler.name, crawler.description, crawler.schedule, is_due, Queue.size(crawler)]) headers = ['Name', 'Description', 'Schedule', 'Due', 'Pending'] print(tabulate(crawler_list, headers=headers))
python
def index(): """List the available crawlers.""" crawler_list = [] for crawler in manager: is_due = 'yes' if crawler.check_due() else 'no' if crawler.disabled: is_due = 'off' crawler_list.append([crawler.name, crawler.description, crawler.schedule, is_due, Queue.size(crawler)]) headers = ['Name', 'Description', 'Schedule', 'Due', 'Pending'] print(tabulate(crawler_list, headers=headers))
[ "def", "index", "(", ")", ":", "crawler_list", "=", "[", "]", "for", "crawler", "in", "manager", ":", "is_due", "=", "'yes'", "if", "crawler", ".", "check_due", "(", ")", "else", "'no'", "if", "crawler", ".", "disabled", ":", "is_due", "=", "'off'", "crawler_list", ".", "append", "(", "[", "crawler", ".", "name", ",", "crawler", ".", "description", ",", "crawler", ".", "schedule", ",", "is_due", ",", "Queue", ".", "size", "(", "crawler", ")", "]", ")", "headers", "=", "[", "'Name'", ",", "'Description'", ",", "'Schedule'", ",", "'Due'", ",", "'Pending'", "]", "print", "(", "tabulate", "(", "crawler_list", ",", "headers", "=", "headers", ")", ")" ]
List the available crawlers.
[ "List", "the", "available", "crawlers", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L74-L87
17,058
alephdata/memorious
memorious/cli.py
scheduled
def scheduled(wait=False): """Run crawlers that are due.""" manager.run_scheduled() while wait: # Loop and try to run scheduled crawlers at short intervals manager.run_scheduled() time.sleep(settings.SCHEDULER_INTERVAL)
python
def scheduled(wait=False): """Run crawlers that are due.""" manager.run_scheduled() while wait: # Loop and try to run scheduled crawlers at short intervals manager.run_scheduled() time.sleep(settings.SCHEDULER_INTERVAL)
[ "def", "scheduled", "(", "wait", "=", "False", ")", ":", "manager", ".", "run_scheduled", "(", ")", "while", "wait", ":", "# Loop and try to run scheduled crawlers at short intervals", "manager", ".", "run_scheduled", "(", ")", "time", ".", "sleep", "(", "settings", ".", "SCHEDULER_INTERVAL", ")" ]
Run crawlers that are due.
[ "Run", "crawlers", "that", "are", "due", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L92-L98
17,059
alephdata/memorious
memorious/operations/store.py
_get_directory_path
def _get_directory_path(context): """Get the storage path for the output.""" path = os.path.join(settings.BASE_PATH, 'store') path = context.params.get('path', path) path = os.path.join(path, context.crawler.name) path = os.path.abspath(os.path.expandvars(path)) try: os.makedirs(path) except Exception: pass return path
python
def _get_directory_path(context): """Get the storage path for the output.""" path = os.path.join(settings.BASE_PATH, 'store') path = context.params.get('path', path) path = os.path.join(path, context.crawler.name) path = os.path.abspath(os.path.expandvars(path)) try: os.makedirs(path) except Exception: pass return path
[ "def", "_get_directory_path", "(", "context", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "settings", ".", "BASE_PATH", ",", "'store'", ")", "path", "=", "context", ".", "params", ".", "get", "(", "'path'", ",", "path", ")", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "context", ".", "crawler", ".", "name", ")", "path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expandvars", "(", "path", ")", ")", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "Exception", ":", "pass", "return", "path" ]
Get the storage path for the output.
[ "Get", "the", "storage", "path", "for", "the", "output", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/store.py#L9-L19
17,060
alephdata/memorious
memorious/operations/store.py
directory
def directory(context, data): """Store the collected files to a given directory.""" with context.http.rehash(data) as result: if not result.ok: return content_hash = data.get('content_hash') if content_hash is None: context.emit_warning("No content hash in data.") return path = _get_directory_path(context) file_name = data.get('file_name', result.file_name) file_name = safe_filename(file_name, default='raw') file_name = '%s.%s' % (content_hash, file_name) data['_file_name'] = file_name file_path = os.path.join(path, file_name) if not os.path.exists(file_path): shutil.copyfile(result.file_path, file_path) context.log.info("Store [directory]: %s", file_name) meta_path = os.path.join(path, '%s.json' % content_hash) with open(meta_path, 'w') as fh: json.dump(data, fh)
python
def directory(context, data): """Store the collected files to a given directory.""" with context.http.rehash(data) as result: if not result.ok: return content_hash = data.get('content_hash') if content_hash is None: context.emit_warning("No content hash in data.") return path = _get_directory_path(context) file_name = data.get('file_name', result.file_name) file_name = safe_filename(file_name, default='raw') file_name = '%s.%s' % (content_hash, file_name) data['_file_name'] = file_name file_path = os.path.join(path, file_name) if not os.path.exists(file_path): shutil.copyfile(result.file_path, file_path) context.log.info("Store [directory]: %s", file_name) meta_path = os.path.join(path, '%s.json' % content_hash) with open(meta_path, 'w') as fh: json.dump(data, fh)
[ "def", "directory", "(", "context", ",", "data", ")", ":", "with", "context", ".", "http", ".", "rehash", "(", "data", ")", "as", "result", ":", "if", "not", "result", ".", "ok", ":", "return", "content_hash", "=", "data", ".", "get", "(", "'content_hash'", ")", "if", "content_hash", "is", "None", ":", "context", ".", "emit_warning", "(", "\"No content hash in data.\"", ")", "return", "path", "=", "_get_directory_path", "(", "context", ")", "file_name", "=", "data", ".", "get", "(", "'file_name'", ",", "result", ".", "file_name", ")", "file_name", "=", "safe_filename", "(", "file_name", ",", "default", "=", "'raw'", ")", "file_name", "=", "'%s.%s'", "%", "(", "content_hash", ",", "file_name", ")", "data", "[", "'_file_name'", "]", "=", "file_name", "file_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "file_name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "shutil", ".", "copyfile", "(", "result", ".", "file_path", ",", "file_path", ")", "context", ".", "log", ".", "info", "(", "\"Store [directory]: %s\"", ",", "file_name", ")", "meta_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%s.json'", "%", "content_hash", ")", "with", "open", "(", "meta_path", ",", "'w'", ")", "as", "fh", ":", "json", ".", "dump", "(", "data", ",", "fh", ")" ]
Store the collected files to a given directory.
[ "Store", "the", "collected", "files", "to", "a", "given", "directory", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/store.py#L22-L46
17,061
alephdata/memorious
memorious/operations/initializers.py
seed
def seed(context, data): """Initialize a crawler with a set of seed URLs. The URLs are given as a list or single value to the ``urls`` parameter. If this is called as a second stage in a crawler, the URL will be formatted against the supplied ``data`` values, e.g.: https://crawl.site/entries/%(number)s.html """ for key in ('url', 'urls'): for url in ensure_list(context.params.get(key)): url = url % data context.emit(data={'url': url})
python
def seed(context, data): """Initialize a crawler with a set of seed URLs. The URLs are given as a list or single value to the ``urls`` parameter. If this is called as a second stage in a crawler, the URL will be formatted against the supplied ``data`` values, e.g.: https://crawl.site/entries/%(number)s.html """ for key in ('url', 'urls'): for url in ensure_list(context.params.get(key)): url = url % data context.emit(data={'url': url})
[ "def", "seed", "(", "context", ",", "data", ")", ":", "for", "key", "in", "(", "'url'", ",", "'urls'", ")", ":", "for", "url", "in", "ensure_list", "(", "context", ".", "params", ".", "get", "(", "key", ")", ")", ":", "url", "=", "url", "%", "data", "context", ".", "emit", "(", "data", "=", "{", "'url'", ":", "url", "}", ")" ]
Initialize a crawler with a set of seed URLs. The URLs are given as a list or single value to the ``urls`` parameter. If this is called as a second stage in a crawler, the URL will be formatted against the supplied ``data`` values, e.g.: https://crawl.site/entries/%(number)s.html
[ "Initialize", "a", "crawler", "with", "a", "set", "of", "seed", "URLs", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L5-L18
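The URL templating mentioned in the docstring is plain Python %-formatting against the inbound data mapping; a tiny self-contained illustration (URL and values invented):

template = 'https://crawl.site/entries/%(number)s.html'
data = {'number': 42}
print(template % data)  # -> https://crawl.site/entries/42.html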
17,062
alephdata/memorious
memorious/operations/initializers.py
enumerate
def enumerate(context, data): """Iterate through a set of items and emit each one of them.""" items = ensure_list(context.params.get('items')) for item in items: data['item'] = item context.emit(data=data)
python
def enumerate(context, data): """Iterate through a set of items and emit each one of them.""" items = ensure_list(context.params.get('items')) for item in items: data['item'] = item context.emit(data=data)
[ "def", "enumerate", "(", "context", ",", "data", ")", ":", "items", "=", "ensure_list", "(", "context", ".", "params", ".", "get", "(", "'items'", ")", ")", "for", "item", "in", "items", ":", "data", "[", "'item'", "]", "=", "item", "context", ".", "emit", "(", "data", "=", "data", ")" ]
Iterate through a set of items and emit each one of them.
[ "Iterate", "through", "a", "set", "of", "items", "and", "emit", "each", "one", "of", "them", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L21-L26
17,063
alephdata/memorious
memorious/operations/initializers.py
sequence
def sequence(context, data): """Generate a sequence of numbers. It is the memorious equivalent of the xrange function, accepting the ``start``, ``stop`` and ``step`` parameters. This can run in two ways: * As a single function generating all numbers in the given range. * Recursively, generating numbers one by one with an optional ``delay``. The latter mode is useful in order to generate very large sequences without completely clogging up the user queue. If an optional ``tag`` is given, each number will be emitted only once across multiple runs of the crawler. """ number = data.get('number', context.params.get('start', 1)) stop = context.params.get('stop') step = context.params.get('step', 1) delay = context.params.get('delay') prefix = context.params.get('tag') while True: tag = None if prefix is None else '%s:%s' % (prefix, number) if tag is None or not context.check_tag(tag): context.emit(data={'number': number}) if tag is not None: context.set_tag(tag, True) number = number + step if step > 0 and number >= stop: break if step < 0 and number <= stop: break if delay is not None: context.recurse(data={'number': number}, delay=delay) break
python
def sequence(context, data): """Generate a sequence of numbers. It is the memorious equivalent of the xrange function, accepting the ``start``, ``stop`` and ``step`` parameters. This can run in two ways: * As a single function generating all numbers in the given range. * Recursively, generating numbers one by one with an optional ``delay``. The latter mode is useful in order to generate very large sequences without completely clogging up the user queue. If an optional ``tag`` is given, each number will be emitted only once across multiple runs of the crawler. """ number = data.get('number', context.params.get('start', 1)) stop = context.params.get('stop') step = context.params.get('step', 1) delay = context.params.get('delay') prefix = context.params.get('tag') while True: tag = None if prefix is None else '%s:%s' % (prefix, number) if tag is None or not context.check_tag(tag): context.emit(data={'number': number}) if tag is not None: context.set_tag(tag, True) number = number + step if step > 0 and number >= stop: break if step < 0 and number <= stop: break if delay is not None: context.recurse(data={'number': number}, delay=delay) break
[ "def", "sequence", "(", "context", ",", "data", ")", ":", "number", "=", "data", ".", "get", "(", "'number'", ",", "context", ".", "params", ".", "get", "(", "'start'", ",", "1", ")", ")", "stop", "=", "context", ".", "params", ".", "get", "(", "'stop'", ")", "step", "=", "context", ".", "params", ".", "get", "(", "'step'", ",", "1", ")", "delay", "=", "context", ".", "params", ".", "get", "(", "'delay'", ")", "prefix", "=", "context", ".", "params", ".", "get", "(", "'tag'", ")", "while", "True", ":", "tag", "=", "None", "if", "prefix", "is", "None", "else", "'%s:%s'", "%", "(", "prefix", ",", "number", ")", "if", "tag", "is", "None", "or", "not", "context", ".", "check_tag", "(", "tag", ")", ":", "context", ".", "emit", "(", "data", "=", "{", "'number'", ":", "number", "}", ")", "if", "tag", "is", "not", "None", ":", "context", ".", "set_tag", "(", "tag", ",", "True", ")", "number", "=", "number", "+", "step", "if", "step", ">", "0", "and", "number", ">=", "stop", ":", "break", "if", "step", "<", "0", "and", "number", "<=", "stop", ":", "break", "if", "delay", "is", "not", "None", ":", "context", ".", "recurse", "(", "data", "=", "{", "'number'", ":", "number", "}", ",", "delay", "=", "delay", ")", "break" ]
Generate a sequence of numbers. It is the memorious equivalent of the xrange function, accepting the ``start``, ``stop`` and ``step`` parameters. This can run in two ways: * As a single function generating all numbers in the given range. * Recursively, generating numbers one by one with an optional ``delay``. The latter mode is useful in order to generate very large sequences without completely clogging up the user queue. If an optional ``tag`` is given, each number will be emitted only once across multiple runs of the crawler.
[ "Generate", "a", "sequence", "of", "numbers", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L29-L67
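The `sequence` initializer above is easiest to see with a throwaway stand-in for the crawler context. The `FakeContext` class below is hypothetical (the real memorious `Context` provides `params`, `emit`, `check_tag`, `set_tag` and `recurse`); it is only a sketch showing which numbers get emitted for a given start/stop/step.

```python
# Hypothetical stub standing in for memorious' Context, only to exercise sequence().
class FakeContext:
    def __init__(self, params):
        self.params = params      # start / stop / step / delay / tag
        self.emitted = []
        self.tags = set()

    def emit(self, data):
        self.emitted.append(data['number'])

    def check_tag(self, tag):
        return tag in self.tags

    def set_tag(self, tag, value):
        self.tags.add(tag)

    def recurse(self, data, delay=None):
        sequence(self, data)      # re-enter immediately instead of scheduling

ctx = FakeContext({'start': 1, 'stop': 10, 'step': 2})
sequence(ctx, {})
print(ctx.emitted)  # [1, 3, 5, 7, 9]; stop behaves like an exclusive bound
```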
17,064
alephdata/memorious
memorious/logic/http.py
ContextHttpResponse.fetch
def fetch(self): """Lazily trigger download of the data when requested.""" if self._file_path is not None: return self._file_path temp_path = self.context.work_path if self._content_hash is not None: self._file_path = storage.load_file(self._content_hash, temp_path=temp_path) return self._file_path if self.response is not None: self._file_path = random_filename(temp_path) content_hash = sha1() with open(self._file_path, 'wb') as fh: for chunk in self.response.iter_content(chunk_size=8192): content_hash.update(chunk) fh.write(chunk) self._remove_file = True chash = content_hash.hexdigest() self._content_hash = storage.archive_file(self._file_path, content_hash=chash) if self.http.cache and self.ok: self.context.set_tag(self.request_id, self.serialize()) self.retrieved_at = datetime.utcnow().isoformat() return self._file_path
python
def fetch(self): """Lazily trigger download of the data when requested.""" if self._file_path is not None: return self._file_path temp_path = self.context.work_path if self._content_hash is not None: self._file_path = storage.load_file(self._content_hash, temp_path=temp_path) return self._file_path if self.response is not None: self._file_path = random_filename(temp_path) content_hash = sha1() with open(self._file_path, 'wb') as fh: for chunk in self.response.iter_content(chunk_size=8192): content_hash.update(chunk) fh.write(chunk) self._remove_file = True chash = content_hash.hexdigest() self._content_hash = storage.archive_file(self._file_path, content_hash=chash) if self.http.cache and self.ok: self.context.set_tag(self.request_id, self.serialize()) self.retrieved_at = datetime.utcnow().isoformat() return self._file_path
[ "def", "fetch", "(", "self", ")", ":", "if", "self", ".", "_file_path", "is", "not", "None", ":", "return", "self", ".", "_file_path", "temp_path", "=", "self", ".", "context", ".", "work_path", "if", "self", ".", "_content_hash", "is", "not", "None", ":", "self", ".", "_file_path", "=", "storage", ".", "load_file", "(", "self", ".", "_content_hash", ",", "temp_path", "=", "temp_path", ")", "return", "self", ".", "_file_path", "if", "self", ".", "response", "is", "not", "None", ":", "self", ".", "_file_path", "=", "random_filename", "(", "temp_path", ")", "content_hash", "=", "sha1", "(", ")", "with", "open", "(", "self", ".", "_file_path", ",", "'wb'", ")", "as", "fh", ":", "for", "chunk", "in", "self", ".", "response", ".", "iter_content", "(", "chunk_size", "=", "8192", ")", ":", "content_hash", ".", "update", "(", "chunk", ")", "fh", ".", "write", "(", "chunk", ")", "self", ".", "_remove_file", "=", "True", "chash", "=", "content_hash", ".", "hexdigest", "(", ")", "self", ".", "_content_hash", "=", "storage", ".", "archive_file", "(", "self", ".", "_file_path", ",", "content_hash", "=", "chash", ")", "if", "self", ".", "http", ".", "cache", "and", "self", ".", "ok", ":", "self", ".", "context", ".", "set_tag", "(", "self", ".", "request_id", ",", "self", ".", "serialize", "(", ")", ")", "self", ".", "retrieved_at", "=", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", "return", "self", ".", "_file_path" ]
Lazily trigger download of the data when requested.
[ "Lazily", "trigger", "download", "of", "the", "data", "when", "requested", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/http.py#L162-L185
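The core of `fetch` is streaming the response body to disk while hashing it in a single pass. A standalone sketch of that pattern with plain `requests` and `hashlib` (none of the memorious `storage`/`Context` machinery) looks like this:

```python
# Stream-while-hashing sketch: mirrors the chunked download in fetch() above,
# but as a free-standing helper with no memorious dependencies.
from hashlib import sha1
import requests

def download_with_hash(url, dest_path, chunk_size=8192):
    digest = sha1()
    with requests.get(url, stream=True) as resp, open(dest_path, 'wb') as fh:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            digest.update(chunk)   # hash and write each chunk exactly once
            fh.write(chunk)
    return digest.hexdigest()
```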
17,065
alephdata/memorious
memorious/util.py
make_key
def make_key(*criteria):
    """Make a string key out of many criteria."""
    criteria = [stringify(c) for c in criteria]
    criteria = [c for c in criteria if c is not None]
    if len(criteria):
        return ':'.join(criteria)
python
def make_key(*criteria):
    """Make a string key out of many criteria."""
    criteria = [stringify(c) for c in criteria]
    criteria = [c for c in criteria if c is not None]
    if len(criteria):
        return ':'.join(criteria)
[ "def", "make_key", "(", "*", "criteria", ")", ":", "criteria", "=", "[", "stringify", "(", "c", ")", "for", "c", "in", "criteria", "]", "criteria", "=", "[", "c", "for", "c", "in", "criteria", "if", "c", "is", "not", "None", "]", "if", "len", "(", "criteria", ")", ":", "return", "':'", ".", "join", "(", "criteria", ")" ]
Make a string key out of many criteria.
[ "Make", "a", "string", "key", "out", "of", "many", "criteria", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/util.py#L6-L11
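Assuming `stringify` behaves like `normality.stringify` (returns `None` for empty values and text otherwise; an assumption, since the import is not shown in this record), `make_key` simply drops empty criteria and joins the rest:

```python
make_key('document', None, 42)   # -> 'document:42'
make_key(None, None)             # -> None (nothing left to join)
```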
17,066
alephdata/memorious
memorious/util.py
random_filename
def random_filename(path=None):
    """Make a UUID-based file name which is extremely unlikely
    to exist already."""
    filename = uuid4().hex
    if path is not None:
        filename = os.path.join(path, filename)
    return filename
python
def random_filename(path=None):
    """Make a UUID-based file name which is extremely unlikely
    to exist already."""
    filename = uuid4().hex
    if path is not None:
        filename = os.path.join(path, filename)
    return filename
[ "def", "random_filename", "(", "path", "=", "None", ")", ":", "filename", "=", "uuid4", "(", ")", ".", "hex", "if", "path", "is", "not", "None", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "return", "filename" ]
Make a UUID-based file name which is extremely unlikely to exist already.
[ "Make", "a", "UUID", "-", "based", "file", "name", "which", "is", "extremely", "unlikely", "to", "exist", "already", "." ]
b4033c5064447ed5f696f9c2bbbc6c12062d2fa4
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/util.py#L14-L20
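A quick usage note: `uuid4().hex` is always 32 hexadecimal characters, so the generated names are fixed-length and collision-resistant.

```python
import os

name = random_filename('/tmp')             # e.g. '/tmp/<32 hex chars>'
assert len(os.path.basename(name)) == 32   # uuid4().hex is 32 characters long
```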
17,067
jasonlaska/spherecluster
spherecluster/util.py
sample_vMF
def sample_vMF(mu, kappa, num_samples): """Generate num_samples N-dimensional samples from von Mises Fisher distribution around center mu \in R^N with concentration kappa. """ dim = len(mu) result = np.zeros((num_samples, dim)) for nn in range(num_samples): # sample offset from center (on sphere) with spread kappa w = _sample_weight(kappa, dim) # sample a point v on the unit sphere that's orthogonal to mu v = _sample_orthonormal_to(mu) # compute new point result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu return result
python
def sample_vMF(mu, kappa, num_samples): """Generate num_samples N-dimensional samples from von Mises Fisher distribution around center mu \in R^N with concentration kappa. """ dim = len(mu) result = np.zeros((num_samples, dim)) for nn in range(num_samples): # sample offset from center (on sphere) with spread kappa w = _sample_weight(kappa, dim) # sample a point v on the unit sphere that's orthogonal to mu v = _sample_orthonormal_to(mu) # compute new point result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu return result
[ "def", "sample_vMF", "(", "mu", ",", "kappa", ",", "num_samples", ")", ":", "dim", "=", "len", "(", "mu", ")", "result", "=", "np", ".", "zeros", "(", "(", "num_samples", ",", "dim", ")", ")", "for", "nn", "in", "range", "(", "num_samples", ")", ":", "# sample offset from center (on sphere) with spread kappa", "w", "=", "_sample_weight", "(", "kappa", ",", "dim", ")", "# sample a point v on the unit sphere that's orthogonal to mu", "v", "=", "_sample_orthonormal_to", "(", "mu", ")", "# compute new point", "result", "[", "nn", ",", ":", "]", "=", "v", "*", "np", ".", "sqrt", "(", "1.", "-", "w", "**", "2", ")", "+", "w", "*", "mu", "return", "result" ]
Generate num_samples N-dimensional samples from von Mises Fisher distribution around center mu \in R^N with concentration kappa.
[ "Generate", "num_samples", "N", "-", "dimensional", "samples", "from", "von", "Mises", "Fisher", "distribution", "around", "center", "mu", "\\", "in", "R^N", "with", "concentration", "kappa", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L16-L32
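A small sanity-check sketch for `sample_vMF`: every returned row is unit-norm by construction (it is `v * sqrt(1 - w**2) + w * mu` with `v` orthogonal to `mu`), and for large `kappa` the empirical mean direction should point close to `mu`.

```python
import numpy as np

mu = np.array([1.0, 0.0, 0.0])                  # unit-norm center in R^3
samples = sample_vMF(mu, kappa=50.0, num_samples=500)
assert np.allclose(np.linalg.norm(samples, axis=1), 1.0)   # all on the sphere

mean_dir = samples.mean(axis=0)
mean_dir /= np.linalg.norm(mean_dir)
print(mean_dir @ mu)   # close to 1.0 for concentrated (large kappa) samples
```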
17,068
jasonlaska/spherecluster
spherecluster/util.py
_sample_weight
def _sample_weight(kappa, dim): """Rejection sampling scheme for sampling distance from center on surface of the sphere. """ dim = dim - 1 # since S^{n-1} b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) x = (1. - b) / (1. + b) c = kappa * x + dim * np.log(1 - x ** 2) while True: z = np.random.beta(dim / 2., dim / 2.) w = (1. - (1. + b) * z) / (1. - (1. - b) * z) u = np.random.uniform(low=0, high=1) if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u): return w
python
def _sample_weight(kappa, dim): """Rejection sampling scheme for sampling distance from center on surface of the sphere. """ dim = dim - 1 # since S^{n-1} b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) x = (1. - b) / (1. + b) c = kappa * x + dim * np.log(1 - x ** 2) while True: z = np.random.beta(dim / 2., dim / 2.) w = (1. - (1. + b) * z) / (1. - (1. - b) * z) u = np.random.uniform(low=0, high=1) if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u): return w
[ "def", "_sample_weight", "(", "kappa", ",", "dim", ")", ":", "dim", "=", "dim", "-", "1", "# since S^{n-1}", "b", "=", "dim", "/", "(", "np", ".", "sqrt", "(", "4.", "*", "kappa", "**", "2", "+", "dim", "**", "2", ")", "+", "2", "*", "kappa", ")", "x", "=", "(", "1.", "-", "b", ")", "/", "(", "1.", "+", "b", ")", "c", "=", "kappa", "*", "x", "+", "dim", "*", "np", ".", "log", "(", "1", "-", "x", "**", "2", ")", "while", "True", ":", "z", "=", "np", ".", "random", ".", "beta", "(", "dim", "/", "2.", ",", "dim", "/", "2.", ")", "w", "=", "(", "1.", "-", "(", "1.", "+", "b", ")", "*", "z", ")", "/", "(", "1.", "-", "(", "1.", "-", "b", ")", "*", "z", ")", "u", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "0", ",", "high", "=", "1", ")", "if", "kappa", "*", "w", "+", "dim", "*", "np", ".", "log", "(", "1.", "-", "x", "*", "w", ")", "-", "c", ">=", "np", ".", "log", "(", "u", ")", ":", "return", "w" ]
Rejection sampling scheme for sampling distance from center on surface of the sphere.
[ "Rejection", "sampling", "scheme", "for", "sampling", "distance", "from", "center", "on", "surface", "of", "the", "sphere", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L35-L49
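For context (stated here as background, not taken from the source file): this is Wood's (1994) rejection scheme. The marginal density of w = mu·x under a vMF distribution on the sphere, and the constant b computed above, are

```latex
f(w) \;\propto\; e^{\kappa w}\,(1 - w^2)^{(d-3)/2}, \qquad w \in [-1, 1],
\qquad
b \;=\; \frac{d-1}{\sqrt{4\kappa^2 + (d-1)^2} + 2\kappa},
```

where d is the ambient dimension (the code works with `dim = d - 1` throughout).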
17,069
jasonlaska/spherecluster
spherecluster/util.py
_sample_orthonormal_to
def _sample_orthonormal_to(mu):
    """Sample point on sphere orthogonal to mu."""
    v = np.random.randn(mu.shape[0])
    proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
    orthto = v - proj_mu_v
    return orthto / np.linalg.norm(orthto)
python
def _sample_orthonormal_to(mu):
    """Sample point on sphere orthogonal to mu."""
    v = np.random.randn(mu.shape[0])
    proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
    orthto = v - proj_mu_v
    return orthto / np.linalg.norm(orthto)
[ "def", "_sample_orthonormal_to", "(", "mu", ")", ":", "v", "=", "np", ".", "random", ".", "randn", "(", "mu", ".", "shape", "[", "0", "]", ")", "proj_mu_v", "=", "mu", "*", "np", ".", "dot", "(", "mu", ",", "v", ")", "/", "np", ".", "linalg", ".", "norm", "(", "mu", ")", "orthto", "=", "v", "-", "proj_mu_v", "return", "orthto", "/", "np", ".", "linalg", ".", "norm", "(", "orthto", ")" ]
Sample point on sphere orthogonal to mu.
[ "Sample", "point", "on", "sphere", "orthogonal", "to", "mu", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L52-L57
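A two-line check of the projection step: the returned vector is unit-norm and orthogonal to `mu` (assuming `mu` is itself unit-norm, as elsewhere in this module).

```python
import numpy as np

mu = np.array([0.0, 0.0, 1.0])
v = _sample_orthonormal_to(mu)
assert abs(np.dot(v, mu)) < 1e-8              # orthogonal to mu
assert abs(np.linalg.norm(v) - 1.0) < 1e-8    # unit length
```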
17,070
jasonlaska/spherecluster
spherecluster/spherical_kmeans.py
_spherical_kmeans_single_lloyd
def _spherical_kmeans_single_lloyd( X, n_clusters, sample_weight=None, max_iter=300, init="k-means++", verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True, ): """ Modified from sklearn.cluster.k_means_.k_means_single_lloyd. """ random_state = check_random_state(random_state) sample_weight = _check_sample_weight(X, sample_weight) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids( X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms ) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment # TODO: _labels_inertia should be done with cosine distance # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized # this doesn't really matter. labels, inertia = _labels_inertia( X, sample_weight, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances, ) # computation of the means if sp.issparse(X): centers = _k_means._centers_sparse( X, sample_weight, labels, n_clusters, distances ) else: centers = _k_means._centers_dense( X, sample_weight, labels, n_clusters, distances ) # l2-normalize centers (this is the main contibution here) centers = normalize(centers) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia center_shift_total = squared_norm(centers_old - centers) if center_shift_total <= tol: if verbose: print( "Converged at iteration %d: " "center shift %e within tolerance %e" % (i, center_shift_total, tol) ) break if center_shift_total > 0: # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels, best_inertia = _labels_inertia( X, sample_weight, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances, ) return best_labels, best_inertia, best_centers, i + 1
python
def _spherical_kmeans_single_lloyd( X, n_clusters, sample_weight=None, max_iter=300, init="k-means++", verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True, ): """ Modified from sklearn.cluster.k_means_.k_means_single_lloyd. """ random_state = check_random_state(random_state) sample_weight = _check_sample_weight(X, sample_weight) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids( X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms ) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment # TODO: _labels_inertia should be done with cosine distance # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized # this doesn't really matter. labels, inertia = _labels_inertia( X, sample_weight, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances, ) # computation of the means if sp.issparse(X): centers = _k_means._centers_sparse( X, sample_weight, labels, n_clusters, distances ) else: centers = _k_means._centers_dense( X, sample_weight, labels, n_clusters, distances ) # l2-normalize centers (this is the main contibution here) centers = normalize(centers) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia center_shift_total = squared_norm(centers_old - centers) if center_shift_total <= tol: if verbose: print( "Converged at iteration %d: " "center shift %e within tolerance %e" % (i, center_shift_total, tol) ) break if center_shift_total > 0: # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels, best_inertia = _labels_inertia( X, sample_weight, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances, ) return best_labels, best_inertia, best_centers, i + 1
[ "def", "_spherical_kmeans_single_lloyd", "(", "X", ",", "n_clusters", ",", "sample_weight", "=", "None", ",", "max_iter", "=", "300", ",", "init", "=", "\"k-means++\"", ",", "verbose", "=", "False", ",", "x_squared_norms", "=", "None", ",", "random_state", "=", "None", ",", "tol", "=", "1e-4", ",", "precompute_distances", "=", "True", ",", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "sample_weight", "=", "_check_sample_weight", "(", "X", ",", "sample_weight", ")", "best_labels", ",", "best_inertia", ",", "best_centers", "=", "None", ",", "None", ",", "None", "# init", "centers", "=", "_init_centroids", "(", "X", ",", "n_clusters", ",", "init", ",", "random_state", "=", "random_state", ",", "x_squared_norms", "=", "x_squared_norms", ")", "if", "verbose", ":", "print", "(", "\"Initialization complete\"", ")", "# Allocate memory to store the distances for each sample to its", "# closer center for reallocation in case of ties", "distances", "=", "np", ".", "zeros", "(", "shape", "=", "(", "X", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "X", ".", "dtype", ")", "# iterations", "for", "i", "in", "range", "(", "max_iter", ")", ":", "centers_old", "=", "centers", ".", "copy", "(", ")", "# labels assignment", "# TODO: _labels_inertia should be done with cosine distance", "# since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized", "# this doesn't really matter.", "labels", ",", "inertia", "=", "_labels_inertia", "(", "X", ",", "sample_weight", ",", "x_squared_norms", ",", "centers", ",", "precompute_distances", "=", "precompute_distances", ",", "distances", "=", "distances", ",", ")", "# computation of the means", "if", "sp", ".", "issparse", "(", "X", ")", ":", "centers", "=", "_k_means", ".", "_centers_sparse", "(", "X", ",", "sample_weight", ",", "labels", ",", "n_clusters", ",", "distances", ")", "else", ":", "centers", "=", "_k_means", ".", "_centers_dense", "(", "X", ",", "sample_weight", ",", "labels", ",", "n_clusters", ",", "distances", ")", "# l2-normalize centers (this is the main contibution here)", "centers", "=", "normalize", "(", "centers", ")", "if", "verbose", ":", "print", "(", "\"Iteration %2d, inertia %.3f\"", "%", "(", "i", ",", "inertia", ")", ")", "if", "best_inertia", "is", "None", "or", "inertia", "<", "best_inertia", ":", "best_labels", "=", "labels", ".", "copy", "(", ")", "best_centers", "=", "centers", ".", "copy", "(", ")", "best_inertia", "=", "inertia", "center_shift_total", "=", "squared_norm", "(", "centers_old", "-", "centers", ")", "if", "center_shift_total", "<=", "tol", ":", "if", "verbose", ":", "print", "(", "\"Converged at iteration %d: \"", "\"center shift %e within tolerance %e\"", "%", "(", "i", ",", "center_shift_total", ",", "tol", ")", ")", "break", "if", "center_shift_total", ">", "0", ":", "# rerun E-step in case of non-convergence so that predicted labels", "# match cluster centers", "best_labels", ",", "best_inertia", "=", "_labels_inertia", "(", "X", ",", "sample_weight", ",", "x_squared_norms", ",", "best_centers", ",", "precompute_distances", "=", "precompute_distances", ",", "distances", "=", "distances", ",", ")", "return", "best_labels", ",", "best_inertia", ",", "best_centers", ",", "i", "+", "1" ]
Modified from sklearn.cluster.k_means_.k_means_single_lloyd.
[ "Modified", "from", "sklearn", ".", "cluster", ".", "k_means_", ".", "k_means_single_lloyd", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L22-L113
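The TODO comment in the loop alludes to why reusing the Euclidean `_labels_inertia` is harmless here: for unit-norm vectors, squared Euclidean distance is a monotone function of cosine distance, so both assignments give the same argmin.

```latex
% For unit-norm a and b:
\|a - b\|^2 \;=\; \|a\|^2 + \|b\|^2 - 2\,a^\top b \;=\; 2\bigl(1 - \cos(a, b)\bigr).
```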
17,071
jasonlaska/spherecluster
spherecluster/spherical_kmeans.py
spherical_k_means
def spherical_k_means( X, n_clusters, sample_weight=None, init="k-means++", n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False, ): """Modified from sklearn.cluster.k_means_.k_means. """ if n_init <= 0: raise ValueError( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np.infty # avoid forcing order when copy_x=False order = "C" if copy_x else None X = check_array( X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x ) # verify that the number of samples given is larger than k if _num_samples(X) < n_clusters: raise ValueError( "n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters) ) tol = _tolerance(X, tol) if hasattr(init, "__array__"): init = check_array(init, dtype=X.dtype.type, order="C", copy=True) _validate_center_shape(X, n_clusters, init) if n_init != 1: warnings.warn( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init, RuntimeWarning, stacklevel=2, ) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd( X, n_clusters, sample_weight, max_iter=max_iter, init=init, verbose=verbose, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state, ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_spherical_kmeans_single_lloyd)( X, n_clusters, sample_weight, max_iter=max_iter, init=init, verbose=verbose, tol=tol, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed, ) for seed in seeds ) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia
python
def spherical_k_means( X, n_clusters, sample_weight=None, init="k-means++", n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False, ): """Modified from sklearn.cluster.k_means_.k_means. """ if n_init <= 0: raise ValueError( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np.infty # avoid forcing order when copy_x=False order = "C" if copy_x else None X = check_array( X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x ) # verify that the number of samples given is larger than k if _num_samples(X) < n_clusters: raise ValueError( "n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters) ) tol = _tolerance(X, tol) if hasattr(init, "__array__"): init = check_array(init, dtype=X.dtype.type, order="C", copy=True) _validate_center_shape(X, n_clusters, init) if n_init != 1: warnings.warn( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init, RuntimeWarning, stacklevel=2, ) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd( X, n_clusters, sample_weight, max_iter=max_iter, init=init, verbose=verbose, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state, ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_spherical_kmeans_single_lloyd)( X, n_clusters, sample_weight, max_iter=max_iter, init=init, verbose=verbose, tol=tol, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed, ) for seed in seeds ) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia
[ "def", "spherical_k_means", "(", "X", ",", "n_clusters", ",", "sample_weight", "=", "None", ",", "init", "=", "\"k-means++\"", ",", "n_init", "=", "10", ",", "max_iter", "=", "300", ",", "verbose", "=", "False", ",", "tol", "=", "1e-4", ",", "random_state", "=", "None", ",", "copy_x", "=", "True", ",", "n_jobs", "=", "1", ",", "algorithm", "=", "\"auto\"", ",", "return_n_iter", "=", "False", ",", ")", ":", "if", "n_init", "<=", "0", ":", "raise", "ValueError", "(", "\"Invalid number of initializations.\"", "\" n_init=%d must be bigger than zero.\"", "%", "n_init", ")", "random_state", "=", "check_random_state", "(", "random_state", ")", "if", "max_iter", "<=", "0", ":", "raise", "ValueError", "(", "\"Number of iterations should be a positive number,\"", "\" got %d instead\"", "%", "max_iter", ")", "best_inertia", "=", "np", ".", "infty", "# avoid forcing order when copy_x=False", "order", "=", "\"C\"", "if", "copy_x", "else", "None", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "\"csr\"", ",", "dtype", "=", "[", "np", ".", "float64", ",", "np", ".", "float32", "]", ",", "order", "=", "order", ",", "copy", "=", "copy_x", ")", "# verify that the number of samples given is larger than k", "if", "_num_samples", "(", "X", ")", "<", "n_clusters", ":", "raise", "ValueError", "(", "\"n_samples=%d should be >= n_clusters=%d\"", "%", "(", "_num_samples", "(", "X", ")", ",", "n_clusters", ")", ")", "tol", "=", "_tolerance", "(", "X", ",", "tol", ")", "if", "hasattr", "(", "init", ",", "\"__array__\"", ")", ":", "init", "=", "check_array", "(", "init", ",", "dtype", "=", "X", ".", "dtype", ".", "type", ",", "order", "=", "\"C\"", ",", "copy", "=", "True", ")", "_validate_center_shape", "(", "X", ",", "n_clusters", ",", "init", ")", "if", "n_init", "!=", "1", ":", "warnings", ".", "warn", "(", "\"Explicit initial center position passed: \"", "\"performing only one init in k-means instead of n_init=%d\"", "%", "n_init", ",", "RuntimeWarning", ",", "stacklevel", "=", "2", ",", ")", "n_init", "=", "1", "# precompute squared norms of data points", "x_squared_norms", "=", "row_norms", "(", "X", ",", "squared", "=", "True", ")", "if", "n_jobs", "==", "1", ":", "# For a single thread, less memory is needed if we just store one set", "# of the best results (as opposed to one set per run per thread).", "for", "it", "in", "range", "(", "n_init", ")", ":", "# run a k-means once", "labels", ",", "inertia", ",", "centers", ",", "n_iter_", "=", "_spherical_kmeans_single_lloyd", "(", "X", ",", "n_clusters", ",", "sample_weight", ",", "max_iter", "=", "max_iter", ",", "init", "=", "init", ",", "verbose", "=", "verbose", ",", "tol", "=", "tol", ",", "x_squared_norms", "=", "x_squared_norms", ",", "random_state", "=", "random_state", ",", ")", "# determine if these results are the best so far", "if", "best_inertia", "is", "None", "or", "inertia", "<", "best_inertia", ":", "best_labels", "=", "labels", ".", "copy", "(", ")", "best_centers", "=", "centers", ".", "copy", "(", ")", "best_inertia", "=", "inertia", "best_n_iter", "=", "n_iter_", "else", ":", "# parallelisation of k-means runs", "seeds", "=", "random_state", ".", "randint", "(", "np", ".", "iinfo", "(", "np", ".", "int32", ")", ".", "max", ",", "size", "=", "n_init", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "0", ")", "(", "delayed", "(", "_spherical_kmeans_single_lloyd", ")", "(", "X", ",", "n_clusters", ",", "sample_weight", ",", "max_iter", "=", "max_iter", ",", "init", "=", "init", ",", 
"verbose", "=", "verbose", ",", "tol", "=", "tol", ",", "x_squared_norms", "=", "x_squared_norms", ",", "# Change seed to ensure variety", "random_state", "=", "seed", ",", ")", "for", "seed", "in", "seeds", ")", "# Get results with the lowest inertia", "labels", ",", "inertia", ",", "centers", ",", "n_iters", "=", "zip", "(", "*", "results", ")", "best", "=", "np", ".", "argmin", "(", "inertia", ")", "best_labels", "=", "labels", "[", "best", "]", "best_inertia", "=", "inertia", "[", "best", "]", "best_centers", "=", "centers", "[", "best", "]", "best_n_iter", "=", "n_iters", "[", "best", "]", "if", "return_n_iter", ":", "return", "best_centers", ",", "best_labels", ",", "best_inertia", ",", "best_n_iter", "else", ":", "return", "best_centers", ",", "best_labels", ",", "best_inertia" ]
Modified from sklearn.cluster.k_means_.k_means.
[ "Modified", "from", "sklearn", ".", "cluster", ".", "k_means_", ".", "k_means", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L116-L228
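A sketch of calling the function-level API directly. The data is unit-normalized first, since (unlike the estimator wrapper below) this function does not normalize the rows for you.

```python
import numpy as np
from sklearn.preprocessing import normalize

X = normalize(np.random.randn(200, 20))       # rows projected onto the unit sphere
centers, labels, inertia = spherical_k_means(X, n_clusters=5, random_state=0)
print(centers.shape, labels.shape, inertia)   # (5, 20) (200,) <float>
```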
17,072
jasonlaska/spherecluster
spherecluster/spherical_kmeans.py
SphericalKMeans.fit
def fit(self, X, y=None, sample_weight=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) y : Ignored not used, present here for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional The weights for each observation in X. If None, all observations are assigned equal weight (default: None) """ if self.normalize: X = normalize(X) random_state = check_random_state(self.random_state) # TODO: add check that all data is unit-normalized self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means( X, n_clusters=self.n_clusters, sample_weight=sample_weight, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, return_n_iter=True, ) return self
python
def fit(self, X, y=None, sample_weight=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) y : Ignored not used, present here for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional The weights for each observation in X. If None, all observations are assigned equal weight (default: None) """ if self.normalize: X = normalize(X) random_state = check_random_state(self.random_state) # TODO: add check that all data is unit-normalized self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means( X, n_clusters=self.n_clusters, sample_weight=sample_weight, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, return_n_iter=True, ) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "sample_weight", "=", "None", ")", ":", "if", "self", ".", "normalize", ":", "X", "=", "normalize", "(", "X", ")", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "# TODO: add check that all data is unit-normalized", "self", ".", "cluster_centers_", ",", "self", ".", "labels_", ",", "self", ".", "inertia_", ",", "self", ".", "n_iter_", "=", "spherical_k_means", "(", "X", ",", "n_clusters", "=", "self", ".", "n_clusters", ",", "sample_weight", "=", "sample_weight", ",", "init", "=", "self", ".", "init", ",", "n_init", "=", "self", ".", "n_init", ",", "max_iter", "=", "self", ".", "max_iter", ",", "verbose", "=", "self", ".", "verbose", ",", "tol", "=", "self", ".", "tol", ",", "random_state", "=", "random_state", ",", "copy_x", "=", "self", ".", "copy_x", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", "return_n_iter", "=", "True", ",", ")", "return", "self" ]
Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) y : Ignored not used, present here for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional The weights for each observation in X. If None, all observations are assigned equal weight (default: None)
[ "Compute", "k", "-", "means", "clustering", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L329-L366
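Estimator-style usage, mirroring the sklearn KMeans interface; note that `fit` normalizes `X` itself when the estimator was constructed with `normalize=True` (assumed here to be the default in this project).

```python
import numpy as np

X = np.random.randn(500, 50)                  # raw, not yet unit-norm data
skm = SphericalKMeans(n_clusters=10, random_state=0)
skm.fit(X)
print(skm.labels_[:10])                       # cluster assignment per row
print(skm.cluster_centers_.shape)             # (10, 50), unit-norm centroids
```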
17,073
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_inertia_from_labels
def _inertia_from_labels(X, centers, labels):
    """Compute inertia with cosine distance using known labels.
    """
    n_examples, n_features = X.shape
    inertia = np.zeros((n_examples,))
    for ee in range(n_examples):
        inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
    return np.sum(inertia)
python
def _inertia_from_labels(X, centers, labels):
    """Compute inertia with cosine distance using known labels.
    """
    n_examples, n_features = X.shape
    inertia = np.zeros((n_examples,))
    for ee in range(n_examples):
        inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
    return np.sum(inertia)
[ "def", "_inertia_from_labels", "(", "X", ",", "centers", ",", "labels", ")", ":", "n_examples", ",", "n_features", "=", "X", ".", "shape", "inertia", "=", "np", ".", "zeros", "(", "(", "n_examples", ",", ")", ")", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "inertia", "[", "ee", "]", "=", "1", "-", "X", "[", "ee", ",", ":", "]", ".", "dot", "(", "centers", "[", "int", "(", "labels", "[", "ee", "]", ")", ",", ":", "]", ".", "T", ")", "return", "np", ".", "sum", "(", "inertia", ")" ]
Compute inertia with cosine distance using known labels.
[ "Compute", "inertia", "with", "cosine", "distance", "using", "known", "labels", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L25-L33
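The per-example loop can be written as one vectorized expression for dense `X`; this is an illustrative rewrite, not code from the project.

```python
import numpy as np

def inertia_from_labels_vectorized(X, centers, labels):
    # sum over i of 1 - <x_i, mu_{label_i}>  (cosine-distance inertia)
    sims = np.einsum('ij,ij->i', X, centers[labels.astype(int)])
    return np.sum(1.0 - sims)
```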
17,074
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_labels_inertia
def _labels_inertia(X, centers): """Compute labels and inertia with cosine distance. """ n_examples, n_features = X.shape n_clusters, n_features = centers.shape labels = np.zeros((n_examples,)) inertia = np.zeros((n_examples,)) for ee in range(n_examples): dists = np.zeros((n_clusters,)) for cc in range(n_clusters): dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T) labels[ee] = np.argmin(dists) inertia[ee] = dists[int(labels[ee])] return labels, np.sum(inertia)
python
def _labels_inertia(X, centers): """Compute labels and inertia with cosine distance. """ n_examples, n_features = X.shape n_clusters, n_features = centers.shape labels = np.zeros((n_examples,)) inertia = np.zeros((n_examples,)) for ee in range(n_examples): dists = np.zeros((n_clusters,)) for cc in range(n_clusters): dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T) labels[ee] = np.argmin(dists) inertia[ee] = dists[int(labels[ee])] return labels, np.sum(inertia)
[ "def", "_labels_inertia", "(", "X", ",", "centers", ")", ":", "n_examples", ",", "n_features", "=", "X", ".", "shape", "n_clusters", ",", "n_features", "=", "centers", ".", "shape", "labels", "=", "np", ".", "zeros", "(", "(", "n_examples", ",", ")", ")", "inertia", "=", "np", ".", "zeros", "(", "(", "n_examples", ",", ")", ")", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "dists", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", ")", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "dists", "[", "cc", "]", "=", "1", "-", "X", "[", "ee", ",", ":", "]", ".", "dot", "(", "centers", "[", "cc", ",", ":", "]", ".", "T", ")", "labels", "[", "ee", "]", "=", "np", ".", "argmin", "(", "dists", ")", "inertia", "[", "ee", "]", "=", "dists", "[", "int", "(", "labels", "[", "ee", "]", ")", "]", "return", "labels", ",", "np", ".", "sum", "(", "inertia", ")" ]
Compute labels and inertia with cosine distance.
[ "Compute", "labels", "and", "inertia", "with", "cosine", "distance", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L36-L53
17,075
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_S
def _S(kappa, alpha, beta): """Compute the antiderivative of the Amos-type bound G on the modified Bessel function ratio. Note: Handles scalar kappa, alpha, and beta only. See "S <-" in movMF.R and utility function implementation notes from https://cran.r-project.org/web/packages/movMF/index.html """ kappa = 1. * np.abs(kappa) alpha = 1. * alpha beta = 1. * np.abs(beta) a_plus_b = alpha + beta u = np.sqrt(kappa ** 2 + beta ** 2) if alpha == 0: alpha_scale = 0 else: alpha_scale = alpha * np.log((alpha + u) / a_plus_b) return u - beta - alpha_scale
python
def _S(kappa, alpha, beta): """Compute the antiderivative of the Amos-type bound G on the modified Bessel function ratio. Note: Handles scalar kappa, alpha, and beta only. See "S <-" in movMF.R and utility function implementation notes from https://cran.r-project.org/web/packages/movMF/index.html """ kappa = 1. * np.abs(kappa) alpha = 1. * alpha beta = 1. * np.abs(beta) a_plus_b = alpha + beta u = np.sqrt(kappa ** 2 + beta ** 2) if alpha == 0: alpha_scale = 0 else: alpha_scale = alpha * np.log((alpha + u) / a_plus_b) return u - beta - alpha_scale
[ "def", "_S", "(", "kappa", ",", "alpha", ",", "beta", ")", ":", "kappa", "=", "1.", "*", "np", ".", "abs", "(", "kappa", ")", "alpha", "=", "1.", "*", "alpha", "beta", "=", "1.", "*", "np", ".", "abs", "(", "beta", ")", "a_plus_b", "=", "alpha", "+", "beta", "u", "=", "np", ".", "sqrt", "(", "kappa", "**", "2", "+", "beta", "**", "2", ")", "if", "alpha", "==", "0", ":", "alpha_scale", "=", "0", "else", ":", "alpha_scale", "=", "alpha", "*", "np", ".", "log", "(", "(", "alpha", "+", "u", ")", "/", "a_plus_b", ")", "return", "u", "-", "beta", "-", "alpha_scale" ]
Compute the antiderivative of the Amos-type bound G on the modified Bessel function ratio. Note: Handles scalar kappa, alpha, and beta only. See "S <-" in movMF.R and utility function implementation notes from https://cran.r-project.org/web/packages/movMF/index.html
[ "Compute", "the", "antiderivative", "of", "the", "Amos", "-", "type", "bound", "G", "on", "the", "modified", "Bessel", "function", "ratio", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L105-L124
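Transcribing the code into a formula, the antiderivative being evaluated is

```latex
S(\kappa, \alpha, \beta) \;=\; \sqrt{\kappa^2 + \beta^2} \;-\; \beta
\;-\; \alpha \,\log\!\frac{\alpha + \sqrt{\kappa^2 + \beta^2}}{\alpha + \beta},
```

with the log term dropped when alpha = 0, and kappa and beta taken in absolute value.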
17,076
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_init_unit_centers
def _init_unit_centers(X, n_clusters, random_state, init): """Initializes unit norm centers. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. init: (string) one of k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. """ n_examples, n_features = np.shape(X) if isinstance(init, np.ndarray): n_init_clusters, n_init_features = init.shape assert n_init_clusters == n_clusters assert n_init_features == n_features # ensure unit normed centers centers = init for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "spherical-k-means": labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd( X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++" ) return centers elif init == "random": centers = np.random.randn(n_clusters, n_features) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "k-means++": centers = _init_centroids( X, n_clusters, "k-means++", random_state=random_state, x_squared_norms=np.ones((n_examples,)), ) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "random-orthonormal": centers = np.random.randn(n_clusters, n_features) q, r = np.linalg.qr(centers.T, mode="reduced") return q.T elif init == "random-class": centers = np.zeros((n_clusters, n_features)) for cc in range(n_clusters): while np.linalg.norm(centers[cc, :]) == 0: labels = np.random.randint(0, n_clusters, n_examples) centers[cc, :] = X[labels == cc, :].sum(axis=0) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers
python
def _init_unit_centers(X, n_clusters, random_state, init): """Initializes unit norm centers. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. init: (string) one of k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. """ n_examples, n_features = np.shape(X) if isinstance(init, np.ndarray): n_init_clusters, n_init_features = init.shape assert n_init_clusters == n_clusters assert n_init_features == n_features # ensure unit normed centers centers = init for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "spherical-k-means": labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd( X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++" ) return centers elif init == "random": centers = np.random.randn(n_clusters, n_features) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "k-means++": centers = _init_centroids( X, n_clusters, "k-means++", random_state=random_state, x_squared_norms=np.ones((n_examples,)), ) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers elif init == "random-orthonormal": centers = np.random.randn(n_clusters, n_features) q, r = np.linalg.qr(centers.T, mode="reduced") return q.T elif init == "random-class": centers = np.zeros((n_clusters, n_features)) for cc in range(n_clusters): while np.linalg.norm(centers[cc, :]) == 0: labels = np.random.randint(0, n_clusters, n_examples) centers[cc, :] = X[labels == cc, :].sum(axis=0) for cc in range(n_clusters): centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :]) return centers
[ "def", "_init_unit_centers", "(", "X", ",", "n_clusters", ",", "random_state", ",", "init", ")", ":", "n_examples", ",", "n_features", "=", "np", ".", "shape", "(", "X", ")", "if", "isinstance", "(", "init", ",", "np", ".", "ndarray", ")", ":", "n_init_clusters", ",", "n_init_features", "=", "init", ".", "shape", "assert", "n_init_clusters", "==", "n_clusters", "assert", "n_init_features", "==", "n_features", "# ensure unit normed centers", "centers", "=", "init", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "return", "centers", "elif", "init", "==", "\"spherical-k-means\"", ":", "labels", ",", "inertia", ",", "centers", ",", "iters", "=", "spherical_kmeans", ".", "_spherical_kmeans_single_lloyd", "(", "X", ",", "n_clusters", ",", "x_squared_norms", "=", "np", ".", "ones", "(", "(", "n_examples", ",", ")", ")", ",", "init", "=", "\"k-means++\"", ")", "return", "centers", "elif", "init", "==", "\"random\"", ":", "centers", "=", "np", ".", "random", ".", "randn", "(", "n_clusters", ",", "n_features", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "return", "centers", "elif", "init", "==", "\"k-means++\"", ":", "centers", "=", "_init_centroids", "(", "X", ",", "n_clusters", ",", "\"k-means++\"", ",", "random_state", "=", "random_state", ",", "x_squared_norms", "=", "np", ".", "ones", "(", "(", "n_examples", ",", ")", ")", ",", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "return", "centers", "elif", "init", "==", "\"random-orthonormal\"", ":", "centers", "=", "np", ".", "random", ".", "randn", "(", "n_clusters", ",", "n_features", ")", "q", ",", "r", "=", "np", ".", "linalg", ".", "qr", "(", "centers", ".", "T", ",", "mode", "=", "\"reduced\"", ")", "return", "q", ".", "T", "elif", "init", "==", "\"random-class\"", ":", "centers", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", "n_features", ")", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "while", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "==", "0", ":", "labels", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "n_clusters", ",", "n_examples", ")", "centers", "[", "cc", ",", ":", "]", "=", "X", "[", "labels", "==", "cc", ",", ":", "]", ".", "sum", "(", "axis", "=", "0", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "return", "centers" ]
Initializes unit norm centers. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. init: (string) one of k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers.
[ "Initializes", "unit", "norm", "centers", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L171-L252
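A small sketch of the `random-orthonormal` branch: the QR step makes the k centers mutually orthogonal unit vectors, which the assertion below verifies.

```python
import numpy as np

X = np.random.randn(200, 40)
X /= np.linalg.norm(X, axis=1, keepdims=True)            # unit-norm rows
centers = _init_unit_centers(X, n_clusters=5, random_state=None,
                             init="random-orthonormal")
assert np.allclose(centers @ centers.T, np.eye(5), atol=1e-8)
```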
17,077
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_expectation
def _expectation(X, centers, weights, concentrations, posterior_type="soft"): """Compute the log-likelihood of each datapoint being in each cluster. Parameters ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] Returns ---------- posterior : array, [n_centers, n_examples] """ n_examples, n_features = np.shape(X) n_clusters, _ = centers.shape if n_features <= 50: # works up to about 50 before numrically unstable vmf_f = _vmf_log else: vmf_f = _vmf_log_asymptotic f_log = np.zeros((n_clusters, n_examples)) for cc in range(n_clusters): f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :]) posterior = np.zeros((n_clusters, n_examples)) if posterior_type == "soft": weights_log = np.log(weights) posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log for ee in range(n_examples): posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee])) elif posterior_type == "hard": weights_log = np.log(weights) weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log for ee in range(n_examples): posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0 return posterior
python
def _expectation(X, centers, weights, concentrations, posterior_type="soft"): """Compute the log-likelihood of each datapoint being in each cluster. Parameters ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] Returns ---------- posterior : array, [n_centers, n_examples] """ n_examples, n_features = np.shape(X) n_clusters, _ = centers.shape if n_features <= 50: # works up to about 50 before numrically unstable vmf_f = _vmf_log else: vmf_f = _vmf_log_asymptotic f_log = np.zeros((n_clusters, n_examples)) for cc in range(n_clusters): f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :]) posterior = np.zeros((n_clusters, n_examples)) if posterior_type == "soft": weights_log = np.log(weights) posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log for ee in range(n_examples): posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee])) elif posterior_type == "hard": weights_log = np.log(weights) weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log for ee in range(n_examples): posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0 return posterior
[ "def", "_expectation", "(", "X", ",", "centers", ",", "weights", ",", "concentrations", ",", "posterior_type", "=", "\"soft\"", ")", ":", "n_examples", ",", "n_features", "=", "np", ".", "shape", "(", "X", ")", "n_clusters", ",", "_", "=", "centers", ".", "shape", "if", "n_features", "<=", "50", ":", "# works up to about 50 before numrically unstable", "vmf_f", "=", "_vmf_log", "else", ":", "vmf_f", "=", "_vmf_log_asymptotic", "f_log", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", "n_examples", ")", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "f_log", "[", "cc", ",", ":", "]", "=", "vmf_f", "(", "X", ",", "concentrations", "[", "cc", "]", ",", "centers", "[", "cc", ",", ":", "]", ")", "posterior", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", "n_examples", ")", ")", "if", "posterior_type", "==", "\"soft\"", ":", "weights_log", "=", "np", ".", "log", "(", "weights", ")", "posterior", "=", "np", ".", "tile", "(", "weights_log", ".", "T", ",", "(", "n_examples", ",", "1", ")", ")", ".", "T", "+", "f_log", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "posterior", "[", ":", ",", "ee", "]", "=", "np", ".", "exp", "(", "posterior", "[", ":", ",", "ee", "]", "-", "logsumexp", "(", "posterior", "[", ":", ",", "ee", "]", ")", ")", "elif", "posterior_type", "==", "\"hard\"", ":", "weights_log", "=", "np", ".", "log", "(", "weights", ")", "weighted_f_log", "=", "np", ".", "tile", "(", "weights_log", ".", "T", ",", "(", "n_examples", ",", "1", ")", ")", ".", "T", "+", "f_log", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "posterior", "[", "np", ".", "argmax", "(", "weighted_f_log", "[", ":", ",", "ee", "]", ")", ",", "ee", "]", "=", "1.0", "return", "posterior" ]
Compute the log-likelihood of each datapoint being in each cluster. Parameters ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] Returns ---------- posterior : array, [n_centers, n_examples]
[ "Compute", "the", "log", "-", "likelihood", "of", "each", "datapoint", "being", "in", "each", "cluster", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L255-L293
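In formula form, the soft E-step above computes (in log-space, with `logsumexp` for numerical stability)

```latex
p(z_i = c \mid x_i) \;=\;
\frac{\alpha_c \, f_{\mathrm{vMF}}(x_i \mid \mu_c, \kappa_c)}
     {\sum_{c'} \alpha_{c'} \, f_{\mathrm{vMF}}(x_i \mid \mu_{c'}, \kappa_{c'})}.
```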
17,078
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_maximization
def _maximization(X, posterior, force_weights=None): """Estimate new centers, weights, and concentrations from Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] """ n_examples, n_features = X.shape n_clusters, n_examples = posterior.shape concentrations = np.zeros((n_clusters,)) centers = np.zeros((n_clusters, n_features)) if force_weights is None: weights = np.zeros((n_clusters,)) for cc in range(n_clusters): # update weights (alpha) if force_weights is None: weights[cc] = np.mean(posterior[cc, :]) else: weights = force_weights # update centers (mu) X_scaled = X.copy() if sp.issparse(X): X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr)) else: for ee in range(n_examples): X_scaled[ee, :] *= posterior[cc, ee] centers[cc, :] = X_scaled.sum(axis=0) # normalize centers center_norm = np.linalg.norm(centers[cc, :]) if center_norm > 1e-8: centers[cc, :] = centers[cc, :] / center_norm # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / (n_examples * weights[cc]) concentrations[cc] = rbar * n_features - np.power(rbar, 3.) if np.abs(rbar - 1.0) < 1e-10: concentrations[cc] = MAX_CONTENTRATION else: concentrations[cc] /= 1. - np.power(rbar, 2.) # let python know we can free this (good for large dense X) del X_scaled return centers, weights, concentrations
python
def _maximization(X, posterior, force_weights=None): """Estimate new centers, weights, and concentrations from Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] """ n_examples, n_features = X.shape n_clusters, n_examples = posterior.shape concentrations = np.zeros((n_clusters,)) centers = np.zeros((n_clusters, n_features)) if force_weights is None: weights = np.zeros((n_clusters,)) for cc in range(n_clusters): # update weights (alpha) if force_weights is None: weights[cc] = np.mean(posterior[cc, :]) else: weights = force_weights # update centers (mu) X_scaled = X.copy() if sp.issparse(X): X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr)) else: for ee in range(n_examples): X_scaled[ee, :] *= posterior[cc, ee] centers[cc, :] = X_scaled.sum(axis=0) # normalize centers center_norm = np.linalg.norm(centers[cc, :]) if center_norm > 1e-8: centers[cc, :] = centers[cc, :] / center_norm # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / (n_examples * weights[cc]) concentrations[cc] = rbar * n_features - np.power(rbar, 3.) if np.abs(rbar - 1.0) < 1e-10: concentrations[cc] = MAX_CONTENTRATION else: concentrations[cc] /= 1. - np.power(rbar, 2.) # let python know we can free this (good for large dense X) del X_scaled return centers, weights, concentrations
[ "def", "_maximization", "(", "X", ",", "posterior", ",", "force_weights", "=", "None", ")", ":", "n_examples", ",", "n_features", "=", "X", ".", "shape", "n_clusters", ",", "n_examples", "=", "posterior", ".", "shape", "concentrations", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", ")", ")", "centers", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", "n_features", ")", ")", "if", "force_weights", "is", "None", ":", "weights", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", ")", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "# update weights (alpha)", "if", "force_weights", "is", "None", ":", "weights", "[", "cc", "]", "=", "np", ".", "mean", "(", "posterior", "[", "cc", ",", ":", "]", ")", "else", ":", "weights", "=", "force_weights", "# update centers (mu)", "X_scaled", "=", "X", ".", "copy", "(", ")", "if", "sp", ".", "issparse", "(", "X", ")", ":", "X_scaled", ".", "data", "*=", "posterior", "[", "cc", ",", ":", "]", ".", "repeat", "(", "np", ".", "diff", "(", "X_scaled", ".", "indptr", ")", ")", "else", ":", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "X_scaled", "[", "ee", ",", ":", "]", "*=", "posterior", "[", "cc", ",", "ee", "]", "centers", "[", "cc", ",", ":", "]", "=", "X_scaled", ".", "sum", "(", "axis", "=", "0", ")", "# normalize centers", "center_norm", "=", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "if", "center_norm", ">", "1e-8", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "center_norm", "# update concentration (kappa) [TODO: add other kappa approximations]", "rbar", "=", "center_norm", "/", "(", "n_examples", "*", "weights", "[", "cc", "]", ")", "concentrations", "[", "cc", "]", "=", "rbar", "*", "n_features", "-", "np", ".", "power", "(", "rbar", ",", "3.", ")", "if", "np", ".", "abs", "(", "rbar", "-", "1.0", ")", "<", "1e-10", ":", "concentrations", "[", "cc", "]", "=", "MAX_CONTENTRATION", "else", ":", "concentrations", "[", "cc", "]", "/=", "1.", "-", "np", ".", "power", "(", "rbar", ",", "2.", ")", "# let python know we can free this (good for large dense X)", "del", "X_scaled", "return", "centers", ",", "weights", ",", "concentrations" ]
Estimate new centers, weights, and concentrations from the posterior matrix. Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use it instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] concentrations (kappa) : array, [n_centers, ]
[ "Estimate", "new", "centers", "weights", "and", "concentrations", "from" ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L296-L354
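The concentration update in `_maximization` above is the Banerjee et al. (2005) approximation of kappa mentioned in its TODO. A minimal standalone sketch of that approximation follows; the helper name and the toy numbers are illustrative choices of my own, not part of the library.

import numpy as np

def banerjee_kappa(rbar, n_features, eps=1e-10, max_kappa=1e9):
    """Approximate a vMF concentration from the mean resultant length rbar.

    kappa ~= rbar * (p - rbar**2) / (1 - rbar**2), capped as rbar -> 1.
    """
    if abs(rbar - 1.0) < eps:
        return max_kappa
    return (rbar * n_features - rbar ** 3) / (1.0 - rbar ** 2)

# toy check: a tightly concentrated cluster (rbar near 1) yields a large kappa
print(banerjee_kappa(0.99, n_features=50))
print(banerjee_kappa(0.10, n_features=50))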
17,079
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
_movMF
def _movMF( X, n_clusters, posterior_type="soft", force_weights=None, max_iter=300, verbose=False, init="random-class", random_state=None, tol=1e-6, ): """Mixture of von Mises Fisher clustering. Implements the algorithms (i) and (ii) from "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions" by Banerjee, Dhillon, Ghosh, and Sra. TODO: Currently only supports Banerjee et al 2005 approximation of kappa, however, there are numerous other approximations see _update_params. Attribution ---------- Approximation of log-vmf distribution function from movMF R-package. movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions by Kurt Hornik, Bettina Grun, 2014 Find more at: https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf https://cran.r-project.org/web/packages/movMF/index.html Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. posterior_type: 'soft' or 'hard' Type of posterior computed in exepectation step. See note about attribute: self.posterior_ force_weights : None or array [n_clusters, ] If None, the algorithm will estimate the weights. If an array of weights, algorithm will estimate concentrations and centers with given weights. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init: (string) one of random-class [default]: random class assignment & centroid computation k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. tol : float, default: 1e-6 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. 
""" random_state = check_random_state(random_state) n_examples, n_features = np.shape(X) # init centers (mus) centers = _init_unit_centers(X, n_clusters, random_state, init) # init weights (alphas) if force_weights is None: weights = np.ones((n_clusters,)) weights = weights / np.sum(weights) else: weights = force_weights # init concentrations (kappas) concentrations = np.ones((n_clusters,)) if verbose: print("Initialization complete") for iter in range(max_iter): centers_prev = centers.copy() # expectation step posterior = _expectation( X, centers, weights, concentrations, posterior_type=posterior_type ) # maximization step centers, weights, concentrations = _maximization( X, posterior, force_weights=force_weights ) # check convergence tolcheck = squared_norm(centers_prev - centers) if tolcheck <= tol: if verbose: print( "Converged at iteration %d: " "center shift %e within tolerance %e" % (iter, tolcheck, tol) ) break # labels come for free via posterior labels = np.zeros((n_examples,)) for ee in range(n_examples): labels[ee] = np.argmax(posterior[:, ee]) inertia = _inertia_from_labels(X, centers, labels) return centers, weights, concentrations, posterior, labels, inertia
python
def _movMF( X, n_clusters, posterior_type="soft", force_weights=None, max_iter=300, verbose=False, init="random-class", random_state=None, tol=1e-6, ): """Mixture of von Mises Fisher clustering. Implements the algorithms (i) and (ii) from "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions" by Banerjee, Dhillon, Ghosh, and Sra. TODO: Currently only supports Banerjee et al 2005 approximation of kappa, however, there are numerous other approximations see _update_params. Attribution ---------- Approximation of log-vmf distribution function from movMF R-package. movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions by Kurt Hornik, Bettina Grun, 2014 Find more at: https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf https://cran.r-project.org/web/packages/movMF/index.html Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. posterior_type: 'soft' or 'hard' Type of posterior computed in exepectation step. See note about attribute: self.posterior_ force_weights : None or array [n_clusters, ] If None, the algorithm will estimate the weights. If an array of weights, algorithm will estimate concentrations and centers with given weights. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init: (string) one of random-class [default]: random class assignment & centroid computation k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. tol : float, default: 1e-6 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. 
""" random_state = check_random_state(random_state) n_examples, n_features = np.shape(X) # init centers (mus) centers = _init_unit_centers(X, n_clusters, random_state, init) # init weights (alphas) if force_weights is None: weights = np.ones((n_clusters,)) weights = weights / np.sum(weights) else: weights = force_weights # init concentrations (kappas) concentrations = np.ones((n_clusters,)) if verbose: print("Initialization complete") for iter in range(max_iter): centers_prev = centers.copy() # expectation step posterior = _expectation( X, centers, weights, concentrations, posterior_type=posterior_type ) # maximization step centers, weights, concentrations = _maximization( X, posterior, force_weights=force_weights ) # check convergence tolcheck = squared_norm(centers_prev - centers) if tolcheck <= tol: if verbose: print( "Converged at iteration %d: " "center shift %e within tolerance %e" % (iter, tolcheck, tol) ) break # labels come for free via posterior labels = np.zeros((n_examples,)) for ee in range(n_examples): labels[ee] = np.argmax(posterior[:, ee]) inertia = _inertia_from_labels(X, centers, labels) return centers, weights, concentrations, posterior, labels, inertia
[ "def", "_movMF", "(", "X", ",", "n_clusters", ",", "posterior_type", "=", "\"soft\"", ",", "force_weights", "=", "None", ",", "max_iter", "=", "300", ",", "verbose", "=", "False", ",", "init", "=", "\"random-class\"", ",", "random_state", "=", "None", ",", "tol", "=", "1e-6", ",", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "n_examples", ",", "n_features", "=", "np", ".", "shape", "(", "X", ")", "# init centers (mus)", "centers", "=", "_init_unit_centers", "(", "X", ",", "n_clusters", ",", "random_state", ",", "init", ")", "# init weights (alphas)", "if", "force_weights", "is", "None", ":", "weights", "=", "np", ".", "ones", "(", "(", "n_clusters", ",", ")", ")", "weights", "=", "weights", "/", "np", ".", "sum", "(", "weights", ")", "else", ":", "weights", "=", "force_weights", "# init concentrations (kappas)", "concentrations", "=", "np", ".", "ones", "(", "(", "n_clusters", ",", ")", ")", "if", "verbose", ":", "print", "(", "\"Initialization complete\"", ")", "for", "iter", "in", "range", "(", "max_iter", ")", ":", "centers_prev", "=", "centers", ".", "copy", "(", ")", "# expectation step", "posterior", "=", "_expectation", "(", "X", ",", "centers", ",", "weights", ",", "concentrations", ",", "posterior_type", "=", "posterior_type", ")", "# maximization step", "centers", ",", "weights", ",", "concentrations", "=", "_maximization", "(", "X", ",", "posterior", ",", "force_weights", "=", "force_weights", ")", "# check convergence", "tolcheck", "=", "squared_norm", "(", "centers_prev", "-", "centers", ")", "if", "tolcheck", "<=", "tol", ":", "if", "verbose", ":", "print", "(", "\"Converged at iteration %d: \"", "\"center shift %e within tolerance %e\"", "%", "(", "iter", ",", "tolcheck", ",", "tol", ")", ")", "break", "# labels come for free via posterior", "labels", "=", "np", ".", "zeros", "(", "(", "n_examples", ",", ")", ")", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "labels", "[", "ee", "]", "=", "np", ".", "argmax", "(", "posterior", "[", ":", ",", "ee", "]", ")", "inertia", "=", "_inertia_from_labels", "(", "X", ",", "centers", ",", "labels", ")", "return", "centers", ",", "weights", ",", "concentrations", ",", "posterior", ",", "labels", ",", "inertia" ]
Mixture of von Mises Fisher clustering. Implements the algorithms (i) and (ii) from "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions" by Banerjee, Dhillon, Ghosh, and Sra. TODO: Currently only supports Banerjee et al 2005 approximation of kappa, however, there are numerous other approximations see _update_params. Attribution ---------- Approximation of log-vmf distribution function from movMF R-package. movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions by Kurt Hornik, Bettina Grun, 2014 Find more at: https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf https://cran.r-project.org/web/packages/movMF/index.html Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. posterior_type: 'soft' or 'hard' Type of posterior computed in exepectation step. See note about attribute: self.posterior_ force_weights : None or array [n_clusters, ] If None, the algorithm will estimate the weights. If an array of weights, algorithm will estimate concentrations and centers with given weights. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init: (string) one of random-class [default]: random class assignment & centroid computation k-means++ : uses sklearn k-means++ initialization algorithm spherical-k-means : use centroids from one pass of spherical k-means random : random unit norm vectors random-orthonormal : random orthonormal vectors If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. tol : float, default: 1e-6 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean.
[ "Mixture", "of", "von", "Mises", "Fisher", "clustering", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L357-L497
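The expectation step called inside `_movMF` (`_expectation`) is not reproduced in this record. As a rough illustration of what a soft E-step for a von Mises-Fisher mixture computes, here is a standalone sketch using the standard vMF log-density; the function names, the use of scipy.special.ive for the normalizer, and the normalization details are assumptions for illustration, not the library's exact code.

import numpy as np
from scipy.special import ive  # exponentially scaled modified Bessel function

def vmf_log_density(X, mu, kappa):
    """log f(x; mu, kappa) for unit-norm rows of X on the (p-1)-sphere."""
    p = X.shape[1]
    # log C_p(kappa) = (p/2 - 1) * log kappa - (p/2) * log(2*pi) - log I_{p/2-1}(kappa)
    # where log I_v(kappa) = log(ive(v, kappa)) + kappa (numerically stable form)
    log_norm = ((p / 2.0 - 1.0) * np.log(kappa)
                - (p / 2.0) * np.log(2.0 * np.pi)
                - (np.log(ive(p / 2.0 - 1.0, kappa)) + kappa))
    return log_norm + kappa * X.dot(mu)

def soft_posterior(X, centers, weights, concentrations):
    """posterior[k, i] proportional to weights[k] * f(x_i; mu_k, kappa_k)."""
    log_post = np.vstack([
        np.log(w) + vmf_log_density(X, mu, kappa)
        for mu, w, kappa in zip(centers, weights, concentrations)
    ])
    log_post -= log_post.max(axis=0, keepdims=True)  # guard against overflow
    post = np.exp(log_post)
    return post / post.sum(axis=0, keepdims=True)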
17,080
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
movMF
def movMF( X, n_clusters, posterior_type="soft", force_weights=None, n_init=10, n_jobs=1, max_iter=300, verbose=False, init="random-class", random_state=None, tol=1e-6, copy_x=True, ): """Wrapper for parallelization of _movMF and running n_init times. """ if n_init <= 0: raise ValueError( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) if hasattr(init, "__array__"): init = check_array(init, dtype=X.dtype.type, copy=True) _validate_center_shape(X, n_clusters, init) if n_init != 1: warnings.warn( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init, RuntimeWarning, stacklevel=2, ) n_init = 1 # defaults best_centers = None best_labels = None best_weights = None best_concentrations = None best_posterior = None best_inertia = None if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # cluster on the sphere (centers, weights, concentrations, posterior, labels, inertia) = _movMF( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_centers = centers.copy() best_labels = labels.copy() best_weights = weights.copy() best_concentrations = concentrations.copy() best_posterior = posterior.copy() best_inertia = inertia else: # parallelisation of movMF runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_movMF)( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) for seed in seeds ) # Get results with the lowest inertia centers, weights, concentrations, posteriors, labels, inertia = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_concentrations = concentrations[best] best_posterior = posteriors[best] best_weights = weights[best] return ( best_centers, best_labels, best_inertia, best_weights, best_concentrations, best_posterior, )
python
def movMF( X, n_clusters, posterior_type="soft", force_weights=None, n_init=10, n_jobs=1, max_iter=300, verbose=False, init="random-class", random_state=None, tol=1e-6, copy_x=True, ): """Wrapper for parallelization of _movMF and running n_init times. """ if n_init <= 0: raise ValueError( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) if hasattr(init, "__array__"): init = check_array(init, dtype=X.dtype.type, copy=True) _validate_center_shape(X, n_clusters, init) if n_init != 1: warnings.warn( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init, RuntimeWarning, stacklevel=2, ) n_init = 1 # defaults best_centers = None best_labels = None best_weights = None best_concentrations = None best_posterior = None best_inertia = None if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # cluster on the sphere (centers, weights, concentrations, posterior, labels, inertia) = _movMF( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_centers = centers.copy() best_labels = labels.copy() best_weights = weights.copy() best_concentrations = concentrations.copy() best_posterior = posterior.copy() best_inertia = inertia else: # parallelisation of movMF runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_movMF)( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) for seed in seeds ) # Get results with the lowest inertia centers, weights, concentrations, posteriors, labels, inertia = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_concentrations = concentrations[best] best_posterior = posteriors[best] best_weights = weights[best] return ( best_centers, best_labels, best_inertia, best_weights, best_concentrations, best_posterior, )
[ "def", "movMF", "(", "X", ",", "n_clusters", ",", "posterior_type", "=", "\"soft\"", ",", "force_weights", "=", "None", ",", "n_init", "=", "10", ",", "n_jobs", "=", "1", ",", "max_iter", "=", "300", ",", "verbose", "=", "False", ",", "init", "=", "\"random-class\"", ",", "random_state", "=", "None", ",", "tol", "=", "1e-6", ",", "copy_x", "=", "True", ",", ")", ":", "if", "n_init", "<=", "0", ":", "raise", "ValueError", "(", "\"Invalid number of initializations.\"", "\" n_init=%d must be bigger than zero.\"", "%", "n_init", ")", "random_state", "=", "check_random_state", "(", "random_state", ")", "if", "max_iter", "<=", "0", ":", "raise", "ValueError", "(", "\"Number of iterations should be a positive number,\"", "\" got %d instead\"", "%", "max_iter", ")", "best_inertia", "=", "np", ".", "infty", "X", "=", "as_float_array", "(", "X", ",", "copy", "=", "copy_x", ")", "tol", "=", "_tolerance", "(", "X", ",", "tol", ")", "if", "hasattr", "(", "init", ",", "\"__array__\"", ")", ":", "init", "=", "check_array", "(", "init", ",", "dtype", "=", "X", ".", "dtype", ".", "type", ",", "copy", "=", "True", ")", "_validate_center_shape", "(", "X", ",", "n_clusters", ",", "init", ")", "if", "n_init", "!=", "1", ":", "warnings", ".", "warn", "(", "\"Explicit initial center position passed: \"", "\"performing only one init in k-means instead of n_init=%d\"", "%", "n_init", ",", "RuntimeWarning", ",", "stacklevel", "=", "2", ",", ")", "n_init", "=", "1", "# defaults", "best_centers", "=", "None", "best_labels", "=", "None", "best_weights", "=", "None", "best_concentrations", "=", "None", "best_posterior", "=", "None", "best_inertia", "=", "None", "if", "n_jobs", "==", "1", ":", "# For a single thread, less memory is needed if we just store one set", "# of the best results (as opposed to one set per run per thread).", "for", "it", "in", "range", "(", "n_init", ")", ":", "# cluster on the sphere", "(", "centers", ",", "weights", ",", "concentrations", ",", "posterior", ",", "labels", ",", "inertia", ")", "=", "_movMF", "(", "X", ",", "n_clusters", ",", "posterior_type", "=", "posterior_type", ",", "force_weights", "=", "force_weights", ",", "max_iter", "=", "max_iter", ",", "verbose", "=", "verbose", ",", "init", "=", "init", ",", "random_state", "=", "random_state", ",", "tol", "=", "tol", ",", ")", "# determine if these results are the best so far", "if", "best_inertia", "is", "None", "or", "inertia", "<", "best_inertia", ":", "best_centers", "=", "centers", ".", "copy", "(", ")", "best_labels", "=", "labels", ".", "copy", "(", ")", "best_weights", "=", "weights", ".", "copy", "(", ")", "best_concentrations", "=", "concentrations", ".", "copy", "(", ")", "best_posterior", "=", "posterior", ".", "copy", "(", ")", "best_inertia", "=", "inertia", "else", ":", "# parallelisation of movMF runs", "seeds", "=", "random_state", ".", "randint", "(", "np", ".", "iinfo", "(", "np", ".", "int32", ")", ".", "max", ",", "size", "=", "n_init", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "0", ")", "(", "delayed", "(", "_movMF", ")", "(", "X", ",", "n_clusters", ",", "posterior_type", "=", "posterior_type", ",", "force_weights", "=", "force_weights", ",", "max_iter", "=", "max_iter", ",", "verbose", "=", "verbose", ",", "init", "=", "init", ",", "random_state", "=", "random_state", ",", "tol", "=", "tol", ",", ")", "for", "seed", "in", "seeds", ")", "# Get results with the lowest inertia", "centers", ",", "weights", ",", "concentrations", ",", "posteriors", ",", "labels", ",", 
"inertia", "=", "zip", "(", "*", "results", ")", "best", "=", "np", ".", "argmin", "(", "inertia", ")", "best_labels", "=", "labels", "[", "best", "]", "best_inertia", "=", "inertia", "[", "best", "]", "best_centers", "=", "centers", "[", "best", "]", "best_concentrations", "=", "concentrations", "[", "best", "]", "best_posterior", "=", "posteriors", "[", "best", "]", "best_weights", "=", "weights", "[", "best", "]", "return", "(", "best_centers", ",", "best_labels", ",", "best_inertia", ",", "best_weights", ",", "best_concentrations", ",", "best_posterior", ",", ")" ]
Wrapper for parallelizing _movMF and running it n_init times.
[ "Wrapper", "for", "parallelization", "of", "_movMF", "and", "running", "n_init", "times", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L500-L614
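A short usage sketch of the wrapper above, assuming the module defining `movMF` is importable so the function is in scope; the toy data are invented and only the return order of the tuple is the point here.

import numpy as np
from sklearn.preprocessing import normalize

rng = np.random.RandomState(0)
X = normalize(rng.randn(200, 16))  # unit-norm rows, as the estimator expects

(centers, labels, inertia,
 weights, concentrations, posterior) = movMF(X, n_clusters=3, n_init=5,
                                             random_state=0)
print(centers.shape, labels.shape, weights.shape, concentrations.shape)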
17,081
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
VonMisesFisherMixture._check_fit_data
def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if X.shape[0] < self.n_clusters: raise ValueError( "n_samples=%d should be >= n_clusters=%d" % (X.shape[0], self.n_clusters) ) for ee in range(n_samples): if sp.issparse(X): n = sp.linalg.norm(X[ee, :]) else: n = np.linalg.norm(X[ee, :]) if np.abs(n - 1.) > 1e-4: raise ValueError("Data l2-norm must be 1, found {}".format(n)) return X
python
def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if X.shape[0] < self.n_clusters: raise ValueError( "n_samples=%d should be >= n_clusters=%d" % (X.shape[0], self.n_clusters) ) for ee in range(n_samples): if sp.issparse(X): n = sp.linalg.norm(X[ee, :]) else: n = np.linalg.norm(X[ee, :]) if np.abs(n - 1.) > 1e-4: raise ValueError("Data l2-norm must be 1, found {}".format(n)) return X
[ "def", "_check_fit_data", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "\"csr\"", ",", "dtype", "=", "[", "np", ".", "float64", ",", "np", ".", "float32", "]", ")", "n_samples", ",", "n_features", "=", "X", ".", "shape", "if", "X", ".", "shape", "[", "0", "]", "<", "self", ".", "n_clusters", ":", "raise", "ValueError", "(", "\"n_samples=%d should be >= n_clusters=%d\"", "%", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "n_clusters", ")", ")", "for", "ee", "in", "range", "(", "n_samples", ")", ":", "if", "sp", ".", "issparse", "(", "X", ")", ":", "n", "=", "sp", ".", "linalg", ".", "norm", "(", "X", "[", "ee", ",", ":", "]", ")", "else", ":", "n", "=", "np", ".", "linalg", ".", "norm", "(", "X", "[", "ee", ",", ":", "]", ")", "if", "np", ".", "abs", "(", "n", "-", "1.", ")", ">", "1e-4", ":", "raise", "ValueError", "(", "\"Data l2-norm must be 1, found {}\"", ".", "format", "(", "n", ")", ")", "return", "X" ]
Verify that the number of samples given is at least k
[ "Verify", "that", "the", "number", "of", "samples", "given", "is", "larger", "than", "k" ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L772-L791
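The check above rejects any row whose l2-norm differs from 1 by more than 1e-4. A small illustrative sketch of preparing raw data so it passes (toy data, plain numpy):

import numpy as np

rng = np.random.RandomState(42)
X_raw = rng.randn(100, 8)
X = X_raw / np.linalg.norm(X_raw, axis=1, keepdims=True)  # project rows onto the unit sphere

row_norms = np.linalg.norm(X, axis=1)
assert np.all(np.abs(row_norms - 1.0) < 1e-4)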
17,082
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
VonMisesFisherMixture.fit
def fit(self, X, y=None): """Compute mixture of von Mises Fisher clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ if self.normalize: X = normalize(X) self._check_force_weights() random_state = check_random_state(self.random_state) X = self._check_fit_data(X) ( self.cluster_centers_, self.labels_, self.inertia_, self.weights_, self.concentrations_, self.posterior_, ) = movMF( X, self.n_clusters, posterior_type=self.posterior_type, force_weights=self.force_weights, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, init=self.init, random_state=random_state, tol=self.tol, copy_x=self.copy_x, ) return self
python
def fit(self, X, y=None): """Compute mixture of von Mises Fisher clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ if self.normalize: X = normalize(X) self._check_force_weights() random_state = check_random_state(self.random_state) X = self._check_fit_data(X) ( self.cluster_centers_, self.labels_, self.inertia_, self.weights_, self.concentrations_, self.posterior_, ) = movMF( X, self.n_clusters, posterior_type=self.posterior_type, force_weights=self.force_weights, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, init=self.init, random_state=random_state, tol=self.tol, copy_x=self.copy_x, ) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "self", ".", "normalize", ":", "X", "=", "normalize", "(", "X", ")", "self", ".", "_check_force_weights", "(", ")", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "X", "=", "self", ".", "_check_fit_data", "(", "X", ")", "(", "self", ".", "cluster_centers_", ",", "self", ".", "labels_", ",", "self", ".", "inertia_", ",", "self", ".", "weights_", ",", "self", ".", "concentrations_", ",", "self", ".", "posterior_", ",", ")", "=", "movMF", "(", "X", ",", "self", ".", "n_clusters", ",", "posterior_type", "=", "self", ".", "posterior_type", ",", "force_weights", "=", "self", ".", "force_weights", ",", "n_init", "=", "self", ".", "n_init", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", "max_iter", "=", "self", ".", "max_iter", ",", "verbose", "=", "self", ".", "verbose", ",", "init", "=", "self", ".", "init", ",", "random_state", "=", "random_state", ",", "tol", "=", "self", ".", "tol", ",", "copy_x", "=", "self", ".", "copy_x", ",", ")", "return", "self" ]
Compute mixture of von Mises Fisher clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features)
[ "Compute", "mixture", "of", "von", "Mises", "Fisher", "clustering", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L814-L850
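An end-to-end sketch of the public estimator whose `fit` is shown above. The import path and the constructor keywords are assumptions inferred from the attributes used in `fit` (n_clusters, posterior_type, n_init, random_state); the data are toy values. The fitted attribute names (cluster_centers_, labels_, weights_, concentrations_) are the ones assigned by `fit` above.

import numpy as np
from spherecluster import VonMisesFisherMixture  # assumed import path

rng = np.random.RandomState(0)
X = rng.randn(300, 10)
X /= np.linalg.norm(X, axis=1, keepdims=True)  # unit-norm rows

vmf = VonMisesFisherMixture(n_clusters=4, posterior_type="soft",
                            n_init=3, random_state=0)
vmf.fit(X)

print(vmf.cluster_centers_.shape)  # (4, 10)
print(vmf.labels_[:10])
print(vmf.weights_, vmf.concentrations_)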
17,083
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
VonMisesFisherMixture.transform
def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the cosine distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. """ if self.normalize: X = normalize(X) check_is_fitted(self, "cluster_centers_") X = self._check_test_data(X) return self._transform(X)
python
def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the cosine distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. """ if self.normalize: X = normalize(X) check_is_fitted(self, "cluster_centers_") X = self._check_test_data(X) return self._transform(X)
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "self", ".", "normalize", ":", "X", "=", "normalize", "(", "X", ")", "check_is_fitted", "(", "self", ",", "\"cluster_centers_\"", ")", "X", "=", "self", ".", "_check_test_data", "(", "X", ")", "return", "self", ".", "_transform", "(", "X", ")" ]
Transform X to a cluster-distance space. In the new space, each dimension is the cosine distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space.
[ "Transform", "X", "to", "a", "cluster", "-", "distance", "space", ".", "In", "the", "new", "space", "each", "dimension", "is", "the", "cosine", "distance", "to", "the", "cluster", "centers", ".", "Note", "that", "even", "if", "X", "is", "sparse", "the", "array", "returned", "by", "transform", "will", "typically", "be", "dense", "." ]
701b0b1909088a56e353b363b2672580d4fe9d93
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L869-L890
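The private `_transform` used above is not shown in this record. For unit-norm rows and unit-norm centers, the cosine distance described in the docstring can be computed directly; the following is a plausible standalone equivalent, not necessarily the library's exact implementation.

import numpy as np

def cosine_distance_transform(X, centers):
    """D[i, k] = 1 - cos(x_i, mu_k) for unit-norm x_i and mu_k."""
    return 1.0 - X.dot(centers.T)

rng = np.random.RandomState(1)
X = rng.randn(5, 8)
X /= np.linalg.norm(X, axis=1, keepdims=True)
C = rng.randn(3, 8)
C /= np.linalg.norm(C, axis=1, keepdims=True)
print(cosine_distance_transform(X, C).shape)  # (5, 3)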
17,084
skggm/skggm
inverse_covariance/metrics.py
log_likelihood
def log_likelihood(covariance, precision): """Computes the log-likelihood between the covariance and precision estimate. Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- log-likelihood """ assert covariance.shape == precision.shape dim, _ = precision.shape log_likelihood_ = ( -np.sum(covariance * precision) + fast_logdet(precision) - dim * np.log(2 * np.pi) ) log_likelihood_ /= 2. return log_likelihood_
python
def log_likelihood(covariance, precision): """Computes the log-likelihood between the covariance and precision estimate. Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- log-likelihood """ assert covariance.shape == precision.shape dim, _ = precision.shape log_likelihood_ = ( -np.sum(covariance * precision) + fast_logdet(precision) - dim * np.log(2 * np.pi) ) log_likelihood_ /= 2. return log_likelihood_
[ "def", "log_likelihood", "(", "covariance", ",", "precision", ")", ":", "assert", "covariance", ".", "shape", "==", "precision", ".", "shape", "dim", ",", "_", "=", "precision", ".", "shape", "log_likelihood_", "=", "(", "-", "np", ".", "sum", "(", "covariance", "*", "precision", ")", "+", "fast_logdet", "(", "precision", ")", "-", "dim", "*", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", ")", "log_likelihood_", "/=", "2.", "return", "log_likelihood_" ]
Computes the Gaussian log-likelihood of the precision estimate with respect to the sample covariance. Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance (sample covariance) precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- log-likelihood
[ "Computes", "the", "log", "-", "likelihood", "between", "the", "covariance", "and", "precision", "estimate", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L6-L30
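The value computed above is the zero-mean Gaussian log-likelihood -0.5 * (Trace(S * Theta) - logdet(Theta) + d * log(2*pi)) evaluated at the sample covariance S and precision estimate Theta. A standalone numeric sanity check with toy matrices (plain numpy, not the library's code):

import numpy as np

d = 4
A = np.random.RandomState(0).randn(d, d)
covariance = A @ A.T + d * np.eye(d)   # a well-conditioned SPD "sample covariance"
precision = np.linalg.inv(covariance)  # exact inverse, for the check

sign, logdet_prec = np.linalg.slogdet(precision)
ll = 0.5 * (-np.sum(covariance * precision) + logdet_prec - d * np.log(2 * np.pi))
# with precision = inv(covariance): Trace(S * Theta) = d, so
# ll == -0.5 * (d + logdet(S) + d * log(2*pi))
print(ll)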
17,085
skggm/skggm
inverse_covariance/metrics.py
kl_loss
def kl_loss(covariance, precision): """Computes the KL divergence between precision estimate and reference covariance. The loss is computed as: Trace(Theta_1 * Sigma_0) - log(Theta_0 * Sigma_1) - dim(Sigma) Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- KL-divergence """ assert covariance.shape == precision.shape dim, _ = precision.shape logdet_p_dot_c = fast_logdet(np.dot(precision, covariance)) return 0.5 * (np.sum(precision * covariance) - logdet_p_dot_c - dim)
python
def kl_loss(covariance, precision): """Computes the KL divergence between precision estimate and reference covariance. The loss is computed as: Trace(Theta_1 * Sigma_0) - log(Theta_0 * Sigma_1) - dim(Sigma) Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- KL-divergence """ assert covariance.shape == precision.shape dim, _ = precision.shape logdet_p_dot_c = fast_logdet(np.dot(precision, covariance)) return 0.5 * (np.sum(precision * covariance) - logdet_p_dot_c - dim)
[ "def", "kl_loss", "(", "covariance", ",", "precision", ")", ":", "assert", "covariance", ".", "shape", "==", "precision", ".", "shape", "dim", ",", "_", "=", "precision", ".", "shape", "logdet_p_dot_c", "=", "fast_logdet", "(", "np", ".", "dot", "(", "precision", ",", "covariance", ")", ")", "return", "0.5", "*", "(", "np", ".", "sum", "(", "precision", "*", "covariance", ")", "-", "logdet_p_dot_c", "-", "dim", ")" ]
Computes the KL divergence between the precision estimate and the reference covariance. The loss is computed as: 0.5 * (Trace(Theta_1 * Sigma_0) - logdet(Theta_1 * Sigma_0) - dim(Sigma)) Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision : 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested Returns ------- KL-divergence
[ "Computes", "the", "KL", "divergence", "between", "precision", "estimate", "and", "reference", "covariance", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L33-L56
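A quick numeric check of the divergence above: it is (near) zero when the precision estimate is exactly the inverse of the reference covariance and positive otherwise. The reimplementation below mirrors the formula in the code and uses toy matrices only.

import numpy as np

def kl_loss_np(covariance, precision):
    dim = covariance.shape[0]
    sign, logdet = np.linalg.slogdet(precision @ covariance)
    return 0.5 * (np.sum(precision * covariance) - logdet - dim)

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
cov = A @ A.T + 5 * np.eye(5)

print(kl_loss_np(cov, np.linalg.inv(cov)))  # ~0 at the truth
print(kl_loss_np(cov, np.eye(5)))           # > 0 for a mismatched estimate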
17,086
skggm/skggm
inverse_covariance/metrics.py
ebic
def ebic(covariance, precision, n_samples, n_features, gamma=0): """ Extended Bayesian Information Criteria for model selection. When using path mode, use this as an alternative to cross-validation for finding lambda. See: "Extended Bayesian Information Criteria for Gaussian Graphical Models" R. Foygel and M. Drton, NIPS 2010 Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance (sample covariance) precision : 2D ndarray (n_features, n_features) The precision matrix of the model to be tested n_samples : int Number of examples. n_features : int Dimension of an example. lam: (float) Threshold value for precision matrix. This should be lambda scaling used to obtain this estimate. gamma : (float) \in (0, 1) Choice of gamma=0 leads to classical BIC Positive gamma leads to stronger penalization of large graphs. Returns ------- ebic score (float). Caller should minimized this score. """ l_theta = -np.sum(covariance * precision) + fast_logdet(precision) l_theta *= n_features / 2. # is something goes wrong with fast_logdet, return large value if np.isinf(l_theta) or np.isnan(l_theta): return 1e10 mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps precision_nnz = (np.sum(mask) - n_features) / 2.0 # lower off diagonal tri return ( -2.0 * l_theta + precision_nnz * np.log(n_samples) + 4.0 * precision_nnz * np.log(n_features) * gamma )
python
def ebic(covariance, precision, n_samples, n_features, gamma=0): """ Extended Bayesian Information Criteria for model selection. When using path mode, use this as an alternative to cross-validation for finding lambda. See: "Extended Bayesian Information Criteria for Gaussian Graphical Models" R. Foygel and M. Drton, NIPS 2010 Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance (sample covariance) precision : 2D ndarray (n_features, n_features) The precision matrix of the model to be tested n_samples : int Number of examples. n_features : int Dimension of an example. lam: (float) Threshold value for precision matrix. This should be lambda scaling used to obtain this estimate. gamma : (float) \in (0, 1) Choice of gamma=0 leads to classical BIC Positive gamma leads to stronger penalization of large graphs. Returns ------- ebic score (float). Caller should minimized this score. """ l_theta = -np.sum(covariance * precision) + fast_logdet(precision) l_theta *= n_features / 2. # is something goes wrong with fast_logdet, return large value if np.isinf(l_theta) or np.isnan(l_theta): return 1e10 mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps precision_nnz = (np.sum(mask) - n_features) / 2.0 # lower off diagonal tri return ( -2.0 * l_theta + precision_nnz * np.log(n_samples) + 4.0 * precision_nnz * np.log(n_features) * gamma )
[ "def", "ebic", "(", "covariance", ",", "precision", ",", "n_samples", ",", "n_features", ",", "gamma", "=", "0", ")", ":", "l_theta", "=", "-", "np", ".", "sum", "(", "covariance", "*", "precision", ")", "+", "fast_logdet", "(", "precision", ")", "l_theta", "*=", "n_features", "/", "2.", "# is something goes wrong with fast_logdet, return large value", "if", "np", ".", "isinf", "(", "l_theta", ")", "or", "np", ".", "isnan", "(", "l_theta", ")", ":", "return", "1e10", "mask", "=", "np", ".", "abs", "(", "precision", ".", "flat", ")", ">", "np", ".", "finfo", "(", "precision", ".", "dtype", ")", ".", "eps", "precision_nnz", "=", "(", "np", ".", "sum", "(", "mask", ")", "-", "n_features", ")", "/", "2.0", "# lower off diagonal tri", "return", "(", "-", "2.0", "*", "l_theta", "+", "precision_nnz", "*", "np", ".", "log", "(", "n_samples", ")", "+", "4.0", "*", "precision_nnz", "*", "np", ".", "log", "(", "n_features", ")", "*", "gamma", ")" ]
Extended Bayesian Information Criteria for model selection. When using path mode, use this as an alternative to cross-validation for finding lambda. See: "Extended Bayesian Information Criteria for Gaussian Graphical Models" R. Foygel and M. Drton, NIPS 2010 Parameters ---------- covariance : 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance (sample covariance) precision : 2D ndarray (n_features, n_features) The precision matrix of the model to be tested n_samples : int Number of examples. n_features : int Dimension of an example. lam: (float) Threshold value for precision matrix. This should be the lambda scaling used to obtain this estimate. gamma : (float) \in (0, 1) Choice of gamma=0 leads to classical BIC; positive gamma leads to stronger penalization of large graphs. Returns ------- ebic score (float). Caller should minimize this score.
[ "Extended", "Bayesian", "Information", "Criteria", "for", "model", "selection", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L79-L130
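A standalone restatement of the score above for quick experimentation; it mirrors the formula in the code (including the n_features / 2 scaling of the log-likelihood term and the count of nonzero off-diagonal precision entries) but is not the library's implementation. Inputs are toy values.

import numpy as np

def ebic_np(covariance, precision, n_samples, n_features, gamma=0.0):
    sign, logdet = np.linalg.slogdet(precision)
    l_theta = 0.5 * n_features * (-np.sum(covariance * precision) + logdet)
    mask = np.abs(precision) > np.finfo(precision.dtype).eps
    nnz = (np.count_nonzero(mask) - n_features) / 2.0  # off-diagonal edges
    return (-2.0 * l_theta
            + nnz * np.log(n_samples)
            + 4.0 * nnz * np.log(n_features) * gamma)

rng = np.random.RandomState(0)
A = rng.randn(6, 6)
cov = A @ A.T / 6.0 + np.eye(6)
print(ebic_np(cov, np.linalg.inv(cov), n_samples=200, n_features=6, gamma=0.1))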
17,087
skggm/skggm
inverse_covariance/profiling/graphs.py
lattice
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7): """Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization. """ degree = int(1 + np.round(alpha * n_features / 2.)) if random_sign: sign_row = -1.0 * np.ones(degree) + 2 * ( prng.uniform(low=0, high=1, size=degree) > .5 ) else: sign_row = -1.0 * np.ones(degree) # in the *very unlikely* event that we draw a bad row that sums to zero # (which is only possible when random_sign=True), we try again up to # MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of # values something is probably wrong and we raise. MAX_ATTEMPTS = 5 attempt = 0 row = np.zeros((n_features,)) while np.sum(row) == 0 and attempt < MAX_ATTEMPTS: row = np.zeros((n_features,)) row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree) attempt += 1 if np.sum(row) == 0: raise Exception("InvalidLattice", "Rows sum to 0.") return # sum-normalize and keep signs row /= np.abs(np.sum(row)) return sp.linalg.toeplitz(c=row, r=row)
python
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7): """Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization. """ degree = int(1 + np.round(alpha * n_features / 2.)) if random_sign: sign_row = -1.0 * np.ones(degree) + 2 * ( prng.uniform(low=0, high=1, size=degree) > .5 ) else: sign_row = -1.0 * np.ones(degree) # in the *very unlikely* event that we draw a bad row that sums to zero # (which is only possible when random_sign=True), we try again up to # MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of # values something is probably wrong and we raise. MAX_ATTEMPTS = 5 attempt = 0 row = np.zeros((n_features,)) while np.sum(row) == 0 and attempt < MAX_ATTEMPTS: row = np.zeros((n_features,)) row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree) attempt += 1 if np.sum(row) == 0: raise Exception("InvalidLattice", "Rows sum to 0.") return # sum-normalize and keep signs row /= np.abs(np.sum(row)) return sp.linalg.toeplitz(c=row, r=row)
[ "def", "lattice", "(", "prng", ",", "n_features", ",", "alpha", ",", "random_sign", "=", "False", ",", "low", "=", "0.3", ",", "high", "=", "0.7", ")", ":", "degree", "=", "int", "(", "1", "+", "np", ".", "round", "(", "alpha", "*", "n_features", "/", "2.", ")", ")", "if", "random_sign", ":", "sign_row", "=", "-", "1.0", "*", "np", ".", "ones", "(", "degree", ")", "+", "2", "*", "(", "prng", ".", "uniform", "(", "low", "=", "0", ",", "high", "=", "1", ",", "size", "=", "degree", ")", ">", ".5", ")", "else", ":", "sign_row", "=", "-", "1.0", "*", "np", ".", "ones", "(", "degree", ")", "# in the *very unlikely* event that we draw a bad row that sums to zero", "# (which is only possible when random_sign=True), we try again up to", "# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of", "# values something is probably wrong and we raise.", "MAX_ATTEMPTS", "=", "5", "attempt", "=", "0", "row", "=", "np", ".", "zeros", "(", "(", "n_features", ",", ")", ")", "while", "np", ".", "sum", "(", "row", ")", "==", "0", "and", "attempt", "<", "MAX_ATTEMPTS", ":", "row", "=", "np", ".", "zeros", "(", "(", "n_features", ",", ")", ")", "row", "[", "1", ":", "1", "+", "degree", "]", "=", "sign_row", "*", "prng", ".", "uniform", "(", "low", "=", "low", ",", "high", "=", "high", ",", "size", "=", "degree", ")", "attempt", "+=", "1", "if", "np", ".", "sum", "(", "row", ")", "==", "0", ":", "raise", "Exception", "(", "\"InvalidLattice\"", ",", "\"Rows sum to 0.\"", ")", "return", "# sum-normalize and keep signs", "row", "/=", "np", ".", "abs", "(", "np", ".", "sum", "(", "row", ")", ")", "return", "sp", ".", "linalg", ".", "toeplitz", "(", "c", "=", "row", ",", "r", "=", "row", ")" ]
Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization.
[ "Returns", "the", "adjacency", "matrix", "for", "a", "lattice", "network", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L5-L61
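A small sketch of exercising the generator above and checking the structure its docstring promises (zero diagonal, symmetric Toeplitz band). It assumes `lattice` from this file is in scope; the arguments are arbitrary toy values.

import numpy as np

prng = np.random.RandomState(7)
adj = lattice(prng, n_features=10, alpha=0.3)

print(adj.shape)                      # (10, 10)
print(np.allclose(np.diag(adj), 0))   # zero diagonal
print(np.allclose(adj, adj.T))        # Toeplitz with c == r is symmetric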
17,088
skggm/skggm
inverse_covariance/profiling/graphs.py
_to_diagonally_dominant
def _to_diagonally_dominant(mat): """Make matrix unweighted diagonally dominant using the Laplacian.""" mat += np.diag(np.sum(mat != 0, axis=1) + 0.01) return mat
python
def _to_diagonally_dominant(mat): """Make matrix unweighted diagonally dominant using the Laplacian.""" mat += np.diag(np.sum(mat != 0, axis=1) + 0.01) return mat
[ "def", "_to_diagonally_dominant", "(", "mat", ")", ":", "mat", "+=", "np", ".", "diag", "(", "np", ".", "sum", "(", "mat", "!=", "0", ",", "axis", "=", "1", ")", "+", "0.01", ")", "return", "mat" ]
Make matrix unweighted diagonally dominant using the Laplacian.
[ "Make", "matrix", "unweighted", "diagonally", "dominant", "using", "the", "Laplacian", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L103-L106
17,089
skggm/skggm
inverse_covariance/profiling/graphs.py
_to_diagonally_dominant_weighted
def _to_diagonally_dominant_weighted(mat): """Make matrix weighted diagonally dominant using the Laplacian.""" mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01) return mat
python
def _to_diagonally_dominant_weighted(mat): """Make matrix weighted diagonally dominant using the Laplacian.""" mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01) return mat
[ "def", "_to_diagonally_dominant_weighted", "(", "mat", ")", ":", "mat", "+=", "np", ".", "diag", "(", "np", ".", "sum", "(", "np", ".", "abs", "(", "mat", ")", ",", "axis", "=", "1", ")", "+", "0.01", ")", "return", "mat" ]
Make matrix weighted diagonally dominant using the Laplacian.
[ "Make", "matrix", "weighted", "diagonally", "dominant", "using", "the", "Laplacian", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L109-L112
17,090
skggm/skggm
inverse_covariance/profiling/graphs.py
_rescale_to_unit_diagonals
def _rescale_to_unit_diagonals(mat): """Rescale matrix to have unit diagonals. Note: Call only after diagonal dominance is ensured. """ d = np.sqrt(np.diag(mat)) mat /= d mat /= d[:, np.newaxis] return mat
python
def _rescale_to_unit_diagonals(mat): """Rescale matrix to have unit diagonals. Note: Call only after diagonal dominance is ensured. """ d = np.sqrt(np.diag(mat)) mat /= d mat /= d[:, np.newaxis] return mat
[ "def", "_rescale_to_unit_diagonals", "(", "mat", ")", ":", "d", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "mat", ")", ")", "mat", "/=", "d", "mat", "/=", "d", "[", ":", ",", "np", ".", "newaxis", "]", "return", "mat" ]
Rescale matrix to have unit diagonals. Note: Call only after diagonal dominance is ensured.
[ "Rescale", "matrix", "to", "have", "unit", "diagonals", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L115-L123
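Taken together, the three helpers above turn a weighted adjacency into a valid precision-like matrix: add a Laplacian-style diagonal so the matrix is diagonally dominant (hence positive definite), then rescale to unit diagonals. A standalone illustration of the same two steps on a toy adjacency:

import numpy as np

adj = np.array([[ 0.0, -0.4,  0.0],
                [-0.4,  0.0, -0.6],
                [ 0.0, -0.6,  0.0]])

# weighted diagonal dominance, as in _to_diagonally_dominant_weighted
mat = adj + np.diag(np.sum(np.abs(adj), axis=1) + 0.01)

# unit diagonals, as in _rescale_to_unit_diagonals
d = np.sqrt(np.diag(mat))
precision = mat / d / d[:, np.newaxis]

print(np.diag(precision))                         # all ones
print(np.all(np.linalg.eigvalsh(precision) > 0))  # positive definite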
17,091
skggm/skggm
inverse_covariance/profiling/graphs.py
Graph.create
def create(self, n_features, alpha): """Build a new graph with block structure. Parameters ----------- n_features : int alpha : float (0,1) The complexity / sparsity factor for each graph type. Returns ----------- (n_features, n_features) matrices: covariance, precision, adjacency """ n_block_features = int(np.floor(1. * n_features / self.n_blocks)) if n_block_features * self.n_blocks != n_features: raise ValueError( ( "Error: n_features {} not divisible by n_blocks {}." "Use n_features = n_blocks * int" ).format(n_features, self.n_blocks) ) return block_adj = self.prototype_adjacency(n_block_features, alpha) adjacency = blocks( self.prng, block_adj, n_blocks=self.n_blocks, chain_blocks=self.chain_blocks ) precision = self.to_precision(adjacency) covariance = self.to_covariance(precision) return covariance, precision, adjacency
python
def create(self, n_features, alpha): """Build a new graph with block structure. Parameters ----------- n_features : int alpha : float (0,1) The complexity / sparsity factor for each graph type. Returns ----------- (n_features, n_features) matrices: covariance, precision, adjacency """ n_block_features = int(np.floor(1. * n_features / self.n_blocks)) if n_block_features * self.n_blocks != n_features: raise ValueError( ( "Error: n_features {} not divisible by n_blocks {}." "Use n_features = n_blocks * int" ).format(n_features, self.n_blocks) ) return block_adj = self.prototype_adjacency(n_block_features, alpha) adjacency = blocks( self.prng, block_adj, n_blocks=self.n_blocks, chain_blocks=self.chain_blocks ) precision = self.to_precision(adjacency) covariance = self.to_covariance(precision) return covariance, precision, adjacency
[ "def", "create", "(", "self", ",", "n_features", ",", "alpha", ")", ":", "n_block_features", "=", "int", "(", "np", ".", "floor", "(", "1.", "*", "n_features", "/", "self", ".", "n_blocks", ")", ")", "if", "n_block_features", "*", "self", ".", "n_blocks", "!=", "n_features", ":", "raise", "ValueError", "(", "(", "\"Error: n_features {} not divisible by n_blocks {}.\"", "\"Use n_features = n_blocks * int\"", ")", ".", "format", "(", "n_features", ",", "self", ".", "n_blocks", ")", ")", "return", "block_adj", "=", "self", ".", "prototype_adjacency", "(", "n_block_features", ",", "alpha", ")", "adjacency", "=", "blocks", "(", "self", ".", "prng", ",", "block_adj", ",", "n_blocks", "=", "self", ".", "n_blocks", ",", "chain_blocks", "=", "self", ".", "chain_blocks", ")", "precision", "=", "self", ".", "to_precision", "(", "adjacency", ")", "covariance", "=", "self", ".", "to_covariance", "(", "precision", ")", "return", "covariance", ",", "precision", ",", "adjacency" ]
Build a new graph with block structure. Parameters ----------- n_features : int alpha : float (0,1) The complexity / sparsity factor for each graph type. Returns ----------- (n_features, n_features) matrices: covariance, precision, adjacency
[ "Build", "a", "new", "graph", "with", "block", "structure", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L176-L207
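The `blocks` helper used by `create` above is not shown in this record. As a rough illustration of the idea of tiling a prototype block into a block-structured adjacency, here is a standalone sketch using scipy's block_diag; the real helper presumably also handles the chain_blocks option, which this sketch ignores.

import numpy as np
from scipy.linalg import block_diag

block = np.array([[ 0.0, -0.5],
                  [-0.5,  0.0]])  # prototype adjacency for one block

n_blocks = 3
adjacency = block_diag(*([block] * n_blocks))  # (6, 6) block-diagonal adjacency
print(adjacency.shape)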
17,092
skggm/skggm
inverse_covariance/profiling/monte_carlo_profile.py
_sample_mvn
def _sample_mvn(n_samples, cov, prng): """Draw a multivariate normal sample from the graph defined by cov. Parameters ----------- n_samples : int cov : matrix of shape (n_features, n_features) Covariance matrix of the graph. prng : np.random.RandomState instance. """ n_features, _ = cov.shape return prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
python
def _sample_mvn(n_samples, cov, prng): """Draw a multivariate normal sample from the graph defined by cov. Parameters ----------- n_samples : int cov : matrix of shape (n_features, n_features) Covariance matrix of the graph. prng : np.random.RandomState instance. """ n_features, _ = cov.shape return prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
[ "def", "_sample_mvn", "(", "n_samples", ",", "cov", ",", "prng", ")", ":", "n_features", ",", "_", "=", "cov", ".", "shape", "return", "prng", ".", "multivariate_normal", "(", "np", ".", "zeros", "(", "n_features", ")", ",", "cov", ",", "size", "=", "n_samples", ")" ]
Draw a multivariate normal sample from the graph defined by cov. Parameters ----------- n_samples : int cov : matrix of shape (n_features, n_features) Covariance matrix of the graph. prng : np.random.RandomState instance.
[ "Draw", "a", "multivariate", "normal", "sample", "from", "the", "graph", "defined", "by", "cov", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/monte_carlo_profile.py#L13-L26
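A minimal sketch of the same draw done directly with numpy (seed and covariance values are illustrative):

import numpy as np

prng = np.random.RandomState(1)
cov = np.array([[1.0, 0.5],
                [0.5, 1.0]])
X = prng.multivariate_normal(np.zeros(cov.shape[0]), cov, size=1000)
# X.shape == (1000, 2); np.cov(X.T) should be close to cov for large samples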
17,093
skggm/skggm
inverse_covariance/model_average.py
_fully_random_weights
def _fully_random_weights(n_features, lam_scale, prng): """Generate a symmetric random matrix with zeros along the diagonal.""" weights = np.zeros((n_features, n_features)) n_off_diag = int((n_features ** 2 - n_features) / 2) weights[np.triu_indices(n_features, k=1)] = 0.1 * lam_scale * prng.randn( n_off_diag ) + (0.25 * lam_scale) weights[weights < 0] = 0 weights = weights + weights.T return weights
python
def _fully_random_weights(n_features, lam_scale, prng): """Generate a symmetric random matrix with zeros along the diagonal.""" weights = np.zeros((n_features, n_features)) n_off_diag = int((n_features ** 2 - n_features) / 2) weights[np.triu_indices(n_features, k=1)] = 0.1 * lam_scale * prng.randn( n_off_diag ) + (0.25 * lam_scale) weights[weights < 0] = 0 weights = weights + weights.T return weights
[ "def", "_fully_random_weights", "(", "n_features", ",", "lam_scale", ",", "prng", ")", ":", "weights", "=", "np", ".", "zeros", "(", "(", "n_features", ",", "n_features", ")", ")", "n_off_diag", "=", "int", "(", "(", "n_features", "**", "2", "-", "n_features", ")", "/", "2", ")", "weights", "[", "np", ".", "triu_indices", "(", "n_features", ",", "k", "=", "1", ")", "]", "=", "0.1", "*", "lam_scale", "*", "prng", ".", "randn", "(", "n_off_diag", ")", "+", "(", "0.25", "*", "lam_scale", ")", "weights", "[", "weights", "<", "0", "]", "=", "0", "weights", "=", "weights", "+", "weights", ".", "T", "return", "weights" ]
Generate a symmetric random matrix with zeros along the diagonal.
[ "Generate", "a", "symmetric", "random", "matrix", "with", "zeros", "along", "the", "diagonal", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L17-L26
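Replaying the construction with a fixed seed makes the two properties in the docstring easy to check; the scale values here are illustrative:

import numpy as np

n_features, lam_scale = 5, 1.0
prng = np.random.RandomState(0)
weights = np.zeros((n_features, n_features))
n_off_diag = (n_features ** 2 - n_features) // 2           # 10 strictly upper-triangular entries
weights[np.triu_indices(n_features, k=1)] = (
    0.1 * lam_scale * prng.randn(n_off_diag) + 0.25 * lam_scale
)
weights[weights < 0] = 0                                    # clip negative draws
weights = weights + weights.T                               # symmetrize; diagonal stays zero
assert np.allclose(weights, weights.T) and np.all(np.diag(weights) == 0)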
17,094
skggm/skggm
inverse_covariance/model_average.py
_fix_weights
def _fix_weights(weight_fun, *args): """Ensure random weight matrix is valid. TODO: The diagonally dominant tuning currently doesn't make sense. Our weight matrix has zeros along the diagonal, so multiplying by a diagonal matrix results in a zero-matrix. """ weights = weight_fun(*args) # TODO: fix this # disable checks for now return weights # if positive semidefinite, then we're good as is if _check_psd(weights): return weights # make diagonally dominant off_diag_sums = np.sum(weights, axis=1) # NOTE: assumes diag is zero mod_mat = np.linalg.inv(np.sqrt(np.diag(off_diag_sums))) return np.dot(mod_mat, weights, mod_mat)
python
def _fix_weights(weight_fun, *args): """Ensure random weight matrix is valid. TODO: The diagonally dominant tuning currently doesn't make sense. Our weight matrix has zeros along the diagonal, so multiplying by a diagonal matrix results in a zero-matrix. """ weights = weight_fun(*args) # TODO: fix this # disable checks for now return weights # if positive semidefinite, then we're good as is if _check_psd(weights): return weights # make diagonally dominant off_diag_sums = np.sum(weights, axis=1) # NOTE: assumes diag is zero mod_mat = np.linalg.inv(np.sqrt(np.diag(off_diag_sums))) return np.dot(mod_mat, weights, mod_mat)
[ "def", "_fix_weights", "(", "weight_fun", ",", "*", "args", ")", ":", "weights", "=", "weight_fun", "(", "*", "args", ")", "# TODO: fix this", "# disable checks for now", "return", "weights", "# if positive semidefinite, then we're good as is", "if", "_check_psd", "(", "weights", ")", ":", "return", "weights", "# make diagonally dominant", "off_diag_sums", "=", "np", ".", "sum", "(", "weights", ",", "axis", "=", "1", ")", "# NOTE: assumes diag is zero", "mod_mat", "=", "np", ".", "linalg", ".", "inv", "(", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "off_diag_sums", ")", ")", ")", "return", "np", ".", "dot", "(", "mod_mat", ",", "weights", ",", "mod_mat", ")" ]
Ensure random weight matrix is valid. TODO: The diagonally dominant tuning currently doesn't make sense. Our weight matrix has zeros along the diagonal, so multiplying by a diagonal matrix results in a zero-matrix.
[ "Ensure", "random", "weight", "matrix", "is", "valid", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L46-L66
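The disabled branch appears to aim for the symmetric rescaling D^{-1/2} W D^{-1/2}, with D built from the off-diagonal row sums. A hedged sketch of that computation (note that np.dot's third positional argument is an output buffer, so a chained product is usually spelled with multi_dot; this is not the code the estimator currently runs):

import numpy as np

def _rescale_by_row_sums(weights):
    """Sketch: scale a zero-diagonal symmetric matrix by its off-diagonal row sums."""
    off_diag_sums = np.sum(weights, axis=1)             # diagonal assumed zero, sums assumed positive
    d_inv_sqrt = np.diag(1.0 / np.sqrt(off_diag_sums))
    return np.linalg.multi_dot([d_inv_sqrt, weights, d_inv_sqrt])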
17,095
skggm/skggm
inverse_covariance/model_average.py
_fit
def _fit( indexed_params, penalization, lam, lam_perturb, lam_scale_, estimator, penalty_name, subsample, bootstrap, prng, X=None, ): """Wrapper function outside of instance for fitting a single model average trial. If X is None, then we assume we are using a broadcast spark object. Else, we expect X to get passed into this function. """ index = indexed_params if isinstance(X, np.ndarray): local_X = X else: local_X = X.value n_samples, n_features = local_X.shape prec_is_real = False while not prec_is_real: boot_lam = None if penalization == "subsampling": pass elif penalization == "random": boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng) elif penalization == "fully-random": boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng) else: raise NotImplementedError( ( "Only penalization = 'subsampling', " "'random', and 'fully-random' have " "been implemented. Found {}.".format(penalization) ) ) # new instance of estimator new_estimator = clone(estimator) if boot_lam is not None: new_estimator.set_params(**{penalty_name: boot_lam}) # fit estimator num_subsamples = int(subsample * n_samples) rp = bootstrap(n_samples, num_subsamples, prng) new_estimator.fit(local_X[rp, :]) # check that new_estimator.precision_ is real # if not, skip this boot_lam and try again if isinstance(new_estimator.precision_, list): prec_real_bools = [] for prec in new_estimator.precision_: prec_real_bools.append(np.all(np.isreal(prec))) prec_is_real = np.all(np.array(prec_real_bools) is True) elif isinstance(new_estimator.precision_, np.ndarray): prec_is_real = np.all(np.isreal(new_estimator.precision_)) else: raise ValueError("Estimator returned invalid precision_.") return index, (boot_lam, rp, new_estimator)
python
def _fit( indexed_params, penalization, lam, lam_perturb, lam_scale_, estimator, penalty_name, subsample, bootstrap, prng, X=None, ): """Wrapper function outside of instance for fitting a single model average trial. If X is None, then we assume we are using a broadcast spark object. Else, we expect X to get passed into this function. """ index = indexed_params if isinstance(X, np.ndarray): local_X = X else: local_X = X.value n_samples, n_features = local_X.shape prec_is_real = False while not prec_is_real: boot_lam = None if penalization == "subsampling": pass elif penalization == "random": boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng) elif penalization == "fully-random": boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng) else: raise NotImplementedError( ( "Only penalization = 'subsampling', " "'random', and 'fully-random' have " "been implemented. Found {}.".format(penalization) ) ) # new instance of estimator new_estimator = clone(estimator) if boot_lam is not None: new_estimator.set_params(**{penalty_name: boot_lam}) # fit estimator num_subsamples = int(subsample * n_samples) rp = bootstrap(n_samples, num_subsamples, prng) new_estimator.fit(local_X[rp, :]) # check that new_estimator.precision_ is real # if not, skip this boot_lam and try again if isinstance(new_estimator.precision_, list): prec_real_bools = [] for prec in new_estimator.precision_: prec_real_bools.append(np.all(np.isreal(prec))) prec_is_real = np.all(np.array(prec_real_bools) is True) elif isinstance(new_estimator.precision_, np.ndarray): prec_is_real = np.all(np.isreal(new_estimator.precision_)) else: raise ValueError("Estimator returned invalid precision_.") return index, (boot_lam, rp, new_estimator)
[ "def", "_fit", "(", "indexed_params", ",", "penalization", ",", "lam", ",", "lam_perturb", ",", "lam_scale_", ",", "estimator", ",", "penalty_name", ",", "subsample", ",", "bootstrap", ",", "prng", ",", "X", "=", "None", ",", ")", ":", "index", "=", "indexed_params", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "local_X", "=", "X", "else", ":", "local_X", "=", "X", ".", "value", "n_samples", ",", "n_features", "=", "local_X", ".", "shape", "prec_is_real", "=", "False", "while", "not", "prec_is_real", ":", "boot_lam", "=", "None", "if", "penalization", "==", "\"subsampling\"", ":", "pass", "elif", "penalization", "==", "\"random\"", ":", "boot_lam", "=", "_fix_weights", "(", "_random_weights", ",", "n_features", ",", "lam", ",", "lam_perturb", ",", "prng", ")", "elif", "penalization", "==", "\"fully-random\"", ":", "boot_lam", "=", "_fix_weights", "(", "_fully_random_weights", ",", "n_features", ",", "lam_scale_", ",", "prng", ")", "else", ":", "raise", "NotImplementedError", "(", "(", "\"Only penalization = 'subsampling', \"", "\"'random', and 'fully-random' have \"", "\"been implemented. Found {}.\"", ".", "format", "(", "penalization", ")", ")", ")", "# new instance of estimator", "new_estimator", "=", "clone", "(", "estimator", ")", "if", "boot_lam", "is", "not", "None", ":", "new_estimator", ".", "set_params", "(", "*", "*", "{", "penalty_name", ":", "boot_lam", "}", ")", "# fit estimator", "num_subsamples", "=", "int", "(", "subsample", "*", "n_samples", ")", "rp", "=", "bootstrap", "(", "n_samples", ",", "num_subsamples", ",", "prng", ")", "new_estimator", ".", "fit", "(", "local_X", "[", "rp", ",", ":", "]", ")", "# check that new_estimator.precision_ is real", "# if not, skip this boot_lam and try again", "if", "isinstance", "(", "new_estimator", ".", "precision_", ",", "list", ")", ":", "prec_real_bools", "=", "[", "]", "for", "prec", "in", "new_estimator", ".", "precision_", ":", "prec_real_bools", ".", "append", "(", "np", ".", "all", "(", "np", ".", "isreal", "(", "prec", ")", ")", ")", "prec_is_real", "=", "np", ".", "all", "(", "np", ".", "array", "(", "prec_real_bools", ")", "is", "True", ")", "elif", "isinstance", "(", "new_estimator", ".", "precision_", ",", "np", ".", "ndarray", ")", ":", "prec_is_real", "=", "np", ".", "all", "(", "np", ".", "isreal", "(", "new_estimator", ".", "precision_", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Estimator returned invalid precision_.\"", ")", "return", "index", ",", "(", "boot_lam", ",", "rp", ",", "new_estimator", ")" ]
Wrapper function outside of instance for fitting a single model average trial. If X is None, then we assume we are using a broadcast spark object. Else, we expect X to get passed into this function.
[ "Wrapper", "function", "outside", "of", "instance", "for", "fitting", "a", "single", "model", "average", "trial", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L74-L145
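The only thing _fit assumes about a non-ndarray X is a .value attribute holding the data, which is what a Spark broadcast variable exposes; a minimal stand-in for local testing (the Broadcast-like class is hypothetical):

import numpy as np

class FakeBroadcast(object):
    """Hypothetical stand-in for pyspark Broadcast: only exposes .value."""
    def __init__(self, value):
        self.value = value

X = np.random.RandomState(0).randn(50, 4)
for candidate in (X, FakeBroadcast(X)):
    local_X = candidate if isinstance(candidate, np.ndarray) else candidate.value
    assert local_X.shape == (50, 4)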
17,096
skggm/skggm
inverse_covariance/model_average.py
_spark_map
def _spark_map(fun, indexed_param_grid, sc, seed, X_bc): """We cannot pass a RandomState instance to each spark worker since it will behave identically across partitions. Instead, we explictly handle the partitions with a newly seeded instance. The seed for each partition will be the "seed" (MonteCarloProfile.seed) + "split_index" which is the partition index. Following this trick: https://wegetsignal.wordpress.com/2015/05/08/ generating-random-numbers-for-rdd-in-spark/ """ def _wrap_random_state(split_index, partition): prng = np.random.RandomState(seed + split_index) yield map(partial(fun, prng=prng, X=X_bc), partition) par_param_grid = sc.parallelize(indexed_param_grid) indexed_results = par_param_grid.mapPartitionsWithIndex( _wrap_random_state ).collect() return [item for sublist in indexed_results for item in sublist]
python
def _spark_map(fun, indexed_param_grid, sc, seed, X_bc): """We cannot pass a RandomState instance to each spark worker since it will behave identically across partitions. Instead, we explictly handle the partitions with a newly seeded instance. The seed for each partition will be the "seed" (MonteCarloProfile.seed) + "split_index" which is the partition index. Following this trick: https://wegetsignal.wordpress.com/2015/05/08/ generating-random-numbers-for-rdd-in-spark/ """ def _wrap_random_state(split_index, partition): prng = np.random.RandomState(seed + split_index) yield map(partial(fun, prng=prng, X=X_bc), partition) par_param_grid = sc.parallelize(indexed_param_grid) indexed_results = par_param_grid.mapPartitionsWithIndex( _wrap_random_state ).collect() return [item for sublist in indexed_results for item in sublist]
[ "def", "_spark_map", "(", "fun", ",", "indexed_param_grid", ",", "sc", ",", "seed", ",", "X_bc", ")", ":", "def", "_wrap_random_state", "(", "split_index", ",", "partition", ")", ":", "prng", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "+", "split_index", ")", "yield", "map", "(", "partial", "(", "fun", ",", "prng", "=", "prng", ",", "X", "=", "X_bc", ")", ",", "partition", ")", "par_param_grid", "=", "sc", ".", "parallelize", "(", "indexed_param_grid", ")", "indexed_results", "=", "par_param_grid", ".", "mapPartitionsWithIndex", "(", "_wrap_random_state", ")", ".", "collect", "(", ")", "return", "[", "item", "for", "sublist", "in", "indexed_results", "for", "item", "in", "sublist", "]" ]
We cannot pass a RandomState instance to each spark worker since it will behave identically across partitions. Instead, we explicitly handle the partitions with a newly seeded instance. The seed for each partition will be the "seed" (MonteCarloProfile.seed) + "split_index", which is the partition index. Following this trick: https://wegetsignal.wordpress.com/2015/05/08/ generating-random-numbers-for-rdd-in-spark/
[ "We", "cannot", "pass", "a", "RandomState", "instance", "to", "each", "spark", "worker", "since", "it", "will", "behave", "identically", "across", "partitions", ".", "Instead", "we", "explictly", "handle", "the", "partitions", "with", "a", "newly", "seeded", "instance", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L156-L177
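The per-partition seeding trick can be emulated without Spark: each partition index gets its own RandomState, so no two partitions replay the same random stream (the partition contents and work function are illustrative):

import numpy as np
from functools import partial

def work(item, prng):
    return item + prng.randn()

seed = 42
partitions = [[0.0, 1.0], [2.0, 3.0]]                       # stand-ins for RDD partitions
results = []
for split_index, partition in enumerate(partitions):
    prng = np.random.RandomState(seed + split_index)        # unique stream per partition
    results.extend(map(partial(work, prng=prng), partition))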
17,097
skggm/skggm
examples/estimator_suite_spark.py
quic_graph_lasso_ebic_manual
def quic_graph_lasso_ebic_manual(X, gamma=0): """Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model selection. The EBIC criteria is built into InverseCovarianceEstimator base class so we demonstrate those utilities here. """ print("QuicGraphicalLasso (manual EBIC) with:") print(" mode: path") print(" gamma: {}".format(gamma)) model = QuicGraphicalLasso( lam=1.0, mode="path", init_method="cov", path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True), ) model.fit(X) ebic_index = model.ebic_select(gamma=gamma) covariance_ = model.covariance_[ebic_index] precision_ = model.precision_[ebic_index] lam_ = model.lam_at_index(ebic_index) print(" len(path lams): {}".format(len(model.path_))) print(" lam_scale_: {}".format(model.lam_scale_)) print(" lam_: {}".format(lam_)) print(" ebic_index: {}".format(ebic_index)) return covariance_, precision_, lam_
python
def quic_graph_lasso_ebic_manual(X, gamma=0): """Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model selection. The EBIC criteria is built into InverseCovarianceEstimator base class so we demonstrate those utilities here. """ print("QuicGraphicalLasso (manual EBIC) with:") print(" mode: path") print(" gamma: {}".format(gamma)) model = QuicGraphicalLasso( lam=1.0, mode="path", init_method="cov", path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True), ) model.fit(X) ebic_index = model.ebic_select(gamma=gamma) covariance_ = model.covariance_[ebic_index] precision_ = model.precision_[ebic_index] lam_ = model.lam_at_index(ebic_index) print(" len(path lams): {}".format(len(model.path_))) print(" lam_scale_: {}".format(model.lam_scale_)) print(" lam_: {}".format(lam_)) print(" ebic_index: {}".format(ebic_index)) return covariance_, precision_, lam_
[ "def", "quic_graph_lasso_ebic_manual", "(", "X", ",", "gamma", "=", "0", ")", ":", "print", "(", "\"QuicGraphicalLasso (manual EBIC) with:\"", ")", "print", "(", "\" mode: path\"", ")", "print", "(", "\" gamma: {}\"", ".", "format", "(", "gamma", ")", ")", "model", "=", "QuicGraphicalLasso", "(", "lam", "=", "1.0", ",", "mode", "=", "\"path\"", ",", "init_method", "=", "\"cov\"", ",", "path", "=", "np", ".", "logspace", "(", "np", ".", "log10", "(", "0.01", ")", ",", "np", ".", "log10", "(", "1.0", ")", ",", "num", "=", "100", ",", "endpoint", "=", "True", ")", ",", ")", "model", ".", "fit", "(", "X", ")", "ebic_index", "=", "model", ".", "ebic_select", "(", "gamma", "=", "gamma", ")", "covariance_", "=", "model", ".", "covariance_", "[", "ebic_index", "]", "precision_", "=", "model", ".", "precision_", "[", "ebic_index", "]", "lam_", "=", "model", ".", "lam_at_index", "(", "ebic_index", ")", "print", "(", "\" len(path lams): {}\"", ".", "format", "(", "len", "(", "model", ".", "path_", ")", ")", ")", "print", "(", "\" lam_scale_: {}\"", ".", "format", "(", "model", ".", "lam_scale_", ")", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "lam_", ")", ")", "print", "(", "\" ebic_index: {}\"", ".", "format", "(", "ebic_index", ")", ")", "return", "covariance_", ",", "precision_", ",", "lam_" ]
Run QuicGraphicalLasso with mode='path' and gamma; use the EBIC criterion for model selection. The EBIC criterion is built into the InverseCovarianceEstimator base class, so we demonstrate those utilities here.
[ "Run", "QuicGraphicalLasso", "with", "mode", "=", "path", "and", "gamma", ";", "use", "EBIC", "criteria", "for", "model", "selection", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L110-L135
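For reference, the quantity ebic_select minimizes is, in the usual Foygel-Drton form (stated here from the literature, not read out of this file): EBIC_gamma = -2 * loglik + |E| * log(n) + 4 * gamma * |E| * log(p), where |E| is the number of nonzero off-diagonal entries in the estimated precision matrix; gamma = 0 recovers ordinary BIC, and larger gamma favors sparser models.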
17,098
skggm/skggm
examples/estimator_suite_spark.py
quic_graph_lasso_ebic
def quic_graph_lasso_ebic(X, gamma=0): """Run QuicGraphicalLassoEBIC with gamma. QuicGraphicalLassoEBIC is a convenience class. Results should be identical to those obtained via quic_graph_lasso_ebic_manual. """ print("QuicGraphicalLassoEBIC with:") print(" mode: path") print(" gamma: {}".format(gamma)) model = QuicGraphicalLassoEBIC(lam=1.0, init_method="cov", gamma=gamma) model.fit(X) print(" len(path lams): {}".format(len(model.path_))) print(" lam_scale_: {}".format(model.lam_scale_)) print(" lam_: {}".format(model.lam_)) return model.covariance_, model.precision_, model.lam_
python
def quic_graph_lasso_ebic(X, gamma=0): """Run QuicGraphicalLassoEBIC with gamma. QuicGraphicalLassoEBIC is a convenience class. Results should be identical to those obtained via quic_graph_lasso_ebic_manual. """ print("QuicGraphicalLassoEBIC with:") print(" mode: path") print(" gamma: {}".format(gamma)) model = QuicGraphicalLassoEBIC(lam=1.0, init_method="cov", gamma=gamma) model.fit(X) print(" len(path lams): {}".format(len(model.path_))) print(" lam_scale_: {}".format(model.lam_scale_)) print(" lam_: {}".format(model.lam_)) return model.covariance_, model.precision_, model.lam_
[ "def", "quic_graph_lasso_ebic", "(", "X", ",", "gamma", "=", "0", ")", ":", "print", "(", "\"QuicGraphicalLassoEBIC with:\"", ")", "print", "(", "\" mode: path\"", ")", "print", "(", "\" gamma: {}\"", ".", "format", "(", "gamma", ")", ")", "model", "=", "QuicGraphicalLassoEBIC", "(", "lam", "=", "1.0", ",", "init_method", "=", "\"cov\"", ",", "gamma", "=", "gamma", ")", "model", ".", "fit", "(", "X", ")", "print", "(", "\" len(path lams): {}\"", ".", "format", "(", "len", "(", "model", ".", "path_", ")", ")", ")", "print", "(", "\" lam_scale_: {}\"", ".", "format", "(", "model", ".", "lam_scale_", ")", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "model", ".", "lam_", ")", ")", "return", "model", ".", "covariance_", ",", "model", ".", "precision_", ",", "model", ".", "lam_" ]
Run QuicGraphicalLassoEBIC with gamma. QuicGraphicalLassoEBIC is a convenience class. Results should be identical to those obtained via quic_graph_lasso_ebic_manual.
[ "Run", "QuicGraphicalLassoEBIC", "with", "gamma", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L138-L152

17,099
skggm/skggm
examples/estimator_suite_spark.py
empirical
def empirical(X): """Compute empirical covariance as baseline estimator. """ print("Empirical") cov = np.dot(X.T, X) / n_samples return cov, np.linalg.inv(cov)
python
def empirical(X): """Compute empirical covariance as baseline estimator. """ print("Empirical") cov = np.dot(X.T, X) / n_samples return cov, np.linalg.inv(cov)
[ "def", "empirical", "(", "X", ")", ":", "print", "(", "\"Empirical\"", ")", "cov", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", "/", "n_samples", "return", "cov", ",", "np", ".", "linalg", ".", "inv", "(", "cov", ")" ]
Compute empirical covariance as baseline estimator.
[ "Compute", "empirical", "covariance", "as", "baseline", "estimator", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L232-L237
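Note that empirical() relies on a module-level n_samples and assumes the columns of X are already centered; a self-contained equivalent with explicit centering (illustrative data):

import numpy as np

X = np.random.RandomState(0).randn(200, 3)
Xc = X - X.mean(axis=0)                       # center columns first
cov = np.dot(Xc.T, Xc) / Xc.shape[0]          # empirical (maximum-likelihood) covariance
prec = np.linalg.inv(cov)                     # baseline precision estimate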