| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q23900
|
Context._pop_buffer_and_writer
|
train
|
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
|
python
|
{
"resource": ""
}
|
q23901
|
LoopContext.cycle
|
train
|
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
|
python
|
{
"resource": ""
}
|
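A minimal standalone sketch of the cycling behaviour above, using a hypothetical stand-in class that only tracks a loop index (Mako's real LoopContext carries much more state):

class _Loop:
    # Hypothetical stand-in for Mako's LoopContext; only the index matters here.
    def __init__(self):
        self.index = 0

    def cycle(self, *values):
        if not values:
            raise ValueError("You must provide values to cycle through")
        return values[self.index % len(values)]

loop = _Loop()
for i in range(4):
    loop.index = i
    print(loop.cycle("odd", "even"))  # odd, even, odd, even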
q23902
|
Namespace.include_file
|
train
|
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
|
python
|
{
"resource": ""
}
|
q23903
|
find_path
|
train
|
def find_path(dirs, path_to_find):
"""
    Go through a list of dirs and check whether dir + path_to_find exists there.
    Return the first dir that matches; otherwise, return None.
"""
for dir in dirs:
if os.path.exists(os.path.join(dir, path_to_find)):
return dir
return None
|
python
|
{
"resource": ""
}
|
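A small usage sketch, assuming the find_path function above is in scope; the temporary directory and file name are only illustrative:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    open(os.path.join(tmp, "ansible.cfg"), "w").close()
    # The first candidate doesn't contain the file, so the temp dir is returned.
    print(find_path(["/nonexistent", tmp], "ansible.cfg"))  # -> tmp
    print(find_path(["/nonexistent"], "ansible.cfg"))       # -> None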
q23904
|
to_bool
|
train
|
def to_bool(s):
"""
Convert string `s` into a boolean. `s` can be 'true', 'True', 1, 'false',
'False', 0.
Examples:
>>> to_bool("true")
True
>>> to_bool("0")
False
>>> to_bool(True)
True
"""
if isinstance(s, bool):
return s
    # str() so the int inputs 1 and 0 mentioned in the docstring work as well
    elif str(s).lower() in ['true', '1']:
        return True
    elif str(s).lower() in ['false', '0']:
        return False
else:
raise ValueError("Can't cast '%s' to bool" % (s))
|
python
|
{
"resource": ""
}
|
q23905
|
Template.render
|
train
|
def render(self, *args, **data):
"""Render the output of this template as a string.
If the template specifies an output encoding, the string
will be encoded accordingly, else the output is raw (raw
output uses `cStringIO` and can't handle multibyte
characters). A :class:`.Context` object is created corresponding
to the given data. Arguments that are explicitly declared
by this template's internal rendering method are also
pulled from the given ``*args``, ``**data`` members.
"""
return runtime._render(self, self.callable_, args, data)
|
python
|
{
"resource": ""
}
|
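A brief usage sketch of rendering through Mako's public API (the template text and arguments are illustrative):

from mako.template import Template

tmpl = Template(u"Hello, ${name}!", output_encoding='utf-8')
print(tmpl.render(name=u"world"))  # b'Hello, world!' (bytes, encoded per output_encoding)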
q23906
|
Template.render_unicode
|
train
|
def render_unicode(self, *args, **data):
"""Render the output of this template as a unicode object."""
return runtime._render(self,
self.callable_,
args,
data,
as_unicode=True)
|
python
|
{
"resource": ""
}
|
q23907
|
strip_exts
|
train
|
def strip_exts(s, exts):
"""
    Given a string and an iterable of extensions, strip the extension off the
    string if the string ends with one of the extensions.
"""
f_split = os.path.splitext(s)
if f_split[1] in exts:
return f_split[0]
else:
return s
|
python
|
{
"resource": ""
}
|
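A quick sketch of the stripping behaviour, assuming the strip_exts function above is in scope:

exts = ('.yml', '.yaml', '.json')
print(strip_exts("web1.example.com.yml", exts))  # -> web1.example.com
print(strip_exts("web1.example.com", exts))      # unchanged: '.com' is not in exts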
q23908
|
Ansible._parse_hosts_inventory
|
train
|
def _parse_hosts_inventory(self, inventory_path):
"""
Read all the available hosts inventory information into one big list
and parse it.
"""
hosts_contents = []
if os.path.isdir(inventory_path):
self.log.debug("Inventory path {} is a dir. Looking for inventory files in that dir.".format(inventory_path))
for fname in os.listdir(inventory_path):
# Skip .git folder
if fname == '.git':
continue
path = os.path.join(inventory_path, fname)
if os.path.isdir(path):
continue
with codecs.open(path, 'r', encoding='utf8') as f:
hosts_contents += f.readlines()
else:
self.log.debug("Inventory path {} is a file. Reading as inventory.".format(inventory_path))
with codecs.open(inventory_path, 'r', encoding='utf8') as f:
hosts_contents = f.readlines()
# Parse inventory and apply it to the hosts
hosts_parser = parser.HostsParser(hosts_contents)
for hostname, key_values in hosts_parser.hosts.items():
self.update_host(hostname, key_values)
|
python
|
{
"resource": ""
}
|
q23909
|
Ansible._parse_hostvar_dir
|
train
|
def _parse_hostvar_dir(self, inventory_path):
"""
Parse host_vars dir, if it exists.
"""
# inventory_path could point to a `hosts` file, or to a dir. So we
# construct the location to the `host_vars` differently.
if os.path.isdir(inventory_path):
path = os.path.join(inventory_path, 'host_vars')
else:
path = os.path.join(os.path.dirname(inventory_path), 'host_vars')
self.log.debug("Parsing host vars (dir): {0}".format(path))
if not os.path.exists(path):
self.log.info("No such dir {0}".format(path))
return
for entry in os.listdir(path):
# Skip .git folder
if entry == '.git':
continue
full_path = os.path.join(path, entry)
# file or dir name is the hostname
hostname = strip_exts(entry, ('.yml', '.yaml', '.json'))
if os.path.isfile(full_path):
# Parse contents of file as host vars.
self._parse_hostvar_file(hostname, full_path)
elif os.path.isdir(full_path):
# Parse each file in the directory as a file containing
# variables for the host.
for file_entry in os.listdir(full_path):
p = os.path.join(full_path, file_entry)
if not os.path.isdir(p):
self._parse_hostvar_file(hostname, p)
|
python
|
{
"resource": ""
}
|
q23910
|
Ansible._parse_hostvar_file
|
train
|
def _parse_hostvar_file(self, hostname, path):
"""
Parse a host var file and apply it to host `hostname`.
"""
# Check for ansible-vault files, because they're valid yaml for
# some reason... (psst, the reason is that yaml sucks)
    with open(path, 'r') as vault_check:
        first_line = vault_check.readline()
if first_line.startswith('$ANSIBLE_VAULT'):
self.log.warning("Skipping encrypted vault file {0}".format(path))
return
try:
self.log.debug("Reading host vars from {}".format(path))
f = codecs.open(path, 'r', encoding='utf8')
invars = ihateyaml.safe_load(f)
f.close()
except Exception as err:
# Just catch everything because yaml...
self.log.warning("Yaml couldn't load '{0}'. Skipping. Error was: {1}".format(path, err))
return
if invars is None:
# Empty file or whatever. This is *probably* a side-effect of our
# own yaml.SafeLoader implementation ('ihateyaml'), because this
# problem didn't exist before.
return
if hostname == "all":
# Hostname 'all' is special and applies to all hosts
for hostname in self.hosts_all():
self.update_host(hostname, {'hostvars': invars}, overwrite=False)
else:
self.update_host(hostname, {'hostvars': invars}, overwrite=True)
|
python
|
{
"resource": ""
}
|
q23911
|
Ansible._parse_groupvar_dir
|
train
|
def _parse_groupvar_dir(self, inventory_path):
"""
Parse group_vars dir, if it exists. Encrypted vault files are skipped.
"""
# inventory_path could point to a `hosts` file, or to a dir. So we
# construct the location to the `group_vars` differently.
if os.path.isdir(inventory_path):
path = os.path.join(inventory_path, 'group_vars')
else:
path = os.path.join(os.path.dirname(inventory_path), 'group_vars')
self.log.debug("Parsing group vars (dir): {0}".format(path))
if not os.path.exists(path):
self.log.info("No such dir {0}".format(path))
return
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
full_path = os.path.join(dirpath, filename)
# filename is the group name
groupname = strip_exts(filename, ('.yml', '.yaml', '.json'))
try:
self.log.debug("Reading group vars from {}".format(full_path))
f = codecs.open(full_path, 'r', encoding='utf8')
invars = ihateyaml.safe_load(f)
f.close()
except Exception as err:
# Just catch everything because yaml...
self.log.warning("Yaml couldn't load '{0}' because '{1}'. Skipping".format(full_path, err))
continue # Go to next file
if groupname == 'all':
# groupname 'all' is special and applies to all hosts.
for hostname in self.hosts_all():
self.update_host(hostname, {'hostvars': invars}, overwrite=False)
else:
for hostname in self.hosts_in_group(groupname):
self.update_host(hostname, {'hostvars': invars}, overwrite=False)
|
python
|
{
"resource": ""
}
|
q23912
|
Ansible._parse_dyn_inventory
|
train
|
def _parse_dyn_inventory(self, script):
"""
Execute a dynamic inventory script and parse the results.
"""
self.log.debug("Reading dynamic inventory {0}".format(script))
try:
proc = subprocess.Popen([script, '--list'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
        stdout, stderr = proc.communicate()
if proc.returncode != 0:
sys.stderr.write("Dynamic inventory script '{0}' returned "
"exitcode {1}\n".format(script,
proc.returncode))
            sys.stderr.write(stderr.decode('utf8'))
dyninv_parser = parser.DynInvParser(stdout.decode('utf8'))
for hostname, key_values in dyninv_parser.hosts.items():
self.update_host(hostname, key_values)
except OSError as err:
sys.stderr.write("Exception while executing dynamic inventory script '{0}':\n\n".format(script))
sys.stderr.write(str(err) + '\n')
|
python
|
{
"resource": ""
}
|
q23913
|
Ansible.update_host
|
train
|
def update_host(self, hostname, key_values, overwrite=True):
"""
    Update a host's information. This is called by various collectors, such
    as the ansible setup module output and the hosts parser, to add
    information to a host. It does some deep inspection to make sure nested
    information can be updated.
"""
default_empty_host = {
'name': hostname,
'hostvars': {},
}
host_info = self.hosts.get(hostname, default_empty_host)
util.deepupdate(host_info, key_values, overwrite=overwrite)
self.hosts[hostname] = host_info
|
python
|
{
"resource": ""
}
|
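The merge step relies on util.deepupdate, which is not shown here; a rough sketch of the idea (a hypothetical helper, not the actual ansible-cmdb implementation):

def deepupdate(target, src, overwrite=True):
    # Recursively merge src into target; nested dicts are merged rather than replaced.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(target.get(key), dict):
            deepupdate(target[key], value, overwrite=overwrite)
        elif overwrite or key not in target:
            target[key] = value

host = {'name': 'web1', 'hostvars': {'ansible_user': 'deploy'}}
deepupdate(host, {'hostvars': {'env': 'prod'}, 'groups': ['web']})
print(host)  # 'hostvars' now holds both keys and 'groups' was added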
q23914
|
Ansible.hosts_in_group
|
train
|
def hosts_in_group(self, groupname):
"""
Return a list of hostnames that are in a group.
"""
result = []
for hostname, hostinfo in self.hosts.items():
if groupname == 'all':
result.append(hostname)
elif 'groups' in hostinfo:
if groupname in hostinfo['groups']:
result.append(hostname)
else:
hostinfo['groups'] = [groupname]
return result
|
python
|
{
"resource": ""
}
|
q23915
|
Ansible.get_hosts
|
train
|
def get_hosts(self):
"""
Return a list of parsed hosts info, with the limit applied if required.
"""
limited_hosts = {}
if self.limit is not None:
# Find hosts and groups of hosts to include
for include in self.limit['include']:
# Include whole group
for hostname in self.hosts_in_group(include):
limited_hosts[hostname] = self.hosts[hostname]
# Include individual host
if include in self.hosts:
limited_hosts[include] = self.hosts[include]
# Find hosts and groups of hosts to exclude
for exclude in self.limit["exclude"]:
# Exclude whole group
for hostname in self.hosts_in_group(exclude):
if hostname in limited_hosts:
limited_hosts.pop(hostname)
# Exclude individual host
if exclude in limited_hosts:
limited_hosts.pop(exclude)
return limited_hosts
else:
# Return all hosts
return self.hosts
|
python
|
{
"resource": ""
}
|
q23916
|
get_logger
|
train
|
def get_logger():
"""
Instantiate a logger.
"""
root = logging.getLogger()
root.setLevel(logging.WARNING)
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
return root
|
python
|
{
"resource": ""
}
|
q23917
|
get_data_dir
|
train
|
def get_data_dir():
"""
Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed.
"""
data_dir_paths = [
os.path.join(os.path.dirname(ansiblecmdb.__file__), 'data'),
os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib', 'ansiblecmdb', 'data'),
'/usr/local/lib/ansiblecmdb/data',
'/usr/lib/ansiblecmdb/data',
]
data_dir = util.find_path(data_dir_paths, 'tpl/html_fancy.tpl')
if not data_dir:
sys.stdout.write("Couldn't find the data dir for the templates. I tried: {0}\n".format(", ".join(data_dir_paths)))
sys.exit(1)
return data_dir
|
python
|
{
"resource": ""
}
|
q23918
|
get_hosts_files
|
train
|
def get_hosts_files(option):
"""
Find out the location of the `hosts` file. This looks in multiple places
such as the `-i` option, current dir and ansible configuration files. The
first match is returned as a list.
"""
if option is not None:
return option.split(',')
# Use hosts file from the current dir if it exists
if os.path.isfile('hosts'):
return ['hosts']
# Perhaps it's configured in a configuration file. Try to find a
    # configuration file and see if it contains a `hostfile` entry.
config_locations = [
'.',
'/etc/ansible/'
]
config_dir = util.find_path(config_locations, 'ansible.cfg')
log.debug('config_dir = {0}'.format(config_dir))
if config_dir:
with open(os.path.join(config_dir, 'ansible.cfg'), 'r') as cf:
for line in cf:
if line.startswith('hostfile'):
return [line.split('=', 1)[1].strip()]
|
python
|
{
"resource": ""
}
|
q23919
|
get_cust_cols
|
train
|
def get_cust_cols(path):
"""
Load custom column definitions.
"""
required_keys = ["title", "id", "sType", "visible"]
with open(path, 'r') as f:
try:
cust_cols = ast.literal_eval(f.read())
except Exception as err:
sys.stderr.write("Invalid custom columns file: {}\n".format(path))
sys.stderr.write("{}\n".format(err))
sys.exit(1)
# Validate
for col in cust_cols:
for required_key in required_keys:
if required_key not in col:
sys.stderr.write("Missing required key '{}' in custom "
"column {}\n".format(required_key, col))
sys.exit(1)
if "jsonxs" not in col and "tpl" not in col:
sys.stderr.write("You need to specify 'jsonxs' or 'tpl' "
"for custom column {}\n".format(col))
sys.exit(1)
return cust_cols
|
python
|
{
"resource": ""
}
|
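A sketch of what a custom-columns file accepted by this loader could contain; the keys come from required_keys above plus the jsonxs/tpl check, while the values are purely illustrative:

# Hypothetical contents of a cust_cols file, parsed with ast.literal_eval:
[
    {
        "title": "Kernel",
        "id": "kernel",
        "sType": "string",
        "visible": True,
        "jsonxs": "ansible_facts.ansible_kernel",
    },
]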
q23920
|
Render._tpl_possibilities
|
train
|
def _tpl_possibilities(self):
"""
Construct a list of possible paths to templates.
"""
tpl_possibilities = [
os.path.realpath(self.tpl)
]
for tpl_dir in self.tpl_dirs:
tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, "{0}.tpl".format(self.tpl))))
tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, "{0}.py".format(self.tpl))))
return tpl_possibilities
|
python
|
{
"resource": ""
}
|
q23921
|
Render._find_tpl
|
train
|
def _find_tpl(self):
"""
Find a template in the list of possible paths.
"""
for tpl_possibility in self.tpl_possibilities:
if os.path.isfile(tpl_possibility):
return tpl_possibility
return None
|
python
|
{
"resource": ""
}
|
q23922
|
Render.render
|
train
|
def render(self, hosts, vars={}):
"""
Render a mako or .py file.
"""
if self.tpl_file.endswith(".tpl"):
return self._render_mako(hosts, vars)
elif self.tpl_file.endswith(".py"):
return self._render_py(hosts, vars)
else:
raise ValueError("Don't know how to handle '{0}'".format(self.tpl_file))
|
python
|
{
"resource": ""
}
|
q23923
|
shlex.sourcehook
|
train
|
def sourcehook(self, newfile, encoding='utf-8'):
"Hook called on a filename to be sourced."
from codecs import open
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r", encoding))
|
python
|
{
"resource": ""
}
|
q23924
|
TGPlugin.load_template
|
train
|
def load_template(self, templatename, template_string=None):
"""Loads a template from a file or a string"""
if template_string is not None:
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template
return self.lookup.get_template(templatename)
|
python
|
{
"resource": ""
}
|
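The TurboGears dot-notation translation above, seen in isolation; a small sketch assuming an extension of 'mak' (in the plugin this comes from self.extension):

extension = 'mak'  # assumed for the sketch
templatename = 'myapp.templates.index'
if '/' not in templatename:
    templatename = '/' + templatename.replace('.', '/') + '.' + extension
print(templatename)  # -> /myapp/templates/index.mak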
q23925
|
FunctionDecl.get_argument_expressions
|
train
|
def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else:
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls
|
python
|
{
"resource": ""
}
|
q23926
|
legacy_html_escape
|
train
|
def legacy_html_escape(s):
"""legacy HTML escape for non-unicode mode."""
s = s.replace("&", "&")
s = s.replace(">", ">")
s = s.replace("<", "<")
s = s.replace('"', """)
s = s.replace("'", "'")
return s
|
python
|
{
"resource": ""
}
|
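A usage sketch, assuming the legacy_html_escape function above is in scope:

print(legacy_html_escape('<a href="x">Tom & Jerry</a>'))
# -> &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&lt;/a&gt;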
q23927
|
htmlentityreplace_errors
|
train
|
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python `codecs`_ error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references.
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
    'The cost was &euro;12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (compat.text_type(text), ex.end)
raise ex
|
python
|
{
"resource": ""
}
|
q23928
|
XMLEntityEscaper.escape
|
train
|
def escape(self, text):
"""Replace characters with their character references.
Replace characters by their named entity references.
Non-ASCII characters, if they do not have a named entity reference,
are replaced by numerical character references.
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(self.__escape, compat.text_type(text)
).encode('ascii')
|
python
|
{
"resource": ""
}
|
q23929
|
MessageExtractor._split_comment
|
train
|
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
|
python
|
{
"resource": ""
}
|
q23930
|
adjust_whitespace
|
train
|
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace=''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
stripspace = None
for line in re.split(r'\r?\n', text):
if in_multi_line(line):
lines.append(line)
else:
line = line.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines)
|
python
|
{
"resource": ""
}
|
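A rough usage sketch, assuming the adjust_whitespace function above is in scope; the indented block is illustrative:

block = """
    x = 1
    if x:
        print(x)
"""
# The common four-space left margin is stripped, leaving top-level Python code.
print(adjust_whitespace(block))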
q23931
|
PythonPrinter.write_indented_block
|
train
|
def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
self.in_indent_lines = False
for l in re.split(r'\r?\n', block):
self.line_buffer.append(l)
self._update_lineno(1)
|
python
|
{
"resource": ""
}
|
q23932
|
PythonPrinter.writeline
|
train
|
def writeline(self, line):
"""print a line of python, indenting it according to the current
indent level.
this also adjusts the indentation counter according to the
content of the line.
"""
if not self.in_indent_lines:
self._flush_adjusted_lines()
self.in_indent_lines = True
if (line is None or
re.match(r"^\s*#",line) or
re.match(r"^\s*$", line)
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level
if (not is_comment and
(not hastext or self._is_unindentor(line))
):
if self.indent > 0:
self.indent -= 1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
            # module won't compile.
if len(self.indent_detail) == 0:
raise exceptions.SyntaxException(
"Too many whitespace closures")
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
self._update_lineno(len(line.split("\n")))
# see if this line should increase the indentation level.
    # note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
# increment indentation count, and also
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match:
# its a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
self.indent += 1
self.indent_detail.append(indentor)
else:
indentor = None
# its not a "compound" keyword. but lets also
# test for valid Python keywords that might be indenting us,
# else assume its a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
line)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
|
python
|
{
"resource": ""
}
|
q23933
|
PythonPrinter._is_unindentor
|
train
|
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
    # if the current line doesn't have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
|
python
|
{
"resource": ""
}
|
q23934
|
PythonPrinter._indent_line
|
train
|
def _indent_line(self, line, stripspace=''):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the
start of the line before indenting."""
return re.sub(r"^%s" % stripspace, self.indentstring
* self.indent, line)
|
python
|
{
"resource": ""
}
|
q23935
|
PythonPrinter._in_multi_line
|
train
|
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block,
via backslash or triple-quote."""
# we are only looking for explicitly joined lines here, not
# implicit ones (i.e. brackets, braces etc.). this is just to
# guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed
# whitespace
current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
|
python
|
{
"resource": ""
}
|
q23936
|
html_error_template
|
train
|
def html_error_template():
"""Provides a template that renders a stack trace in an HTML format,
providing an excerpt of code as well as substituting source template
filenames, line numbers and code for that of the originating source
template, as applicable.
The template's default ``encoding_errors`` value is
``'htmlentityreplace'``. The template has two options. With the
``full`` option disabled, only a section of an HTML document is
returned. With the ``css`` option disabled, the default stylesheet
won't be included.
"""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback, syntax_highlight,\
pygments_html_formatter
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
<title>Mako Runtime Error</title>
% endif
% if css:
<style>
body { font-family:verdana; margin:10px 30px 10px 30px;}
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs()}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
</style>
% endif
% if full:
</head>
<body>
% endif
<h2>Error !</h2>
<%
tback = RichTraceback(error=error, traceback=traceback)
src = tback.source
line = tback.lineno
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${tback.errorname}: ${tback.message|h}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line:
<%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else:
${lines[index] | syntax_highlight(language='mako')}
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | syntax_highlight(filename)}</div>
</div>
% endfor
</div>
% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(),
encoding_errors='htmlentityreplace')
|
python
|
{
"resource": ""
}
|
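A typical usage sketch for this helper (the failing template text is illustrative):

from mako.template import Template
from mako import exceptions

try:
    Template("${undefined_name + 1}").render()
except Exception:
    html = exceptions.html_error_template().render()
    print(html[:80])  # beginning of the generated HTML error page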
q23937
|
RichTraceback._init_message
|
train
|
def _init_message(self):
"""Find a unicode representation of self.error"""
try:
self.message = compat.text_type(self.error)
except UnicodeError:
try:
self.message = str(self.error)
except UnicodeEncodeError:
# Fallback to args as neither unicode nor
# str(Exception(u'\xe6')) work in Python < 2.6
self.message = self.error.args[0]
if not isinstance(self.message, compat.text_type):
self.message = compat.text_type(self.message, 'ascii', 'replace')
|
python
|
{
"resource": ""
}
|
q23938
|
TemplateLookup.adjust_uri
|
train
|
def adjust_uri(self, uri, relativeto):
"""Adjust the given ``uri`` based on the given relative URI."""
key = (uri, relativeto)
if key in self._uri_cache:
return self._uri_cache[key]
if uri[0] != '/':
if relativeto is not None:
v = self._uri_cache[key] = posixpath.join(
posixpath.dirname(relativeto), uri)
else:
v = self._uri_cache[key] = '/' + uri
else:
v = self._uri_cache[key] = uri
return v
|
python
|
{
"resource": ""
}
|
q23939
|
TemplateLookup._relativeize
|
train
|
def _relativeize(self, filename):
"""Return the portion of a filename that is 'relative'
to the directories in this lookup.
"""
filename = posixpath.normpath(filename)
for dir in self.directories:
if filename[0:len(dir)] == dir:
return filename[len(dir):]
else:
return None
|
python
|
{
"resource": ""
}
|
q23940
|
GitHubAssetManager._get_style_urls
|
train
|
def _get_style_urls(self, asset_url_path):
"""
Gets the specified resource and parses all style URLs and their
assets in the form of the specified patterns.
"""
# Check cache
if self.cache_path:
cached = self._get_cached_style_urls(asset_url_path)
# Skip fetching styles if there's any already cached
if cached:
return cached
# Find style URLs
r = requests.get(STYLE_URLS_SOURCE)
if not 200 <= r.status_code < 300:
print('Warning: retrieving styles gave status code',
r.status_code, file=sys.stderr)
urls = []
for style_urls_re in STYLE_URLS_RES:
urls.extend(re.findall(style_urls_re, r.text))
if not urls:
print('Warning: no styles found - see https://github.com/joeyespo/'
'grip/issues/265', file=sys.stderr)
# Cache the styles and their assets
if self.cache_path:
is_cached = self._cache_contents(urls, asset_url_path)
if is_cached:
urls = self._get_cached_style_urls(asset_url_path)
return urls
|
python
|
{
"resource": ""
}
|
q23941
|
GitHubAssetManager._get_cached_style_urls
|
train
|
def _get_cached_style_urls(self, asset_url_path):
"""
Gets the URLs of the cached styles.
"""
try:
cached_styles = os.listdir(self.cache_path)
except IOError as ex:
if ex.errno != errno.ENOENT and ex.errno != errno.ESRCH:
raise
return []
except OSError:
return []
return [posixpath.join(asset_url_path, style)
for style in cached_styles
if style.endswith('.css')]
|
python
|
{
"resource": ""
}
|
q23942
|
GitHubAssetManager._cache_contents
|
train
|
def _cache_contents(self, style_urls, asset_url_path):
"""
Fetches the given URLs and caches their contents
and their assets in the given directory.
"""
files = {}
asset_urls = []
for style_url in style_urls:
if not self.quiet:
print(' * Downloading style', style_url, file=sys.stderr)
r = requests.get(style_url)
if not 200 <= r.status_code < 300:
print(' -> Warning: Style request responded with',
r.status_code, file=sys.stderr)
files = None
continue
asset_content = r.text
# Find assets and replace their base URLs with the cache directory
for url in re.findall(STYLE_ASSET_URLS_RE, asset_content):
asset_urls.append(urljoin(style_url, url))
contents = re.sub(
STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT.format(asset_url_path.rstrip('/')),
asset_content)
# Prepare cache
if files is not None:
filename = self.cache_filename(style_url)
files[filename] = contents.encode('utf-8')
for asset_url in asset_urls:
if not self.quiet:
print(' * Downloading asset', asset_url, file=sys.stderr)
# Retrieve binary file and show message
r = requests.get(asset_url, stream=True)
if not 200 <= r.status_code < 300:
print(' -> Warning: Asset request responded with',
r.status_code, file=sys.stderr)
files = None
continue
# Prepare cache
if files is not None:
filename = self.cache_filename(asset_url)
files[filename] = r.raw.read(decode_content=True)
# Skip caching if something went wrong to try again next time
if not files:
return False
# Cache files if all downloads were successful
cache = {}
for relname in files:
cache[safe_join(self.cache_path, relname)] = files[relname]
if not os.path.exists(self.cache_path):
os.makedirs(self.cache_path)
for filename in cache:
with open(filename, 'wb') as f:
f.write(cache[filename])
if not self.quiet:
print(
' * Cached all downloads in', self.cache_path, file=sys.stderr)
return True
|
python
|
{
"resource": ""
}
|
q23943
|
GitHubAssetManager.retrieve_styles
|
train
|
def retrieve_styles(self, asset_url_path):
"""
Get style URLs from the source HTML page and specified cached
asset base URL.
"""
if not asset_url_path.endswith('/'):
asset_url_path += '/'
self.style_urls.extend(self._get_style_urls(asset_url_path))
|
python
|
{
"resource": ""
}
|
q23944
|
is_server_running
|
train
|
def is_server_running(host, port):
"""
Checks whether a server is currently listening on the specified
host and port.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
return s.connect_ex((host, port)) == 0
finally:
s.close()
|
python
|
{
"resource": ""
}
|
q23945
|
wait_for_server
|
train
|
def wait_for_server(host, port, cancel_event=None):
"""
Blocks until a local server is listening on the specified
host and port. Set cancel_event to cancel the wait.
This is intended to be used in conjunction with running
the Flask server.
"""
while not is_server_running(host, port):
# Stop waiting if shutting down
if cancel_event and cancel_event.is_set():
return False
time.sleep(0.1)
return True
|
python
|
{
"resource": ""
}
|
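A small sketch of the cancellation path, assuming the wait_for_server and is_server_running functions above are in scope and nothing is listening on the chosen port:

import threading

cancel = threading.Event()
cancel.set()  # pretend another thread already signalled shutdown
print(wait_for_server('localhost', 6419, cancel_event=cancel))  # -> False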
q23946
|
wait_and_start_browser
|
train
|
def wait_and_start_browser(host, port=None, cancel_event=None):
"""
Waits for the server to run and then opens the specified address in
the browser. Set cancel_event to cancel the wait.
"""
if host == '0.0.0.0':
host = 'localhost'
if port is None:
port = 80
if wait_for_server(host, port, cancel_event):
start_browser('http://{0}:{1}/'.format(host, port))
|
python
|
{
"resource": ""
}
|
q23947
|
start_browser_when_ready
|
train
|
def start_browser_when_ready(host, port=None, cancel_event=None):
"""
Starts a thread that waits for the server then opens the specified
address in the browser. Set cancel_event to cancel the wait. The
started thread object is returned.
"""
browser_thread = Thread(
target=wait_and_start_browser, args=(host, port, cancel_event))
browser_thread.daemon = True
browser_thread.start()
return browser_thread
|
python
|
{
"resource": ""
}
|
q23948
|
Grip._render_asset
|
train
|
def _render_asset(self, subpath):
"""
Renders the specified cache file.
"""
return send_from_directory(
self.assets.cache_path, self.assets.cache_filename(subpath))
|
python
|
{
"resource": ""
}
|
q23949
|
Grip._render_rate_limit_page
|
train
|
def _render_rate_limit_page(self, exception=None):
"""
Renders the rate limit page.
"""
auth = request.args.get('auth')
is_auth = auth == '1' if auth else bool(self.auth)
return render_template('limit.html', is_authenticated=is_auth), 403
|
python
|
{
"resource": ""
}
|
q23950
|
Grip._get_styles
|
train
|
def _get_styles(self, style_urls, asset_url_path):
"""
Gets the content of the given list of style URLs and
inlines assets.
"""
styles = []
for style_url in style_urls:
urls_inline = STYLE_ASSET_URLS_INLINE_FORMAT.format(
asset_url_path.rstrip('/'))
asset_content = self._download(style_url)
content = re.sub(urls_inline, self._match_asset, asset_content)
styles.append(content)
return styles
|
python
|
{
"resource": ""
}
|
q23951
|
Grip._inline_styles
|
train
|
def _inline_styles(self):
"""
Downloads the assets from the style URL list, clears it, and adds
each style with its embedded asset to the literal style list.
"""
styles = self._get_styles(self.assets.style_urls, url_for('asset'))
self.assets.styles.extend(styles)
self.assets.style_urls[:] = []
|
python
|
{
"resource": ""
}
|
q23952
|
Grip._retrieve_styles
|
train
|
def _retrieve_styles(self):
"""
Retrieves the style URLs from the source and caches them. This
is called before the first request is dispatched.
"""
if self._styles_retrieved:
return
self._styles_retrieved = True
try:
self.assets.retrieve_styles(url_for('asset'))
except Exception as ex:
if self.debug:
print(format_exc(), file=sys.stderr)
else:
print(' * Error: could not retrieve styles:', ex,
file=sys.stderr)
if self.render_inline:
self._inline_styles()
|
python
|
{
"resource": ""
}
|
q23953
|
Grip.default_asset_manager
|
train
|
def default_asset_manager(self):
"""
Returns the default asset manager using the current config.
This is only used if asset_manager is set to None in the constructor.
"""
cache_path = None
cache_directory = self.config['CACHE_DIRECTORY']
if cache_directory:
cache_directory = cache_directory.format(version=__version__)
cache_path = os.path.join(self.instance_path, cache_directory)
return GitHubAssetManager(
cache_path, self.config['STYLE_URLS'], self.quiet)
|
python
|
{
"resource": ""
}
|
q23954
|
Grip.render
|
train
|
def render(self, route=None):
"""
Renders the application and returns the HTML unicode that would
normally appear when visiting in the browser.
"""
if route is None:
route = '/'
with self.test_client() as c:
response = c.get(route, follow_redirects=True)
encoding = response.charset
return response.data.decode(encoding)
|
python
|
{
"resource": ""
}
|
q23955
|
Grip.run
|
train
|
def run(self, host=None, port=None, debug=None, use_reloader=None,
open_browser=False):
"""
Starts a server to render the README.
"""
if host is None:
host = self.config['HOST']
if port is None:
port = self.config['PORT']
if debug is None:
debug = self.debug
if use_reloader is None:
use_reloader = self.config['DEBUG_GRIP']
# Verify the server is not already running and start
with self._run_mutex:
if self._shutdown_event:
raise AlreadyRunningError()
self._shutdown_event = threading.Event()
# Authentication message
if self.auth and not self.quiet:
if isinstance(self.auth, tuple):
username, password = self.auth
auth_method = ('credentials: {0}'.format(username)
if username
else 'personal access token')
else:
auth_method = type(self.auth).__name__
print(' * Using', auth_method, file=sys.stderr)
# Get random port manually when needed ahead of time
if port == 0 and open_browser:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
# Open browser
browser_thread = (
start_browser_when_ready(host, port, self._shutdown_event)
if open_browser else None)
# Run local server
super(Grip, self).run(host, port, debug=debug,
use_reloader=use_reloader,
threaded=True)
# Signal to the polling and browser threads that they should exit
if not self.quiet:
print(' * Shutting down...')
self._shutdown_event.set()
# Wait for browser thread to finish
if browser_thread:
browser_thread.join()
# Cleanup
self._shutdown_event = None
|
python
|
{
"resource": ""
}
|
q23956
|
create_app
|
train
|
def create_app(path=None, user_content=False, context=None, username=None,
password=None, render_offline=False, render_wide=False,
render_inline=False, api_url=None, title=None, text=None,
autorefresh=None, quiet=None, grip_class=None):
"""
Creates a Grip application with the specified overrides.
"""
# Customize the app
if grip_class is None:
grip_class = Grip
# Customize the reader
if text is not None:
display_filename = DirectoryReader(path, True).filename_for(None)
source = TextReader(text, display_filename)
elif path == '-':
source = StdinReader()
else:
source = DirectoryReader(path)
# Customize the renderer
if render_offline:
renderer = OfflineRenderer(user_content, context)
elif user_content or context or api_url:
renderer = GitHubRenderer(user_content, context, api_url)
else:
renderer = None
# Optional basic auth
auth = (username, password) if username or password else None
# Create the customized app with default asset manager
return grip_class(source, auth, renderer, None, render_wide,
render_inline, title, autorefresh, quiet)
|
python
|
{
"resource": ""
}
|
q23957
|
render_page
|
train
|
def render_page(path=None, user_content=False, context=None,
username=None, password=None,
render_offline=False, render_wide=False, render_inline=False,
api_url=None, title=None, text=None, quiet=None,
grip_class=None):
"""
Renders the specified markup text to an HTML page and returns it.
"""
return create_app(path, user_content, context, username, password,
render_offline, render_wide, render_inline, api_url,
title, text, False, quiet, grip_class).render()
|
python
|
{
"resource": ""
}
|
q23958
|
render_content
|
train
|
def render_content(text, user_content=False, context=None, username=None,
password=None, render_offline=False, api_url=None):
"""
Renders the specified markup and returns the result.
"""
renderer = (GitHubRenderer(user_content, context, api_url)
if not render_offline else
OfflineRenderer(user_content, context))
auth = (username, password) if username or password else None
return renderer.render(text, auth)
|
python
|
{
"resource": ""
}
|
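A one-line usage sketch of this helper as exposed by grip's Python API (the markup text is illustrative; network access to the GitHub API is needed unless render_offline is set):

from grip import render_content

html = render_content("# Hello\n\nSome *markdown* text.")
print(html[:60])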
q23959
|
export
|
train
|
def export(path=None, user_content=False, context=None,
username=None, password=None, render_offline=False,
render_wide=False, render_inline=True, out_filename=None,
api_url=None, title=None, quiet=False, grip_class=None):
"""
Exports the rendered HTML to a file.
"""
export_to_stdout = out_filename == '-'
if out_filename is None:
if path == '-':
export_to_stdout = True
else:
filetitle, _ = os.path.splitext(
os.path.relpath(DirectoryReader(path).root_filename))
out_filename = '{0}.html'.format(filetitle)
if not export_to_stdout and not quiet:
print('Exporting to', out_filename, file=sys.stderr)
page = render_page(path, user_content, context, username, password,
render_offline, render_wide, render_inline, api_url,
title, None, quiet, grip_class)
if export_to_stdout:
try:
print(page)
except IOError as ex:
if ex.errno != 0 and ex.errno != errno.EPIPE:
raise
else:
with io.open(out_filename, 'w', encoding='utf-8') as f:
f.write(page)
|
python
|
{
"resource": ""
}
|
q23960
|
patch
|
train
|
def patch(html, user_content=False):
"""
Processes the HTML rendered by the GitHub API, patching
any inconsistencies from the main site.
"""
# FUTURE: Remove this once GitHub API renders task lists
# https://github.com/isaacs/github/issues/309
if not user_content:
html = INCOMPLETE_TASK_RE.sub(INCOMPLETE_TASK_SUB, html)
html = COMPLETE_TASK_RE.sub(COMPLETE_TASK_SUB, html)
# FUTURE: Remove this once GitHub API fixes the header bug
# https://github.com/joeyespo/grip/issues/244
html = HEADER_PATCH_RE.sub(HEADER_PATCH_SUB, html)
return html
|
python
|
{
"resource": ""
}
|
q23961
|
DirectoryReader._find_file
|
train
|
def _find_file(self, path, silent=False):
"""
Gets the full path and extension, or None if a README file could not
be found at the specified path.
"""
for filename in DEFAULT_FILENAMES:
full_path = os.path.join(path, filename) if path else filename
if os.path.exists(full_path):
return full_path
# Return default filename if silent
if silent:
return os.path.join(path, DEFAULT_FILENAME)
raise ReadmeNotFoundError(path)
|
python
|
{
"resource": ""
}
|
q23962
|
DirectoryReader._resolve_readme
|
train
|
def _resolve_readme(self, path=None, silent=False):
"""
Returns the path if it's a file; otherwise, looks for a compatible
README file in the directory specified by path.
If path is None, the current working directory is used.
If silent is set, the default relative filename will be returned
if path is a directory or None if it does not exist.
Raises ReadmeNotFoundError if no compatible README file can be
found and silent is False.
"""
# Default to current working directory
if path is None:
path = '.'
# Normalize the path
path = os.path.normpath(path)
# Resolve README file if path is a directory
if os.path.isdir(path):
return self._find_file(path, silent)
# Return path if file exists or if silent
if silent or os.path.exists(path):
return path
raise ReadmeNotFoundError(path, 'File not found: ' + path)
|
python
|
{
"resource": ""
}
|
q23963
|
DirectoryReader._read_text
|
train
|
def _read_text(self, filename):
"""
Helper that reads the UTF-8 content of the specified file, or
None if the file doesn't exist. This returns a unicode string.
"""
with io.open(filename, 'rt', encoding='utf-8') as f:
return f.read()
|
python
|
{
"resource": ""
}
|
q23964
|
DirectoryReader.normalize_subpath
|
train
|
def normalize_subpath(self, subpath):
"""
Normalizes the specified subpath, or None if subpath is None.
This allows Readme files to be inferred from directories while
still allowing relative paths to work properly.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
if subpath is None:
return None
# Normalize the subpath
subpath = posixpath.normpath(subpath)
# Add or remove trailing slash to properly support relative links
filename = os.path.normpath(safe_join(self.root_directory, subpath))
if os.path.isdir(filename):
subpath += '/'
return subpath
|
python
|
{
"resource": ""
}
|
q23965
|
DirectoryReader.readme_for
|
train
|
def readme_for(self, subpath):
"""
Returns the full path for the README file for the specified
subpath, or the root filename if subpath is None.
Raises ReadmeNotFoundError if a README for the specified subpath
does not exist.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
if subpath is None:
return self.root_filename
# Join for safety and to convert subpath to normalized OS-specific path
filename = os.path.normpath(safe_join(self.root_directory, subpath))
# Check for existence
if not os.path.exists(filename):
raise ReadmeNotFoundError(filename)
# Resolve README file if path is a directory
if os.path.isdir(filename):
return self._find_file(filename)
return filename
|
python
|
{
"resource": ""
}
|
q23966
|
DirectoryReader.filename_for
|
train
|
def filename_for(self, subpath):
"""
Returns the relative filename for the specified subpath, or the
root filename if subpath is None.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
try:
filename = self.readme_for(subpath)
return os.path.relpath(filename, self.root_directory)
except ReadmeNotFoundError:
return None
|
python
|
{
"resource": ""
}
|
q23967
|
DirectoryReader.is_binary
|
train
|
def is_binary(self, subpath=None):
"""
Gets whether the specified subpath is a supported binary file.
"""
mimetype = self.mimetype_for(subpath)
return mimetype is not None and mimetype.startswith('image/')
|
python
|
{
"resource": ""
}
|
q23968
|
DirectoryReader.last_updated
|
train
|
def last_updated(self, subpath=None):
"""
Returns the time of the last modification of the Readme or
specified subpath, or None if the file does not exist.
The return value is a number giving the number of seconds since
the epoch (see the time module).
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
try:
return os.path.getmtime(self.readme_for(subpath))
except ReadmeNotFoundError:
return None
# OSError for Python 3 base class, EnvironmentError for Python 2
except (OSError, EnvironmentError) as ex:
if ex.errno == errno.ENOENT:
return None
raise
|
python
|
{
"resource": ""
}
|
q23969
|
DirectoryReader.read
|
train
|
def read(self, subpath=None):
"""
Returns the UTF-8 content of the specified subpath.
subpath is expected to already have been normalized.
Raises ReadmeNotFoundError if a README for the specified subpath
does not exist.
Raises werkzeug.exceptions.NotFound if the resulting path
would fall out of the root directory.
"""
is_binary = self.is_binary(subpath)
filename = self.readme_for(subpath)
try:
if is_binary:
return self._read_binary(filename)
return self._read_text(filename)
# OSError for Python 3 base class, EnvironmentError for Python 2
except (OSError, EnvironmentError) as ex:
if ex.errno == errno.ENOENT:
raise ReadmeNotFoundError(filename)
raise
|
python
|
{
"resource": ""
}
|
q23970
|
StdinReader.read
|
train
|
def read(self, subpath=None):
"""
Returns the UTF-8 Readme content.
Raises ReadmeNotFoundError if subpath is specified since
subpaths are not supported for text readers.
"""
# Lazily read STDIN
if self.text is None and subpath is None:
self.text = self.read_stdin()
return super(StdinReader, self).read(subpath)
|
python
|
{
"resource": ""
}
|
q23971
|
StdinReader.read_stdin
|
train
|
def read_stdin(self):
"""
Reads STDIN until the end of input and returns a unicode string.
"""
text = sys.stdin.read()
# Decode the bytes returned from earlier Python STDIN implementations
if sys.version_info[0] < 3 and text is not None:
text = text.decode(sys.stdin.encoding or 'utf-8')
return text
|
python
|
{
"resource": ""
}
|
q23972
|
ParetoNBDFitter._fit
|
train
|
def _fit(
self,
minimizing_function_args,
iterative_fitting,
initial_params,
params_size,
disp,
tol=1e-6,
fit_method="Nelder-Mead",
maxiter=2000,
**kwargs
):
"""Fit function for fitters."""
ll = []
sols = []
if iterative_fitting <= 0:
raise ValueError("iterative_fitting parameter should be greater than 0 as of lifetimes v0.2.1")
if iterative_fitting > 1 and initial_params is not None:
raise ValueError(
"iterative_fitting and initial_params should not be both set, as no improvement could be made."
)
# set options for minimize, if specified in kwargs will be overwritten
minimize_options = {}
minimize_options["disp"] = disp
minimize_options["maxiter"] = maxiter
minimize_options.update(kwargs)
total_count = 0
while total_count < iterative_fitting:
current_init_params = (
np.random.normal(1.0, scale=0.05, size=params_size) if initial_params is None else initial_params
)
if minimize_options["disp"]:
print("Optimize function with {}".format(fit_method))
output = minimize(
self._negative_log_likelihood,
method=fit_method,
tol=tol,
x0=current_init_params,
args=minimizing_function_args,
options=minimize_options,
)
sols.append(output.x)
ll.append(output.fun)
total_count += 1
argmin_ll, min_ll = min(enumerate(ll), key=lambda x: x[1])
minimizing_params = sols[argmin_ll]
return minimizing_params, min_ll
|
python
|
{
"resource": ""
}
|
q23973
|
BaseFitter.save_model
|
train
|
def save_model(self, path, save_data=True, save_generate_data_method=True, values_to_save=None):
"""
Save model with dill package.
Parameters
----------
path: str
Path where to save model.
save_data: bool, optional
Whether to save data from fitter.data to pickle object
save_generate_data_method: bool, optional
Whether to save generate_new_data method (if it exists) from
fitter.generate_new_data to pickle object.
values_to_save: list, optional
Placeholders for original attributes for saving object. If None
will be extended to attr_list length like [None] * len(attr_list)
"""
attr_list = ["data" * (not save_data), "generate_new_data" * (not save_generate_data_method)]
_save_obj_without_attr(self, attr_list, path, values_to_save=values_to_save)
|
python
|
{
"resource": ""
}
|
q23974
|
BaseFitter.load_model
|
train
|
def load_model(self, path):
"""
Load model with dill package.
Parameters
----------
path: str
From what path load model.
"""
with open(path, "rb") as in_file:
self.__dict__.update(dill.load(in_file).__dict__)
|
python
|
{
"resource": ""
}
|
q23975
|
ModifiedBetaGeoFitter.conditional_expected_number_of_purchases_up_to_time
|
train
|
def conditional_expected_number_of_purchases_up_to_time(self, t, frequency, recency, T):
"""
Conditional expected number of repeat purchases up to time t.
Calculate the expected number of repeat purchases up to time t for a
    randomly chosen individual from the population, given they have
purchase history (frequency, recency, T)
See Wagner, U. and Hoppe D. (2008).
Parameters
----------
t: array_like
times to calculate the expectation for.
frequency: array_like
historical frequency of customer.
recency: array_like
historical recency of customer.
T: array_like
age of the customer.
Returns
-------
array_like
"""
x = frequency
r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
hyp_term = hyp2f1(r + x, b + x + 1, a + b + x, t / (alpha + T + t))
first_term = (a + b + x) / (a - 1)
second_term = 1 - hyp_term * ((alpha + T) / (alpha + t + T)) ** (r + x)
numerator = first_term * second_term
denominator = 1 + (a / (b + x)) * ((alpha + T) / (alpha + recency)) ** (r + x)
return numerator / denominator
|
python
|
{
"resource": ""
}
|
q23976
|
BetaGeoFitter.conditional_probability_alive
|
train
|
def conditional_probability_alive(self, frequency, recency, T):
"""
Compute conditional probability alive.
Compute the probability that a customer with history
(frequency, recency, T) is currently alive.
From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf
Parameters
----------
frequency: array or scalar
historical frequency of customer.
recency: array or scalar
historical recency of customer.
T: array or scalar
age of the customer.
Returns
-------
array
value representing a probability
"""
r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
log_div = (r + frequency) * np.log((alpha + T) / (alpha + recency)) + np.log(
a / (b + np.maximum(frequency, 1) - 1)
)
return np.atleast_1d(np.where(frequency == 0, 1.0, expit(-log_div)))
|
python
|
{
"resource": ""
}
|
q23977
|
BetaGeoFitter.probability_of_n_purchases_up_to_time
|
train
|
def probability_of_n_purchases_up_to_time(self, t, n):
r"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
"""
r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
first_term = (
beta(a, b + n)
/ beta(a, b)
* gamma(r + n)
/ gamma(r)
/ gamma(n + 1)
* (alpha / (alpha + t)) ** r
* (t / (alpha + t)) ** n
)
if n > 0:
j = np.arange(0, n)
finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * (t / (alpha + t)) ** j).sum()
second_term = beta(a + 1, b + n - 1) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)
else:
second_term = 0
return first_term + second_term
|
python
|
{
"resource": ""
}
|
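A short usage sketch with the lifetimes library's bundled CDNOW sample data; the customer history values passed in are illustrative:

from lifetimes import BetaGeoFitter
from lifetimes.datasets import load_cdnow_summary

summary = load_cdnow_summary(index_col=[0])
bgf = BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(summary['frequency'], summary['recency'], summary['T'])

# P(alive) for a customer with 2 repeat purchases, recency 30.43, age 38.86,
# and the probability of exactly 1 repeat purchase within 10 time units.
print(bgf.conditional_probability_alive(2, 30.43, 38.86))
print(bgf.probability_of_n_purchases_up_to_time(10, 1))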
q23978
|
calibration_and_holdout_data
|
train
|
def calibration_and_holdout_data(
transactions,
customer_id_col,
datetime_col,
calibration_period_end,
observation_period_end=None,
freq="D",
datetime_format=None,
monetary_value_col=None,
):
"""
Create a summary of each customer over a calibration and holdout period.
This function creates a summary of each customer over a calibration and
holdout period (training and testing, respectively).
It accepts transaction data, and returns a DataFrame of sufficient statistics.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
calibration_period_end: :obj: datetime
a period to limit the calibration to, inclusive.
observation_period_end: :obj: datetime, optional
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
freq: string, optional
Default 'D' for days. Other examples: 'W' for weekly.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
Returns
-------
:obj: DataFrame
A dataframe with columns frequency_cal, recency_cal, T_cal, frequency_holdout, duration_holdout
If monetary_value_col isn't None, the dataframe will also have the columns monetary_value_cal and
monetary_value_holdout.
"""
def to_period(d):
return d.to_period(freq)
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max()
transaction_cols = [customer_id_col, datetime_col]
if monetary_value_col:
transaction_cols.append(monetary_value_col)
transactions = transactions[transaction_cols].copy()
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
observation_period_end = pd.to_datetime(observation_period_end, format=datetime_format)
calibration_period_end = pd.to_datetime(calibration_period_end, format=datetime_format)
# create calibration dataset
calibration_transactions = transactions.loc[transactions[datetime_col] <= calibration_period_end]
calibration_summary_data = summary_data_from_transaction_data(
calibration_transactions,
customer_id_col,
datetime_col,
datetime_format=datetime_format,
observation_period_end=calibration_period_end,
freq=freq,
monetary_value_col=monetary_value_col,
)
calibration_summary_data.columns = [c + "_cal" for c in calibration_summary_data.columns]
# create holdout dataset
holdout_transactions = transactions.loc[
(observation_period_end >= transactions[datetime_col]) & (transactions[datetime_col] > calibration_period_end)
]
if holdout_transactions.empty:
raise ValueError(
"There is no data available. Check the `observation_period_end` and `calibration_period_end` and confirm that values in `transactions` occur prior to those dates."
)
holdout_transactions[datetime_col] = holdout_transactions[datetime_col].map(to_period)
holdout_summary_data = (
holdout_transactions.groupby([customer_id_col, datetime_col], sort=False)
.agg(lambda r: 1)
.groupby(level=customer_id_col)
.agg(["count"])
)
holdout_summary_data.columns = ["frequency_holdout"]
if monetary_value_col:
holdout_summary_data["monetary_value_holdout"] = holdout_transactions.groupby(customer_id_col)[
monetary_value_col
].mean()
combined_data = calibration_summary_data.join(holdout_summary_data, how="left")
combined_data.fillna(0, inplace=True)
delta_time = (to_period(observation_period_end) - to_period(calibration_period_end)).n
combined_data["duration_holdout"] = delta_time
return combined_data
|
python
|
{
"resource": ""
}
|
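A minimal usage sketch for calibration_and_holdout_data above. It assumes the function is importable from lifetimes.utils as in the upstream library; the column names and dates are made up for illustration.

import pandas as pd
from lifetimes.utils import calibration_and_holdout_data  # assumed import path

# hypothetical transaction log: one row per purchase
transactions = pd.DataFrame({
    "customer_id": [1, 1, 1, 2, 2, 3],
    "date": ["2021-01-02", "2021-02-10", "2021-07-01",
             "2021-01-15", "2021-08-20", "2021-03-03"],
})

summary_cal_holdout = calibration_and_holdout_data(
    transactions,
    customer_id_col="customer_id",
    datetime_col="date",
    calibration_period_end="2021-06-30",
    observation_period_end="2021-12-31",
    freq="D",
)
print(summary_cal_holdout[["frequency_cal", "recency_cal", "T_cal",
                           "frequency_holdout", "duration_holdout"]])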
q23979
|
_find_first_transactions
|
train
|
def _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
):
"""
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'repeated' to the transaction log which indicates which rows
are repeated transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
"""
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max()
if type(observation_period_end) == pd.Period:
observation_period_end = observation_period_end.to_timestamp()
select_columns = [customer_id_col, datetime_col]
if monetary_value_col:
select_columns.append(monetary_value_col)
transactions = transactions[select_columns].sort_values(select_columns).copy()
# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()
# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
transactions = transactions.set_index(datetime_col).to_period(freq).to_timestamp()
transactions = transactions.loc[(transactions.index <= observation_period_end)].reset_index()
period_groupby = transactions.groupby([datetime_col, customer_id_col], sort=False, as_index=False)
if monetary_value_col:
# when we have a monetary column, make sure to sum together any values in the same period
period_transactions = period_groupby.sum()
else:
# by calling head(1) on the groupby object, only the first row of each
# (period, customer) group is kept, de-duplicating purchases within the same period
period_transactions = period_groupby.head(1)
# initialize a new column where we will indicate which are the first transactions
period_transactions["first"] = False
# find all of the initial transactions and store as an index
first_transactions = period_transactions.groupby(customer_id_col, sort=True, as_index=False).head(1).index
# mark the initial transactions as True
period_transactions.loc[first_transactions, "first"] = True
select_columns.append("first")
# reset datetime_col to period
period_transactions[datetime_col] = pd.Index(period_transactions[datetime_col]).to_period(freq)
return period_transactions[select_columns]
|
python
|
{
"resource": ""
}
|
q23980
|
summary_data_from_transaction_data
|
train
|
def summary_data_from_transaction_data(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
freq_multiplier=1,
):
"""
Return summary data from transactions.
This transforms a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
to a DataFrame of the form:
customer_id, frequency, recency, T [, monetary_value]
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: datetime, optional
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
freq_multiplier: int, optional
Default 1. Can be used to get exact recency and T: e.g. with freq='W' the
row for user id_sample=1 will have recency=30 and T=39, while the values in the
CDNOW summary differ. Exact values can be obtained with
freq='D' and freq_multiplier=7, which leads to recency=30.43
and T=38.86.
Returns
-------
:obj: DataFrame:
customer_id, frequency, recency, T [, monetary_value]
"""
if observation_period_end is None:
observation_period_end = (
pd.to_datetime(transactions[datetime_col].max(), format=datetime_format).to_period(freq).to_timestamp()
)
else:
observation_period_end = (
pd.to_datetime(observation_period_end, format=datetime_format).to_period(freq).to_timestamp()
)
# label all of the repeated transactions
repeated_transactions = _find_first_transactions(
transactions, customer_id_col, datetime_col, monetary_value_col, datetime_format, observation_period_end, freq
)
# reset datetime_col to timestamp
repeated_transactions[datetime_col] = pd.Index(repeated_transactions[datetime_col]).to_timestamp()
# count all orders by customer.
customers = repeated_transactions.groupby(customer_id_col, sort=False)[datetime_col].agg(["min", "max", "count"])
# subtract 1 from count, as we ignore their first order.
customers["frequency"] = customers["count"] - 1
customers["T"] = (observation_period_end - customers["min"]) / np.timedelta64(1, freq) / freq_multiplier
customers["recency"] = (customers["max"] - customers["min"]) / np.timedelta64(1, freq) / freq_multiplier
summary_columns = ["frequency", "recency", "T"]
if monetary_value_col:
# create an index of all the first purchases
first_purchases = repeated_transactions[repeated_transactions["first"]].index
# by setting the monetary_value cells of all the first purchases to NaN,
# those values will be excluded from the mean value calculation
repeated_transactions.loc[first_purchases, monetary_value_col] = np.nan
customers["monetary_value"] = (
repeated_transactions.groupby(customer_id_col)[monetary_value_col].mean().fillna(0)
)
summary_columns.append("monetary_value")
return customers[summary_columns].astype(float)
|
python
|
{
"resource": ""
}
|
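A small usage sketch for the RFM summary above, again assuming the lifetimes.utils import path; the toy transactions and column names are hypothetical.

import pandas as pd
from lifetimes.utils import summary_data_from_transaction_data  # assumed import path

transactions = pd.DataFrame({
    "id": [1, 1, 1, 2, 3, 3],
    "date": ["2021-01-01", "2021-01-20", "2021-03-05",
             "2021-02-02", "2021-01-10", "2021-04-11"],
    "amount": [20.0, 35.0, 30.0, 10.0, 5.0, 12.5],
})

rfm = summary_data_from_transaction_data(
    transactions,
    customer_id_col="id",
    datetime_col="date",
    monetary_value_col="amount",
    observation_period_end="2021-06-30",
    freq="D",
)
# columns: frequency, recency, T, monetary_value (first purchases are excluded from the mean)
print(rfm)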
q23981
|
calculate_alive_path
|
train
|
def calculate_alive_path(model, transactions, datetime_col, t, freq="D"):
"""
Calculate alive path for plotting alive history of user.
Parameters
----------
model:
A fitted lifetimes model
transactions: DataFrame
a Pandas DataFrame containing the transactions history of the customer_id
datetime_col: string
the column in the transactions that denotes the datetime the purchase was made
t: array_like
the number of time units since the birth for which we want to draw the p_alive
freq: string
Default 'D' for days. Other examples= 'W' for weekly
Returns
-------
:obj: Series
A pandas Series containing the p_alive as a function of T (age of the customer)
"""
customer_history = transactions[[datetime_col]].copy()
customer_history[datetime_col] = pd.to_datetime(customer_history[datetime_col])
customer_history = customer_history.set_index(datetime_col)
# Add transactions column
customer_history["transactions"] = 1
# for some reason fillna(0) not working for resample in pandas with python 3.x,
# changed to replace
purchase_history = customer_history.resample(freq).sum().replace(np.nan, 0)["transactions"].values
extra_columns = t + 1 - len(purchase_history)
customer_history = pd.DataFrame(np.append(purchase_history, [0] * extra_columns), columns=["transactions"])
# add T column
customer_history["T"] = np.arange(customer_history.shape[0])
# add cumulative transactions column
customer_history["transactions"] = customer_history["transactions"].apply(lambda t: int(t > 0))
customer_history["frequency"] = customer_history["transactions"].cumsum() - 1 # first purchase is ignored
# Add t_x column
customer_history["recency"] = customer_history.apply(
lambda row: row["T"] if row["transactions"] != 0 else np.nan, axis=1
)
customer_history["recency"] = customer_history["recency"].fillna(method="ffill").fillna(0)
return customer_history.apply(
lambda row: model.conditional_probability_alive(row["frequency"], row["recency"], row["T"]), axis=1
)
|
python
|
{
"resource": ""
}
|
q23982
|
_check_inputs
|
train
|
def _check_inputs(frequency, recency=None, T=None, monetary_value=None):
"""
Check validity of inputs.
Raises ValueError when checks failed.
Parameters
----------
frequency: array_like
the frequency vector of customers' purchases (denoted x in literature).
recency: array_like, optional
the recency vector of customers' purchases (denoted t_x in literature).
T: array_like, optional
the vector of customers' age (time since first purchase)
monetary_value: array_like, optional
the monetary value vector of customer's purchases (denoted m in literature).
"""
if recency is not None:
if T is not None and np.any(recency > T):
raise ValueError("Some values in recency vector are larger than T vector.")
if np.any(recency[frequency == 0] != 0):
raise ValueError("There exist non-zero recency values when frequency is zero.")
if np.any(recency < 0):
raise ValueError("There exist negative recency (ex: last order set before first order)")
if any(x.shape[0] == 0 for x in [recency, frequency, T]):
raise ValueError("There exists a zero length vector in one of frequency, recency or T.")
if np.sum((frequency - frequency.astype(int)) ** 2) != 0:
raise ValueError("There exist non-integer values in the frequency vector.")
if monetary_value is not None and np.any(monetary_value <= 0):
raise ValueError("There exist non-positive values in the monetary_value vector.")
|
python
|
{
"resource": ""
}
|
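An illustrative sketch of how the validation above behaves; the arrays are made up, and the helper is assumed to be importable from lifetimes.utils.

import numpy as np
from lifetimes.utils import _check_inputs  # assumed import path

frequency = np.array([0, 2, 5])
recency = np.array([0, 10, 30])
T = np.array([40, 40, 40])

_check_inputs(frequency, recency, T)  # passes silently

try:
    _check_inputs(frequency, np.array([0, 50, 30]), T)  # recency > T for one customer
except ValueError as e:
    print(e)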
q23983
|
_customer_lifetime_value
|
train
|
def _customer_lifetime_value(
transaction_prediction_model, frequency, recency, T, monetary_value, time=12, discount_rate=0.01, freq="D"
):
"""
Compute the average lifetime value for a group of one or more customers.
This method computes the average lifetime value for a group of one or more customers.
Parameters
----------
transaction_prediction_model:
the model to predict future transactions
frequency: array_like
the frequency vector of customers' purchases (denoted x in literature).
recency: array_like
the recency vector of customers' purchases (denoted t_x in literature).
T: array_like
the vector of customers' age (time since first purchase)
monetary_value: array_like
the monetary value vector of customer's purchases (denoted m in literature).
time: int, optional
the lifetime expected for the user in months. Default: 12
discount_rate: float, optional
the monthly adjusted discount rate. Default: 0.01
freq: string, optional
frequency with which T is measured: {"D", "H", "M", "W"} for day, hour, month, week. Default: "D"
Returns
-------
:obj: Series
series with customer ids as index and the estimated customer lifetime values as values
"""
df = pd.DataFrame(index=frequency.index)
df["clv"] = 0 # initialize the clv column to zeros
steps = np.arange(1, time + 1)
factor = {"W": 4.345, "M": 1.0, "D": 30, "H": 30 * 24}[freq]
for i in steps * factor:
# since the prediction of number of transactions is cumulative, we have to subtract off the previous periods
expected_number_of_transactions = transaction_prediction_model.predict(
i, frequency, recency, T
) - transaction_prediction_model.predict(i - factor, frequency, recency, T)
# sum up the CLV estimates of all of the periods
df["clv"] += (monetary_value * expected_number_of_transactions) / (1 + discount_rate) ** (i / factor)
return df["clv"]
|
python
|
{
"resource": ""
}
|
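The loop above is a discounted cash flow: each period's incremental expected transactions are multiplied by the monetary value and discounted back to the present. A standalone sketch of that arithmetic, with made-up incremental purchase counts and no fitted model:

import numpy as np

monetary_value = 25.0                           # hypothetical average order value
incremental_txns = np.array([0.8, 0.6, 0.5])    # made-up expected purchases in months 1..3
discount_rate = 0.01                            # monthly

clv = sum(
    monetary_value * n / (1 + discount_rate) ** month
    for month, n in enumerate(incremental_txns, start=1)
)
print(round(clv, 2))  # discounted value of the next three months of purchases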
q23984
|
expected_cumulative_transactions
|
train
|
def expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=None,
freq="D",
set_index_date=False,
freq_multiplier=1,
):
"""
Get expected and actual repeated cumulative transactions.
Parameters
----------
model:
A fitted lifetimes model
transactions: :obj: DataFrame
a Pandas DataFrame containing the transactions history of the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
customer_id_col: string
the column in transactions that denotes the customer_id
t: int
the number of time units since the beginning of
data for which we want to calculate cumulative transactions
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't
understand the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
when True set date as Pandas DataFrame index, default False - number of time units
freq_multiplier: int, optional
Default 1. Can be used to get the exact cumulative transactions predicted
by the model, e.g. a model trained with freq='W' while the freq passed to
expected_cumulative_transactions is freq='D' and freq_multiplier=7.
Returns
-------
:obj: DataFrame
A dataframe with columns actual, predicted
"""
start_date = pd.to_datetime(transactions[datetime_col], format=datetime_format).min()
start_period = start_date.to_period(freq)
observation_period_end = start_period + t
repeated_and_first_transactions = _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
datetime_format=datetime_format,
observation_period_end=observation_period_end,
freq=freq,
)
first_trans_mask = repeated_and_first_transactions["first"]
repeated_transactions = repeated_and_first_transactions[~first_trans_mask]
first_transactions = repeated_and_first_transactions[first_trans_mask]
date_range = pd.date_range(start_date, periods=t + 1, freq=freq)
date_periods = date_range.to_period(freq)
pred_cum_transactions = []
first_trans_size = first_transactions.groupby(datetime_col).size()
for i, period in enumerate(date_periods):
if i % freq_multiplier == 0 and i > 0:
times = np.array([d.n for d in period - first_trans_size.index])
times = times[times > 0].astype(float) / freq_multiplier
expected_trans_agg = model.expected_number_of_purchases_up_to_time(times)
mask = first_trans_size.index < period
expected_trans = sum(expected_trans_agg * first_trans_size[mask])
pred_cum_transactions.append(expected_trans)
act_trans = repeated_transactions.groupby(datetime_col).size()
act_tracking_transactions = act_trans.reindex(date_periods, fill_value=0)
act_cum_transactions = []
for j in range(1, t // freq_multiplier + 1):
sum_trans = sum(act_tracking_transactions.iloc[: j * freq_multiplier])
act_cum_transactions.append(sum_trans)
if set_index_date:
index = date_periods[freq_multiplier - 1 : -1 : freq_multiplier]
else:
index = range(0, t // freq_multiplier)
df_cum_transactions = pd.DataFrame(
{"actual": act_cum_transactions, "predicted": pred_cum_transactions}, index=index
)
return df_cum_transactions
|
python
|
{
"resource": ""
}
|
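A hedged usage sketch: it assumes lifetimes.datasets.load_transaction_data exists with 'id' and 'date' columns, as in the upstream library, and that a BG/NBD model has been fitted on the derived summary.

from lifetimes import BetaGeoFitter
from lifetimes.datasets import load_transaction_data          # assumed helper
from lifetimes.utils import (
    summary_data_from_transaction_data,
    expected_cumulative_transactions,
)

transactions = load_transaction_data()                         # assumed columns: id, date
summary = summary_data_from_transaction_data(transactions, "id", "date", freq="D")

bgf = BetaGeoFitter(penalizer_coef=0.01)
bgf.fit(summary["frequency"], summary["recency"], summary["T"])

# note the argument order: datetime_col before customer_id_col
df_cum = expected_cumulative_transactions(bgf, transactions, "date", "id", t=200, freq="D")
print(df_cum.tail())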
q23985
|
_save_obj_without_attr
|
train
|
def _save_obj_without_attr(obj, attr_list, path, values_to_save=None):
"""
Save object with attributes from attr_list.
Parameters
----------
obj: obj
Object of class with __dict__ attribute.
attr_list: list
List with attributes to exclude from saving to dill object. If empty
list all attributes will be saved.
path: str
Where to save dill object.
values_to_save: list, optional
Placeholders for original attributes for saving object. If None will be
extended to attr_list length like [None] * len(attr_list)
"""
if values_to_save is None:
values_to_save = [None] * len(attr_list)
saved_attr_dict = {}
for attr, val_save in zip(attr_list, values_to_save):
if attr in obj.__dict__:
item = obj.__dict__.pop(attr)
saved_attr_dict[attr] = item
setattr(obj, attr, val_save)
with open(path, "wb") as out_file:
dill.dump(obj, out_file)
for attr, item in saved_attr_dict.items():
setattr(obj, attr, item)
|
python
|
{
"resource": ""
}
|
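A sketch of how the helper above swaps attributes out before pickling and restores them afterwards; the Model class and file name are made up.

import dill

class Model:
    def __init__(self):
        self.params_ = {"r": 0.24, "alpha": 4.41}   # made-up fitted parameters
        self.data = ["large", "training", "data"]   # stand-in for data we do not want to persist

model = Model()
_save_obj_without_attr(model, ["data"], "model.dill", values_to_save=[None])

# the in-memory object keeps its original attribute ...
assert model.data == ["large", "training", "data"]

# ... while the pickled copy has the placeholder value instead
with open("model.dill", "rb") as f:
    loaded = dill.load(f)
assert loaded.data is None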
q23986
|
BetaGeoBetaBinomFitter._loglikelihood
|
train
|
def _loglikelihood(params, x, tx, T):
"""Log likelihood for optimizer."""
warnings.simplefilter(action="ignore", category=FutureWarning)
alpha, beta, gamma, delta = params
betaln_ab = betaln(alpha, beta)
betaln_gd = betaln(gamma, delta)
A = betaln(alpha + x, beta + T - x) - betaln_ab + betaln(gamma, delta + T) - betaln_gd
B = 1e-15 * np.ones_like(T)
recency_T = T - tx - 1
for j in np.arange(recency_T.max() + 1):
ix = recency_T >= j
B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)
B = log(B) - betaln_gd - betaln_ab
return logaddexp(A, B)
|
python
|
{
"resource": ""
}
|
q23987
|
BetaGeoBetaBinomFitter.conditional_expected_number_of_purchases_up_to_time
|
train
|
def conditional_expected_number_of_purchases_up_to_time(self, m_periods_in_future, frequency, recency, n_periods):
r"""
Conditional expected purchases in future time period.
The expected number of future transactions across the next m_periods_in_future
transaction opportunities by a customer with purchase history
(x, tx, n).
.. math:: E(X(n_{periods}, n_{periods}+m_{periods_in_future})| \alpha, \beta, \gamma, \delta, frequency, recency, n_{periods})
See (13) in Fader & Hardie 2010.
Parameters
----------
m_periods_in_future: array_like
number of future transaction opportunities
frequency: array_like
historical frequency of customers
recency: array_like
historical recency of customers
n_periods: array_like
number of transaction opportunities observed so far
Returns
-------
array_like
predicted transactions
"""
x = frequency
tx = recency
n = n_periods
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
p1 = 1 / exp(self._loglikelihood(params, x, tx, n))
p2 = exp(betaln(alpha + x + 1, beta + n - x) - betaln(alpha, beta))
p3 = delta / (gamma - 1) * exp(gammaln(gamma + delta) - gammaln(1 + delta))
p4 = exp(gammaln(1 + delta + n) - gammaln(gamma + delta + n))
p5 = exp(gammaln(1 + delta + n + m_periods_in_future) - gammaln(gamma + delta + n + m_periods_in_future))
return p1 * p2 * p3 * (p4 - p5)
|
python
|
{
"resource": ""
}
|
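A usage sketch with made-up discrete-time histories; it assumes the lifetimes BetaGeoBetaBinomFitter.fit signature (frequency, recency, n_periods, weights).

import numpy as np
from lifetimes import BetaGeoBetaBinomFitter

# made-up grouped histories: x purchases, last one at tx, over n = 6 opportunities
frequency = np.array([0, 1, 3, 6])
recency = np.array([0, 2, 5, 6])
n_periods = np.array([6, 6, 6, 6])
weights = np.array([100, 40, 20, 5])   # customers sharing each history

bgbb = BetaGeoBetaBinomFitter()
bgbb.fit(frequency, recency, n_periods, weights=weights)

# expected purchases over the next 4 opportunities, conditional on each history
preds = bgbb.conditional_expected_number_of_purchases_up_to_time(4, frequency, recency, n_periods)
print(preds)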
q23988
|
BetaGeoBetaBinomFitter.expected_number_of_transactions_in_first_n_periods
|
train
|
def expected_number_of_transactions_in_first_n_periods(self, n):
r"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
"""
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
x_counts = self.data.groupby("frequency")["weights"].sum()
x = np.asarray(x_counts.index)
p1 = binom(n, x) * exp(
betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta)
)
I = np.arange(x.min(), n)
@np.vectorize
def p2(j, x):
i = I[int(j) :]
return np.sum(
binom(i, x)
* exp(
betaln(alpha + x, beta + i - x)
- betaln(alpha, beta)
+ betaln(gamma + 1, delta + i)
- betaln(gamma, delta)
)
)
p1 += np.fromfunction(p2, (x.shape[0],), x=x)
idx = pd.Index(x, name="frequency")
return DataFrame(p1 * x_counts.sum(), index=idx, columns=["model"])
|
python
|
{
"resource": ""
}
|
q23989
|
plot_period_transactions
|
train
|
def plot_period_transactions(
model,
max_frequency=7,
title="Frequency of Repeat Transactions",
xlabel="Number of Calibration Period Transactions",
ylabel="Customers",
**kwargs
):
"""
Plot a figure with period actual and predicted transactions.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
max_frequency: int, optional
The maximum frequency to plot.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
labels = kwargs.pop("label", ["Actual", "Model"])
n = model.data.shape[0]
simulated_data = model.generate_new_data(size=n)
model_counts = pd.DataFrame(model.data["frequency"].value_counts().sort_index().iloc[:max_frequency])
simulated_counts = pd.DataFrame(simulated_data["frequency"].value_counts().sort_index().iloc[:max_frequency])
combined_counts = model_counts.merge(simulated_counts, how="outer", left_index=True, right_index=True).fillna(0)
combined_counts.columns = labels
ax = combined_counts.plot(kind="bar", **kwargs)
plt.legend()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
return ax
|
python
|
{
"resource": ""
}
|
q23990
|
plot_calibration_purchases_vs_holdout_purchases
|
train
|
def plot_calibration_purchases_vs_holdout_purchases(
model, calibration_holdout_matrix, kind="frequency_cal", n=7, **kwargs
):
"""
Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.utils calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
x_labels = {
"frequency_cal": "Purchases in calibration period",
"recency_cal": "Age of customer at last purchase",
"T_cal": "Age of customer at the end of calibration period",
"time_since_last_purchase": "Time since user made last purchase",
}
summary = calibration_holdout_matrix.copy()
duration_holdout = summary.iloc[0]["duration_holdout"]
summary["model_predictions"] = model.conditional_expected_number_of_purchases_up_to_time(
duration_holdout, summary["frequency_cal"], summary["recency_cal"], summary["T_cal"])
if kind == "time_since_last_purchase":
summary["time_since_last_purchase"] = summary["T_cal"] - summary["recency_cal"]
ax = (
summary.groupby(["time_since_last_purchase"])[["frequency_holdout", "model_predictions"]]
.mean()
.iloc[:n]
.plot(**kwargs)
)
else:
ax = summary.groupby(kind)[["frequency_holdout", "model_predictions"]].mean().iloc[:n].plot(**kwargs)
plt.title("Actual Purchases in Holdout Period vs Predicted Purchases")
plt.xlabel(x_labels[kind])
plt.ylabel("Average of Purchases in Holdout Period")
plt.legend()
return ax
|
python
|
{
"resource": ""
}
|
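A usage sketch tying this plot to the calibration/holdout summary built earlier; it assumes summary_cal_holdout was produced by calibration_and_holdout_data as sketched above and that BetaGeoFitter is importable from lifetimes.

from lifetimes import BetaGeoFitter
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases  # assumed import path

# summary_cal_holdout: output of calibration_and_holdout_data (see the earlier sketch)
bgf = BetaGeoFitter(penalizer_coef=0.01)
bgf.fit(
    summary_cal_holdout["frequency_cal"],
    summary_cal_holdout["recency_cal"],
    summary_cal_holdout["T_cal"],
)
plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout, kind="frequency_cal", n=9)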
q23991
|
plot_frequency_recency_matrix
|
train
|
def plot_frequency_recency_matrix(
model,
T=1,
max_frequency=None,
max_recency=None,
title=None,
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot the recency-frequency matrix as a heatmap.
Plot a figure of expected transactions in T next units of time by a customer's frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
T: float, optional
Next units of time to make predictions for
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if max_frequency is None:
max_frequency = int(model.data["frequency"].max())
if max_recency is None:
max_recency = int(model.data["T"].max())
Z = np.zeros((max_recency + 1, max_frequency + 1))
for i, recency in enumerate(np.arange(max_recency + 1)):
for j, frequency in enumerate(np.arange(max_frequency + 1)):
Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(T, frequency, recency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(Z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
title = (
"Expected Number of Future Purchases for {} Unit{} of Time,".format(T, "s"[T == 1 :])
+ "\nby Frequency and Recency of a Customer"
)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax
|
python
|
{
"resource": ""
}
|
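A usage sketch assuming lifetimes.datasets.load_cdnow_summary exists with frequency, recency and T columns, as in the upstream library.

from lifetimes import BetaGeoFitter
from lifetimes.datasets import load_cdnow_summary              # assumed helper
from lifetimes.plotting import plot_frequency_recency_matrix   # assumed import path

summary = load_cdnow_summary()
bgf = BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(summary["frequency"], summary["recency"], summary["T"])

# expected purchases over the next 10 time units, by (recency, frequency)
plot_frequency_recency_matrix(bgf, T=10)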
q23992
|
plot_probability_alive_matrix
|
train
|
def plot_probability_alive_matrix(
model,
max_frequency=None,
max_recency=None,
title="Probability Customer is Alive,\nby Frequency and Recency of a Customer",
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot probability alive matrix as heatmap.
Plot a figure of the probability a customer is alive based on their
frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
z = model.conditional_probability_alive_matrix(max_frequency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax
|
python
|
{
"resource": ""
}
|
q23993
|
plot_expected_repeat_purchases
|
train
|
def plot_expected_repeat_purchases(
model,
title="Expected Number of Repeat Purchases per Customer",
xlabel="Time Since First Purchase",
ax=None,
label=None,
**kwargs
):
"""
Plot expected repeat purchases over the calibration period.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ax: matplotlib.AxesSubplot, optional
Using user axes
label: str, optional
Label for plot.
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
if plt.matplotlib.__version__ >= "1.5":
color_cycle = ax._get_lines.prop_cycler
color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)["color"])
else:
color_cycle = ax._get_lines.color_cycle
color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle))
max_T = model.data["T"].max()
times = np.linspace(0, max_T, 100)
ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, label=label, **kwargs)
times = np.linspace(max_T, 1.5 * max_T, 100)
ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, ls="--", **kwargs)
plt.title(title)
plt.xlabel(xlabel)
plt.legend(loc="lower right")
return ax
|
python
|
{
"resource": ""
}
|
q23994
|
plot_history_alive
|
train
|
def plot_history_alive(model, t, transactions, datetime_col, freq="D", start_date=None, ax=None, **kwargs):
"""
Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
the number of time units since the birth we want to draw the p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples= 'W' for weekly
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if start_date is None:
start_date = min(transactions[datetime_col])
if ax is None:
ax = plt.subplot(111)
# Get purchasing history of user
customer_history = transactions[[datetime_col]].copy()
customer_history.index = pd.DatetimeIndex(customer_history[datetime_col])
# Add transactions column
customer_history["transactions"] = 1
customer_history = customer_history.resample(freq).sum()
# plot alive_path
path = calculate_alive_path(model, transactions, datetime_col, t, freq)
path_dates = pd.date_range(start=min(transactions[datetime_col]), periods=len(path), freq=freq)
plt.plot(path_dates, path, "-", label="P_alive")
# plot buying dates
payment_dates = customer_history[customer_history["transactions"] >= 1].index
plt.vlines(payment_dates.values, ymin=0, ymax=1, colors="r", linestyles="dashed", label="purchases")
plt.ylim(0, 1.0)
plt.yticks(np.arange(0, 1.1, 0.1))
plt.xlim(start_date, path_dates[-1])
plt.legend(loc=3)
plt.ylabel("P_alive")
plt.title("History of P_alive")
return ax
|
python
|
{
"resource": ""
}
|
q23995
|
plot_incremental_transactions
|
train
|
def plot_incremental_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
t_cal,
datetime_format=None,
freq="D",
set_index_date=False,
title="Tracking Daily Transactions",
xlabel="day",
ylabel="Transactions",
ax=None,
**kwargs
):
"""
Plot a figure of the predicted and actual cumulative transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
df_cum_transactions = expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=datetime_format,
freq=freq,
set_index_date=set_index_date,
)
# get incremental from cumulative transactions
df_cum_transactions = df_cum_transactions.apply(lambda x: x - x.shift(1))
ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
if set_index_date:
x_vline = df_cum_transactions.index[int(t_cal)]
xlabel = "date"
else:
x_vline = t_cal
ax.axvline(x=x_vline, color="r", linestyle="--")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
|
python
|
{
"resource": ""
}
|
q23996
|
plot_dropout_rate_heterogeneity
|
train
|
def plot_dropout_rate_heterogeneity(
model,
suptitle="Heterogeneity in Dropout Probability",
xlabel="Dropout Probability p",
ylabel="Density",
suptitle_fontsize=14,
**kwargs
):
"""
Plot the estimated beta distribution of p.
p - (customers' probability of dropping out immediately after a transaction).
Parameters
----------
model: lifetimes model
A fitted lifetimes model, for now only for BG/NBD
suptitle: str, optional
Figure suptitle
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
a, b = model._unload_params("a", "b")
beta_mean = a / (a + b)
beta_var = a * b / ((a + b) ** 2) / (a + b + 1)
rv = stats.beta(a, b)
lim = rv.ppf(0.99)
x = np.linspace(0, lim, 100)
fig, ax = plt.subplots(1)
fig.suptitle(suptitle, fontsize=suptitle_fontsize, fontweight="bold")
ax.set_title("mean: {:.3f}, var: {:.3f}".format(beta_mean, beta_var))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.plot(x, rv.pdf(x), **kwargs)
return ax
|
python
|
{
"resource": ""
}
|
q23997
|
load_cdnow_summary_data_with_monetary_value
|
train
|
def load_cdnow_summary_data_with_monetary_value(**kwargs):
"""Load cdnow customers summary with monetary value as pandas DataFrame."""
df = load_dataset("cdnow_customers_summary_with_transactions.csv", **kwargs)
df.columns = ["customer_id", "frequency", "recency", "T", "monetary_value"]
df = df.set_index("customer_id")
return df
|
python
|
{
"resource": ""
}
|
q23998
|
GammaGammaFitter.conditional_expected_average_profit
|
train
|
def conditional_expected_average_profit(self, frequency=None, monetary_value=None):
"""
Conditional expectation of the average profit.
This method computes the conditional expectation of the average profit
per transaction for a group of one or more customers.
Parameters
----------
frequency: array_like, optional
a vector containing the customers' frequencies.
Defaults to the whole set of frequencies used for fitting the model.
monetary_value: array_like, optional
a vector containing the customers' monetary values.
Defaults to the whole set of monetary values used for
fitting the model.
Returns
-------
array_like:
The conditional expectation of the average profit per transaction
"""
if monetary_value is None:
monetary_value = self.data["monetary_value"]
if frequency is None:
frequency = self.data["frequency"]
p, q, v = self._unload_params("p", "q", "v")
# The expected average profit is a weighted average of individual
# monetary value and the population mean.
individual_weight = p * frequency / (p * frequency + q - 1)
population_mean = v * p / (q - 1)
return (1 - individual_weight) * population_mean + individual_weight * monetary_value
|
python
|
{
"resource": ""
}
|
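The return value above is a credibility-weighted average of each customer's observed spend and the population mean. A standalone sketch of that arithmetic with made-up Gamma-Gamma parameters:

# made-up Gamma-Gamma parameters and one customer's history
p, q, v = 6.25, 3.74, 15.44
frequency, monetary_value = 4, 28.0

individual_weight = p * frequency / (p * frequency + q - 1)
population_mean = v * p / (q - 1)
expected_profit = (1 - individual_weight) * population_mean + individual_weight * monetary_value
print(round(expected_profit, 2))  # observed average shrunk toward the population mean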
q23999
|
GammaGammaFitter.customer_lifetime_value
|
train
|
def customer_lifetime_value(
self, transaction_prediction_model, frequency, recency, T, monetary_value, time=12, discount_rate=0.01, freq="D"
):
"""
Return customer lifetime value.
This method computes the average lifetime value for a group of one
or more customers.
Parameters
----------
transaction_prediction_model: model
the model to predict future transactions, literature uses
Pareto/NBD models but we can also use a different model like beta-geo models
frequency: array_like
the frequency vector of customers' purchases
(denoted x in literature).
recency: array_like
the recency vector of customers' purchases
(denoted t_x in literature).
T: array_like
customers' age (time units since first purchase)
monetary_value: array_like
the monetary value vector of customer's purchases
(denoted m in literature).
time: float, optional
the lifetime expected for the user in months. Default: 12
discount_rate: float, optional
the monthly adjusted discount rate. Default: 0.01
freq: string, optional
{"D", "H", "M", "W"} for day, hour, month, week. This represents what unit of time your T is measured in.
Returns
-------
Series:
Series object with customer ids as index and the estimated customer
lifetime values as values
"""
# use the Gamma-Gamma estimates for the monetary_values
adjusted_monetary_value = self.conditional_expected_average_profit(frequency, monetary_value)
return _customer_lifetime_value(
transaction_prediction_model, frequency, recency, T, adjusted_monetary_value, time, discount_rate, freq=freq
)
|
python
|
{
"resource": ""
}
|
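An end-to-end usage sketch combining a BG/NBD transaction model with the Gamma-Gamma spend model; it uses the load_cdnow_summary_data_with_monetary_value loader shown earlier and assumes BetaGeoFitter and GammaGammaFitter are importable from lifetimes.

from lifetimes import BetaGeoFitter, GammaGammaFitter
from lifetimes.datasets import load_cdnow_summary_data_with_monetary_value

summary = load_cdnow_summary_data_with_monetary_value()
returning = summary[summary["frequency"] > 0]   # Gamma-Gamma is fit on repeat customers only

bgf = BetaGeoFitter(penalizer_coef=0.01)
bgf.fit(summary["frequency"], summary["recency"], summary["T"])

ggf = GammaGammaFitter(penalizer_coef=0.01)
ggf.fit(returning["frequency"], returning["monetary_value"])

clv = ggf.customer_lifetime_value(
    bgf,
    summary["frequency"],
    summary["recency"],
    summary["T"],
    summary["monetary_value"],
    time=12,            # months
    discount_rate=0.01,
    freq="D",
)
print(clv.sort_values(ascending=False).head())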