Dataset schema (column name, dtype, observed value lengths):

    repository_name              string   lengths 7-55
    func_path_in_repository      string   lengths 4-223
    func_name                    string   lengths 1-134
    whole_func_string            string   lengths 75-104k
    language                     string   1 distinct value
    func_code_string             string   lengths 75-104k
    func_code_tokens             list     lengths 19-28.4k
    func_documentation_string    string   lengths 1-46.9k
    func_documentation_tokens    list     lengths 1-1.97k
    split_name                   string   1 distinct value
    func_code_url                string   lengths 87-315
ASMfreaK/habitipy
habitipy/api.py
ApiNode.into
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
    """Get another leaf node with name `val` if possible"""
    if val in self.paths:
        return self.paths[val]
    if self.param:
        return self.param
    raise IndexError(_("Value {} is missing from api").format(val))
python
Get another leaf node with name `val` if possible
[ "Get", "another", "leaf", "node", "with", "name", "val", "if", "possible" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L52-L58
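For context on how `into` is used: `ApiNode` behaves like a routing tree in which literal path segments live in `self.paths` and a single parameterised segment (the `:name` style) is kept in `self.param` as a fallback. A minimal sketch of that lookup order with a stripped-down stand-in class (not the real `ApiNode`, which carries more state):

class Node:
    def __init__(self):
        self.paths = {}    # literal segment -> child node
        self.param = None  # fallback child matching any value (a ':param' segment)

    def into(self, val):
        if val in self.paths:       # literal match wins...
            return self.paths[val]
        if self.param:              # ...otherwise fall through to the param slot
            return self.param
        raise IndexError("Value {} is missing from api".format(val))

root, tasks, by_id = Node(), Node(), Node()
root.paths['tasks'] = tasks
tasks.param = by_id                        # plays the role of ':taskId'
assert root.into('tasks') is tasks
assert tasks.into('any-task-id') is by_id  # no literal match, param matches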
ASMfreaK/habitipy
habitipy/api.py
ApiNode.can_into
def can_into(self, val: str) -> bool:
    """Determine if there is a leaf node with name `val`"""
    return val in self.paths or (self.param and self.param_name == val)
python
Determine if there is a leaf node with name `val`
[ "Determine", "if", "there", "is", "a", "leaf", "node", "with", "name", "val" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L60-L62
ASMfreaK/habitipy
habitipy/api.py
ApiNode.place
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
    """place a leaf node"""
    if part.startswith(':'):
        if self.param and self.param != part:
            err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
            raise ParamAlreadyExist(err.format(part, self=self))
        self.param = val
        self.param_name = part
        return val
    self.paths[part] = val
    return val
python
place a leaf node
[ "place", "a", "leaf", "node" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L64-L74
ASMfreaK/habitipy
habitipy/api.py
ApiNode.keys
def keys(self) -> Iterator[str]:
    """return all possible paths one can take from this ApiNode"""
    if self.param:
        yield self.param_name
    yield from self.paths.keys()
python
return all possible paths one can take from this ApiNode
[ "return", "all", "possible", "paths", "one", "can", "take", "from", "this", "ApiNode" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L76-L80
ASMfreaK/habitipy
habitipy/api.py
ApiEndpoint.add_param
def add_param(self, group=None, type_='', field='', description=''):
    """parse and append a param"""
    group = group or '(Parameter)'
    group = group.lower()[1:-1]
    p = Param(type_, field, description)
    self.params[group][p.field] = p
python
parse and append a param
[ "parse", "and", "append", "a", "param" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L368-L373
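The `group.lower()[1:-1]` step above is easy to misread: apidoc group tags arrive wrapped in parentheses (e.g. `(Parameter)`, `(Query)`), and the slice simply drops the first and last character after lowercasing. A quick illustration with made-up tags:

for tag in ['(Parameter)', '(Query)', '(Path)']:
    print(tag.lower()[1:-1])
# parameter
# query
# path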
ASMfreaK/habitipy
habitipy/api.py
ApiEndpoint.add_success
def add_success(self, group=None, type_='', field='', description=''):
    """parse and append a success data param"""
    group = group or '(200)'
    group = int(group.lower()[1:-1])
    self.retcode = self.retcode or group
    if group != self.retcode:
        raise ValueError('Two or more retcodes!')
    type_ = type_ or '{String}'
    p = Param(type_, field, description)
    self.params['responce'][p.field] = p
python
parse and append a success data param
[ "parse", "and", "append", "a", "success", "data", "param" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L375-L384
ASMfreaK/habitipy
habitipy/api.py
ApiEndpoint.render_docstring
def render_docstring(self):
    """make a nice docstring for ipython"""
    res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
    if self.params:
        for group, params in self.params.items():
            res += '\n' + group + ' params:\n'
            for param in params.values():
                res += param.render_docstring()
    return res
python
make a nice docstring for ipython
[ "make", "a", "nice", "docstring", "for", "ipython" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L389-L397
ASMfreaK/habitipy
habitipy/api.py
Param.validate
def validate(self, obj):
    """check if obj has this api param"""
    if self.path:
        for i in self.path:
            obj = obj[i]
    obj = obj[self.field]
    raise NotImplementedError('Validation is not implemented yet')
python
check if obj has this api param
[ "check", "if", "obj", "has", "this", "api", "param" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L439-L446
ASMfreaK/habitipy
habitipy/api.py
Param.render_docstring
def render_docstring(self):
    """make a nice docstring for ipython"""
    default = (' = ' + str(self.default)) if self.default else ''
    opt = 'optional' if self.is_optional else ''
    can_be = ' '.join(self.possible_values) if self.possible_values else ''
    can_be = 'one of [{}]'.format(can_be) if can_be else ''
    type_ = 'of type "' + str(self.type) + '"'
    res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
    return res.replace('  ', ' ').lstrip()
python
make a nice docstring for ipython
[ "make", "a", "nice", "docstring", "for", "ipython" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L448-L456
ASMfreaK/habitipy
habitipy/cli.py
is_uuid
def is_uuid(u):
    """validator for plumbum prompt"""
    if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
        return u
    return False
python
validator for plumbum prompt
[ "validator", "for", "plumbum", "prompt" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L48-L52
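One behavioural detail worth knowing about `is_uuid`: `uuid.UUID(u)` raises `ValueError` for strings it cannot parse at all, so the validator returns `False` only for parseable-but-non-canonical forms and raises otherwise. A small demonstration (the sample UUID is made up):

import uuid

def is_uuid(u):
    """validator for plumbum prompt"""
    if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
        return u
    return False

print(is_uuid('12345678-1234-1234-1234-1234567890ab'))            # returned unchanged
print(is_uuid('urn:uuid:12345678-1234-1234-1234-1234567890ab'))   # False: parses, but not canonical
# is_uuid('not-a-uuid') raises ValueError, since uuid.UUID() cannot parse it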
ASMfreaK/habitipy
habitipy/cli.py
load_conf
def load_conf(configfile, config=None):
    """Get authentication data from the AUTH_CONF file."""
    default_login = 'your-login-for-api-here'
    default_password = 'your-password-for-api-here'
    config = config or {}
    configfile = local.path(configfile)
    if not configfile.exists():
        configfile.dirname.mkdir()
    else:
        assert_secure_file(configfile)
    with secure_filestore(), cli.Config(configfile) as conf:
        config['url'] = conf.get('habitipy.url', 'https://habitica.com')
        config['login'] = conf.get('habitipy.login', default_login)
        config['password'] = conf.get('habitipy.password', default_password)
        if config['login'] == default_login or config['password'] == default_password:
            if cli.terminal.ask(
                    _("""Your creditentials are invalid. Do you want to enter them now?"""),
                    default=True):
                msg = _("""
                You can get your login information at
                https://habitica.com/#/options/settings/api
                Both your user id and API token should look like this:
                xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
                where 'x' is a number between 0-9 or a character a-f.
                """)
                print(dedent(msg))
                msg = _("""Please enter your login (user ID)""")
                config['login'] = cli.terminal.prompt(msg, validator=is_uuid)
                msg = _("""Please enter your password (API token)""")
                config['password'] = cli.terminal.prompt(msg, validator=is_uuid)
                conf.set('habitipy.login', config['login'])
                conf.set('habitipy.password', config['password'])
                print(dedent(_("""
                Your creditentials are securely stored in
                {configfile}
                You can edit that file later if you need.
                """)).format(configfile=configfile))
        config['show_numbers'] = conf.get('habitipy.show_numbers', 'y')
        config['show_numbers'] = config['show_numbers'] in YES_ANSWERS
        config['show_style'] = conf.get('habitipy.show_style', 'wide')
        if config['show_style'] not in CHECK_MARK_STYLES:
            config['show_style'] = 'wide'
    return config
python
Get authentication data from the AUTH_CONF file.
[ "Get", "authentication", "data", "from", "the", "AUTH_CONF", "file", "." ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L55-L97
ASMfreaK/habitipy
habitipy/cli.py
get_content
def get_content(api, rebuild_cache=False):
    """get content from server or cache"""
    if hasattr(get_content, 'cache') and not rebuild_cache:
        return get_content.cache
    if not os.path.exists(CONTENT_JSON) or rebuild_cache:
        import locale
        content_endpoint = api.content.get
        # pylint: disable=protected-access
        try_langs = []
        try:
            lang = get_translation_for('habitipy').info()['language']
            try_langs.append(lang)
        except KeyError:
            pass
        try:
            loc = locale.getdefaultlocale()[0]
            if loc:
                try_langs.append(loc)
                try_langs.append(loc[:2])
        except IndexError:
            pass
        server_lang = content_endpoint._node.params['query']['language']
        # handle something like 'ru_RU' not available - only 'ru'
        for lang in try_langs:
            if lang in server_lang.possible_values:
                loc = {'language': lang}
                break
        else:
            loc = {}
        get_content.cache = content = content_endpoint(**loc)
        with open(CONTENT_JSON, 'w') as f:
            json.dump(content, f)
        return content
    try:
        with open(CONTENT_JSON) as f:
            get_content.cache = content = json.load(f)
        return content
    except JSONDecodeError:
        return get_content(api, rebuild_cache=True)
python
get content from server or cache
[ "get", "content", "from", "server", "or", "cache" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L126-L164
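`get_content` combines two cache layers: a per-process cache stored as an attribute on the function object itself (`get_content.cache`) and a per-machine JSON file, with a recursive rebuild when the file is corrupt. A stripped-down sketch of just that caching skeleton, with a hypothetical `fetch` callable standing in for the API endpoint and `CACHE_FILE` standing in for `CONTENT_JSON`:

import json
import os

CACHE_FILE = 'content.json'  # stand-in for CONTENT_JSON

def get_data(fetch, rebuild_cache=False):
    if hasattr(get_data, 'cache') and not rebuild_cache:
        return get_data.cache                     # in-memory hit
    if not os.path.exists(CACHE_FILE) or rebuild_cache:
        get_data.cache = data = fetch()           # ask the "server"
        with open(CACHE_FILE, 'w') as f:
            json.dump(data, f)
        return data
    try:
        with open(CACHE_FILE) as f:
            get_data.cache = data = json.load(f)  # disk hit
        return data
    except json.JSONDecodeError:                  # corrupt cache file: rebuild
        return get_data(fetch, rebuild_cache=True)

print(get_data(lambda: {'potion': {'key': 'potion'}}))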
ASMfreaK/habitipy
habitipy/cli.py
get_additional_rewards
def get_additional_rewards(api):
    """returns list of non-user rewards (potion, armoire, gear)"""
    c = get_content(api)
    tasks = [c[i] for i in ['potion', 'armoire']]
    tasks.extend(api.user.inventory.buy.get())
    for task in tasks:
        task['id'] = task['alias'] = task['key']
    return tasks
python
returns list of non-user rewards (potion, armoire, gear)
[ "returns", "list", "of", "non", "-", "user", "rewards", "(", "potion", "armoire", "gear", ")" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L365-L372
ASMfreaK/habitipy
habitipy/cli.py
ScoreInfo.color
def color(cls, value):
    """task value/score color"""
    index = bisect(cls.breakpoints, value)
    return colors.fg(cls.colors_[index])
python
task value/score color
[ "task", "value", "/", "score", "color" ]
train
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L289-L292
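`bisect` here implements breakpoint-based bucketing: n breakpoints split the number line into n+1 intervals, so `colors_` must hold one more entry than `breakpoints`. An illustration with invented breakpoints and color names (the real class defines its own):

from bisect import bisect

breakpoints = [-10, -1, 1, 10]                          # 4 breakpoints...
colors_ = ['red', 'orange', 'yellow', 'green', 'cyan']  # ...5 buckets

for value in [-20, -5, 0, 5, 50]:
    print(value, '->', colors_[bisect(breakpoints, value)])
# -20 -> red, -5 -> orange, 0 -> yellow, 5 -> green, 50 -> cyan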
maxalbert/tohu
tohu/v6/custom_generator/custom_generator.py
augment_init_method
def augment_init_method(cls):
    """
    Replace the existing cls.__init__() method with a new one
    which also initialises the field generators and similar bookkeeping.
    """
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        super(CustomGenerator, self).__init__()  # TODO: does this behave correctly with longer inheritance chains?
        orig_init(self, *args, **kwargs)
        self.orig_args = args
        self.orig_kwargs = kwargs

        self.ns_gen_templates = TohuNamespace()
        self.ns_gen_templates.update_from_dict(self.__class__.__dict__)
        self.ns_gen_templates.update_from_dict(self.__dict__)
        self.ns_gen_templates.set_owner(self.__class__)
        self._mark_field_generator_templates()

        self.ns_gens = self.ns_gen_templates.spawn()
        self.ns_gens.set_owner(self)
        self._update_namespace_with_field_generators()
        self._set_field_names()
        self._set_tohu_items_name()
        self._set_tohu_items_cls()

    cls.__init__ = new_init
python
Replace the existing cls.__init__() method with a new one which also initialises the field generators and similar bookkeeping.
[ "Replace", "the", "existing", "cls", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "also", "initialises", "the", "field", "generators", "and", "similar", "bookkeeping", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/custom_generator/custom_generator.py#L9-L39
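Stripped of the tohu-specific bookkeeping, `augment_init_method` is the classic "wrap `__init__` in place" pattern: capture the original method, close over it in a replacement that calls it and then does extra work, and reassign. A self-contained sketch of just that mechanism (names are illustrative, not from tohu):

def augment_init(cls):
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        orig_init(self, *args, **kwargs)  # original behaviour first
        self.orig_args = args             # then the extra bookkeeping
        self.orig_kwargs = kwargs

    cls.__init__ = new_init
    return cls

@augment_init
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Point(1, y=2)
assert p.orig_args == (1,) and p.orig_kwargs == {'y': 2}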
maxalbert/tohu
tohu/v2/custom_generator.py
find_field_generators
def find_field_generators(obj):
    """
    Return dictionary with the names and instances of
    all tohu.BaseGenerator occurring in the given
    object's class & instance namespaces.
    """
    cls_dict = obj.__class__.__dict__
    obj_dict = obj.__dict__
    #debug_print_dict(cls_dict, 'cls_dict')
    #debug_print_dict(obj_dict, 'obj_dict')

    field_gens = {}
    add_field_generators(field_gens, cls_dict)
    add_field_generators(field_gens, obj_dict)

    return field_gens
python
Return dictionary with the names and instances of all tohu.BaseGenerator occurring in the given object's class & instance namespaces.
[ "Return", "dictionary", "with", "the", "names", "and", "instances", "of", "all", "tohu", ".", "BaseGenerator", "occurring", "in", "the", "given", "object", "s", "class", "&", "instance", "namespaces", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L20-L36
maxalbert/tohu
tohu/v2/custom_generator.py
set_item_class_name
def set_item_class_name(cls_obj):
    """
    Return the first part of the class name of this custom generator.
    This will be used for the class name of the items produced by this
    generator.

    Examples:
        FoobarGenerator -> Foobar
        QuuxGenerator -> Quux
    """
    if '__tohu__items__name__' in cls_obj.__dict__:
        logger.debug(f"Using item class name '{cls_obj.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')")
    else:
        m = re.match('^(.*)Generator$', cls_obj.__name__)
        if m is not None:
            cls_obj.__tohu_items_name__ = m.group(1)
            logger.debug(f"Using item class name '{cls_obj.__tohu_items_name__}' (derived from custom generator name)")
        else:
            raise ValueError("Cannot derive class name for items to be produced by custom generator. "
                             "Please set '__tohu_items_name__' at the top of the custom generator's "
                             "definition or change its name so that it ends in '...Generator'")
python
Return the first part of the class name of this custom generator. This will be used for the class name of the items produced by this generator. Examples: FoobarGenerator -> Foobar QuuxGenerator -> Quux
[ "Return", "the", "first", "part", "of", "the", "class", "name", "of", "this", "custom", "generator", ".", "This", "will", "be", "used", "for", "the", "class", "name", "of", "the", "items", "produced", "by", "this", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L39-L59
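Two details of `set_item_class_name` are worth flagging. First, the guard checks the key `'__tohu__items__name__'` (doubled underscores) while every other reference uses `__tohu_items_name__`, which looks like a typo carried over from the source. Second, the name derivation itself is a plain anchored regex; its behaviour on the documented examples:

import re

for name in ['FoobarGenerator', 'QuuxGenerator', 'NotMatching']:
    m = re.match('^(.*)Generator$', name)
    print(name, '->', m.group(1) if m else '<no match: ValueError raised>')
# FoobarGenerator -> Foobar
# QuuxGenerator -> Quux
# NotMatching -> <no match: ValueError raised>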
maxalbert/tohu
tohu/v2/custom_generator.py
make_item_class_for_custom_generator
def make_item_class_for_custom_generator(obj):
    """
    obj:
        The custom generator instance for which to create an item class
    """
    clsname = obj.__tohu_items_name__
    attr_names = obj.field_gens.keys()
    return make_item_class(clsname, attr_names)
python
obj: The custom generator instance for which to create an item class
[ "obj", ":", "The", "custom", "generator", "instance", "for", "which", "to", "create", "an", "item", "class" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L108-L115
maxalbert/tohu
tohu/v2/custom_generator.py
add_new_init_method
def add_new_init_method(obj):
    """
    Replace the existing obj.__init__() method with a new one
    which calls the original one and in addition performs the
    following actions:

    (1) Finds all instances of tohu.BaseGenerator in the namespace
        and collects them in the dictionary `self.field_gens`.
    (2) ..to do..
    """
    orig_init = obj.__init__

    def new_init(self, *args, **kwargs):
        logger.debug(f"Initialising new {self}")

        # Call original __init__ function to ensure we pick up
        # any tohu generators that are defined there.
        orig_init(self, *args, **kwargs)

        #
        # Find field generator templates and attach spawned copies
        #
        field_gens_templates = find_field_generators(self)
        logger.debug(f'Found {len(field_gens_templates)} field generator template(s):')
        debug_print_dict(field_gens_templates)

        def find_orig_parent(dep_gen, origs):
            """
            Find name and instance of the parent of the dependent
            generator `dep_gen` amongst the generators in `origs`.
            """
            for parent_name, parent in origs.items():
                if dep_gen.parent is parent:
                    return parent_name, parent
            raise RuntimeError(f"Parent of dependent generator {dep_gen} not defined in the same custom generator")

        logger.debug('Spawning field generator templates...')
        origs = {}
        spawned = {}
        for name, gen in field_gens_templates.items():
            if isinstance(gen, IndependentGenerator) and gen in origs.values():
                logger.debug(f'Cloning generator {name}={gen} because it is an alias for an existing generator')
                gen = gen.clone()

            if isinstance(gen, IndependentGenerator):
                origs[name] = gen
                spawned[name] = gen._spawn()
                logger.debug(f'Spawning generator {gen}. New spawn: {spawned[name]}')
            elif isinstance(gen, DependentGenerator):
                orig_parent_name, orig_parent = find_orig_parent(gen, origs)
                new_parent = spawned[orig_parent_name]
                #spawned[name] = new_parent.clone()
                spawned[name] = gen._spawn_and_reattach_parent(new_parent)
            else:
                pass

        self.field_gens = spawned
        self.__dict__.update(self.field_gens)
        logger.debug(f'Field generators attached to custom generator instance:')
        debug_print_dict(self.field_gens)

        #
        # Add seed generator
        #
        self.seed_generator = SeedGenerator()

        #
        # Create class for the items produced by this generator
        #
        self.__class__.item_cls = make_item_class_for_custom_generator(self)

    obj.__init__ = new_init
python
Replace the existing obj.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do..
[ "Replace", "the", "existing", "obj", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "calls", "the", "original", "one", "and", "in", "addition", "performs", "the", "following", "actions", ":" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L118-L192
maxalbert/tohu
tohu/v2/custom_generator.py
add_new_reset_method
def add_new_reset_method(obj):
    """
    Attach a new `reset()` method to `obj` which resets the internal
    seed generator of `obj` and then resets each of its constituent
    field generators found in `obj.field_gens`.
    """
    #
    # Create and assign automatically generated reset() method
    #

    def new_reset(self, seed=None):
        logger.debug(f'[EEE] Inside automatically generated reset() method for {self} (seed={seed})')

        if seed is not None:
            self.seed_generator.reset(seed)
            for name, gen in self.field_gens.items():
                next_seed = next(self.seed_generator)
                gen.reset(next_seed)

        # TODO: the following should be covered by the newly added
        # reset() method in IndependentGeneratorMeta. However, for
        # some reason we can't call this via the usual `orig_reset()`
        # pattern, so we have to duplicate this here. Not ideal...
        for c in self._dependent_generators:
            c.reset_dependent_generator(seed)

        return self

    obj.reset = new_reset
python
Attach a new `reset()` method to `obj` which resets the internal seed generator of `obj` and then resets each of its constituent field generators found in `obj.field_gens`.
[ "Attach", "a", "new", "reset", "()", "method", "to", "obj", "which", "resets", "the", "internal", "seed", "generator", "of", "obj", "and", "then", "resets", "each", "of", "its", "constituent", "field", "generators", "found", "in", "obj", ".", "field_gens", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L195-L225
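The reset scheme above fans one master seed out into per-field seeds: resetting the custom generator reseeds `self.seed_generator`, and each field generator then draws its own `next_seed` from that stream, making the whole hierarchy reproducible. A minimal stand-in for such a seed stream (the real `SeedGenerator` lives in tohu; this just shows the idea):

import random

class SeedStream:
    """Reproducible stream of child seeds derived from one master seed."""
    def __init__(self):
        self._rng = random.Random()

    def reset(self, seed):
        self._rng.seed(seed)

    def __next__(self):
        return self._rng.getrandbits(32)

seeds = SeedStream()
seeds.reset(12345)
print([next(seeds) for _ in range(3)])  # identical child seeds on every run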
maxalbert/tohu
tohu/v2/custom_generator.py
add_new_next_method
def add_new_next_method(obj):
    """
    TODO
    """

    def new_next(self):
        field_values = [next(g) for g in self.field_gens.values()]
        return self.item_cls(*field_values)

    obj.__next__ = new_next
python
TODO
[ "TODO" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L228-L237
maxalbert/tohu
tohu/v2/custom_generator.py
add_new_spawn_method
def add_new_spawn_method(obj):
    """
    TODO
    """

    def new_spawn(self):
        # TODO/FIXME: Check that this does the right thing:
        # (i) the spawned generator is independent of the original one (i.e. they can be reset independently without altering the other's behaviour)
        # (ii) ensure that it also works if this custom generator's __init__ requires additional arguments
        new_instance = self.__class__()
        return new_instance

    obj._spawn = new_spawn
python
TODO
[ "TODO" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L240-L252
maxalbert/tohu
tohu/v6/set_special_methods.py
check_that_operator_can_be_applied_to_produces_items
def check_that_operator_can_be_applied_to_produces_items(op, g1, g2):
    """
    Helper function to check that the operator `op` can be applied
    to items produced by g1 and g2.
    """
    g1_tmp_copy = g1.spawn()
    g2_tmp_copy = g2.spawn()
    sample_item_1 = next(g1_tmp_copy)
    sample_item_2 = next(g2_tmp_copy)
    try:
        op(sample_item_1, sample_item_2)
    except TypeError:
        raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} "
                        f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)")
python
Helper function to check that the operator `op` can be applied to items produced by g1 and g2.
[ "Helper", "function", "to", "check", "that", "the", "operator", "op", "can", "be", "applied", "to", "items", "produced", "by", "g1", "and", "g2", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/set_special_methods.py#L16-L28
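The pattern in this helper is "spawn and probe": sampling from spawned copies leaves the state of `g1` and `g2` untouched, and applying `op` to one sample item from each turns a latent `TypeError` at generation time into an early, descriptive one. The probing step itself reduces to something like this (with plain values standing in for sampled items):

import operator

def op_applies(op, sample_1, sample_2):
    try:
        op(sample_1, sample_2)
        return True
    except TypeError:
        return False

print(op_applies(operator.add, 1, 2))      # True
print(op_applies(operator.add, 1, 'two'))  # False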
maxalbert/tohu
tohu/v2/base.py
add_new_init_method
def add_new_init_method(cls):
    """
    Replace the existing cls.__init__() method with a new one
    which also initialises the _dependent_generators attribute
    to an empty list.
    """
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        self._dependent_generators = []
        orig_init(self, *args, **kwargs)

    cls.__init__ = new_init
python
Replace the existing cls.__init__() method with a new one which also initialises the _dependent_generators attribute to an empty list.
[ "Replace", "the", "existing", "cls", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "also", "initialises", "the", "_dependent_generators", "attribute", "to", "an", "empty", "list", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base.py#L113-L125
maxalbert/tohu
tohu/v2/base.py
add_new_repr_method
def add_new_repr_method(cls):
    """
    Add default __repr__ method in case no user-defined one is present.
    """
    if isinstance(cls.__repr__, WrapperDescriptorType):
        cls.__repr__ = lambda self: f"<{self.__class__.__name__}, id={hex(id(self))}>"
    else:
        # Keep the user-defined __repr__ method
        pass
python
Add default __repr__ method in case no user-defined one is present.
[ "Add", "default", "__repr__", "method", "in", "case", "no", "user", "-", "defined", "one", "is", "present", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base.py#L128-L137
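The `WrapperDescriptorType` test works because a class that never defines `__repr__` inherits `object.__repr__`, which is a C-level slot wrapper, whereas a user-defined `__repr__` is an ordinary function. Demonstrating the distinction:

from types import WrapperDescriptorType

class Plain:
    pass

class Custom:
    def __repr__(self):
        return 'Custom()'

print(isinstance(Plain.__repr__, WrapperDescriptorType))   # True: inherited slot wrapper
print(isinstance(Custom.__repr__, WrapperDescriptorType))  # False: user-defined function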
maxalbert/tohu
tohu/v2/base.py
add_new_reset_method
def add_new_reset_method(cls):
    """
    Replace existing cls.reset() method with a new one which
    also calls reset() on any clones.
    """
    orig_reset = cls.reset

    def new_reset(self, seed=None):
        logger.debug(f"Calling reset() on {self} (seed={seed})")
        orig_reset(self, seed)
        for c in self._dependent_generators:
            c.reset_dependent_generator(seed)
        return self

    cls.reset = new_reset
python
Replace existing cls.reset() method with a new one which also calls reset() on any clones.
[ "Replace", "existing", "cls", ".", "reset", "()", "method", "with", "a", "new", "one", "which", "also", "calls", "reset", "()", "on", "any", "clones", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base.py#L141-L155
maxalbert/tohu
tohu/v2/base.py
UltraBaseGenerator.generate
def generate(self, N, *, seed=None, progressbar=False):
    """
    Return sequence of `N` elements.

    If `seed` is not None, the generator is reset
    using this seed before generating the elements.
    """
    if seed is not None:
        self.reset(seed)
    items = islice(self, N)
    if progressbar:
        items = tqdm(items, total=N)
    item_list = [x for x in items]
    #logger.warning("TODO: initialise ItemList with random seed!")
    return ItemList(item_list, N)
python
Return sequence of `N` elements. If `seed` is not None, the generator is reset using this seed before generating the elements.
[ "Return", "sequence", "of", "N", "elements", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base.py#L34-L50
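Hedged usage sketch for generate(), assuming some concrete tohu generator g (the variable name is hypothetical): the same seed reproduces the same ItemList, and progressbar=True wraps the run in tqdm.

items = g.generate(1000, seed=12345)               # reproducible: resets g first
items_again = g.generate(1000, seed=12345)         # identical sequence
big_run = g.generate(5_000_000, progressbar=True)  # long run with a tqdm bar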
maxalbert/tohu
tohu/v2/base_NEW.py
TohuUltraBaseGenerator.tohu_id
def tohu_id(self): """ Return (truncated) md5 hash representing this generator. We truncate the hash simply for readability, as this is purely intended for debugging purposes and the risk of any collisions will be negligible. """ myhash = hashlib.md5(str(id(self)).encode()).hexdigest() return myhash[:12]
python
def tohu_id(self): """ Return (truncated) md5 hash representing this generator. We truncate the hash simply for readability, as this is purely intended for debugging purposes and the risk of any collisions will be negligible. """ myhash = hashlib.md5(str(id(self)).encode()).hexdigest() return myhash[:12]
[ "def", "tohu_id", "(", "self", ")", ":", "myhash", "=", "hashlib", ".", "md5", "(", "str", "(", "id", "(", "self", ")", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "return", "myhash", "[", ":", "12", "]" ]
Return (truncated) md5 hash representing this generator. We truncate the hash simply for readability, as this is purely intended for debugging purposes and the risk of any collisions will be negligible.
[ "Return", "(", "truncated", ")", "md5", "hash", "representing", "this", "generator", ".", "We", "truncate", "the", "hash", "simply", "for", "readability", "as", "this", "is", "purely", "intended", "for", "debugging", "purposes", "and", "the", "risk", "of", "any", "collisions", "will", "be", "negligible", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base_NEW.py#L122-L130
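The digest logic above is plain hashlib; the same truncated-md5 idea in isolation (short_id is an invented name for illustration):

import hashlib

def short_id(obj, length=12):
    # md5 of the object's id(), truncated purely for readability
    return hashlib.md5(str(id(obj)).encode()).hexdigest()[:length]

print(short_id(object()))   # e.g. '6f1ed002ab5a'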
maxalbert/tohu
tohu/v4/custom_generator.py
CustomGenerator._set_item_class
def _set_item_class(self): """ cls: The custom generator class for which to create an item-class """ clsname = self.__tohu_items_name__ self.item_cls = make_item_class(clsname, self.field_names)
python
def _set_item_class(self): """ cls: The custom generator class for which to create an item-class """ clsname = self.__tohu_items_name__ self.item_cls = make_item_class(clsname, self.field_names)
[ "def", "_set_item_class", "(", "self", ")", ":", "clsname", "=", "self", ".", "__tohu_items_name__", "self", ".", "item_cls", "=", "make_item_class", "(", "clsname", ",", "self", ".", "field_names", ")" ]
cls: The custom generator class for which to create an item-class
[ "cls", ":", "The", "custom", "generator", "class", "for", "which", "to", "create", "an", "item", "-", "class" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/custom_generator.py#L130-L136
maxalbert/tohu
tohu/v4/custom_generator.py
CustomGenerator._find_field_generator_templates
def _find_field_generator_templates(self): """ Return a dictionary of the form {name: field_generator} containing all tohu generators defined in the class and instance namespace of this custom generator. """ field_gen_templates = {} # Extract field generators from class dict for name, g in self.__class__.__dict__.items(): if isinstance(g, TohuBaseGenerator): field_gen_templates[name] = g.set_tohu_name(f'{name} (TPL)') # Extract field generators from instance dict for name, g in self.__dict__.items(): if isinstance(g, TohuBaseGenerator): field_gen_templates[name] = g.set_tohu_name(f'{name} (TPL)') return field_gen_templates
python
def _find_field_generator_templates(self): """ Return a dictionary of the form {name: field_generator} containing all tohu generators defined in the class and instance namespace of this custom generator. """ field_gen_templates = {} # Extract field generators from class dict for name, g in self.__class__.__dict__.items(): if isinstance(g, TohuBaseGenerator): field_gen_templates[name] = g.set_tohu_name(f'{name} (TPL)') # Extract field generators from instance dict for name, g in self.__dict__.items(): if isinstance(g, TohuBaseGenerator): field_gen_templates[name] = g.set_tohu_name(f'{name} (TPL)') return field_gen_templates
[ "def", "_find_field_generator_templates", "(", "self", ")", ":", "field_gen_templates", "=", "{", "}", "# Extract field generators from class dict", "for", "name", ",", "g", "in", "self", ".", "__class__", ".", "__dict__", ".", "items", "(", ")", ":", "if", "isinstance", "(", "g", ",", "TohuBaseGenerator", ")", ":", "field_gen_templates", "[", "name", "]", "=", "g", ".", "set_tohu_name", "(", "f'{name} (TPL)'", ")", "# Extract field generators from instance dict", "for", "name", ",", "g", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "isinstance", "(", "g", ",", "TohuBaseGenerator", ")", ":", "field_gen_templates", "[", "name", "]", "=", "g", ".", "set_tohu_name", "(", "f'{name} (TPL)'", ")", "return", "field_gen_templates" ]
Return a dictionary of the form {name: field_generator} containing all tohu generators defined in the class and instance namespace of this custom generator.
[ "Return", "a", "dictionary", "of", "the", "form", "{", "name", ":", "field_generator", "}", "containing", "all", "tohu", "generators", "defined", "in", "the", "class", "and", "instance", "namespace", "of", "this", "custom", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/custom_generator.py#L142-L160
maxalbert/tohu
tohu/v4/item_list.py
_generate_csv_header_line
def _generate_csv_header_line(*, header_names, header_prefix='', header=True, sep=',', newline='\n'): """ Helper function to generate a CSV header line depending on the combination of arguments provided. """ if isinstance(header, str): # user-provided header line header_line = header + newline else: if not (header is None or isinstance(header, bool)): raise ValueError(f"Invalid value for argument `header`: {header}") else: if header: header_line = header_prefix + sep.join(header_names) + newline else: header_line = "" return header_line
python
def _generate_csv_header_line(*, header_names, header_prefix='', header=True, sep=',', newline='\n'): """ Helper function to generate a CSV header line depending on the combination of arguments provided. """ if isinstance(header, str): # user-provided header line header_line = header + newline else: if not (header is None or isinstance(header, bool)): raise ValueError(f"Invalid value for argument `header`: {header}") else: if header: header_line = header_prefix + sep.join(header_names) + newline else: header_line = "" return header_line
[ "def", "_generate_csv_header_line", "(", "*", ",", "header_names", ",", "header_prefix", "=", "''", ",", "header", "=", "True", ",", "sep", "=", "','", ",", "newline", "=", "'\\n'", ")", ":", "if", "isinstance", "(", "header", ",", "str", ")", ":", "# user-provided header line", "header_line", "=", "header", "+", "newline", "else", ":", "if", "not", "(", "header", "is", "None", "or", "isinstance", "(", "header", ",", "bool", ")", ")", ":", "raise", "ValueError", "(", "f\"Invalid value for argument `header`: {header}\"", ")", "else", ":", "if", "header", ":", "header_line", "=", "header_prefix", "+", "sep", ".", "join", "(", "header_names", ")", "+", "newline", "else", ":", "header_line", "=", "\"\"", "return", "header_line" ]
Helper function to generate a CSV header line depending on the combination of arguments provided.
[ "Helper", "function", "to", "generate", "a", "CSV", "header", "line", "depending", "on", "the", "combination", "of", "arguments", "provided", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L14-L29
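A quick check of the three header modes accepted by _generate_csv_header_line (auto-generated with a prefix, user-supplied string, suppressed), reusing the function above:

names = ['id', 'name', 'score']
print(repr(_generate_csv_header_line(header_names=names, header=True, header_prefix='#')))
# -> '#id,name,score\n'
print(repr(_generate_csv_header_line(header_names=names, header='my,own,header')))
# -> 'my,own,header\n'
print(repr(_generate_csv_header_line(header_names=names, header=False)))
# -> ''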
maxalbert/tohu
tohu/v4/item_list.py
_extract_schema_if_given
def _extract_schema_if_given(table_name): """ Return a pair (schema, table) derived from the given `table_name` (anything before the first '.' if the name contains one; otherwise the return value of `schema` is None). Examples: >>> _extract_schema_if_given('some_schema.my_table') ('some_schema', 'my_table') >>> _extract_schema_if_given('my_awesome_table') (None, 'my_awesome_table') """ pattern = r'^(([^.]+)\.)?(.+)$' m = re.match(pattern, table_name) schema, table_name = m.group(2), m.group(3) return schema, table_name
python
def _extract_schema_if_given(table_name): """ Return a pair (schema, table) derived from the given `table_name` (anything before the first '.' if the name contains one; otherwise the return value of `schema` is None). Examples: >>> _extract_schema_if_given('some_schema.my_table') ('some_schema', 'my_table') >>> _extract_schema_if_given('my_awesome_table') (None, 'my_awesome_table') """ pattern = r'^(([^.]+)\.)?(.+)$' m = re.match(pattern, table_name) schema, table_name = m.group(2), m.group(3) return schema, table_name
[ "def", "_extract_schema_if_given", "(", "table_name", ")", ":", "pattern", "=", "r'^(([^.]+)\\.)?(.+)$'", "m", "=", "re", ".", "match", "(", "pattern", ",", "table_name", ")", "schema", ",", "table_name", "=", "m", ".", "group", "(", "2", ")", ",", "m", ".", "group", "(", "3", ")", "return", "schema", ",", "table_name" ]
Return a pair (schema, table) derived from the given `table_name` (anything before the first '.' if the name contains one; otherwise the return value of `schema` is None). Examples: >>> _extract_schema_if_given('some_schema.my_table') ('some_schema', 'my_table') >>> _extract_schema_if_given('my_awesome_table') (None, 'my_awesome_table')
[ "Return", "a", "pair", "(", "schema", "table", ")", "derived", "from", "the", "given", "table_name", "(", "anything", "before", "the", "first", ".", "if", "the", "name", "contains", "one", ";", "otherwise", "the", "return", "value", "of", "schema", "is", "None", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L32-L49

maxalbert/tohu
tohu/v4/item_list.py
ItemList.to_df
def to_df(self, fields=None): """ Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} """ if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} if fields is None: # New version (much faster!, but needs cleaning up) import attr colnames = list(self.items[0].as_dict().keys()) # hack! the field names should perhaps be passed in during initialisation? return pd.DataFrame([attr.astuple(x) for x in self.items], columns=colnames) # Old version: #return pd.DataFrame([x.to_series() for x in self.items]) else: # New version (much faster!) colnames = list(fields.keys()) attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] return pd.DataFrame([tuple(func(x) for func in attr_getters) for x in self.items], columns=colnames)
python
def to_df(self, fields=None): """ Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} """ if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} if fields is None: # New version (much faster!, but needs cleaning up) import attr colnames = list(self.items[0].as_dict().keys()) # hack! the field names should perhaps be passed in during initialisation? return pd.DataFrame([attr.astuple(x) for x in self.items], columns=colnames) # Old version: #return pd.DataFrame([x.to_series() for x in self.items]) else: # New version (much faster!) colnames = list(fields.keys()) attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] return pd.DataFrame([tuple(func(x) for func in attr_getters) for x in self.items], columns=colnames)
[ "def", "to_df", "(", "self", ",", "fields", "=", "None", ")", ":", "if", "isinstance", "(", "fields", ",", "(", "list", ",", "tuple", ")", ")", ":", "fields", "=", "{", "name", ":", "name", "for", "name", "in", "fields", "}", "if", "fields", "is", "None", ":", "# New version (much faster!, but needs cleaning up)", "import", "attr", "colnames", "=", "list", "(", "self", ".", "items", "[", "0", "]", ".", "as_dict", "(", ")", ".", "keys", "(", ")", ")", "# hack! the field names should perhaps be passed in during initialisation?", "return", "pd", ".", "DataFrame", "(", "[", "attr", ".", "astuple", "(", "x", ")", "for", "x", "in", "self", ".", "items", "]", ",", "columns", "=", "colnames", ")", "# Old version:", "#return pd.DataFrame([x.to_series() for x in self.items])", "else", ":", "# New version (much faster!)", "colnames", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "attr_getters", "=", "[", "attrgetter", "(", "attr_name", ")", "for", "attr_name", "in", "fields", ".", "values", "(", ")", "]", "return", "pd", ".", "DataFrame", "(", "[", "tuple", "(", "func", "(", "x", ")", "for", "func", "in", "attr_getters", ")", "for", "x", "in", "self", ".", "items", "]", ",", "columns", "=", "colnames", ")" ]
Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
[ "Export", "items", "as", "rows", "in", "a", "pandas", "dataframe", "table", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L101-L130
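Usage sketch for to_df(), assuming an ItemList named items whose items carry user_id and score attributes (all three names are hypothetical):

df_all = items.to_df()                                            # every field, auto columns
df_sub = items.to_df(fields=['user_id', 'score'])                 # subset, names kept
df_ren = items.to_df(fields={'ID': 'user_id', 'SCORE': 'score'})  # renamed columns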
maxalbert/tohu
tohu/v4/item_list.py
ItemList.to_csv
def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- filename: str or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If filename is None, the generated CSV output is returned instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `filename` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `filename`. If `filename` is given, writes the output to the file and returns `None`. If `filename` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if filename is not None: # ensure parent directory of output file exists dirname = os.path.dirname(os.path.abspath(filename)) if not os.path.exists(dirname): os.makedirs(dirname) file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO() retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if filename is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
python
def to_csv(self, filename=None, *, fields=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- filename: str or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If filename is None, the generated CSV output is returned instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `filename` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `filename`. If `filename` is given, writes the output to the file and returns `None`. If `filename` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if filename is not None: # ensure parent directory of output file exists dirname = os.path.dirname(os.path.abspath(filename)) if not os.path.exists(dirname): os.makedirs(dirname) file_or_string = open(filename, 'a' if append else 'w') if (filename is not None) else io.StringIO() retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if filename is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
[ "def", "to_csv", "(", "self", ",", "filename", "=", "None", ",", "*", ",", "fields", "=", "None", ",", "append", "=", "False", ",", "header", "=", "True", ",", "header_prefix", "=", "''", ",", "sep", "=", "','", ",", "newline", "=", "'\\n'", ")", ":", "assert", "isinstance", "(", "append", ",", "bool", ")", "if", "fields", "is", "None", ":", "raise", "NotImplementedError", "(", "\"TODO: derive field names automatically from the generator which produced this item list\"", ")", "if", "isinstance", "(", "fields", ",", "(", "list", ",", "tuple", ")", ")", ":", "fields", "=", "{", "name", ":", "name", "for", "name", "in", "fields", "}", "header_line", "=", "_generate_csv_header_line", "(", "header", "=", "header", ",", "header_prefix", "=", "header_prefix", ",", "header_names", "=", "fields", ".", "keys", "(", ")", ",", "sep", "=", "sep", ",", "newline", "=", "newline", ")", "if", "filename", "is", "not", "None", ":", "# ensure parent directory of output file exists", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "os", ".", "makedirs", "(", "dirname", ")", "file_or_string", "=", "open", "(", "filename", ",", "'a'", "if", "append", "else", "'w'", ")", "if", "(", "filename", "is", "not", "None", ")", "else", "io", ".", "StringIO", "(", ")", "retval", "=", "None", "attr_getters", "=", "[", "attrgetter", "(", "attr_name", ")", "for", "attr_name", "in", "fields", ".", "values", "(", ")", "]", "try", ":", "file_or_string", ".", "write", "(", "header_line", ")", "for", "x", "in", "self", ".", "items", ":", "line", "=", "sep", ".", "join", "(", "[", "format", "(", "func", "(", "x", ")", ")", "for", "func", "in", "attr_getters", "]", ")", "+", "newline", "file_or_string", ".", "write", "(", "line", ")", "if", "filename", "is", "None", ":", "retval", "=", "file_or_string", ".", "getvalue", "(", ")", "finally", ":", "file_or_string", ".", "close", "(", ")", "return", "retval" ]
Parameters ---------- filename: str or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If filename is None, the generated CSV output is returned instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `filename` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `filename`. If `filename` is given, writes the output to the file and returns `None`. If `filename` is `None`, returns a string containing the CSV output.
[ "Parameters", "----------", "filename", ":", "str", "or", "None", "The", "file", "to", "which", "output", "will", "be", "written", ".", "By", "default", "any", "existing", "content", "is", "overwritten", ".", "Use", "append", "=", "True", "to", "open", "the", "file", "in", "append", "mode", "instead", ".", "If", "filename", "is", "None", "the", "generated", "CSV", "output", "is", "returned", "instead", "of", "written", "to", "a", "file", ".", "fields", ":", "list", "or", "dict", "List", "of", "field", "names", "to", "export", "or", "dictionary", "mapping", "output", "column", "names", "to", "attribute", "names", "of", "the", "generators", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/item_list.py#L132-L206
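Usage sketch for to_csv(), with the same hypothetical field names as above: omitting filename returns the CSV text, passing one writes the file (creating parent directories first).

csv_text = items.to_csv(fields=['user_id', 'score'])     # no filename: returns a CSV string
items.to_csv('out/data.csv',
             fields={'ID': 'user_id', 'SCORE': 'score'},
             header_prefix='#',
             append=False)                               # writes (overwrites) out/data.csv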
maxalbert/tohu
tohu/v5/base.py
SpawnMapping.spawn_generator
def spawn_generator(self, g): """ Return a fresh spawn of g unless g is already contained in this SpawnMapping, in which case return the previously spawned generator. """ try: return self.mapping[g] except KeyError: return g._spawn(self)
python
def spawn_generator(self, g): """ Return a fresh spawn of g unless g is already contained in this SpawnMapping, in which case return the previously spawned generator. """ try: return self.mapping[g] except KeyError: return g._spawn(self)
[ "def", "spawn_generator", "(", "self", ",", "g", ")", ":", "try", ":", "return", "self", ".", "mapping", "[", "g", "]", "except", "KeyError", ":", "return", "g", ".", "_spawn", "(", "self", ")" ]
Return a fresh spawn of g unless g is already contained in this SpawnMapping, in which case return the previously spawned generator.
[ "Return", "a", "fresh", "spawn", "of", "g", "unless", "g", "is", "already", "contained", "in", "this", "SpawnMapping", "in", "which", "case", "return", "the", "previously", "spawned", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v5/base.py#L30-L39
maxalbert/tohu
tohu/v5/base.py
TohuBaseGenerator.spawn
def spawn(self, spawn_mapping=None): """ Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) but is otherwise independent, i.e. there is no link between the two generators (as opposed to a cloned generator, which is automatically reset whenever the original generator is reset). """ spawn_mapping = spawn_mapping or SpawnMapping() if self.parent is not None: if self.parent in spawn_mapping: # Return new clone of the mapped parent return spawn_mapping[self.parent].clone() else: raise TohuCloneError("Cannot spawn a cloned generator without being able to map its parent.") else: new_obj = spawn_mapping.spawn_generator(self) return new_obj
python
def spawn(self, spawn_mapping=None): """ Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) but is otherwise independent, i.e. there is no link between the two generators (as opposed to a cloned generator, which is automatically reset whenever the original generator is reset). """ spawn_mapping = spawn_mapping or SpawnMapping() if self.parent is not None: if self.parent in spawn_mapping: # Return new clone of the mapped parent return spawn_mapping[self.parent].clone() else: raise TohuCloneError("Cannot spawn a cloned generator without being able to map its parent.") else: new_obj = spawn_mapping.spawn_generator(self) return new_obj
[ "def", "spawn", "(", "self", ",", "spawn_mapping", "=", "None", ")", ":", "spawn_mapping", "=", "spawn_mapping", "or", "SpawnMapping", "(", ")", "if", "self", ".", "parent", "is", "not", "None", ":", "if", "self", ".", "parent", "in", "spawn_mapping", ":", "# Return new clone of the mapped parent", "return", "spawn_mapping", "[", "self", ".", "parent", "]", ".", "clone", "(", ")", "else", ":", "raise", "TohuCloneError", "(", "\"Cannot spawn a cloned generator without being able to map its parent.\"", ")", "else", ":", "new_obj", "=", "spawn_mapping", ".", "spawn_generator", "(", "self", ")", "return", "new_obj" ]
Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) but is otherwise independent, i.e. there is no link between the two generators (as opposed to a cloned generator, which is automatically reset whenever the original generator is reset).
[ "Return", "an", "exact", "copy", "of", "this", "generator", "which", "behaves", "the", "same", "way", "(", "i", ".", "e", ".", "produces", "the", "same", "elements", "in", "the", "same", "order", ")", "but", "is", "otherwise", "independent", "i", ".", "e", ".", "there", "is", "no", "link", "between", "the", "two", "generators", "(", "as", "opposed", "to", "a", "cloned", "generator", "which", "is", "automatically", "reset", "whenever", "the", "original", "generator", "is", "reset", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v5/base.py#L171-L189
maxalbert/tohu
tohu/v5/base.py
TohuBaseGenerator.clone
def clone(self, spawn_mapping=None): """ Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) and which is automatically reset whenever the original generator is reset. """ c = self.spawn(spawn_mapping) self.register_clone(c) c.register_parent(self) return c
python
def clone(self, spawn_mapping=None): """ Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) and which is automatically reset whenever the original generator is reset. """ c = self.spawn(spawn_mapping) self.register_clone(c) c.register_parent(self) return c
[ "def", "clone", "(", "self", ",", "spawn_mapping", "=", "None", ")", ":", "c", "=", "self", ".", "spawn", "(", "spawn_mapping", ")", "self", ".", "register_clone", "(", "c", ")", "c", ".", "register_parent", "(", "self", ")", "return", "c" ]
Return an exact copy of this generator which behaves the same way (i.e., produces the same elements in the same order) and which is automatically reset whenever the original generator is reset.
[ "Return", "an", "exact", "copy", "of", "this", "generator", "which", "behaves", "the", "same", "way", "(", "i", ".", "e", ".", "produces", "the", "same", "elements", "in", "the", "same", "order", ")", "and", "which", "is", "automatically", "reset", "whenever", "the", "original", "generator", "is", "reset", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v5/base.py#L195-L204
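How spawn() and clone() differ in practice, assuming a concrete generator g: a spawn is fully detached, while a clone stays registered with its parent (via register_clone/register_parent above) and is reset alongside it.

s = g.spawn()    # independent copy: g.reset(...) leaves s alone
c = g.clone()    # linked copy: registered with g as its parent
g.reset(99)      # resets g and c (same stream from here on); s is unaffected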
maxalbert/tohu
tohu/v4/derived_generators.py
FuncArgGens.all_generators
def all_generators(self): """ Convenience property to iterate over all generators in arg_gens and kwarg_gens. """ for arg_gen in self.arg_gens: yield arg_gen for kwarg_gen in self.kwarg_gens.values(): yield kwarg_gen
python
def all_generators(self): """ Convenience property to iterate over all generators in arg_gens and kwarg_gens. """ for arg_gen in self.arg_gens: yield arg_gen for kwarg_gen in self.kwarg_gens.values(): yield kwarg_gen
[ "def", "all_generators", "(", "self", ")", ":", "for", "arg_gen", "in", "self", ".", "arg_gens", ":", "yield", "arg_gen", "for", "kwarg_gen", "in", "self", ".", "kwarg_gens", ".", "values", "(", ")", ":", "yield", "kwarg_gen" ]
Convenience property to iterate over all generators in arg_gens and kwarg_gens.
[ "Convenience", "property", "to", "iterate", "over", "all", "generators", "in", "arg_gens", "and", "kwarg_gens", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/derived_generators.py#L40-L48
maxalbert/tohu
tohu/v2/generators.py
Split
def Split(g, *, maxbuffer=10, tuple_len=None): """ Split a tuple generator into individual generators. Parameters ---------- g: tohu generator The generator to be split. The items produced by `g` must be tuples. maxbuffer: integer Maximum number of items produced by `g` that will be buffered. """ if tuple_len is None: try: tuple_len = g.tuple_len except AttributeError: raise ValueError("Argument 'tuple_len' must be given since generator is not of type TupleGenerator.") g_buffered = BufferedTuple(g, maxbuffer=maxbuffer, tuple_len=tuple_len) return tuple(NthElementBuffered(g_buffered, i) for i in range(tuple_len))
python
def Split(g, *, maxbuffer=10, tuple_len=None): """ Split a tuple generator into individual generators. Parameters ---------- g: tohu generator The generator to be split. The items produced by `g` must be tuples. maxbuffer: integer Maximum number of items produced by `g` that will be buffered. """ if tuple_len is None: try: tuple_len = g.tuple_len except AttributeError: raise ValueError("Argument 'tuple_len' must be given since generator is not of type TupleGenerator.") g_buffered = BufferedTuple(g, maxbuffer=maxbuffer, tuple_len=tuple_len) return tuple(NthElementBuffered(g_buffered, i) for i in range(tuple_len))
[ "def", "Split", "(", "g", ",", "*", ",", "maxbuffer", "=", "10", ",", "tuple_len", "=", "None", ")", ":", "if", "tuple_len", "is", "None", ":", "try", ":", "tuple_len", "=", "g", ".", "tuple_len", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Argument 'tuple_len' must be given since generator is not of type TupleGenerator.\"", ")", "g_buffered", "=", "BufferedTuple", "(", "g", ",", "maxbuffer", "=", "maxbuffer", ",", "tuple_len", "=", "tuple_len", ")", "return", "tuple", "(", "NthElementBuffered", "(", "g_buffered", ",", "i", ")", "for", "i", "in", "range", "(", "tuple_len", ")", ")" ]
Split a tuple generator into individual generators. Parameters ---------- g: tohu generator The generator to be split. The items produced by `g` must be tuples. maxbuffer: integer Maximum number of items produced by `g` that will be buffered.
[ "Split", "a", "tuple", "generator", "into", "individual", "generators", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/generators.py#L956-L975
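Usage sketch for Split(), assuming pairs is a tohu generator producing 2-tuples (the name is hypothetical):

first, second = Split(pairs, tuple_len=2)   # tuple_len stated explicitly for plain generators
# For a TupleGenerator the length is read from pairs.tuple_len instead:
# first, second = Split(pairs)
x = next(first)   # pulls one tuple; its second element is buffered for `second`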
maxalbert/tohu
tohu/v2/generators.py
TupleGenerator.tuple_len
def tuple_len(self): """ Length of tuples produced by this generator. """ try: return self._tuple_len except AttributeError: raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__))
python
def tuple_len(self): """ Length of tuples produced by this generator. """ try: return self._tuple_len except AttributeError: raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__))
[ "def", "tuple_len", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_tuple_len", "except", "AttributeError", ":", "raise", "NotImplementedError", "(", "\"Class {} does not implement attribute 'tuple_len'.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Length of tuples produced by this generator.
[ "Length", "of", "tuples", "produced", "by", "this", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/generators.py#L48-L55
Morrolan/surrealism
surrealism.py
show_faults
def show_faults(): """ Return all valid/active faults ordered by ID to allow the user to pick and choose. :return: List of Tuples where the Tuple elements are: (fault id, fault template) """ cursor = CONN.cursor() query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc" cursor.execute(query) result = cursor.fetchall() return result
python
def show_faults(): """ Return all valid/active faults ordered by ID to allow the user to pick and choose. :return: List of Tuples where the Tuple elements are: (fault id, fault template) """ cursor = CONN.cursor() query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc" cursor.execute(query) result = cursor.fetchall() return result
[ "def", "show_faults", "(", ")", ":", "cursor", "=", "CONN", ".", "cursor", "(", ")", "query", "=", "\"select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc\"", "cursor", ".", "execute", "(", "query", ")", "result", "=", "cursor", ".", "fetchall", "(", ")", "return", "result" ]
Return all valid/active faults ordered by ID to allow the user to pick and choose. :return: List of Tuples where the Tuple elements are: (fault id, fault template)
[ "Return", "all", "valid", "/", "active", "faults", "ordered", "by", "ID", "to", "allow", "the", "user", "to", "pick", "and", "choose", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L71-L82
Morrolan/surrealism
surrealism.py
show_sentences
def show_sentences(): """ Return all valid/active sentences ordered by ID to allow the user to pick and choose. :return: Dict containing the sentence ID as the key and the sentence structure as the value. """ cursor = CONN.cursor() query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc" cursor.execute(query) result = cursor.fetchall() response_dict = {} for row in result: response_dict[row[0]] = row[1] return response_dict
python
def show_sentences(): """ Return all valid/active sentences ordered by ID to allow the user to pick and choose. :return: Dict containing the sentence ID as the key and the sentence structure as the value. """ cursor = CONN.cursor() query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc" cursor.execute(query) result = cursor.fetchall() response_dict = {} for row in result: response_dict[row[0]] = row[1] return response_dict
[ "def", "show_sentences", "(", ")", ":", "cursor", "=", "CONN", ".", "cursor", "(", ")", "query", "=", "\"select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc\"", "cursor", ".", "execute", "(", "query", ")", "result", "=", "cursor", ".", "fetchall", "(", ")", "response_dict", "=", "{", "}", "for", "row", "in", "result", ":", "response_dict", "[", "row", "[", "0", "]", "]", "=", "row", "[", "1", "]", "return", "response_dict" ]
Return all valid/active sentences ordered by ID to allow the user to pick and choose. :return: Dict containing the sentence ID as the key and the sentence structure as the value.
[ "Return", "all", "valid", "/", "active", "sentences", "ordered", "by", "ID", "to", "allow", "the", "user", "to", "pick", "and", "choose", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L94-L111
Morrolan/surrealism
surrealism.py
get_fault
def get_fault(fault_id=None): """Retrieve a randomly-generated error message as a unicode string. :param fault_id: Allows you to optionally specify an integer representing the fault_id from the database table. This allows you to retrieve a specific fault each time, albeit with different keywords.""" counts = __get_table_limits() result = None id_ = 0 try: if isinstance(fault_id, int): id_ = fault_id elif isinstance(fault_id, float): print("""ValueError: Floating point number detected. Rounding number to 0 decimal places.""") id_ = round(fault_id) else: id_ = random.randint(1, counts['max_fau']) except ValueError: print("ValueError: Incorrect parameter type detected.") if id_ <= counts['max_fau']: fault = __get_fault(counts, fault_id=id_) else: print("""ValueError: Parameter integer is too high. Maximum permitted value is {0}.""".format(str(counts['max_fau']))) id_ = counts['max_fau'] fault = __get_fault(counts, fault_id=id_) if fault is not None: while fault[0] == 'n': if id_ is not None: fault = __get_fault(counts, None) else: fault = __get_fault(counts, id_) if fault[0] == 'y': result = __process_sentence(fault, counts) return result else: print('ValueError: _fault cannot be None.')
python
def get_fault(fault_id=None): """Retrieve a randomly-generated error message as a unicode string. :param fault_id: Allows you to optionally specify an integer representing the fault_id from the database table. This allows you to retrieve a specific fault each time, albeit with different keywords.""" counts = __get_table_limits() result = None id_ = 0 try: if isinstance(fault_id, int): id_ = fault_id elif isinstance(fault_id, float): print("""ValueError: Floating point number detected. Rounding number to 0 decimal places.""") id_ = round(fault_id) else: id_ = random.randint(1, counts['max_fau']) except ValueError: print("ValueError: Incorrect parameter type detected.") if id_ <= counts['max_fau']: fault = __get_fault(counts, fault_id=id_) else: print("""ValueError: Parameter integer is too high. Maximum permitted value is {0}.""".format(str(counts['max_fau']))) id_ = counts['max_fau'] fault = __get_fault(counts, fault_id=id_) if fault is not None: while fault[0] == 'n': if id_ is not None: fault = __get_fault(counts, None) else: fault = __get_fault(counts, id_) if fault[0] == 'y': result = __process_sentence(fault, counts) return result else: print('ValueError: _fault cannot be None.')
[ "def", "get_fault", "(", "fault_id", "=", "None", ")", ":", "counts", "=", "__get_table_limits", "(", ")", "result", "=", "None", "id_", "=", "0", "try", ":", "if", "isinstance", "(", "fault_id", ",", "int", ")", ":", "id_", "=", "fault_id", "elif", "isinstance", "(", "fault_id", ",", "float", ")", ":", "print", "(", "\"\"\"ValueError: Floating point number detected.\n Rounding number to 0 decimal places.\"\"\"", ")", "id_", "=", "round", "(", "fault_id", ")", "else", ":", "id_", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_fau'", "]", ")", "except", "ValueError", ":", "print", "(", "\"ValueError: Incorrect parameter type detected.\"", ")", "if", "id_", "<=", "counts", "[", "'max_fau'", "]", ":", "fault", "=", "__get_fault", "(", "counts", ",", "fault_id", "=", "id_", ")", "else", ":", "print", "(", "\"\"\"ValueError: Parameter integer is too high.\n Maximum permitted value is {0}.\"\"\"", ".", "format", "(", "str", "(", "counts", "[", "'max_fau'", "]", ")", ")", ")", "id_", "=", "counts", "[", "'max_fau'", "]", "fault", "=", "__get_fault", "(", "counts", ",", "fault_id", "=", "id_", ")", "if", "fault", "is", "not", "None", ":", "while", "fault", "[", "0", "]", "==", "'n'", ":", "if", "id_", "is", "not", "None", ":", "fault", "=", "__get_fault", "(", "counts", ",", "None", ")", "else", ":", "fault", "=", "__get_fault", "(", "counts", ",", "id_", ")", "if", "fault", "[", "0", "]", "==", "'y'", ":", "result", "=", "__process_sentence", "(", "fault", ",", "counts", ")", "return", "result", "else", ":", "print", "(", "'ValueError: _fault cannot be None.'", ")" ]
Retrieve a randomly-generated error message as a unicode string. :param fault_id: Allows you to optionally specify an integer representing the fault_id from the database table. This allows you to retrieve a specific fault each time, albeit with different keywords.
[ "Retrieve", "a", "randomly", "-", "generated", "error", "message", "as", "a", "unicode", "string", ".", ":", "param", "fault_id", ":", "Allows", "you", "to", "optionally", "specify", "an", "integer", "representing", "the", "fault_id", "from", "the", "database", "table", ".", "This", "allows", "you", "to", "retrieve", "a", "specific", "fault", "each", "time", "albeit", "with", "different", "keywords", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L185-L229
Morrolan/surrealism
surrealism.py
get_sentence
def get_sentence(sentence_id=None): """Retrieve a randomly-generated sentence as a unicode string. :param sentence_id: Allows you to optionally specify an integer representing the sentence_id from the database table. This allows you to retrieve a specific sentence each time, albeit with different keywords.""" counts = __get_table_limits() result = None id_ = 0 try: if isinstance(sentence_id, int): id_ = sentence_id elif isinstance(sentence_id, float): print("""ValueError: Floating point number detected. Rounding number to 0 decimal places.""") id_ = round(sentence_id) else: id_ = random.randint(1, counts['max_sen']) except ValueError: print("ValueError: Incorrect parameter type detected.") if id_ <= counts['max_sen']: sentence = __get_sentence(counts, sentence_id=id_) else: print("""ValueError: Parameter integer is too high. Maximum permitted value is {0}.""".format(str(counts['max_sen']))) id_ = counts['max_sen'] sentence = __get_sentence(counts, sentence_id=id_) if sentence is not None: while sentence[0] == 'n': if id_ is not None: # here we deliberately pass 'None' to __get_sentence as it will pick a new random ID sentence = __get_sentence(counts, None) else: sentence = __get_sentence(counts, id_) if sentence[0] == 'y': result = __process_sentence(sentence, counts) return result else: print('ValueError: _sentence cannot be None.')
python
def get_sentence(sentence_id=None): """Retrieve a randomly-generated sentence as a unicode string. :param sentence_id: Allows you to optionally specify an integer representing the sentence_id from the database table. This allows you to retrieve a specific sentence each time, albeit with different keywords.""" counts = __get_table_limits() result = None id_ = 0 try: if isinstance(sentence_id, int): id_ = sentence_id elif isinstance(sentence_id, float): print("""ValueError: Floating point number detected. Rounding number to 0 decimal places.""") id_ = round(sentence_id) else: id_ = random.randint(1, counts['max_sen']) except ValueError: print("ValueError: Incorrect parameter type detected.") if id_ <= counts['max_sen']: sentence = __get_sentence(counts, sentence_id=id_) else: print("""ValueError: Parameter integer is too high. Maximum permitted value is {0}.""".format(str(counts['max_sen']))) id_ = counts['max_sen'] sentence = __get_sentence(counts, sentence_id=id_) if sentence is not None: while sentence[0] == 'n': if id_ is not None: # here we deliberately pass 'None' to __get_sentence as it will pick a new random ID sentence = __get_sentence(counts, None) else: sentence = __get_sentence(counts, id_) if sentence[0] == 'y': result = __process_sentence(sentence, counts) return result else: print('ValueError: _sentence cannot be None.')
[ "def", "get_sentence", "(", "sentence_id", "=", "None", ")", ":", "counts", "=", "__get_table_limits", "(", ")", "result", "=", "None", "id_", "=", "0", "try", ":", "if", "isinstance", "(", "sentence_id", ",", "int", ")", ":", "id_", "=", "sentence_id", "elif", "isinstance", "(", "sentence_id", ",", "float", ")", ":", "print", "(", "\"\"\"ValueError: Floating point number detected.\n Rounding number to 0 decimal places.\"\"\"", ")", "id_", "=", "round", "(", "sentence_id", ")", "else", ":", "id_", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_sen'", "]", ")", "except", "ValueError", ":", "print", "(", "\"ValueError: Incorrect parameter type detected.\"", ")", "if", "id_", "<=", "counts", "[", "'max_sen'", "]", ":", "sentence", "=", "__get_sentence", "(", "counts", ",", "sentence_id", "=", "id_", ")", "else", ":", "print", "(", "\"\"\"ValueError: Parameter integer is too high.\n Maximum permitted value is {0}.\"\"\"", ".", "format", "(", "str", "(", "counts", "[", "'max_sen'", "]", ")", ")", ")", "id_", "=", "counts", "[", "'max_sen'", "]", "sentence", "=", "__get_sentence", "(", "counts", ",", "sentence_id", "=", "id_", ")", "if", "sentence", "is", "not", "None", ":", "while", "sentence", "[", "0", "]", "==", "'n'", ":", "if", "id_", "is", "not", "None", ":", "# here we deliberately pass 'None' to __get_sentence as it will pick a new random ID", "sentence", "=", "__get_sentence", "(", "counts", ",", "None", ")", "else", ":", "sentence", "=", "__get_sentence", "(", "counts", ",", "id_", ")", "if", "sentence", "[", "0", "]", "==", "'y'", ":", "result", "=", "__process_sentence", "(", "sentence", ",", "counts", ")", "return", "result", "else", ":", "print", "(", "'ValueError: _sentence cannot be None.'", ")" ]
Retrieve a randomly-generated sentence as a unicode string. :param sentence_id: Allows you to optionally specify an integer representing the sentence_id from the database table. This allows you to retrieve a specific sentence each time, albeit with different keywords.
[ "Retrieve", "a", "randomly", "-", "generated", "sentence", "as", "a", "unicode", "string", ".", ":", "param", "sentence_id", ":", "Allows", "you", "to", "optionally", "specify", "an", "integer", "representing", "the", "sentence_id", "from", "the", "database", "table", ".", "This", "allows", "you", "to", "retrieve", "a", "specific", "sentence", "each", "time", "albeit", "with", "different", "keywords", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L237-L282
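Typical calls to get_sentence(), reflecting the rounding and clamping behaviour documented above:

print(get_sentence())        # random template, random keywords
print(get_sentence(17))      # template 17, fresh keywords on every call
print(get_sentence(17.4))    # floats are rounded (here to 17) with a printed notice
print(get_sentence(10**9))   # too-high IDs are clamped to the table maximum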
Morrolan/surrealism
surrealism.py
__get_sentence
def __get_sentence(counts, sentence_id=None): """Let's fetch a random sentence that we then need to substitute bits of... :param counts: :param sentence_id: """ # First of all we need a cursor and a query to retrieve our ID's cursor = CONN.cursor() check_query = "select sen_id from sursentences" # Now we fetch the result of the query and save it into check_result cursor.execute(check_query) check_result = cursor.fetchall() # declare an empty list to be populated below id_list = [] id_to_fetch = None # Populate the id_list variable with all of the ID's we retrieved from the database query. for row in check_result: id_list.append(row[0]) if sentence_id is not None: if type(sentence_id) is int: id_to_fetch = sentence_id else: id_to_fetch = random.randint(1, counts['max_sen']) while id_to_fetch not in id_list: id_to_fetch = random.randint(1, counts['max_sen']) query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch)) cursor.execute(query) result = cursor.fetchone() # cursor.close() return result
python
def __get_sentence(counts, sentence_id=None): """Let's fetch a random sentence that we then need to substitute bits of... :param counts: :param sentence_id: """ # First of all we need a cursor and a query to retrieve our ID's cursor = CONN.cursor() check_query = "select sen_id from sursentences" # Now we fetch the result of the query and save it into check_result cursor.execute(check_query) check_result = cursor.fetchall() # declare an empty list to be populated below id_list = [] id_to_fetch = None # Populate the id_list variable with all of the ID's we retrieved from the database query. for row in check_result: id_list.append(row[0]) if sentence_id is not None: if type(sentence_id) is int: id_to_fetch = sentence_id else: id_to_fetch = random.randint(1, counts['max_sen']) while id_to_fetch not in id_list: id_to_fetch = random.randint(1, counts['max_sen']) query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch)) cursor.execute(query) result = cursor.fetchone() # cursor.close() return result
[ "def", "__get_sentence", "(", "counts", ",", "sentence_id", "=", "None", ")", ":", "# First of all we need a cursor and a query to retrieve our ID's", "cursor", "=", "CONN", ".", "cursor", "(", ")", "check_query", "=", "\"select sen_id from sursentences\"", "# Now we fetch the result of the query and save it into check_result", "cursor", ".", "execute", "(", "check_query", ")", "check_result", "=", "cursor", ".", "fetchall", "(", ")", "# declare an empty list to be populated below", "id_list", "=", "[", "]", "id_to_fetch", "=", "None", "# Populate the id_list variable with all of the ID's we retrieved from the database query.", "for", "row", "in", "check_result", ":", "id_list", ".", "append", "(", "row", "[", "0", "]", ")", "if", "sentence_id", "is", "not", "None", ":", "if", "type", "(", "sentence_id", ")", "is", "int", ":", "id_to_fetch", "=", "sentence_id", "else", ":", "id_to_fetch", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_sen'", "]", ")", "while", "id_to_fetch", "not", "in", "id_list", ":", "id_to_fetch", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_sen'", "]", ")", "query", "=", "(", "\"select * from sursentences where sen_id = {0}\"", ".", "format", "(", "id_to_fetch", ")", ")", "cursor", ".", "execute", "(", "query", ")", "result", "=", "cursor", ".", "fetchone", "(", ")", "# cursor.close()", "return", "result" ]
Let's fetch a random sentence that we then need to substitute bits of... :param counts: :param sentence_id:
[ "Let", "s", "fetch", "a", "random", "sentence", "that", "we", "then", "need", "to", "substitute", "bits", "of", "..." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L327-L364
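The selection loop in __get_sentence re-rolls random.randint until it lands on an ID that actually exists, which can spin when IDs are sparse; sampling directly from the fetched ID list avoids that (a hypothetical simplification, not what the module does):

import random

id_list = [1, 2, 5, 8, 13]            # IDs with gaps, as read from the table
id_to_fetch = random.choice(id_list)  # one draw, always a valid ID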
Morrolan/surrealism
surrealism.py
__get_verb
def __get_verb(counts): """Let's fetch a VERB :param counts: """ cursor = CONN.cursor() check_query = "select verb_id from surverbs" cursor.execute(check_query) check_result = cursor.fetchall() id_list = [] for row in check_result: id_list.append(row[0]) rand = random.randint(1, counts['max_verb']) while rand not in id_list: rand = random.randint(1, counts['max_verb']) query = "select * from surverbs where verb_id = {0}".format(rand) cursor.execute(query) result = cursor.fetchone() # cursor.close() return result[1]
python
def __get_verb(counts): """Let's fetch a VERB :param counts: """ cursor = CONN.cursor() check_query = "select verb_id from surverbs" cursor.execute(check_query) check_result = cursor.fetchall() id_list = [] for row in check_result: id_list.append(row[0]) rand = random.randint(1, counts['max_verb']) while rand not in id_list: rand = random.randint(1, counts['max_verb']) query = "select * from surverbs where verb_id = {0}".format(rand) cursor.execute(query) result = cursor.fetchone() # cursor.close() return result[1]
[ "def", "__get_verb", "(", "counts", ")", ":", "cursor", "=", "CONN", ".", "cursor", "(", ")", "check_query", "=", "\"select verb_id from surverbs\"", "cursor", ".", "execute", "(", "check_query", ")", "check_result", "=", "cursor", ".", "fetchall", "(", ")", "id_list", "=", "[", "]", "for", "row", "in", "check_result", ":", "id_list", ".", "append", "(", "row", "[", "0", "]", ")", "rand", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_verb'", "]", ")", "while", "rand", "not", "in", "id_list", ":", "rand", "=", "random", ".", "randint", "(", "1", ",", "counts", "[", "'max_verb'", "]", ")", "query", "=", "\"select * from surverbs where verb_id = {0}\"", ".", "format", "(", "rand", ")", "cursor", ".", "execute", "(", "query", ")", "result", "=", "cursor", ".", "fetchone", "(", ")", "# cursor.close()", "return", "result", "[", "1", "]" ]
Let's fetch a VERB :param counts:
[ "Let", "s", "fetch", "a", "VERB", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L367-L392
Morrolan/surrealism
surrealism.py
__get_table_limits
def __get_table_limits(): """Here we simply take a count of each of the database tables so we know our upper limits for our random number calls then return a dictionary of them to the calling function...""" table_counts = { 'max_adjectives': None, 'max_names': None, 'max_nouns': None, 'max_sentences': None, 'max_faults': None, 'max_verbs': None } cursor = CONN.cursor() cursor.execute('SELECT count(*) FROM suradjs') table_counts['max_adjectives'] = cursor.fetchone() table_counts['max_adjectives'] = table_counts['max_adjectives'][0] cursor.execute('SELECT count(*) FROM surnames') table_counts['max_names'] = cursor.fetchone() table_counts['max_names'] = table_counts['max_names'][0] cursor.execute('SELECT count(*) FROM surnouns') table_counts['max_nouns'] = cursor.fetchone() table_counts['max_nouns'] = table_counts['max_nouns'][0] cursor.execute('SELECT count(*) FROM sursentences') table_counts['max_sen'] = cursor.fetchone() table_counts['max_sen'] = table_counts['max_sen'][0] cursor.execute('SELECT count(*) FROM surfaults') table_counts['max_fau'] = cursor.fetchone() table_counts['max_fau'] = table_counts['max_fau'][0] cursor.execute('SELECT count(*) FROM surverbs') table_counts['max_verb'] = cursor.fetchone() table_counts['max_verb'] = table_counts['max_verb'][0] return table_counts
python
def __get_table_limits(): """Here we simply take a count of each of the database tables so we know our upper limits for our random number calls then return a dictionary of them to the calling function...""" table_counts = { 'max_adjectives': None, 'max_names': None, 'max_nouns': None, 'max_sentences': None, 'max_faults': None, 'max_verbs': None } cursor = CONN.cursor() cursor.execute('SELECT count(*) FROM suradjs') table_counts['max_adjectives'] = cursor.fetchone() table_counts['max_adjectives'] = table_counts['max_adjectives'][0] cursor.execute('SELECT count(*) FROM surnames') table_counts['max_names'] = cursor.fetchone() table_counts['max_names'] = table_counts['max_names'][0] cursor.execute('SELECT count(*) FROM surnouns') table_counts['max_nouns'] = cursor.fetchone() table_counts['max_nouns'] = table_counts['max_nouns'][0] cursor.execute('SELECT count(*) FROM sursentences') table_counts['max_sen'] = cursor.fetchone() table_counts['max_sen'] = table_counts['max_sen'][0] cursor.execute('SELECT count(*) FROM surfaults') table_counts['max_fau'] = cursor.fetchone() table_counts['max_fau'] = table_counts['max_fau'][0] cursor.execute('SELECT count(*) FROM surverbs') table_counts['max_verb'] = cursor.fetchone() table_counts['max_verb'] = table_counts['max_verb'][0] return table_counts
[ "def", "__get_table_limits", "(", ")", ":", "table_counts", "=", "{", "'max_adjectives'", ":", "None", ",", "'max_names'", ":", "None", ",", "'max_nouns'", ":", "None", ",", "'max_sentences'", ":", "None", ",", "'max_faults'", ":", "None", ",", "'max_verbs'", ":", "None", "}", "cursor", "=", "CONN", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT count(*) FROM suradjs'", ")", "table_counts", "[", "'max_adjectives'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_adjectives'", "]", "=", "table_counts", "[", "'max_adjectives'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surnames'", ")", "table_counts", "[", "'max_names'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_names'", "]", "=", "table_counts", "[", "'max_names'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surnouns'", ")", "table_counts", "[", "'max_nouns'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_nouns'", "]", "=", "table_counts", "[", "'max_nouns'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM sursentences'", ")", "table_counts", "[", "'max_sen'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_sen'", "]", "=", "table_counts", "[", "'max_sen'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surfaults'", ")", "table_counts", "[", "'max_fau'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_fau'", "]", "=", "table_counts", "[", "'max_fau'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surverbs'", ")", "table_counts", "[", "'max_verb'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_verb'", "]", "=", "table_counts", "[", "'max_verb'", "]", "[", "0", "]", "return", "table_counts" ]
Here we simply take a count of each of the database tables so we know our upper limits for our random number calls then return a dictionary of them to the calling function...
[ "Here", "we", "simply", "take", "a", "count", "of", "each", "of", "the", "database", "tables", "so", "we", "know", "our", "upper", "limits", "for", "our", "random", "number", "calls", "then", "return", "a", "dictionary", "of", "them", "to", "the", "calling", "function", "..." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L478-L518
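The six near-identical query blocks in __get_table_limits can be collapsed into a loop over (key, table) pairs. A minimal sketch under the same assumptions as the original (a module-level sqlite-style CONN and the table names shown above); the key names are the ones the lookup helpers actually read, e.g. counts['max_verb'] in __get_verb:

    TABLES = [
        ('max_adjectives', 'suradjs'),
        ('max_names', 'surnames'),
        ('max_nouns', 'surnouns'),
        ('max_sen', 'sursentences'),
        ('max_fau', 'surfaults'),
        ('max_verb', 'surverbs'),
    ]

    def get_table_limits():
        """Return {key: row count} for each word table."""
        cursor = CONN.cursor()
        counts = {}
        for key, table in TABLES:
            # table names come from the trusted constant list above, not user input
            cursor.execute('SELECT count(*) FROM {0}'.format(table))
            counts[key] = cursor.fetchone()[0]
        return counts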
Morrolan/surrealism
surrealism.py
__process_sentence
def __process_sentence(sentence_tuple, counts): """pull the actual sentence from the tuple (tuple contains additional data such as ID) :param _sentence_tuple: :param counts: """ sentence = sentence_tuple[2] # now we start replacing words one type at a time... sentence = __replace_verbs(sentence, counts) sentence = __replace_nouns(sentence, counts) sentence = ___replace_adjective_maybe(sentence, counts) sentence = __replace_adjective(sentence, counts) sentence = __replace_names(sentence, counts) # here we perform a check to see if we need to use A or AN depending on the # first letter of the following word... sentence = __replace_an(sentence) # replace the new repeating segments sentence = __replace_repeat(sentence) # now we will read, choose and substitute each of the RANDOM sentence tuples sentence = __replace_random(sentence) # now we are going to choose whether to capitalize words/sentences or not sentence = __replace_capitalise(sentence) # here we will choose whether to capitalize all words in the sentence sentence = __replace_capall(sentence) # check for appropriate spaces in the correct places. sentence = __check_spaces(sentence) return sentence
python
def __process_sentence(sentence_tuple, counts): """pull the actual sentence from the tuple (tuple contains additional data such as ID) :param _sentence_tuple: :param counts: """ sentence = sentence_tuple[2] # now we start replacing words one type at a time... sentence = __replace_verbs(sentence, counts) sentence = __replace_nouns(sentence, counts) sentence = ___replace_adjective_maybe(sentence, counts) sentence = __replace_adjective(sentence, counts) sentence = __replace_names(sentence, counts) # here we perform a check to see if we need to use A or AN depending on the # first letter of the following word... sentence = __replace_an(sentence) # replace the new repeating segments sentence = __replace_repeat(sentence) # now we will read, choose and substitute each of the RANDOM sentence tuples sentence = __replace_random(sentence) # now we are going to choose whether to capitalize words/sentences or not sentence = __replace_capitalise(sentence) # here we will choose whether to capitalize all words in the sentence sentence = __replace_capall(sentence) # check for appropriate spaces in the correct places. sentence = __check_spaces(sentence) return sentence
[ "def", "__process_sentence", "(", "sentence_tuple", ",", "counts", ")", ":", "sentence", "=", "sentence_tuple", "[", "2", "]", "# now we start replacing words one type at a time...", "sentence", "=", "__replace_verbs", "(", "sentence", ",", "counts", ")", "sentence", "=", "__replace_nouns", "(", "sentence", ",", "counts", ")", "sentence", "=", "___replace_adjective_maybe", "(", "sentence", ",", "counts", ")", "sentence", "=", "__replace_adjective", "(", "sentence", ",", "counts", ")", "sentence", "=", "__replace_names", "(", "sentence", ",", "counts", ")", "# here we perform a check to see if we need to use A or AN depending on the ", "# first letter of the following word...", "sentence", "=", "__replace_an", "(", "sentence", ")", "# replace the new repeating segments", "sentence", "=", "__replace_repeat", "(", "sentence", ")", "# now we will read, choose and substitute each of the RANDOM sentence tuples", "sentence", "=", "__replace_random", "(", "sentence", ")", "# now we are going to choose whether to capitalize words/sentences or not", "sentence", "=", "__replace_capitalise", "(", "sentence", ")", "# here we will choose whether to capitalize all words in the sentence", "sentence", "=", "__replace_capall", "(", "sentence", ")", "# check for appropriate spaces in the correct places.", "sentence", "=", "__check_spaces", "(", "sentence", ")", "return", "sentence" ]
pull the actual sentence from the tuple (tuple contains additional data such as ID) :param _sentence_tuple: :param counts:
[ "pull", "the", "actual", "sentence", "from", "the", "tuple", "(", "tuple", "contains", "additional", "data", "such", "as", "ID", ")", ":", "param", "_sentence_tuple", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L521-L559
Morrolan/surrealism
surrealism.py
__replace_verbs
def __replace_verbs(sentence, counts): """Lets find and replace all instances of #VERB :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#VERB') != -1: sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1) if sentence.find('#VERB') == -1: return sentence return sentence else: return sentence
python
def __replace_verbs(sentence, counts): """Lets find and replace all instances of #VERB :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#VERB') != -1: sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1) if sentence.find('#VERB') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_verbs", "(", "sentence", ",", "counts", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#VERB'", ")", "!=", "-", "1", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#VERB'", ",", "str", "(", "__get_verb", "(", "counts", ")", ")", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#VERB'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #VERB :param _sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#VERB", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L562-L576
Morrolan/surrealism
surrealism.py
__replace_nouns
def __replace_nouns(sentence, counts): """Lets find and replace all instances of #NOUN :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#NOUN') != -1: sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1) if sentence.find('#NOUN') == -1: return sentence return sentence else: return sentence
python
def __replace_nouns(sentence, counts): """Lets find and replace all instances of #NOUN :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#NOUN') != -1: sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1) if sentence.find('#NOUN') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_nouns", "(", "sentence", ",", "counts", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#NOUN'", ")", "!=", "-", "1", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#NOUN'", ",", "str", "(", "__get_noun", "(", "counts", ")", ")", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#NOUN'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #NOUN :param _sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#NOUN", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L579-L594
Morrolan/surrealism
surrealism.py
___replace_adjective_maybe
def ___replace_adjective_maybe(sentence, counts): """Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts: """ random_decision = random.randint(0, 1) if sentence is not None: while sentence.find('#ADJECTIVE_MAYBE') != -1: if random_decision % 2 == 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1) elif random_decision % 2 != 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1) if sentence.find('#ADJECTIVE_MAYBE') == -1: return sentence return sentence else: return sentence
python
def ___replace_adjective_maybe(sentence, counts): """Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts: """ random_decision = random.randint(0, 1) if sentence is not None: while sentence.find('#ADJECTIVE_MAYBE') != -1: if random_decision % 2 == 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1) elif random_decision % 2 != 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1) if sentence.find('#ADJECTIVE_MAYBE') == -1: return sentence return sentence else: return sentence
[ "def", "___replace_adjective_maybe", "(", "sentence", ",", "counts", ")", ":", "random_decision", "=", "random", ".", "randint", "(", "0", ",", "1", ")", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#ADJECTIVE_MAYBE'", ")", "!=", "-", "1", ":", "if", "random_decision", "%", "2", "==", "0", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#ADJECTIVE_MAYBE'", ",", "' '", "+", "str", "(", "__get_adjective", "(", "counts", ")", ")", ",", "1", ")", "elif", "random_decision", "%", "2", "!=", "0", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#ADJECTIVE_MAYBE'", ",", "''", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#ADJECTIVE_MAYBE'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#ADJECTIVE_MAYBE", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L597-L619
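Note that random_decision in ___replace_adjective_maybe is drawn once, before the while loop, so a single coin flip governs every #ADJECTIVE_MAYBE in the sentence: either all of them become adjectives or all are dropped. If per-occurrence behaviour were wanted instead, the flip would move inside the loop (hypothetical variant, same helpers assumed):

    while sentence.find('#ADJECTIVE_MAYBE') != -1:
        if random.randint(0, 1) == 0:  # re-flip for each occurrence
            sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)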
Morrolan/surrealism
surrealism.py
__replace_adjective
def __replace_adjective(sentence, counts): """Lets find and replace all instances of #ADJECTIVE :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#ADJECTIVE') != -1: sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1) if sentence.find('#ADJECTIVE') == -1: return sentence return sentence else: return sentence
python
def __replace_adjective(sentence, counts): """Lets find and replace all instances of #ADJECTIVE :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#ADJECTIVE') != -1: sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1) if sentence.find('#ADJECTIVE') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_adjective", "(", "sentence", ",", "counts", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#ADJECTIVE'", ")", "!=", "-", "1", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#ADJECTIVE'", ",", "str", "(", "__get_adjective", "(", "counts", ")", ")", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#ADJECTIVE'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #ADJECTIVE :param _sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#ADJECTIVE", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L622-L638
Morrolan/surrealism
surrealism.py
__replace_names
def __replace_names(sentence, counts): """Lets find and replace all instances of #NAME :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#NAME') != -1: sentence = sentence.replace('#NAME', str(__get_name(counts)), 1) if sentence.find('#NAME') == -1: return sentence return sentence else: return sentence
python
def __replace_names(sentence, counts): """Lets find and replace all instances of #NAME :param _sentence: :param counts: """ if sentence is not None: while sentence.find('#NAME') != -1: sentence = sentence.replace('#NAME', str(__get_name(counts)), 1) if sentence.find('#NAME') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_names", "(", "sentence", ",", "counts", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#NAME'", ")", "!=", "-", "1", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#NAME'", ",", "str", "(", "__get_name", "(", "counts", ")", ")", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#NAME'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #NAME :param _sentence: :param counts:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#NAME", ":", "param", "_sentence", ":", ":", "param", "counts", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L641-L656
Morrolan/surrealism
surrealism.py
__replace_an
def __replace_an(sentence): """Lets find and replace all instances of #AN This is a little different, as this depends on whether the next word starts with a vowel or a consonant. :param _sentence: """ if sentence is not None: while sentence.find('#AN') != -1: an_index = sentence.find('#AN') if an_index > -1: an_index += 4 if sentence[an_index] in 'aeiouAEIOU': sentence = sentence.replace('#AN', str('an'), 1) else: sentence = sentence.replace('#AN', str('a'), 1) if sentence.find('#AN') == -1: return sentence return sentence else: return sentence
python
def __replace_an(sentence): """Lets find and replace all instances of #AN This is a little different, as this depends on whether the next word starts with a vowel or a consonant. :param _sentence: """ if sentence is not None: while sentence.find('#AN') != -1: an_index = sentence.find('#AN') if an_index > -1: an_index += 4 if sentence[an_index] in 'aeiouAEIOU': sentence = sentence.replace('#AN', str('an'), 1) else: sentence = sentence.replace('#AN', str('a'), 1) if sentence.find('#AN') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_an", "(", "sentence", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#AN'", ")", "!=", "-", "1", ":", "an_index", "=", "sentence", ".", "find", "(", "'#AN'", ")", "if", "an_index", ">", "-", "1", ":", "an_index", "+=", "4", "if", "sentence", "[", "an_index", "]", "in", "'aeiouAEIOU'", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#AN'", ",", "str", "(", "'an'", ")", ",", "1", ")", "else", ":", "sentence", "=", "sentence", ".", "replace", "(", "'#AN'", ",", "str", "(", "'a'", ")", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#AN'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #AN This is a little different, as this depends on whether the next word starts with a vowel or a consonant. :param _sentence:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#AN", "This", "is", "a", "little", "different", "as", "this", "depends", "on", "whether", "the", "next", "word", "starts", "with", "a", "vowel", "or", "a", "consonant", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L659-L683
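To illustrate the vowel check in __replace_an: an_index + 4 points at the first letter of the word following '#AN ' (hash, 'A', 'N', space), and that letter decides between 'an' and 'a'. A doctest-style illustration (calling the module-private helper directly):

    >>> __replace_an('I saw #AN elephant and #AN dog.')
    'I saw an elephant and a dog.'

One edge case worth knowing: if '#AN' is the very last token of the sentence, sentence[an_index] falls past the end of the string and raises IndexError, because there is no following word to inspect.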
Morrolan/surrealism
surrealism.py
__replace_random
def __replace_random(sentence): """Lets find and replace all instances of #RANDOM :param _sentence: """ sub_list = None choice = None if sentence is not None: while sentence.find('#RANDOM') != -1: random_index = sentence.find('#RANDOM') start_index = sentence.find('#RANDOM') + 8 end_index = sentence.find(']') if sentence.find('#RANDOM') is not None: sub_list = sentence[start_index:end_index].split(',') choice = random.randint(1, int(sub_list[0])) # _sub_list[_choice] to_be_replaced = sentence[random_index:end_index + 1] sentence = sentence.replace(to_be_replaced, sub_list[choice], 1) if sentence.find('#RANDOM') == -1: return sentence return sentence else: return sentence
python
def __replace_random(sentence): """Lets find and replace all instances of #RANDOM :param _sentence: """ sub_list = None choice = None if sentence is not None: while sentence.find('#RANDOM') != -1: random_index = sentence.find('#RANDOM') start_index = sentence.find('#RANDOM') + 8 end_index = sentence.find(']') if sentence.find('#RANDOM') is not None: sub_list = sentence[start_index:end_index].split(',') choice = random.randint(1, int(sub_list[0])) # _sub_list[_choice] to_be_replaced = sentence[random_index:end_index + 1] sentence = sentence.replace(to_be_replaced, sub_list[choice], 1) if sentence.find('#RANDOM') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_random", "(", "sentence", ")", ":", "sub_list", "=", "None", "choice", "=", "None", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#RANDOM'", ")", "!=", "-", "1", ":", "random_index", "=", "sentence", ".", "find", "(", "'#RANDOM'", ")", "start_index", "=", "sentence", ".", "find", "(", "'#RANDOM'", ")", "+", "8", "end_index", "=", "sentence", ".", "find", "(", "']'", ")", "if", "sentence", ".", "find", "(", "'#RANDOM'", ")", "is", "not", "None", ":", "sub_list", "=", "sentence", "[", "start_index", ":", "end_index", "]", ".", "split", "(", "','", ")", "choice", "=", "random", ".", "randint", "(", "1", ",", "int", "(", "sub_list", "[", "0", "]", ")", ")", "# _sub_list[_choice]", "to_be_replaced", "=", "sentence", "[", "random_index", ":", "end_index", "+", "1", "]", "sentence", "=", "sentence", ".", "replace", "(", "to_be_replaced", ",", "sub_list", "[", "choice", "]", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#RANDOM'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Lets find and replace all instances of #RANDOM :param _sentence:
[ "Lets", "find", "and", "replace", "all", "instances", "of", "#RANDOM", ":", "param", "_sentence", ":" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L686-L716
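From the index arithmetic in __replace_random, the placeholder grammar is #RANDOM[n,option1,...,optionN]: the first comma-separated field is the option count, and randint(1, n) indexes past it into the options. A hypothetical input/output pair:

    >>> __replace_random('The wall was painted #RANDOM[3,red,green,blue].')
    'The wall was painted blue.'  # or '...red.' / '...green.', chosen at random

One caveat visible in the code: end_index = sentence.find(']') searches from the start of the whole string, so a literal ']' occurring before the #RANDOM placeholder would corrupt the extracted option list.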
Morrolan/surrealism
surrealism.py
__replace_repeat
def __replace_repeat(sentence): """ Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences. :param sentence: """ ######### USE SENTENCE_ID 47 for testing! repeat_dict = {} if sentence is not None: while sentence.find('#DEFINE_REPEAT') != -1: begin_index = sentence.find('#DEFINE_REPEAT') start_index = begin_index + 15 end_index = sentence.find(']') if sentence.find('#DEFINE_REPEAT') is not None: sub_list = sentence[start_index:end_index].split(',') choice = sub_list[0] repeat_text = sub_list[1] repeat_dict[choice] = repeat_text sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1) while sentence.find('#REPEAT') != -1: if sentence.find('#REPEAT') is not None: repeat_begin_index = sentence.find('#REPEAT') repeat_start_index = repeat_begin_index + 8 # by searching from repeat_index below we don't encounter dodgy bracket-matching errors. repeat_end_index = sentence.find(']', repeat_start_index) repeat_index = sentence[repeat_start_index:repeat_end_index] if repeat_index in repeat_dict: sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1], str(repeat_dict[repeat_index])) if sentence.find('#REPEAT') == -1: return sentence return sentence else: return sentence
python
def __replace_repeat(sentence): """ Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences. :param sentence: """ ######### USE SENTENCE_ID 47 for testing! repeat_dict = {} if sentence is not None: while sentence.find('#DEFINE_REPEAT') != -1: begin_index = sentence.find('#DEFINE_REPEAT') start_index = begin_index + 15 end_index = sentence.find(']') if sentence.find('#DEFINE_REPEAT') is not None: sub_list = sentence[start_index:end_index].split(',') choice = sub_list[0] repeat_text = sub_list[1] repeat_dict[choice] = repeat_text sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1) while sentence.find('#REPEAT') != -1: if sentence.find('#REPEAT') is not None: repeat_begin_index = sentence.find('#REPEAT') repeat_start_index = repeat_begin_index + 8 # by searching from repeat_index below we don't encounter dodgy bracket-matching errors. repeat_end_index = sentence.find(']', repeat_start_index) repeat_index = sentence[repeat_start_index:repeat_end_index] if repeat_index in repeat_dict: sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1], str(repeat_dict[repeat_index])) if sentence.find('#REPEAT') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_repeat", "(", "sentence", ")", ":", "######### USE SENTENCE_ID 47 for testing!", "repeat_dict", "=", "{", "}", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#DEFINE_REPEAT'", ")", "!=", "-", "1", ":", "begin_index", "=", "sentence", ".", "find", "(", "'#DEFINE_REPEAT'", ")", "start_index", "=", "begin_index", "+", "15", "end_index", "=", "sentence", ".", "find", "(", "']'", ")", "if", "sentence", ".", "find", "(", "'#DEFINE_REPEAT'", ")", "is", "not", "None", ":", "sub_list", "=", "sentence", "[", "start_index", ":", "end_index", "]", ".", "split", "(", "','", ")", "choice", "=", "sub_list", "[", "0", "]", "repeat_text", "=", "sub_list", "[", "1", "]", "repeat_dict", "[", "choice", "]", "=", "repeat_text", "sentence", "=", "sentence", ".", "replace", "(", "sentence", "[", "begin_index", ":", "end_index", "+", "1", "]", ",", "''", ",", "1", ")", "while", "sentence", ".", "find", "(", "'#REPEAT'", ")", "!=", "-", "1", ":", "if", "sentence", ".", "find", "(", "'#REPEAT'", ")", "is", "not", "None", ":", "repeat_begin_index", "=", "sentence", ".", "find", "(", "'#REPEAT'", ")", "repeat_start_index", "=", "repeat_begin_index", "+", "8", "# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.", "repeat_end_index", "=", "sentence", ".", "find", "(", "']'", ",", "repeat_start_index", ")", "repeat_index", "=", "sentence", "[", "repeat_start_index", ":", "repeat_end_index", "]", "if", "repeat_index", "in", "repeat_dict", ":", "sentence", "=", "sentence", ".", "replace", "(", "sentence", "[", "repeat_begin_index", ":", "repeat_end_index", "+", "1", "]", ",", "str", "(", "repeat_dict", "[", "repeat_index", "]", ")", ")", "if", "sentence", ".", "find", "(", "'#REPEAT'", ")", "==", "-", "1", ":", "return", "sentence", "return", "sentence", "else", ":", "return", "sentence" ]
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences. :param sentence:
[ "Allows", "the", "use", "of", "repeating", "random", "-", "elements", "such", "as", "in", "the", "Ten", "green", "bottles", "type", "sentences", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L719-L760
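The offsets in __replace_repeat imply the grammar #DEFINE_REPEAT[key,text] to register a snippet and #REPEAT[key] to splice it back in, which is what enables 'Ten green bottles'-style repetition. A hypothetical example:

    >>> __replace_repeat('#DEFINE_REPEAT[1,ten]There were #REPEAT[1] bottles; #REPEAT[1] fell.')
    'There were ten bottles; ten fell.'

Because the second replace() call has no count argument, every #REPEAT[key] with the same key is substituted in one pass. Note also that a #REPEAT with an unknown key is never removed, so the while loop would spin forever on malformed input.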
Morrolan/surrealism
surrealism.py
__replace_capitalise
def __replace_capitalise(sentence): """here we replace all instances of #CAPITALISE and cap the next word. ############ #NOTE: Buggy as hell, as it doesn't account for words that are already #capitalized ############ :param sentence: """ if sentence is not None: while sentence.find('#CAPITALISE') != -1: cap_index = sentence.find('#CAPITALISE') part1 = sentence[:cap_index] part2 = sentence[cap_index + 12:cap_index + 13] part3 = sentence[cap_index + 13:] if part2 in "abcdefghijklmnopqrstuvwxyz": sentence = part1 + part2.capitalize() + part3 else: sentence = part1 + part2 + part3 if sentence.find('#CAPITALISE') == -1: return sentence return sentence else: return sentence
python
def __replace_capitalise(sentence): """here we replace all instances of #CAPITALISE and cap the next word. ############ #NOTE: Buggy as hell, as it doesn't account for words that are already #capitalized ############ :param sentence: """ if sentence is not None: while sentence.find('#CAPITALISE') != -1: cap_index = sentence.find('#CAPITALISE') part1 = sentence[:cap_index] part2 = sentence[cap_index + 12:cap_index + 13] part3 = sentence[cap_index + 13:] if part2 in "abcdefghijklmnopqrstuvwxyz": sentence = part1 + part2.capitalize() + part3 else: sentence = part1 + part2 + part3 if sentence.find('#CAPITALISE') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_capitalise", "(", "sentence", ")", ":", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#CAPITALISE'", ")", "!=", "-", "1", ":", "cap_index", "=", "_sentence", ".", "find", "(", "'#CAPITALISE'", ")", "part1", "=", "sentence", "[", ":", "cap_index", "]", "part2", "=", "sentence", "[", "cap_index", "+", "12", ":", "cap_index", "+", "13", "]", "part3", "=", "sentence", "[", "cap_index", "+", "13", ":", "]", "if", "part2", "in", "\"abcdefghijklmnopqrstuvwxyz\"", ":", "sentence", "=", "part1", "+", "part2", ".", "capitalize", "(", ")", "+", "part3", "else", ":", "sentence", "=", "part1", "+", "part2", "+", "part3", "if", "sentence", ".", "find", "(", "'#CAPITALISE'", ")", "==", "-", "1", ":", "return", "sentence", "else", ":", "return", "sentence" ]
here we replace all instances of #CAPITALISE and cap the next word. ############ #NOTE: Buggy as hell, as it doesn't account for words that are already #capitalized ############ :param sentence:
[ "here", "we", "replace", "all", "instances", "of", "#CAPITALISE", "and", "cap", "the", "next", "word", ".", "############" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L763-L790
Morrolan/surrealism
surrealism.py
__replace_capall
def __replace_capall(sentence): """here we replace all instances of #CAPALL and cap the entire sentence. Don't believe that CAPALL is buggy anymore as it forces all uppercase OK? :param sentence: """ # print "\nReplacing CAPITALISE: " if sentence is not None: while sentence.find('#CAPALL') != -1: # _cap_index = _sentence.find('#CAPALL') sentence = sentence.upper() sentence = sentence.replace('#CAPALL ', '', 1) if sentence.find('#CAPALL') == -1: return sentence return sentence else: return sentence
python
def __replace_capall(sentence): """here we replace all instances of #CAPALL and cap the entire sentence. Don't believe that CAPALL is buggy anymore as it forces all uppercase OK? :param sentence: """ # print "\nReplacing CAPITALISE: " if sentence is not None: while sentence.find('#CAPALL') != -1: # _cap_index = _sentence.find('#CAPALL') sentence = sentence.upper() sentence = sentence.replace('#CAPALL ', '', 1) if sentence.find('#CAPALL') == -1: return sentence return sentence else: return sentence
[ "def", "__replace_capall", "(", "sentence", ")", ":", "# print \"\\nReplacing CAPITALISE: \"", "if", "sentence", "is", "not", "None", ":", "while", "sentence", ".", "find", "(", "'#CAPALL'", ")", "!=", "-", "1", ":", "# _cap_index = _sentence.find('#CAPALL')", "sentence", "=", "sentence", ".", "upper", "(", ")", "sentence", "=", "sentence", ".", "replace", "(", "'#CAPALL '", ",", "''", ",", "1", ")", "if", "sentence", ".", "find", "(", "'#CAPALL'", ")", "==", "-", "1", ":", "return", "sentence", "else", ":", "return", "sentence" ]
here we replace all instances of #CAPALL and cap the entire sentence. Don't believe that CAPALL is buggy anymore as it forces all uppercase OK? :param sentence:
[ "here", "we", "replace", "all", "instances", "of", "#CAPALL", "and", "cap", "the", "entire", "sentence", ".", "Don", "t", "believe", "that", "CAPALL", "is", "buggy", "anymore", "as", "it", "forces", "all", "uppercase", "OK?" ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L793-L811
Morrolan/surrealism
surrealism.py
__check_spaces
def __check_spaces(sentence): """ Here we check to see that we have the correct number of spaces in the correct locations. :param _sentence: :return: """ # We have to run the process multiple times: # Once to search for all spaces, and check if there are adjoining spaces; # The second time to check for 2 spaces after sentence-ending characters such as . and ! and ? if sentence is not None: words = sentence.split() new_sentence = '' for (i, word) in enumerate(words): if word[-1] in set('.!?'): word += ' ' new_word = ''.join(word) new_sentence += ' ' + new_word # remove any trailing whitespace new_sentence = new_sentence.strip() return new_sentence
python
def __check_spaces(sentence): """ Here we check to see that we have the correct number of spaces in the correct locations. :param _sentence: :return: """ # We have to run the process multiple times: # Once to search for all spaces, and check if there are adjoining spaces; # The second time to check for 2 spaces after sentence-ending characters such as . and ! and ? if sentence is not None: words = sentence.split() new_sentence = '' for (i, word) in enumerate(words): if word[-1] in set('.!?'): word += ' ' new_word = ''.join(word) new_sentence += ' ' + new_word # remove any trailing whitespace new_sentence = new_sentence.strip() return new_sentence
[ "def", "__check_spaces", "(", "sentence", ")", ":", "# We have to run the process multiple times:", "# Once to search for all spaces, and check if there are adjoining spaces;", "# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?", "if", "sentence", "is", "not", "None", ":", "words", "=", "sentence", ".", "split", "(", ")", "new_sentence", "=", "''", "for", "(", "i", ",", "word", ")", "in", "enumerate", "(", "words", ")", ":", "if", "word", "[", "-", "1", "]", "in", "set", "(", "'.!?'", ")", ":", "word", "+=", "' '", "new_word", "=", "''", ".", "join", "(", "word", ")", "new_sentence", "+=", "' '", "+", "new_word", "# remove any trailing whitespace", "new_sentence", "=", "new_sentence", ".", "strip", "(", ")", "return", "new_sentence" ]
Here we check to see that we have the correct number of spaces in the correct locations. :param _sentence: :return:
[ "Here", "we", "check", "to", "see", "that", "we", "have", "the", "correct", "number", "of", "spaces", "in", "the", "correct", "locations", "." ]
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L814-L841
maxalbert/tohu
tohu/v6/utils.py
make_exploded_column
def make_exploded_column(df, colname_new, colname_old): """ Internal helper function used by `explode_columns()`. """ s = df[colname_old].apply(pd.Series).stack() s.name = colname_new return s
python
def make_exploded_column(df, colname_new, colname_old): """ Internal helper function used by `explode_columns()`. """ s = df[colname_old].apply(pd.Series).stack() s.name = colname_new return s
[ "def", "make_exploded_column", "(", "df", ",", "colname_new", ",", "colname_old", ")", ":", "s", "=", "df", "[", "colname_old", "]", ".", "apply", "(", "pd", ".", "Series", ")", ".", "stack", "(", ")", "s", ".", "name", "=", "colname_new", "return", "s" ]
Internal helper function used by `explode_columns()`.
[ "Internal", "helper", "function", "used", "by", "explode_columns", "()", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L48-L54
maxalbert/tohu
tohu/v6/utils.py
explode_columns
def explode_columns(df, colnames): """ Given a dataframe with certain columns that contain lists, return another dataframe where the elements in each list are "exploded" into individual rows. Example: >>> df col1 col2 col3 col4 0 foo 11 [DDD, AAA, CCC] [dd, aa, cc] 1 bar 22 [FFF] [ff] 2 quux 33 [EEE, BBB] [ee, bb] >>> explode_columns(df, ['col3']) col1 col2 col3 col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb >>> explode_columns(df, {'col3_exploded': 'col3'}) col1 col2 col3_exploded col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb """ if isinstance(colnames, (list, tuple)): colnames = {name: name for name in colnames} remaining_columns = list(df.columns.difference(colnames.values())) df2 = df.set_index(remaining_columns) df3 = pd.concat((make_exploded_column(df2, col_new, col_old) for col_new, col_old in colnames.items()), axis=1) levels_to_reset = list(range(len(remaining_columns))) df4 = df3.reset_index(level=levels_to_reset).reset_index(drop=True) return df4
python
def explode_columns(df, colnames): """ Given a dataframe with certain columns that contain lists, return another dataframe where the elements in each list are "exploded" into individual rows. Example: >>> df col1 col2 col3 col4 0 foo 11 [DDD, AAA, CCC] [dd, aa, cc] 1 bar 22 [FFF] [ff] 2 quux 33 [EEE, BBB] [ee, bb] >>> explode_columns(df, ['col3']) col1 col2 col3 col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb >>> explode_columns(df, {'col3_exploded': 'col3'}) col1 col2 col3_exploded col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb """ if isinstance(colnames, (list, tuple)): colnames = {name: name for name in colnames} remaining_columns = list(df.columns.difference(colnames.values())) df2 = df.set_index(remaining_columns) df3 = pd.concat((make_exploded_column(df2, col_new, col_old) for col_new, col_old in colnames.items()), axis=1) levels_to_reset = list(range(len(remaining_columns))) df4 = df3.reset_index(level=levels_to_reset).reset_index(drop=True) return df4
[ "def", "explode_columns", "(", "df", ",", "colnames", ")", ":", "if", "isinstance", "(", "colnames", ",", "(", "list", ",", "tuple", ")", ")", ":", "colnames", "=", "{", "name", ":", "name", "for", "name", "in", "colnames", "}", "remaining_columns", "=", "list", "(", "df", ".", "columns", ".", "difference", "(", "colnames", ".", "values", "(", ")", ")", ")", "df2", "=", "df", ".", "set_index", "(", "remaining_columns", ")", "df3", "=", "pd", ".", "concat", "(", "(", "make_exploded_column", "(", "df2", ",", "col_new", ",", "col_old", ")", "for", "col_new", ",", "col_old", "in", "colnames", ".", "items", "(", ")", ")", ",", "axis", "=", "1", ")", "levels_to_reset", "=", "list", "(", "range", "(", "len", "(", "remaining_columns", ")", ")", ")", "df4", "=", "df3", ".", "reset_index", "(", "level", "=", "levels_to_reset", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "return", "df4" ]
Given a dataframe with certain columns that contain lists, return another dataframe where the elements in each list are "exploded" into individual rows. Example: >>> df col1 col2 col3 col4 0 foo 11 [DDD, AAA, CCC] [dd, aa, cc] 1 bar 22 [FFF] [ff] 2 quux 33 [EEE, BBB] [ee, bb] >>> explode_columns(df, ['col3']) col1 col2 col3 col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb >>> explode_columns(df, {'col3_exploded': 'col3'}) col1 col2 col3_exploded col4 0 foo 11 DDD dd 1 foo 11 AAA aa 2 foo 11 CCC cc 3 bar 22 FFF ff 4 quux 33 EEE ee 5 quux 33 BBB bb
[ "Given", "a", "dataframe", "with", "certain", "columns", "that", "contain", "lists", "return", "another", "dataframe", "where", "the", "elements", "in", "each", "list", "are", "exploded", "into", "individual", "rows", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L57-L97
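For completeness, the dataframe used in the explode_columns docstring examples can be reconstructed like this (a minimal sketch; the contents are exactly those shown in the doctest):

    import pandas as pd

    df = pd.DataFrame({
        'col1': ['foo', 'bar', 'quux'],
        'col2': [11, 22, 33],
        'col3': [['DDD', 'AAA', 'CCC'], ['FFF'], ['EEE', 'BBB']],
        'col4': [['dd', 'aa', 'cc'], ['ff'], ['ee', 'bb']],
    })

Note that the lists in corresponding rows of col3 and col4 have matching lengths (3, 1, 2), which is what keeps the exploded rows aligned when several columns are stacked and concatenated.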
maxalbert/tohu
tohu/v6/utils.py
print_generated_sequence
def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None): """ Helper function which prints a sequence of `num` items produced by the random generator `gen`. """ if seed: gen.reset(seed) elems = [format(next(gen), fmt) for _ in range(num)] sep_initial = "\n\n" if '\n' in sep else " " print("Generated sequence:{}{}".format(sep_initial, sep.join(elems)))
python
def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None): """ Helper function which prints a sequence of `num` items produced by the random generator `gen`. """ if seed: gen.reset(seed) elems = [format(next(gen), fmt) for _ in range(num)] sep_initial = "\n\n" if '\n' in sep else " " print("Generated sequence:{}{}".format(sep_initial, sep.join(elems)))
[ "def", "print_generated_sequence", "(", "gen", ",", "num", ",", "*", ",", "sep", "=", "\", \"", ",", "fmt", "=", "''", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "gen", ".", "reset", "(", "seed", ")", "elems", "=", "[", "format", "(", "next", "(", "gen", ")", ",", "fmt", ")", "for", "_", "in", "range", "(", "num", ")", "]", "sep_initial", "=", "\"\\n\\n\"", "if", "'\\n'", "in", "sep", "else", "\" \"", "print", "(", "\"Generated sequence:{}{}\"", ".", "format", "(", "sep_initial", ",", "sep", ".", "join", "(", "elems", ")", ")", ")" ]
Helper function which prints a sequence of `num` items produced by the random generator `gen`.
[ "Helper", "function", "which", "prints", "a", "sequence", "of", "num", "items", "produced", "by", "the", "random", "generator", "gen", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L100-L110
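Any object supporting next() and reset() can be used to exercise print_generated_sequence; here is a hypothetical stand-in (not part of tohu) for a quick demonstration:

    import random

    class FakeGen:
        def reset(self, seed):
            self._rng = random.Random(seed)
        def __next__(self):
            return self._rng.randint(0, 99)

    g = FakeGen()
    print_generated_sequence(g, num=5, seed=12345)
    # Generated sequence: 51, 3, 48, 84, 76   (illustrative; actual values depend on the RNG)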
maxalbert/tohu
tohu/v6/utils.py
make_dummy_tuples
def make_dummy_tuples(chars='abcde'): """ Helper function to create a list of namedtuples which are useful for testing and debugging (especially of custom generators). Example ------- >>> make_dummy_tuples(chars='abcd') [Quux(x='AA', y='aa'), Quux(x='BB', y='bb'), Quux(x='CC', y='cc'), Quux(x='DD', y='dd')] """ Quux = namedtuple('Quux', ['x', 'y']) some_tuples = [Quux((c*2).upper(), c*2) for c in chars] return some_tuples
python
def make_dummy_tuples(chars='abcde'): """ Helper function to create a list of namedtuples which are useful for testing and debugging (especially of custom generators). Example ------- >>> make_dummy_tuples(chars='abcd') [Quux(x='AA', y='aa'), Quux(x='BB', y='bb'), Quux(x='CC', y='cc'), Quux(x='DD', y='dd')] """ Quux = namedtuple('Quux', ['x', 'y']) some_tuples = [Quux((c*2).upper(), c*2) for c in chars] return some_tuples
[ "def", "make_dummy_tuples", "(", "chars", "=", "'abcde'", ")", ":", "Quux", "=", "namedtuple", "(", "'Quux'", ",", "[", "'x'", ",", "'y'", "]", ")", "some_tuples", "=", "[", "Quux", "(", "(", "c", "*", "2", ")", ".", "upper", "(", ")", ",", "c", "*", "2", ")", "for", "c", "in", "chars", "]", "return", "some_tuples" ]
Helper function to create a list of namedtuples which are useful for testing and debugging (especially of custom generators). Example ------- >>> make_dummy_tuples(chars='abcd') [Quux(x='AA', y='aa'), Quux(x='BB', y='bb'), Quux(x='CC', y='cc'), Quux(x='DD', y='dd')]
[ "Helper", "function", "to", "create", "a", "list", "of", "namedtuples", "which", "are", "useful", "for", "testing", "and", "debugging", "(", "especially", "of", "custom", "generators", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L113-L128
maxalbert/tohu
tohu/v6/utils.py
ensure_is_date_object
def ensure_is_date_object(x): """ Ensure input represents a valid date and return the corresponding `datetime.date` object. Valid inputs: - string of the form "YYYY-MM-DD" - dt.date object - pd.Timestamp of the form "YYYY-MM-DD 00:00:00" with freq='D' (as is generated by pd.date_range()) """ error_msg = f"Cannot convert input to date object: {x} (type: {type(x)})" if isinstance(x, dt.date): if isinstance(x, pd.Timestamp): if x.freq != 'D': raise TohuDateError(f"Pandas Timestamp must have freq='D' set. Got: freq={x.freq!r}") elif pd.Timestamp(x.date()) == x: return x.date() else: raise TohuDateError(error_msg) elif isinstance(x, dt.datetime): raise TohuDateError(error_msg) else: return x elif isinstance(x, str): return parse_date_string(x) else: raise TohuDateError(error_msg)
python
def ensure_is_date_object(x): """ Ensure input represents a valid date and return the corresponding `datetime.date` object. Valid inputs: - string of the form "YYYY-MM-DD" - dt.date object - pd.Timestamp of the form "YYYY-MM-DD 00:00:00" with freq='D' (as is generated by pd.date_range()) """ error_msg = f"Cannot convert input to date object: {x} (type: {type(x)})" if isinstance(x, dt.date): if isinstance(x, pd.Timestamp): if x.freq != 'D': raise TohuDateError(f"Pandas Timestamp must have freq='D' set. Got: freq={x.freq!r}") elif pd.Timestamp(x.date()) == x: return x.date() else: raise TohuDateError(error_msg) elif isinstance(x, dt.datetime): raise TohuDateError(error_msg) else: return x elif isinstance(x, str): return parse_date_string(x) else: raise TohuDateError(error_msg)
[ "def", "ensure_is_date_object", "(", "x", ")", ":", "error_msg", "=", "f\"Cannot convert input to date object: {x} (type: {type(x)})\"", "if", "isinstance", "(", "x", ",", "dt", ".", "date", ")", ":", "if", "isinstance", "(", "x", ",", "pd", ".", "Timestamp", ")", ":", "if", "x", ".", "freq", "!=", "'D'", ":", "raise", "TohuDateError", "(", "\"Pandas Timestamp must have freq='D' set. Got: freq={x.freq!r}\"", ")", "elif", "pd", ".", "Timestamp", "(", "x", ".", "date", "(", ")", ")", "==", "x", ":", "return", "x", ".", "date", "(", ")", "else", ":", "raise", "TohuDateError", "(", "error_msg", ")", "elif", "isinstance", "(", "x", ",", "dt", ".", "datetime", ")", ":", "raise", "TohuDateError", "(", "error_msg", ")", "else", ":", "return", "x", "elif", "isinstance", "(", "x", ",", "str", ")", ":", "return", "parse_date_string", "(", "x", ")", "else", ":", "raise", "TohuDateError", "(", "error_msg", ")" ]
Ensure input represents a valid date and return the corresponding `datetime.date` object. Valid inputs: - string of the form "YYYY-MM-DD" - dt.date object - pd.Timestamp of the form "YYYY-MM-DD 00:00:00" with freq='D' (as is generated by pd.date_range())
[ "Ensure", "input", "represents", "a", "valid", "date", "and", "return", "the", "corresponding", "datetime", ".", "date", "object", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L148-L175
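An illustrative session showing which inputs ensure_is_date_object accepts and rejects (a sketch: it assumes a pandas version where date_range timestamps still carry freq='D', and abbreviates the error text):

    >>> ensure_is_date_object('2020-03-01')
    datetime.date(2020, 3, 1)
    >>> ensure_is_date_object(dt.date(2020, 3, 1))
    datetime.date(2020, 3, 1)
    >>> ensure_is_date_object(pd.date_range('2020-03-01', periods=1)[0])
    datetime.date(2020, 3, 1)
    >>> ensure_is_date_object(dt.datetime(2020, 3, 1))  # rejected even at midnight
    Traceback (most recent call last):
    ...
    TohuDateError: Cannot convert input to date object: ...

The rejection of dt.datetime relies on the branch ordering: dt.datetime is a subclass of dt.date, so it enters the first branch and is then filtered out explicitly.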
maxalbert/tohu
tohu/v6/tohu_namespace.py
TohuNamespace.all_independent_generators
def all_independent_generators(self): """ Return all generators in this namespace which are not clones. """ return {g: name for g, name in self._ns.items() if not is_clone(g)}
python
def all_independent_generators(self): """ Return all generators in this namespace which are not clones. """ return {g: name for g, name in self._ns.items() if not is_clone(g)}
[ "def", "all_independent_generators", "(", "self", ")", ":", "return", "{", "g", ":", "name", "for", "g", ",", "name", "in", "self", ".", "_ns", ".", "items", "(", ")", "if", "not", "is_clone", "(", "g", ")", "}" ]
Return all generators in this namespace which are not clones.
[ "Return", "all", "generators", "in", "this", "namespace", "which", "are", "not", "clones", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/tohu_namespace.py#L60-L64
maxalbert/tohu
tohu/v6/custom_generator/utils.py
make_tohu_items_class
def make_tohu_items_class(clsname, attr_names): """ Parameters ---------- clsname: string Name of the class to be created attr_names: list of strings Names of the attributes of the class to be created """ item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True) def new_repr(self): all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()]) return f'{clsname}({all_fields})' orig_eq = item_cls.__eq__ def new_eq(self, other): """ Custom __eq__() method which also allows comparisons with tuples and dictionaries. This is mostly for convenience during testing. """ if isinstance(other, self.__class__): return orig_eq(self, other) else: if isinstance(other, tuple): return attr.astuple(self) == other elif isinstance(other, dict): return attr.asdict(self) == other else: return NotImplemented item_cls.__repr__ = new_repr item_cls.__eq__ = new_eq item_cls.keys = lambda self: attr_names item_cls.__getitem__ = lambda self, key: getattr(self, key) item_cls.as_dict = lambda self: attr.asdict(self) item_cls.to_series = lambda self: pd.Series(attr.asdict(self)) return item_cls
python
def make_tohu_items_class(clsname, attr_names): """ Parameters ---------- clsname: string Name of the class to be created attr_names: list of strings Names of the attributes of the class to be created """ item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True) def new_repr(self): all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()]) return f'{clsname}({all_fields})' orig_eq = item_cls.__eq__ def new_eq(self, other): """ Custom __eq__() method which also allows comparisons with tuples and dictionaries. This is mostly for convenience during testing. """ if isinstance(other, self.__class__): return orig_eq(self, other) else: if isinstance(other, tuple): return attr.astuple(self) == other elif isinstance(other, dict): return attr.asdict(self) == other else: return NotImplemented item_cls.__repr__ = new_repr item_cls.__eq__ = new_eq item_cls.keys = lambda self: attr_names item_cls.__getitem__ = lambda self, key: getattr(self, key) item_cls.as_dict = lambda self: attr.asdict(self) item_cls.to_series = lambda self: pd.Series(attr.asdict(self)) return item_cls
[ "def", "make_tohu_items_class", "(", "clsname", ",", "attr_names", ")", ":", "item_cls", "=", "attr", ".", "make_class", "(", "clsname", ",", "{", "name", ":", "attr", ".", "ib", "(", ")", "for", "name", "in", "attr_names", "}", ",", "repr", "=", "False", ",", "cmp", "=", "True", ",", "frozen", "=", "True", ")", "def", "new_repr", "(", "self", ")", ":", "all_fields", "=", "', '", ".", "join", "(", "[", "f'{name}={repr(value)}'", "for", "name", ",", "value", "in", "attr", ".", "asdict", "(", "self", ")", ".", "items", "(", ")", "]", ")", "return", "f'{clsname}({all_fields})'", "orig_eq", "=", "item_cls", ".", "__eq__", "def", "new_eq", "(", "self", ",", "other", ")", ":", "\"\"\"\n Custom __eq__() method which also allows comparisons with\n tuples and dictionaries. This is mostly for convenience\n during testing.\n \"\"\"", "if", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "orig_eq", "(", "self", ",", "other", ")", "else", ":", "if", "isinstance", "(", "other", ",", "tuple", ")", ":", "return", "attr", ".", "astuple", "(", "self", ")", "==", "other", "elif", "isinstance", "(", "other", ",", "dict", ")", ":", "return", "attr", ".", "asdict", "(", "self", ")", "==", "other", "else", ":", "return", "NotImplemented", "item_cls", ".", "__repr__", "=", "new_repr", "item_cls", ".", "__eq__", "=", "new_eq", "item_cls", ".", "keys", "=", "lambda", "self", ":", "attr_names", "item_cls", ".", "__getitem__", "=", "lambda", "self", ",", "key", ":", "getattr", "(", "self", ",", "key", ")", "item_cls", ".", "as_dict", "=", "lambda", "self", ":", "attr", ".", "asdict", "(", "self", ")", "item_cls", ".", "to_series", "=", "lambda", "self", ":", "pd", ".", "Series", "(", "attr", ".", "asdict", "(", "self", ")", ")", "return", "item_cls" ]
Parameters ---------- clsname: string Name of the class to be created attr_names: list of strings Names of the attributes of the class to be created
[ "Parameters", "----------", "clsname", ":", "string", "Name", "of", "the", "class", "to", "be", "created" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/custom_generator/utils.py#L11-L54
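A short usage sketch of the class produced by make_tohu_items_class and its convenience comparisons (the class and field names here are illustrative):

    Point = make_tohu_items_class('Point', ['x', 'y'])
    p = Point(x=1, y=2)
    repr(p)                 # "Point(x=1, y=2)" via the custom __repr__
    p == (1, 2)             # True -- tuple comparison through the custom __eq__
    p == {'x': 1, 'y': 2}   # True -- dict comparison
    dict(p)                 # {'x': 1, 'y': 2}, since keys() and __getitem__ are defined

Because the class is created with frozen=True, instances are immutable: attempting to assign to a field raises an attrs FrozenInstanceError.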
maxalbert/tohu
tohu/v6/custom_generator/utils.py
get_tohu_items_name
def get_tohu_items_name(cls): """ Return a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem' """ assert issubclass(cls, TohuBaseGenerator) try: tohu_items_name = cls.__dict__['__tohu_items_name__'] logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')") except KeyError: m = re.match('^(.*)Generator$', cls.__name__) if m is not None: tohu_items_name = m.group(1) logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)") else: msg = ( "Cannot derive class name for items to be produced by custom generator. " "Please set '__tohu_items_name__' at the top of the custom generator's " "definition or change its name so that it ends in '...Generator'" ) raise ValueError(msg) return tohu_items_name
python
def get_tohu_items_name(cls): """ Return a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem' """ assert issubclass(cls, TohuBaseGenerator) try: tohu_items_name = cls.__dict__['__tohu_items_name__'] logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')") except KeyError: m = re.match('^(.*)Generator$', cls.__name__) if m is not None: tohu_items_name = m.group(1) logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)") else: msg = ( "Cannot derive class name for items to be produced by custom generator. " "Please set '__tohu_items_name__' at the top of the custom generator's " "definition or change its name so that it ends in '...Generator'" ) raise ValueError(msg) return tohu_items_name
[ "def", "get_tohu_items_name", "(", "cls", ")", ":", "assert", "issubclass", "(", "cls", ",", "TohuBaseGenerator", ")", "try", ":", "tohu_items_name", "=", "cls", ".", "__dict__", "[", "'__tohu_items_name__'", "]", "logger", ".", "debug", "(", "f\"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')\"", ")", "except", "KeyError", ":", "m", "=", "re", ".", "match", "(", "'^(.*)Generator$'", ",", "cls", ".", "__name__", ")", "if", "m", "is", "not", "None", ":", "tohu_items_name", "=", "m", ".", "group", "(", "1", ")", "logger", ".", "debug", "(", "f\"Using item class name '{tohu_items_name}' (derived from custom generator name)\"", ")", "else", ":", "msg", "=", "(", "\"Cannot derive class name for items to be produced by custom generator. \"", "\"Please set '__tohu_items_name__' at the top of the custom generator's \"", "\"definition or change its name so that it ends in '...Generator'\"", ")", "raise", "ValueError", "(", "msg", ")", "return", "tohu_items_name" ]
Return a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem'
[ "Return", "a", "string", "which", "defines", "the", "name", "of", "the", "namedtuple", "class", "which", "will", "be", "used", "to", "produce", "items", "for", "the", "custom", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/custom_generator/utils.py#L57-L92
maxalbert/tohu
tohu/v4/primitive_generators.py
SelectOnePrimitive._init_randgen
def _init_randgen(self): """ Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to use `random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper method picks the appropriate random number generator depending on the value of `p`, and also sets up a function `func_random_choice` which can be applied to the input sequence to select random elements from it. """ if self.p is None: self.randgen = Random() self.func_random_choice = self.randgen.choice else: self.randgen = np.random.RandomState() self.func_random_choice = partial(self.randgen.choice, p=self.p)
python
def _init_randgen(self): """ Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to use `random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper method picks the appropriate random number generator depending on the value of `p`, and also sets up a function `func_random_choice` which can be applied to the input sequence to select random elements from it. """ if self.p is None: self.randgen = Random() self.func_random_choice = self.randgen.choice else: self.randgen = np.random.RandomState() self.func_random_choice = partial(self.randgen.choice, p=self.p)
[ "def", "_init_randgen", "(", "self", ")", ":", "if", "self", ".", "p", "is", "None", ":", "self", ".", "randgen", "=", "Random", "(", ")", "self", ".", "func_random_choice", "=", "self", ".", "randgen", ".", "choice", "else", ":", "self", ".", "randgen", "=", "np", ".", "random", ".", "RandomState", "(", ")", "self", ".", "func_random_choice", "=", "partial", "(", "self", ".", "randgen", ".", "choice", ",", "p", "=", "self", ".", "p", ")" ]
Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to use `random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper method picks the appropriate random number generator depending on the value of `p`, and also sets up a function `func_random_choice` which can be applied to the input sequence to select random elements from it.
[ "Initialise", "random", "generator", "to", "be", "used", "for", "picking", "elements", ".", "With", "the", "current", "implementation", "in", "tohu", "(", "where", "we", "pick", "elements", "from", "generators", "individually", "instead", "of", "in", "bulk", ")", "it", "is", "faster", "to", "use", "random", ".", "Random", "than", "numpy", ".", "random", ".", "RandomState", "(", "it", "is", "possible", "that", "this", "may", "change", "in", "the", "future", "if", "we", "change", "the", "design", "so", "that", "tohu", "pre", "-", "produces", "elements", "in", "bulk", "but", "that", "s", "not", "likely", "to", "happen", "in", "the", "near", "future", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/primitive_generators.py#L511-L532
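A hedged sketch of the two code paths this docstring contrasts; the values and weights below are made up for illustration:

from functools import partial
from random import Random
import numpy as np

values = ['a', 'b', 'c']

# p is None: plain random.Random, which is faster per element.
rng = Random(12345)
print(rng.choice(values))

# p given: numpy.random.RandomState supports an explicit distribution.
np_rng = np.random.RandomState(12345)
weighted_choice = partial(np_rng.choice, p=[0.7, 0.2, 0.1])
print(weighted_choice(values))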
maxalbert/tohu
tohu/v4/primitive_generators.py
SelectOnePrimitive._set_random_state_from
def _set_random_state_from(self, other): """ Transfer the internal state from `other` to `self`. After this call, `self` will produce the same elements in the same order as `other` (even though they otherwise remain completely independent). """ try: # this works if randgen is an instance of random.Random() self.randgen.setstate(other.randgen.getstate()) except AttributeError: # this works if randgen is an instance of numpy.random.RandomState() self.randgen.set_state(other.randgen.get_state()) return self
python
def _set_random_state_from(self, other): """ Transfer the internal state from `other` to `self`. After this call, `self` will produce the same elements in the same order as `other` (even though they otherwise remain completely independent). """ try: # this works if randgen is an instance of random.Random() self.randgen.setstate(other.randgen.getstate()) except AttributeError: # this works if randgen is an instance of numpy.random.RandomState() self.randgen.set_state(other.randgen.get_state()) return self
[ "def", "_set_random_state_from", "(", "self", ",", "other", ")", ":", "try", ":", "# this works if randgen is an instance of random.Random()", "self", ".", "randgen", ".", "setstate", "(", "other", ".", "randgen", ".", "getstate", "(", ")", ")", "except", "AttributeError", ":", "# this works if randgen is an instance of numpy.random.RandomState()", "self", ".", "randgen", ".", "set_state", "(", "other", ".", "randgen", ".", "get_state", "(", ")", ")", "return", "self" ]
Transfer the internal state from `other` to `self`. After this call, `self` will produce the same elements in the same order as `other` (even though they otherwise remain completely independent).
[ "Transfer", "the", "internal", "state", "from", "other", "to", "self", ".", "After", "this", "call", "self", "will", "produce", "the", "same", "elements", "in", "the", "same", "order", "as", "other", "(", "even", "though", "they", "otherwise", "remain", "completely", "independent", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/primitive_generators.py#L534-L548
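The same state-transfer trick, demonstrated standalone for the `random.Random` branch:

from random import Random

a = Random(42)
b = Random(99)
b.setstate(a.getstate())  # b now mirrors a's internal state
# (for the numpy branch the analogue is: b.set_state(a.get_state()))
assert [a.random() for _ in range(3)] == [b.random() for _ in range(3)]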
maxalbert/tohu
tohu/v4/primitive_generators.py
SelectMultiplePrimitive._init_randgen
def _init_randgen(self): """ Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to `use random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper function returns the appropriate random number generator depending in the value of `p`, and also returns a function `random_choice` which can be applied to the input sequence to select random elements from it. """ if self.p is None: self.randgen = Random() self.func_random_choice = partial(self.randgen.choices, k=self.num) else: self.randgen = np.random.RandomState() self.func_random_choice = partial(self.randgen.choice, p=self.p, k=self.num)
python
def _init_randgen(self): """ Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to `use random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper function returns the appropriate random number generator depending in the value of `p`, and also returns a function `random_choice` which can be applied to the input sequence to select random elements from it. """ if self.p is None: self.randgen = Random() self.func_random_choice = partial(self.randgen.choices, k=self.num) else: self.randgen = np.random.RandomState() self.func_random_choice = partial(self.randgen.choice, p=self.p, k=self.num)
[ "def", "_init_randgen", "(", "self", ")", ":", "if", "self", ".", "p", "is", "None", ":", "self", ".", "randgen", "=", "Random", "(", ")", "self", ".", "func_random_choice", "=", "partial", "(", "self", ".", "randgen", ".", "choices", ",", "k", "=", "self", ".", "num", ")", "else", ":", "self", ".", "randgen", "=", "np", ".", "random", ".", "RandomState", "(", ")", "self", ".", "func_random_choice", "=", "partial", "(", "self", ".", "randgen", ".", "choice", ",", "p", "=", "self", ".", "p", ",", "k", "=", "self", ".", "num", ")" ]
Initialise random generator to be used for picking elements. With the current implementation in tohu (where we pick elements from generators individually instead of in bulk), it is faster to `use random.Random` than `numpy.random.RandomState` (it is possible that this may change in the future if we change the design so that tohu pre-produces elements in bulk, but that's not likely to happen in the near future). Since `random.Random` doesn't support arbitrary distributions, we can only use it if `p=None`. This helper function returns the appropriate random number generator depending in the value of `p`, and also returns a function `random_choice` which can be applied to the input sequence to select random elements from it.
[ "Initialise", "random", "generator", "to", "be", "used", "for", "picking", "elements", ".", "With", "the", "current", "implementation", "in", "tohu", "(", "where", "we", "pick", "elements", "from", "generators", "individually", "instead", "of", "in", "bulk", ")", "it", "is", "faster", "to", "use", "random", ".", "Random", "than", "numpy", ".", "random", ".", "RandomState", "(", "it", "is", "possible", "that", "this", "may", "change", "in", "the", "future", "if", "we", "change", "the", "design", "so", "that", "tohu", "pre", "-", "produces", "elements", "in", "bulk", "but", "that", "s", "not", "likely", "to", "happen", "in", "the", "near", "future", ")", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v4/primitive_generators.py#L589-L610
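Note the differing keyword names the two branches rely on: `random.Random.choices` takes `k`, whereas `numpy.random.RandomState.choice` takes `size`. A quick standalone check (values made up):

from random import Random
import numpy as np

print(Random(0).choices(['x', 'y'], k=3))                                 # list of 3 draws
print(np.random.RandomState(0).choice(['x', 'y'], size=3, p=[0.5, 0.5]))  # ndarray of 3 draws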
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
update_with_tohu_generators
def update_with_tohu_generators(field_gens, adict): """ Helper function which updates `field_gens` with any items in the dictionary `adict` that are instances of `TohuUltraBaseGenerator`. """ for name, gen in adict.items(): if isinstance(gen, TohuUltraBaseGenerator): field_gens[name] = gen
python
def update_with_tohu_generators(field_gens, adict): """ Helper function which updates `field_gens` with any items in the dictionary `adict` that are instances of `TohuUltraBaseGenerator`. """ for name, gen in adict.items(): if isinstance(gen, TohuUltraBaseGenerator): field_gens[name] = gen
[ "def", "update_with_tohu_generators", "(", "field_gens", ",", "adict", ")", ":", "for", "name", ",", "gen", "in", "adict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "gen", ",", "TohuUltraBaseGenerator", ")", ":", "field_gens", "[", "name", "]", "=", "gen" ]
Helper function which updates `field_gens` with any items in the dictionary `adict` that are instances of `TohuUltraBaseGenerator`.
[ "Helper", "function", "which", "updates", "field_gens", "with", "any", "items", "in", "the", "dictionary", "adict", "that", "are", "instances", "of", "TohuUltraBaseGenerator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L22-L29
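The helper is a type-filtered dictionary update; the same pattern with a stand-in base class:

class Base:
    pass

class Gen(Base):
    pass

namespace = {'x': Gen(), 'n': 42, 'y': Gen()}
field_gens = {}
for name, obj in namespace.items():
    if isinstance(obj, Base):  # in tohu the check is against TohuUltraBaseGenerator
        field_gens[name] = obj
print(sorted(field_gens))  # ['x', 'y']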
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
find_field_generator_templates
def find_field_generator_templates(obj): """ Return dictionary with the names and instances of all tohu.BaseGenerator occurring in the given object's class & instance namespaces. """ cls_dict = obj.__class__.__dict__ obj_dict = obj.__dict__ #debug_print_dict(cls_dict, 'cls_dict') #debug_print_dict(obj_dict, 'obj_dict') field_gens = {} update_with_tohu_generators(field_gens, cls_dict) update_with_tohu_generators(field_gens, obj_dict) return field_gens
python
def find_field_generator_templates(obj): """ Return dictionary with the names and instances of all tohu.BaseGenerator occurring in the given object's class & instance namespaces. """ cls_dict = obj.__class__.__dict__ obj_dict = obj.__dict__ #debug_print_dict(cls_dict, 'cls_dict') #debug_print_dict(obj_dict, 'obj_dict') field_gens = {} update_with_tohu_generators(field_gens, cls_dict) update_with_tohu_generators(field_gens, obj_dict) return field_gens
[ "def", "find_field_generator_templates", "(", "obj", ")", ":", "cls_dict", "=", "obj", ".", "__class__", ".", "__dict__", "obj_dict", "=", "obj", ".", "__dict__", "#debug_print_dict(cls_dict, 'cls_dict')", "#debug_print_dict(obj_dict, 'obj_dict')", "field_gens", "=", "{", "}", "update_with_tohu_generators", "(", "field_gens", ",", "cls_dict", ")", "update_with_tohu_generators", "(", "field_gens", ",", "obj_dict", ")", "return", "field_gens" ]
Return dictionary with the names and instances of all tohu.BaseGenerator occurring in the given object's class & instance namespaces.
[ "Return", "dictionary", "with", "the", "names", "and", "instances", "of", "all", "tohu", ".", "BaseGenerator", "occurring", "in", "the", "given", "object", "s", "class", "&", "instance", "namespaces", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L32-L48
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
set_item_class_name_on_custom_generator_class
def set_item_class_name_on_custom_generator_class(cls): """ Set the attribute `cls.__tohu_items_name__` to a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem' """ if '__tohu_items_name__' in cls.__dict__: logger.debug( f"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')") else: m = re.match('^(.*)Generator$', cls.__name__) if m is not None: cls.__tohu_items_name__ = m.group(1) logger.debug(f"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)") else: raise ValueError("Cannot derive class name for items to be produced by custom generator. " "Please set '__tohu_items_name__' at the top of the custom generator's " "definition or change its name so that it ends in '...Generator'")
python
def set_item_class_name_on_custom_generator_class(cls): """ Set the attribute `cls.__tohu_items_name__` to a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem' """ if '__tohu_items_name__' in cls.__dict__: logger.debug( f"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')") else: m = re.match('^(.*)Generator$', cls.__name__) if m is not None: cls.__tohu_items_name__ = m.group(1) logger.debug(f"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)") else: raise ValueError("Cannot derive class name for items to be produced by custom generator. " "Please set '__tohu_items_name__' at the top of the custom generator's " "definition or change its name so that it ends in '...Generator'")
[ "def", "set_item_class_name_on_custom_generator_class", "(", "cls", ")", ":", "if", "'__tohu_items_name__'", "in", "cls", ".", "__dict__", ":", "logger", ".", "debug", "(", "f\"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')\"", ")", "else", ":", "m", "=", "re", ".", "match", "(", "'^(.*)Generator$'", ",", "cls", ".", "__name__", ")", "if", "m", "is", "not", "None", ":", "cls", ".", "__tohu_items_name__", "=", "m", ".", "group", "(", "1", ")", "logger", ".", "debug", "(", "f\"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)\"", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot derive class name for items to be produced by custom generator. \"", "\"Please set '__tohu_items_name__' at the top of the custom generator's \"", "\"definition or change its name so that it ends in '...Generator'\"", ")" ]
Set the attribute `cls.__tohu_items_name__` to a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem'
[ "Set", "the", "attribute", "cls", ".", "__tohu_items_name__", "to", "a", "string", "which", "defines", "the", "name", "of", "the", "namedtuple", "class", "which", "will", "be", "used", "to", "produce", "items", "for", "the", "custom", "generator", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L51-L80
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
make_item_class_for_custom_generator_class
def make_item_class_for_custom_generator_class(cls): """ cls: The custom generator class for which to create an item-class """ clsname = cls.__tohu_items_name__ attr_names = cls.field_gens.keys() return make_item_class(clsname, attr_names)
python
def make_item_class_for_custom_generator_class(cls): """ cls: The custom generator class for which to create an item-class """ clsname = cls.__tohu_items_name__ attr_names = cls.field_gens.keys() return make_item_class(clsname, attr_names)
[ "def", "make_item_class_for_custom_generator_class", "(", "cls", ")", ":", "clsname", "=", "cls", ".", "__tohu_items_name__", "attr_names", "=", "cls", ".", "field_gens", ".", "keys", "(", ")", "return", "make_item_class", "(", "clsname", ",", "attr_names", ")" ]
cls: The custom generator class for which to create an item-class
[ "cls", ":", "The", "custom", "generator", "class", "for", "which", "to", "create", "an", "item", "-", "class" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L129-L136
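`make_item_class` itself is not shown in this record; a plausible minimal stand-in (the real tohu helper may differ) is a namedtuple factory:

from collections import namedtuple

def make_item_class(clsname, attr_names):
    # Hypothetical reconstruction: items are namedtuples over the field names.
    return namedtuple(clsname, list(attr_names))

Quux = make_item_class('Quux', ['name', 'age'])
print(Quux(name='alice', age=30))  # Quux(name='alice', age=30)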
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
_add_new_init_method
def _add_new_init_method(cls): """ Replace the existing cls.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do.. """ orig_init = cls.__init__ def new_init_method(self, *args, **kwargs): logger.debug(f"Initialising new {self} (type: {type(self)})") # Call original __init__ function to ensure we pick up # any tohu generators that are defined there. # logger.debug(f" orig_init: {orig_init}") orig_init(self, *args, **kwargs) # # Find field generator templates and spawn them to create # field generators for the new custom generator instance. # field_gens_templates = find_field_generator_templates(self) logger.debug(f'Found {len(field_gens_templates)} field generator template(s):') debug_print_dict(field_gens_templates) logger.debug('Spawning field generator templates...') origs = {} spawned = {} dependency_mapping = {} for (name, gen) in field_gens_templates.items(): origs[name] = gen spawned[name] = gen.spawn(dependency_mapping) logger.debug(f'Adding dependency mapping: {gen} -> {spawned[name]}') self.field_gens = spawned self.__dict__.update(self.field_gens) logger.debug(f'Spawned field generators attached to custom generator instance:') debug_print_dict(self.field_gens) # Add seed generator # #self.seed_generator = SeedGenerator() # Create class for the items produced by this generator # self.__class__.item_cls = make_item_class_for_custom_generator_class(self) cls.__init__ = new_init_method
python
def _add_new_init_method(cls): """ Replace the existing cls.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do.. """ orig_init = cls.__init__ def new_init_method(self, *args, **kwargs): logger.debug(f"Initialising new {self} (type: {type(self)})") # Call original __init__ function to ensure we pick up # any tohu generators that are defined there. # logger.debug(f" orig_init: {orig_init}") orig_init(self, *args, **kwargs) # # Find field generator templates and spawn them to create # field generators for the new custom generator instance. # field_gens_templates = find_field_generator_templates(self) logger.debug(f'Found {len(field_gens_templates)} field generator template(s):') debug_print_dict(field_gens_templates) logger.debug('Spawning field generator templates...') origs = {} spawned = {} dependency_mapping = {} for (name, gen) in field_gens_templates.items(): origs[name] = gen spawned[name] = gen.spawn(dependency_mapping) logger.debug(f'Adding dependency mapping: {gen} -> {spawned[name]}') self.field_gens = spawned self.__dict__.update(self.field_gens) logger.debug(f'Spawned field generators attached to custom generator instance:') debug_print_dict(self.field_gens) # Add seed generator # #self.seed_generator = SeedGenerator() # Create class for the items produced by this generator # self.__class__.item_cls = make_item_class_for_custom_generator_class(self) cls.__init__ = new_init_method
[ "def", "_add_new_init_method", "(", "cls", ")", ":", "orig_init", "=", "cls", ".", "__init__", "def", "new_init_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "f\"Initialising new {self} (type: {type(self)})\"", ")", "# Call original __init__ function to ensure we pick up", "# any tohu generators that are defined there.", "#", "logger", ".", "debug", "(", "f\" orig_init: {orig_init}\"", ")", "orig_init", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "#", "# Find field generator templates and spawn them to create", "# field generators for the new custom generator instance.", "#", "field_gens_templates", "=", "find_field_generator_templates", "(", "self", ")", "logger", ".", "debug", "(", "f'Found {len(field_gens_templates)} field generator template(s):'", ")", "debug_print_dict", "(", "field_gens_templates", ")", "logger", ".", "debug", "(", "'Spawning field generator templates...'", ")", "origs", "=", "{", "}", "spawned", "=", "{", "}", "dependency_mapping", "=", "{", "}", "for", "(", "name", ",", "gen", ")", "in", "field_gens_templates", ".", "items", "(", ")", ":", "origs", "[", "name", "]", "=", "gen", "spawned", "[", "name", "]", "=", "gen", ".", "spawn", "(", "dependency_mapping", ")", "logger", ".", "debug", "(", "f'Adding dependency mapping: {gen} -> {spawned[name]}'", ")", "self", ".", "field_gens", "=", "spawned", "self", ".", "__dict__", ".", "update", "(", "self", ".", "field_gens", ")", "logger", ".", "debug", "(", "f'Spawned field generators attached to custom generator instance:'", ")", "debug_print_dict", "(", "self", ".", "field_gens", ")", "# Add seed generator", "#", "#self.seed_generator = SeedGenerator()", "# Create class for the items produced by this generator", "#", "self", ".", "__class__", ".", "item_cls", "=", "make_item_class_for_custom_generator_class", "(", "self", ")", "cls", ".", "__init__", "=", "new_init_method" ]
Replace the existing cls.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do..
[ "Replace", "the", "existing", "cls", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "calls", "the", "original", "one", "and", "in", "addition", "performs", "the", "following", "actions", ":" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L139-L192
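The core monkey-patching pattern used by `_add_new_init_method`, reduced to a toy example without any tohu machinery:

def add_logging_init(cls):
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        orig_init(self, *args, **kwargs)  # always call the original first
        print(f"initialised {type(self).__name__}")

    cls.__init__ = new_init

class Thing:
    def __init__(self, x):
        self.x = x

add_logging_init(Thing)
t = Thing(3)  # prints: initialised Thing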
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
_add_new_next_method
def _add_new_next_method(cls): """ TODO """ def new_next(self): field_values = [next(g) for g in self.field_gens.values()] return self.item_cls(*field_values) cls.__next__ = new_next
python
def _add_new_next_method(cls): """ TODO """ def new_next(self): field_values = [next(g) for g in self.field_gens.values()] return self.item_cls(*field_values) cls.__next__ = new_next
[ "def", "_add_new_next_method", "(", "cls", ")", ":", "def", "new_next", "(", "self", ")", ":", "field_values", "=", "[", "next", "(", "g", ")", "for", "g", "in", "self", ".", "field_gens", ".", "values", "(", ")", "]", "return", "self", ".", "item_cls", "(", "*", "field_values", ")", "cls", ".", "__next__", "=", "new_next" ]
TODO
[ "TODO" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L195-L204
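What the generated `__next__` does, in isolation, with field generators stubbed by `itertools.count`:

from collections import namedtuple
from itertools import count

Item = namedtuple('Item', ['a', 'b'])
field_gens = {'a': count(0), 'b': count(100)}

def next_item():
    field_values = [next(g) for g in field_gens.values()]
    return Item(*field_values)

print(next_item())  # Item(a=0, b=100)
print(next_item())  # Item(a=1, b=101)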
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
_add_new_reset_method
def _add_new_reset_method(cls): """ Attach a new `reset()` method to `cls` which resets the internal seed generator of `cls` and then resets each of its constituent field generators found in `cls.field_gens`. """ # # Create and assign automatically generated reset() method # def new_reset_method(self, seed=None): logger.debug(f'[EEE] Inside automatically generated reset() method for {self} (seed={seed})') if seed is not None: self.seed_generator.reset(seed) for name, gen in self.field_gens.items(): next_seed = next(self.seed_generator) gen.reset(next_seed) # TODO: the following should be covered by the newly added # reset() method in IndependentGeneratorMeta. However, for # some reason we can't call this via the usual `orig_reset()` # pattern, so we have to duplicate this here. Not ideal... for c in self._clones: c.reset_clone(seed) return self cls.reset = new_reset_method
python
def _add_new_reset_method(cls): """ Attach a new `reset()` method to `cls` which resets the internal seed generator of `cls` and then resets each of its constituent field generators found in `cls.field_gens`. """ # # Create and assign automatically generated reset() method # def new_reset_method(self, seed=None): logger.debug(f'[EEE] Inside automatically generated reset() method for {self} (seed={seed})') if seed is not None: self.seed_generator.reset(seed) for name, gen in self.field_gens.items(): next_seed = next(self.seed_generator) gen.reset(next_seed) # TODO: the following should be covered by the newly added # reset() method in IndependentGeneratorMeta. However, for # some reason we can't call this via the usual `orig_reset()` # pattern, so we have to duplicate this here. Not ideal... for c in self._clones: c.reset_clone(seed) return self cls.reset = new_reset_method
[ "def", "_add_new_reset_method", "(", "cls", ")", ":", "#", "# Create and assign automatically generated reset() method", "#", "def", "new_reset_method", "(", "self", ",", "seed", "=", "None", ")", ":", "logger", ".", "debug", "(", "f'[EEE] Inside automatically generated reset() method for {self} (seed={seed})'", ")", "if", "seed", "is", "not", "None", ":", "self", ".", "seed_generator", ".", "reset", "(", "seed", ")", "for", "name", ",", "gen", "in", "self", ".", "field_gens", ".", "items", "(", ")", ":", "next_seed", "=", "next", "(", "self", ".", "seed_generator", ")", "gen", ".", "reset", "(", "next_seed", ")", "# TODO: the following should be covered by the newly added", "# reset() method in IndependentGeneratorMeta. However, for", "# some reason we can't call this via the usual `orig_reset()`", "# pattern, so we have to duplicate this here. Not ideal...", "for", "c", "in", "self", ".", "_clones", ":", "c", ".", "reset_clone", "(", "seed", ")", "return", "self", "cls", ".", "reset", "=", "new_reset_method" ]
Attach a new `reset()` method to `cls` which resets the internal seed generator of `cls` and then resets each of its constituent field generators found in `cls.field_gens`.
[ "Attach", "a", "new", "reset", "()", "method", "to", "cls", "which", "resets", "the", "internal", "seed", "generator", "of", "cls", "and", "then", "resets", "each", "of", "its", "constituent", "field", "generators", "found", "in", "cls", ".", "field_gens", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L207-L237
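The reset logic fans one master seed out into per-field seeds; schematically, with a hypothetical stand-in for tohu's `SeedGenerator`:

from random import Random

class SeedGenerator:
    # Stand-in only: an endless stream of seeds derived from a master seed.
    def __init__(self):
        self._rng = Random()

    def reset(self, seed):
        self._rng.seed(seed)

    def __next__(self):
        return self._rng.randint(0, 2**32 - 1)

sg = SeedGenerator()
sg.reset(12345)
print([next(sg) for _ in range(3)])  # three reproducible per-field seeds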
maxalbert/tohu
tohu/v2/custom_generator_NEW.py
_add_new_spawn_method
def _add_new_spawn_method(cls): """ TODO """ def new_spawn_method(self, dependency_mapping): # TODO/FIXME: Check that this does the right thing: # (i) the spawned generator is independent of the original one (i.e. they can be reset independently without altering the other's behaviour) # (ii) ensure that it also works if this custom generator's __init__ requires additional arguments #new_instance = self.__class__() # # FIXME: It would be good to explicitly spawn the field generators of `self` # here because this would ensure that the internal random generators # of the spawned versions are in the same state as the ones in `self`. # This would guarantee that the spawned custom generator produces the # same elements as `self` even before reset() is called explicitly. new_instance = cls() return new_instance cls.spawn = new_spawn_method
python
def _add_new_spawn_method(cls): """ TODO """ def new_spawn_method(self, dependency_mapping): # TODO/FIXME: Check that this does the right thing: # (i) the spawned generator is independent of the original one (i.e. they can be reset independently without altering the other's behaviour) # (ii) ensure that it also works if this custom generator's __init__ requires additional arguments #new_instance = self.__class__() # # FIXME: It would be good to explicitly spawn the field generators of `self` # here because this would ensure that the internal random generators # of the spawned versions are in the same state as the ones in `self`. # This would guarantee that the spawned custom generator produces the # same elements as `self` even before reset() is called explicitly. new_instance = cls() return new_instance cls.spawn = new_spawn_method
[ "def", "_add_new_spawn_method", "(", "cls", ")", ":", "def", "new_spawn_method", "(", "self", ",", "dependency_mapping", ")", ":", "# TODO/FIXME: Check that this does the right thing:", "# (i) the spawned generator is independent of the original one (i.e. they can be reset independently without altering the other's behaviour)", "# (ii) ensure that it also works if this custom generator's __init__ requires additional arguments", "#new_instance = self.__class__()", "#", "# FIXME: It would be good to explicitly spawn the field generators of `self`", "# here because this would ensure that the internal random generators", "# of the spawned versions are in the same state as the ones in `self`.", "# This would guarantee that the spawned custom generator produces the", "# same elements as `self` even before reset() is called explicitly.", "new_instance", "=", "cls", "(", ")", "return", "new_instance", "cls", ".", "spawn", "=", "new_spawn_method" ]
TODO
[ "TODO" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L240-L259
maxalbert/tohu
tohu/v6/derived_generators.py
DerivedGenerator.reset_input_generators
def reset_input_generators(self, seed): """ Helper method which explicitly resets all input generators to the derived generator. This should only ever be called for testing or debugging. """ seed_generator = SeedGenerator().reset(seed=seed) for gen in self.input_generators: gen.reset(next(seed_generator)) try: # In case `gen` is itself a derived generator, # recursively reset its own input generators. gen.reset_input_generators(next(seed_generator)) except AttributeError: pass
python
def reset_input_generators(self, seed): """ Helper method which explicitly resets all input generators to the derived generator. This should only ever be called for testing or debugging. """ seed_generator = SeedGenerator().reset(seed=seed) for gen in self.input_generators: gen.reset(next(seed_generator)) try: # In case `gen` is itself a derived generator, # recursively reset its own input generators. gen.reset_input_generators(next(seed_generator)) except AttributeError: pass
[ "def", "reset_input_generators", "(", "self", ",", "seed", ")", ":", "seed_generator", "=", "SeedGenerator", "(", ")", ".", "reset", "(", "seed", "=", "seed", ")", "for", "gen", "in", "self", ".", "input_generators", ":", "gen", ".", "reset", "(", "next", "(", "seed_generator", ")", ")", "try", ":", "# In case `gen` is itself a derived generator,", "# recursively reset its own input generators.", "gen", ".", "reset_input_generators", "(", "next", "(", "seed_generator", ")", ")", "except", "AttributeError", ":", "pass" ]
Helper method which explicitly resets all input generators to the derived generator. This should only ever be called for testing or debugging.
[ "Helper", "method", "which", "explicitly", "resets", "all", "input", "generators", "to", "the", "derived", "generator", ".", "This", "should", "only", "ever", "be", "called", "for", "testing", "or", "debugging", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/derived_generators.py#L20-L35
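The try/except duck-typing above lets the reset recurse through nested derived generators; a toy version with deliberately simplified seeding:

class Leaf:
    def reset(self, seed):
        print(f"leaf reset with seed {seed}")

class Derived:
    def __init__(self, *inputs):
        self.input_generators = list(inputs)

    def reset(self, seed):
        print(f"derived reset with seed {seed}")

    def reset_input_generators(self, seed):
        for offset, gen in enumerate(self.input_generators):
            gen.reset(seed + offset)  # simplified; tohu derives seeds via a SeedGenerator
            if hasattr(gen, 'reset_input_generators'):
                gen.reset_input_generators(seed + offset)

Derived(Leaf(), Derived(Leaf())).reset_input_generators(100)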
maxalbert/tohu
tohu/v6/derived_generators.py
SelectOne._spot_check_that_elements_produced_by_this_generator_have_attribute
def _spot_check_that_elements_produced_by_this_generator_have_attribute(self, name): """ Helper function to spot-check that the items produced by this generator have the attribute `name`. """ g_tmp = self.values_gen.spawn() sample_element = next(g_tmp)[0] try: getattr(sample_element, name) except AttributeError: raise AttributeError(f"Items produced by {self} do not have the attribute '{name}'")
python
def _spot_check_that_elements_produced_by_this_generator_have_attribute(self, name): """ Helper function to spot-check that the items produced by this generator have the attribute `name`. """ g_tmp = self.values_gen.spawn() sample_element = next(g_tmp)[0] try: getattr(sample_element, name) except AttributeError: raise AttributeError(f"Items produced by {self} do not have the attribute '{name}'")
[ "def", "_spot_check_that_elements_produced_by_this_generator_have_attribute", "(", "self", ",", "name", ")", ":", "g_tmp", "=", "self", ".", "values_gen", ".", "spawn", "(", ")", "sample_element", "=", "next", "(", "g_tmp", ")", "[", "0", "]", "try", ":", "getattr", "(", "sample_element", ",", "name", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "f\"Items produced by {self} do not have the attribute '{name}'\"", ")" ]
Helper function to spot-check that the items produced by this generator have the attribute `name`.
[ "Helper", "function", "to", "spot", "-", "check", "that", "the", "items", "produced", "by", "this", "generator", "have", "the", "attribute", "name", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/derived_generators.py#L201-L210
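The spot check reduces to probing a single sample element with `getattr`; e.g.:

sample = ('alice', 30)  # a plain tuple has no attribute 'name'
try:
    getattr(sample, 'name')
except AttributeError:
    print("items do not have the attribute 'name'")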
maxalbert/tohu
tohu/v6/item_list.py
ItemList.to_df
def to_df(self, fields=None, fields_to_explode=None): """ Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. """ if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} assert fields_to_explode is None or isinstance(fields_to_explode, (list, tuple)) if fields_to_explode is None: fields_to_explode = [] if fields is None: colnames_to_export = list(self.items[0].as_dict().keys()) # hack! the field names should perhaps be passed in during initialisation? else: colnames_to_export = list(fields.keys()) if not set(fields_to_explode).issubset(colnames_to_export): raise ValueError( "All fields to explode must occur as column names. " f"Got field names: {fields_to_explode}. Column names: {list(fields.keys())}" ) if fields is None: # New version (much faster!, but needs cleaning up) import attr df = pd.DataFrame([attr.astuple(x) for x in self.items], columns=colnames_to_export) # Old version: #return pd.DataFrame([x.to_series() for x in self.items]) else: # New version (much faster!) def make_attrgetter(attr_name_new, attr_name, fields_to_explode): # TODO: this needs cleaning up! if attr_name_new in fields_to_explode and '.' in attr_name: attr_name_first_part, attr_name_rest = attr_name.split('.', maxsplit=1) def func(row): foo_items = attrgetter(attr_name_first_part)(row) return [attrgetter(attr_name_rest)(x) for x in foo_items] return func else: return attrgetter(attr_name) attr_getters = [make_attrgetter(attr_name_new, attr_name, fields_to_explode) for attr_name_new, attr_name in fields.items()] try: df = pd.DataFrame([tuple(func(x) for func in attr_getters) for x in self.items], columns=colnames_to_export) except AttributeError as exc: msg = ( "Could not export to dataframe. Did you forget to pass any fields " "which contain sequences within the 'fields_to_explode' argument?. " f"The original error message was: \"{exc}\"" ) raise AttributeError(msg) if fields_to_explode != []: # TODO: add sanity checks to avoid unwanted behaviour (e.g. that all columns # to be exploded must have the same number of elements in each entry?) df = explode_columns(df, fields_to_explode) return df
python
def to_df(self, fields=None, fields_to_explode=None): """ Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. """ if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} assert fields_to_explode is None or isinstance(fields_to_explode, (list, tuple)) if fields_to_explode is None: fields_to_explode = [] if fields is None: colnames_to_export = list(self.items[0].as_dict().keys()) # hack! the field names should perhaps be passed in during initialisation? else: colnames_to_export = list(fields.keys()) if not set(fields_to_explode).issubset(colnames_to_export): raise ValueError( "All fields to explode must occur as column names. " f"Got field names: {fields_to_explode}. Column names: {list(fields.keys())}" ) if fields is None: # New version (much faster!, but needs cleaning up) import attr df = pd.DataFrame([attr.astuple(x) for x in self.items], columns=colnames_to_export) # Old version: #return pd.DataFrame([x.to_series() for x in self.items]) else: # New version (much faster!) def make_attrgetter(attr_name_new, attr_name, fields_to_explode): # TODO: this needs cleaning up! if attr_name_new in fields_to_explode and '.' in attr_name: attr_name_first_part, attr_name_rest = attr_name.split('.', maxsplit=1) def func(row): foo_items = attrgetter(attr_name_first_part)(row) return [attrgetter(attr_name_rest)(x) for x in foo_items] return func else: return attrgetter(attr_name) attr_getters = [make_attrgetter(attr_name_new, attr_name, fields_to_explode) for attr_name_new, attr_name in fields.items()] try: df = pd.DataFrame([tuple(func(x) for func in attr_getters) for x in self.items], columns=colnames_to_export) except AttributeError as exc: msg = ( "Could not export to dataframe. Did you forget to pass any fields " "which contain sequences within the 'fields_to_explode' argument?. " f"The original error message was: \"{exc}\"" ) raise AttributeError(msg) if fields_to_explode != []: # TODO: add sanity checks to avoid unwanted behaviour (e.g. that all columns # to be exploded must have the same number of elements in each entry?) df = explode_columns(df, fields_to_explode) return df
[ "def", "to_df", "(", "self", ",", "fields", "=", "None", ",", "fields_to_explode", "=", "None", ")", ":", "if", "isinstance", "(", "fields", ",", "(", "list", ",", "tuple", ")", ")", ":", "fields", "=", "{", "name", ":", "name", "for", "name", "in", "fields", "}", "assert", "fields_to_explode", "is", "None", "or", "isinstance", "(", "fields_to_explode", ",", "(", "list", ",", "tuple", ")", ")", "if", "fields_to_explode", "is", "None", ":", "fields_to_explode", "=", "[", "]", "if", "fields", "is", "None", ":", "colnames_to_export", "=", "list", "(", "self", ".", "items", "[", "0", "]", ".", "as_dict", "(", ")", ".", "keys", "(", ")", ")", "# hack! the field names should perhaps be passed in during initialisation?", "else", ":", "colnames_to_export", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "if", "not", "set", "(", "fields_to_explode", ")", ".", "issubset", "(", "colnames_to_export", ")", ":", "raise", "ValueError", "(", "\"All fields to explode must occur as column names. \"", "f\"Got field names: {fields_to_explode}. Column names: {list(fields.keys())}\"", ")", "if", "fields", "is", "None", ":", "# New version (much faster!, but needs cleaning up)", "import", "attr", "df", "=", "pd", ".", "DataFrame", "(", "[", "attr", ".", "astuple", "(", "x", ")", "for", "x", "in", "self", ".", "items", "]", ",", "columns", "=", "colnames_to_export", ")", "# Old version:", "#return pd.DataFrame([x.to_series() for x in self.items])", "else", ":", "# New version (much faster!)", "def", "make_attrgetter", "(", "attr_name_new", ",", "attr_name", ",", "fields_to_explode", ")", ":", "# TODO: this needs cleaning up!", "if", "attr_name_new", "in", "fields_to_explode", "and", "'.'", "in", "attr_name", ":", "attr_name_first_part", ",", "attr_name_rest", "=", "attr_name", ".", "split", "(", "'.'", ",", "maxsplit", "=", "1", ")", "def", "func", "(", "row", ")", ":", "foo_items", "=", "attrgetter", "(", "attr_name_first_part", ")", "(", "row", ")", "return", "[", "attrgetter", "(", "attr_name_rest", ")", "(", "x", ")", "for", "x", "in", "foo_items", "]", "return", "func", "else", ":", "return", "attrgetter", "(", "attr_name", ")", "attr_getters", "=", "[", "make_attrgetter", "(", "attr_name_new", ",", "attr_name", ",", "fields_to_explode", ")", "for", "attr_name_new", ",", "attr_name", "in", "fields", ".", "items", "(", ")", "]", "try", ":", "df", "=", "pd", ".", "DataFrame", "(", "[", "tuple", "(", "func", "(", "x", ")", "for", "func", "in", "attr_getters", ")", "for", "x", "in", "self", ".", "items", "]", ",", "columns", "=", "colnames_to_export", ")", "except", "AttributeError", "as", "exc", ":", "msg", "=", "(", "\"Could not export to dataframe. Did you forget to pass any fields \"", "\"which contain sequences within the 'fields_to_explode' argument?. \"", "f\"The original error message was: \\\"{exc}\\\"\"", ")", "raise", "AttributeError", "(", "msg", ")", "if", "fields_to_explode", "!=", "[", "]", ":", "# TODO: add sanity checks to avoid unwanted behaviour (e.g. that all columns", "# to be exploded must have the same number of elements in each entry?)", "df", "=", "explode_columns", "(", "df", ",", "fields_to_explode", ")", "return", "df" ]
Export items as rows in a pandas dataframe table. Parameters ---------- fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows.
[ "Export", "items", "as", "rows", "in", "a", "pandas", "dataframe", "table", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/item_list.py#L104-L180
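The fast path of `to_df` builds rows with `operator.attrgetter`; its core, sketched with made-up items and assuming pandas is installed:

from collections import namedtuple
from operator import attrgetter

import pandas as pd

Person = namedtuple('Person', ['name', 'age'])
items = [Person('alice', 30), Person('bob', 25)]

fields = {'NAME': 'name', 'AGE': 'age'}  # output column -> attribute name
attr_getters = [attrgetter(attr_name) for attr_name in fields.values()]
df = pd.DataFrame([tuple(func(x) for func in attr_getters) for x in items],
                  columns=list(fields.keys()))
print(df)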
maxalbert/tohu
tohu/v6/item_list.py
ItemList.to_csv
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- output_file: str or file object or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If `output_file` is None, the generated CSV output is returned as a string instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.) append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `output_file` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `output_file`. If `output_file` is given, writes the output to the file and returns `None`. If `output_file` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if fields_to_explode is not None: raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if output_file is None: file_or_string = io.StringIO() elif isinstance(output_file, str): mode = 'a' if append else 'w' file_or_string = open(output_file, mode) # ensure parent directory of output file exists dirname = os.path.dirname(os.path.abspath(output_file)) if not os.path.exists(dirname): logger.debug(f"Creating parent directory of output file '{output_file}'") os.makedirs(dirname) elif isinstance(output_file, io.IOBase): file_or_string = output_file else: raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})") retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: # TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up! # (Note that for regular file output we don't want to encode each line to a bytes # object because this seems to be ca. 2x slower). if isinstance(file_or_string, gzip.GzipFile): file_or_string.write(header_line.encode()) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line.encode()) else: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if output_file is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
python
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'): """ Parameters ---------- output_file: str or file object or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If `output_file` is None, the generated CSV output is returned as a string instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.) append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `output_file` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `output_file`. If `output_file` is given, writes the output to the file and returns `None`. If `output_file` is `None`, returns a string containing the CSV output. """ assert isinstance(append, bool) if fields is None: raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list") if fields_to_explode is not None: raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.") if isinstance(fields, (list, tuple)): fields = {name: name for name in fields} header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline) if output_file is None: file_or_string = io.StringIO() elif isinstance(output_file, str): mode = 'a' if append else 'w' file_or_string = open(output_file, mode) # ensure parent directory of output file exists dirname = os.path.dirname(os.path.abspath(output_file)) if not os.path.exists(dirname): logger.debug(f"Creating parent directory of output file '{output_file}'") os.makedirs(dirname) elif isinstance(output_file, io.IOBase): file_or_string = output_file else: raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})") retval = None attr_getters = [attrgetter(attr_name) for attr_name in fields.values()] try: # TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up! # (Note that for regular file output we don't want to encode each line to a bytes # object because this seems to be ca. 2x slower). if isinstance(file_or_string, gzip.GzipFile): file_or_string.write(header_line.encode()) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line.encode()) else: file_or_string.write(header_line) for x in self.items: line = sep.join([format(func(x)) for func in attr_getters]) + newline file_or_string.write(line) if output_file is None: retval = file_or_string.getvalue() finally: file_or_string.close() return retval
[ "def", "to_csv", "(", "self", ",", "output_file", "=", "None", ",", "*", ",", "fields", "=", "None", ",", "fields_to_explode", "=", "None", ",", "append", "=", "False", ",", "header", "=", "True", ",", "header_prefix", "=", "''", ",", "sep", "=", "','", ",", "newline", "=", "'\\n'", ")", ":", "assert", "isinstance", "(", "append", ",", "bool", ")", "if", "fields", "is", "None", ":", "raise", "NotImplementedError", "(", "\"TODO: derive field names automatically from the generator which produced this item list\"", ")", "if", "fields_to_explode", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "\"TODO: the 'fields_to_explode' argument is not supported for CSV export yet.\"", ")", "if", "isinstance", "(", "fields", ",", "(", "list", ",", "tuple", ")", ")", ":", "fields", "=", "{", "name", ":", "name", "for", "name", "in", "fields", "}", "header_line", "=", "_generate_csv_header_line", "(", "header", "=", "header", ",", "header_prefix", "=", "header_prefix", ",", "header_names", "=", "fields", ".", "keys", "(", ")", ",", "sep", "=", "sep", ",", "newline", "=", "newline", ")", "if", "output_file", "is", "None", ":", "file_or_string", "=", "io", ".", "StringIO", "(", ")", "elif", "isinstance", "(", "output_file", ",", "str", ")", ":", "mode", "=", "'a'", "if", "append", "else", "'w'", "file_or_string", "=", "open", "(", "output_file", ",", "mode", ")", "# ensure parent directory of output file exists", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "output_file", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "logger", ".", "debug", "(", "f\"Creating parent directory of output file '{output_file}'\"", ")", "os", ".", "makedirs", "(", "dirname", ")", "elif", "isinstance", "(", "output_file", ",", "io", ".", "IOBase", ")", ":", "file_or_string", "=", "output_file", "else", ":", "raise", "TypeError", "(", "f\"Invalid output file: {output_file} (type: {type(output_file)})\"", ")", "retval", "=", "None", "attr_getters", "=", "[", "attrgetter", "(", "attr_name", ")", "for", "attr_name", "in", "fields", ".", "values", "(", ")", "]", "try", ":", "# TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up!", "# (Note that for regular file output we don't want to encode each line to a bytes", "# object because this seems to be ca. 2x slower).", "if", "isinstance", "(", "file_or_string", ",", "gzip", ".", "GzipFile", ")", ":", "file_or_string", ".", "write", "(", "header_line", ".", "encode", "(", ")", ")", "for", "x", "in", "self", ".", "items", ":", "line", "=", "sep", ".", "join", "(", "[", "format", "(", "func", "(", "x", ")", ")", "for", "func", "in", "attr_getters", "]", ")", "+", "newline", "file_or_string", ".", "write", "(", "line", ".", "encode", "(", ")", ")", "else", ":", "file_or_string", ".", "write", "(", "header_line", ")", "for", "x", "in", "self", ".", "items", ":", "line", "=", "sep", ".", "join", "(", "[", "format", "(", "func", "(", "x", ")", ")", "for", "func", "in", "attr_getters", "]", ")", "+", "newline", "file_or_string", ".", "write", "(", "line", ")", "if", "output_file", "is", "None", ":", "retval", "=", "file_or_string", ".", "getvalue", "(", ")", "finally", ":", "file_or_string", ".", "close", "(", ")", "return", "retval" ]
Parameters ---------- output_file: str or file object or None The file to which output will be written. By default, any existing content is overwritten. Use `append=True` to open the file in append mode instead. If `output_file` is None, the generated CSV output is returned as a string instead of written to a file. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.) append: bool If `True`, open the file in 'append' mode to avoid overwriting existing content. Default is `False`, i.e. any existing content will be overwritten. This argument only has an effect if `output_file` is given (i.e. if output happens to a file instead of returning a CSV string). header: bool or str or None If `header=False` or `header=None` then no header line will be written. If `header` is a string then this string will be used as the header line. If `header=True` then a header line will be automatically generated from the field names of the custom generator. header_prefix: str If `header=True` then the auto-generated header line will be prefixed with `header_prefix` (otherwise this argument has no effect). For example, set `header_prefix='#'` to make the header line start with '#'. Default: '' sep: str Field separator to use in the output. Default: ',' newline: str Line terminator to use in the output. Default: '\n' Returns ------- The return value depends on the value of `output_file`. If `output_file` is given, writes the output to the file and returns `None`. If `output_file` is `None`, returns a string containing the CSV output.
[ "Parameters", "----------", "output_file", ":", "str", "or", "file", "object", "or", "None", "The", "file", "to", "which", "output", "will", "be", "written", ".", "By", "default", "any", "existing", "content", "is", "overwritten", ".", "Use", "append", "=", "True", "to", "open", "the", "file", "in", "append", "mode", "instead", ".", "If", "output_file", "is", "None", "the", "generated", "CSV", "output", "is", "returned", "as", "a", "string", "instead", "of", "written", "to", "a", "file", ".", "fields", ":", "list", "or", "dict", "List", "of", "field", "names", "to", "export", "or", "dictionary", "mapping", "output", "column", "names", "to", "attribute", "names", "of", "the", "generators", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/item_list.py#L182-L280
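The gzip branch exists because `gzip.GzipFile` accepts only bytes; a minimal standalone check:

import gzip
import io

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write("COL1,COL2\n".encode())  # bytes required here, unlike a text-mode file
    f.write("1,2\n".encode())
print(gzip.decompress(buf.getvalue()).decode())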
maxalbert/tohu
tohu/v6/item_list.py
ItemList.to_sql
def to_sql(self, url, table_name, *, schema=None, fields=None, fields_to_explode=None, if_exists="fail", dtype=None): """ Export items as rows in a PostgreSQL table. Parameters ---------- url: string Connection string to connect to the database. Example: "postgresql://postgres@127.0.0.1:5432/testdb" table_name: string Name of the database table. Note that if this name contains a dot ('.') and `schema` is not specified, the first part of the name before the dot will be interpreted as the schema name. schema : string, optional Specify the schema (if database flavor supports this). If None, use default schema or derive the schema name from `table_name`. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. if_exists : {'fail', 'do_nothing', 'replace', 'append'}, default 'fail' - fail: If table exists, raise an error. - do_nothing: If table exists, do nothing and immediately return. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. dtype : dict, optional Specifying the datatype for columns. The keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. This is passed through to pandas.DataFrame.to_sql(). """ if schema is None: schema, table_name = _extract_schema_if_given(table_name) engine = create_engine(url) ins = inspect(engine) if schema is not None and schema not in ins.get_schema_names(): logger.debug(f"Creating non-existing schema: '{schema}'") engine.execute(CreateSchema(schema)) if table_name in ins.get_table_names(schema=schema) and if_exists == 'do_nothing': logger.debug("Table already exists (use if_exists='replace' or if_exists='append' to modify it).") return if if_exists == 'do_nothing': # we handled the 'do nothing' case above; change to an option that pandas will understand if_exists = 'fail' with engine.begin() as conn: self.to_df(fields=fields, fields_to_explode=fields_to_explode).to_sql( table_name, conn, schema=schema, index=False, if_exists=if_exists, dtype=dtype)
python
def to_sql(self, url, table_name, *, schema=None, fields=None, fields_to_explode=None, if_exists="fail", dtype=None): """ Export items as rows in a PostgreSQL table. Parameters ---------- url: string Connection string to connect to the database. Example: "postgresql://postgres@127.0.0.1:5432/testdb" table_name: string Name of the database table. Note that if this name contains a dot ('.') and `schema` is not specified, the first part of the name before the dot will be interpreted as the schema name. schema : string, optional Specify the schema (if database flavor supports this). If None, use default schema or derive the schema name from `table_name`. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. if_exists : {'fail', 'do_nothing', 'replace', 'append'}, default 'fail' - fail: If table exists, raise an error. - do_nothing: If table exists, do nothing and immediately return. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. dtype : dict, optional Specifying the datatype for columns. The keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. This is passed through to pandas.DataFrame.to_sql(). """ if schema is None: schema, table_name = _extract_schema_if_given(table_name) engine = create_engine(url) ins = inspect(engine) if schema is not None and schema not in ins.get_schema_names(): logger.debug(f"Creating non-existing schema: '{schema}'") engine.execute(CreateSchema(schema)) if table_name in ins.get_table_names(schema=schema) and if_exists == 'do_nothing': logger.debug("Table already exists (use if_exists='replace' or if_exists='append' to modify it).") return if if_exists == 'do_nothing': # we handled the 'do nothing' case above; change to an option that pandas will understand if_exists = 'fail' with engine.begin() as conn: self.to_df(fields=fields, fields_to_explode=fields_to_explode).to_sql( table_name, conn, schema=schema, index=False, if_exists=if_exists, dtype=dtype)
[ "def", "to_sql", "(", "self", ",", "url", ",", "table_name", ",", "*", ",", "schema", "=", "None", ",", "fields", "=", "None", ",", "fields_to_explode", "=", "None", ",", "if_exists", "=", "\"fail\"", ",", "dtype", "=", "None", ")", ":", "if", "schema", "is", "None", ":", "schema", ",", "table_name", "=", "_extract_schema_if_given", "(", "table_name", ")", "engine", "=", "create_engine", "(", "url", ")", "ins", "=", "inspect", "(", "engine", ")", "if", "schema", "is", "not", "None", "and", "schema", "not", "in", "ins", ".", "get_schema_names", "(", ")", ":", "logger", ".", "debug", "(", "f\"Creating non-existing schema: '{schema}'\"", ")", "engine", ".", "execute", "(", "CreateSchema", "(", "schema", ")", ")", "if", "table_name", "in", "ins", ".", "get_table_names", "(", "schema", "=", "schema", ")", "and", "if_exists", "==", "'do_nothing'", ":", "logger", ".", "debug", "(", "\"Table already exists (use if_exists='replace' or if_exists='append' to modify it).\"", ")", "return", "if", "if_exists", "==", "'do_nothing'", ":", "# we handled the 'do nothing' case above; change to an option that pandas will understand", "if_exists", "=", "'fail'", "with", "engine", ".", "begin", "(", ")", "as", "conn", ":", "self", ".", "to_df", "(", "fields", "=", "fields", ",", "fields_to_explode", "=", "fields_to_explode", ")", ".", "to_sql", "(", "table_name", ",", "conn", ",", "schema", "=", "schema", ",", "index", "=", "False", ",", "if_exists", "=", "if_exists", ",", "dtype", "=", "dtype", ")" ]
Export items as rows in a PostgreSQL table. Parameters ---------- url: string Connection string to connect to the database. Example: "postgresql://postgres@127.0.0.1:5432/testdb" table_name: string Name of the database table. Note that if this name contains a dot ('.') and `schema` is not specified, the first part of the name before the dot will be interpreted as the schema name. schema : string, optional Specify the schema (if database flavor supports this). If None, use default schema or derive the schema name from `table_name`. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. if_exists : {'fail', 'do_nothing', 'replace', 'append'}, default 'fail' - fail: If table exists, raise an error. - do_nothing: If table exists, do nothing and immediately return. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. dtype : dict, optional Specifying the datatype for columns. The keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. This is passed through to pandas.DataFrame.to_sql().
[ "Export", "items", "as", "rows", "in", "a", "PostgreSQL", "table", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/item_list.py#L282-L345
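A hedged usage sketch of `to_sql`, based solely on the docstring above; the connection string, schema, table, and field names are placeholders.

items.to_sql(
    'postgresql://postgres@127.0.0.1:5432/testdb',
    'myschema.people',        # schema name derived from the part before the dot
    fields=['name', 'age'],
    if_exists='do_nothing',   # return silently if the table already exists
)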
maxalbert/tohu
tohu/v6/base.py
TohuBaseGenerator.reset
def reset(self, seed): """ Reset this generator's seed generator and any clones. """ logger.debug(f'Resetting {self} (seed={seed})') self.seed_generator.reset(seed) for c in self.clones: c.reset(seed)
python
def reset(self, seed): """ Reset this generator's seed generator and any clones. """ logger.debug(f'Resetting {self} (seed={seed})') self.seed_generator.reset(seed) for c in self.clones: c.reset(seed)
[ "def", "reset", "(", "self", ",", "seed", ")", ":", "logger", ".", "debug", "(", "f'Resetting {self} (seed={seed})'", ")", "self", ".", "seed_generator", ".", "reset", "(", "seed", ")", "for", "c", "in", "self", ".", "clones", ":", "c", ".", "reset", "(", "seed", ")" ]
Reset this generator's seed generator and any clones.
[ "Reset", "this", "generator", "s", "seed", "generator", "and", "any", "clones", "." ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/base.py#L121-L129
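A minimal sketch of the reproducibility contract `reset` implies: resetting with the same seed replays the same sequence, and clones are reset alongside their parent. It assumes `g` is some concrete tohu generator supporting `next()`, which this record does not show.

g.reset(seed=12345)
first = [next(g) for _ in range(3)]
g.reset(seed=12345)           # clones of g are reset too
assert [next(g) for _ in range(3)] == first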
depop/python-flexisettings
flexisettings/__init__.py
_load_config
def _load_config(initial_namespace=None, defaults=None):
    # type: (Optional[str], Optional[str]) -> ConfigLoader
    """
    Kwargs:
        initial_namespace: namespace prefix used to filter config keys,
            unless `CONFIG_NAMESPACE` on the defaults object overrides it
        defaults: object (or dotted import path) to load default settings from
    """
    # load defaults
    config = ConfigLoader()
    if defaults:
        config.update_from_object(defaults)
    namespace = getattr(config, 'CONFIG_NAMESPACE', initial_namespace)
    app_config = getattr(config, 'APP_CONFIG', None)
    # load customised config
    if app_config:
        if namespace is None:
            config.update_from_object(app_config)
        else:
            _temp = ConfigLoader()
            _temp.update_from_object(app_config,
                                     lambda key: key.startswith(namespace))
            config.update(_temp.namespace(namespace))
    return config
python
def _load_config(initial_namespace=None, defaults=None):
    # type: (Optional[str], Optional[str]) -> ConfigLoader
    """
    Kwargs:
        initial_namespace: namespace prefix used to filter config keys,
            unless `CONFIG_NAMESPACE` on the defaults object overrides it
        defaults: object (or dotted import path) to load default settings from
    """
    # load defaults
    config = ConfigLoader()
    if defaults:
        config.update_from_object(defaults)
    namespace = getattr(config, 'CONFIG_NAMESPACE', initial_namespace)
    app_config = getattr(config, 'APP_CONFIG', None)
    # load customised config
    if app_config:
        if namespace is None:
            config.update_from_object(app_config)
        else:
            _temp = ConfigLoader()
            _temp.update_from_object(app_config,
                                     lambda key: key.startswith(namespace))
            config.update(_temp.namespace(namespace))
    return config
[ "def", "_load_config", "(", "initial_namespace", "=", "None", ",", "defaults", "=", "None", ")", ":", "# type: (Optional[str], Optional[str]) -> ConfigLoader", "# load defaults", "if", "defaults", ":", "config", "=", "ConfigLoader", "(", ")", "config", ".", "update_from_object", "(", "defaults", ")", "namespace", "=", "getattr", "(", "config", ",", "'CONFIG_NAMESPACE'", ",", "initial_namespace", ")", "app_config", "=", "getattr", "(", "config", ",", "'APP_CONFIG'", ",", "None", ")", "# load customised config", "if", "app_config", ":", "if", "namespace", "is", "None", ":", "config", ".", "update_from_object", "(", "app_config", ")", "else", ":", "_temp", "=", "ConfigLoader", "(", ")", "_temp", ".", "update_from_object", "(", "app_config", ",", "lambda", "key", ":", "key", ".", "startswith", "(", "namespace", ")", ")", "config", ".", "update", "(", "_temp", ".", "namespace", "(", "namespace", ")", ")", "return", "config" ]
Kwargs:
    initial_namespace: namespace prefix used to filter config keys,
        unless `CONFIG_NAMESPACE` on the defaults object overrides it
    defaults: object (or dotted import path) to load default settings from
[ "Kwargs", ":", "initial_namespace", ":", "defaults", ":" ]
train
https://github.com/depop/python-flexisettings/blob/36d08280ab7c45568fdf206fcdb4cf771d240c6b/flexisettings/__init__.py#L90-L114
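A sketch of how `_load_config` might be driven; the module names and keys are hypothetical. Note the flow: defaults are loaded first, then `APP_CONFIG` (if set) is loaded, filtered to keys starting with the namespace.

# myapp/defaults.py (hypothetical):
#     CONFIG_NAMESPACE = 'MYAPP'
#     APP_CONFIG = 'myapp.settings'   # module whose MYAPP_* keys get loaded
config = _load_config(initial_namespace='MYAPP', defaults='myapp.defaults')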
depop/python-flexisettings
flexisettings/utils.py
override_environment
def override_environment(settings, **kwargs): # type: (Settings, **str) -> Generator """ Override env vars and reload the Settings object NOTE: Obviously this context has to be in place before you import any module which reads env values at import time. NOTE: The values in `kwargs` must be strings else you will get a cryptic: TypeError: execve() arg 3 contains a non-string value """ old_env = os.environ.copy() os.environ.update(kwargs) settings._reload() try: yield except Exception: raise finally: for key in kwargs.keys(): del os.environ[key] os.environ.update(old_env) settings._reload()
python
def override_environment(settings, **kwargs): # type: (Settings, **str) -> Generator """ Override env vars and reload the Settings object NOTE: Obviously this context has to be in place before you import any module which reads env values at import time. NOTE: The values in `kwargs` must be strings else you will get a cryptic: TypeError: execve() arg 3 contains a non-string value """ old_env = os.environ.copy() os.environ.update(kwargs) settings._reload() try: yield except Exception: raise finally: for key in kwargs.keys(): del os.environ[key] os.environ.update(old_env) settings._reload()
[ "def", "override_environment", "(", "settings", ",", "*", "*", "kwargs", ")", ":", "# type: (Settings, **str) -> Generator", "old_env", "=", "os", ".", "environ", ".", "copy", "(", ")", "os", ".", "environ", ".", "update", "(", "kwargs", ")", "settings", ".", "_reload", "(", ")", "try", ":", "yield", "except", "Exception", ":", "raise", "finally", ":", "for", "key", "in", "kwargs", ".", "keys", "(", ")", ":", "del", "os", ".", "environ", "[", "key", "]", "os", ".", "environ", ".", "update", "(", "old_env", ")", "settings", ".", "_reload", "(", ")" ]
Override env vars and reload the Settings object NOTE: Obviously this context has to be in place before you import any module which reads env values at import time. NOTE: The values in `kwargs` must be strings else you will get a cryptic: TypeError: execve() arg 3 contains a non-string value
[ "Override", "env", "vars", "and", "reload", "the", "Settings", "object" ]
train
https://github.com/depop/python-flexisettings/blob/36d08280ab7c45568fdf206fcdb4cf771d240c6b/flexisettings/utils.py#L47-L75
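A usage sketch, assuming the generator above is exposed as a context manager (the `Generator` return type and the `yield` suggest a `@contextmanager` decoration); the env var name is a placeholder. Note the docstring's caveat that values must be strings.

import os

with override_environment(settings, MYAPP_DEBUG='1'):
    assert os.environ['MYAPP_DEBUG'] == '1'   # settings were reloaded with this env
# on exit the previous environment is restored and settings reloaded again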
cloudify-cosmo/repex
repex.py
_import_yaml
def _import_yaml(config_file_path): """Return a configuration object """ try: logger.info('Importing config %s...', config_file_path) with open(config_file_path) as config_file: return yaml.safe_load(config_file.read()) except IOError as ex: raise RepexError('{0}: {1} ({2})'.format( ERRORS['config_file_not_found'], config_file_path, ex)) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex: raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex))
python
def _import_yaml(config_file_path): """Return a configuration object """ try: logger.info('Importing config %s...', config_file_path) with open(config_file_path) as config_file: return yaml.safe_load(config_file.read()) except IOError as ex: raise RepexError('{0}: {1} ({2})'.format( ERRORS['config_file_not_found'], config_file_path, ex)) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex: raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex))
[ "def", "_import_yaml", "(", "config_file_path", ")", ":", "try", ":", "logger", ".", "info", "(", "'Importing config %s...'", ",", "config_file_path", ")", "with", "open", "(", "config_file_path", ")", "as", "config_file", ":", "return", "yaml", ".", "safe_load", "(", "config_file", ".", "read", "(", ")", ")", "except", "IOError", "as", "ex", ":", "raise", "RepexError", "(", "'{0}: {1} ({2})'", ".", "format", "(", "ERRORS", "[", "'config_file_not_found'", "]", ",", "config_file_path", ",", "ex", ")", ")", "except", "(", "yaml", ".", "parser", ".", "ParserError", ",", "yaml", ".", "scanner", ".", "ScannerError", ")", "as", "ex", ":", "raise", "RepexError", "(", "'{0} ({1})'", ".", "format", "(", "ERRORS", "[", "'invalid_yaml'", "]", ",", "ex", ")", ")" ]
Return a configuration object
[ "Return", "a", "configuration", "object" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L72-L83
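A small sketch of the error behaviour encoded above: both a missing file and malformed YAML surface as `RepexError`; the config path is a placeholder.

try:
    config = _import_yaml('config.yaml')   # hypothetical path
except RepexError as ex:
    print(ex)   # config_file_not_found or invalid_yaml, with details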
cloudify-cosmo/repex
repex.py
_get_all_files
def _get_all_files(filename_regex, path, base_dir, excluded_paths=None, excluded_filename_regex=None): """Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well. """ # For windows def replace_backslashes(string): return string.replace('\\', '/') excluded_paths = _normalize_excluded_paths(base_dir, excluded_paths) if excluded_paths: logger.info('Excluding paths: %s', excluded_paths) logger.info('Looking for %s under %s...', filename_regex, os.path.join(base_dir, path)) if excluded_filename_regex: logger.info('Excluding file names: %s', excluded_filename_regex) path_expression = re.compile(replace_backslashes(path)) target_files = [] for root, _, files in os.walk(base_dir): if not root.startswith(tuple(excluded_paths)) \ and path_expression.search(replace_backslashes(root)): for filename in files: filepath = os.path.join(root, filename) is_file, matched, excluded_filename, excluded_path = \ _set_match_parameters( filename, filepath, filename_regex, excluded_filename_regex, excluded_paths) if is_file and matched and not excluded_filename \ and not excluded_path: logger.debug('%s is a match. Appending to list...', filepath) target_files.append(filepath) return target_files
python
def _get_all_files(filename_regex, path, base_dir, excluded_paths=None, excluded_filename_regex=None): """Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well. """ # For windows def replace_backslashes(string): return string.replace('\\', '/') excluded_paths = _normalize_excluded_paths(base_dir, excluded_paths) if excluded_paths: logger.info('Excluding paths: %s', excluded_paths) logger.info('Looking for %s under %s...', filename_regex, os.path.join(base_dir, path)) if excluded_filename_regex: logger.info('Excluding file names: %s', excluded_filename_regex) path_expression = re.compile(replace_backslashes(path)) target_files = [] for root, _, files in os.walk(base_dir): if not root.startswith(tuple(excluded_paths)) \ and path_expression.search(replace_backslashes(root)): for filename in files: filepath = os.path.join(root, filename) is_file, matched, excluded_filename, excluded_path = \ _set_match_parameters( filename, filepath, filename_regex, excluded_filename_regex, excluded_paths) if is_file and matched and not excluded_filename \ and not excluded_path: logger.debug('%s is a match. Appending to list...', filepath) target_files.append(filepath) return target_files
[ "def", "_get_all_files", "(", "filename_regex", ",", "path", ",", "base_dir", ",", "excluded_paths", "=", "None", ",", "excluded_filename_regex", "=", "None", ")", ":", "# For windows", "def", "replace_backslashes", "(", "string", ")", ":", "return", "string", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "excluded_paths", "=", "_normalize_excluded_paths", "(", "base_dir", ",", "excluded_paths", ")", "if", "excluded_paths", ":", "logger", ".", "info", "(", "'Excluding paths: %s'", ",", "excluded_paths", ")", "logger", ".", "info", "(", "'Looking for %s under %s...'", ",", "filename_regex", ",", "os", ".", "path", ".", "join", "(", "base_dir", ",", "path", ")", ")", "if", "excluded_filename_regex", ":", "logger", ".", "info", "(", "'Excluding file names: %s'", ",", "excluded_filename_regex", ")", "path_expression", "=", "re", ".", "compile", "(", "replace_backslashes", "(", "path", ")", ")", "target_files", "=", "[", "]", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "if", "not", "root", ".", "startswith", "(", "tuple", "(", "excluded_paths", ")", ")", "and", "path_expression", ".", "search", "(", "replace_backslashes", "(", "root", ")", ")", ":", "for", "filename", "in", "files", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "is_file", ",", "matched", ",", "excluded_filename", ",", "excluded_path", "=", "_set_match_parameters", "(", "filename", ",", "filepath", ",", "filename_regex", ",", "excluded_filename_regex", ",", "excluded_paths", ")", "if", "is_file", "and", "matched", "and", "not", "excluded_filename", "and", "not", "excluded_path", ":", "logger", ".", "debug", "(", "'%s is a match. Appending to list...'", ",", "filepath", ")", "target_files", ".", "append", "(", "filepath", ")", "return", "target_files" ]
Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well.
[ "Get", "all", "files", "for", "processing", "." ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L120-L167
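A hedged call sketch; the regexes and directory names are placeholders. `path` is a regex matched against directory paths under `base_dir`, while `excluded_paths` are explicit paths.

target_files = _get_all_files(
    filename_regex=r'.*\.py$',
    path='resources',
    base_dir='.',
    excluded_paths=['resources/vendored'],
    excluded_filename_regex=r'test_.*',
)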
cloudify-cosmo/repex
repex.py
_match_tags
def _match_tags(repex_tags, path_tags): """Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match. """ if 'any' in repex_tags or (not repex_tags and not path_tags): return True elif set(repex_tags) & set(path_tags): return True return False
python
def _match_tags(repex_tags, path_tags): """Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match. """ if 'any' in repex_tags or (not repex_tags and not path_tags): return True elif set(repex_tags) & set(path_tags): return True return False
[ "def", "_match_tags", "(", "repex_tags", ",", "path_tags", ")", ":", "if", "'any'", "in", "repex_tags", "or", "(", "not", "repex_tags", "and", "not", "path_tags", ")", ":", "return", "True", "elif", "set", "(", "repex_tags", ")", "&", "set", "(", "path_tags", ")", ":", "return", "True", "return", "False" ]
Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match.
[ "Check", "for", "matching", "tags", "between", "what", "the", "user", "provided", "and", "the", "tags", "set", "in", "the", "config", "." ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L328-L340
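The three branches above reduce to a small truth table, shown here directly against the function:

assert _match_tags(['any'], ['docs'])                  # 'any' always matches
assert _match_tags([], [])                             # nothing asked, nothing set
assert _match_tags(['release'], ['release', 'docs'])   # non-empty intersection
assert not _match_tags(['release'], ['docs'])          # disjoint tag sets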
cloudify-cosmo/repex
repex.py
iterate
def iterate(config_file_path=None, config=None, variables=None, tags=None, validate=True, validate_only=False, with_diff=False): """Iterate over all paths in `config_file_path` :param string config_file_path: a path to a repex config file :param dict config: a dictionary representing a repex config :param dict variables: a dict of variables (can be None) :param list tags: a list of tags to check for :param bool validate: whether to perform schema validation on the config :param bool validate_only: only perform validation without running :param bool with_diff: whether to write a diff of all changes to a file """ # TODO: Check if tags can be a tuple instead of a list if not isinstance(variables or {}, dict): raise TypeError(ERRORS['variables_not_dict']) if not isinstance(tags or [], list): raise TypeError(ERRORS['tags_not_list']) config = _get_config(config_file_path, config) if validate or validate_only: _validate_config_schema(config) if validate_only: logger.info('Config file validation completed successfully!') sys.exit(0) repex_vars = _merge_variables(config['variables'], variables or {}) repex_tags = tags or [] logger.debug('Chosen tags: %s', repex_tags) for path in config['paths']: _process_path(path, repex_tags, repex_vars, with_diff)
python
def iterate(config_file_path=None, config=None, variables=None, tags=None, validate=True, validate_only=False, with_diff=False): """Iterate over all paths in `config_file_path` :param string config_file_path: a path to a repex config file :param dict config: a dictionary representing a repex config :param dict variables: a dict of variables (can be None) :param list tags: a list of tags to check for :param bool validate: whether to perform schema validation on the config :param bool validate_only: only perform validation without running :param bool with_diff: whether to write a diff of all changes to a file """ # TODO: Check if tags can be a tuple instead of a list if not isinstance(variables or {}, dict): raise TypeError(ERRORS['variables_not_dict']) if not isinstance(tags or [], list): raise TypeError(ERRORS['tags_not_list']) config = _get_config(config_file_path, config) if validate or validate_only: _validate_config_schema(config) if validate_only: logger.info('Config file validation completed successfully!') sys.exit(0) repex_vars = _merge_variables(config['variables'], variables or {}) repex_tags = tags or [] logger.debug('Chosen tags: %s', repex_tags) for path in config['paths']: _process_path(path, repex_tags, repex_vars, with_diff)
[ "def", "iterate", "(", "config_file_path", "=", "None", ",", "config", "=", "None", ",", "variables", "=", "None", ",", "tags", "=", "None", ",", "validate", "=", "True", ",", "validate_only", "=", "False", ",", "with_diff", "=", "False", ")", ":", "# TODO: Check if tags can be a tuple instead of a list", "if", "not", "isinstance", "(", "variables", "or", "{", "}", ",", "dict", ")", ":", "raise", "TypeError", "(", "ERRORS", "[", "'variables_not_dict'", "]", ")", "if", "not", "isinstance", "(", "tags", "or", "[", "]", ",", "list", ")", ":", "raise", "TypeError", "(", "ERRORS", "[", "'tags_not_list'", "]", ")", "config", "=", "_get_config", "(", "config_file_path", ",", "config", ")", "if", "validate", "or", "validate_only", ":", "_validate_config_schema", "(", "config", ")", "if", "validate_only", ":", "logger", ".", "info", "(", "'Config file validation completed successfully!'", ")", "sys", ".", "exit", "(", "0", ")", "repex_vars", "=", "_merge_variables", "(", "config", "[", "'variables'", "]", ",", "variables", "or", "{", "}", ")", "repex_tags", "=", "tags", "or", "[", "]", "logger", ".", "debug", "(", "'Chosen tags: %s'", ",", "repex_tags", ")", "for", "path", "in", "config", "[", "'paths'", "]", ":", "_process_path", "(", "path", ",", "repex_tags", ",", "repex_vars", ",", "with_diff", ")" ]
Iterate over all paths in `config_file_path` :param string config_file_path: a path to a repex config file :param dict config: a dictionary representing a repex config :param dict variables: a dict of variables (can be None) :param list tags: a list of tags to check for :param bool validate: whether to perform schema validation on the config :param bool validate_only: only perform validation without running :param bool with_diff: whether to write a diff of all changes to a file
[ "Iterate", "over", "all", "paths", "in", "config_file_path" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L343-L378
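A usage sketch; the config path, variables, and tags are placeholders. Note that `validate_only=True` calls `sys.exit(0)` after validation rather than returning.

iterate(
    config_file_path='repex-config.yaml',
    variables={'version': '1.2.3'},   # merged over the config's own variables
    tags=['release'],
    validate=True,
)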
cloudify-cosmo/repex
repex.py
handle_path
def handle_path(pathobj, variables=None, diff=False): """Iterate over all chosen files in a path :param dict pathobj: a dict of a specific path in the config :param dict variables: a dict of variables (can be None) """ logger.info('Handling path with description: %s', pathobj.get('description')) variables = variables or {} variable_expander = _VariablesHandler() pathobj = variable_expander.expand(variables, pathobj) pathobj = _set_path_defaults(pathobj) path_to_handle = os.path.join(pathobj['base_directory'], pathobj['path']) logger.debug('Path to process: %s', path_to_handle) validate = 'validator' in pathobj if validate: validator_config = pathobj['validator'] validator = _Validator(validator_config) validator_type = validator_config.get('type', 'per_type') rpx = Repex(pathobj) if not pathobj.get('type'): _handle_single_file( rpx=rpx, path_to_handle=path_to_handle, pathobj=pathobj, validate=validate, diff=diff, validator=validator if validate else None) else: _handle_multiple_files( rpx=rpx, path_to_handle=path_to_handle, pathobj=pathobj, validate=validate, diff=diff, validator=validator if validate else None, validator_type=validator_type if validate else None)
python
def handle_path(pathobj, variables=None, diff=False): """Iterate over all chosen files in a path :param dict pathobj: a dict of a specific path in the config :param dict variables: a dict of variables (can be None) """ logger.info('Handling path with description: %s', pathobj.get('description')) variables = variables or {} variable_expander = _VariablesHandler() pathobj = variable_expander.expand(variables, pathobj) pathobj = _set_path_defaults(pathobj) path_to_handle = os.path.join(pathobj['base_directory'], pathobj['path']) logger.debug('Path to process: %s', path_to_handle) validate = 'validator' in pathobj if validate: validator_config = pathobj['validator'] validator = _Validator(validator_config) validator_type = validator_config.get('type', 'per_type') rpx = Repex(pathobj) if not pathobj.get('type'): _handle_single_file( rpx=rpx, path_to_handle=path_to_handle, pathobj=pathobj, validate=validate, diff=diff, validator=validator if validate else None) else: _handle_multiple_files( rpx=rpx, path_to_handle=path_to_handle, pathobj=pathobj, validate=validate, diff=diff, validator=validator if validate else None, validator_type=validator_type if validate else None)
[ "def", "handle_path", "(", "pathobj", ",", "variables", "=", "None", ",", "diff", "=", "False", ")", ":", "logger", ".", "info", "(", "'Handling path with description: %s'", ",", "pathobj", ".", "get", "(", "'description'", ")", ")", "variables", "=", "variables", "or", "{", "}", "variable_expander", "=", "_VariablesHandler", "(", ")", "pathobj", "=", "variable_expander", ".", "expand", "(", "variables", ",", "pathobj", ")", "pathobj", "=", "_set_path_defaults", "(", "pathobj", ")", "path_to_handle", "=", "os", ".", "path", ".", "join", "(", "pathobj", "[", "'base_directory'", "]", ",", "pathobj", "[", "'path'", "]", ")", "logger", ".", "debug", "(", "'Path to process: %s'", ",", "path_to_handle", ")", "validate", "=", "'validator'", "in", "pathobj", "if", "validate", ":", "validator_config", "=", "pathobj", "[", "'validator'", "]", "validator", "=", "_Validator", "(", "validator_config", ")", "validator_type", "=", "validator_config", ".", "get", "(", "'type'", ",", "'per_type'", ")", "rpx", "=", "Repex", "(", "pathobj", ")", "if", "not", "pathobj", ".", "get", "(", "'type'", ")", ":", "_handle_single_file", "(", "rpx", "=", "rpx", ",", "path_to_handle", "=", "path_to_handle", ",", "pathobj", "=", "pathobj", ",", "validate", "=", "validate", ",", "diff", "=", "diff", ",", "validator", "=", "validator", "if", "validate", "else", "None", ")", "else", ":", "_handle_multiple_files", "(", "rpx", "=", "rpx", ",", "path_to_handle", "=", "path_to_handle", ",", "pathobj", "=", "pathobj", ",", "validate", "=", "validate", ",", "diff", "=", "diff", ",", "validator", "=", "validator", "if", "validate", "else", "None", ",", "validator_type", "=", "validator_type", "if", "validate", "else", "None", ")" ]
Iterate over all chosen files in a path :param dict pathobj: a dict of a specific path in the config :param dict variables: a dict of variables (can be None)
[ "Iterate", "over", "all", "chosen", "files", "in", "a", "path" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L503-L545
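A sketch of a minimal `pathobj`, using the field names shown in the `_VariablesHandler.expand` docstring later in this file (`path`, `match`, `replace`, `with`); the concrete values are placeholders.

pathobj = {
    'description': 'bump version string',
    'path': 'setup.py',
    'match': "version='\\d+\\.\\d+\\.\\d+'",
    'replace': '\\d+\\.\\d+\\.\\d+',
    'with': '{{ .version }}',
}
handle_path(pathobj, variables={'version': '1.2.3'})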
cloudify-cosmo/repex
repex.py
_build_vars_dict
def _build_vars_dict(vars_file='', variables=None): """Merge variables into a single dictionary Applies to CLI provided variables only """ repex_vars = {} if vars_file: with open(vars_file) as varsfile: repex_vars = yaml.safe_load(varsfile.read()) for var in variables: key, value = var.split('=') repex_vars.update({str(key): str(value)}) return repex_vars
python
def _build_vars_dict(vars_file='', variables=None): """Merge variables into a single dictionary Applies to CLI provided variables only """ repex_vars = {} if vars_file: with open(vars_file) as varsfile: repex_vars = yaml.safe_load(varsfile.read()) for var in variables: key, value = var.split('=') repex_vars.update({str(key): str(value)}) return repex_vars
[ "def", "_build_vars_dict", "(", "vars_file", "=", "''", ",", "variables", "=", "None", ")", ":", "repex_vars", "=", "{", "}", "if", "vars_file", ":", "with", "open", "(", "vars_file", ")", "as", "varsfile", ":", "repex_vars", "=", "yaml", ".", "safe_load", "(", "varsfile", ".", "read", "(", ")", ")", "for", "var", "in", "variables", ":", "key", ",", "value", "=", "var", ".", "split", "(", "'='", ")", "repex_vars", ".", "update", "(", "{", "str", "(", "key", ")", ":", "str", "(", "value", ")", "}", ")", "return", "repex_vars" ]
Merge variables into a single dictionary Applies to CLI provided variables only
[ "Merge", "variables", "into", "a", "single", "dictionary" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L705-L717
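A sketch showing the merge order: file-based variables load first, then CLI-style `key=value` pairs override them on key collisions. The file name and keys are placeholders.

repex_vars = _build_vars_dict(
    'vars.yaml',                                  # loaded first
    variables=['version=1.2.3', 'codename=xyz'],  # these win on collisions
)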
cloudify-cosmo/repex
repex.py
main
def main(verbose, **kwargs): """Replace strings in one or multiple files. You must either provide `REGEX_PATH` or use the `-c` flag to provide a valid repex configuration. `REGEX_PATH` can be: a regex of paths under `basedir`, a path to a single directory under `basedir`, or a path to a single file. It's important to note that if the `REGEX_PATH` is a path to a directory, the `-t,--ftype` flag must be provided. """ config = kwargs['config'] if not config and not kwargs['regex_path']: click.echo('Must either provide a path or a viable repex config file.') sys.exit(1) if verbose: set_verbose() if config: repex_vars = _build_vars_dict(kwargs['vars_file'], kwargs['var']) try: iterate( config_file_path=config, variables=repex_vars, tags=list(kwargs['tag']), validate=kwargs['validate'], validate_only=kwargs['validate_only'], with_diff=kwargs['diff']) except (RepexError, IOError, OSError) as ex: sys.exit(str(ex)) else: pathobj = _construct_path_object(**kwargs) try: handle_path(pathobj) except (RepexError, IOError, OSError) as ex: sys.exit(str(ex))
python
def main(verbose, **kwargs): """Replace strings in one or multiple files. You must either provide `REGEX_PATH` or use the `-c` flag to provide a valid repex configuration. `REGEX_PATH` can be: a regex of paths under `basedir`, a path to a single directory under `basedir`, or a path to a single file. It's important to note that if the `REGEX_PATH` is a path to a directory, the `-t,--ftype` flag must be provided. """ config = kwargs['config'] if not config and not kwargs['regex_path']: click.echo('Must either provide a path or a viable repex config file.') sys.exit(1) if verbose: set_verbose() if config: repex_vars = _build_vars_dict(kwargs['vars_file'], kwargs['var']) try: iterate( config_file_path=config, variables=repex_vars, tags=list(kwargs['tag']), validate=kwargs['validate'], validate_only=kwargs['validate_only'], with_diff=kwargs['diff']) except (RepexError, IOError, OSError) as ex: sys.exit(str(ex)) else: pathobj = _construct_path_object(**kwargs) try: handle_path(pathobj) except (RepexError, IOError, OSError) as ex: sys.exit(str(ex))
[ "def", "main", "(", "verbose", ",", "*", "*", "kwargs", ")", ":", "config", "=", "kwargs", "[", "'config'", "]", "if", "not", "config", "and", "not", "kwargs", "[", "'regex_path'", "]", ":", "click", ".", "echo", "(", "'Must either provide a path or a viable repex config file.'", ")", "sys", ".", "exit", "(", "1", ")", "if", "verbose", ":", "set_verbose", "(", ")", "if", "config", ":", "repex_vars", "=", "_build_vars_dict", "(", "kwargs", "[", "'vars_file'", "]", ",", "kwargs", "[", "'var'", "]", ")", "try", ":", "iterate", "(", "config_file_path", "=", "config", ",", "variables", "=", "repex_vars", ",", "tags", "=", "list", "(", "kwargs", "[", "'tag'", "]", ")", ",", "validate", "=", "kwargs", "[", "'validate'", "]", ",", "validate_only", "=", "kwargs", "[", "'validate_only'", "]", ",", "with_diff", "=", "kwargs", "[", "'diff'", "]", ")", "except", "(", "RepexError", ",", "IOError", ",", "OSError", ")", "as", "ex", ":", "sys", ".", "exit", "(", "str", "(", "ex", ")", ")", "else", ":", "pathobj", "=", "_construct_path_object", "(", "*", "*", "kwargs", ")", "try", ":", "handle_path", "(", "pathobj", ")", "except", "(", "RepexError", ",", "IOError", ",", "OSError", ")", "as", "ex", ":", "sys", ".", "exit", "(", "str", "(", "ex", ")", ")" ]
Replace strings in one or multiple files. You must either provide `REGEX_PATH` or use the `-c` flag to provide a valid repex configuration. `REGEX_PATH` can be: a regex of paths under `basedir`, a path to a single directory under `basedir`, or a path to a single file. It's important to note that if the `REGEX_PATH` is a path to a directory, the `-t,--ftype` flag must be provided.
[ "Replace", "strings", "in", "one", "or", "multiple", "files", "." ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L859-L898
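A test-style invocation sketch, assuming `main` is the click-decorated entry point (the docstring and `click.echo` call suggest it) and that the option names match the kwargs above; both assumptions are inferred, not confirmed by this record.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(main, ['-c', 'repex-config.yaml',
                              '--var', 'version=1.2.3'])
print(result.output)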
cloudify-cosmo/repex
repex.py
_VariablesHandler.expand
def expand(self, repex_vars, fields):
    r"""Receive a dict of variables and a dict of fields
    and iterate through them to expand a variable in a field,
    then return the fields dict with its variables expanded.

    This will fail if not all variables expand (due to not
    providing all necessary ones).

    fields:

    type: VERSION
    path: resources
    excluded:
        - excluded_file.file
    base_directory: '{{ .base_dir }}'
    match: '"version": "\d+\.\d+(\.\d+)?(-\w\d+)?'
    replace: \d+\.\d+(\.\d+)?(-\w\d+)?
    with: "{{ .version }}"
    must_include:
        - {{ .my_var }}/{{ .another_var }}
        - {{ .my_other_var }}
        - version
    validator:
        type: per_file
        path: {{ .my_validator_path }}
        function: validate

    variables:

    {
        'version': 3,
        'base_dir': .
        ...
    }

    :param dict repex_vars: dict of variables
    :param dict fields: dict of fields as shown above.
    """
    logger.debug('Expanding variables...')

    unexpanded_instances = set()

    # Expand variables in variables
    # TODO: This should be done in the global scope.
    # _VariableHandler is called per path, which makes this redundant
    # as variables are declared globally per config.
    for k, v in repex_vars.items():
        repex_vars[k] = self._expand_var(v, repex_vars)
        instances = self._get_instances(repex_vars[k])
        unexpanded_instances.update(instances)

    # TODO: Consolidate variable expansion code into single logic
    # Expand variables in path objects
    for key in fields.keys():
        field = fields[key]
        if isinstance(field, str):
            fields[key] = self._expand_var(field, repex_vars)
            instances = self._get_instances(fields[key])
            unexpanded_instances.update(instances)
        elif isinstance(field, dict):
            for k, v in field.items():
                fields[key][k] = self._expand_var(v, repex_vars)
                instances = self._get_instances(fields[key][k])
                unexpanded_instances.update(instances)
        elif isinstance(field, list):
            for index, item in enumerate(field):
                fields[key][index] = self._expand_var(item, repex_vars)
                instances = self._get_instances(fields[key][index])
                unexpanded_instances.update(instances)

    if unexpanded_instances:
        raise RepexError(
            'Variables failed to expand: {0}\n'
            'Please make sure to provide all necessary variables '.format(
                list(unexpanded_instances)))

    return fields
python
def expand(self, repex_vars, fields):
    r"""Receive a dict of variables and a dict of fields
    and iterate through them to expand a variable in a field,
    then return the fields dict with its variables expanded.

    This will fail if not all variables expand (due to not
    providing all necessary ones).

    fields:

    type: VERSION
    path: resources
    excluded:
        - excluded_file.file
    base_directory: '{{ .base_dir }}'
    match: '"version": "\d+\.\d+(\.\d+)?(-\w\d+)?'
    replace: \d+\.\d+(\.\d+)?(-\w\d+)?
    with: "{{ .version }}"
    must_include:
        - {{ .my_var }}/{{ .another_var }}
        - {{ .my_other_var }}
        - version
    validator:
        type: per_file
        path: {{ .my_validator_path }}
        function: validate

    variables:

    {
        'version': 3,
        'base_dir': .
        ...
    }

    :param dict repex_vars: dict of variables
    :param dict fields: dict of fields as shown above.
    """
    logger.debug('Expanding variables...')

    unexpanded_instances = set()

    # Expand variables in variables
    # TODO: This should be done in the global scope.
    # _VariableHandler is called per path, which makes this redundant
    # as variables are declared globally per config.
    for k, v in repex_vars.items():
        repex_vars[k] = self._expand_var(v, repex_vars)
        instances = self._get_instances(repex_vars[k])
        unexpanded_instances.update(instances)

    # TODO: Consolidate variable expansion code into single logic
    # Expand variables in path objects
    for key in fields.keys():
        field = fields[key]
        if isinstance(field, str):
            fields[key] = self._expand_var(field, repex_vars)
            instances = self._get_instances(fields[key])
            unexpanded_instances.update(instances)
        elif isinstance(field, dict):
            for k, v in field.items():
                fields[key][k] = self._expand_var(v, repex_vars)
                instances = self._get_instances(fields[key][k])
                unexpanded_instances.update(instances)
        elif isinstance(field, list):
            for index, item in enumerate(field):
                fields[key][index] = self._expand_var(item, repex_vars)
                instances = self._get_instances(fields[key][index])
                unexpanded_instances.update(instances)

    if unexpanded_instances:
        raise RepexError(
            'Variables failed to expand: {0}\n'
            'Please make sure to provide all necessary variables '.format(
                list(unexpanded_instances)))

    return fields
[ "def", "expand", "(", "self", ",", "repex_vars", ",", "fields", ")", ":", "logger", ".", "debug", "(", "'Expanding variables...'", ")", "unexpanded_instances", "=", "set", "(", ")", "# Expand variables in variables", "# TODO: This should be done in the global scope.", "# _VariableHandler is called per path, which makes this redundant", "# as variables are declared globally per config.", "for", "k", ",", "v", "in", "repex_vars", ".", "items", "(", ")", ":", "repex_vars", "[", "k", "]", "=", "self", ".", "_expand_var", "(", "v", ",", "repex_vars", ")", "instances", "=", "self", ".", "_get_instances", "(", "repex_vars", "[", "k", "]", ")", "unexpanded_instances", ".", "update", "(", "instances", ")", "# TODO: Consolidate variable expansion code into single logic", "# Expand variables in path objects", "for", "key", "in", "fields", ".", "keys", "(", ")", ":", "field", "=", "fields", "[", "key", "]", "if", "isinstance", "(", "field", ",", "str", ")", ":", "fields", "[", "key", "]", "=", "self", ".", "_expand_var", "(", "field", ",", "repex_vars", ")", "instances", "=", "self", ".", "_get_instances", "(", "fields", "[", "key", "]", ")", "unexpanded_instances", ".", "update", "(", "instances", ")", "elif", "isinstance", "(", "field", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "field", ".", "items", "(", ")", ":", "fields", "[", "key", "]", "[", "k", "]", "=", "self", ".", "_expand_var", "(", "v", ",", "repex_vars", ")", "instances", "=", "self", ".", "_get_instances", "(", "fields", "[", "key", "]", "[", "k", "]", ")", "unexpanded_instances", ".", "update", "(", "instances", ")", "elif", "isinstance", "(", "field", ",", "list", ")", ":", "for", "index", ",", "item", "in", "enumerate", "(", "field", ")", ":", "fields", "[", "key", "]", "[", "index", "]", "=", "self", ".", "_expand_var", "(", "item", ",", "repex_vars", ")", "instances", "=", "self", ".", "_get_instances", "(", "fields", "[", "key", "]", "[", "index", "]", ")", "unexpanded_instances", ".", "update", "(", "instances", ")", "if", "unexpanded_instances", ":", "raise", "RepexError", "(", "'Variables failed to expand: {0}\\n'", "'Please make sure to provide all necessary variables '", ".", "format", "(", "list", "(", "unexpanded_instances", ")", ")", ")", "return", "fields" ]
r"""Receive a dict of variables and a dict of fields and iterates through them to expand a variable in an field, then returns the fields dict with its variables expanded. This will fail if not all variables expand (due to not providing all necessary ones). fields: type: VERSION path: resources excluded: - excluded_file.file base_directory: '{{ .base_dir }}' match: '"version": "\d+\.\d+(\.\d+)?(-\w\d+)?' replace: \d+\.\d+(\.\d+)?(-\w\d+)? with: "{{ .version }}" must_include: - {{ .my_var }}/{{ .another_var }} - {{ .my_other_var }} - version validator: type: per_file path: {{ .my_validator_path }} function: validate variables: { 'version': 3, 'base_dir': . ... } :param dict vars: dict of variables :param dict fields: dict of fields as shown above.
[ "r", "Receive", "a", "dict", "of", "variables", "and", "a", "dict", "of", "fields", "and", "iterates", "through", "them", "to", "expand", "a", "variable", "in", "an", "field", "then", "returns", "the", "fields", "dict", "with", "its", "variables", "expanded", "." ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L217-L293
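A sketch of `expand` on a toy fields dict, using the `{{ .name }}` placeholder format from the docstring; it assumes `_get_variable_string(name)` renders exactly that format, and values must be strings for the `str.replace` in `_expand_var` to work.

handler = _VariablesHandler()
fields = {
    'base_directory': '{{ .base_dir }}',
    'with': '{{ .version }}',
    'must_include': ['{{ .version }}'],
}
expanded = handler.expand({'base_dir': '.', 'version': '1.2.3'}, fields)
assert expanded['base_directory'] == '.'
assert expanded['must_include'] == ['1.2.3']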
cloudify-cosmo/repex
repex.py
_VariablesHandler._expand_var
def _expand_var(self, in_string, available_variables):
    """Expand variables in `in_string` to their corresponding values

    :param string in_string: the string to expand variables in
    :param dict available_variables: mapping of variable names to values
    """
    instances = self._get_instances(in_string)
    for instance in instances:
        for name, value in available_variables.items():
            variable_string = self._get_variable_string(name)
            if instance == variable_string:
                in_string = in_string.replace(variable_string, value)
    return in_string
python
def _expand_var(self, in_string, available_variables):
    """Expand variables in `in_string` to their corresponding values

    :param string in_string: the string to expand variables in
    :param dict available_variables: mapping of variable names to values
    """
    instances = self._get_instances(in_string)
    for instance in instances:
        for name, value in available_variables.items():
            variable_string = self._get_variable_string(name)
            if instance == variable_string:
                in_string = in_string.replace(variable_string, value)
    return in_string
[ "def", "_expand_var", "(", "self", ",", "in_string", ",", "available_variables", ")", ":", "instances", "=", "self", ".", "_get_instances", "(", "in_string", ")", "for", "instance", "in", "instances", ":", "for", "name", ",", "value", "in", "available_variables", ".", "items", "(", ")", ":", "variable_string", "=", "self", ".", "_get_variable_string", "(", "name", ")", "if", "instance", "==", "variable_string", ":", "in_string", "=", "in_string", ".", "replace", "(", "variable_string", ",", "value", ")", "return", "in_string" ]
Expand variables in `in_string` to their corresponding values

:param string in_string: the string to expand variables in
:param dict available_variables: mapping of variable names to values
[ "Expand", "variable", "to", "its", "corresponding", "value", "in_string" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L302-L315
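An isolated sketch of the replacement step, again assuming `_get_variable_string('version')` renders as `'{{ .version }}'` (the format used throughout the docstrings in this file):

handler = _VariablesHandler()
result = handler._expand_var('v{{ .version }}', {'version': '1.2.3'})
assert result == 'v1.2.3'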
cloudify-cosmo/repex
repex.py
Repex.validate_before
def validate_before(self, content, file_to_handle): """Verify that all required strings are in the file """ logger.debug('Looking for required strings: %s', self.must_include) included = True for string in self.must_include: if not re.search(r'{0}'.format(string), content): logger.error('Required string `%s` not found in %s', string, file_to_handle) included = False if not included: logger.debug('Required strings not found') return False logger.debug('Required strings found') return True
python
def validate_before(self, content, file_to_handle): """Verify that all required strings are in the file """ logger.debug('Looking for required strings: %s', self.must_include) included = True for string in self.must_include: if not re.search(r'{0}'.format(string), content): logger.error('Required string `%s` not found in %s', string, file_to_handle) included = False if not included: logger.debug('Required strings not found') return False logger.debug('Required strings found') return True
[ "def", "validate_before", "(", "self", ",", "content", ",", "file_to_handle", ")", ":", "logger", ".", "debug", "(", "'Looking for required strings: %s'", ",", "self", ".", "must_include", ")", "included", "=", "True", "for", "string", "in", "self", ".", "must_include", ":", "if", "not", "re", ".", "search", "(", "r'{0}'", ".", "format", "(", "string", ")", ",", "content", ")", ":", "logger", ".", "error", "(", "'Required string `%s` not found in %s'", ",", "string", ",", "file_to_handle", ")", "included", "=", "False", "if", "not", "included", ":", "logger", ".", "debug", "(", "'Required strings not found'", ")", "return", "False", "logger", ".", "debug", "(", "'Required strings found'", ")", "return", "True" ]
Verify that all required strings are in the file
[ "Verify", "that", "all", "required", "strings", "are", "in", "the", "file" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L589-L603
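A sketch of prevalidation, assuming `must_include` is taken from the path object passed to `Repex` (as `handle_path` above suggests); all concrete values are placeholders.

rpx = Repex({
    'path': 'setup.py',
    'match': "version='\\d+\\.\\d+\\.\\d+'",
    'replace': '\\d+\\.\\d+\\.\\d+',
    'with': '2.0.0',
    'must_include': ['name=', 'version='],
})
with open('setup.py') as f:
    content = f.read()
if not rpx.validate_before(content, 'setup.py'):
    raise RepexError('prevalidation failed')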
cloudify-cosmo/repex
repex.py
Repex.find_matches
def find_matches(self, content, file_to_handle): """Find all matches of an expression in a file """ # look for all match groups in the content groups = [match.groupdict() for match in self.match_expression.finditer(content)] # filter out content not in the matchgroup matches = [group['matchgroup'] for group in groups if group.get('matchgroup')] logger.info('Found %s matches in %s', len(matches), file_to_handle) # We only need the unique strings found as we'll be replacing each # of them. No need to replace the ones already replaced. return list(set(matches))
python
def find_matches(self, content, file_to_handle): """Find all matches of an expression in a file """ # look for all match groups in the content groups = [match.groupdict() for match in self.match_expression.finditer(content)] # filter out content not in the matchgroup matches = [group['matchgroup'] for group in groups if group.get('matchgroup')] logger.info('Found %s matches in %s', len(matches), file_to_handle) # We only need the unique strings found as we'll be replacing each # of them. No need to replace the ones already replaced. return list(set(matches))
[ "def", "find_matches", "(", "self", ",", "content", ",", "file_to_handle", ")", ":", "# look for all match groups in the content", "groups", "=", "[", "match", ".", "groupdict", "(", ")", "for", "match", "in", "self", ".", "match_expression", ".", "finditer", "(", "content", ")", "]", "# filter out content not in the matchgroup", "matches", "=", "[", "group", "[", "'matchgroup'", "]", "for", "group", "in", "groups", "if", "group", ".", "get", "(", "'matchgroup'", ")", "]", "logger", ".", "info", "(", "'Found %s matches in %s'", ",", "len", "(", "matches", ")", ",", "file_to_handle", ")", "# We only need the unique strings found as we'll be replacing each", "# of them. No need to replace the ones already replaced.", "return", "list", "(", "set", "(", "matches", ")", ")" ]
Find all matches of an expression in a file
[ "Find", "all", "matches", "of", "an", "expression", "in", "a", "file" ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L605-L618
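The `matchgroup` logic above can be reproduced with plain `re`; this standalone sketch assumes Repex compiles the configured `match` expression into a named group called `matchgroup`, as the `groupdict` lookups imply.

import re

content = 'version="1.0.0"\nversion="1.0.0"'
pattern = re.compile('(?P<matchgroup>version="\\d+\\.\\d+\\.\\d+")')
matches = [m.groupdict()['matchgroup'] for m in pattern.finditer(content)]
assert list(set(matches)) == ['version="1.0.0"']   # duplicates collapse to one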
cloudify-cosmo/repex
repex.py
Repex.replace
def replace(self, match, content):
    """Replace all occurrences of the regex in all matches
    from a file with a specific value.
    """
    new_string = self.replace_expression.sub(self.replace_with, match)
    logger.info('Replacing: [ %s ] --> [ %s ]', match, new_string)
    new_content = content.replace(match, new_string)
    return new_content
python
def replace(self, match, content):
    """Replace all occurrences of the regex in all matches
    from a file with a specific value.
    """
    new_string = self.replace_expression.sub(self.replace_with, match)
    logger.info('Replacing: [ %s ] --> [ %s ]', match, new_string)
    new_content = content.replace(match, new_string)
    return new_content
[ "def", "replace", "(", "self", ",", "match", ",", "content", ")", ":", "new_string", "=", "self", ".", "replace_expression", ".", "sub", "(", "self", ".", "replace_with", ",", "match", ")", "logger", ".", "info", "(", "'Replacing: [ %s ] --> [ %s ]'", ",", "match", ",", "new_string", ")", "new_content", "=", "content", ".", "replace", "(", "match", ",", "new_string", ")", "return", "new_content" ]
Replace all occurrences of the regex in all matches from a file with a specific value.
[ "Replace", "all", "occurences", "of", "the", "regex", "in", "all", "matches", "from", "a", "file", "with", "a", "specific", "value", "." ]
train
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L623-L630
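A standalone sketch of the sub-then-replace pattern above, with plain `re` standing in for the instance attributes:

import re

replace_expression = re.compile('\\d+\\.\\d+\\.\\d+')   # stands in for self.replace_expression
match = 'version="1.0.0"'
new_string = replace_expression.sub('2.0.0', match)      # 'version="2.0.0"'
content = 'version="1.0.0" everywhere'.replace(match, new_string)
assert content == 'version="2.0.0" everywhere'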
idlesign/uwsgiconf
uwsgiconf/options/master_process.py
MasterProcess.set_exit_events
def set_exit_events(self, no_workers=None, idle=None, reload=None, sig_term=None): """Do exit on certain events :param bool no_workers: Shutdown uWSGI when no workers are running. :param bool idle: Shutdown uWSGI when idle. :param bool reload: Force exit even if a reload is requested. :param bool sig_term: Exit on SIGTERM instead of brutal workers reload. .. note:: Before 2.1 SIGTERM reloaded the stack while SIGINT/SIGQUIT shut it down. """ self._set('die-on-no-workers', no_workers, cast=bool) self._set('exit-on-reload', reload, cast=bool) self._set('die-on-term', sig_term, cast=bool) self.set_idle_params(exit=idle) return self._section
python
def set_exit_events(self, no_workers=None, idle=None, reload=None, sig_term=None): """Do exit on certain events :param bool no_workers: Shutdown uWSGI when no workers are running. :param bool idle: Shutdown uWSGI when idle. :param bool reload: Force exit even if a reload is requested. :param bool sig_term: Exit on SIGTERM instead of brutal workers reload. .. note:: Before 2.1 SIGTERM reloaded the stack while SIGINT/SIGQUIT shut it down. """ self._set('die-on-no-workers', no_workers, cast=bool) self._set('exit-on-reload', reload, cast=bool) self._set('die-on-term', sig_term, cast=bool) self.set_idle_params(exit=idle) return self._section
[ "def", "set_exit_events", "(", "self", ",", "no_workers", "=", "None", ",", "idle", "=", "None", ",", "reload", "=", "None", ",", "sig_term", "=", "None", ")", ":", "self", ".", "_set", "(", "'die-on-no-workers'", ",", "no_workers", ",", "cast", "=", "bool", ")", "self", ".", "_set", "(", "'exit-on-reload'", ",", "reload", ",", "cast", "=", "bool", ")", "self", ".", "_set", "(", "'die-on-term'", ",", "sig_term", ",", "cast", "=", "bool", ")", "self", ".", "set_idle_params", "(", "exit", "=", "idle", ")", "return", "self", ".", "_section" ]
Do exit on certain events :param bool no_workers: Shutdown uWSGI when no workers are running. :param bool idle: Shutdown uWSGI when idle. :param bool reload: Force exit even if a reload is requested. :param bool sig_term: Exit on SIGTERM instead of brutal workers reload. .. note:: Before 2.1 SIGTERM reloaded the stack while SIGINT/SIGQUIT shut it down.
[ "Do", "exit", "on", "certain", "events" ]
train
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/master_process.py#L59-L78
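A configuration sketch; it assumes the usual uwsgiconf entry point where a `Section` exposes this option group as `master_process` (consistent with the file path above). Returning `self._section` lets these setters chain with other groups on the same section.

from uwsgiconf.config import Section

section = Section()
section.master_process.set_exit_events(
    no_workers=True,   # maps to die-on-no-workers
    sig_term=True,     # maps to die-on-term
)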
idlesign/uwsgiconf
uwsgiconf/options/master_process.py
MasterProcess.set_exception_handling_params
def set_exception_handling_params(self, handler=None, catch=None, no_write_exception=None):
    """Exception handling related params.

    :param str|unicode|list[str|unicode] handler: Register one or more exception handling C-functions.

    :param bool catch: Catch exceptions and report them as http output (including stack trace and env params).

        .. warning:: Use only for testing purposes.

    :param bool no_write_exception: Disable exception generation on write()/writev().

        .. note:: This can be combined with ``logging.set_filters(write_errors=False, sigpipe=False)``.

        .. note:: Currently available for Python.

    """
    self._set('exception-handler', handler, multi=True)
    self._set('catch-exceptions', catch, cast=bool)
    self._set('disable-write-exception', no_write_exception, cast=bool)

    return self._section
python
def set_exception_handling_params(self, handler=None, catch=None, no_write_exception=None):
    """Exception handling related params.

    :param str|unicode|list[str|unicode] handler: Register one or more exception handling C-functions.

    :param bool catch: Catch exceptions and report them as http output (including stack trace and env params).

        .. warning:: Use only for testing purposes.

    :param bool no_write_exception: Disable exception generation on write()/writev().

        .. note:: This can be combined with ``logging.set_filters(write_errors=False, sigpipe=False)``.

        .. note:: Currently available for Python.

    """
    self._set('exception-handler', handler, multi=True)
    self._set('catch-exceptions', catch, cast=bool)
    self._set('disable-write-exception', no_write_exception, cast=bool)

    return self._section
[ "def", "set_exception_handling_params", "(", "self", ",", "handler", "=", "None", ",", "catch", "=", "None", ",", "no_write_exception", "=", "None", ")", ":", "self", ".", "_set", "(", "'exception-handler'", ",", "handler", ",", "multi", "=", "True", ")", "self", ".", "_set", "(", "'catch-exceptions'", ",", "catch", ",", "cast", "=", "bool", ")", "self", ".", "_set", "(", "'disable-write-exception'", ",", "no_write_exception", ",", "cast", "=", "bool", ")", "return", "self", ".", "_section" ]
Exception handling related params.

:param str|unicode|list[str|unicode] handler: Register one or more exception handling C-functions.

:param bool catch: Catch exceptions and report them as http output (including stack trace and env params).

    .. warning:: Use only for testing purposes.

:param bool no_write_exception: Disable exception generation on write()/writev().

    .. note:: This can be combined with ``logging.set_filters(write_errors=False, sigpipe=False)``.

    .. note:: Currently available for Python.
[ "Exception", "handling", "related", "params", "." ]
train
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/master_process.py#L80-L100
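A matching sketch for the exception options, under the same `Section`/`master_process` assumption; per the docstring, `catch=True` is for testing only.

from uwsgiconf.config import Section

section = Section()
section.master_process.set_exception_handling_params(
    catch=True,               # maps to catch-exceptions
    no_write_exception=True,  # maps to disable-write-exception
)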