Column            Type     Range
id                int32    0 - 252k
repo              string   7 - 55 chars
path              string   4 - 127 chars
func_name         string   1 - 88 chars
original_string   string   75 - 19.8k chars
language          string   1 class
code              string   75 - 19.8k chars
code_tokens       list
docstring         string   3 - 17.3k chars
docstring_tokens  list
sha               string   40 chars
url               string   87 - 242 chars
8,000
crs4/pydoop
pydoop/app/submit.py
PydoopSubmitter.set_args
def set_args(self, args, unknown_args=None):
    """
    Configure job, based on the arguments provided.
    """
    if unknown_args is None:
        unknown_args = []
    self.logger.setLevel(getattr(logging, args.log_level))
    parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
    self.remote_wd = hdfs.path.join(
        parent, utils.make_random_str(prefix="pydoop_submit_")
    )
    self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
    self.properties[JOB_NAME] = args.job_name or 'pydoop'
    self.properties[IS_JAVA_RR] = (
        'false' if args.do_not_use_java_record_reader else 'true'
    )
    self.properties[IS_JAVA_RW] = (
        'false' if args.do_not_use_java_record_writer else 'true'
    )
    self.properties[JOB_REDUCES] = args.num_reducers
    if args.job_name:
        self.properties[JOB_NAME] = args.job_name
    self.properties.update(args.job_conf or {})
    self.__set_files_to_cache(args)
    self.__set_archives_to_cache(args)
    self.requested_env = self._env_arg_to_dict(args.set_env or [])
    self.args = args
    self.unknown_args = unknown_args
python
[ "def", "set_args", "(", "self", ",", "args", ",", "unknown_args", "=", "None", ")", ":", "if", "unknown_args", "is", "None", ":", "unknown_args", "=", "[", "]", "self", ".", "logger", ".", "setLevel", "(", "getattr", "(", "logging", ",", "args", ".", "log_level", ")", ")", "parent", "=", "hdfs", ".", "path", ".", "dirname", "(", "hdfs", ".", "path", ".", "abspath", "(", "args", ".", "output", ".", "rstrip", "(", "\"/\"", ")", ")", ")", "self", ".", "remote_wd", "=", "hdfs", ".", "path", ".", "join", "(", "parent", ",", "utils", ".", "make_random_str", "(", "prefix", "=", "\"pydoop_submit_\"", ")", ")", "self", ".", "remote_exe", "=", "hdfs", ".", "path", ".", "join", "(", "self", ".", "remote_wd", ",", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "self", ".", "properties", "[", "JOB_NAME", "]", "=", "args", ".", "job_name", "or", "'pydoop'", "self", ".", "properties", "[", "IS_JAVA_RR", "]", "=", "(", "'false'", "if", "args", ".", "do_not_use_java_record_reader", "else", "'true'", ")", "self", ".", "properties", "[", "IS_JAVA_RW", "]", "=", "(", "'false'", "if", "args", ".", "do_not_use_java_record_writer", "else", "'true'", ")", "self", ".", "properties", "[", "JOB_REDUCES", "]", "=", "args", ".", "num_reducers", "if", "args", ".", "job_name", ":", "self", ".", "properties", "[", "JOB_NAME", "]", "=", "args", ".", "job_name", "self", ".", "properties", ".", "update", "(", "args", ".", "job_conf", "or", "{", "}", ")", "self", ".", "__set_files_to_cache", "(", "args", ")", "self", ".", "__set_archives_to_cache", "(", "args", ")", "self", ".", "requested_env", "=", "self", ".", "_env_arg_to_dict", "(", "args", ".", "set_env", "or", "[", "]", ")", "self", ".", "args", "=", "args", "self", ".", "unknown_args", "=", "unknown_args" ]
Configure job, based on the arguments provided.
[ "Configure", "job", "based", "on", "the", "arguments", "provided", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L136-L164
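A quick way to see the remote layout set_args computes: the sketch below mirrors the path logic with posixpath standing in for hdfs.path, and a uuid prefix standing in for utils.make_random_str. The namenode and output path are made up.

import posixpath
import uuid

output = "hdfs://namenode:8020/user/me/wc_out/"   # hypothetical job output
parent = posixpath.dirname(output.rstrip("/"))
remote_wd = posixpath.join(parent, "pydoop_submit_" + uuid.uuid4().hex[:8])
remote_exe = posixpath.join(remote_wd, str(uuid.uuid4()))
print(remote_wd)   # e.g. hdfs://namenode:8020/user/me/pydoop_submit_3f2a9c1d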
8,001
crs4/pydoop
pydoop/app/submit.py
PydoopSubmitter.__warn_user_if_wd_maybe_unreadable
def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
    """
    Check directories above the remote module and issue a warning if
    they are not traversable by all users.

    The reasoning behind this is mainly aimed at set-ups with a
    centralized Hadoop cluster, accessed by all users, and where the
    Hadoop task tracker user is not a superuser; an example may be if
    you're running a shared Hadoop without HDFS (using only a POSIX
    shared file system).  The task tracker correctly changes user to
    the job requester's user for most operations, but not when
    initializing the distributed cache, so jobs who want to place files
    not accessible by the Hadoop user into dist cache fail.
    """
    host, port, path = hdfs.path.split(abs_remote_path)
    if host == '' and port == 0:  # local file system
        host_port = "file:///"
    else:
        # FIXME: this won't work with any scheme other than
        # hdfs:// (e.g., s3)
        host_port = "hdfs://%s:%s/" % (host, port)
    path_pieces = path.strip('/').split(os.path.sep)
    fs = hdfs.hdfs(host, port)
    for i in range(0, len(path_pieces)):
        part = os.path.join(
            host_port, os.path.sep.join(path_pieces[0: i + 1])
        )
        permissions = fs.get_path_info(part)['permissions']
        if permissions & 0o111 != 0o111:
            self.logger.warning(
                ("remote module %s may not be readable by the task "
                 "tracker when initializing the distributed cache. "
                 "Permissions on %s: %s"),
                abs_remote_path, part, oct(permissions)
            )
            break
python
[ "def", "__warn_user_if_wd_maybe_unreadable", "(", "self", ",", "abs_remote_path", ")", ":", "host", ",", "port", ",", "path", "=", "hdfs", ".", "path", ".", "split", "(", "abs_remote_path", ")", "if", "host", "==", "''", "and", "port", "==", "0", ":", "# local file system", "host_port", "=", "\"file:///\"", "else", ":", "# FIXME: this won't work with any scheme other than", "# hdfs:// (e.g., s3)", "host_port", "=", "\"hdfs://%s:%s/\"", "%", "(", "host", ",", "port", ")", "path_pieces", "=", "path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "fs", "=", "hdfs", ".", "hdfs", "(", "host", ",", "port", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "path_pieces", ")", ")", ":", "part", "=", "os", ".", "path", ".", "join", "(", "host_port", ",", "os", ".", "path", ".", "sep", ".", "join", "(", "path_pieces", "[", "0", ":", "i", "+", "1", "]", ")", ")", "permissions", "=", "fs", ".", "get_path_info", "(", "part", ")", "[", "'permissions'", "]", "if", "permissions", "&", "0o111", "!=", "0o111", ":", "self", ".", "logger", ".", "warning", "(", "(", "\"remote module %s may not be readable by the task \"", "\"tracker when initializing the distributed cache. \"", "\"Permissions on %s: %s\"", ")", ",", "abs_remote_path", ",", "part", ",", "oct", "(", "permissions", ")", ")", "break" ]
Check directories above the remote module and issue a warning if they are not traversable by all users. The reasoning behind this is mainly aimed at set-ups with a centralized Hadoop cluster, accessed by all users, and where the Hadoop task tracker user is not a superuser; an example may be if you're running a shared Hadoop without HDFS (using only a POSIX shared file system). The task tracker correctly changes user to the job requester's user for most operations, but not when initializing the distributed cache, so jobs who want to place files not accessible by the Hadoop user into dist cache fail.
[ "Check", "directories", "above", "the", "remote", "module", "and", "issue", "a", "warning", "if", "they", "are", "not", "traversable", "by", "all", "users", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L166-L202
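The heart of the check is the 0o111 test: a directory is traversable by everyone only if all three execute bits are set. The same test works on a local path with os.stat, as this self-contained sketch shows.

import os
import stat

def maybe_untraversable(path):
    # Mirrors the permission test above: any missing execute bit means
    # some class of users cannot traverse the directory.
    permissions = stat.S_IMODE(os.stat(path).st_mode)
    return permissions & 0o111 != 0o111

print(maybe_untraversable("/tmp"))   # usually False: /tmp is a+x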
8,002
crs4/pydoop
pydoop/app/submit.py
PydoopSubmitter.__setup_remote_paths
def __setup_remote_paths(self):
    """
    Actually create the working directory and copy the module into it.

    Note: the script has to be readable by Hadoop; though this may not
    generally be a problem on HDFS, where the Hadoop user is usually
    the superuser, things may be different if our working directory is
    on a shared POSIX filesystem.  Therefore, we make the directory
    and the script accessible by all.
    """
    self.logger.debug("remote_wd: %s", self.remote_wd)
    self.logger.debug("remote_exe: %s", self.remote_exe)
    self.logger.debug("remotes: %s", self.files_to_upload)
    if self.args.module:
        self.logger.debug(
            'Generated pipes_code:\n\n %s', self._generate_pipes_code()
        )
    if not self.args.pretend:
        hdfs.mkdir(self.remote_wd)
        hdfs.chmod(self.remote_wd, "a+rx")
        self.logger.debug("created and chmod-ed: %s", self.remote_wd)
        pipes_code = self._generate_pipes_code()
        hdfs.dump(pipes_code, self.remote_exe)
        self.logger.debug("dumped pipes_code to: %s", self.remote_exe)
        hdfs.chmod(self.remote_exe, "a+rx")
        self.__warn_user_if_wd_maybe_unreadable(self.remote_wd)
        for (l, h, _) in self.files_to_upload:
            self.logger.debug("uploading: %s to %s", l, h)
            hdfs.cp(l, h)
    self.logger.debug("Created%sremote paths:" % (
        ' [simulation] ' if self.args.pretend else ' '))
python
[ "def", "__setup_remote_paths", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"remote_wd: %s\"", ",", "self", ".", "remote_wd", ")", "self", ".", "logger", ".", "debug", "(", "\"remote_exe: %s\"", ",", "self", ".", "remote_exe", ")", "self", ".", "logger", ".", "debug", "(", "\"remotes: %s\"", ",", "self", ".", "files_to_upload", ")", "if", "self", ".", "args", ".", "module", ":", "self", ".", "logger", ".", "debug", "(", "'Generated pipes_code:\\n\\n %s'", ",", "self", ".", "_generate_pipes_code", "(", ")", ")", "if", "not", "self", ".", "args", ".", "pretend", ":", "hdfs", ".", "mkdir", "(", "self", ".", "remote_wd", ")", "hdfs", ".", "chmod", "(", "self", ".", "remote_wd", ",", "\"a+rx\"", ")", "self", ".", "logger", ".", "debug", "(", "\"created and chmod-ed: %s\"", ",", "self", ".", "remote_wd", ")", "pipes_code", "=", "self", ".", "_generate_pipes_code", "(", ")", "hdfs", ".", "dump", "(", "pipes_code", ",", "self", ".", "remote_exe", ")", "self", ".", "logger", ".", "debug", "(", "\"dumped pipes_code to: %s\"", ",", "self", ".", "remote_exe", ")", "hdfs", ".", "chmod", "(", "self", ".", "remote_exe", ",", "\"a+rx\"", ")", "self", ".", "__warn_user_if_wd_maybe_unreadable", "(", "self", ".", "remote_wd", ")", "for", "(", "l", ",", "h", ",", "_", ")", "in", "self", ".", "files_to_upload", ":", "self", ".", "logger", ".", "debug", "(", "\"uploading: %s to %s\"", ",", "l", ",", "h", ")", "hdfs", ".", "cp", "(", "l", ",", "h", ")", "self", ".", "logger", ".", "debug", "(", "\"Created%sremote paths:\"", "%", "(", "' [simulation] '", "if", "self", ".", "args", ".", "pretend", "else", "' '", ")", ")" ]
Actually create the working directory and copy the module into it. Note: the script has to be readable by Hadoop; though this may not generally be a problem on HDFS, where the Hadoop user is usually the superuser, things may be different if our working directory is on a shared POSIX filesystem. Therefore, we make the directory and the script accessible by all.
[ "Actually", "create", "the", "working", "directory", "and", "copy", "the", "module", "into", "it", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L295-L325
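The create/chmod/dump sequence uses Pydoop's top-level hdfs helpers; a minimal stand-alone version of the same idiom, assuming a reachable HDFS and a made-up directory name, looks like this.

import pydoop.hdfs as hdfs

wd = "pydoop_submit_demo"                    # hypothetical working directory
hdfs.mkdir(wd)
hdfs.chmod(wd, "a+rx")                       # world-traversable, as in the method
hdfs.dump("#!/usr/bin/env python\n", wd + "/exe")   # write the launcher script
hdfs.chmod(wd + "/exe", "a+rx")              # readable and executable by all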
8,003
crs4/pydoop
dev_tools/docker/scripts/share_etc_hosts.py
docker_client
def docker_client():
    """
    Returns a docker-py client configured using environment variables
    according to the same logic as the official Docker client.
    """
    cert_path = os.environ.get('DOCKER_CERT_PATH', '')
    if cert_path == '':
        cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
    base_url = os.environ.get('DOCKER_HOST')
    tls_config = None
    if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
        parts = base_url.split('://', 1)
        base_url = '%s://%s' % ('https', parts[1])
        client_cert = (os.path.join(cert_path, 'cert.pem'),
                       os.path.join(cert_path, 'key.pem'))
        ca_cert = os.path.join(cert_path, 'ca.pem')
        tls_config = tls.TLSConfig(
            ssl_version=ssl.PROTOCOL_TLSv1,
            verify=True,
            assert_hostname=False,
            client_cert=client_cert,
            ca_cert=ca_cert,
        )
    timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
    return Client(
        base_url=base_url, tls=tls_config, version='1.15', timeout=timeout
    )
python
[ "def", "docker_client", "(", ")", ":", "cert_path", "=", "os", ".", "environ", ".", "get", "(", "'DOCKER_CERT_PATH'", ",", "''", ")", "if", "cert_path", "==", "''", ":", "cert_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", ".", "get", "(", "'HOME'", ",", "''", ")", ",", "'.docker'", ")", "base_url", "=", "os", ".", "environ", ".", "get", "(", "'DOCKER_HOST'", ")", "tls_config", "=", "None", "if", "os", ".", "environ", ".", "get", "(", "'DOCKER_TLS_VERIFY'", ",", "''", ")", "!=", "''", ":", "parts", "=", "base_url", ".", "split", "(", "'://'", ",", "1", ")", "base_url", "=", "'%s://%s'", "%", "(", "'https'", ",", "parts", "[", "1", "]", ")", "client_cert", "=", "(", "os", ".", "path", ".", "join", "(", "cert_path", ",", "'cert.pem'", ")", ",", "os", ".", "path", ".", "join", "(", "cert_path", ",", "'key.pem'", ")", ")", "ca_cert", "=", "os", ".", "path", ".", "join", "(", "cert_path", ",", "'ca.pem'", ")", "tls_config", "=", "tls", ".", "TLSConfig", "(", "ssl_version", "=", "ssl", ".", "PROTOCOL_TLSv1", ",", "verify", "=", "True", ",", "assert_hostname", "=", "False", ",", "client_cert", "=", "client_cert", ",", "ca_cert", "=", "ca_cert", ",", ")", "timeout", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'DOCKER_CLIENT_TIMEOUT'", ",", "60", ")", ")", "return", "Client", "(", "base_url", "=", "base_url", ",", "tls", "=", "tls_config", ",", "version", "=", "'1.15'", ",", "timeout", "=", "timeout", ")" ]
Returns a docker-py client configured using environment variables according to the same logic as the official Docker client.
[ "Returns", "a", "docker", "-", "py", "client", "configured", "using", "environment", "variables", "according", "to", "the", "same", "logic", "as", "the", "official", "Docker", "client", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/dev_tools/docker/scripts/share_etc_hosts.py#L44-L75
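The function is driven entirely by the standard Docker environment variables; the values below are placeholders for a docker-machine-style setup, and the call assumes docker-py's Client and tls are importable as in the source file.

import os

os.environ["DOCKER_HOST"] = "tcp://192.168.99.100:2376"       # placeholder
os.environ["DOCKER_TLS_VERIFY"] = "1"
os.environ["DOCKER_CERT_PATH"] = os.path.expanduser("~/.docker")

client = docker_client()
print(client.base_url)   # scheme rewritten to https because TLS_VERIFY is set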
8,004
crs4/pydoop
pydoop/utils/jvm.py
get_java_home
def get_java_home():
    """\
    Try getting JAVA_HOME from system properties.

    We are interested in the JDK home, containing include/jni.h, while
    the java.home property points to the JRE home.  If a JDK is
    installed, however, the two are (usually) related: the JDK home is
    either the same directory as the JRE home (recent java versions)
    or its parent (and java.home points to jdk_home/jre).
    """
    error = RuntimeError("java home not found, try setting JAVA_HOME")
    try:
        return os.environ["JAVA_HOME"]
    except KeyError:
        wd = tempfile.mkdtemp(prefix='pydoop_')
        jclass = "Temp"
        jsrc = os.path.join(wd, "%s.java" % jclass)
        with open(jsrc, "w") as f:
            f.write(JPROG.substitute(classname=jclass))
        try:
            subprocess.check_call(["javac", jsrc])
            path = subprocess.check_output(
                ["java", "-cp", wd, jclass], universal_newlines=True
            )
        except (OSError, UnicodeDecodeError, subprocess.CalledProcessError):
            raise error
        finally:
            shutil.rmtree(wd)
        path = os.path.normpath(path.strip())
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        path = os.path.dirname(path)
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        raise error
python
[ "def", "get_java_home", "(", ")", ":", "error", "=", "RuntimeError", "(", "\"java home not found, try setting JAVA_HOME\"", ")", "try", ":", "return", "os", ".", "environ", "[", "\"JAVA_HOME\"", "]", "except", "KeyError", ":", "wd", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'pydoop_'", ")", "jclass", "=", "\"Temp\"", "jsrc", "=", "os", ".", "path", ".", "join", "(", "wd", ",", "\"%s.java\"", "%", "jclass", ")", "with", "open", "(", "jsrc", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "JPROG", ".", "substitute", "(", "classname", "=", "jclass", ")", ")", "try", ":", "subprocess", ".", "check_call", "(", "[", "\"javac\"", ",", "jsrc", "]", ")", "path", "=", "subprocess", ".", "check_output", "(", "[", "\"java\"", ",", "\"-cp\"", ",", "wd", ",", "jclass", "]", ",", "universal_newlines", "=", "True", ")", "except", "(", "OSError", ",", "UnicodeDecodeError", ",", "subprocess", ".", "CalledProcessError", ")", ":", "raise", "error", "finally", ":", "shutil", ".", "rmtree", "(", "wd", ")", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ".", "strip", "(", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"include\"", ",", "\"jni.h\"", ")", ")", ":", "return", "path", "path", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"include\"", ",", "\"jni.h\"", ")", ")", ":", "return", "path", "raise", "error" ]
\ Try getting JAVA_HOME from system properties. We are interested in the JDK home, containing include/jni.h, while the java.home property points to the JRE home. If a JDK is installed, however, the two are (usually) related: the JDK home is either the same directory as the JRE home (recent java versions) or its parent (and java.home points to jdk_home/jre).
[ "\\", "Try", "getting", "JAVA_HOME", "from", "system", "properties", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/utils/jvm.py#L37-L71
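JPROG itself is outside this snippet; given how it is used (a string.Template whose generated class prints the java.home system property), a plausible reconstruction is the sketch below. The Java body is an assumption, not the module's actual template.

from string import Template

# Hypothetical reconstruction of JPROG; the real template lives elsewhere
# in pydoop/utils/jvm.py.
JPROG = Template("""\
public class ${classname} {
    public static void main(String[] args) {
        System.out.println(System.getProperty("java.home"));
    }
}
""")

print(JPROG.substitute(classname="Temp"))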
8,005
crs4/pydoop
pydoop/mapreduce/pipes.py
run_task
def run_task(factory, **kwargs):
    """\
    Run a MapReduce task.

    Available keyword arguments:

    * ``raw_keys`` (default: :obj:`False`): pass map input keys to
      context as byte strings (ignore any type information)
    * ``raw_values`` (default: :obj:`False`): pass map input values to
      context as byte strings (ignore any type information)
    * ``private_encoding`` (default: :obj:`True`): automatically
      serialize map output k/v and deserialize reduce input k/v (pickle)
    * ``auto_serialize`` (default: :obj:`True`): automatically serialize
      reduce output (map output in map-only jobs) k/v (call str/unicode
      then encode as utf-8)

    Advanced keyword arguments:

    * ``pstats_dir``: run the task with cProfile and store stats in
      this dir
    * ``pstats_fmt``: use this pattern for pstats filenames (experts
      only)

    The pstats dir and filename pattern can also be provided via
    ``pydoop submit`` arguments, with lower precedence in case of
    clashes.
    """
    context = TaskContext(factory, **kwargs)
    pstats_dir = kwargs.get("pstats_dir", os.getenv(PSTATS_DIR))
    if pstats_dir:
        import cProfile
        import tempfile
        import pydoop.hdfs as hdfs
        hdfs.mkdir(pstats_dir)
        fd, pstats_fn = tempfile.mkstemp(suffix=".pstats")
        os.close(fd)
        cProfile.runctx(
            "_run(context, **kwargs)", globals(), locals(),
            filename=pstats_fn
        )
        pstats_fmt = kwargs.get(
            "pstats_fmt", os.getenv(PSTATS_FMT, DEFAULT_PSTATS_FMT)
        )
        name = pstats_fmt % (
            context.task_type,
            context.get_task_partition(),
            os.path.basename(pstats_fn)
        )
        hdfs.put(pstats_fn, hdfs.path.join(pstats_dir, name))
    else:
        _run(context, **kwargs)
python
[ "def", "run_task", "(", "factory", ",", "*", "*", "kwargs", ")", ":", "context", "=", "TaskContext", "(", "factory", ",", "*", "*", "kwargs", ")", "pstats_dir", "=", "kwargs", ".", "get", "(", "\"pstats_dir\"", ",", "os", ".", "getenv", "(", "PSTATS_DIR", ")", ")", "if", "pstats_dir", ":", "import", "cProfile", "import", "tempfile", "import", "pydoop", ".", "hdfs", "as", "hdfs", "hdfs", ".", "mkdir", "(", "pstats_dir", ")", "fd", ",", "pstats_fn", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "\".pstats\"", ")", "os", ".", "close", "(", "fd", ")", "cProfile", ".", "runctx", "(", "\"_run(context, **kwargs)\"", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "filename", "=", "pstats_fn", ")", "pstats_fmt", "=", "kwargs", ".", "get", "(", "\"pstats_fmt\"", ",", "os", ".", "getenv", "(", "PSTATS_FMT", ",", "DEFAULT_PSTATS_FMT", ")", ")", "name", "=", "pstats_fmt", "%", "(", "context", ".", "task_type", ",", "context", ".", "get_task_partition", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "pstats_fn", ")", ")", "hdfs", ".", "put", "(", "pstats_fn", ",", "hdfs", ".", "path", ".", "join", "(", "pstats_dir", ",", "name", ")", ")", "else", ":", "_run", "(", "context", ",", "*", "*", "kwargs", ")" ]
\ Run a MapReduce task. Available keyword arguments: * ``raw_keys`` (default: :obj:`False`): pass map input keys to context as byte strings (ignore any type information) * ``raw_values`` (default: :obj:`False`): pass map input values to context as byte strings (ignore any type information) * ``private_encoding`` (default: :obj:`True`): automatically serialize map output k/v and deserialize reduce input k/v (pickle) * ``auto_serialize`` (default: :obj:`True`): automatically serialize reduce output (map output in map-only jobs) k/v (call str/unicode then encode as utf-8) Advanced keyword arguments: * ``pstats_dir``: run the task with cProfile and store stats in this dir * ``pstats_fmt``: use this pattern for pstats filenames (experts only) The pstats dir and filename pattern can also be provided via ``pydoop submit`` arguments, with lower precedence in case of clashes.
[ "\\", "Run", "a", "MapReduce", "task", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/mapreduce/pipes.py#L414-L462
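Following the usual Pydoop pattern, a script passed to pydoop submit calls run_task from its __main__ hook; this word-count sketch assumes the v2 MapReduce API as documented for Pydoop.

import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pipes

class Mapper(api.Mapper):
    def map(self, context):
        for word in context.value.split():
            context.emit(word, 1)

class Reducer(api.Reducer):
    def reduce(self, context):
        context.emit(context.key, sum(context.values))

def __main__():
    # private_encoding (on by default) pickles the intermediate k/v pairs
    pipes.run_task(pipes.Factory(Mapper, reducer_class=Reducer))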
8,006
crs4/pydoop
pydoop/mapreduce/pipes.py
TaskContext.progress
def progress(self):
    """\
    Report progress to the Java side.

    This needs to flush the uplink stream, but too many flushes can
    disrupt performance, so we actually talk to upstream once per
    second.
    """
    now = time()
    if now - self.last_progress_t > 1:
        self.last_progress_t = now
        if self.status:
            self.uplink.status(self.status)
            self.status = None
        self.__spill_counters()
        self.uplink.progress(self.progress_value)
        self.uplink.flush()
python
[ "def", "progress", "(", "self", ")", ":", "now", "=", "time", "(", ")", "if", "now", "-", "self", ".", "last_progress_t", ">", "1", ":", "self", ".", "last_progress_t", "=", "now", "if", "self", ".", "status", ":", "self", ".", "uplink", ".", "status", "(", "self", ".", "status", ")", "self", ".", "status", "=", "None", "self", ".", "__spill_counters", "(", ")", "self", ".", "uplink", ".", "progress", "(", "self", ".", "progress_value", ")", "self", ".", "uplink", ".", "flush", "(", ")" ]
\ Report progress to the Java side. This needs to flush the uplink stream, but too many flushes can disrupt performance, so we actually talk to upstream once per second.
[ "\\", "Report", "progress", "to", "the", "Java", "side", "." ]
f375be2a06f9c67eaae3ce6f605195dbca143b2b
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/mapreduce/pipes.py#L210-L225
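The once-per-second gate is independent of the uplink machinery; here it is in isolation, as a runnable sketch with print standing in for the uplink calls.

from time import time

class Throttled(object):
    # Same gating as TaskContext.progress: at most one send per second.
    def __init__(self):
        self.last_t = 0.0

    def maybe_send(self, payload):
        now = time()
        if now - self.last_t > 1:
            self.last_t = now
            print("sending:", payload)   # stands in for the uplink traffic

t = Throttled()
for i in range(3):
    t.maybe_send(i)   # only the first call in each one-second window fires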
8,007
Bouke/docx-mailmerge
mailmerge.py
MailMerge.merge_pages
def merge_pages(self, replacements):
    """
    Deprecated method.
    """
    warnings.warn("merge_pages has been deprecated in favour of merge_templates",
                  category=DeprecationWarning,
                  stacklevel=2)
    self.merge_templates(replacements, "page_break")
python
[ "def", "merge_pages", "(", "self", ",", "replacements", ")", ":", "warnings", ".", "warn", "(", "\"merge_pages has been deprecated in favour of merge_templates\"", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "merge_templates", "(", "replacements", ",", "\"page_break\"", ")" ]
Deprecated method.
[ "Deprecated", "method", "." ]
6900b686794b4bf85b662488add8df0880114b99
https://github.com/Bouke/docx-mailmerge/blob/6900b686794b4bf85b662488add8df0880114b99/mailmerge.py#L236-L243
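From the caller's side, the deprecation surfaces as a standard DeprecationWarning; this sketch shows how to observe it, with document standing in for a hypothetical MailMerge instance.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    document.merge_pages([{"name": "Alice"}])    # hypothetical instance
    assert caught[0].category is DeprecationWarning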
8,008
bashu/django-easy-maps
easy_maps/utils.py
importpath
def importpath(path, error_text=None):
    """
    Import value by specified ``path``.
    Value can represent module, class, object, attribute or method.
    If ``error_text`` is not None and import will raise ImproperlyConfigured
    with user friendly text.
    """
    result = None
    attrs = []
    parts = path.split('.')
    exception = None
    while parts:
        try:
            result = __import__('.'.join(parts), {}, {}, [''])
        except ImportError as e:
            if exception is None:
                exception = e
            attrs = parts[-1:] + attrs
            parts = parts[:-1]
        else:
            break
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except (AttributeError, ValueError) as e:
            if error_text is not None:
                raise ImproperlyConfigured('Error: %s can import "%s"' % (
                    error_text, path))
            else:
                raise exception
    return result
python
[ "def", "importpath", "(", "path", ",", "error_text", "=", "None", ")", ":", "result", "=", "None", "attrs", "=", "[", "]", "parts", "=", "path", ".", "split", "(", "'.'", ")", "exception", "=", "None", "while", "parts", ":", "try", ":", "result", "=", "__import__", "(", "'.'", ".", "join", "(", "parts", ")", ",", "{", "}", ",", "{", "}", ",", "[", "''", "]", ")", "except", "ImportError", "as", "e", ":", "if", "exception", "is", "None", ":", "exception", "=", "e", "attrs", "=", "parts", "[", "-", "1", ":", "]", "+", "attrs", "parts", "=", "parts", "[", ":", "-", "1", "]", "else", ":", "break", "for", "attr", "in", "attrs", ":", "try", ":", "result", "=", "getattr", "(", "result", ",", "attr", ")", "except", "(", "AttributeError", ",", "ValueError", ")", "as", "e", ":", "if", "error_text", "is", "not", "None", ":", "raise", "ImproperlyConfigured", "(", "'Error: %s can import \"%s\"'", "%", "(", "error_text", ",", "path", ")", ")", "else", ":", "raise", "exception", "return", "result" ]
Import value by specified ``path``. Value can represent module, class, object, attribute or method. If ``error_text`` is not None and import will raise ImproperlyConfigured with user friendly text.
[ "Import", "value", "by", "specified", "path", ".", "Value", "can", "represent", "module", "class", "object", "attribute", "or", "method", ".", "If", "error_text", "is", "not", "None", "and", "import", "will", "raise", "ImproperlyConfigured", "with", "user", "friendly", "text", "." ]
32f4f3274443219e8828d93d09a406bf2a126982
https://github.com/bashu/django-easy-maps/blob/32f4f3274443219e8828d93d09a406bf2a126982/easy_maps/utils.py#L6-L37
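Because the loop peels attributes off the right end of the dotted path until an import succeeds, the same call handles modules and attributes alike. Assuming importpath is in scope:

OrderedDict = importpath('collections.OrderedDict')   # module attribute
json_module = importpath('json')                      # plain module

# With error_text set, a bad path raises Django's ImproperlyConfigured
# instead of the underlying ImportError:
#   importpath('no.such.module', error_text='EASY_MAPS_BACKEND')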
8,009
bitlabstudio/django-booking
booking/utils.py
get_booking
def get_booking(request):
    """
    Returns the booking that is in progress for the current user or None

    We assume that a user can only have one booking that is in-progress.

    TODO: This implementation assumes that there is a status called
    'inprogress' and that there should only be one such booking for a
    given user. We need to see if this can be more generic for future
    projects.

    :param request: The Request object.
    """
    booking = None
    if request.user.is_authenticated():
        try:
            booking = Booking.objects.get(
                user=request.user, booking_status__slug='inprogress')
        except Booking.DoesNotExist:
            # The user does not have any open bookings
            pass
    else:
        session = Session.objects.get(
            session_key=request.session.session_key)
        try:
            booking = Booking.objects.get(session=session)
        except Booking.DoesNotExist:
            # The user does not have any bookings in his session
            pass
    return booking
python
[ "def", "get_booking", "(", "request", ")", ":", "booking", "=", "None", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "try", ":", "booking", "=", "Booking", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "booking_status__slug", "=", "'inprogress'", ")", "except", "Booking", ".", "DoesNotExist", ":", "# The user does not have any open bookings", "pass", "else", ":", "session", "=", "Session", ".", "objects", ".", "get", "(", "session_key", "=", "request", ".", "session", ".", "session_key", ")", "try", ":", "booking", "=", "Booking", ".", "objects", ".", "get", "(", "session", "=", "session", ")", "except", "Booking", ".", "DoesNotExist", ":", "# The user does not have any bookings in his session", "pass", "return", "booking" ]
Returns the booking that is in progress for the current user or None We assume that a user can only have one booking that is in-progress. TODO: This implementation assumes that there is a status called 'inprogress' and that there should only be one such booking for a given user. We need to see if this can be more generic for future projects. :param request: The Request object.
[ "Returns", "the", "booking", "that", "is", "in", "progress", "for", "the", "current", "user", "or", "None" ]
7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00
https://github.com/bitlabstudio/django-booking/blob/7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00/booking/utils.py#L7-L37
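A hedged sketch of how a view might use the helper; everything here except get_booking itself is illustrative, not part of django-booking.

from django.http import HttpResponse

def current_booking_view(request):
    booking = get_booking(request)
    if booking is None:
        return HttpResponse("no booking in progress", status=404)
    return HttpResponse("booking #%s in progress" % booking.pk)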
8,010
bitlabstudio/django-booking
booking/utils.py
persist_booking
def persist_booking(booking, user):
    """
    Ties an in-progress booking from a session to a user when the user
    logs in.

    If we don't do this, the booking will be lost, because on a login,
    the old session will be deleted and a new one will be created. Since
    the booking has a FK to the session, it would be deleted as well
    when the user logs in.

    We assume that a user can only have one booking that is in-progress.
    Therefore we will delete any existing in-progress bookings of this
    user before tying the one from the session to the user.

    TODO: Find a more generic solution for this, as this assumes that
    there is a status called inprogress and that a user can only have
    one such booking.

    :param booking: The booking that should be tied to the user.
    :user: The user the booking should be tied to.
    """
    if booking is not None:
        existing_bookings = Booking.objects.filter(
            user=user, booking_status__slug='inprogress').exclude(
            pk=booking.pk)
        existing_bookings.delete()
        booking.session = None
        booking.user = user
        booking.save()
python
[ "def", "persist_booking", "(", "booking", ",", "user", ")", ":", "if", "booking", "is", "not", "None", ":", "existing_bookings", "=", "Booking", ".", "objects", ".", "filter", "(", "user", "=", "user", ",", "booking_status__slug", "=", "'inprogress'", ")", ".", "exclude", "(", "pk", "=", "booking", ".", "pk", ")", "existing_bookings", ".", "delete", "(", ")", "booking", ".", "session", "=", "None", "booking", ".", "user", "=", "user", "booking", ".", "save", "(", ")" ]
Ties an in-progress booking from a session to a user when the user logs in. If we don't do this, the booking will be lost, because on a login, the old session will be deleted and a new one will be created. Since the booking has a FK to the session, it would be deleted as well when the user logs in. We assume that a user can only have one booking that is in-progress. Therefore we will delete any existing in-progress bookings of this user before tying the one from the session to the user. TODO: Find a more generic solution for this, as this assumes that there is a status called inprogress and that a user can only have one such booking. :param booking: The booking that should be tied to the user. :user: The user the booking should be tied to.
[ "Ties", "an", "in", "-", "progress", "booking", "from", "a", "session", "to", "a", "user", "when", "the", "user", "logs", "in", "." ]
7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00
https://github.com/bitlabstudio/django-booking/blob/7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00/booking/utils.py#L40-L68
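One plausible way to wire this up is a user_logged_in receiver; the receiver below is a sketch under that assumption, not part of django-booking itself.

from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver

@receiver(user_logged_in)
def tie_booking_to_user(sender, request, user, **kwargs):
    # get_booking is the helper defined above in the same module
    persist_booking(get_booking(request), user)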
8,011
spotify/pyfg
pyFG/forticonfig.py
FortiConfig.compare_config
def compare_config(self, target, init=True, indent_level=0):
    """
    This method will return all the necessary commands to get from the
    config we are in to the target config.

    Args:
        * **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.
        * **init** (bool) - This tells to the method if this is the first
          call to the method or if we are inside the recursion. You can
          ignore this parameter.
        * **indent_level** (int) - This tells the method how deep you are
          in the recursion. You can ignore it.

    Returns:
        A string containing all the necessary commands to reach the
        target config.
    """
    if init:
        fwd = self.full_path_fwd
        bwd = self.full_path_bwd
    else:
        fwd = self.rel_path_fwd
        bwd = self.rel_path_bwd
    indent = 4*indent_level*' '
    if indent_level == 0 and self.vdom is not None:
        if self.vdom == 'global':
            pre = 'conf global\n'
        else:
            pre = 'conf vdom\n edit %s\n' % self.vdom
        post = 'end'
    else:
        pre = ''
        post = ''
    pre_block = '%s%s' % (indent, fwd)
    post_block = '%s%s' % (indent, bwd)
    my_params = self.parameters.keys()
    ot_params = target.parameters.keys()
    text = ''
    for param in my_params:
        if param not in ot_params:
            text += ' %sunset %s\n' % (indent, param)
        else:
            # We ignore quotes when comparing values
            if str(self.get_param(param)).replace('"', '') != \
                    str(target.get_param(param)).replace('"', ''):
                text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
    for param in ot_params:
        if param not in my_params:
            text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
    my_blocks = self.sub_blocks.keys()
    ot_blocks = target.sub_blocks.keys()
    for block_name in my_blocks:
        if block_name not in ot_blocks:
            text += " %sdelete %s\n" % (indent, block_name)
        else:
            text += self[block_name].compare_config(
                target[block_name], False, indent_level+1)
    for block_name in ot_blocks:
        if block_name not in my_blocks:
            text += target[block_name].to_text(True, indent_level+1, True)
    if text == '':
        return ''
    else:
        return '%s%s%s%s%s' % (pre, pre_block, text, post_block, post)
python
[ "def", "compare_config", "(", "self", ",", "target", ",", "init", "=", "True", ",", "indent_level", "=", "0", ")", ":", "if", "init", ":", "fwd", "=", "self", ".", "full_path_fwd", "bwd", "=", "self", ".", "full_path_bwd", "else", ":", "fwd", "=", "self", ".", "rel_path_fwd", "bwd", "=", "self", ".", "rel_path_bwd", "indent", "=", "4", "*", "indent_level", "*", "' '", "if", "indent_level", "==", "0", "and", "self", ".", "vdom", "is", "not", "None", ":", "if", "self", ".", "vdom", "==", "'global'", ":", "pre", "=", "'conf global\\n'", "else", ":", "pre", "=", "'conf vdom\\n edit %s\\n'", "%", "self", ".", "vdom", "post", "=", "'end'", "else", ":", "pre", "=", "''", "post", "=", "''", "pre_block", "=", "'%s%s'", "%", "(", "indent", ",", "fwd", ")", "post_block", "=", "'%s%s'", "%", "(", "indent", ",", "bwd", ")", "my_params", "=", "self", ".", "parameters", ".", "keys", "(", ")", "ot_params", "=", "target", ".", "parameters", ".", "keys", "(", ")", "text", "=", "''", "for", "param", "in", "my_params", ":", "if", "param", "not", "in", "ot_params", ":", "text", "+=", "' %sunset %s\\n'", "%", "(", "indent", ",", "param", ")", "else", ":", "# We ignore quotes when comparing values", "if", "str", "(", "self", ".", "get_param", "(", "param", ")", ")", ".", "replace", "(", "'\"'", ",", "''", ")", "!=", "str", "(", "target", ".", "get_param", "(", "param", ")", ")", ".", "replace", "(", "'\"'", ",", "''", ")", ":", "text", "+=", "' %sset %s %s\\n'", "%", "(", "indent", ",", "param", ",", "target", ".", "get_param", "(", "param", ")", ")", "for", "param", "in", "ot_params", ":", "if", "param", "not", "in", "my_params", ":", "text", "+=", "' %sset %s %s\\n'", "%", "(", "indent", ",", "param", ",", "target", ".", "get_param", "(", "param", ")", ")", "my_blocks", "=", "self", ".", "sub_blocks", ".", "keys", "(", ")", "ot_blocks", "=", "target", ".", "sub_blocks", ".", "keys", "(", ")", "for", "block_name", "in", "my_blocks", ":", "if", "block_name", "not", "in", "ot_blocks", ":", "text", "+=", "\" %sdelete %s\\n\"", "%", "(", "indent", ",", "block_name", ")", "else", ":", "text", "+=", "self", "[", "block_name", "]", ".", "compare_config", "(", "target", "[", "block_name", "]", ",", "False", ",", "indent_level", "+", "1", ")", "for", "block_name", "in", "ot_blocks", ":", "if", "block_name", "not", "in", "my_blocks", ":", "text", "+=", "target", "[", "block_name", "]", ".", "to_text", "(", "True", ",", "indent_level", "+", "1", ",", "True", ")", "if", "text", "==", "''", ":", "return", "''", "else", ":", "return", "'%s%s%s%s%s'", "%", "(", "pre", ",", "pre_block", ",", "text", ",", "post_block", ",", "post", ")" ]
This method will return all the necessary commands to get from the config we are in to the target config. Args: * **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config. * **init** (bool) - This tells to the method if this is the first call to the method or if we are inside\ the recursion. You can ignore this parameter. * **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it. Returns: A string containing all the necessary commands to reach the target config.
[ "This", "method", "will", "return", "all", "the", "necessary", "commands", "to", "get", "from", "the", "config", "we", "are", "in", "to", "the", "target", "config", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/forticonfig.py#L103-L173
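A minimal diff between two blocks, assuming pyFG's FortiConfig and its parse_config_output loader; the config snippets are made up.

from pyFG.forticonfig import FortiConfig

running = FortiConfig('running')
candidate = FortiConfig('candidate')
running.parse_config_output('config system global\n set admintimeout 5\nend')
candidate.parse_config_output('config system global\n set admintimeout 30\nend')

# Prints the 'set ...' commands needed to move running toward candidate
print(running.compare_config(candidate))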
8,012
spotify/pyfg
pyFG/forticonfig.py
FortiConfig.to_text
def to_text(self, relative=False, indent_level=0, clean_empty_block=False):
    """
    This method returns the object model in text format. You should be
    able to copy&paste this text into any device running a supported
    version of FortiOS.

    Args:
        - **relative** (bool):
            * If ``True`` the text returned will assume that you are
              one block away
            * If ``False`` the text returned will contain instructions
              to reach the block from the root.
        - **indent_level** (int): This value is for aesthetics only.
          It will help format the text in blocks to increase
          readability.
        - **clean_empty_block** (bool):
            * If ``True`` a block without parameters or with sub_blocks
              without parameters will return an empty string
            * If ``False`` a block without parameters will still return
              how to create it.
    """
    if relative:
        fwd = self.rel_path_fwd
        bwd = self.rel_path_bwd
    else:
        fwd = self.full_path_fwd
        bwd = self.full_path_bwd
    indent = 4*indent_level*' '
    pre = '%s%s' % (indent, fwd)
    post = '%s%s' % (indent, bwd)
    text = ''
    for param, value in self.iterparams():
        text += ' %sset %s %s\n' % (indent, param, value)
    for key, block in self.iterblocks():
        text += block.to_text(True, indent_level+1)
    if len(text) > 0 or not clean_empty_block:
        text = '%s%s%s' % (pre, text, post)
    return text
python
[ "def", "to_text", "(", "self", ",", "relative", "=", "False", ",", "indent_level", "=", "0", ",", "clean_empty_block", "=", "False", ")", ":", "if", "relative", ":", "fwd", "=", "self", ".", "rel_path_fwd", "bwd", "=", "self", ".", "rel_path_bwd", "else", ":", "fwd", "=", "self", ".", "full_path_fwd", "bwd", "=", "self", ".", "full_path_bwd", "indent", "=", "4", "*", "indent_level", "*", "' '", "pre", "=", "'%s%s'", "%", "(", "indent", ",", "fwd", ")", "post", "=", "'%s%s'", "%", "(", "indent", ",", "bwd", ")", "text", "=", "''", "for", "param", ",", "value", "in", "self", ".", "iterparams", "(", ")", ":", "text", "+=", "' %sset %s %s\\n'", "%", "(", "indent", ",", "param", ",", "value", ")", "for", "key", ",", "block", "in", "self", ".", "iterblocks", "(", ")", ":", "text", "+=", "block", ".", "to_text", "(", "True", ",", "indent_level", "+", "1", ")", "if", "len", "(", "text", ")", ">", "0", "or", "not", "clean_empty_block", ":", "text", "=", "'%s%s%s'", "%", "(", "pre", ",", "text", ",", "post", ")", "return", "text" ]
This method returns the object model in text format. You should be able to copy&paste this text into any device running a supported version of FortiOS. Args: - **relative** (bool): * If ``True`` the text returned will assume that you are one block away * If ``False`` the text returned will contain instructions to reach the block from the root. - **indent_level** (int): This value is for aesthetics only. It will help format the text in blocks to\ increase readability. - **clean_empty_block** (bool): * If ``True`` a block without parameters or with sub_blocks without parameters will return an empty\ string * If ``False`` a block without parameters will still return how to create it.
[ "This", "method", "returns", "the", "object", "model", "in", "text", "format", ".", "You", "should", "be", "able", "to", "copy&paste", "this", "text", "into", "any", "device", "running", "a", "supported", "version", "of", "FortiOS", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/forticonfig.py#L305-L341
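And rendering a block back to CLI text, under the same assumptions (set_param as used elsewhere in pyFG; the block name is made up):

block = FortiConfig('system global')
block.set_param('admintimeout', '30')
print(block.to_text())                  # full path from the root
print(block.to_text(relative=True))     # just this block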
8,013
spotify/pyfg
pyFG/fortios.py
FortiOS.open
def open(self):
    """
    Opens the ssh session with the device.
    """
    logger.debug('Connecting to device %s, vdom %s' % (self.hostname, self.vdom))
    self.ssh = paramiko.SSHClient()
    self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    cfg = {
        'hostname': self.hostname,
        'timeout': self.timeout,
        'username': self.username,
        'password': self.password,
        'key_filename': self.keyfile
    }
    if os.path.exists(os.path.expanduser("~/.ssh/config")):
        ssh_config = paramiko.SSHConfig()
        user_config_file = os.path.expanduser("~/.ssh/config")
        with io.open(user_config_file, 'rt', encoding='utf-8') as f:
            ssh_config.parse(f)
        host_conf = ssh_config.lookup(self.hostname)
        if host_conf:
            if 'proxycommand' in host_conf:
                cfg['sock'] = paramiko.ProxyCommand(host_conf['proxycommand'])
            if 'user' in host_conf:
                cfg['username'] = host_conf['user']
            if 'identityfile' in host_conf:
                cfg['key_filename'] = host_conf['identityfile']
            if 'hostname' in host_conf:
                cfg['hostname'] = host_conf['hostname']
    self.ssh.connect(**cfg)
python
[ "def", "open", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Connecting to device %s, vdom %s'", "%", "(", "self", ".", "hostname", ",", "self", ".", "vdom", ")", ")", "self", ".", "ssh", "=", "paramiko", ".", "SSHClient", "(", ")", "self", ".", "ssh", ".", "set_missing_host_key_policy", "(", "paramiko", ".", "AutoAddPolicy", "(", ")", ")", "cfg", "=", "{", "'hostname'", ":", "self", ".", "hostname", ",", "'timeout'", ":", "self", ".", "timeout", ",", "'username'", ":", "self", ".", "username", ",", "'password'", ":", "self", ".", "password", ",", "'key_filename'", ":", "self", ".", "keyfile", "}", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "expanduser", "(", "\"~/.ssh/config\"", ")", ")", ":", "ssh_config", "=", "paramiko", ".", "SSHConfig", "(", ")", "user_config_file", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.ssh/config\"", ")", "with", "io", ".", "open", "(", "user_config_file", ",", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "ssh_config", ".", "parse", "(", "f", ")", "host_conf", "=", "ssh_config", ".", "lookup", "(", "self", ".", "hostname", ")", "if", "host_conf", ":", "if", "'proxycommand'", "in", "host_conf", ":", "cfg", "[", "'sock'", "]", "=", "paramiko", ".", "ProxyCommand", "(", "host_conf", "[", "'proxycommand'", "]", ")", "if", "'user'", "in", "host_conf", ":", "cfg", "[", "'username'", "]", "=", "host_conf", "[", "'user'", "]", "if", "'identityfile'", "in", "host_conf", ":", "cfg", "[", "'key_filename'", "]", "=", "host_conf", "[", "'identityfile'", "]", "if", "'hostname'", "in", "host_conf", ":", "cfg", "[", "'hostname'", "]", "=", "host_conf", "[", "'hostname'", "]", "self", ".", "ssh", ".", "connect", "(", "*", "*", "cfg", ")" ]
Opens the ssh session with the device.
[ "Opens", "the", "ssh", "session", "with", "the", "device", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L68-L103
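A minimal usage sketch for the connection flow above, assuming pyFG's documented top-level import and a reachable device; the host and credentials are placeholders, and close() is the assumed counterpart teardown method:

from pyFG import FortiOS

d = FortiOS('192.0.2.1', username='admin', password='secret', vdom='root')  # placeholder host/credentials
d.open()    # builds the paramiko SSHClient, honoring ProxyCommand/user/identityfile from ~/.ssh/config
# ... interact with the device ...
d.close()   # assumed counterpart that tears down the SSH session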
8,014
spotify/pyfg
pyFG/fortios.py
FortiOS._read_wrapper
def _read_wrapper(data): """Ensure unicode always returned on read.""" # Paramiko (strangely) in PY3 returns an int here. if isinstance(data, int): data = chr(data) # Ensure unicode return py23_compat.text_type(data)
python
def _read_wrapper(data): """Ensure unicode always returned on read.""" # Paramiko (strangely) in PY3 returns an int here. if isinstance(data, int): data = chr(data) # Ensure unicode return py23_compat.text_type(data)
[ "def", "_read_wrapper", "(", "data", ")", ":", "# Paramiko (strangely) in PY3 returns an int here.", "if", "isinstance", "(", "data", ",", "int", ")", ":", "data", "=", "chr", "(", "data", ")", "# Ensure unicode", "return", "py23_compat", ".", "text_type", "(", "data", ")" ]
Ensure unicode always returned on read.
[ "Ensure", "unicode", "always", "returned", "on", "read", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L114-L120
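A quick illustration of the normalization above; text_type stands in for py23_compat.text_type, which is plain str on Python 3:

text_type = str  # stand-in for py23_compat.text_type on Python 3

def read_wrapper(data):
    if isinstance(data, int):  # paramiko can hand back a raw byte value in PY3
        data = chr(data)
    return text_type(data)

assert read_wrapper(104) == 'h'    # int byte value becomes a one-character string
assert read_wrapper('ok') == 'ok'  # strings pass through unchanged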
8,015
spotify/pyfg
pyFG/fortios.py
FortiOS._parse_batch_lastlog
def _parse_batch_lastlog(last_log):
    """
    This static method helps read the result of the commit, command by command.

    Args:
        last_log(list): A list containing, line by line, the result of committing the changes.

    Returns:
        A list of tuples for the commands that went wrong. Each tuple contains (*status_code*, *command*)
    """
    regexp = re.compile(r'(-?[0-9]\d*):\W+(.*)')
    wrong_commands = list()
    for line in last_log:
        result = regexp.match(line)
        if result is not None:
            status_code = result.group(1)
            command = result.group(2)

            if int(status_code) < 0:
                wrong_commands.append((status_code, command))
    return wrong_commands
python
def _parse_batch_lastlog(last_log):
    """
    This static method helps read the result of the commit, command by command.

    Args:
        last_log(list): A list containing, line by line, the result of committing the changes.

    Returns:
        A list of tuples for the commands that went wrong. Each tuple contains (*status_code*, *command*)
    """
    regexp = re.compile(r'(-?[0-9]\d*):\W+(.*)')
    wrong_commands = list()
    for line in last_log:
        result = regexp.match(line)
        if result is not None:
            status_code = result.group(1)
            command = result.group(2)

            if int(status_code) < 0:
                wrong_commands.append((status_code, command))
    return wrong_commands
[ "def", "_parse_batch_lastlog", "(", "last_log", ")", ":", "regexp", "=", "re", ".", "compile", "(", "'(-?[0-9]\\d*):\\W+(.*)'", ")", "wrong_commands", "=", "list", "(", ")", "for", "line", "in", "last_log", ":", "result", "=", "regexp", ".", "match", "(", "line", ")", "if", "result", "is", "not", "None", ":", "status_code", "=", "result", ".", "group", "(", "1", ")", "command", "=", "result", ".", "group", "(", "2", ")", "if", "int", "(", "status_code", ")", "<", "0", ":", "wrong_commands", ".", "append", "(", "(", "status_code", ",", "command", ")", ")", "return", "wrong_commands" ]
This static method helps read the result of the commit, command by command.

Args:
    last_log(list): A list containing, line by line, the result of committing the changes.

Returns:
    A list of tuples for the commands that went wrong. Each tuple contains (*status_code*, *command*)
[ "This", "static", "method", "will", "help", "reading", "the", "result", "of", "the", "commit", "command", "by", "command", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L349-L370
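The regex above can be exercised standalone; a negative status code marks a failed command. The sample log lines below are invented for illustration:

import re

regexp = re.compile(r'(-?[0-9]\d*):\W+(.*)')
last_log = [
    '0: config firewall policy',    # status 0: command succeeded
    '-61: set srcintf "port9999"',  # negative status: command failed
]
wrong_commands = []
for line in last_log:
    result = regexp.match(line)
    if result is not None and int(result.group(1)) < 0:
        wrong_commands.append((result.group(1), result.group(2)))
assert wrong_commands == [('-61', 'set srcintf "port9999"')]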
8,016
spotify/pyfg
pyFG/fortios.py
FortiOS._reload_config
def _reload_config(self, reload_original_config):
    """
    This command will update the running config from the live device.

    Args:
        * reload_original_config:
            * If ``True`` the original config will be replaced by the current running config before the\
              running config is reloaded from the device.
            * If ``False`` the original config will remain untouched.
    """
    # We don't want to reload the config under some circumstances
    if reload_original_config:
        self.original_config = self.running_config
        self.original_config.set_name('original')

    paths = self.running_config.get_paths()

    self.running_config = FortiConfig('running', vdom=self.vdom)
    for path in paths:
        self.load_config(path, empty_candidate=True)
python
def _reload_config(self, reload_original_config):
    """
    This command will update the running config from the live device.

    Args:
        * reload_original_config:
            * If ``True`` the original config will be replaced by the current running config before the\
              running config is reloaded from the device.
            * If ``False`` the original config will remain untouched.
    """
    # We don't want to reload the config under some circumstances
    if reload_original_config:
        self.original_config = self.running_config
        self.original_config.set_name('original')

    paths = self.running_config.get_paths()

    self.running_config = FortiConfig('running', vdom=self.vdom)
    for path in paths:
        self.load_config(path, empty_candidate=True)
[ "def", "_reload_config", "(", "self", ",", "reload_original_config", ")", ":", "# We don't want to reload the config under some circumstances", "if", "reload_original_config", ":", "self", ".", "original_config", "=", "self", ".", "running_config", "self", ".", "original_config", ".", "set_name", "(", "'original'", ")", "paths", "=", "self", ".", "running_config", ".", "get_paths", "(", ")", "self", ".", "running_config", "=", "FortiConfig", "(", "'running'", ",", "vdom", "=", "self", ".", "vdom", ")", "for", "path", "in", "paths", ":", "self", ".", "load_config", "(", "path", ",", "empty_candidate", "=", "True", ")" ]
This command will update the running config from the live device.

Args:
    * reload_original_config:
        * If ``True`` the original config will be replaced by the current running config before the\
          running config is reloaded from the device.
        * If ``False`` the original config will remain untouched.
[ "This", "command", "will", "update", "the", "running", "config", "from", "the", "live", "device", "." ]
518668539146e7f998a37d75994a4278adf79897
https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L372-L392
8,017
mattjj/pyslds
pyslds/states.py
_SLDSStates.generate_states
def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
    """
    Jointly sample the discrete and continuous states
    """
    from pybasicbayes.util.stats import sample_discrete
    # Generate from the prior and raise exception if unstable
    T, K, n = self.T, self.num_states, self.D_latent
    A = self.trans_matrix

    # Initialize discrete state sequence
    dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq.astype(np.int32)
    assert dss.shape == (T,)

    gss = np.empty((T,n), dtype='double')
    if initial_condition is None:
        if dss[0] == -1:
            dss[0] = sample_discrete(self.pi_0)
        gss[0] = self.init_dynamics_distns[dss[0]].rvs()
    else:
        dss[0] = initial_condition[0]
        gss[0] = initial_condition[1]

    for t in range(1,T):
        # Sample discrete state given previous continuous state
        if with_noise:
            # Sample discrete state from recurrent transition matrix
            if dss[t] == -1:
                dss[t] = sample_discrete(A[dss[t-1], :])

            # Sample continuous state given current discrete state
            gss[t] = self.dynamics_distns[dss[t-1]].\
                rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
                    return_xy=False)
        else:
            # Pick the most likely next discrete state and continuous state
            if dss[t] == -1:
                dss[t] = np.argmax(A[dss[t-1], :])

            gss[t] = self.dynamics_distns[dss[t-1]]. \
                predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))

        assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"

    self.stateseq = dss
    self.gaussian_states = gss
python
def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
    """
    Jointly sample the discrete and continuous states
    """
    from pybasicbayes.util.stats import sample_discrete
    # Generate from the prior and raise exception if unstable
    T, K, n = self.T, self.num_states, self.D_latent
    A = self.trans_matrix

    # Initialize discrete state sequence
    dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq.astype(np.int32)
    assert dss.shape == (T,)

    gss = np.empty((T,n), dtype='double')
    if initial_condition is None:
        if dss[0] == -1:
            dss[0] = sample_discrete(self.pi_0)
        gss[0] = self.init_dynamics_distns[dss[0]].rvs()
    else:
        dss[0] = initial_condition[0]
        gss[0] = initial_condition[1]

    for t in range(1,T):
        # Sample discrete state given previous continuous state
        if with_noise:
            # Sample discrete state from recurrent transition matrix
            if dss[t] == -1:
                dss[t] = sample_discrete(A[dss[t-1], :])

            # Sample continuous state given current discrete state
            gss[t] = self.dynamics_distns[dss[t-1]].\
                rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
                    return_xy=False)
        else:
            # Pick the most likely next discrete state and continuous state
            if dss[t] == -1:
                dss[t] = np.argmax(A[dss[t-1], :])

            gss[t] = self.dynamics_distns[dss[t-1]]. \
                predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))

        assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"

    self.stateseq = dss
    self.gaussian_states = gss
[ "def", "generate_states", "(", "self", ",", "initial_condition", "=", "None", ",", "with_noise", "=", "True", ",", "stateseq", "=", "None", ")", ":", "from", "pybasicbayes", ".", "util", ".", "stats", "import", "sample_discrete", "# Generate from the prior and raise exception if unstable", "T", ",", "K", ",", "n", "=", "self", ".", "T", ",", "self", ".", "num_states", ",", "self", ".", "D_latent", "A", "=", "self", ".", "trans_matrix", "# Initialize discrete state sequence", "dss", "=", "-", "1", "*", "np", ".", "ones", "(", "T", ",", "dtype", "=", "np", ".", "int32", ")", "if", "stateseq", "is", "None", "else", "stateseq", ".", "astype", "(", "np", ".", "int32", ")", "assert", "dss", ".", "shape", "==", "(", "T", ",", ")", "gss", "=", "np", ".", "empty", "(", "(", "T", ",", "n", ")", ",", "dtype", "=", "'double'", ")", "if", "initial_condition", "is", "None", ":", "if", "dss", "[", "0", "]", "==", "-", "1", ":", "dss", "[", "0", "]", "=", "sample_discrete", "(", "self", ".", "pi_0", ")", "gss", "[", "0", "]", "=", "self", ".", "init_dynamics_distns", "[", "dss", "[", "0", "]", "]", ".", "rvs", "(", ")", "else", ":", "dss", "[", "0", "]", "=", "initial_condition", "[", "0", "]", "gss", "[", "0", "]", "=", "initial_condition", "[", "1", "]", "for", "t", "in", "range", "(", "1", ",", "T", ")", ":", "# Sample discrete state given previous continuous state", "if", "with_noise", ":", "# Sample discre=te state from recurrent transition matrix", "if", "dss", "[", "t", "]", "==", "-", "1", ":", "dss", "[", "t", "]", "=", "sample_discrete", "(", "A", "[", "dss", "[", "t", "-", "1", "]", ",", ":", "]", ")", "# Sample continuous state given current discrete state", "gss", "[", "t", "]", "=", "self", ".", "dynamics_distns", "[", "dss", "[", "t", "-", "1", "]", "]", ".", "rvs", "(", "x", "=", "np", ".", "hstack", "(", "(", "gss", "[", "t", "-", "1", "]", "[", "None", ",", ":", "]", ",", "self", ".", "inputs", "[", "t", "-", "1", "]", "[", "None", ",", ":", "]", ")", ")", ",", "return_xy", "=", "False", ")", "else", ":", "# Pick the most likely next discrete state and continuous state", "if", "dss", "[", "t", "]", "==", "-", "1", ":", "dss", "[", "t", "]", "=", "np", ".", "argmax", "(", "A", "[", "dss", "[", "t", "-", "1", "]", ",", ":", "]", ")", "gss", "[", "t", "]", "=", "self", ".", "dynamics_distns", "[", "dss", "[", "t", "-", "1", "]", "]", ".", "predict", "(", "np", ".", "hstack", "(", "(", "gss", "[", "t", "-", "1", "]", "[", "None", ",", ":", "]", ",", "self", ".", "inputs", "[", "t", "-", "1", "]", "[", "None", ",", ":", "]", ")", ")", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "gss", "[", "t", "]", ")", ")", ",", "\"SLDS appears to be unstable!\"", "self", ".", "stateseq", "=", "dss", "self", ".", "gaussian_states", "=", "gss" ]
Jointly sample the discrete and continuous states
[ "Jointly", "sample", "the", "discrete", "and", "continuous", "states" ]
c505c2bd05a5549d450b518f02493b68ed12e590
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L35-L78
8,018
mattjj/pyslds
pyslds/states.py
_SLDSStatesMaskedData.heldout_log_likelihood
def heldout_log_likelihood(self, test_mask=None):
    """
    Compute the log likelihood of the masked data given the latent
    discrete and continuous states.
    """
    if test_mask is None:
        # If a test mask is not supplied, use the negation of this object's mask
        if self.mask is None:
            return 0
        else:
            test_mask = ~self.mask

    xs = np.hstack((self.gaussian_states, self.inputs))
    if self.single_emission:
        return self.emission_distns[0].\
            log_likelihood((xs, self.data), mask=test_mask).sum()
    else:
        hll = 0
        z = self.stateseq
        for idx, ed in enumerate(self.emission_distns):
            hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
                                     mask=test_mask[z == idx]).sum()
        return hll
python
def heldout_log_likelihood(self, test_mask=None):
    """
    Compute the log likelihood of the masked data given the latent
    discrete and continuous states.
    """
    if test_mask is None:
        # If a test mask is not supplied, use the negation of this object's mask
        if self.mask is None:
            return 0
        else:
            test_mask = ~self.mask

    xs = np.hstack((self.gaussian_states, self.inputs))
    if self.single_emission:
        return self.emission_distns[0].\
            log_likelihood((xs, self.data), mask=test_mask).sum()
    else:
        hll = 0
        z = self.stateseq
        for idx, ed in enumerate(self.emission_distns):
            hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
                                     mask=test_mask[z == idx]).sum()
        return hll
[ "def", "heldout_log_likelihood", "(", "self", ",", "test_mask", "=", "None", ")", ":", "if", "test_mask", "is", "None", ":", "# If a test mask is not supplied, use the negation of this object's mask", "if", "self", ".", "mask", "is", "None", ":", "return", "0", "else", ":", "test_mask", "=", "~", "self", ".", "mask", "xs", "=", "np", ".", "hstack", "(", "(", "self", ".", "gaussian_states", ",", "self", ".", "inputs", ")", ")", "if", "self", ".", "single_emission", ":", "return", "self", ".", "emission_distns", "[", "0", "]", ".", "log_likelihood", "(", "(", "xs", ",", "self", ".", "data", ")", ",", "mask", "=", "test_mask", ")", ".", "sum", "(", ")", "else", ":", "hll", "=", "0", "z", "=", "self", ".", "stateseq", "for", "idx", ",", "ed", "in", "enumerate", "(", "self", ".", "emission_distns", ")", ":", "hll", "+=", "ed", ".", "log_likelihood", "(", "(", "xs", "[", "z", "==", "idx", "]", ",", "self", ".", "data", "[", "z", "==", "idx", "]", ")", ",", "mask", "=", "test_mask", "[", "z", "==", "idx", "]", ")", ".", "sum", "(", ")" ]
Compute the log likelihood of the masked data given the latent discrete and continuous states.
[ "Compute", "the", "log", "likelihood", "of", "the", "masked", "data", "given", "the", "latent", "discrete", "and", "continuous", "states", "." ]
c505c2bd05a5549d450b518f02493b68ed12e590
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L853-L874
8,019
mattjj/pyslds
pyslds/states.py
_SLDSStatesCountData.empirical_rate
def empirical_rate(data, sigma=3.0):
    """
    Smooth count data to get an empirical rate
    """
    from scipy.ndimage import gaussian_filter1d
    return 0.001 + gaussian_filter1d(data.astype(float), sigma, axis=0)
python
def empirical_rate(data, sigma=3.0):
    """
    Smooth count data to get an empirical rate
    """
    from scipy.ndimage import gaussian_filter1d
    return 0.001 + gaussian_filter1d(data.astype(float), sigma, axis=0)
[ "def", "empirical_rate", "(", "data", ",", "sigma", "=", "3.0", ")", ":", "from", "scipy", ".", "ndimage", ".", "filters", "import", "gaussian_filter1d", "return", "0.001", "+", "gaussian_filter1d", "(", "data", ".", "astype", "(", "np", ".", "float", ")", ",", "sigma", ",", "axis", "=", "0", ")" ]
Smooth count data to get an empirical rate
[ "Smooth", "count", "data", "to", "get", "an", "empirical", "rate" ]
c505c2bd05a5549d450b518f02493b68ed12e590
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L1251-L1256
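A small check of the smoothing above on synthetic spike counts; the 0.001 floor keeps downstream log-rates finite (this sketch uses the modern scipy.ndimage import path):

import numpy as np
from scipy.ndimage import gaussian_filter1d

data = np.zeros((50, 1))
data[25] = 5.0  # a single burst of 5 events at t=25
rate = 0.001 + gaussian_filter1d(data.astype(float), 3.0, axis=0)
assert rate.min() >= 0.001   # floored away from zero everywhere
assert rate.argmax() == 25   # the smoothed bump peaks at the spike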
8,020
mattjj/pyslds
pyslds/util.py
get_empirical_ar_params
def get_empirical_ar_params(train_datas, params): """ Estimate the parameters of an AR observation model by fitting a single AR model to the entire dataset. """ assert isinstance(train_datas, list) and len(train_datas) > 0 datadimension = train_datas[0].shape[1] assert params["nu_0"] > datadimension + 1 # Initialize the observation parameters obs_params = dict(nu_0=params["nu_0"], S_0=params['S_0'], M_0=params['M_0'], K_0=params['K_0'], affine=params['affine']) # Fit an AR model to the entire dataset obs_distn = AutoRegression(**obs_params) obs_distn.max_likelihood(train_datas) # Use the inferred noise covariance as the prior mean # E_{IW}[S] = S_0 / (nu_0 - datadimension - 1) obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1) obs_params["M_0"] = obs_distn.A.copy() return obs_params
python
def get_empirical_ar_params(train_datas, params): """ Estimate the parameters of an AR observation model by fitting a single AR model to the entire dataset. """ assert isinstance(train_datas, list) and len(train_datas) > 0 datadimension = train_datas[0].shape[1] assert params["nu_0"] > datadimension + 1 # Initialize the observation parameters obs_params = dict(nu_0=params["nu_0"], S_0=params['S_0'], M_0=params['M_0'], K_0=params['K_0'], affine=params['affine']) # Fit an AR model to the entire dataset obs_distn = AutoRegression(**obs_params) obs_distn.max_likelihood(train_datas) # Use the inferred noise covariance as the prior mean # E_{IW}[S] = S_0 / (nu_0 - datadimension - 1) obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1) obs_params["M_0"] = obs_distn.A.copy() return obs_params
[ "def", "get_empirical_ar_params", "(", "train_datas", ",", "params", ")", ":", "assert", "isinstance", "(", "train_datas", ",", "list", ")", "and", "len", "(", "train_datas", ")", ">", "0", "datadimension", "=", "train_datas", "[", "0", "]", ".", "shape", "[", "1", "]", "assert", "params", "[", "\"nu_0\"", "]", ">", "datadimension", "+", "1", "# Initialize the observation parameters", "obs_params", "=", "dict", "(", "nu_0", "=", "params", "[", "\"nu_0\"", "]", ",", "S_0", "=", "params", "[", "'S_0'", "]", ",", "M_0", "=", "params", "[", "'M_0'", "]", ",", "K_0", "=", "params", "[", "'K_0'", "]", ",", "affine", "=", "params", "[", "'affine'", "]", ")", "# Fit an AR model to the entire dataset", "obs_distn", "=", "AutoRegression", "(", "*", "*", "obs_params", ")", "obs_distn", ".", "max_likelihood", "(", "train_datas", ")", "# Use the inferred noise covariance as the prior mean", "# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)", "obs_params", "[", "\"S_0\"", "]", "=", "obs_distn", ".", "sigma", "*", "(", "params", "[", "\"nu_0\"", "]", "-", "datadimension", "-", "1", ")", "obs_params", "[", "\"M_0\"", "]", "=", "obs_distn", ".", "A", ".", "copy", "(", ")", "return", "obs_params" ]
Estimate the parameters of an AR observation model by fitting a single AR model to the entire dataset.
[ "Estimate", "the", "parameters", "of", "an", "AR", "observation", "model", "by", "fitting", "a", "single", "AR", "model", "to", "the", "entire", "dataset", "." ]
c505c2bd05a5549d450b518f02493b68ed12e590
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/util.py#L6-L32
8,021
pastpages/savepagenow
savepagenow/api.py
capture
def capture(
    target_url,
    user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
    accept_cache=False
):
    """
    Archives the provided URL using archive.org's Wayback Machine.

    Returns the archive.org URL where the capture is stored.

    Raises a CachedPage exception if archive.org declines to conduct a new
    capture and returns a previous snapshot instead.

    To silence that exception, pass True to the ``accept_cache`` keyword
    argument.
    """
    # Put together the URL that will save our request
    domain = "https://web.archive.org"
    save_url = urljoin(domain, "/save/")
    request_url = save_url + target_url

    # Send the capture request to archive.org
    headers = {
        'User-Agent': user_agent,
    }
    response = requests.get(request_url, headers=headers)

    # If it has an error header, raise that.
    has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
    if has_error_header:
        error_header = response.headers['X-Archive-Wayback-Runtime-Error']
        if error_header == 'RobotAccessControlException: Blocked By Robots':
            raise BlockedByRobots("archive.org returned blocked by robots.txt error")
        else:
            raise WaybackRuntimeError(error_header)

    # If it has an error code, raise that
    if response.status_code in [403, 502]:
        raise WaybackRuntimeError(response.headers)

    # Put together the URL where this page is archived
    try:
        archive_id = response.headers['Content-Location']
    except KeyError:
        # If it can't find that key raise the error
        raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))
    archive_url = urljoin(domain, archive_id)

    # Determine if the response was cached
    cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'
    if cached:
        if not accept_cache:
            raise CachedPage("archive.org returned a cached version of this page: {}".format(
                archive_url
            ))

    # Return that
    return archive_url
python
def capture(
    target_url,
    user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
    accept_cache=False
):
    """
    Archives the provided URL using archive.org's Wayback Machine.

    Returns the archive.org URL where the capture is stored.

    Raises a CachedPage exception if archive.org declines to conduct a new
    capture and returns a previous snapshot instead.

    To silence that exception, pass True to the ``accept_cache`` keyword
    argument.
    """
    # Put together the URL that will save our request
    domain = "https://web.archive.org"
    save_url = urljoin(domain, "/save/")
    request_url = save_url + target_url

    # Send the capture request to archive.org
    headers = {
        'User-Agent': user_agent,
    }
    response = requests.get(request_url, headers=headers)

    # If it has an error header, raise that.
    has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
    if has_error_header:
        error_header = response.headers['X-Archive-Wayback-Runtime-Error']
        if error_header == 'RobotAccessControlException: Blocked By Robots':
            raise BlockedByRobots("archive.org returned blocked by robots.txt error")
        else:
            raise WaybackRuntimeError(error_header)

    # If it has an error code, raise that
    if response.status_code in [403, 502]:
        raise WaybackRuntimeError(response.headers)

    # Put together the URL where this page is archived
    try:
        archive_id = response.headers['Content-Location']
    except KeyError:
        # If it can't find that key raise the error
        raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))
    archive_url = urljoin(domain, archive_id)

    # Determine if the response was cached
    cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'
    if cached:
        if not accept_cache:
            raise CachedPage("archive.org returned a cached version of this page: {}".format(
                archive_url
            ))

    # Return that
    return archive_url
[ "def", "capture", "(", "target_url", ",", "user_agent", "=", "\"savepagenow (https://github.com/pastpages/savepagenow)\"", ",", "accept_cache", "=", "False", ")", ":", "# Put together the URL that will save our request", "domain", "=", "\"https://web.archive.org\"", "save_url", "=", "urljoin", "(", "domain", ",", "\"/save/\"", ")", "request_url", "=", "save_url", "+", "target_url", "# Send the capture request to achive.org", "headers", "=", "{", "'User-Agent'", ":", "user_agent", ",", "}", "response", "=", "requests", ".", "get", "(", "request_url", ",", "headers", "=", "headers", ")", "# If it has an error header, raise that.", "has_error_header", "=", "'X-Archive-Wayback-Runtime-Error'", "in", "response", ".", "headers", "if", "has_error_header", ":", "error_header", "=", "response", ".", "headers", "[", "'X-Archive-Wayback-Runtime-Error'", "]", "if", "error_header", "==", "'RobotAccessControlException: Blocked By Robots'", ":", "raise", "BlockedByRobots", "(", "\"archive.org returned blocked by robots.txt error\"", ")", "else", ":", "raise", "WaybackRuntimeError", "(", "error_header", ")", "# If it has an error code, raise that", "if", "response", ".", "status_code", "in", "[", "403", ",", "502", "]", ":", "raise", "WaybackRuntimeError", "(", "response", ".", "headers", ")", "# Put together the URL where this page is archived", "try", ":", "archive_id", "=", "response", ".", "headers", "[", "'Content-Location'", "]", "except", "KeyError", ":", "# If it can't find that key raise the error", "raise", "WaybackRuntimeError", "(", "dict", "(", "status_code", "=", "response", ".", "status_code", ",", "headers", "=", "response", ".", "headers", ")", ")", "archive_url", "=", "urljoin", "(", "domain", ",", "archive_id", ")", "# Determine if the response was cached", "cached", "=", "'X-Page-Cache'", "in", "response", ".", "headers", "and", "response", ".", "headers", "[", "'X-Page-Cache'", "]", "==", "'HIT'", "if", "cached", ":", "if", "not", "accept_cache", ":", "raise", "CachedPage", "(", "\"archive.org returned a cached version of this page: {}\"", ".", "format", "(", "archive_url", ")", ")", "# Return that", "return", "archive_url" ]
Archives the provided URL using archive.org's Wayback Machine.

Returns the archive.org URL where the capture is stored.

Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.

To silence that exception, pass True to the ``accept_cache`` keyword
argument.
[ "Archives", "the", "provided", "URL", "using", "archive", ".", "org", "s", "Wayback", "Machine", "." ]
9555ffb10905fe1b0d2452be2bd8a7d4338a8379
https://github.com/pastpages/savepagenow/blob/9555ffb10905fe1b0d2452be2bd8a7d4338a8379/savepagenow/api.py#L8-L65
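A typical call pattern for the function above; note that this hits the live archive.org endpoint, so treat it as a sketch. The exceptions import path is an assumption from the repo layout:

from savepagenow import capture
from savepagenow.exceptions import CachedPage  # assumed module path at this revision

try:
    archive_url = capture('http://example.com/')
except CachedPage:
    # archive.org declined a fresh capture; retry, accepting the cached snapshot
    archive_url = capture('http://example.com/', accept_cache=True)
print(archive_url)  # e.g. https://web.archive.org/web/<timestamp>/http://example.com/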
8,022
pastpages/savepagenow
savepagenow/api.py
capture_or_cache
def capture_or_cache( target_url, user_agent="savepagenow (https://github.com/pastpages/savepagenow)" ): """ Archives the provided URL using archive.org's Wayback Machine, unless the page has been recently captured. Returns a tuple with the archive.org URL where the capture is stored, along with a boolean indicating if a new capture was conducted. If the boolean is True, archive.org conducted a new capture. If it is False, archive.org has returned a recently cached capture instead, likely taken in the previous minutes. """ try: return capture(target_url, user_agent=user_agent, accept_cache=False), True except CachedPage: return capture(target_url, user_agent=user_agent, accept_cache=True), False
python
def capture_or_cache( target_url, user_agent="savepagenow (https://github.com/pastpages/savepagenow)" ): """ Archives the provided URL using archive.org's Wayback Machine, unless the page has been recently captured. Returns a tuple with the archive.org URL where the capture is stored, along with a boolean indicating if a new capture was conducted. If the boolean is True, archive.org conducted a new capture. If it is False, archive.org has returned a recently cached capture instead, likely taken in the previous minutes. """ try: return capture(target_url, user_agent=user_agent, accept_cache=False), True except CachedPage: return capture(target_url, user_agent=user_agent, accept_cache=True), False
[ "def", "capture_or_cache", "(", "target_url", ",", "user_agent", "=", "\"savepagenow (https://github.com/pastpages/savepagenow)\"", ")", ":", "try", ":", "return", "capture", "(", "target_url", ",", "user_agent", "=", "user_agent", ",", "accept_cache", "=", "False", ")", ",", "True", "except", "CachedPage", ":", "return", "capture", "(", "target_url", ",", "user_agent", "=", "user_agent", ",", "accept_cache", "=", "True", ")", ",", "False" ]
Archives the provided URL using archive.org's Wayback Machine, unless the page has been recently captured. Returns a tuple with the archive.org URL where the capture is stored, along with a boolean indicating if a new capture was conducted. If the boolean is True, archive.org conducted a new capture. If it is False, archive.org has returned a recently cached capture instead, likely taken in the previous minutes.
[ "Archives", "the", "provided", "URL", "using", "archive", ".", "org", "s", "Wayback", "Machine", "unless", "the", "page", "has", "been", "recently", "captured", "." ]
9555ffb10905fe1b0d2452be2bd8a7d4338a8379
https://github.com/pastpages/savepagenow/blob/9555ffb10905fe1b0d2452be2bd8a7d4338a8379/savepagenow/api.py#L68-L86
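The wrapper above folds that try/except into a tuple; a sketch of consuming it, assuming the package re-exports capture_or_cache at the top level:

from savepagenow import capture_or_cache

archive_url, captured = capture_or_cache('http://example.com/')
if captured:
    print('archive.org took a fresh snapshot:', archive_url)
else:
    print('archive.org served a recently cached snapshot:', archive_url)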
8,023
quandyfactory/dicttoxml
dicttoxml.py
get_unique_id
def get_unique_id(element): """Returns a unique id for a given element""" this_id = make_id(element) dup = True while dup: if this_id not in ids: dup = False ids.append(this_id) else: this_id = make_id(element) return ids[-1]
python
def get_unique_id(element): """Returns a unique id for a given element""" this_id = make_id(element) dup = True while dup: if this_id not in ids: dup = False ids.append(this_id) else: this_id = make_id(element) return ids[-1]
[ "def", "get_unique_id", "(", "element", ")", ":", "this_id", "=", "make_id", "(", "element", ")", "dup", "=", "True", "while", "dup", ":", "if", "this_id", "not", "in", "ids", ":", "dup", "=", "False", "ids", ".", "append", "(", "this_id", ")", "else", ":", "this_id", "=", "make_id", "(", "element", ")", "return", "ids", "[", "-", "1", "]" ]
Returns a unique id for a given element
[ "Returns", "a", "unique", "id", "for", "a", "given", "element" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L70-L80
8,024
quandyfactory/dicttoxml
dicttoxml.py
get_xml_type
def get_xml_type(val): """Returns the data type for the xml type attribute""" if type(val).__name__ in ('str', 'unicode'): return 'str' if type(val).__name__ in ('int', 'long'): return 'int' if type(val).__name__ == 'float': return 'float' if type(val).__name__ == 'bool': return 'bool' if isinstance(val, numbers.Number): return 'number' if type(val).__name__ == 'NoneType': return 'null' if isinstance(val, dict): return 'dict' if isinstance(val, collections.Iterable): return 'list' return type(val).__name__
python
def get_xml_type(val): """Returns the data type for the xml type attribute""" if type(val).__name__ in ('str', 'unicode'): return 'str' if type(val).__name__ in ('int', 'long'): return 'int' if type(val).__name__ == 'float': return 'float' if type(val).__name__ == 'bool': return 'bool' if isinstance(val, numbers.Number): return 'number' if type(val).__name__ == 'NoneType': return 'null' if isinstance(val, dict): return 'dict' if isinstance(val, collections.Iterable): return 'list' return type(val).__name__
[ "def", "get_xml_type", "(", "val", ")", ":", "if", "type", "(", "val", ")", ".", "__name__", "in", "(", "'str'", ",", "'unicode'", ")", ":", "return", "'str'", "if", "type", "(", "val", ")", ".", "__name__", "in", "(", "'int'", ",", "'long'", ")", ":", "return", "'int'", "if", "type", "(", "val", ")", ".", "__name__", "==", "'float'", ":", "return", "'float'", "if", "type", "(", "val", ")", ".", "__name__", "==", "'bool'", ":", "return", "'bool'", "if", "isinstance", "(", "val", ",", "numbers", ".", "Number", ")", ":", "return", "'number'", "if", "type", "(", "val", ")", ".", "__name__", "==", "'NoneType'", ":", "return", "'null'", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "return", "'dict'", "if", "isinstance", "(", "val", ",", "collections", ".", "Iterable", ")", ":", "return", "'list'", "return", "type", "(", "val", ")", ".", "__name__" ]
Returns the data type for the xml type attribute
[ "Returns", "the", "data", "type", "for", "the", "xml", "type", "attribute" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L83-L101
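Expected mappings from the dispatcher above. The calls assume the module-level helper is importable from dicttoxml at this revision, and a pre-3.10 Python where collections.Iterable still resolves:

from dicttoxml import get_xml_type

assert get_xml_type('hello') == 'str'
assert get_xml_type(42) == 'int'
assert get_xml_type(3.14) == 'float'
assert get_xml_type(True) == 'bool'       # bool is checked before the generic Number case
assert get_xml_type(None) == 'null'
assert get_xml_type({'a': 1}) == 'dict'
assert get_xml_type([1, 2, 3]) == 'list'  # falls through to the Iterable check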
8,025
quandyfactory/dicttoxml
dicttoxml.py
make_attrstring
def make_attrstring(attr): """Returns an attribute string in the form key="val" """ attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()]) return '%s%s' % (' ' if attrstring != '' else '', attrstring)
python
def make_attrstring(attr): """Returns an attribute string in the form key="val" """ attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()]) return '%s%s' % (' ' if attrstring != '' else '', attrstring)
[ "def", "make_attrstring", "(", "attr", ")", ":", "attrstring", "=", "' '", ".", "join", "(", "[", "'%s=\"%s\"'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "attr", ".", "items", "(", ")", "]", ")", "return", "'%s%s'", "%", "(", "' '", "if", "attrstring", "!=", "''", "else", "''", ",", "attrstring", ")" ]
Returns an attribute string in the form key="val"
[ "Returns", "an", "attribute", "string", "in", "the", "form", "key", "=", "val" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L115-L118
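The helper above prepends a single space only when there is something to emit, so tags without attributes stay clean; a sketch assuming the helper is importable at this revision:

from dicttoxml import make_attrstring

assert make_attrstring({}) == ''
assert make_attrstring({'id': 'root_1'}) == ' id="root_1"'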
8,026
quandyfactory/dicttoxml
dicttoxml.py
key_is_valid_xml
def key_is_valid_xml(key): """Checks that a key is a valid XML name""" LOG.info('Inside key_is_valid_xml(). Testing "%s"' % (unicode_me(key))) test_xml = '<?xml version="1.0" encoding="UTF-8" ?><%s>foo</%s>' % (key, key) try: parseString(test_xml) return True except Exception: # minidom does not implement exceptions well return False
python
def key_is_valid_xml(key): """Checks that a key is a valid XML name""" LOG.info('Inside key_is_valid_xml(). Testing "%s"' % (unicode_me(key))) test_xml = '<?xml version="1.0" encoding="UTF-8" ?><%s>foo</%s>' % (key, key) try: parseString(test_xml) return True except Exception: # minidom does not implement exceptions well return False
[ "def", "key_is_valid_xml", "(", "key", ")", ":", "LOG", ".", "info", "(", "'Inside key_is_valid_xml(). Testing \"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ")", ")", "test_xml", "=", "'<?xml version=\"1.0\" encoding=\"UTF-8\" ?><%s>foo</%s>'", "%", "(", "key", ",", "key", ")", "try", ":", "parseString", "(", "test_xml", ")", "return", "True", "except", "Exception", ":", "# minidom does not implement exceptions well", "return", "False" ]
Checks that a key is a valid XML name
[ "Checks", "that", "a", "key", "is", "a", "valid", "XML", "name" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L121-L129
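Round-tripping a candidate key through minidom, as above, rejects names XML forbids; a sketch assuming the helper is importable at this revision:

from dicttoxml import key_is_valid_xml

assert key_is_valid_xml('item') is True
assert key_is_valid_xml('123') is False        # element names cannot start with a digit
assert key_is_valid_xml('has space') is False  # spaces are not allowed in names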
8,027
quandyfactory/dicttoxml
dicttoxml.py
make_valid_xml_name
def make_valid_xml_name(key, attr): """Tests an XML name and fixes it if invalid""" LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % ( unicode_me(key), unicode_me(attr)) ) key = escape_xml(key) attr = escape_xml(attr) # pass through if key is already valid if key_is_valid_xml(key): return key, attr # prepend a lowercase n if the key is numeric if key.isdigit(): return 'n%s' % (key), attr # replace spaces with underscores if that fixes the problem if key_is_valid_xml(key.replace(' ', '_')): return key.replace(' ', '_'), attr # key is still invalid - move it into a name attribute attr['name'] = key key = 'key' return key, attr
python
def make_valid_xml_name(key, attr): """Tests an XML name and fixes it if invalid""" LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % ( unicode_me(key), unicode_me(attr)) ) key = escape_xml(key) attr = escape_xml(attr) # pass through if key is already valid if key_is_valid_xml(key): return key, attr # prepend a lowercase n if the key is numeric if key.isdigit(): return 'n%s' % (key), attr # replace spaces with underscores if that fixes the problem if key_is_valid_xml(key.replace(' ', '_')): return key.replace(' ', '_'), attr # key is still invalid - move it into a name attribute attr['name'] = key key = 'key' return key, attr
[ "def", "make_valid_xml_name", "(", "key", ",", "attr", ")", ":", "LOG", ".", "info", "(", "'Inside make_valid_xml_name(). Testing key \"%s\" with attr \"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ",", "unicode_me", "(", "attr", ")", ")", ")", "key", "=", "escape_xml", "(", "key", ")", "attr", "=", "escape_xml", "(", "attr", ")", "# pass through if key is already valid", "if", "key_is_valid_xml", "(", "key", ")", ":", "return", "key", ",", "attr", "# prepend a lowercase n if the key is numeric", "if", "key", ".", "isdigit", "(", ")", ":", "return", "'n%s'", "%", "(", "key", ")", ",", "attr", "# replace spaces with underscores if that fixes the problem", "if", "key_is_valid_xml", "(", "key", ".", "replace", "(", "' '", ",", "'_'", ")", ")", ":", "return", "key", ".", "replace", "(", "' '", ",", "'_'", ")", ",", "attr", "# key is still invalid - move it into a name attribute", "attr", "[", "'name'", "]", "=", "key", "key", "=", "'key'", "return", "key", ",", "attr" ]
Tests an XML name and fixes it if invalid
[ "Tests", "an", "XML", "name", "and", "fixes", "it", "if", "invalid" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L132-L155
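The three repair strategies above, in order, assuming the helper is importable at this revision:

from dicttoxml import make_valid_xml_name

assert make_valid_xml_name('item', {}) == ('item', {})            # already valid: untouched
assert make_valid_xml_name('123', {}) == ('n123', {})             # numeric: prefixed with 'n'
assert make_valid_xml_name('has space', {}) == ('has_space', {})  # spaces become underscores
key, attr = make_valid_xml_name('1 bad key', {})                  # still invalid: moved into @name
assert (key, attr) == ('key', {'name': '1 bad key'})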
8,028
quandyfactory/dicttoxml
dicttoxml.py
convert
def convert(obj, ids, attr_type, item_func, cdata, parent='root'):
    """Routes the elements of an object to the right function to convert them
    based on their data type"""

    LOG.info('Inside convert(). obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))

    item_name = item_func(parent)

    # pass cdata by keyword; positionally it would land in the attr parameter
    if isinstance(obj, numbers.Number) or type(obj) in (str, unicode):
        return convert_kv(item_name, obj, attr_type, cdata=cdata)

    if hasattr(obj, 'isoformat'):
        return convert_kv(item_name, obj.isoformat(), attr_type, cdata=cdata)

    if type(obj) == bool:
        return convert_bool(item_name, obj, attr_type, cdata=cdata)

    if obj is None:
        return convert_none(item_name, '', attr_type, cdata=cdata)

    if isinstance(obj, dict):
        return convert_dict(obj, ids, parent, attr_type, item_func, cdata)

    if isinstance(obj, collections.Iterable):
        return convert_list(obj, ids, parent, attr_type, item_func, cdata)

    raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__))
python
def convert(obj, ids, attr_type, item_func, cdata, parent='root'):
    """Routes the elements of an object to the right function to convert them
    based on their data type"""

    LOG.info('Inside convert(). obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))

    item_name = item_func(parent)

    # pass cdata by keyword; positionally it would land in the attr parameter
    if isinstance(obj, numbers.Number) or type(obj) in (str, unicode):
        return convert_kv(item_name, obj, attr_type, cdata=cdata)

    if hasattr(obj, 'isoformat'):
        return convert_kv(item_name, obj.isoformat(), attr_type, cdata=cdata)

    if type(obj) == bool:
        return convert_bool(item_name, obj, attr_type, cdata=cdata)

    if obj is None:
        return convert_none(item_name, '', attr_type, cdata=cdata)

    if isinstance(obj, dict):
        return convert_dict(obj, ids, parent, attr_type, item_func, cdata)

    if isinstance(obj, collections.Iterable):
        return convert_list(obj, ids, parent, attr_type, item_func, cdata)

    raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__))
[ "def", "convert", "(", "obj", ",", "ids", ",", "attr_type", ",", "item_func", ",", "cdata", ",", "parent", "=", "'root'", ")", ":", "LOG", ".", "info", "(", "'Inside convert(). obj type is: \"%s\", obj=\"%s\"'", "%", "(", "type", "(", "obj", ")", ".", "__name__", ",", "unicode_me", "(", "obj", ")", ")", ")", "item_name", "=", "item_func", "(", "parent", ")", "if", "isinstance", "(", "obj", ",", "numbers", ".", "Number", ")", "or", "type", "(", "obj", ")", "in", "(", "str", ",", "unicode", ")", ":", "return", "convert_kv", "(", "item_name", ",", "obj", ",", "attr_type", ",", "cdata", ")", "if", "hasattr", "(", "obj", ",", "'isoformat'", ")", ":", "return", "convert_kv", "(", "item_name", ",", "obj", ".", "isoformat", "(", ")", ",", "attr_type", ",", "cdata", ")", "if", "type", "(", "obj", ")", "==", "bool", ":", "return", "convert_bool", "(", "item_name", ",", "obj", ",", "attr_type", ",", "cdata", ")", "if", "obj", "is", "None", ":", "return", "convert_none", "(", "item_name", ",", "''", ",", "attr_type", ",", "cdata", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "convert_dict", "(", "obj", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", "if", "isinstance", "(", "obj", ",", "collections", ".", "Iterable", ")", ":", "return", "convert_list", "(", "obj", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", "raise", "TypeError", "(", "'Unsupported data type: %s (%s)'", "%", "(", "obj", ",", "type", "(", "obj", ")", ".", "__name__", ")", ")" ]
Routes the elements of an object to the right function to convert them based on their data type
[ "Routes", "the", "elements", "of", "an", "object", "to", "the", "right", "function", "to", "convert", "them", "based", "on", "their", "data", "type" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L168-L194
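End to end, the router above is what the public dicttoxml() entry point drives; a sketch of the observable result on a mixed payload, assuming default options and a pre-3.10 Python (the expected output is shown wrapped across comment lines; the real value is a single bytes literal):

from dicttoxml import dicttoxml

xml = dicttoxml({'name': 'spam', 'tags': ['a', 'b'], 'note': None})
# b'<?xml version="1.0" encoding="UTF-8" ?><root>
#   <name type="str">spam</name>
#   <tags type="list"><item type="str">a</item><item type="str">b</item></tags>
#   <note type="null"></note>
# </root>'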
8,029
quandyfactory/dicttoxml
dicttoxml.py
convert_dict
def convert_dict(obj, ids, parent, attr_type, item_func, cdata): """Converts a dict into an XML string.""" LOG.info('Inside convert_dict(): obj type is: "%s", obj="%s"' % ( type(obj).__name__, unicode_me(obj)) ) output = [] addline = output.append item_name = item_func(parent) for key, val in obj.items(): LOG.info('Looping inside convert_dict(): key="%s", val="%s", type(val)="%s"' % ( unicode_me(key), unicode_me(val), type(val).__name__) ) attr = {} if not ids else {'id': '%s' % (get_unique_id(parent)) } key, attr = make_valid_xml_name(key, attr) if isinstance(val, numbers.Number) or type(val) in (str, unicode): addline(convert_kv(key, val, attr_type, attr, cdata)) elif hasattr(val, 'isoformat'): # datetime addline(convert_kv(key, val.isoformat(), attr_type, attr, cdata)) elif type(val) == bool: addline(convert_bool(key, val, attr_type, attr, cdata)) elif isinstance(val, dict): if attr_type: attr['type'] = get_xml_type(val) addline('<%s%s>%s</%s>' % ( key, make_attrstring(attr), convert_dict(val, ids, key, attr_type, item_func, cdata), key ) ) elif isinstance(val, collections.Iterable): if attr_type: attr['type'] = get_xml_type(val) addline('<%s%s>%s</%s>' % ( key, make_attrstring(attr), convert_list(val, ids, key, attr_type, item_func, cdata), key ) ) elif val is None: addline(convert_none(key, val, attr_type, attr, cdata)) else: raise TypeError('Unsupported data type: %s (%s)' % ( val, type(val).__name__) ) return ''.join(output)
python
def convert_dict(obj, ids, parent, attr_type, item_func, cdata): """Converts a dict into an XML string.""" LOG.info('Inside convert_dict(): obj type is: "%s", obj="%s"' % ( type(obj).__name__, unicode_me(obj)) ) output = [] addline = output.append item_name = item_func(parent) for key, val in obj.items(): LOG.info('Looping inside convert_dict(): key="%s", val="%s", type(val)="%s"' % ( unicode_me(key), unicode_me(val), type(val).__name__) ) attr = {} if not ids else {'id': '%s' % (get_unique_id(parent)) } key, attr = make_valid_xml_name(key, attr) if isinstance(val, numbers.Number) or type(val) in (str, unicode): addline(convert_kv(key, val, attr_type, attr, cdata)) elif hasattr(val, 'isoformat'): # datetime addline(convert_kv(key, val.isoformat(), attr_type, attr, cdata)) elif type(val) == bool: addline(convert_bool(key, val, attr_type, attr, cdata)) elif isinstance(val, dict): if attr_type: attr['type'] = get_xml_type(val) addline('<%s%s>%s</%s>' % ( key, make_attrstring(attr), convert_dict(val, ids, key, attr_type, item_func, cdata), key ) ) elif isinstance(val, collections.Iterable): if attr_type: attr['type'] = get_xml_type(val) addline('<%s%s>%s</%s>' % ( key, make_attrstring(attr), convert_list(val, ids, key, attr_type, item_func, cdata), key ) ) elif val is None: addline(convert_none(key, val, attr_type, attr, cdata)) else: raise TypeError('Unsupported data type: %s (%s)' % ( val, type(val).__name__) ) return ''.join(output)
[ "def", "convert_dict", "(", "obj", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ":", "LOG", ".", "info", "(", "'Inside convert_dict(): obj type is: \"%s\", obj=\"%s\"'", "%", "(", "type", "(", "obj", ")", ".", "__name__", ",", "unicode_me", "(", "obj", ")", ")", ")", "output", "=", "[", "]", "addline", "=", "output", ".", "append", "item_name", "=", "item_func", "(", "parent", ")", "for", "key", ",", "val", "in", "obj", ".", "items", "(", ")", ":", "LOG", ".", "info", "(", "'Looping inside convert_dict(): key=\"%s\", val=\"%s\", type(val)=\"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ",", "unicode_me", "(", "val", ")", ",", "type", "(", "val", ")", ".", "__name__", ")", ")", "attr", "=", "{", "}", "if", "not", "ids", "else", "{", "'id'", ":", "'%s'", "%", "(", "get_unique_id", "(", "parent", ")", ")", "}", "key", ",", "attr", "=", "make_valid_xml_name", "(", "key", ",", "attr", ")", "if", "isinstance", "(", "val", ",", "numbers", ".", "Number", ")", "or", "type", "(", "val", ")", "in", "(", "str", ",", "unicode", ")", ":", "addline", "(", "convert_kv", "(", "key", ",", "val", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "hasattr", "(", "val", ",", "'isoformat'", ")", ":", "# datetime", "addline", "(", "convert_kv", "(", "key", ",", "val", ".", "isoformat", "(", ")", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "type", "(", "val", ")", "==", "bool", ":", "addline", "(", "convert_bool", "(", "key", ",", "val", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "addline", "(", "'<%s%s>%s</%s>'", "%", "(", "key", ",", "make_attrstring", "(", "attr", ")", ",", "convert_dict", "(", "val", ",", "ids", ",", "key", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "key", ")", ")", "elif", "isinstance", "(", "val", ",", "collections", ".", "Iterable", ")", ":", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "addline", "(", "'<%s%s>%s</%s>'", "%", "(", "key", ",", "make_attrstring", "(", "attr", ")", ",", "convert_list", "(", "val", ",", "ids", ",", "key", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "key", ")", ")", "elif", "val", "is", "None", ":", "addline", "(", "convert_none", "(", "key", ",", "val", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "else", ":", "raise", "TypeError", "(", "'Unsupported data type: %s (%s)'", "%", "(", "val", ",", "type", "(", "val", ")", ".", "__name__", ")", ")", "return", "''", ".", "join", "(", "output", ")" ]
Converts a dict into an XML string.
[ "Converts", "a", "dict", "into", "an", "XML", "string", "." ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L197-L254
8,030
quandyfactory/dicttoxml
dicttoxml.py
convert_list
def convert_list(items, ids, parent, attr_type, item_func, cdata): """Converts a list into an XML string.""" LOG.info('Inside convert_list()') output = [] addline = output.append item_name = item_func(parent) if ids: this_id = get_unique_id(parent) for i, item in enumerate(items): LOG.info('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % ( unicode_me(item), item_name, type(item).__name__) ) attr = {} if not ids else { 'id': '%s_%s' % (this_id, i+1) } if isinstance(item, numbers.Number) or type(item) in (str, unicode): addline(convert_kv(item_name, item, attr_type, attr, cdata)) elif hasattr(item, 'isoformat'): # datetime addline(convert_kv(item_name, item.isoformat(), attr_type, attr, cdata)) elif type(item) == bool: addline(convert_bool(item_name, item, attr_type, attr, cdata)) elif isinstance(item, dict): if not attr_type: addline('<%s>%s</%s>' % ( item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name, ) ) else: addline('<%s type="dict">%s</%s>' % ( item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name, ) ) elif isinstance(item, collections.Iterable): if not attr_type: addline('<%s %s>%s</%s>' % ( item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name, ) ) else: addline('<%s type="list"%s>%s</%s>' % ( item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name, ) ) elif item is None: addline(convert_none(item_name, None, attr_type, attr, cdata)) else: raise TypeError('Unsupported data type: %s (%s)' % ( item, type(item).__name__) ) return ''.join(output)
python
def convert_list(items, ids, parent, attr_type, item_func, cdata): """Converts a list into an XML string.""" LOG.info('Inside convert_list()') output = [] addline = output.append item_name = item_func(parent) if ids: this_id = get_unique_id(parent) for i, item in enumerate(items): LOG.info('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % ( unicode_me(item), item_name, type(item).__name__) ) attr = {} if not ids else { 'id': '%s_%s' % (this_id, i+1) } if isinstance(item, numbers.Number) or type(item) in (str, unicode): addline(convert_kv(item_name, item, attr_type, attr, cdata)) elif hasattr(item, 'isoformat'): # datetime addline(convert_kv(item_name, item.isoformat(), attr_type, attr, cdata)) elif type(item) == bool: addline(convert_bool(item_name, item, attr_type, attr, cdata)) elif isinstance(item, dict): if not attr_type: addline('<%s>%s</%s>' % ( item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name, ) ) else: addline('<%s type="dict">%s</%s>' % ( item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name, ) ) elif isinstance(item, collections.Iterable): if not attr_type: addline('<%s %s>%s</%s>' % ( item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name, ) ) else: addline('<%s type="list"%s>%s</%s>' % ( item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name, ) ) elif item is None: addline(convert_none(item_name, None, attr_type, attr, cdata)) else: raise TypeError('Unsupported data type: %s (%s)' % ( item, type(item).__name__) ) return ''.join(output)
[ "def", "convert_list", "(", "items", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ":", "LOG", ".", "info", "(", "'Inside convert_list()'", ")", "output", "=", "[", "]", "addline", "=", "output", ".", "append", "item_name", "=", "item_func", "(", "parent", ")", "if", "ids", ":", "this_id", "=", "get_unique_id", "(", "parent", ")", "for", "i", ",", "item", "in", "enumerate", "(", "items", ")", ":", "LOG", ".", "info", "(", "'Looping inside convert_list(): item=\"%s\", item_name=\"%s\", type=\"%s\"'", "%", "(", "unicode_me", "(", "item", ")", ",", "item_name", ",", "type", "(", "item", ")", ".", "__name__", ")", ")", "attr", "=", "{", "}", "if", "not", "ids", "else", "{", "'id'", ":", "'%s_%s'", "%", "(", "this_id", ",", "i", "+", "1", ")", "}", "if", "isinstance", "(", "item", ",", "numbers", ".", "Number", ")", "or", "type", "(", "item", ")", "in", "(", "str", ",", "unicode", ")", ":", "addline", "(", "convert_kv", "(", "item_name", ",", "item", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "hasattr", "(", "item", ",", "'isoformat'", ")", ":", "# datetime", "addline", "(", "convert_kv", "(", "item_name", ",", "item", ".", "isoformat", "(", ")", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "type", "(", "item", ")", "==", "bool", ":", "addline", "(", "convert_bool", "(", "item_name", ",", "item", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "elif", "isinstance", "(", "item", ",", "dict", ")", ":", "if", "not", "attr_type", ":", "addline", "(", "'<%s>%s</%s>'", "%", "(", "item_name", ",", "convert_dict", "(", "item", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "item_name", ",", ")", ")", "else", ":", "addline", "(", "'<%s type=\"dict\">%s</%s>'", "%", "(", "item_name", ",", "convert_dict", "(", "item", ",", "ids", ",", "parent", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "item_name", ",", ")", ")", "elif", "isinstance", "(", "item", ",", "collections", ".", "Iterable", ")", ":", "if", "not", "attr_type", ":", "addline", "(", "'<%s %s>%s</%s>'", "%", "(", "item_name", ",", "make_attrstring", "(", "attr", ")", ",", "convert_list", "(", "item", ",", "ids", ",", "item_name", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "item_name", ",", ")", ")", "else", ":", "addline", "(", "'<%s type=\"list\"%s>%s</%s>'", "%", "(", "item_name", ",", "make_attrstring", "(", "attr", ")", ",", "convert_list", "(", "item", ",", "ids", ",", "item_name", ",", "attr_type", ",", "item_func", ",", "cdata", ")", ",", "item_name", ",", ")", ")", "elif", "item", "is", "None", ":", "addline", "(", "convert_none", "(", "item_name", ",", "None", ",", "attr_type", ",", "attr", ",", "cdata", ")", ")", "else", ":", "raise", "TypeError", "(", "'Unsupported data type: %s (%s)'", "%", "(", "item", ",", "type", "(", "item", ")", ".", "__name__", ")", ")", "return", "''", ".", "join", "(", "output", ")" ]
Converts a list into an XML string.
[ "Converts", "a", "list", "into", "an", "XML", "string", "." ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L257-L321
8,031
quandyfactory/dicttoxml
dicttoxml.py
convert_kv
def convert_kv(key, val, attr_type, attr=None, cdata=False):
    """Converts a number or string into an XML element"""
    LOG.info('Inside convert_kv(): key="%s", val="%s", type(val) is: "%s"' % (
        unicode_me(key), unicode_me(val), type(val).__name__)
    )

    # avoid a shared mutable default; attr is mutated below
    attr = {} if attr is None else attr
    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s>%s</%s>' % (
        key, attrstring,
        wrap_cdata(val) if cdata else escape_xml(val),
        key
    )
python
def convert_kv(key, val, attr_type, attr=None, cdata=False):
    """Converts a number or string into an XML element"""
    LOG.info('Inside convert_kv(): key="%s", val="%s", type(val) is: "%s"' % (
        unicode_me(key), unicode_me(val), type(val).__name__)
    )

    # avoid a shared mutable default; attr is mutated below
    attr = {} if attr is None else attr
    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s>%s</%s>' % (
        key, attrstring,
        wrap_cdata(val) if cdata else escape_xml(val),
        key
    )
[ "def", "convert_kv", "(", "key", ",", "val", ",", "attr_type", ",", "attr", "=", "{", "}", ",", "cdata", "=", "False", ")", ":", "LOG", ".", "info", "(", "'Inside convert_kv(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ",", "unicode_me", "(", "val", ")", ",", "type", "(", "val", ")", ".", "__name__", ")", ")", "key", ",", "attr", "=", "make_valid_xml_name", "(", "key", ",", "attr", ")", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "attrstring", "=", "make_attrstring", "(", "attr", ")", "return", "'<%s%s>%s</%s>'", "%", "(", "key", ",", "attrstring", ",", "wrap_cdata", "(", "val", ")", "if", "cdata", "==", "True", "else", "escape_xml", "(", "val", ")", ",", "key", ")" ]
Converts a number or string into an XML element
[ "Converts", "a", "number", "or", "string", "into", "an", "XML", "element" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L324-L339
8,032
quandyfactory/dicttoxml
dicttoxml.py
convert_bool
def convert_bool(key, val, attr_type, attr=None, cdata=False):
    """Converts a boolean into an XML element"""
    LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % (
        unicode_me(key), unicode_me(val), type(val).__name__)
    )

    # avoid a shared mutable default; attr is mutated below
    attr = {} if attr is None else attr
    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key)
python
def convert_bool(key, val, attr_type, attr=None, cdata=False):
    """Converts a boolean into an XML element"""
    LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % (
        unicode_me(key), unicode_me(val), type(val).__name__)
    )

    # avoid a shared mutable default; attr is mutated below
    attr = {} if attr is None else attr
    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key)
[ "def", "convert_bool", "(", "key", ",", "val", ",", "attr_type", ",", "attr", "=", "{", "}", ",", "cdata", "=", "False", ")", ":", "LOG", ".", "info", "(", "'Inside convert_bool(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ",", "unicode_me", "(", "val", ")", ",", "type", "(", "val", ")", ".", "__name__", ")", ")", "key", ",", "attr", "=", "make_valid_xml_name", "(", "key", ",", "attr", ")", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "attrstring", "=", "make_attrstring", "(", "attr", ")", "return", "'<%s%s>%s</%s>'", "%", "(", "key", ",", "attrstring", ",", "unicode", "(", "val", ")", ".", "lower", "(", ")", ",", "key", ")" ]
Converts a boolean into an XML element
[ "Converts", "a", "boolean", "into", "an", "XML", "element" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L342-L353
8,033
quandyfactory/dicttoxml
dicttoxml.py
convert_none
def convert_none(key, val, attr_type, attr={}, cdata=False):
    """Converts a null value into an XML element"""
    LOG.info('Inside convert_none(): key="%s"' % (unicode_me(key)))

    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s></%s>' % (key, attrstring, key)
python
def convert_none(key, val, attr_type, attr={}, cdata=False):
    """Converts a null value into an XML element"""
    LOG.info('Inside convert_none(): key="%s"' % (unicode_me(key)))

    key, attr = make_valid_xml_name(key, attr)

    if attr_type:
        attr['type'] = get_xml_type(val)
    attrstring = make_attrstring(attr)
    return '<%s%s></%s>' % (key, attrstring, key)
[ "def", "convert_none", "(", "key", ",", "val", ",", "attr_type", ",", "attr", "=", "{", "}", ",", "cdata", "=", "False", ")", ":", "LOG", ".", "info", "(", "'Inside convert_none(): key=\"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ")", ")", "key", ",", "attr", "=", "make_valid_xml_name", "(", "key", ",", "attr", ")", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "attrstring", "=", "make_attrstring", "(", "attr", ")", "return", "'<%s%s></%s>'", "%", "(", "key", ",", "attrstring", ",", "key", ")" ]
Converts a null value into an XML element
[ "Converts", "a", "null", "value", "into", "an", "XML", "element" ]
2016fe9817ad03b26aa5f1a475f5b79ad6757b96
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L356-L365
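The same public entry point exercises convert_bool() and convert_none(): booleans are lowercased and None becomes an empty element. A hedged sketch; the commented output shape is inferred from the two helpers above, not verified.

from dicttoxml import dicttoxml

xml = dicttoxml({'ok': True, 'missing': None}, attr_type=False)
print(xml.decode('utf-8'))
# Expected shape (assumption): ...<ok>true</ok><missing></missing>...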
8,034
gruns/icecream
icecream/icecream.py
getCallSourceLines
def getCallSourceLines(callFrame, icNames, icMethod):
    """Raises NoSourceAvailableError."""
    code = callFrame.f_code

    # inspect.getblock(), which is called internally by inspect.getsource(),
    # only returns the first line of <code> when <code> represents a top-level
    # module, not the entire module's source, as needed here. The
    #
    #   if ismodule(object):
    #       return lines, 0
    #
    # check in inspect.py doesn't account for code objects of modules, only
    # actual module objects themselves.
    #
    # A workaround is to call findsource() directly on code objects of modules,
    # which bypasses getblock().
    #
    # Also, the errors raised differ between Python2 and Python3 . In Python2,
    # inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,
    # inspect.findsource() and inspect.getsource() raise OSErrors.
    try:
        if code.co_name == '<module>':  # Module -> use workaround above.
            parentBlockStartLine = 1
            lines = inspect.findsource(code)[0]  # Raises [IO/OS]Error.
            parentBlockSource = ''.join(lines)
        else:  # Not a module -> use inspect.getsource() normally.
            parentBlockStartLine = code.co_firstlineno
            parentBlockSource = inspect.getsource(code)  # Raises [IO/OS]Error.
    except (IOError, OSError) as err:
        if 'source code' in err.args[0]:
            raise NoSourceAvailableError()
        else:
            raise

    lineno = inspect.getframeinfo(callFrame)[1]
    linenoRelativeToParent = lineno - parentBlockStartLine + 1

    # There could be multiple ic() calls on the same line(s), like
    #
    #   ic(1); ic(2); ic(3,
    #       4,
    #       5); ic(6)
    #
    # so include all of them. Which invocation is the appropriate one will be
    # determined later via bytecode offset calculations.
    #
    # TODO(grun): Support invocations of ic() where ic() is an attribute chain
    # in the AST. For example, support
    #
    #   import icecream
    #   icecream.ic()
    #
    # and
    #
    #   class Foo:
    #       blah = ic
    #   Foo.blah()
    #
    parentBlockSource = textwrap.dedent(parentBlockSource)
    potentialCalls = [
        node for node in ast.walk(ast.parse(parentBlockSource))
        if isAstNodeIceCreamCall(node, icNames, icMethod) and
        linenoRelativeToParent in getAllLineNumbersOfAstNode(node)]

    if not potentialCalls:
        # TODO(grun): Add note that to NoSourceAvailableError that this
        # situation can occur when the underlying source changed during
        # execution.
        raise NoSourceAvailableError()

    endLine = lineno - parentBlockStartLine + 1
    startLine = min(call.lineno for call in potentialCalls)
    lines = parentBlockSource.splitlines()[startLine - 1: endLine]

    # inspect's lineno attribute doesn't point to the closing right parenthesis
    # if the closing right parenthesis is on its own line without any
    # arguments. E.g.
    #
    #   ic(1,
    #      2  <--- inspect's reported lineno.
    #      )  <--- Should be the reported lineno.
    #
    # Detect this situation and add the missing right parenthesis.
    if isCallStrMissingClosingRightParenthesis('\n'.join(lines).strip()):
        lines.append(')')

    source = stripCommentsAndNewlines('\n'.join(lines)).strip()

    absoluteStartLineNum = parentBlockStartLine + startLine - 1
    startLineOffset = calculateLineOffsets(code)[absoluteStartLineNum]

    return source, absoluteStartLineNum, startLineOffset
python
def getCallSourceLines(callFrame, icNames, icMethod):
    """Raises NoSourceAvailableError."""
    code = callFrame.f_code

    # inspect.getblock(), which is called internally by inspect.getsource(),
    # only returns the first line of <code> when <code> represents a top-level
    # module, not the entire module's source, as needed here. The
    #
    #   if ismodule(object):
    #       return lines, 0
    #
    # check in inspect.py doesn't account for code objects of modules, only
    # actual module objects themselves.
    #
    # A workaround is to call findsource() directly on code objects of modules,
    # which bypasses getblock().
    #
    # Also, the errors raised differ between Python2 and Python3 . In Python2,
    # inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,
    # inspect.findsource() and inspect.getsource() raise OSErrors.
    try:
        if code.co_name == '<module>':  # Module -> use workaround above.
            parentBlockStartLine = 1
            lines = inspect.findsource(code)[0]  # Raises [IO/OS]Error.
            parentBlockSource = ''.join(lines)
        else:  # Not a module -> use inspect.getsource() normally.
            parentBlockStartLine = code.co_firstlineno
            parentBlockSource = inspect.getsource(code)  # Raises [IO/OS]Error.
    except (IOError, OSError) as err:
        if 'source code' in err.args[0]:
            raise NoSourceAvailableError()
        else:
            raise

    lineno = inspect.getframeinfo(callFrame)[1]
    linenoRelativeToParent = lineno - parentBlockStartLine + 1

    # There could be multiple ic() calls on the same line(s), like
    #
    #   ic(1); ic(2); ic(3,
    #       4,
    #       5); ic(6)
    #
    # so include all of them. Which invocation is the appropriate one will be
    # determined later via bytecode offset calculations.
    #
    # TODO(grun): Support invocations of ic() where ic() is an attribute chain
    # in the AST. For example, support
    #
    #   import icecream
    #   icecream.ic()
    #
    # and
    #
    #   class Foo:
    #       blah = ic
    #   Foo.blah()
    #
    parentBlockSource = textwrap.dedent(parentBlockSource)
    potentialCalls = [
        node for node in ast.walk(ast.parse(parentBlockSource))
        if isAstNodeIceCreamCall(node, icNames, icMethod) and
        linenoRelativeToParent in getAllLineNumbersOfAstNode(node)]

    if not potentialCalls:
        # TODO(grun): Add note that to NoSourceAvailableError that this
        # situation can occur when the underlying source changed during
        # execution.
        raise NoSourceAvailableError()

    endLine = lineno - parentBlockStartLine + 1
    startLine = min(call.lineno for call in potentialCalls)
    lines = parentBlockSource.splitlines()[startLine - 1: endLine]

    # inspect's lineno attribute doesn't point to the closing right parenthesis
    # if the closing right parenthesis is on its own line without any
    # arguments. E.g.
    #
    #   ic(1,
    #      2  <--- inspect's reported lineno.
    #      )  <--- Should be the reported lineno.
    #
    # Detect this situation and add the missing right parenthesis.
    if isCallStrMissingClosingRightParenthesis('\n'.join(lines).strip()):
        lines.append(')')

    source = stripCommentsAndNewlines('\n'.join(lines)).strip()

    absoluteStartLineNum = parentBlockStartLine + startLine - 1
    startLineOffset = calculateLineOffsets(code)[absoluteStartLineNum]

    return source, absoluteStartLineNum, startLineOffset
[ "def", "getCallSourceLines", "(", "callFrame", ",", "icNames", ",", "icMethod", ")", ":", "code", "=", "callFrame", ".", "f_code", "# inspect.getblock(), which is called internally by inspect.getsource(),", "# only returns the first line of <code> when <code> represents a top-level", "# module, not the entire module's source, as needed here. The", "#", "# if ismodule(object):", "# return lines, 0", "#", "# check in inspect.py doesn't account for code objects of modules, only", "# actual module objects themselves.", "#", "# A workaround is to call findsource() directly on code objects of modules,", "# which bypasses getblock().", "#", "# Also, the errors raised differ between Python2 and Python3 . In Python2,", "# inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,", "# inspect.findsource() and inspect.getsource() raise OSErrors.", "try", ":", "if", "code", ".", "co_name", "==", "'<module>'", ":", "# Module -> use workaround above.", "parentBlockStartLine", "=", "1", "lines", "=", "inspect", ".", "findsource", "(", "code", ")", "[", "0", "]", "# Raises [IO/OS]Error.", "parentBlockSource", "=", "''", ".", "join", "(", "lines", ")", "else", ":", "# Not a module -> use inspect.getsource() normally.", "parentBlockStartLine", "=", "code", ".", "co_firstlineno", "parentBlockSource", "=", "inspect", ".", "getsource", "(", "code", ")", "# Raises [IO/OS]Error.", "except", "(", "IOError", ",", "OSError", ")", "as", "err", ":", "if", "'source code'", "in", "err", ".", "args", "[", "0", "]", ":", "raise", "NoSourceAvailableError", "(", ")", "else", ":", "raise", "lineno", "=", "inspect", ".", "getframeinfo", "(", "callFrame", ")", "[", "1", "]", "linenoRelativeToParent", "=", "lineno", "-", "parentBlockStartLine", "+", "1", "# There could be multiple ic() calls on the same line(s), like", "#", "# ic(1); ic(2); ic(3,", "# 4,", "# 5); ic(6)", "#", "# so include all of them. Which invocation is the appropriate one will be", "# determined later via bytecode offset calculations.", "#", "# TODO(grun): Support invocations of ic() where ic() is an attribute chain", "# in the AST. For example, support", "#", "# import icecream", "# icecream.ic()", "#", "# and", "#", "# class Foo:", "# blah = ic", "# Foo.blah()", "#", "parentBlockSource", "=", "textwrap", ".", "dedent", "(", "parentBlockSource", ")", "potentialCalls", "=", "[", "node", "for", "node", "in", "ast", ".", "walk", "(", "ast", ".", "parse", "(", "parentBlockSource", ")", ")", "if", "isAstNodeIceCreamCall", "(", "node", ",", "icNames", ",", "icMethod", ")", "and", "linenoRelativeToParent", "in", "getAllLineNumbersOfAstNode", "(", "node", ")", "]", "if", "not", "potentialCalls", ":", "# TODO(grun): Add note that to NoSourceAvailableError that this", "# situation can occur when the underlying source changed during", "# execution.", "raise", "NoSourceAvailableError", "(", ")", "endLine", "=", "lineno", "-", "parentBlockStartLine", "+", "1", "startLine", "=", "min", "(", "call", ".", "lineno", "for", "call", "in", "potentialCalls", ")", "lines", "=", "parentBlockSource", ".", "splitlines", "(", ")", "[", "startLine", "-", "1", ":", "endLine", "]", "# inspect's lineno attribute doesn't point to the closing right parenthesis", "# if the closing right parenthesis is on its own line without any", "# arguments. 
E.g.", "#", "# ic(1,", "# 2 <--- inspect's reported lineno.", "# ) <--- Should be the reported lineno.", "#", "# Detect this situation and add the missing right parenthesis.", "if", "isCallStrMissingClosingRightParenthesis", "(", "'\\n'", ".", "join", "(", "lines", ")", ".", "strip", "(", ")", ")", ":", "lines", ".", "append", "(", "')'", ")", "source", "=", "stripCommentsAndNewlines", "(", "'\\n'", ".", "join", "(", "lines", ")", ")", ".", "strip", "(", ")", "absoluteStartLineNum", "=", "parentBlockStartLine", "+", "startLine", "-", "1", "startLineOffset", "=", "calculateLineOffsets", "(", "code", ")", "[", "absoluteStartLineNum", "]", "return", "source", ",", "absoluteStartLineNum", ",", "startLineOffset" ]
Raises NoSourceAvailableError.
[ "Raises", "NoSourceAvailableError", "." ]
cb4f3d50ec747637721fe58b80f2cc2a2baedabf
https://github.com/gruns/icecream/blob/cb4f3d50ec747637721fe58b80f2cc2a2baedabf/icecream/icecream.py#L275-L366
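getCallSourceLines() combines inspect with an AST walk to find candidate ic() calls. A self-contained sketch of just the locate-calls-via-AST idea, mirroring the potentialCalls filter above; this is not icecream's actual API, and the names here are illustrative.

import ast
import textwrap

def call_linenos(func_name, source):
    # Parse the (dedented) source and collect the line number of every
    # plain-name call to func_name.
    tree = ast.parse(textwrap.dedent(source))
    return [node.lineno for node in ast.walk(tree)
            if isinstance(node, ast.Call)
            and isinstance(node.func, ast.Name)
            and node.func.id == func_name]

print(call_linenos('ic', "ic(1); ic(2)\nx = 3\nic(x)\n"))  # -> [1, 1, 3]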
8,035
Vaelor/python-mattermost-driver
src/mattermostdriver/driver.py
Driver.init_websocket
def init_websocket(self, event_handler, websocket_cls=Websocket):
    """
    Will initialize the websocket connection to the mattermost server.

    This should be run after login(), because the websocket needs to
    authenticate.

    See https://api.mattermost.com/v4/#tag/WebSocket for which websocket
    events mattermost sends.

    Example of a really simple event_handler function

    .. code:: python

        @asyncio.coroutine
        def my_event_handler(message):
            print(message)

    :param event_handler: The function to handle the websocket events. Takes one argument.
    :type event_handler: Function(message)
    :return: The event loop
    """
    self.websocket = websocket_cls(self.options, self.client.token)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(self.websocket.connect(event_handler))
    return loop
python
def init_websocket(self, event_handler, websocket_cls=Websocket):
    """
    Will initialize the websocket connection to the mattermost server.

    This should be run after login(), because the websocket needs to
    authenticate.

    See https://api.mattermost.com/v4/#tag/WebSocket for which websocket
    events mattermost sends.

    Example of a really simple event_handler function

    .. code:: python

        @asyncio.coroutine
        def my_event_handler(message):
            print(message)

    :param event_handler: The function to handle the websocket events. Takes one argument.
    :type event_handler: Function(message)
    :return: The event loop
    """
    self.websocket = websocket_cls(self.options, self.client.token)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(self.websocket.connect(event_handler))
    return loop
[ "def", "init_websocket", "(", "self", ",", "event_handler", ",", "websocket_cls", "=", "Websocket", ")", ":", "self", ".", "websocket", "=", "websocket_cls", "(", "self", ".", "options", ",", "self", ".", "client", ".", "token", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "loop", ".", "run_until_complete", "(", "self", ".", "websocket", ".", "connect", "(", "event_handler", ")", ")", "return", "loop" ]
Will initialize the websocket connection to the mattermost server.

This should be run after login(), because the websocket needs to authenticate.

See https://api.mattermost.com/v4/#tag/WebSocket for which websocket events mattermost sends.

Example of a really simple event_handler function

.. code:: python

    @asyncio.coroutine
    def my_event_handler(message):
        print(message)

:param event_handler: The function to handle the websocket events. Takes one argument.
:type event_handler: Function(message)
:return: The event loop
[ "Will", "initialize", "the", "websocket", "connection", "to", "the", "mattermost", "server", "." ]
ad1a936130096e39c2e1b76d78913e5950e06ca5
https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/driver.py#L114-L140
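A hedged end-to-end sketch of the documented flow, login() followed by init_websocket(); the option values are placeholders and the import path assumes the package's usual layout.

import asyncio
from mattermostdriver import Driver  # assumed public import

@asyncio.coroutine
def my_event_handler(message):
    print(message)

driver = Driver({
    'url': 'chat.example.com',       # placeholder server
    'login_id': 'user@example.com',  # placeholder credentials
    'password': 'secret',
})
driver.login()
driver.init_websocket(my_event_handler)  # blocks, running the event loop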
8,036
Vaelor/python-mattermost-driver
src/mattermostdriver/driver.py
Driver.login
def login(self):
    """
    Logs the user in.

    The login information is saved in the client
    - userid
    - username
    - cookies

    :return: The raw response from the request
    """
    if self.options['token']:
        self.client.token = self.options['token']
        result = self.users.get_user('me')
    else:
        response = self.users.login_user({
            'login_id': self.options['login_id'],
            'password': self.options['password'],
            'token': self.options['mfa_token']
        })
        if response.status_code == 200:
            self.client.token = response.headers['Token']
            self.client.cookies = response.cookies
        try:
            result = response.json()
        except ValueError:
            log.debug('Could not convert response to json, returning raw response')
            result = response

    log.debug(result)

    if 'id' in result:
        self.client.userid = result['id']
    if 'username' in result:
        self.client.username = result['username']

    return result
python
def login(self):
    """
    Logs the user in.

    The login information is saved in the client
    - userid
    - username
    - cookies

    :return: The raw response from the request
    """
    if self.options['token']:
        self.client.token = self.options['token']
        result = self.users.get_user('me')
    else:
        response = self.users.login_user({
            'login_id': self.options['login_id'],
            'password': self.options['password'],
            'token': self.options['mfa_token']
        })
        if response.status_code == 200:
            self.client.token = response.headers['Token']
            self.client.cookies = response.cookies
        try:
            result = response.json()
        except ValueError:
            log.debug('Could not convert response to json, returning raw response')
            result = response

    log.debug(result)

    if 'id' in result:
        self.client.userid = result['id']
    if 'username' in result:
        self.client.username = result['username']

    return result
[ "def", "login", "(", "self", ")", ":", "if", "self", ".", "options", "[", "'token'", "]", ":", "self", ".", "client", ".", "token", "=", "self", ".", "options", "[", "'token'", "]", "result", "=", "self", ".", "users", ".", "get_user", "(", "'me'", ")", "else", ":", "response", "=", "self", ".", "users", ".", "login_user", "(", "{", "'login_id'", ":", "self", ".", "options", "[", "'login_id'", "]", ",", "'password'", ":", "self", ".", "options", "[", "'password'", "]", ",", "'token'", ":", "self", ".", "options", "[", "'mfa_token'", "]", "}", ")", "if", "response", ".", "status_code", "==", "200", ":", "self", ".", "client", ".", "token", "=", "response", ".", "headers", "[", "'Token'", "]", "self", ".", "client", ".", "cookies", "=", "response", ".", "cookies", "try", ":", "result", "=", "response", ".", "json", "(", ")", "except", "ValueError", ":", "log", ".", "debug", "(", "'Could not convert response to json, returning raw response'", ")", "result", "=", "response", "log", ".", "debug", "(", "result", ")", "if", "'id'", "in", "result", ":", "self", ".", "client", ".", "userid", "=", "result", "[", "'id'", "]", "if", "'username'", "in", "result", ":", "self", ".", "client", ".", "username", "=", "result", "[", "'username'", "]", "return", "result" ]
Logs the user in.

The login information is saved in the client
- userid
- username
- cookies

:return: The raw response from the request
[ "Logs", "the", "user", "in", "." ]
ad1a936130096e39c2e1b76d78913e5950e06ca5
https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/driver.py#L142-L178
8,037
Vaelor/python-mattermost-driver
src/mattermostdriver/websocket.py
Websocket.connect
def connect(self, event_handler):
    """
    Connect to the websocket and authenticate it.
    When the authentication has finished, start the loop listening for messages,
    sending a ping to the server to keep the connection alive.

    :param event_handler: Every websocket event will be passed there. Takes one argument.
    :type event_handler: Function(message)
    :return:
    """
    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    if not self.options['verify']:
        context.verify_mode = ssl.CERT_NONE

    scheme = 'wss://'
    if self.options['scheme'] != 'https':
        scheme = 'ws://'
        context = None

    url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
        scheme=scheme,
        url=self.options['url'],
        port=str(self.options['port']),
        basepath=self.options['basepath']
    )

    websocket = yield from websockets.connect(
        url,
        ssl=context,
    )

    yield from self._authenticate_websocket(websocket, event_handler)
    yield from self._start_loop(websocket, event_handler)
python
def connect(self, event_handler):
    """
    Connect to the websocket and authenticate it.
    When the authentication has finished, start the loop listening for messages,
    sending a ping to the server to keep the connection alive.

    :param event_handler: Every websocket event will be passed there. Takes one argument.
    :type event_handler: Function(message)
    :return:
    """
    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    if not self.options['verify']:
        context.verify_mode = ssl.CERT_NONE

    scheme = 'wss://'
    if self.options['scheme'] != 'https':
        scheme = 'ws://'
        context = None

    url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
        scheme=scheme,
        url=self.options['url'],
        port=str(self.options['port']),
        basepath=self.options['basepath']
    )

    websocket = yield from websockets.connect(
        url,
        ssl=context,
    )

    yield from self._authenticate_websocket(websocket, event_handler)
    yield from self._start_loop(websocket, event_handler)
[ "def", "connect", "(", "self", ",", "event_handler", ")", ":", "context", "=", "ssl", ".", "create_default_context", "(", "purpose", "=", "ssl", ".", "Purpose", ".", "CLIENT_AUTH", ")", "if", "not", "self", ".", "options", "[", "'verify'", "]", ":", "context", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "scheme", "=", "'wss://'", "if", "self", ".", "options", "[", "'scheme'", "]", "!=", "'https'", ":", "scheme", "=", "'ws://'", "context", "=", "None", "url", "=", "'{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'", ".", "format", "(", "scheme", "=", "scheme", ",", "url", "=", "self", ".", "options", "[", "'url'", "]", ",", "port", "=", "str", "(", "self", ".", "options", "[", "'port'", "]", ")", ",", "basepath", "=", "self", ".", "options", "[", "'basepath'", "]", ")", "websocket", "=", "yield", "from", "websockets", ".", "connect", "(", "url", ",", "ssl", "=", "context", ",", ")", "yield", "from", "self", ".", "_authenticate_websocket", "(", "websocket", ",", "event_handler", ")", "yield", "from", "self", ".", "_start_loop", "(", "websocket", ",", "event_handler", ")" ]
Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages, sending a ping to the server to keep the connection alive.

:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return:
[ "Connect", "to", "the", "websocket", "and", "authenticate", "it", ".", "When", "the", "authentication", "has", "finished", "start", "the", "loop", "listening", "for", "messages", "sending", "a", "ping", "to", "the", "server", "to", "keep", "the", "connection", "alive", "." ]
ad1a936130096e39c2e1b76d78913e5950e06ca5
https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/websocket.py#L19-L51
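A worked example of the URL construction in connect(), plugging placeholder options into the same format string:

url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
    scheme='wss://',
    url='chat.example.com',  # placeholder host
    port=str(443),
    basepath='/api/v4',
)
print(url)  # wss://chat.example.com:443/api/v4/websocket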
8,038
Vaelor/python-mattermost-driver
src/mattermostdriver/websocket.py
Websocket._authenticate_websocket
def _authenticate_websocket(self, websocket, event_handler):
    """
    Sends an authentication challenge over a websocket.
    This is not needed when we just send the cookie we got on login
    when connecting to the websocket.
    """
    log.debug('Authenticating websocket')
    json_data = json.dumps({
        "seq": 1,
        "action": "authentication_challenge",
        "data": {
            "token": self._token
        }
    }).encode('utf8')
    yield from websocket.send(json_data)
    while True:
        message = yield from websocket.recv()
        status = json.loads(message)
        log.debug(status)
        # We want to pass the events to the event_handler already
        # because the hello event could arrive before the authentication ok response
        yield from event_handler(message)
        if ('status' in status and status['status'] == 'OK') and \
                ('seq_reply' in status and status['seq_reply'] == 1):
            log.info('Websocket authentification OK')
            return True
        elif 'seq_reply' in status and status['seq_reply'] == 1:
            log.error('Websocket authentification failed')
python
def _authenticate_websocket(self, websocket, event_handler):
    """
    Sends an authentication challenge over a websocket.
    This is not needed when we just send the cookie we got on login
    when connecting to the websocket.
    """
    log.debug('Authenticating websocket')
    json_data = json.dumps({
        "seq": 1,
        "action": "authentication_challenge",
        "data": {
            "token": self._token
        }
    }).encode('utf8')
    yield from websocket.send(json_data)
    while True:
        message = yield from websocket.recv()
        status = json.loads(message)
        log.debug(status)
        # We want to pass the events to the event_handler already
        # because the hello event could arrive before the authentication ok response
        yield from event_handler(message)
        if ('status' in status and status['status'] == 'OK') and \
                ('seq_reply' in status and status['seq_reply'] == 1):
            log.info('Websocket authentification OK')
            return True
        elif 'seq_reply' in status and status['seq_reply'] == 1:
            log.error('Websocket authentification failed')
[ "def", "_authenticate_websocket", "(", "self", ",", "websocket", ",", "event_handler", ")", ":", "log", ".", "debug", "(", "'Authenticating websocket'", ")", "json_data", "=", "json", ".", "dumps", "(", "{", "\"seq\"", ":", "1", ",", "\"action\"", ":", "\"authentication_challenge\"", ",", "\"data\"", ":", "{", "\"token\"", ":", "self", ".", "_token", "}", "}", ")", ".", "encode", "(", "'utf8'", ")", "yield", "from", "websocket", ".", "send", "(", "json_data", ")", "while", "True", ":", "message", "=", "yield", "from", "websocket", ".", "recv", "(", ")", "status", "=", "json", ".", "loads", "(", "message", ")", "log", ".", "debug", "(", "status", ")", "# We want to pass the events to the event_handler already", "# because the hello event could arrive before the authentication ok response", "yield", "from", "event_handler", "(", "message", ")", "if", "(", "'status'", "in", "status", "and", "status", "[", "'status'", "]", "==", "'OK'", ")", "and", "(", "'seq_reply'", "in", "status", "and", "status", "[", "'seq_reply'", "]", "==", "1", ")", ":", "log", ".", "info", "(", "'Websocket authentification OK'", ")", "return", "True", "elif", "'seq_reply'", "in", "status", "and", "status", "[", "'seq_reply'", "]", "==", "1", ":", "log", ".", "error", "(", "'Websocket authentification failed'", ")" ]
Sends an authentication challenge over a websocket. This is not needed when we just send the cookie we got on login when connecting to the websocket.
[ "Sends", "a", "authentication", "challenge", "over", "a", "websocket", ".", "This", "is", "not", "needed", "when", "we", "just", "send", "the", "cookie", "we", "got", "on", "login", "when", "connecting", "to", "the", "websocket", "." ]
ad1a936130096e39c2e1b76d78913e5950e06ca5
https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/websocket.py#L73-L100
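For reference, the challenge payload that _authenticate_websocket() sends, and the reply shape it waits for; the reply literal is an assumption inferred from the checks above.

import json

challenge = json.dumps({
    "seq": 1,
    "action": "authentication_challenge",
    "data": {"token": "<session token>"},  # placeholder token
}).encode('utf8')

# Success is detected when a message like this arrives (assumed shape):
ok_reply = {"status": "OK", "seq_reply": 1}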
8,039
paulgb/runipy
runipy/notebook_runner.py
NotebookRunner.run_cell
def run_cell(self, cell):
    """Run a notebook cell and update the output of that cell in-place."""
    logging.info('Running cell:\n%s\n', cell.input)
    self.kc.execute(cell.input)
    reply = self.kc.get_shell_msg()
    status = reply['content']['status']
    traceback_text = ''
    if status == 'error':
        traceback_text = 'Cell raised uncaught exception: \n' + \
            '\n'.join(reply['content']['traceback'])
        logging.info(traceback_text)
    else:
        logging.info('Cell returned')

    outs = list()
    while True:
        try:
            msg = self.kc.get_iopub_msg(timeout=1)
            if msg['msg_type'] == 'status':
                if msg['content']['execution_state'] == 'idle':
                    break
        except Empty:
            # execution state should return to idle
            # before the queue becomes empty,
            # if it doesn't, something bad has happened
            raise

        content = msg['content']
        msg_type = msg['msg_type']

        # IPython 3.0.0-dev writes pyerr/pyout in the notebook format
        # but uses error/execute_result in the message spec. This does the
        # translation needed for tests to pass with IPython 3.0.0-dev
        notebook3_format_conversions = {
            'error': 'pyerr',
            'execute_result': 'pyout'
        }
        msg_type = notebook3_format_conversions.get(msg_type, msg_type)

        out = NotebookNode(output_type=msg_type)

        if 'execution_count' in content:
            cell['prompt_number'] = content['execution_count']
            out.prompt_number = content['execution_count']

        if msg_type in ('status', 'pyin', 'execute_input'):
            continue
        elif msg_type == 'stream':
            out.stream = content['name']
            # in msgspec 5, this is name, text
            # in msgspec 4, this is name, data
            if 'text' in content:
                out.text = content['text']
            else:
                out.text = content['data']
        elif msg_type in ('display_data', 'pyout'):
            for mime, data in content['data'].items():
                try:
                    attr = self.MIME_MAP[mime]
                except KeyError:
                    raise NotImplementedError(
                        'unhandled mime type: %s' % mime
                    )

                # In notebook version <= 3 JSON data is stored as a string
                # Evaluation of IPython2's JSON gives strings directly
                # Therefore do not encode for IPython versions prior to 3
                json_encode = (
                    IPython.version_info[0] >= 3 and
                    mime == "application/json")

                data_out = data if not json_encode else json.dumps(data)
                setattr(out, attr, data_out)
        elif msg_type == 'pyerr':
            out.ename = content['ename']
            out.evalue = content['evalue']
            out.traceback = content['traceback']
        elif msg_type == 'clear_output':
            outs = list()
            continue
        else:
            raise NotImplementedError(
                'unhandled iopub message: %s' % msg_type
            )
        outs.append(out)
    cell['outputs'] = outs

    if status == 'error':
        raise NotebookError(traceback_text)
python
def run_cell(self, cell):
    """Run a notebook cell and update the output of that cell in-place."""
    logging.info('Running cell:\n%s\n', cell.input)
    self.kc.execute(cell.input)
    reply = self.kc.get_shell_msg()
    status = reply['content']['status']
    traceback_text = ''
    if status == 'error':
        traceback_text = 'Cell raised uncaught exception: \n' + \
            '\n'.join(reply['content']['traceback'])
        logging.info(traceback_text)
    else:
        logging.info('Cell returned')

    outs = list()
    while True:
        try:
            msg = self.kc.get_iopub_msg(timeout=1)
            if msg['msg_type'] == 'status':
                if msg['content']['execution_state'] == 'idle':
                    break
        except Empty:
            # execution state should return to idle
            # before the queue becomes empty,
            # if it doesn't, something bad has happened
            raise

        content = msg['content']
        msg_type = msg['msg_type']

        # IPython 3.0.0-dev writes pyerr/pyout in the notebook format
        # but uses error/execute_result in the message spec. This does the
        # translation needed for tests to pass with IPython 3.0.0-dev
        notebook3_format_conversions = {
            'error': 'pyerr',
            'execute_result': 'pyout'
        }
        msg_type = notebook3_format_conversions.get(msg_type, msg_type)

        out = NotebookNode(output_type=msg_type)

        if 'execution_count' in content:
            cell['prompt_number'] = content['execution_count']
            out.prompt_number = content['execution_count']

        if msg_type in ('status', 'pyin', 'execute_input'):
            continue
        elif msg_type == 'stream':
            out.stream = content['name']
            # in msgspec 5, this is name, text
            # in msgspec 4, this is name, data
            if 'text' in content:
                out.text = content['text']
            else:
                out.text = content['data']
        elif msg_type in ('display_data', 'pyout'):
            for mime, data in content['data'].items():
                try:
                    attr = self.MIME_MAP[mime]
                except KeyError:
                    raise NotImplementedError(
                        'unhandled mime type: %s' % mime
                    )

                # In notebook version <= 3 JSON data is stored as a string
                # Evaluation of IPython2's JSON gives strings directly
                # Therefore do not encode for IPython versions prior to 3
                json_encode = (
                    IPython.version_info[0] >= 3 and
                    mime == "application/json")

                data_out = data if not json_encode else json.dumps(data)
                setattr(out, attr, data_out)
        elif msg_type == 'pyerr':
            out.ename = content['ename']
            out.evalue = content['evalue']
            out.traceback = content['traceback']
        elif msg_type == 'clear_output':
            outs = list()
            continue
        else:
            raise NotImplementedError(
                'unhandled iopub message: %s' % msg_type
            )
        outs.append(out)
    cell['outputs'] = outs

    if status == 'error':
        raise NotebookError(traceback_text)
[ "def", "run_cell", "(", "self", ",", "cell", ")", ":", "logging", ".", "info", "(", "'Running cell:\\n%s\\n'", ",", "cell", ".", "input", ")", "self", ".", "kc", ".", "execute", "(", "cell", ".", "input", ")", "reply", "=", "self", ".", "kc", ".", "get_shell_msg", "(", ")", "status", "=", "reply", "[", "'content'", "]", "[", "'status'", "]", "traceback_text", "=", "''", "if", "status", "==", "'error'", ":", "traceback_text", "=", "'Cell raised uncaught exception: \\n'", "+", "'\\n'", ".", "join", "(", "reply", "[", "'content'", "]", "[", "'traceback'", "]", ")", "logging", ".", "info", "(", "traceback_text", ")", "else", ":", "logging", ".", "info", "(", "'Cell returned'", ")", "outs", "=", "list", "(", ")", "while", "True", ":", "try", ":", "msg", "=", "self", ".", "kc", ".", "get_iopub_msg", "(", "timeout", "=", "1", ")", "if", "msg", "[", "'msg_type'", "]", "==", "'status'", ":", "if", "msg", "[", "'content'", "]", "[", "'execution_state'", "]", "==", "'idle'", ":", "break", "except", "Empty", ":", "# execution state should return to idle", "# before the queue becomes empty,", "# if it doesn't, something bad has happened", "raise", "content", "=", "msg", "[", "'content'", "]", "msg_type", "=", "msg", "[", "'msg_type'", "]", "# IPython 3.0.0-dev writes pyerr/pyout in the notebook format", "# but uses error/execute_result in the message spec. This does the", "# translation needed for tests to pass with IPython 3.0.0-dev", "notebook3_format_conversions", "=", "{", "'error'", ":", "'pyerr'", ",", "'execute_result'", ":", "'pyout'", "}", "msg_type", "=", "notebook3_format_conversions", ".", "get", "(", "msg_type", ",", "msg_type", ")", "out", "=", "NotebookNode", "(", "output_type", "=", "msg_type", ")", "if", "'execution_count'", "in", "content", ":", "cell", "[", "'prompt_number'", "]", "=", "content", "[", "'execution_count'", "]", "out", ".", "prompt_number", "=", "content", "[", "'execution_count'", "]", "if", "msg_type", "in", "(", "'status'", ",", "'pyin'", ",", "'execute_input'", ")", ":", "continue", "elif", "msg_type", "==", "'stream'", ":", "out", ".", "stream", "=", "content", "[", "'name'", "]", "# in msgspec 5, this is name, text", "# in msgspec 4, this is name, data", "if", "'text'", "in", "content", ":", "out", ".", "text", "=", "content", "[", "'text'", "]", "else", ":", "out", ".", "text", "=", "content", "[", "'data'", "]", "elif", "msg_type", "in", "(", "'display_data'", ",", "'pyout'", ")", ":", "for", "mime", ",", "data", "in", "content", "[", "'data'", "]", ".", "items", "(", ")", ":", "try", ":", "attr", "=", "self", ".", "MIME_MAP", "[", "mime", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "'unhandled mime type: %s'", "%", "mime", ")", "# In notebook version <= 3 JSON data is stored as a string", "# Evaluation of IPython2's JSON gives strings directly", "# Therefore do not encode for IPython versions prior to 3", "json_encode", "=", "(", "IPython", ".", "version_info", "[", "0", "]", ">=", "3", "and", "mime", "==", "\"application/json\"", ")", "data_out", "=", "data", "if", "not", "json_encode", "else", "json", ".", "dumps", "(", "data", ")", "setattr", "(", "out", ",", "attr", ",", "data_out", ")", "elif", "msg_type", "==", "'pyerr'", ":", "out", ".", "ename", "=", "content", "[", "'ename'", "]", "out", ".", "evalue", "=", "content", "[", "'evalue'", "]", "out", ".", "traceback", "=", "content", "[", "'traceback'", "]", "elif", "msg_type", "==", "'clear_output'", ":", "outs", "=", "list", "(", ")", "continue", "else", ":", "raise", 
"NotImplementedError", "(", "'unhandled iopub message: %s'", "%", "msg_type", ")", "outs", ".", "append", "(", "out", ")", "cell", "[", "'outputs'", "]", "=", "outs", "if", "status", "==", "'error'", ":", "raise", "NotebookError", "(", "traceback_text", ")" ]
Run a notebook cell and update the output of that cell in-place.
[ "Run", "a", "notebook", "cell", "and", "update", "the", "output", "of", "that", "cell", "in", "-", "place", "." ]
d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe
https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L138-L226
8,040
paulgb/runipy
runipy/notebook_runner.py
NotebookRunner.iter_code_cells
def iter_code_cells(self):
    """Iterate over the notebook cells containing code."""
    for ws in self.nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type == 'code':
                yield cell
python
def iter_code_cells(self):
    """Iterate over the notebook cells containing code."""
    for ws in self.nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type == 'code':
                yield cell
[ "def", "iter_code_cells", "(", "self", ")", ":", "for", "ws", "in", "self", ".", "nb", ".", "worksheets", ":", "for", "cell", "in", "ws", ".", "cells", ":", "if", "cell", ".", "cell_type", "==", "'code'", ":", "yield", "cell" ]
Iterate over the notebook cells containing code.
[ "Iterate", "over", "the", "notebook", "cells", "containing", "code", "." ]
d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe
https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L228-L233
8,041
paulgb/runipy
runipy/notebook_runner.py
NotebookRunner.run_notebook
def run_notebook(self, skip_exceptions=False, progress_callback=None):
    """
    Run all the notebook cells in order and update the outputs in-place.

    If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
    subsequent cells are run (by default, the notebook execution stops).
    """
    for i, cell in enumerate(self.iter_code_cells()):
        try:
            self.run_cell(cell)
        except NotebookError:
            if not skip_exceptions:
                raise
        if progress_callback:
            progress_callback(i)
python
def run_notebook(self, skip_exceptions=False, progress_callback=None):
    """
    Run all the notebook cells in order and update the outputs in-place.

    If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
    subsequent cells are run (by default, the notebook execution stops).
    """
    for i, cell in enumerate(self.iter_code_cells()):
        try:
            self.run_cell(cell)
        except NotebookError:
            if not skip_exceptions:
                raise
        if progress_callback:
            progress_callback(i)
[ "def", "run_notebook", "(", "self", ",", "skip_exceptions", "=", "False", ",", "progress_callback", "=", "None", ")", ":", "for", "i", ",", "cell", "in", "enumerate", "(", "self", ".", "iter_code_cells", "(", ")", ")", ":", "try", ":", "self", ".", "run_cell", "(", "cell", ")", "except", "NotebookError", ":", "if", "not", "skip_exceptions", ":", "raise", "if", "progress_callback", ":", "progress_callback", "(", "i", ")" ]
Run all the notebook cells in order and update the outputs in-place. If ``skip_exceptions`` is set, then if exceptions occur in a cell, the subsequent cells are run (by default, the notebook execution stops).
[ "Run", "all", "the", "notebook", "cells", "in", "order", "and", "update", "the", "outputs", "in", "-", "place", "." ]
d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe
https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L235-L249
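Typical driver code for the three runipy methods above, following the project's documented usage; the nbformat import path is an assumption that varies across IPython versions.

from runipy.notebook_runner import NotebookRunner
from IPython.nbformat.current import read  # location differs by IPython version (assumption)

with open('MyNotebook.ipynb') as f:
    notebook = read(f, 'json')

runner = NotebookRunner(notebook)
runner.run_notebook(skip_exceptions=True,
                    progress_callback=lambda i: print('ran cell', i))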
8,042
metacloud/gilt
gilt/config.py
config
def config(filename):
    """
    Construct `Config` object and return a list.

    :param filename: A string containing the path to YAML file.
    :return: list
    """
    Config = collections.namedtuple('Config', [
        'git',
        'lock_file',
        'version',
        'name',
        'src',
        'dst',
        'files',
        'post_commands',
    ])

    return [Config(**d) for d in _get_config_generator(filename)]
python
def config(filename):
    """
    Construct `Config` object and return a list.

    :param filename: A string containing the path to YAML file.
    :return: list
    """
    Config = collections.namedtuple('Config', [
        'git',
        'lock_file',
        'version',
        'name',
        'src',
        'dst',
        'files',
        'post_commands',
    ])

    return [Config(**d) for d in _get_config_generator(filename)]
[ "def", "config", "(", "filename", ")", ":", "Config", "=", "collections", ".", "namedtuple", "(", "'Config'", ",", "[", "'git'", ",", "'lock_file'", ",", "'version'", ",", "'name'", ",", "'src'", ",", "'dst'", ",", "'files'", ",", "'post_commands'", ",", "]", ")", "return", "[", "Config", "(", "*", "*", "d", ")", "for", "d", "in", "_get_config_generator", "(", "filename", ")", "]" ]
Construct `Config` object and return a list.

:param filename: A string containing the path to YAML file.
:return: list
[ "Construct", "Config", "object", "and", "return", "a", "list", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L41-L59
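config() relies on namedtuple(**dict) unpacking, which requires each generated dict's keys to match the tuple's field names exactly. A minimal standalone illustration of that pattern (names here are illustrative, not gilt's):

import collections

Point = collections.namedtuple('Point', ['x', 'y'])
rows = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]
points = [Point(**d) for d in rows]  # TypeError if a key is missing or extra
print(points[0].x)  # 1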
8,043
metacloud/gilt
gilt/config.py
_get_files_config
def _get_files_config(src_dir, files_list):
    """
    Construct `FileConfig` object and return a list.

    :param src_dir: A string containing the source directory.
    :param files_list: A list of dicts containing the src/dst mapping of
     files to overlay.
    :return: list
    """
    FilesConfig = collections.namedtuple('FilesConfig',
                                         ['src', 'dst', 'post_commands'])

    return [
        FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
    ]
python
def _get_files_config(src_dir, files_list):
    """
    Construct `FileConfig` object and return a list.

    :param src_dir: A string containing the source directory.
    :param files_list: A list of dicts containing the src/dst mapping of
     files to overlay.
    :return: list
    """
    FilesConfig = collections.namedtuple('FilesConfig',
                                         ['src', 'dst', 'post_commands'])

    return [
        FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
    ]
[ "def", "_get_files_config", "(", "src_dir", ",", "files_list", ")", ":", "FilesConfig", "=", "collections", ".", "namedtuple", "(", "'FilesConfig'", ",", "[", "'src'", ",", "'dst'", ",", "'post_commands'", "]", ")", "return", "[", "FilesConfig", "(", "*", "*", "d", ")", "for", "d", "in", "_get_files_generator", "(", "src_dir", ",", "files_list", ")", "]" ]
Construct `FileConfig` object and return a list.

:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files to overlay.
:return: list
[ "Construct", "FileConfig", "object", "and", "return", "a", "list", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L62-L76
8,044
metacloud/gilt
gilt/config.py
_get_config
def _get_config(filename):
    """
    Parse the provided YAML file and return a dict.

    :param filename: A string containing the path to YAML file.
    :return: dict
    """
    i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
                                   os.environ)
    with open(filename, 'r') as stream:
        try:
            interpolated_config = i.interpolate(stream.read())
            return yaml.safe_load(interpolated_config)
        except yaml.parser.ParserError as e:
            msg = 'Error parsing gilt config: {0}'.format(e)
            raise ParseError(msg)
python
def _get_config(filename):
    """
    Parse the provided YAML file and return a dict.

    :param filename: A string containing the path to YAML file.
    :return: dict
    """
    i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
                                   os.environ)
    with open(filename, 'r') as stream:
        try:
            interpolated_config = i.interpolate(stream.read())
            return yaml.safe_load(interpolated_config)
        except yaml.parser.ParserError as e:
            msg = 'Error parsing gilt config: {0}'.format(e)
            raise ParseError(msg)
[ "def", "_get_config", "(", "filename", ")", ":", "i", "=", "interpolation", ".", "Interpolator", "(", "interpolation", ".", "TemplateWithDefaults", ",", "os", ".", "environ", ")", "with", "open", "(", "filename", ",", "'r'", ")", "as", "stream", ":", "try", ":", "interpolated_config", "=", "i", ".", "interpolate", "(", "stream", ".", "read", "(", ")", ")", "return", "yaml", ".", "safe_load", "(", "interpolated_config", ")", "except", "yaml", ".", "parser", ".", "ParserError", "as", "e", ":", "msg", "=", "'Error parsing gilt config: {0}'", ".", "format", "(", "e", ")", "raise", "ParseError", "(", "msg", ")" ]
Parse the provided YAML file and return a dict.

:param filename: A string containing the path to YAML file.
:return: dict
[ "Parse", "the", "provided", "YAML", "file", "and", "return", "a", "dict", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L126-L142
8,045
metacloud/gilt
gilt/config.py
_get_dst_dir
def _get_dst_dir(dst_dir):
    """
    Prefix the provided string with working directory and return a str.

    :param dst_dir: A string to be prefixed with the working dir.
    :return: str
    """
    wd = os.getcwd()
    _makedirs(dst_dir)

    return os.path.join(wd, dst_dir)
python
def _get_dst_dir(dst_dir):
    """
    Prefix the provided string with working directory and return a str.

    :param dst_dir: A string to be prefixed with the working dir.
    :return: str
    """
    wd = os.getcwd()
    _makedirs(dst_dir)

    return os.path.join(wd, dst_dir)
[ "def", "_get_dst_dir", "(", "dst_dir", ")", ":", "wd", "=", "os", ".", "getcwd", "(", ")", "_makedirs", "(", "dst_dir", ")", "return", "os", ".", "path", ".", "join", "(", "wd", ",", "dst_dir", ")" ]
Prefix the provided string with working directory and return a str.

:param dst_dir: A string to be prefixed with the working dir.
:return: str
[ "Prefix", "the", "provided", "string", "with", "working", "directory", "and", "return", "a", "str", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L145-L156
8,046
metacloud/gilt
gilt/config.py
_makedirs
def _makedirs(path):
    """
    Create a base directory of the provided path and return None.

    :param path: A string containing a path to be deconstructed and
     basedir created.
    :return: None
    """
    dirname, _ = os.path.split(path)
    try:
        os.makedirs(dirname)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
python
def _makedirs(path):
    """
    Create a base directory of the provided path and return None.

    :param path: A string containing a path to be deconstructed and
     basedir created.
    :return: None
    """
    dirname, _ = os.path.split(path)
    try:
        os.makedirs(dirname)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
[ "def", "_makedirs", "(", "path", ")", ":", "dirname", ",", "_", "=", "os", ".", "path", ".", "split", "(", "path", ")", "try", ":", "os", ".", "makedirs", "(", "dirname", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "EEXIST", ":", "pass", "else", ":", "raise" ]
Create a base directory of the provided path and return None.

:param path: A string containing a path to be deconstructed and basedir created.
:return: None
[ "Create", "a", "base", "directory", "of", "the", "provided", "path", "and", "return", "None", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L193-L208
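_makedirs() uses the EEXIST dance because it stays compatible with Python 2; on Python 3.2+ the same effect is a single call. A sketch of the modern equivalent (not gilt's code):

import os

def makedirs_py3(path):
    # Create the parent directory of `path`, ignoring "already exists".
    dirname, _ = os.path.split(path)
    os.makedirs(dirname, exist_ok=True)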
8,047
metacloud/gilt
gilt/shell.py
main
def main(ctx, config, debug):  # pragma: no cover
    """ gilt - A GIT layering tool. """
    ctx.obj = {}
    ctx.obj['args'] = {}
    ctx.obj['args']['debug'] = debug
    ctx.obj['args']['config'] = config
python
def main(ctx, config, debug):  # pragma: no cover
    """ gilt - A GIT layering tool. """
    ctx.obj = {}
    ctx.obj['args'] = {}
    ctx.obj['args']['debug'] = debug
    ctx.obj['args']['config'] = config
[ "def", "main", "(", "ctx", ",", "config", ",", "debug", ")", ":", "# pragma: no cover", "ctx", ".", "obj", "=", "{", "}", "ctx", ".", "obj", "[", "'args'", "]", "=", "{", "}", "ctx", ".", "obj", "[", "'args'", "]", "[", "'debug'", "]", "=", "debug", "ctx", ".", "obj", "[", "'args'", "]", "[", "'config'", "]", "=", "config" ]
gilt - A GIT layering tool.
[ "gilt", "-", "A", "GIT", "layering", "tool", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/shell.py#L50-L55
8,048
metacloud/gilt
gilt/shell.py
overlay
def overlay(ctx):  # pragma: no cover
    """ Install gilt dependencies """
    args = ctx.obj.get('args')
    filename = args.get('config')
    debug = args.get('debug')
    _setup(filename)

    for c in config.config(filename):
        with fasteners.InterProcessLock(c.lock_file):
            util.print_info('{}:'.format(c.name))
            if not os.path.exists(c.src):
                git.clone(c.name, c.git, c.src, debug=debug)
            if c.dst:
                git.extract(c.src, c.dst, c.version, debug=debug)
                post_commands = {c.dst: c.post_commands}
            else:
                git.overlay(c.src, c.files, c.version, debug=debug)
                post_commands = {
                    conf.dst: conf.post_commands
                    for conf in c.files
                }
            # Run post commands if any.
            for dst, commands in post_commands.items():
                for command in commands:
                    msg = ' - running `{}` in {}'.format(command, dst)
                    util.print_info(msg)
                    cmd = util.build_sh_cmd(command, cwd=dst)
                    util.run_command(cmd, debug=debug)
python
def overlay(ctx):  # pragma: no cover
    """ Install gilt dependencies """
    args = ctx.obj.get('args')
    filename = args.get('config')
    debug = args.get('debug')
    _setup(filename)

    for c in config.config(filename):
        with fasteners.InterProcessLock(c.lock_file):
            util.print_info('{}:'.format(c.name))
            if not os.path.exists(c.src):
                git.clone(c.name, c.git, c.src, debug=debug)
            if c.dst:
                git.extract(c.src, c.dst, c.version, debug=debug)
                post_commands = {c.dst: c.post_commands}
            else:
                git.overlay(c.src, c.files, c.version, debug=debug)
                post_commands = {
                    conf.dst: conf.post_commands
                    for conf in c.files
                }
            # Run post commands if any.
            for dst, commands in post_commands.items():
                for command in commands:
                    msg = ' - running `{}` in {}'.format(command, dst)
                    util.print_info(msg)
                    cmd = util.build_sh_cmd(command, cwd=dst)
                    util.run_command(cmd, debug=debug)
[ "def", "overlay", "(", "ctx", ")", ":", "# pragma: no cover", "args", "=", "ctx", ".", "obj", ".", "get", "(", "'args'", ")", "filename", "=", "args", ".", "get", "(", "'config'", ")", "debug", "=", "args", ".", "get", "(", "'debug'", ")", "_setup", "(", "filename", ")", "for", "c", "in", "config", ".", "config", "(", "filename", ")", ":", "with", "fasteners", ".", "InterProcessLock", "(", "c", ".", "lock_file", ")", ":", "util", ".", "print_info", "(", "'{}:'", ".", "format", "(", "c", ".", "name", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "c", ".", "src", ")", ":", "git", ".", "clone", "(", "c", ".", "name", ",", "c", ".", "git", ",", "c", ".", "src", ",", "debug", "=", "debug", ")", "if", "c", ".", "dst", ":", "git", ".", "extract", "(", "c", ".", "src", ",", "c", ".", "dst", ",", "c", ".", "version", ",", "debug", "=", "debug", ")", "post_commands", "=", "{", "c", ".", "dst", ":", "c", ".", "post_commands", "}", "else", ":", "git", ".", "overlay", "(", "c", ".", "src", ",", "c", ".", "files", ",", "c", ".", "version", ",", "debug", "=", "debug", ")", "post_commands", "=", "{", "conf", ".", "dst", ":", "conf", ".", "post_commands", "for", "conf", "in", "c", ".", "files", "}", "# Run post commands if any.", "for", "dst", ",", "commands", "in", "post_commands", ".", "items", "(", ")", ":", "for", "command", "in", "commands", ":", "msg", "=", "' - running `{}` in {}'", ".", "format", "(", "command", ",", "dst", ")", "util", ".", "print_info", "(", "msg", ")", "cmd", "=", "util", ".", "build_sh_cmd", "(", "command", ",", "cwd", "=", "dst", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")" ]
Install gilt dependencies
[ "Install", "gilt", "dependencies" ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/shell.py#L60-L87
8,049
metacloud/gilt
gilt/git.py
clone
def clone(name, repository, destination, debug=False):
    """
    Clone the specified repository into a temporary directory and return None.

    :param name: A string containing the name of the repository being cloned.
    :param repository: A string containing the repository to clone.
    :param destination: A string containing the directory to clone the
     repository into.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    msg = ' - cloning {} to {}'.format(name, destination)
    util.print_info(msg)
    cmd = sh.git.bake('clone', repository, destination)
    util.run_command(cmd, debug=debug)
python
def clone(name, repository, destination, debug=False):
    """
    Clone the specified repository into a temporary directory and return None.

    :param name: A string containing the name of the repository being cloned.
    :param repository: A string containing the repository to clone.
    :param destination: A string containing the directory to clone the
     repository into.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    msg = ' - cloning {} to {}'.format(name, destination)
    util.print_info(msg)
    cmd = sh.git.bake('clone', repository, destination)
    util.run_command(cmd, debug=debug)
[ "def", "clone", "(", "name", ",", "repository", ",", "destination", ",", "debug", "=", "False", ")", ":", "msg", "=", "' - cloning {} to {}'", ".", "format", "(", "name", ",", "destination", ")", "util", ".", "print_info", "(", "msg", ")", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'clone'", ",", "repository", ",", "destination", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")" ]
Clone the specified repository into a temporary directory and return None.

:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the repository into.
:param debug: An optional bool to toggle debug output.
:return: None
[ "Clone", "the", "specified", "repository", "into", "a", "temporary", "directory", "and", "return", "None", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L32-L46
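clone() and the helpers below build commands with sh's bake(), which returns a partially-applied command that only runs when called. A small sketch of that pattern; the repository and path are placeholders and the printed rendering is approximate.

import sh

cmd = sh.git.bake('clone', 'https://example.com/repo.git', '/tmp/dest')  # placeholders
print(cmd)  # roughly: /usr/bin/git clone https://example.com/repo.git /tmp/dest
cmd()       # executes the prepared command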
8,050
metacloud/gilt
gilt/git.py
_get_version
def _get_version(version, debug=False):
    """
    Handle switching to the specified version and return None.

    1. Fetch the origin.
    2. Checkout the specified version.
    3. Clean the repository before we begin.
    4. Pull the origin when a branch; _not_ a commit id.

    :param version: A string containing the branch/tag/sha to be exported.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    if not any(
        (_has_branch(version, debug), _has_tag(version, debug), _has_commit(
            version, debug))):
        cmd = sh.git.bake('fetch')
        util.run_command(cmd, debug=debug)
    cmd = sh.git.bake('checkout', version)
    util.run_command(cmd, debug=debug)
    cmd = sh.git.bake('clean', '-d', '-x', '-f')
    util.run_command(cmd, debug=debug)
    if _has_branch(version, debug):
        cmd = sh.git.bake('pull', rebase=True, ff_only=True)
        util.run_command(cmd, debug=debug)
python
def _get_version(version, debug=False):
    """
    Handle switching to the specified version and return None.

    1. Fetch the origin.
    2. Checkout the specified version.
    3. Clean the repository before we begin.
    4. Pull the origin when a branch; _not_ a commit id.

    :param version: A string containing the branch/tag/sha to be exported.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    if not any(
        (_has_branch(version, debug), _has_tag(version, debug), _has_commit(
            version, debug))):
        cmd = sh.git.bake('fetch')
        util.run_command(cmd, debug=debug)
    cmd = sh.git.bake('checkout', version)
    util.run_command(cmd, debug=debug)
    cmd = sh.git.bake('clean', '-d', '-x', '-f')
    util.run_command(cmd, debug=debug)
    if _has_branch(version, debug):
        cmd = sh.git.bake('pull', rebase=True, ff_only=True)
        util.run_command(cmd, debug=debug)
[ "def", "_get_version", "(", "version", ",", "debug", "=", "False", ")", ":", "if", "not", "any", "(", "(", "_has_branch", "(", "version", ",", "debug", ")", ",", "_has_tag", "(", "version", ",", "debug", ")", ",", "_has_commit", "(", "version", ",", "debug", ")", ")", ")", ":", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'fetch'", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'checkout'", ",", "version", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'clean'", ",", "'-d'", ",", "'-x'", ",", "'-f'", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")", "if", "_has_branch", "(", "version", ",", "debug", ")", ":", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'pull'", ",", "rebase", "=", "True", ",", "ff_only", "=", "True", ")", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")" ]
Handle switching to the specified version and return None.

1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.

:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
[ "Handle", "switching", "to", "the", "specified", "version", "and", "return", "None", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L109-L133
8,051
metacloud/gilt
gilt/git.py
_has_commit
def _has_commit(version, debug=False):
    """
    Determine whether a version is a local git commit sha or not.

    :param version: A string containing the branch/tag/sha to be determined.
    :param debug: An optional bool to toggle debug output.
    :return: bool
    """
    if _has_tag(version, debug) or _has_branch(version, debug):
        return False
    cmd = sh.git.bake('cat-file', '-e', version)
    try:
        util.run_command(cmd, debug=debug)
        return True
    except sh.ErrorReturnCode:
        return False
python
def _has_commit(version, debug=False):
    """
    Determine whether a version is a local git commit sha or not.

    :param version: A string containing the branch/tag/sha to be determined.
    :param debug: An optional bool to toggle debug output.
    :return: bool
    """
    if _has_tag(version, debug) or _has_branch(version, debug):
        return False
    cmd = sh.git.bake('cat-file', '-e', version)
    try:
        util.run_command(cmd, debug=debug)
        return True
    except sh.ErrorReturnCode:
        return False
[ "def", "_has_commit", "(", "version", ",", "debug", "=", "False", ")", ":", "if", "_has_tag", "(", "version", ",", "debug", ")", "or", "_has_branch", "(", "version", ",", "debug", ")", ":", "return", "False", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'cat-file'", ",", "'-e'", ",", "version", ")", "try", ":", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")", "return", "True", "except", "sh", ".", "ErrorReturnCode", ":", "return", "False" ]
Determine whether a version is a local git commit sha.

:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
[ "Determine", "a", "version", "is", "a", "local", "git", "commit", "sha", "or", "not", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L136-L151
8,052
metacloud/gilt
gilt/git.py
_has_tag
def _has_tag(version, debug=False):
    """
    Determine whether a version is a local git tag name.

    :param version: A string containing the branch/tag/sha to be determined.
    :param debug: An optional bool to toggle debug output.
    :return: bool
    """
    cmd = sh.git.bake('show-ref', '--verify', '--quiet',
                      "refs/tags/{}".format(version))
    try:
        util.run_command(cmd, debug=debug)
        return True
    except sh.ErrorReturnCode:
        return False
python
def _has_tag(version, debug=False):
    """
    Determine whether a version is a local git tag name.

    :param version: A string containing the branch/tag/sha to be determined.
    :param debug: An optional bool to toggle debug output.
    :return: bool
    """
    cmd = sh.git.bake('show-ref', '--verify', '--quiet',
                      "refs/tags/{}".format(version))
    try:
        util.run_command(cmd, debug=debug)
        return True
    except sh.ErrorReturnCode:
        return False
[ "def", "_has_tag", "(", "version", ",", "debug", "=", "False", ")", ":", "cmd", "=", "sh", ".", "git", ".", "bake", "(", "'show-ref'", ",", "'--verify'", ",", "'--quiet'", ",", "\"refs/tags/{}\"", ".", "format", "(", "version", ")", ")", "try", ":", "util", ".", "run_command", "(", "cmd", ",", "debug", "=", "debug", ")", "return", "True", "except", "sh", ".", "ErrorReturnCode", ":", "return", "False" ]
Determine whether a version is a local git tag name.

:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
[ "Determine", "a", "version", "is", "a", "local", "git", "tag", "name", "or", "not", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L154-L168
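Taken together, `_has_tag` and `_has_commit` impose an ordering: branch and tag names shadow raw shas, and only names that are neither fall through to the `cat-file -e` object probe. A sketch of the equivalent raw git calls (an illustration, not gilt API; the `refs/heads/` form for branches is an assumption, since `_has_branch` is not shown in this dump):

import sh

def ref_kind(version):
    # mirrors the shadowing in _has_commit: branch/tag win over a bare sha
    for kind, ref in (('branch', 'refs/heads/{}'), ('tag', 'refs/tags/{}')):
        try:
            sh.git('show-ref', '--verify', '--quiet', ref.format(version))
            return kind
        except sh.ErrorReturnCode:
            pass
    try:
        sh.git('cat-file', '-e', version)   # does this object exist locally?
        return 'commit'
    except sh.ErrorReturnCode:
        return 'unknown'
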
8,053
metacloud/gilt
gilt/util.py
run_command
def run_command(cmd, debug=False):
    """
    Execute the given command and return None.

    :param cmd: A `sh.Command` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    if debug:
        msg = ' PWD: {}'.format(os.getcwd())
        print_warn(msg)
        msg = ' COMMAND: {}'.format(cmd)
        print_warn(msg)
    cmd()
python
def run_command(cmd, debug=False):
    """
    Execute the given command and return None.

    :param cmd: A `sh.Command` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: None
    """
    if debug:
        msg = ' PWD: {}'.format(os.getcwd())
        print_warn(msg)
        msg = ' COMMAND: {}'.format(cmd)
        print_warn(msg)
    cmd()
[ "def", "run_command", "(", "cmd", ",", "debug", "=", "False", ")", ":", "if", "debug", ":", "msg", "=", "' PWD: {}'", ".", "format", "(", "os", ".", "getcwd", "(", ")", ")", "print_warn", "(", "msg", ")", "msg", "=", "' COMMAND: {}'", ".", "format", "(", "cmd", ")", "print_warn", "(", "msg", ")", "cmd", "(", ")" ]
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
[ "Execute", "the", "given", "command", "and", "return", "None", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L46-L59
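With `debug` set, the helper prints the working directory and the fully baked command line before executing it. A minimal sketch (the printed lines are illustrative; `print_warn`'s exact formatting is not shown in this dump):

import sh
from gilt import util   # assumption: matches gilt/util.py above

cmd = sh.git.bake('status', '--short')
util.run_command(cmd, debug=True)
#  PWD: /home/user/project          <- illustrative print_warn output
#  COMMAND: /usr/bin/git status --short
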
8,054
metacloud/gilt
gilt/util.py
build_sh_cmd
def build_sh_cmd(cmd, cwd=None):
    """Build a `sh.Command` from a string.

    :param cmd: String with the command to convert.
    :param cwd: Optional path to use as working directory.
    :return: `sh.Command`
    """
    args = cmd.split()
    return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
python
def build_sh_cmd(cmd, cwd=None):
    """Build a `sh.Command` from a string.

    :param cmd: String with the command to convert.
    :param cwd: Optional path to use as working directory.
    :return: `sh.Command`
    """
    args = cmd.split()
    return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
[ "def", "build_sh_cmd", "(", "cmd", ",", "cwd", "=", "None", ")", ":", "args", "=", "cmd", ".", "split", "(", ")", "return", "getattr", "(", "sh", ",", "args", "[", "0", "]", ")", ".", "bake", "(", "_cwd", "=", "cwd", ",", "*", "args", "[", "1", ":", "]", ")" ]
Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command`
[ "Build", "a", "sh", ".", "Command", "from", "a", "string", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L62-L70
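Because `bake` freezes both the arguments and the working directory, the returned command can be executed later from anywhere. A quick usage sketch (paths hypothetical):

from gilt.util import build_sh_cmd   # assumption: importable as above

cmd = build_sh_cmd('git log --oneline -5', cwd='/tmp/some-clone')
cmd()   # runs `git log --oneline -5` with /tmp/some-clone as its cwd

Note that the naive `cmd.split()` means arguments that themselves contain spaces (e.g. quoted commit messages) are not supported by this helper.
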
8,055
metacloud/gilt
gilt/util.py
copy
def copy(src, dst):
    """
    Handle the copying of a file or directory.

    The destination basedir _must_ exist.

    :param src: A string containing the path of the source to copy.  If the
     source ends with a '/', will become a recursive directory copy of source.
    :param dst: A string containing the path to the destination.  If the
     destination ends with a '/', will copy into the target directory.
    :return: None
    """
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno == errno.ENOTDIR:
            shutil.copy(src, dst)
        else:
            raise
python
def copy(src, dst):
    """
    Handle the copying of a file or directory.

    The destination basedir _must_ exist.

    :param src: A string containing the path of the source to copy.  If the
     source ends with a '/', will become a recursive directory copy of source.
    :param dst: A string containing the path to the destination.  If the
     destination ends with a '/', will copy into the target directory.
    :return: None
    """
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno == errno.ENOTDIR:
            shutil.copy(src, dst)
        else:
            raise
[ "def", "copy", "(", "src", ",", "dst", ")", ":", "try", ":", "shutil", ".", "copytree", "(", "src", ",", "dst", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "ENOTDIR", ":", "shutil", ".", "copy", "(", "src", ",", "dst", ")", "else", ":", "raise" ]
Handle the copying of a file or directory. The destination basedir _must_ exist. :param src: A string containing the path of the source to copy. If the source ends with a '/', will become a recursive directory copy of source. :param dst: A string containing the path to the destination. If the destination ends with a '/', will copy into the target directory. :return: None
[ "Handle", "the", "copying", "of", "a", "file", "or", "directory", "." ]
234eec23fe2f8144369d0ec3b35ad2fef508b8d1
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L83-L101
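The `ENOTDIR` branch is what lets one function serve both cases: `shutil.copytree` raises an `OSError` with `errno.ENOTDIR` when `src` is a plain file, and the handler falls back to a flat `shutil.copy`. A usage sketch (paths made up for illustration):

from gilt import util                       # assumption: matches gilt/util.py above

util.copy('/tmp/indir/', '/tmp/outdir')     # directory: shutil.copytree path
util.copy('/tmp/in.txt', '/tmp/outdir/')    # file: ENOTDIR fallback to shutil.copy
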
8,056
yhat/db.py
db/table.py
Table.to_dict
def to_dict(self):
    """Serialize representation of the table for local caching."""
    return {'schema': self.schema, 'name': self.name,
            'columns': [col.to_dict() for col in self._columns],
            'foreign_keys': self.foreign_keys.to_dict(),
            'ref_keys': self.ref_keys.to_dict()}
python
def to_dict(self):
    """Serialize representation of the table for local caching."""
    return {'schema': self.schema, 'name': self.name,
            'columns': [col.to_dict() for col in self._columns],
            'foreign_keys': self.foreign_keys.to_dict(),
            'ref_keys': self.ref_keys.to_dict()}
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'schema'", ":", "self", ".", "schema", ",", "'name'", ":", "self", ".", "name", ",", "'columns'", ":", "[", "col", ".", "to_dict", "(", ")", "for", "col", "in", "self", ".", "_columns", "]", ",", "'foreign_keys'", ":", "self", ".", "foreign_keys", ".", "to_dict", "(", ")", ",", "'ref_keys'", ":", "self", ".", "ref_keys", ".", "to_dict", "(", ")", "}" ]
Serialize representation of the table for local caching.
[ "Serialize", "representation", "of", "the", "table", "for", "local", "caching", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/table.py#L348-L351
8,057
yhat/db.py
db/db.py
list_profiles
def list_profiles():
    """
    Lists all of the database profiles available

    Examples
    --------
    No doctest, covered by unittest
    list_profiles()
    {'demo': {u'dbname': None,
              u'dbtype': u'sqlite',
              u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',
              u'hostname': u'localhost',
              u'password': None,
              u'port': 5432,
              u'username': None},
     'muppets': {u'dbname': u'muppetdb',
                 u'dbtype': u'postgres',
                 u'filename': None,
                 u'hostname': u'muppets.yhathq.com',
                 u'password': None,
                 u'port': 5432,
                 u'username': u'kermit'}}
    """
    profiles = {}
    user = os.path.expanduser("~")
    for f in os.listdir(user):
        if f.startswith(".db.py_"):
            profile = load_from_json(os.path.join(user, f))
            tables = profile.pop('tables', None)
            if tables:
                profile['metadata'] = True
            else:
                profile['metadata'] = False
            profiles[f[7:]] = profile
    return profiles
python
def list_profiles():
    """
    Lists all of the database profiles available

    Examples
    --------
    No doctest, covered by unittest
    list_profiles()
    {'demo': {u'dbname': None,
              u'dbtype': u'sqlite',
              u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',
              u'hostname': u'localhost',
              u'password': None,
              u'port': 5432,
              u'username': None},
     'muppets': {u'dbname': u'muppetdb',
                 u'dbtype': u'postgres',
                 u'filename': None,
                 u'hostname': u'muppets.yhathq.com',
                 u'password': None,
                 u'port': 5432,
                 u'username': u'kermit'}}
    """
    profiles = {}
    user = os.path.expanduser("~")
    for f in os.listdir(user):
        if f.startswith(".db.py_"):
            profile = load_from_json(os.path.join(user, f))
            tables = profile.pop('tables', None)
            if tables:
                profile['metadata'] = True
            else:
                profile['metadata'] = False
            profiles[f[7:]] = profile
    return profiles
[ "def", "list_profiles", "(", ")", ":", "profiles", "=", "{", "}", "user", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "for", "f", "in", "os", ".", "listdir", "(", "user", ")", ":", "if", "f", ".", "startswith", "(", "\".db.py_\"", ")", ":", "profile", "=", "load_from_json", "(", "os", ".", "path", ".", "join", "(", "user", ",", "f", ")", ")", "tables", "=", "profile", ".", "pop", "(", "'tables'", ",", "None", ")", "if", "tables", ":", "profile", "[", "'metadata'", "]", "=", "True", "else", ":", "profile", "[", "'metadata'", "]", "=", "False", "profiles", "[", "f", "[", "7", ":", "]", "]", "=", "profile", "return", "profiles" ]
Lists all of the database profiles available Examples -------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': None, u'port': 5432, u'username': u'kermit'}}
[ "Lists", "all", "of", "the", "database", "profiles", "available" ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L1059-L1094
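Profile discovery here is purely filename-based: every dotfile in `$HOME` beginning with `.db.py_` is treated as a profile, and `f[7:]` strips that 7-character prefix to recover the profile name. For instance (hypothetical home directory contents):

# ~/.db.py_demo     -> profiles['demo']
# ~/.db.py_muppets  -> profiles['muppets']
assert len(".db.py_") == 7   # why the slice is f[7:]
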
8,058
yhat/db.py
db/db.py
remove_profile
def remove_profile(name, s3=False):
    """
    Removes a profile from your config
    """
    user = os.path.expanduser("~")
    if s3:
        f = os.path.join(user, S3_PROFILE_ID + name)
    else:
        f = os.path.join(user, DBPY_PROFILE_ID + name)
    try:
        try:
            open(f)
        except:
            raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
        os.remove(f)
    except Exception as e:
        raise Exception("Could not remove profile {0}! Exception: {1}".format(name, e))
python
def remove_profile(name, s3=False):
    """
    Removes a profile from your config
    """
    user = os.path.expanduser("~")
    if s3:
        f = os.path.join(user, S3_PROFILE_ID + name)
    else:
        f = os.path.join(user, DBPY_PROFILE_ID + name)
    try:
        try:
            open(f)
        except:
            raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
        os.remove(f)
    except Exception as e:
        raise Exception("Could not remove profile {0}! Exception: {1}".format(name, e))
[ "def", "remove_profile", "(", "name", ",", "s3", "=", "False", ")", ":", "user", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "if", "s3", ":", "f", "=", "os", ".", "path", ".", "join", "(", "user", ",", "S3_PROFILE_ID", "+", "name", ")", "else", ":", "f", "=", "os", ".", "path", ".", "join", "(", "user", ",", "DBPY_PROFILE_ID", "+", "name", ")", "try", ":", "try", ":", "open", "(", "f", ")", "except", ":", "raise", "Exception", "(", "\"Profile '{0}' does not exist. Could not find file {1}\"", ".", "format", "(", "name", ",", "f", ")", ")", "os", ".", "remove", "(", "f", ")", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "\"Could not remove profile {0}! Excpetion: {1}\"", ".", "format", "(", "name", ",", "e", ")", ")" ]
Removes a profile from your config
[ "Removes", "a", "profile", "from", "your", "config" ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L1097-L1114
8,059
yhat/db.py
db/db.py
DB.tables
def tables(self):
    """A lazy loaded reference to the table metadata for the DB."""
    if len(self._tables) == 0:
        self.refresh_schema(self._exclude_system_tables, self._use_cache)
    return self._tables
python
def tables(self):
    """A lazy loaded reference to the table metadata for the DB."""
    if len(self._tables) == 0:
        self.refresh_schema(self._exclude_system_tables, self._use_cache)
    return self._tables
[ "def", "tables", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_tables", ")", "==", "0", ":", "self", ".", "refresh_schema", "(", "self", ".", "_exclude_system_tables", ",", "self", ".", "_use_cache", ")", "return", "self", ".", "_tables" ]
A lazy loaded reference to the table metadata for the DB.
[ "A", "lazy", "loaded", "reference", "to", "the", "table", "metadata", "for", "the", "DB", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L262-L266
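This is the usual lazy-property idiom: the first attribute access pays for a full `refresh_schema`, and every later access returns the cached `TableSet`. A sketch of the pattern in isolation (class and method names hypothetical):

class Lazy(object):
    def __init__(self):
        self._tables = []

    @property
    def tables(self):
        if len(self._tables) == 0:       # first access only
            self._tables = self._load()  # stand-in for the expensive schema pull
        return self._tables

    def _load(self):
        return ['Album', 'Artist']       # placeholder data
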
8,060
yhat/db.py
db/db.py
DB.save_credentials
def save_credentials(self, profile="default"):
    """
    Save your database credentials so you don't have to save them in a script.

    Parameters
    ----------
    profile: str (optional)
        identifier/name for your database (i.e. "dw", "prod")

    from db import DB
    import pymysql
    db = DB(username="hank", password="foo", hostname="prod.mardukas.com",
            dbname="bar", dbtype="mysql")
    db.save_credentials(profile="production")
    db = DB(username="hank", password="foo", hostname="staging.mardukas.com",
            dbname="bar", dbtype="mysql")
    db.save_credentials(profile="staging")
    db = DB(profile="staging")

    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.save_credentials(profile='test')
    """
    f = profile_path(DBPY_PROFILE_ID, profile)
    dump_to_json(f, self.credentials)
python
def save_credentials(self, profile="default"):
    """
    Save your database credentials so you don't have to save them in a script.

    Parameters
    ----------
    profile: str (optional)
        identifier/name for your database (i.e. "dw", "prod")

    from db import DB
    import pymysql
    db = DB(username="hank", password="foo", hostname="prod.mardukas.com",
            dbname="bar", dbtype="mysql")
    db.save_credentials(profile="production")
    db = DB(username="hank", password="foo", hostname="staging.mardukas.com",
            dbname="bar", dbtype="mysql")
    db.save_credentials(profile="staging")
    db = DB(profile="staging")

    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.save_credentials(profile='test')
    """
    f = profile_path(DBPY_PROFILE_ID, profile)
    dump_to_json(f, self.credentials)
[ "def", "save_credentials", "(", "self", ",", "profile", "=", "\"default\"", ")", ":", "f", "=", "profile_path", "(", "DBPY_PROFILE_ID", ",", "profile", ")", "dump_to_json", "(", "f", ",", "self", ".", "credentials", ")" ]
Save your database credentials so you don't have to save them in a script.

Parameters
----------
profile: str (optional)
    identifier/name for your database (i.e. "dw", "prod")

from db import DB
import pymysql
db = DB(username="hank", password="foo", hostname="prod.mardukas.com",
        dbname="bar", dbtype="mysql")
db.save_credentials(profile="production")
db = DB(username="hank", password="foo", hostname="staging.mardukas.com",
        dbname="bar", dbtype="mysql")
db.save_credentials(profile="staging")
db = DB(profile="staging")

>>> from db import DemoDB
>>> db = DemoDB()
>>> db.save_credentials(profile='test')
[ "Save", "your", "database", "credentials", "so", "you", "don", "t", "have", "to", "save", "them", "in", "script", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L307-L329
8,061
yhat/db.py
db/db.py
DB.save_metadata
def save_metadata(self, profile="default"):
    """Save the database credentials, plus the database properties to your db.py profile."""
    if len(self.tables) > 0:
        f = profile_path(DBPY_PROFILE_ID, profile)
        dump_to_json(f, self.to_dict())
python
def save_metadata(self, profile="default"):
    """Save the database credentials, plus the database properties to your db.py profile."""
    if len(self.tables) > 0:
        f = profile_path(DBPY_PROFILE_ID, profile)
        dump_to_json(f, self.to_dict())
[ "def", "save_metadata", "(", "self", ",", "profile", "=", "\"default\"", ")", ":", "if", "len", "(", "self", ".", "tables", ")", ">", "0", ":", "f", "=", "profile_path", "(", "DBPY_PROFILE_ID", ",", "profile", ")", "dump_to_json", "(", "f", ",", "self", ".", "to_dict", "(", ")", ")" ]
Save the database credentials, plus the database properties to your db.py profile.
[ "Save", "the", "database", "credentials", "plus", "the", "database", "properties", "to", "your", "db", ".", "py", "profile", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L338-L342
8,062
yhat/db.py
db/db.py
DB.credentials
def credentials(self):
    """Dict representation of all credentials for the database."""
    if self.filename:
        db_filename = os.path.join(os.getcwd(), self.filename)
    else:
        db_filename = None

    return {
        "username": self.username,
        "password": self.password,
        "hostname": self.hostname,
        "port": self.port,
        "filename": db_filename,
        "dbname": self.dbname,
        "dbtype": self.dbtype,
        "schemas": self.schemas,
        "limit": self.limit,
        "keys_per_column": self.keys_per_column,
    }
python
def credentials(self):
    """Dict representation of all credentials for the database."""
    if self.filename:
        db_filename = os.path.join(os.getcwd(), self.filename)
    else:
        db_filename = None

    return {
        "username": self.username,
        "password": self.password,
        "hostname": self.hostname,
        "port": self.port,
        "filename": db_filename,
        "dbname": self.dbname,
        "dbtype": self.dbtype,
        "schemas": self.schemas,
        "limit": self.limit,
        "keys_per_column": self.keys_per_column,
    }
[ "def", "credentials", "(", "self", ")", ":", "if", "self", ".", "filename", ":", "db_filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "self", ".", "filename", ")", "else", ":", "db_filename", "=", "None", "return", "{", "\"username\"", ":", "self", ".", "username", ",", "\"password\"", ":", "self", ".", "password", ",", "\"hostname\"", ":", "self", ".", "hostname", ",", "\"port\"", ":", "self", ".", "port", ",", "\"filename\"", ":", "db_filename", ",", "\"dbname\"", ":", "self", ".", "dbname", ",", "\"dbtype\"", ":", "self", ".", "dbtype", ",", "\"schemas\"", ":", "self", ".", "schemas", ",", "\"limit\"", ":", "self", ".", "limit", ",", "\"keys_per_column\"", ":", "self", ".", "keys_per_column", ",", "}" ]
Dict representation of all credentials for the database.
[ "Dict", "representation", "of", "all", "credentials", "for", "the", "database", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L345-L363
8,063
yhat/db.py
db/db.py
DB.find_table
def find_table(self, search):
    """
    Aggressively search through your database's schema for a table.

    Parameters
    -----------
    search: str
        glob pattern for what you're looking for

    Examples
    ----------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.find_table("A*")
    +--------+--------------------------+
    | Table  | Columns                  |
    +--------+--------------------------+
    | Album  | AlbumId, Title, ArtistId |
    | Artist | ArtistId, Name           |
    +--------+--------------------------+

    >>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
    >>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
    >>> results = db.find_table("*Invoice*") # returns all tables containing trans
    >>> results = db.find_table("*") # returns everything
    """
    tables = []
    for table in self.tables:
        if glob.fnmatch.fnmatch(table.name, search):
            tables.append(table)
    return TableSet(tables)
python
def find_table(self, search):
    """
    Aggressively search through your database's schema for a table.

    Parameters
    -----------
    search: str
        glob pattern for what you're looking for

    Examples
    ----------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.find_table("A*")
    +--------+--------------------------+
    | Table  | Columns                  |
    +--------+--------------------------+
    | Album  | AlbumId, Title, ArtistId |
    | Artist | ArtistId, Name           |
    +--------+--------------------------+

    >>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
    >>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
    >>> results = db.find_table("*Invoice*") # returns all tables containing trans
    >>> results = db.find_table("*") # returns everything
    """
    tables = []
    for table in self.tables:
        if glob.fnmatch.fnmatch(table.name, search):
            tables.append(table)
    return TableSet(tables)
[ "def", "find_table", "(", "self", ",", "search", ")", ":", "tables", "=", "[", "]", "for", "table", "in", "self", ".", "tables", ":", "if", "glob", ".", "fnmatch", ".", "fnmatch", "(", "table", ".", "name", ",", "search", ")", ":", "tables", ".", "append", "(", "table", ")", "return", "TableSet", "(", "tables", ")" ]
Aggressively search through your database's schema for a table.

Parameters
-----------
search: str
    glob pattern for what you're looking for

Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.find_table("A*")
+--------+--------------------------+
| Table  | Columns                  |
+--------+--------------------------+
| Album  | AlbumId, Title, ArtistId |
| Artist | ArtistId, Name           |
+--------+--------------------------+

>>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
>>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
>>> results = db.find_table("*Invoice*") # returns all tables containing trans
>>> results = db.find_table("*") # returns everything
[ "Aggresively", "search", "through", "your", "database", "s", "schema", "for", "a", "table", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L365-L394
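`glob.fnmatch.fnmatch` applies shell-style wildcard rules rather than regexes, which is why patterns like `*Invoice*` work above (the `glob` module re-exports `fnmatch` as an internal import). A quick check of the semantics relied on here:

import glob

assert glob.fnmatch.fnmatch('Invoice', '*Invoice*')
assert glob.fnmatch.fnmatch('InvoiceLine', '*Invoice*')
assert glob.fnmatch.fnmatch('Album', 'A*')
assert not glob.fnmatch.fnmatch('Album', 'A?')   # ? matches exactly one char
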
8,064
yhat/db.py
db/db.py
DB.find_column
def find_column(self, search, data_type=None):
    """
    Aggressively search through your database's schema for a column.

    Parameters
    -----------
    search: str
        glob pattern for what you're looking for
    data_type: str, list (optional)
        specify which data type(s) you want to return

    Examples
    ----------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> len(db.find_column("Name").columns)
    5
    >>> len(db.find_column("*Id").columns)
    20
    >>> len(db.find_column("*Address*").columns)
    3
    >>> len(db.find_column("*Address*", data_type="NVARCHAR(70)").columns)
    3
    >>> len(db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]).columns)
    17
    -= Should sort in some way for all those doctests to be viable...
    -= if not, there's always a random issue where rows are not in the same order, making doctest fail.
    db.find_column("Name") # returns all columns named "Name"
    +-----------+-------------+---------------+
    | Table     | Column Name | Type          |
    +-----------+-------------+---------------+
    | Artist    | Name        | NVARCHAR(120) |
    | Genre     | Name        | NVARCHAR(120) |
    | MediaType | Name        | NVARCHAR(120) |
    | Playlist  | Name        | NVARCHAR(120) |
    | Track     | Name        | NVARCHAR(200) |
    +-----------+-------------+---------------+
    db.find_column("*Id") # returns all columns ending w/ Id
    +---------------+---------------+---------+
    | Table         | Column Name   | Type    |
    +---------------+---------------+---------+
    | Album         | AlbumId       | INTEGER |
    | Album         | ArtistId      | INTEGER |
    | Artist        | ArtistId      | INTEGER |
    | Customer      | SupportRepId  | INTEGER |
    | Customer      | CustomerId    | INTEGER |
    | Employee      | EmployeeId    | INTEGER |
    | Genre         | GenreId       | INTEGER |
    | Invoice       | InvoiceId     | INTEGER |
    | Invoice       | CustomerId    | INTEGER |
    | InvoiceLine   | TrackId       | INTEGER |
    | InvoiceLine   | InvoiceLineId | INTEGER |
    | InvoiceLine   | InvoiceId     | INTEGER |
    | MediaType     | MediaTypeId   | INTEGER |
    | Playlist      | PlaylistId    | INTEGER |
    | PlaylistTrack | TrackId       | INTEGER |
    | PlaylistTrack | PlaylistId    | INTEGER |
    | Track         | TrackId       | INTEGER |
    | Track         | AlbumId       | INTEGER |
    | Track         | MediaTypeId   | INTEGER |
    | Track         | GenreId       | INTEGER |
    +---------------+---------------+---------+
    db.find_column("*Address*") # returns all columns containing Address
    +----------+----------------+--------------+
    | Table    | Column Name    | Type         |
    +----------+----------------+--------------+
    | Customer | Address        | NVARCHAR(70) |
    | Employee | Address        | NVARCHAR(70) |
    | Invoice  | BillingAddress | NVARCHAR(70) |
    +----------+----------------+--------------+
    db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
    +----------+----------------+--------------+
    | Table    | Column Name    | Type         |
    +----------+----------------+--------------+
    | Customer | Address        | NVARCHAR(70) |
    | Employee | Address        | NVARCHAR(70) |
    | Invoice  | BillingAddress | NVARCHAR(70) |
    +----------+----------------+--------------+
    db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
    +-------------+----------------+--------------+
    | Table       | Column Name    | Type         |
    +-------------+----------------+--------------+
    | Customer    | Address        | NVARCHAR(70) |
    | Customer    | SupportRepId   | INTEGER      |
    | Customer    | CustomerId     | INTEGER      |
    | Employee    | ReportsTo      | INTEGER      |
    | Employee    | EmployeeId     | INTEGER      |
    | Employee    | Address        | NVARCHAR(70) |
    | Genre       | GenreId        | INTEGER      |
    | Invoice     | InvoiceId      | INTEGER      |
    | Invoice     | CustomerId     | INTEGER      |
    | Invoice     | BillingAddress | NVARCHAR(70) |
    | InvoiceLine | InvoiceLineId  | INTEGER      |
    | InvoiceLine | InvoiceId      | INTEGER      |
    | MediaType   | MediaTypeId    | INTEGER      |
    | Track       | MediaTypeId    | INTEGER      |
    | Track       | Milliseconds   | INTEGER      |
    | Track       | GenreId        | INTEGER      |
    | Track       | Bytes          | INTEGER      |
    +-------------+----------------+--------------+
    """
    if isinstance(data_type, str):
        data_type = [data_type]
    cols = []
    for table in self.tables:
        for col in vars(table):
            if glob.fnmatch.fnmatch(col, search):
                if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:
                    continue
                if isinstance(getattr(table, col), Column):
                    cols.append(getattr(table, col))
    return ColumnSet(cols)
python
def find_column(self, search, data_type=None):
    """
    Aggressively search through your database's schema for a column.

    Parameters
    -----------
    search: str
        glob pattern for what you're looking for
    data_type: str, list (optional)
        specify which data type(s) you want to return

    Examples
    ----------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> len(db.find_column("Name").columns)
    5
    >>> len(db.find_column("*Id").columns)
    20
    >>> len(db.find_column("*Address*").columns)
    3
    >>> len(db.find_column("*Address*", data_type="NVARCHAR(70)").columns)
    3
    >>> len(db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]).columns)
    17
    -= Should sort in some way for all those doctests to be viable...
    -= if not, there's always a random issue where rows are not in the same order, making doctest fail.
    db.find_column("Name") # returns all columns named "Name"
    +-----------+-------------+---------------+
    | Table     | Column Name | Type          |
    +-----------+-------------+---------------+
    | Artist    | Name        | NVARCHAR(120) |
    | Genre     | Name        | NVARCHAR(120) |
    | MediaType | Name        | NVARCHAR(120) |
    | Playlist  | Name        | NVARCHAR(120) |
    | Track     | Name        | NVARCHAR(200) |
    +-----------+-------------+---------------+
    db.find_column("*Id") # returns all columns ending w/ Id
    +---------------+---------------+---------+
    | Table         | Column Name   | Type    |
    +---------------+---------------+---------+
    | Album         | AlbumId       | INTEGER |
    | Album         | ArtistId      | INTEGER |
    | Artist        | ArtistId      | INTEGER |
    | Customer      | SupportRepId  | INTEGER |
    | Customer      | CustomerId    | INTEGER |
    | Employee      | EmployeeId    | INTEGER |
    | Genre         | GenreId       | INTEGER |
    | Invoice       | InvoiceId     | INTEGER |
    | Invoice       | CustomerId    | INTEGER |
    | InvoiceLine   | TrackId       | INTEGER |
    | InvoiceLine   | InvoiceLineId | INTEGER |
    | InvoiceLine   | InvoiceId     | INTEGER |
    | MediaType     | MediaTypeId   | INTEGER |
    | Playlist      | PlaylistId    | INTEGER |
    | PlaylistTrack | TrackId       | INTEGER |
    | PlaylistTrack | PlaylistId    | INTEGER |
    | Track         | TrackId       | INTEGER |
    | Track         | AlbumId       | INTEGER |
    | Track         | MediaTypeId   | INTEGER |
    | Track         | GenreId       | INTEGER |
    +---------------+---------------+---------+
    db.find_column("*Address*") # returns all columns containing Address
    +----------+----------------+--------------+
    | Table    | Column Name    | Type         |
    +----------+----------------+--------------+
    | Customer | Address        | NVARCHAR(70) |
    | Employee | Address        | NVARCHAR(70) |
    | Invoice  | BillingAddress | NVARCHAR(70) |
    +----------+----------------+--------------+
    db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
    +----------+----------------+--------------+
    | Table    | Column Name    | Type         |
    +----------+----------------+--------------+
    | Customer | Address        | NVARCHAR(70) |
    | Employee | Address        | NVARCHAR(70) |
    | Invoice  | BillingAddress | NVARCHAR(70) |
    +----------+----------------+--------------+
    db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
    +-------------+----------------+--------------+
    | Table       | Column Name    | Type         |
    +-------------+----------------+--------------+
    | Customer    | Address        | NVARCHAR(70) |
    | Customer    | SupportRepId   | INTEGER      |
    | Customer    | CustomerId     | INTEGER      |
    | Employee    | ReportsTo      | INTEGER      |
    | Employee    | EmployeeId     | INTEGER      |
    | Employee    | Address        | NVARCHAR(70) |
    | Genre       | GenreId        | INTEGER      |
    | Invoice     | InvoiceId      | INTEGER      |
    | Invoice     | CustomerId     | INTEGER      |
    | Invoice     | BillingAddress | NVARCHAR(70) |
    | InvoiceLine | InvoiceLineId  | INTEGER      |
    | InvoiceLine | InvoiceId      | INTEGER      |
    | MediaType   | MediaTypeId    | INTEGER      |
    | Track       | MediaTypeId    | INTEGER      |
    | Track       | Milliseconds   | INTEGER      |
    | Track       | GenreId        | INTEGER      |
    | Track       | Bytes          | INTEGER      |
    +-------------+----------------+--------------+
    """
    if isinstance(data_type, str):
        data_type = [data_type]
    cols = []
    for table in self.tables:
        for col in vars(table):
            if glob.fnmatch.fnmatch(col, search):
                if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:
                    continue
                if isinstance(getattr(table, col), Column):
                    cols.append(getattr(table, col))
    return ColumnSet(cols)
[ "def", "find_column", "(", "self", ",", "search", ",", "data_type", "=", "None", ")", ":", "if", "isinstance", "(", "data_type", ",", "str", ")", ":", "data_type", "=", "[", "data_type", "]", "cols", "=", "[", "]", "for", "table", "in", "self", ".", "tables", ":", "for", "col", "in", "vars", "(", "table", ")", ":", "if", "glob", ".", "fnmatch", ".", "fnmatch", "(", "col", ",", "search", ")", ":", "if", "data_type", "and", "isinstance", "(", "getattr", "(", "table", ",", "col", ")", ",", "Column", ")", "and", "getattr", "(", "table", ",", "col", ")", ".", "type", "not", "in", "data_type", ":", "continue", "if", "isinstance", "(", "getattr", "(", "table", ",", "col", ")", ",", "Column", ")", ":", "cols", ".", "append", "(", "getattr", "(", "table", ",", "col", ")", ")", "return", "ColumnSet", "(", "cols", ")" ]
Aggressively search through your database's schema for a column.

Parameters
-----------
search: str
    glob pattern for what you're looking for
data_type: str, list (optional)
    specify which data type(s) you want to return

Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.find_column("Name").columns)
5
>>> len(db.find_column("*Id").columns)
20
>>> len(db.find_column("*Address*").columns)
3
>>> len(db.find_column("*Address*", data_type="NVARCHAR(70)").columns)
3
>>> len(db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]).columns)
17
-= Should sort in some way for all those doctests to be viable...
-= if not, there's always a random issue where rows are not in the same order, making doctest fail.
db.find_column("Name") # returns all columns named "Name"
+-----------+-------------+---------------+
| Table     | Column Name | Type          |
+-----------+-------------+---------------+
| Artist    | Name        | NVARCHAR(120) |
| Genre     | Name        | NVARCHAR(120) |
| MediaType | Name        | NVARCHAR(120) |
| Playlist  | Name        | NVARCHAR(120) |
| Track     | Name        | NVARCHAR(200) |
+-----------+-------------+---------------+
db.find_column("*Id") # returns all columns ending w/ Id
+---------------+---------------+---------+
| Table         | Column Name   | Type    |
+---------------+---------------+---------+
| Album         | AlbumId       | INTEGER |
| Album         | ArtistId      | INTEGER |
| Artist        | ArtistId      | INTEGER |
| Customer      | SupportRepId  | INTEGER |
| Customer      | CustomerId    | INTEGER |
| Employee      | EmployeeId    | INTEGER |
| Genre         | GenreId       | INTEGER |
| Invoice       | InvoiceId     | INTEGER |
| Invoice       | CustomerId    | INTEGER |
| InvoiceLine   | TrackId       | INTEGER |
| InvoiceLine   | InvoiceLineId | INTEGER |
| InvoiceLine   | InvoiceId     | INTEGER |
| MediaType     | MediaTypeId   | INTEGER |
| Playlist      | PlaylistId    | INTEGER |
| PlaylistTrack | TrackId       | INTEGER |
| PlaylistTrack | PlaylistId    | INTEGER |
| Track         | TrackId       | INTEGER |
| Track         | AlbumId       | INTEGER |
| Track         | MediaTypeId   | INTEGER |
| Track         | GenreId       | INTEGER |
+---------------+---------------+---------+
db.find_column("*Address*") # returns all columns containing Address
+----------+----------------+--------------+
| Table    | Column Name    | Type         |
+----------+----------------+--------------+
| Customer | Address        | NVARCHAR(70) |
| Employee | Address        | NVARCHAR(70) |
| Invoice  | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
+----------+----------------+--------------+
| Table    | Column Name    | Type         |
+----------+----------------+--------------+
| Customer | Address        | NVARCHAR(70) |
| Employee | Address        | NVARCHAR(70) |
| Invoice  | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
+-------------+----------------+--------------+
| Table       | Column Name    | Type         |
+-------------+----------------+--------------+
| Customer    | Address        | NVARCHAR(70) |
| Customer    | SupportRepId   | INTEGER      |
| Customer    | CustomerId     | INTEGER      |
| Employee    | ReportsTo      | INTEGER      |
| Employee    | EmployeeId     | INTEGER      |
| Employee    | Address        | NVARCHAR(70) |
| Genre       | GenreId        | INTEGER      |
| Invoice     | InvoiceId      | INTEGER      |
| Invoice     | CustomerId     | INTEGER      |
| Invoice     | BillingAddress | NVARCHAR(70) |
| InvoiceLine | InvoiceLineId  | INTEGER      |
| InvoiceLine | InvoiceId      | INTEGER      |
| MediaType   | MediaTypeId    | INTEGER      |
| Track       | MediaTypeId    | INTEGER      |
| Track       | Milliseconds   | INTEGER      |
| Track       | GenreId        | INTEGER      |
| Track       | Bytes          | INTEGER      |
+-------------+----------------+--------------+
[ "Aggresively", "search", "through", "your", "database", "s", "schema", "for", "a", "column", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L396-L508
8,065
yhat/db.py
db/db.py
DB.query
def query(self, q, data=None, union=True, limit=None):
    """
    Query your database with a raw string.

    Parameters
    ----------
    q: str
        Query string to execute
    data: list, dict
        Optional argument for handlebars-queries. Data will be passed to the
        template and rendered using handlebars.
    union: bool
        Whether or not "UNION ALL" handlebars templates. This will return
        any handlebars queries as a single data frame.
    limit: int
        Number of records to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()

    db.query("select * from Track").head(2)
       TrackId                                     Name  AlbumId  MediaTypeId \\\r
    0        1  For Those About To Rock (We Salute You)        1            1
    1        2                        Balls to the Wall        2            2
    <BLANKLINE>
       GenreId                                   Composer  Milliseconds     Bytes \\\r
    0        1  Angus Young, Malcolm Young, Brian Johnson        343719  11170334
    1        1                                       None        342562   5510424
    <BLANKLINE>
       UnitPrice
    0       0.99
    1       0.99

    db.query("select * from Track", limit=10)
       TrackId                                     Name  AlbumId  MediaTypeId  \
    0        1  For Those About To Rock (We Salute You)        1            1
    1        2                        Balls to the Wall        2            2
    2        3                          Fast As a Shark        3            2
    3        4                        Restless and Wild        3            2
    4        5                     Princess of the Dawn        3            2
    5        6                    Put The Finger On You        1            1
    6        7                          Let's Get It Up        1            1
    7        8                         Inject The Venom        1            1
    8        9                               Snowballed        1            1
    9       10                               Evil Walks        1            1

       GenreId                                           Composer  Milliseconds  \
    0        1          Angus Young, Malcolm Young, Brian Johnson        343719
    1        1                                               None        342562
    2        1  F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho...        230619
    3        1  F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D...        252051
    4        1                         Deaffy & R.A. Smith-Diesel        375418
    5        1          Angus Young, Malcolm Young, Brian Johnson        205662
    6        1          Angus Young, Malcolm Young, Brian Johnson        233926
    7        1          Angus Young, Malcolm Young, Brian Johnson        210834
    8        1          Angus Young, Malcolm Young, Brian Johnson        203102
    9        1          Angus Young, Malcolm Young, Brian Johnson        263497

          Bytes  UnitPrice
    0  11170334       0.99
    1   5510424       0.99
    2   3990994       0.99
    3   4331779       0.99
    4   6290521       0.99
    5   6713451       0.99
    6   7636561       0.99
    7   6852860       0.99
    8   6599424       0.99
    9   8611245       0.99

    >>> q = '''
    ... SELECT
    ...   a.Title,
    ...   t.Name,
    ...   t.UnitPrice
    ... FROM
    ...   Album a
    ... INNER JOIN
    ...   Track t
    ...     on a.AlbumId = t.AlbumId;
    ... '''
    >>> len(db.query(q))
    3503

    db.query(q, limit=10)
                                       Title  \
    0  For Those About To Rock We Salute You
    1                      Balls to the Wall
    2                      Restless and Wild
    3                      Restless and Wild
    4                      Restless and Wild
    5  For Those About To Rock We Salute You
    6  For Those About To Rock We Salute You
    7  For Those About To Rock We Salute You
    8  For Those About To Rock We Salute You
    9  For Those About To Rock We Salute You

                                          Name  UnitPrice
    0  For Those About To Rock (We Salute You)       0.99
    1                        Balls to the Wall       0.99
    2                          Fast As a Shark       0.99
    3                        Restless and Wild       0.99
    4                     Princess of the Dawn       0.99
    5                    Put The Finger On You       0.99
    6                          Let's Get It Up       0.99
    7                         Inject The Venom       0.99
    8                               Snowballed       0.99
    9                               Evil Walks       0.99

    >>> template = '''
    ... SELECT
    ...     '{{ name }}' as table_name,
    ...     COUNT(*) as cnt
    ... FROM
    ...     {{ name }}
    ... GROUP BY
    ...     table_name
    ... '''
    >>> data = [
    ...    {"name": "Album"},
    ...    {"name": "Artist"},
    ...    {"name": "Track"}
    ... ]
    >>> db.query(q, data=data)
      table_name   cnt
    0      Album   347
    1     Artist   275
    2      Track  3503

    >>> q = '''
    ... SELECT
    ...     {{#cols}}
    ...        {{#if @last}}
    ...            {{ . }}
    ...        {{else}}
    ...            {{ . }} ,
    ...        {{/if}}
    ...     {{/cols}}
    ... FROM
    ...     Album;
    ... '''
    >>> data = {"cols": ["AlbumId", "Title", "ArtistId"]}
    >>> len(db.query(q, data=data, union=False))
    347

    db.query(q, data=data, union=False)
       AlbumId                                  Title  ArtistId
    0        1  For Those About To Rock We Salute You         1
    1        2                      Balls to the Wall         2
    2        3                      Restless and Wild         2
    3        4                      Let There Be Rock         1
    4        5                               Big Ones         3
    """
    if data:
        q = self._apply_handlebars(q, data, union)
    if limit:
        q = self._assign_limit(q, limit)
    return pd.read_sql(q, self.con)
python
def query(self, q, data=None, union=True, limit=None):
    """
    Query your database with a raw string.

    Parameters
    ----------
    q: str
        Query string to execute
    data: list, dict
        Optional argument for handlebars-queries. Data will be passed to the
        template and rendered using handlebars.
    union: bool
        Whether or not "UNION ALL" handlebars templates. This will return
        any handlebars queries as a single data frame.
    limit: int
        Number of records to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()

    db.query("select * from Track").head(2)
       TrackId                                     Name  AlbumId  MediaTypeId \\\r
    0        1  For Those About To Rock (We Salute You)        1            1
    1        2                        Balls to the Wall        2            2
    <BLANKLINE>
       GenreId                                   Composer  Milliseconds     Bytes \\\r
    0        1  Angus Young, Malcolm Young, Brian Johnson        343719  11170334
    1        1                                       None        342562   5510424
    <BLANKLINE>
       UnitPrice
    0       0.99
    1       0.99

    db.query("select * from Track", limit=10)
       TrackId                                     Name  AlbumId  MediaTypeId  \
    0        1  For Those About To Rock (We Salute You)        1            1
    1        2                        Balls to the Wall        2            2
    2        3                          Fast As a Shark        3            2
    3        4                        Restless and Wild        3            2
    4        5                     Princess of the Dawn        3            2
    5        6                    Put The Finger On You        1            1
    6        7                          Let's Get It Up        1            1
    7        8                         Inject The Venom        1            1
    8        9                               Snowballed        1            1
    9       10                               Evil Walks        1            1

       GenreId                                           Composer  Milliseconds  \
    0        1          Angus Young, Malcolm Young, Brian Johnson        343719
    1        1                                               None        342562
    2        1  F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho...        230619
    3        1  F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D...        252051
    4        1                         Deaffy & R.A. Smith-Diesel        375418
    5        1          Angus Young, Malcolm Young, Brian Johnson        205662
    6        1          Angus Young, Malcolm Young, Brian Johnson        233926
    7        1          Angus Young, Malcolm Young, Brian Johnson        210834
    8        1          Angus Young, Malcolm Young, Brian Johnson        203102
    9        1          Angus Young, Malcolm Young, Brian Johnson        263497

          Bytes  UnitPrice
    0  11170334       0.99
    1   5510424       0.99
    2   3990994       0.99
    3   4331779       0.99
    4   6290521       0.99
    5   6713451       0.99
    6   7636561       0.99
    7   6852860       0.99
    8   6599424       0.99
    9   8611245       0.99

    >>> q = '''
    ... SELECT
    ...   a.Title,
    ...   t.Name,
    ...   t.UnitPrice
    ... FROM
    ...   Album a
    ... INNER JOIN
    ...   Track t
    ...     on a.AlbumId = t.AlbumId;
    ... '''
    >>> len(db.query(q))
    3503

    db.query(q, limit=10)
                                       Title  \
    0  For Those About To Rock We Salute You
    1                      Balls to the Wall
    2                      Restless and Wild
    3                      Restless and Wild
    4                      Restless and Wild
    5  For Those About To Rock We Salute You
    6  For Those About To Rock We Salute You
    7  For Those About To Rock We Salute You
    8  For Those About To Rock We Salute You
    9  For Those About To Rock We Salute You

                                          Name  UnitPrice
    0  For Those About To Rock (We Salute You)       0.99
    1                        Balls to the Wall       0.99
    2                          Fast As a Shark       0.99
    3                        Restless and Wild       0.99
    4                     Princess of the Dawn       0.99
    5                    Put The Finger On You       0.99
    6                          Let's Get It Up       0.99
    7                         Inject The Venom       0.99
    8                               Snowballed       0.99
    9                               Evil Walks       0.99

    >>> template = '''
    ... SELECT
    ...     '{{ name }}' as table_name,
    ...     COUNT(*) as cnt
    ... FROM
    ...     {{ name }}
    ... GROUP BY
    ...     table_name
    ... '''
    >>> data = [
    ...    {"name": "Album"},
    ...    {"name": "Artist"},
    ...    {"name": "Track"}
    ... ]
    >>> db.query(q, data=data)
      table_name   cnt
    0      Album   347
    1     Artist   275
    2      Track  3503

    >>> q = '''
    ... SELECT
    ...     {{#cols}}
    ...        {{#if @last}}
    ...            {{ . }}
    ...        {{else}}
    ...            {{ . }} ,
    ...        {{/if}}
    ...     {{/cols}}
    ... FROM
    ...     Album;
    ... '''
    >>> data = {"cols": ["AlbumId", "Title", "ArtistId"]}
    >>> len(db.query(q, data=data, union=False))
    347

    db.query(q, data=data, union=False)
       AlbumId                                  Title  ArtistId
    0        1  For Those About To Rock We Salute You         1
    1        2                      Balls to the Wall         2
    2        3                      Restless and Wild         2
    3        4                      Let There Be Rock         1
    4        5                               Big Ones         3
    """
    if data:
        q = self._apply_handlebars(q, data, union)
    if limit:
        q = self._assign_limit(q, limit)
    return pd.read_sql(q, self.con)
[ "def", "query", "(", "self", ",", "q", ",", "data", "=", "None", ",", "union", "=", "True", ",", "limit", "=", "None", ")", ":", "if", "data", ":", "q", "=", "self", ".", "_apply_handlebars", "(", "q", ",", "data", ",", "union", ")", "if", "limit", ":", "q", "=", "self", ".", "_assign_limit", "(", "q", ",", "limit", ")", "return", "pd", ".", "read_sql", "(", "q", ",", "self", ".", "con", ")" ]
Query your database with a raw string. Parameters ---------- q: str Query string to execute data: list, dict Optional argument for handlebars-queries. Data will be passed to the template and rendered using handlebars. union: bool Whether or not "UNION ALL" handlebars templates. This will return any handlebars queries as a single data frame. limit: int Number of records to return Examples -------- >>> from db import DemoDB >>> db = DemoDB() db.query("select * from Track").head(2) TrackId Name AlbumId MediaTypeId \\\r 0 1 For Those About To Rock (We Salute You) 1 1 1 2 Balls to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\r 0 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query("select * from Track", limit=10) TrackId Name AlbumId MediaTypeId \ 0 1 For Those About To Rock (We Salute You) 1 1 1 2 Balls to the Wall 2 2 2 3 Fast As a Shark 3 2 3 4 Restless and Wild 3 2 4 5 Princess of the Dawn 3 2 5 6 Put The Finger On You 1 1 6 7 Let's Get It Up 1 1 7 8 Inject The Venom 1 1 8 9 Snowballed 1 1 9 10 Evil Walks 1 1 GenreId Composer Milliseconds \ 0 1 Angus Young, Malcolm Young, Brian Johnson 343719 1 1 None 342562 2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619 3 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D... 252051 4 1 Deaffy & R.A. Smith-Diesel 375418 5 1 Angus Young, Malcolm Young, Brian Johnson 205662 6 1 Angus Young, Malcolm Young, Brian Johnson 233926 7 1 Angus Young, Malcolm Young, Brian Johnson 210834 8 1 Angus Young, Malcolm Young, Brian Johnson 203102 9 1 Angus Young, Malcolm Young, Brian Johnson 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994 0.99 3 4331779 0.99 4 6290521 0.99 5 6713451 0.99 6 7636561 0.99 7 6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>> q = ''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... ''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \ 0 For Those About To Rock We Salute You 1 Balls to the Wall 2 Restless and Wild 3 Restless and Wild 4 Restless and Wild 5 For Those About To Rock We Salute You 6 For Those About To Rock We Salute You 7 For Those About To Rock We Salute You 8 For Those About To Rock We Salute You 9 For Those About To Rock We Salute You Name UnitPrice 0 For Those About To Rock (We Salute You) 0.99 1 Balls to the Wall 0.99 2 Fast As a Shark 0.99 3 Restless and Wild 0.99 4 Princess of the Dawn 0.99 5 Put The Finger On You 0.99 6 Let's Get It Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>> template = ''' ... SELECT ... '{{ name }}' as table_name, ... COUNT(*) as cnt ... FROM ... {{ name }} ... GROUP BY ... table_name ... ''' >>> data = [ ... {"name": "Album"}, ... {"name": "Artist"}, ... {"name": "Track"} ... ] >>> db.query(q, data=data) table_name cnt 0 Album 347 1 Artist 275 2 Track 3503 >>> q = ''' ... SELECT ... {{#cols}} ... {{#if @last}} ... {{ . }} ... {{else}} ... {{ . }} , ... {{/if}} ... {{/cols}} ... FROM ... Album; ... ''' >>> data = {"cols": ["AlbumId", "Title", "ArtistId"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For Those About To Rock We Salute You 1 1 2 Balls to the Wall 2 2 3 Restless and Wild 2 3 4 Let There Be Rock 1 4 5 Big Ones 3
[ "Query", "your", "database", "with", "a", "raw", "string", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L541-L702
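`_apply_handlebars` and `_assign_limit` are internal helpers not included in this dump. Purely as an illustration of what the limit rewrite could look like, here is a sketch of a subquery wrapper; this is an assumption about the approach, not db.py's actual implementation (`_assign_limit_sketch` is a hypothetical name):

def _assign_limit_sketch(q, limit):
    # wrap the query so LIMIT applies regardless of the query's own clauses
    return "SELECT * FROM ({q}) q LIMIT {n}".format(
        q=q.rstrip().rstrip(';'), n=limit)

print(_assign_limit_sketch("select * from Track", 10))
# SELECT * FROM (select * from Track) q LIMIT 10
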
8,066
yhat/db.py
db/db.py
DB.query_from_file
def query_from_file(self, filename, data=None, union=True, limit=None):
    """
    Query your database from a file.

    Parameters
    ----------
    filename: str
        A SQL script
    data: list, dict
        Optional argument for handlebars-queries. Data will be passed to the
        template and rendered using handlebars.
    union: bool
        Whether or not "UNION ALL" handlebars templates. This will return
        any handlebars queries as a single data frame.
    limit: int
        Number of records to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> q = '''
    ... SELECT
    ...   a.Title,
    ...   t.Name,
    ...   t.UnitPrice
    ... FROM
    ...   Album a
    ... INNER JOIN
    ...   Track t
    ...     on a.AlbumId = t.AlbumId;
    ... '''
    >>> with open("db/tests/myscript.sql", "w") as f:
    ...    f.write(q)
    109
    >>> len(db.query_from_file("db/tests/myscript.sql", limit=10))
    10

    db.query_from_file("db/tests/myscript.sql", limit=10)
                                       Title  \
    0  For Those About To Rock We Salute You
    1                      Balls to the Wall
    2                      Restless and Wild
    3                      Restless and Wild
    4                      Restless and Wild
    5  For Those About To Rock We Salute You
    6  For Those About To Rock We Salute You
    7  For Those About To Rock We Salute You
    8  For Those About To Rock We Salute You
    9  For Those About To Rock We Salute You

                                          Name  UnitPrice
    0  For Those About To Rock (We Salute You)       0.99
    1                        Balls to the Wall       0.99
    2                          Fast As a Shark       0.99
    3                        Restless and Wild       0.99
    4                     Princess of the Dawn       0.99
    5                    Put The Finger On You       0.99
    6                          Let's Get It Up       0.99
    7                         Inject The Venom       0.99
    8                               Snowballed       0.99
    9                               Evil Walks       0.99
    """
    with open(filename) as fp:
        q = fp.read()
    return self.query(q, data=data, union=union, limit=limit)
python
def query_from_file(self, filename, data=None, union=True, limit=None):
    """
    Query your database from a file.

    Parameters
    ----------
    filename: str
        A SQL script
    data: list, dict
        Optional argument for handlebars-queries. Data will be passed to the
        template and rendered using handlebars.
    union: bool
        Whether or not "UNION ALL" handlebars templates. This will return
        any handlebars queries as a single data frame.
    limit: int
        Number of records to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> q = '''
    ... SELECT
    ...   a.Title,
    ...   t.Name,
    ...   t.UnitPrice
    ... FROM
    ...   Album a
    ... INNER JOIN
    ...   Track t
    ...     on a.AlbumId = t.AlbumId;
    ... '''
    >>> with open("db/tests/myscript.sql", "w") as f:
    ...    f.write(q)
    109
    >>> len(db.query_from_file("db/tests/myscript.sql", limit=10))
    10

    db.query_from_file("db/tests/myscript.sql", limit=10)
                                       Title  \
    0  For Those About To Rock We Salute You
    1                      Balls to the Wall
    2                      Restless and Wild
    3                      Restless and Wild
    4                      Restless and Wild
    5  For Those About To Rock We Salute You
    6  For Those About To Rock We Salute You
    7  For Those About To Rock We Salute You
    8  For Those About To Rock We Salute You
    9  For Those About To Rock We Salute You

                                          Name  UnitPrice
    0  For Those About To Rock (We Salute You)       0.99
    1                        Balls to the Wall       0.99
    2                          Fast As a Shark       0.99
    3                        Restless and Wild       0.99
    4                     Princess of the Dawn       0.99
    5                    Put The Finger On You       0.99
    6                          Let's Get It Up       0.99
    7                         Inject The Venom       0.99
    8                               Snowballed       0.99
    9                               Evil Walks       0.99
    """
    with open(filename) as fp:
        q = fp.read()
    return self.query(q, data=data, union=union, limit=limit)
[ "def", "query_from_file", "(", "self", ",", "filename", ",", "data", "=", "None", ",", "union", "=", "True", ",", "limit", "=", "None", ")", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "q", "=", "fp", ".", "read", "(", ")", "return", "self", ".", "query", "(", "q", ",", "data", "=", "data", ",", "union", "=", "union", ",", "limit", "=", "limit", ")" ]
Query your database from a file. Parameters ---------- filename: str A SQL script data: list, dict Optional argument for handlebars-queries. Data will be passed to the template and rendered using handlebars. union: bool Whether or not "UNION ALL" handlebars templates. This will return any handlebars queries as a single data frame. limit: int Number of records to return Examples -------- >>> from db import DemoDB >>> db = DemoDB() >>> q = ''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... ''' >>> with open("db/tests/myscript.sql", "w") as f: ... f.write(q) 109 >>> len(db.query_from_file("db/tests/myscript.sql", limit=10)) 10 db.query_from_file("db/tests/myscript.sql", limit=10) Title \ 0 For Those About To Rock We Salute You 1 Balls to the Wall 2 Restless and Wild 3 Restless and Wild 4 Restless and Wild 5 For Those About To Rock We Salute You 6 For Those About To Rock We Salute You 7 For Those About To Rock We Salute You 8 For Those About To Rock We Salute You 9 For Those About To Rock We Salute You Name UnitPrice 0 For Those About To Rock (We Salute You) 0.99 1 Balls to the Wall 0.99 2 Fast As a Shark 0.99 3 Restless and Wild 0.99 4 Princess of the Dawn 0.99 5 Put The Finger On You 0.99 6 Let's Get It Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99
[ "Query", "your", "database", "from", "a", "file", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L704-L771
8,067
yhat/db.py
db/db.py
DB.refresh_schema
def refresh_schema(self, exclude_system_tables=True, use_cache=False):
    """
    Pulls your database's schema again and looks for any new tables and
    columns.
    """
    col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache)
    tables = self._gen_tables_from_col_tuples(col_meta)

    # Three modes for refreshing schema
    # 1. load directly from cache
    # 2. use a single query for getting all key relationships
    # 3. use the naive approach
    if use_cache:
        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       table_meta[t]['schema'], t, tables[t],
                                       keys_per_column=self.keys_per_column,
                                       foreign_keys=table_meta[t]['foreign_keys']['columns'],
                                       ref_keys=table_meta[t]['ref_keys']['columns'])
                                 for t in sorted(tables.keys())])
    # optimize the foreign/ref key query by doing it one time, database-wide, if query is available
    elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str):
        self.cur.execute(self._query_templates['system']['foreign_keys_for_db'])
        table_db_foreign_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_foreign_keys[rel[1]].append(rel)

        self.cur.execute(self._query_templates['system']['ref_keys_for_db'])
        table_db_ref_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_ref_keys[rel[1]].append(rel)

        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       tables[t][0].schema, t, tables[t],
                                       keys_per_column=self.keys_per_column,
                                       foreign_keys=table_db_foreign_keys[t],
                                       ref_keys=table_db_ref_keys[t])
                                 for t in sorted(tables.keys())])
    elif not use_cache:
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       tables[t][0].schema, t, tables[t],
                                       keys_per_column=self.keys_per_column)
                                 for t in sorted(tables.keys())])

    sys.stderr.write("done!\n")
python
def refresh_schema(self, exclude_system_tables=True, use_cache=False):
    """
    Pulls your database's schema again and looks for any new tables and
    columns.
    """
    col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache)
    tables = self._gen_tables_from_col_tuples(col_meta)

    # Three modes for refreshing schema
    # 1. load directly from cache
    # 2. use a single query for getting all key relationships
    # 3. use the naive approach
    if use_cache:
        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       table_meta[t]['schema'], t, tables[t],
                                       keys_per_column=self.keys_per_column,
                                       foreign_keys=table_meta[t]['foreign_keys']['columns'],
                                       ref_keys=table_meta[t]['ref_keys']['columns'])
                                 for t in sorted(tables.keys())])
    # optimize the foreign/ref key query by doing it one time, database-wide, if query is available
    elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str):
        self.cur.execute(self._query_templates['system']['foreign_keys_for_db'])
        table_db_foreign_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_foreign_keys[rel[1]].append(rel)

        self.cur.execute(self._query_templates['system']['ref_keys_for_db'])
        table_db_ref_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_ref_keys[rel[1]].append(rel)

        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       tables[t][0].schema, t, tables[t],
                                       keys_per_column=self.keys_per_column,
                                       foreign_keys=table_db_foreign_keys[t],
                                       ref_keys=table_db_ref_keys[t])
                                 for t in sorted(tables.keys())])
    elif not use_cache:
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       tables[t][0].schema, t, tables[t],
                                       keys_per_column=self.keys_per_column)
                                 for t in sorted(tables.keys())])

    sys.stderr.write("done!\n")
[ "def", "refresh_schema", "(", "self", ",", "exclude_system_tables", "=", "True", ",", "use_cache", "=", "False", ")", ":", "col_meta", ",", "table_meta", "=", "self", ".", "_get_db_metadata", "(", "exclude_system_tables", ",", "use_cache", ")", "tables", "=", "self", ".", "_gen_tables_from_col_tuples", "(", "col_meta", ")", "# Three modes for refreshing schema", "# 1. load directly from cache", "# 2. use a single query for getting all key relationships", "# 3. use the naive approach", "if", "use_cache", ":", "# generate our Tables, and load them into a TableSet", "self", ".", "_tables", "=", "TableSet", "(", "[", "Table", "(", "self", ".", "con", ",", "self", ".", "_query_templates", ",", "table_meta", "[", "t", "]", "[", "'schema'", "]", ",", "t", ",", "tables", "[", "t", "]", ",", "keys_per_column", "=", "self", ".", "keys_per_column", ",", "foreign_keys", "=", "table_meta", "[", "t", "]", "[", "'foreign_keys'", "]", "[", "'columns'", "]", ",", "ref_keys", "=", "table_meta", "[", "t", "]", "[", "'ref_keys'", "]", "[", "'columns'", "]", ")", "for", "t", "in", "sorted", "(", "tables", ".", "keys", "(", ")", ")", "]", ")", "# optimize the foreign/ref key query by doing it one time, database-wide, if query is available", "elif", "not", "use_cache", "and", "isinstance", "(", "self", ".", "_query_templates", ".", "get", "(", "'system'", ",", "{", "}", ")", ".", "get", "(", "'foreign_keys_for_db'", ",", "None", ")", ",", "str", ")", ":", "self", ".", "cur", ".", "execute", "(", "self", ".", "_query_templates", "[", "'system'", "]", "[", "'foreign_keys_for_db'", "]", ")", "table_db_foreign_keys", "=", "defaultdict", "(", "list", ")", "for", "rel", "in", "self", ".", "cur", ":", "# second value in relationship tuple is the table name", "table_db_foreign_keys", "[", "rel", "[", "1", "]", "]", ".", "append", "(", "rel", ")", "self", ".", "cur", ".", "execute", "(", "self", ".", "_query_templates", "[", "'system'", "]", "[", "'ref_keys_for_db'", "]", ")", "table_db_ref_keys", "=", "defaultdict", "(", "list", ")", "for", "rel", "in", "self", ".", "cur", ":", "# second value in relationship tuple is the table name", "table_db_ref_keys", "[", "rel", "[", "1", "]", "]", ".", "append", "(", "rel", ")", "# generate our Tables, and load them into a TableSet", "self", ".", "_tables", "=", "TableSet", "(", "[", "Table", "(", "self", ".", "con", ",", "self", ".", "_query_templates", ",", "tables", "[", "t", "]", "[", "0", "]", ".", "schema", ",", "t", ",", "tables", "[", "t", "]", ",", "keys_per_column", "=", "self", ".", "keys_per_column", ",", "foreign_keys", "=", "table_db_foreign_keys", "[", "t", "]", ",", "ref_keys", "=", "table_db_ref_keys", "[", "t", "]", ")", "for", "t", "in", "sorted", "(", "tables", ".", "keys", "(", ")", ")", "]", ")", "elif", "not", "use_cache", ":", "self", ".", "_tables", "=", "TableSet", "(", "[", "Table", "(", "self", ".", "con", ",", "self", ".", "_query_templates", ",", "tables", "[", "t", "]", "[", "0", "]", ".", "schema", ",", "t", ",", "tables", "[", "t", "]", ",", "keys_per_column", "=", "self", ".", "keys_per_column", ")", "for", "t", "in", "sorted", "(", "tables", ".", "keys", "(", ")", ")", "]", ")", "sys", ".", "stderr", ".", "write", "(", "\"done!\\n\"", ")" ]
Pulls your database's schema again and looks for any new tables and columns.
[ "Pulls", "your", "database", "s", "schema", "again", "and", "looks", "for", "any", "new", "tables", "and", "columns", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L809-L854
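A minimal usage sketch for the refresh_schema record above, assuming a db.py DB connection; the SQLite filename is a placeholder, not from the source.

# Hypothetical usage of DB.refresh_schema (db.py); the filename is illustrative.
from db import DB

db = DB(filename="example.sqlite", dbtype="sqlite")  # connection details assumed
db.refresh_schema(exclude_system_tables=True, use_cache=False)  # re-reads column metadata
print(db.tables)  # TableSet rebuilt from the refreshed schema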
8,068
yhat/db.py
db/db.py
DB.to_dict
def to_dict(self): """Dict representation of the database as credentials plus tables dict representation.""" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict
python
def to_dict(self): """Dict representation of the database as credentials plus tables dict representation.""" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict
[ "def", "to_dict", "(", "self", ")", ":", "db_dict", "=", "self", ".", "credentials", "db_dict", ".", "update", "(", "self", ".", "tables", ".", "to_dict", "(", ")", ")", "return", "db_dict" ]
Dict representation of the database as credentials plus tables dict representation.
[ "Dict", "representation", "of", "the", "database", "as", "credentials", "plus", "tables", "dict", "representation", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L1052-L1056
8,069
yhat/db.py
db/utils.py
profile_path
def profile_path(profile_id, profile): """Create full path to given profile for the current user.""" user = os.path.expanduser("~") return os.path.join(user, profile_id + profile)
python
def profile_path(profile_id, profile): """Create full path to given profile for the current user.""" user = os.path.expanduser("~") return os.path.join(user, profile_id + profile)
[ "def", "profile_path", "(", "profile_id", ",", "profile", ")", ":", "user", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "return", "os", ".", "path", ".", "join", "(", "user", ",", "profile_id", "+", "profile", ")" ]
Create full path to given profile for the current user.
[ "Create", "full", "path", "to", "given", "provide", "for", "the", "current", "user", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/utils.py#L5-L8
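For illustration, the concatenation above makes the profile id act as a dotfile prefix and the profile name as its suffix; the id value below is hypothetical, not taken from the source.

import os

# profile_path(".db.py_s3_", "dev") would yield something like
# /home/alice/.db.py_s3_dev -- the profile id here is a made-up example.
home = os.path.expanduser("~")
print(os.path.join(home, ".db.py_s3_" + "dev"))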
8,070
yhat/db.py
db/utils.py
load_from_json
def load_from_json(file_path): """Load the stored data from json, and return as a dict.""" if os.path.exists(file_path): raw_data = open(file_path, 'rb').read() return json.loads(base64.decodebytes(raw_data).decode('utf-8'))
python
def load_from_json(file_path): """Load the stored data from json, and return as a dict.""" if os.path.exists(file_path): raw_data = open(file_path, 'rb').read() return json.loads(base64.decodebytes(raw_data).decode('utf-8'))
[ "def", "load_from_json", "(", "file_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "raw_data", "=", "open", "(", "file_path", ",", "'rb'", ")", ".", "read", "(", ")", "return", "json", ".", "loads", "(", "base64", ".", "decodestring", "(", "raw_data", ")", ".", "decode", "(", "'utf-8'", ")", ")" ]
Load the stored data from json, and return as a dict.
[ "Load", "the", "stored", "data", "from", "json", "and", "return", "as", "a", "dict", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/utils.py#L13-L17
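A hedged sketch of the inverse writer; a dump_to_json helper is referenced by the save_credentials record below, but this body is an assumption inferred from load_from_json above, not the library's actual implementation.

import base64
import json

def dump_to_json(file_path, data):  # assumed counterpart to load_from_json
    raw = json.dumps(data).encode('utf-8')
    with open(file_path, 'wb') as f:
        f.write(base64.encodebytes(raw))  # mirrors base64.decodebytes in the loader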
8,071
yhat/db.py
db/s3.py
S3.save_credentials
def save_credentials(self, profile): """ Saves credentials to a dotfile so you can grab them later. Parameters ---------- profile: str name for your profile (e.g. "dev", "prod") """ filename = profile_path(S3_PROFILE_ID, profile) creds = { "access_key": self.access_key, "secret_key": self.secret_key } dump_to_json(filename, creds)
python
def save_credentials(self, profile): """ Saves credentials to a dotfile so you can grab them later. Parameters ---------- profile: str name for your profile (e.g. "dev", "prod") """ filename = profile_path(S3_PROFILE_ID, profile) creds = { "access_key": self.access_key, "secret_key": self.secret_key } dump_to_json(filename, creds)
[ "def", "save_credentials", "(", "self", ",", "profile", ")", ":", "filename", "=", "profile_path", "(", "S3_PROFILE_ID", ",", "profile", ")", "creds", "=", "{", "\"access_key\"", ":", "self", ".", "access_key", ",", "\"secret_key\"", ":", "self", ".", "secret_key", "}", "dump_to_json", "(", "filename", ",", "creds", ")" ]
Saves credentials to a dotfile so you can grab them later. Parameters ---------- profile: str name for your profile (e.g. "dev", "prod")
[ "Saves", "credentials", "to", "a", "dotfile", "so", "you", "can", "open", "them", "grab", "them", "later", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/s3.py#L16-L30
8,072
yhat/db.py
db/column.py
Column.to_dict
def to_dict(self): """ Serialize representation of the column for local caching. """ return {'schema': self.schema, 'table': self.table, 'name': self.name, 'type': self.type}
python
def to_dict(self): """ Serialize representation of the column for local caching. """ return {'schema': self.schema, 'table': self.table, 'name': self.name, 'type': self.type}
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'schema'", ":", "self", ".", "schema", ",", "'table'", ":", "self", ".", "table", ",", "'name'", ":", "self", ".", "name", ",", "'type'", ":", "self", ".", "type", "}" ]
Serialize representation of the column for local caching.
[ "Serialize", "representation", "of", "the", "column", "for", "local", "caching", "." ]
df2dbb8ef947c2d4253d31f29eb58c4084daffc5
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/column.py#L192-L196
8,073
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.rate_limit
def rate_limit(self, rate_limit): """ Turn on or off rate limiting """ self._rate_limit = bool(rate_limit) self._rate_limit_last_call = None self.clear_memoized()
python
def rate_limit(self, rate_limit): """ Turn on or off rate limiting """ self._rate_limit = bool(rate_limit) self._rate_limit_last_call = None self.clear_memoized()
[ "def", "rate_limit", "(", "self", ",", "rate_limit", ")", ":", "self", ".", "_rate_limit", "=", "bool", "(", "rate_limit", ")", "self", ".", "_rate_limit_last_call", "=", "None", "self", ".", "clear_memoized", "(", ")" ]
Turn on or off rate limiting
[ "Turn", "on", "or", "off", "rate", "limiting" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L136-L140
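Since rate_limit is a property setter, enabling it is a single assignment; a minimal sketch using the library's public import:

from mediawiki import MediaWiki

wiki = MediaWiki()
wiki.rate_limit = True  # coerced to bool; resets the last-call timestamp and clears memoized results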
8,074
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.language
def language(self, lang): """ Set the language to use; attempts to change the API URL """ lang = lang.lower() if self._lang == lang: return url = self._api_url tmp = url.replace("/{0}.".format(self._lang), "/{0}.".format(lang)) self._api_url = tmp self._lang = lang self.clear_memoized()
python
def language(self, lang): """ Set the language to use; attempts to change the API URL """ lang = lang.lower() if self._lang == lang: return url = self._api_url tmp = url.replace("/{0}.".format(self._lang), "/{0}.".format(lang)) self._api_url = tmp self._lang = lang self.clear_memoized()
[ "def", "language", "(", "self", ",", "lang", ")", ":", "lang", "=", "lang", ".", "lower", "(", ")", "if", "self", ".", "_lang", "==", "lang", ":", "return", "url", "=", "self", ".", "_api_url", "tmp", "=", "url", ".", "replace", "(", "\"/{0}.\"", ".", "format", "(", "self", ".", "_lang", ")", ",", "\"/{0}.\"", ".", "format", "(", "lang", ")", ")", "self", ".", "_api_url", "=", "tmp", "self", ".", "_lang", "=", "lang", "self", ".", "clear_memoized", "(", ")" ]
Set the language to use; attempts to change the API URL
[ "Set", "the", "language", "to", "use", ";", "attempts", "to", "change", "the", "API", "URL" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L197-L208
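The setter above swaps the language segment of the API URL in place, lowercasing the input first; for example:

from mediawiki import MediaWiki

wiki = MediaWiki()    # defaults to https://en.wikipedia.org/w/api.php
wiki.language = "FR"  # lowercased; URL becomes https://fr.wikipedia.org/w/api.php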
8,075
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.refresh_interval
def refresh_interval(self, refresh_interval): """ Set the new cache refresh interval """ if isinstance(refresh_interval, int) and refresh_interval > 0: self._refresh_interval = refresh_interval else: self._refresh_interval = None
python
def refresh_interval(self, refresh_interval): """ Set the new cache refresh interval """ if isinstance(refresh_interval, int) and refresh_interval > 0: self._refresh_interval = refresh_interval else: self._refresh_interval = None
[ "def", "refresh_interval", "(", "self", ",", "refresh_interval", ")", ":", "if", "isinstance", "(", "refresh_interval", ",", "int", ")", "and", "refresh_interval", ">", "0", ":", "self", ".", "_refresh_interval", "=", "refresh_interval", "else", ":", "self", ".", "_refresh_interval", "=", "None" ]
Set the new cache refresh interval
[ "Set", "the", "new", "cache", "refresh", "interval" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L262-L267
8,076
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.login
def login(self, username, password, strict=True): """ Login as specified user Args: username (str): The username to log in with password (str): The password for the user strict (bool): `True` to throw an error on failure Returns: bool: `True` if successfully logged in; `False` otherwise Raises: :py:func:`mediawiki.exceptions.MediaWikiLoginError`: if \ unable to login Note: Per the MediaWiki API, one should use the `bot password`; \ see https://www.mediawiki.org/wiki/API:Login for more \ information """ # get login token params = { "action": "query", "meta": "tokens", "type": "login", "format": "json", } token_res = self._get_response(params) if "query" in token_res and "tokens" in token_res["query"]: token = token_res["query"]["tokens"]["logintoken"] params = { "action": "login", "lgname": username, "lgpassword": password, "lgtoken": token, "format": "json", } res = self._post_response(params) if res["login"]["result"] == "Success": self._is_logged_in = True return True self._is_logged_in = False reason = res["login"]["reason"] if strict: msg = "MediaWiki login failure: {}".format(reason) raise MediaWikiLoginError(msg) return False
python
def login(self, username, password, strict=True): """ Login as specified user Args: username (str): The username to log in with password (str): The password for the user strict (bool): `True` to throw an error on failure Returns: bool: `True` if successfully logged in; `False` otherwise Raises: :py:func:`mediawiki.exceptions.MediaWikiLoginError`: if \ unable to login Note: Per the MediaWiki API, one should use the `bot password`; \ see https://www.mediawiki.org/wiki/API:Login for more \ information """ # get login token params = { "action": "query", "meta": "tokens", "type": "login", "format": "json", } token_res = self._get_response(params) if "query" in token_res and "tokens" in token_res["query"]: token = token_res["query"]["tokens"]["logintoken"] params = { "action": "login", "lgname": username, "lgpassword": password, "lgtoken": token, "format": "json", } res = self._post_response(params) if res["login"]["result"] == "Success": self._is_logged_in = True return True self._is_logged_in = False reason = res["login"]["reason"] if strict: msg = "MediaWiki login failure: {}".format(reason) raise MediaWikiLoginError(msg) return False
[ "def", "login", "(", "self", ",", "username", ",", "password", ",", "strict", "=", "True", ")", ":", "# get login token", "params", "=", "{", "\"action\"", ":", "\"query\"", ",", "\"meta\"", ":", "\"tokens\"", ",", "\"type\"", ":", "\"login\"", ",", "\"format\"", ":", "\"json\"", ",", "}", "token_res", "=", "self", ".", "_get_response", "(", "params", ")", "if", "\"query\"", "in", "token_res", "and", "\"tokens\"", "in", "token_res", "[", "\"query\"", "]", ":", "token", "=", "token_res", "[", "\"query\"", "]", "[", "\"tokens\"", "]", "[", "\"logintoken\"", "]", "params", "=", "{", "\"action\"", ":", "\"login\"", ",", "\"lgname\"", ":", "username", ",", "\"lgpassword\"", ":", "password", ",", "\"lgtoken\"", ":", "token", ",", "\"format\"", ":", "\"json\"", ",", "}", "res", "=", "self", ".", "_post_response", "(", "params", ")", "if", "res", "[", "\"login\"", "]", "[", "\"result\"", "]", "==", "\"Success\"", ":", "self", ".", "_is_logged_in", "=", "True", "return", "True", "self", ".", "_is_logged_in", "=", "False", "reason", "=", "res", "[", "\"login\"", "]", "[", "\"reason\"", "]", "if", "strict", ":", "msg", "=", "\"MediaWiki login failure: {}\"", ".", "format", "(", "reason", ")", "raise", "MediaWikiLoginError", "(", "msg", ")", "return", "False" ]
Login as specified user Args: username (str): The username to log in with password (str): The password for the user strict (bool): `True` to throw an error on failure Returns: bool: `True` if successfully logged in; `False` otherwise Raises: :py:func:`mediawiki.exceptions.MediaWikiLoginError`: if \ unable to login Note: Per the MediaWiki API, one should use the `bot password`; \ see https://www.mediawiki.org/wiki/API:Login for more \ information
[ "Login", "as", "specified", "user" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L269-L314
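A usage sketch for login; the bot credentials below are placeholders (per the docstring, a MediaWiki bot password is expected):

from mediawiki import MediaWiki

wiki = MediaWiki()
# Placeholder credentials -- substitute a real bot username/password.
if wiki.login("MyBot@my_tool", "botpassword123", strict=False):
    print("logged in")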
8,077
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.set_api_url
def set_api_url(self, api_url="https://{lang}.wikipedia.org/w/api.php", lang="en"): """ Set the API URL and language Args: api_url (str): API URL to use lang (str): Language of the API URL Raises: :py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \ url is not a valid MediaWiki site """ old_api_url = self._api_url old_lang = self._lang self._lang = lang.lower() self._api_url = api_url.format(lang=self._lang) try: self._get_site_info() self.__supported_languages = None # reset this except MediaWikiException: # reset api url and lang in the event that the exception was caught self._api_url = old_api_url self._lang = old_lang raise MediaWikiAPIURLError(api_url) self.clear_memoized()
python
def set_api_url(self, api_url="https://{lang}.wikipedia.org/w/api.php", lang="en"): """ Set the API URL and language Args: api_url (str): API URL to use lang (str): Language of the API URL Raises: :py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \ url is not a valid MediaWiki site """ old_api_url = self._api_url old_lang = self._lang self._lang = lang.lower() self._api_url = api_url.format(lang=self._lang) try: self._get_site_info() self.__supported_languages = None # reset this except MediaWikiException: # reset api url and lang in the event that the exception was caught self._api_url = old_api_url self._lang = old_lang raise MediaWikiAPIURLError(api_url) self.clear_memoized()
[ "def", "set_api_url", "(", "self", ",", "api_url", "=", "\"https://{lang}.wikipedia.org/w/api.php\"", ",", "lang", "=", "\"en\"", ")", ":", "old_api_url", "=", "self", ".", "_api_url", "old_lang", "=", "self", ".", "_lang", "self", ".", "_lang", "=", "lang", ".", "lower", "(", ")", "self", ".", "_api_url", "=", "api_url", ".", "format", "(", "lang", "=", "self", ".", "_lang", ")", "try", ":", "self", ".", "_get_site_info", "(", ")", "self", ".", "__supported_languages", "=", "None", "# reset this", "except", "MediaWikiException", ":", "# reset api url and lang in the event that the exception was caught", "self", ".", "_api_url", "=", "old_api_url", "self", ".", "_lang", "=", "old_lang", "raise", "MediaWikiAPIURLError", "(", "api_url", ")", "self", ".", "clear_memoized", "(", ")" ]
Set the API URL and language Args: api_url (str): API URL to use lang (str): Language of the API URL Raises: :py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \ url is not a valid MediaWiki site
[ "Set", "the", "API", "URL", "and", "language" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L317-L338
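The {lang} placeholder in api_url is filled from lang, so pointing the client at another MediaWiki site looks like this sketch:

from mediawiki import MediaWiki

wiki = MediaWiki()
# An invalid site raises MediaWikiAPIURLError and restores the previous URL/lang.
wiki.set_api_url(api_url="https://{lang}.wiktionary.org/w/api.php", lang="en")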
8,078
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._reset_session
def _reset_session(self): """ Set session information """ headers = {"User-Agent": self._user_agent} self._session = requests.Session() self._session.headers.update(headers) self._is_logged_in = False
python
def _reset_session(self): """ Set session information """ headers = {"User-Agent": self._user_agent} self._session = requests.Session() self._session.headers.update(headers) self._is_logged_in = False
[ "def", "_reset_session", "(", "self", ")", ":", "headers", "=", "{", "\"User-Agent\"", ":", "self", ".", "_user_agent", "}", "self", ".", "_session", "=", "requests", ".", "Session", "(", ")", "self", ".", "_session", ".", "headers", ".", "update", "(", "headers", ")", "self", ".", "_is_logged_in", "=", "False" ]
Set session information
[ "Set", "session", "information" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L340-L345
8,079
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.random
def random(self, pages=1): """ Request a random page title or list of random titles Args: pages (int): Number of random pages to return Returns: list or str: A list of random page titles or a random page \ title if pages = 1 """ if pages is None or pages < 1: raise ValueError("Number of pages must be greater than 0") query_params = {"list": "random", "rnnamespace": 0, "rnlimit": pages} request = self.wiki_request(query_params) titles = [page["title"] for page in request["query"]["random"]] if len(titles) == 1: return titles[0] return titles
python
def random(self, pages=1): """ Request a random page title or list of random titles Args: pages (int): Number of random pages to return Returns: list or str: A list of random page titles or a random page \ title if pages = 1 """ if pages is None or pages < 1: raise ValueError("Number of pages must be greater than 0") query_params = {"list": "random", "rnnamespace": 0, "rnlimit": pages} request = self.wiki_request(query_params) titles = [page["title"] for page in request["query"]["random"]] if len(titles) == 1: return titles[0] return titles
[ "def", "random", "(", "self", ",", "pages", "=", "1", ")", ":", "if", "pages", "is", "None", "or", "pages", "<", "1", ":", "raise", "ValueError", "(", "\"Number of pages must be greater than 0\"", ")", "query_params", "=", "{", "\"list\"", ":", "\"random\"", ",", "\"rnnamespace\"", ":", "0", ",", "\"rnlimit\"", ":", "pages", "}", "request", "=", "self", ".", "wiki_request", "(", "query_params", ")", "titles", "=", "[", "page", "[", "\"title\"", "]", "for", "page", "in", "request", "[", "\"query\"", "]", "[", "\"random\"", "]", "]", "if", "len", "(", "titles", ")", "==", "1", ":", "return", "titles", "[", "0", "]", "return", "titles" ]
Request a random page title or list of random titles Args: pages (int): Number of random pages to return Returns: list or str: A list of random page titles or a random page \ title if pages = 1
[ "Request", "a", "random", "page", "title", "or", "list", "of", "random", "titles" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L371-L389
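Because a single result is unwrapped from the list, callers should mind the return type switching on pages; a short sketch:

from mediawiki import MediaWiki

wiki = MediaWiki()
title = wiki.random()          # pages=1 -> a single str
titles = wiki.random(pages=5)  # pages>1 -> a list of str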
8,080
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.search
def search(self, query, results=10, suggestion=False): """ Search for similar titles Args: query (str): Page title results (int): Number of pages to return suggestion (bool): Use suggestion Returns: tuple or list: tuple (list results, suggestion) if \ suggestion is **True**; list of results \ otherwise """ self._check_query(query, "Query must be specified") search_params = { "list": "search", "srprop": "", "srlimit": results, "srsearch": query, } if suggestion: search_params["srinfo"] = "suggestion" raw_results = self.wiki_request(search_params) self._check_error_response(raw_results, query) search_results = [d["title"] for d in raw_results["query"]["search"]] if suggestion: sug = None if raw_results["query"].get("searchinfo"): sug = raw_results["query"]["searchinfo"]["suggestion"] return search_results, sug return search_results
python
def search(self, query, results=10, suggestion=False): """ Search for similar titles Args: query (str): Page title results (int): Number of pages to return suggestion (bool): Use suggestion Returns: tuple or list: tuple (list results, suggestion) if \ suggestion is **True**; list of results \ otherwise """ self._check_query(query, "Query must be specified") search_params = { "list": "search", "srprop": "", "srlimit": results, "srsearch": query, } if suggestion: search_params["srinfo"] = "suggestion" raw_results = self.wiki_request(search_params) self._check_error_response(raw_results, query) search_results = [d["title"] for d in raw_results["query"]["search"]] if suggestion: sug = None if raw_results["query"].get("searchinfo"): sug = raw_results["query"]["searchinfo"]["suggestion"] return search_results, sug return search_results
[ "def", "search", "(", "self", ",", "query", ",", "results", "=", "10", ",", "suggestion", "=", "False", ")", ":", "self", ".", "_check_query", "(", "query", ",", "\"Query must be specified\"", ")", "search_params", "=", "{", "\"list\"", ":", "\"search\"", ",", "\"srprop\"", ":", "\"\"", ",", "\"srlimit\"", ":", "results", ",", "\"srsearch\"", ":", "query", ",", "}", "if", "suggestion", ":", "search_params", "[", "\"srinfo\"", "]", "=", "\"suggestion\"", "raw_results", "=", "self", ".", "wiki_request", "(", "search_params", ")", "self", ".", "_check_error_response", "(", "raw_results", ",", "query", ")", "search_results", "=", "[", "d", "[", "\"title\"", "]", "for", "d", "in", "raw_results", "[", "\"query\"", "]", "[", "\"search\"", "]", "]", "if", "suggestion", ":", "sug", "=", "None", "if", "raw_results", "[", "\"query\"", "]", ".", "get", "(", "\"searchinfo\"", ")", ":", "sug", "=", "raw_results", "[", "\"query\"", "]", "[", "\"searchinfo\"", "]", "[", "\"suggestion\"", "]", "return", "search_results", ",", "sug", "return", "search_results" ]
Search for similar titles Args: query (str): Page title results (int): Number of pages to return suggestion (bool): Use suggestion Returns: tuple or list: tuple (list results, suggestion) if \ suggestion is **True**; list of results \ otherwise
[ "Search", "for", "similar", "titles" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L392-L426
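With suggestion=True the return shape changes to a (results, suggestion) tuple, so a typical call unpacks it:

from mediawiki import MediaWiki

wiki = MediaWiki()
results, suggestion = wiki.search("chess prodigy", results=5, suggestion=True)
print(results)     # up to 5 matching titles
print(suggestion)  # a suggested query string, or None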
8,081
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.suggest
def suggest(self, query): """ Gather suggestions based on the provided title or None if no suggestions found Args: query (str): Page title Returns: String or None: Suggested page title or **None** if no \ suggestion found """ res, suggest = self.search(query, results=1, suggestion=True) try: title = suggest or res[0] except IndexError: # page doesn't exist title = None return title
python
def suggest(self, query): """ Gather suggestions based on the provided title or None if no suggestions found Args: query (str): Page title Returns: String or None: Suggested page title or **None** if no \ suggestion found """ res, suggest = self.search(query, results=1, suggestion=True) try: title = suggest or res[0] except IndexError: # page doesn't exist title = None return title
[ "def", "suggest", "(", "self", ",", "query", ")", ":", "res", ",", "suggest", "=", "self", ".", "search", "(", "query", ",", "results", "=", "1", ",", "suggestion", "=", "True", ")", "try", ":", "title", "=", "suggest", "or", "res", "[", "0", "]", "except", "IndexError", ":", "# page doesn't exist", "title", "=", "None", "return", "title" ]
Gather suggestions based on the provided title or None if no suggestions found Args: query (str): Page title Returns: String or None: Suggested page title or **None** if no \ suggestion found
[ "Gather", "suggestions", "based", "on", "the", "provided", "title", "or", "None", "if", "no", "suggestions", "found" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L429-L443
8,082
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.geosearch
def geosearch( self, latitude=None, longitude=None, radius=1000, title=None, auto_suggest=True, results=10, ): """ Search for pages that relate to the provided geocoords or near the page Args: latitude (Decimal or None): Latitude geocoord; must be \ coercible to decimal longitude (Decimal or None): Longitude geocoord; must be \ coercible to decimal radius (int): Radius around page or geocoords to pull back; \ in meters title (str): Page title to use as a geocoordinate; this has \ precedence over lat/long auto_suggest (bool): Auto-suggest the page title results (int): Number of pages within the radius to return Returns: list: A listing of page titles Raises: ValueError: If either the passed latitude or longitude is \ not coercible to a Decimal """ def test_lat_long(val): """ handle testing lat and long """ if not isinstance(val, Decimal): error = ( "Latitude and Longitude must be specified either as " "a Decimal or in formats that can be coerced into " "a Decimal." ) try: return Decimal(val) except (DecimalException, TypeError): raise ValueError(error) return val # end local function params = {"list": "geosearch", "gsradius": radius, "gslimit": results} if title is not None: if auto_suggest: title = self.suggest(title) params["gspage"] = title else: lat = test_lat_long(latitude) lon = test_lat_long(longitude) params["gscoord"] = "{0}|{1}".format(lat, lon) raw_results = self.wiki_request(params) self._check_error_response(raw_results, title) return [d["title"] for d in raw_results["query"]["geosearch"]]
python
def geosearch( self, latitude=None, longitude=None, radius=1000, title=None, auto_suggest=True, results=10, ): """ Search for pages that relate to the provided geocoords or near the page Args: latitude (Decimal or None): Latitude geocoord; must be \ coercible to decimal longitude (Decimal or None): Longitude geocoord; must be \ coercible to decimal radius (int): Radius around page or geocoords to pull back; \ in meters title (str): Page title to use as a geocoordinate; this has \ precedence over lat/long auto_suggest (bool): Auto-suggest the page title results (int): Number of pages within the radius to return Returns: list: A listing of page titles Raises: ValueError: If either the passed latitude or longitude is \ not coercible to a Decimal """ def test_lat_long(val): """ handle testing lat and long """ if not isinstance(val, Decimal): error = ( "Latitude and Longitude must be specified either as " "a Decimal or in formats that can be coerced into " "a Decimal." ) try: return Decimal(val) except (DecimalException, TypeError): raise ValueError(error) return val # end local function params = {"list": "geosearch", "gsradius": radius, "gslimit": results} if title is not None: if auto_suggest: title = self.suggest(title) params["gspage"] = title else: lat = test_lat_long(latitude) lon = test_lat_long(longitude) params["gscoord"] = "{0}|{1}".format(lat, lon) raw_results = self.wiki_request(params) self._check_error_response(raw_results, title) return [d["title"] for d in raw_results["query"]["geosearch"]]
[ "def", "geosearch", "(", "self", ",", "latitude", "=", "None", ",", "longitude", "=", "None", ",", "radius", "=", "1000", ",", "title", "=", "None", ",", "auto_suggest", "=", "True", ",", "results", "=", "10", ",", ")", ":", "def", "test_lat_long", "(", "val", ")", ":", "\"\"\" handle testing lat and long \"\"\"", "if", "not", "isinstance", "(", "val", ",", "Decimal", ")", ":", "error", "=", "(", "\"Latitude and Longitude must be specified either as \"", "\"a Decimal or in formats that can be coerced into \"", "\"a Decimal.\"", ")", "try", ":", "return", "Decimal", "(", "val", ")", "except", "(", "DecimalException", ",", "TypeError", ")", ":", "raise", "ValueError", "(", "error", ")", "return", "val", "# end local function", "params", "=", "{", "\"list\"", ":", "\"geosearch\"", ",", "\"gsradius\"", ":", "radius", ",", "\"gslimit\"", ":", "results", "}", "if", "title", "is", "not", "None", ":", "if", "auto_suggest", ":", "title", "=", "self", ".", "suggest", "(", "title", ")", "params", "[", "\"gspage\"", "]", "=", "title", "else", ":", "lat", "=", "test_lat_long", "(", "latitude", ")", "lon", "=", "test_lat_long", "(", "longitude", ")", "params", "[", "\"gscoord\"", "]", "=", "\"{0}|{1}\"", ".", "format", "(", "lat", ",", "lon", ")", "raw_results", "=", "self", ".", "wiki_request", "(", "params", ")", "self", ".", "_check_error_response", "(", "raw_results", ",", "title", ")", "return", "[", "d", "[", "\"title\"", "]", "for", "d", "in", "raw_results", "[", "\"query\"", "]", "[", "\"geosearch\"", "]", "]" ]
Search for pages that relate to the provided geocoords or near the page Args: latitude (Decimal or None): Latitude geocoord; must be \ coercible to decimal longitude (Decimal or None): Longitude geocoord; must be \ coercible to decimal radius (int): Radius around page or geocoords to pull back; \ in meters title (str): Page title to use as a geocoordinate; this has \ precedence over lat/long auto_suggest (bool): Auto-suggest the page title results (int): Number of pages within the radius to return Returns: list: A listing of page titles Raises: ValueError: If either the passed latitude or longitude is \ not coercible to a Decimal
[ "Search", "for", "pages", "that", "relate", "to", "the", "provided", "geocoords", "or", "near", "the", "page" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L446-L505
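Coordinates are validated through Decimal, so both Decimal instances and coercible strings work; the coordinates below are illustrative:

from decimal import Decimal
from mediawiki import MediaWiki

wiki = MediaWiki()
# Roughly central Paris; any Decimal-coercible value is accepted.
titles = wiki.geosearch(latitude=Decimal("48.8566"), longitude="2.3522", radius=1000)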
8,083
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.opensearch
def opensearch(self, query, results=10, redirect=True): """ Execute a MediaWiki opensearch request, similar to search box suggestions and conforming to the OpenSearch specification Args: query (str): Title to search for results (int): Number of results to return redirect (bool): If **False** return the redirect itself, \ otherwise resolve redirects Returns: List: List of results that are stored in a tuple \ (Title, Summary, URL) """ self._check_query(query, "Query must be specified") query_params = { "action": "opensearch", "search": query, "limit": (100 if results > 100 else results), "redirects": ("resolve" if redirect else "return"), "warningsaserror": True, "namespace": "", } results = self.wiki_request(query_params) self._check_error_response(results, query) res = list() for i, item in enumerate(results[1]): res.append((item, results[2][i], results[3][i])) return res
python
def opensearch(self, query, results=10, redirect=True): """ Execute a MediaWiki opensearch request, similar to search box suggestions and conforming to the OpenSearch specification Args: query (str): Title to search for results (int): Number of results to return redirect (bool): If **False** return the redirect itself, \ otherwise resolve redirects Returns: List: List of results that are stored in a tuple \ (Title, Summary, URL) """ self._check_query(query, "Query must be specified") query_params = { "action": "opensearch", "search": query, "limit": (100 if results > 100 else results), "redirects": ("resolve" if redirect else "return"), "warningsaserror": True, "namespace": "", } results = self.wiki_request(query_params) self._check_error_response(results, query) res = list() for i, item in enumerate(results[1]): res.append((item, results[2][i], results[3][i])) return res
[ "def", "opensearch", "(", "self", ",", "query", ",", "results", "=", "10", ",", "redirect", "=", "True", ")", ":", "self", ".", "_check_query", "(", "query", ",", "\"Query must be specified\"", ")", "query_params", "=", "{", "\"action\"", ":", "\"opensearch\"", ",", "\"search\"", ":", "query", ",", "\"limit\"", ":", "(", "100", "if", "results", ">", "100", "else", "results", ")", ",", "\"redirects\"", ":", "(", "\"resolve\"", "if", "redirect", "else", "\"return\"", ")", ",", "\"warningsaserror\"", ":", "True", ",", "\"namespace\"", ":", "\"\"", ",", "}", "results", "=", "self", ".", "wiki_request", "(", "query_params", ")", "self", ".", "_check_error_response", "(", "results", ",", "query", ")", "res", "=", "list", "(", ")", "for", "i", ",", "item", "in", "enumerate", "(", "results", "[", "1", "]", ")", ":", "res", ".", "append", "(", "(", "item", ",", "results", "[", "2", "]", "[", "i", "]", ",", "results", "[", "3", "]", "[", "i", "]", ")", ")", "return", "res" ]
Execute a MediaWiki opensearch request, similar to search box suggestions and conforming to the OpenSearch specification Args: query (str): Title to search for results (int): Number of results to return redirect (bool): If **False** return the redirect itself, \ otherwise resolve redirects Returns: List: List of results that are stored in a tuple \ (Title, Summary, URL)
[ "Execute", "a", "MediaWiki", "opensearch", "request", "similar", "to", "search", "box", "suggestions", "and", "conforming", "to", "the", "OpenSearch", "specification" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L508-L540
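Each opensearch result is a (title, summary, URL) tuple assembled from the parallel arrays in the response, so iteration is straightforward:

from mediawiki import MediaWiki

wiki = MediaWiki()
for title, summary, url in wiki.opensearch("new york", results=3):
    print(title, url)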
8,084
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.prefixsearch
def prefixsearch(self, prefix, results=10): """ Perform a prefix search using the provided prefix string Args: prefix (str): Prefix string to use for search results (int): Number of pages with the prefix to return Returns: list: List of page titles Note: **Per the documentation:** "The purpose of this module is \ similar to action=opensearch: to take user input and provide \ the best-matching titles. Depending on the search engine \ backend, this might include typo correction, redirect \ avoidance, or other heuristics." """ self._check_query(prefix, "Prefix must be specified") query_params = { "list": "prefixsearch", "pssearch": prefix, "pslimit": ("max" if results > 500 else results), "psnamespace": 0, "psoffset": 0, # parameterize to skip to later in the list? } raw_results = self.wiki_request(query_params) self._check_error_response(raw_results, prefix) return [rec["title"] for rec in raw_results["query"]["prefixsearch"]]
python
def prefixsearch(self, prefix, results=10): """ Perform a prefix search using the provided prefix string Args: prefix (str): Prefix string to use for search results (int): Number of pages with the prefix to return Returns: list: List of page titles Note: **Per the documentation:** "The purpose of this module is \ similar to action=opensearch: to take user input and provide \ the best-matching titles. Depending on the search engine \ backend, this might include typo correction, redirect \ avoidance, or other heuristics." """ self._check_query(prefix, "Prefix must be specified") query_params = { "list": "prefixsearch", "pssearch": prefix, "pslimit": ("max" if results > 500 else results), "psnamespace": 0, "psoffset": 0, # parameterize to skip to later in the list? } raw_results = self.wiki_request(query_params) self._check_error_response(raw_results, prefix) return [rec["title"] for rec in raw_results["query"]["prefixsearch"]]
[ "def", "prefixsearch", "(", "self", ",", "prefix", ",", "results", "=", "10", ")", ":", "self", ".", "_check_query", "(", "prefix", ",", "\"Prefix must be specified\"", ")", "query_params", "=", "{", "\"list\"", ":", "\"prefixsearch\"", ",", "\"pssearch\"", ":", "prefix", ",", "\"pslimit\"", ":", "(", "\"max\"", "if", "results", ">", "500", "else", "results", ")", ",", "\"psnamespace\"", ":", "0", ",", "\"psoffset\"", ":", "0", ",", "# parameterize to skip to later in the list?", "}", "raw_results", "=", "self", ".", "wiki_request", "(", "query_params", ")", "self", ".", "_check_error_response", "(", "raw_results", ",", "prefix", ")", "return", "[", "rec", "[", "\"title\"", "]", "for", "rec", "in", "raw_results", "[", "\"query\"", "]", "[", "\"prefixsearch\"", "]", "]" ]
Perform a prefix search using the provided prefix string Args: prefix (str): Prefix string to use for search results (int): Number of pages with the prefix to return Returns: list: List of page titles Note: **Per the documentation:** "The purpose of this module is \ similar to action=opensearch: to take user input and provide \ the best-matching titles. Depending on the search engine \ backend, this might include typo correction, redirect \ avoidance, or other heuristics."
[ "Perform", "a", "prefix", "search", "using", "the", "provided", "prefix", "string" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L543-L572
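A short prefixsearch sketch; note that requests above 500 results are clamped to the API's "max" limit:

from mediawiki import MediaWiki

wiki = MediaWiki()
print(wiki.prefixsearch("ba", results=5))  # namespace-0 titles starting with "ba"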
8,085
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.summary
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True): """ Get the summary for the title in question Args: title (str): Page title to summarize sentences (int): Number of sentences to return in summary chars (int): Number of characters to return in summary auto_suggest (bool): Run auto-suggest on title before \ summarizing redirect (bool): Use page redirect on title before summarizing Returns: str: The summarized results of the page Note: Precedence for parameters: sentences then chars; if both are \ 0 then the entire first section is returned """ page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect) return page_info.summarize(sentences, chars)
python
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True): """ Get the summary for the title in question Args: title (str): Page title to summarize sentences (int): Number of sentences to return in summary chars (int): Number of characters to return in summary auto_suggest (bool): Run auto-suggest on title before \ summarizing redirect (bool): Use page redirect on title before summarizing Returns: str: The summarized results of the page Note: Precedence for parameters: sentences then chars; if both are \ 0 then the entire first section is returned """ page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect) return page_info.summarize(sentences, chars)
[ "def", "summary", "(", "self", ",", "title", ",", "sentences", "=", "0", ",", "chars", "=", "0", ",", "auto_suggest", "=", "True", ",", "redirect", "=", "True", ")", ":", "page_info", "=", "self", ".", "page", "(", "title", ",", "auto_suggest", "=", "auto_suggest", ",", "redirect", "=", "redirect", ")", "return", "page_info", ".", "summarize", "(", "sentences", ",", "chars", ")" ]
Get the summary for the title in question Args: title (str): Page title to summarize sentences (int): Number of sentences to return in summary chars (int): Number of characters to return in summary auto_suggest (bool): Run auto-suggest on title before \ summarizing redirect (bool): Use page redirect on title before summarizing Returns: str: The summarized results of the page Note: Precedence for parameters: sentences then chars; if both are \ 0 then the entire first section is returned
[ "Get", "the", "summary", "for", "the", "title", "in", "question" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L575-L591
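Per the docstring, sentences takes precedence over chars, and both at 0 return the whole first section; for example:

from mediawiki import MediaWiki

wiki = MediaWiki()
print(wiki.summary("Chess", sentences=2))  # first two sentences only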
8,086
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.categorytree
def categorytree(self, category, depth=5): """ Generate the Category Tree for the given categories Args: category(str or list of strings): Category name(s) depth(int): Depth to traverse the tree Returns: dict: Category tree structure Note: Set depth to **None** to get the whole tree Note: Return Data Structure: Subcategory contains the same \ recursive structure >>> { 'category': { 'depth': Number, 'links': list, 'parent-categories': list, 'sub-categories': dict } } .. versionadded:: 0.3.10 """ def __cat_tree_rec(cat, depth, tree, level, categories, links): """ recursive function to build out the tree """ tree[cat] = dict() tree[cat]["depth"] = level tree[cat]["sub-categories"] = dict() tree[cat]["links"] = list() tree[cat]["parent-categories"] = list() parent_cats = list() if cat not in categories: tries = 0 while True: if tries > 10: raise MediaWikiCategoryTreeError(cat) try: pag = self.page("{0}:{1}".format(self.category_prefix, cat)) categories[cat] = pag parent_cats = categories[cat].categories links[cat] = self.categorymembers( cat, results=None, subcategories=True ) break except PageError: raise PageError("{0}:{1}".format(self.category_prefix, cat)) except KeyboardInterrupt: raise except Exception: tries = tries + 1 time.sleep(1) else: parent_cats = categories[cat].categories tree[cat]["parent-categories"].extend(parent_cats) tree[cat]["links"].extend(links[cat][0]) if depth and level >= depth: for ctg in links[cat][1]: tree[cat]["sub-categories"][ctg] = None else: for ctg in links[cat][1]: __cat_tree_rec( ctg, depth, tree[cat]["sub-categories"], level + 1, categories, links, ) # ################################### # ### Actual Function Code ### # ################################### # make it simple to use both a list or a single category term if not isinstance(category, list): cats = [category] else: cats = category # parameter verification if len(cats) == 1 and (cats[0] is None or cats[0] == ""): msg = ( "CategoryTree: Parameter 'category' must either " "be a list of one or more categories or a string; " "provided: '{}'".format(category) ) raise ValueError(msg) if depth is not None and depth < 1: msg = ( "CategoryTree: Parameter 'depth' must be either None " "(for the full tree) or be greater than 0" ) raise ValueError(msg) results = dict() categories = dict() links = dict() for cat in cats: if cat is None or cat == "": continue __cat_tree_rec(cat, depth, results, 0, categories, links) return results
python
def categorytree(self, category, depth=5): """ Generate the Category Tree for the given categories Args: category(str or list of strings): Category name(s) depth(int): Depth to traverse the tree Returns: dict: Category tree structure Note: Set depth to **None** to get the whole tree Note: Return Data Structure: Subcategory contains the same \ recursive structure >>> { 'category': { 'depth': Number, 'links': list, 'parent-categories': list, 'sub-categories': dict } } .. versionadded:: 0.3.10 """ def __cat_tree_rec(cat, depth, tree, level, categories, links): """ recursive function to build out the tree """ tree[cat] = dict() tree[cat]["depth"] = level tree[cat]["sub-categories"] = dict() tree[cat]["links"] = list() tree[cat]["parent-categories"] = list() parent_cats = list() if cat not in categories: tries = 0 while True: if tries > 10: raise MediaWikiCategoryTreeError(cat) try: pag = self.page("{0}:{1}".format(self.category_prefix, cat)) categories[cat] = pag parent_cats = categories[cat].categories links[cat] = self.categorymembers( cat, results=None, subcategories=True ) break except PageError: raise PageError("{0}:{1}".format(self.category_prefix, cat)) except KeyboardInterrupt: raise except Exception: tries = tries + 1 time.sleep(1) else: parent_cats = categories[cat].categories tree[cat]["parent-categories"].extend(parent_cats) tree[cat]["links"].extend(links[cat][0]) if depth and level >= depth: for ctg in links[cat][1]: tree[cat]["sub-categories"][ctg] = None else: for ctg in links[cat][1]: __cat_tree_rec( ctg, depth, tree[cat]["sub-categories"], level + 1, categories, links, ) # ################################### # ### Actual Function Code ### # ################################### # make it simple to use both a list or a single category term if not isinstance(category, list): cats = [category] else: cats = category # parameter verification if len(cats) == 1 and (cats[0] is None or cats[0] == ""): msg = ( "CategoryTree: Parameter 'category' must either " "be a list of one or more categories or a string; " "provided: '{}'".format(category) ) raise ValueError(msg) if depth is not None and depth < 1: msg = ( "CategoryTree: Parameter 'depth' must be either None " "(for the full tree) or be greater than 0" ) raise ValueError(msg) results = dict() categories = dict() links = dict() for cat in cats: if cat is None or cat == "": continue __cat_tree_rec(cat, depth, results, 0, categories, links) return results
[ "def", "categorytree", "(", "self", ",", "category", ",", "depth", "=", "5", ")", ":", "def", "__cat_tree_rec", "(", "cat", ",", "depth", ",", "tree", ",", "level", ",", "categories", ",", "links", ")", ":", "\"\"\" recursive function to build out the tree \"\"\"", "tree", "[", "cat", "]", "=", "dict", "(", ")", "tree", "[", "cat", "]", "[", "\"depth\"", "]", "=", "level", "tree", "[", "cat", "]", "[", "\"sub-categories\"", "]", "=", "dict", "(", ")", "tree", "[", "cat", "]", "[", "\"links\"", "]", "=", "list", "(", ")", "tree", "[", "cat", "]", "[", "\"parent-categories\"", "]", "=", "list", "(", ")", "parent_cats", "=", "list", "(", ")", "if", "cat", "not", "in", "categories", ":", "tries", "=", "0", "while", "True", ":", "if", "tries", ">", "10", ":", "raise", "MediaWikiCategoryTreeError", "(", "cat", ")", "try", ":", "pag", "=", "self", ".", "page", "(", "\"{0}:{1}\"", ".", "format", "(", "self", ".", "category_prefix", ",", "cat", ")", ")", "categories", "[", "cat", "]", "=", "pag", "parent_cats", "=", "categories", "[", "cat", "]", ".", "categories", "links", "[", "cat", "]", "=", "self", ".", "categorymembers", "(", "cat", ",", "results", "=", "None", ",", "subcategories", "=", "True", ")", "break", "except", "PageError", ":", "raise", "PageError", "(", "\"{0}:{1}\"", ".", "format", "(", "self", ".", "category_prefix", ",", "cat", ")", ")", "except", "KeyboardInterrupt", ":", "raise", "except", "Exception", ":", "tries", "=", "tries", "+", "1", "time", ".", "sleep", "(", "1", ")", "else", ":", "parent_cats", "=", "categories", "[", "cat", "]", ".", "categories", "tree", "[", "cat", "]", "[", "\"parent-categories\"", "]", ".", "extend", "(", "parent_cats", ")", "tree", "[", "cat", "]", "[", "\"links\"", "]", ".", "extend", "(", "links", "[", "cat", "]", "[", "0", "]", ")", "if", "depth", "and", "level", ">=", "depth", ":", "for", "ctg", "in", "links", "[", "cat", "]", "[", "1", "]", ":", "tree", "[", "cat", "]", "[", "\"sub-categories\"", "]", "[", "ctg", "]", "=", "None", "else", ":", "for", "ctg", "in", "links", "[", "cat", "]", "[", "1", "]", ":", "__cat_tree_rec", "(", "ctg", ",", "depth", ",", "tree", "[", "cat", "]", "[", "\"sub-categories\"", "]", ",", "level", "+", "1", ",", "categories", ",", "links", ",", ")", "# ###################################", "# ### Actual Function Code ###", "# ###################################", "# make it simple to use both a list or a single category term", "if", "not", "isinstance", "(", "category", ",", "list", ")", ":", "cats", "=", "[", "category", "]", "else", ":", "cats", "=", "category", "# parameter verification", "if", "len", "(", "cats", ")", "==", "1", "and", "(", "cats", "[", "0", "]", "is", "None", "or", "cats", "[", "0", "]", "==", "\"\"", ")", ":", "msg", "=", "(", "\"CategoryTree: Parameter 'category' must either \"", "\"be a list of one or more categories or a string; \"", "\"provided: '{}'\"", ".", "format", "(", "category", ")", ")", "raise", "ValueError", "(", "msg", ")", "if", "depth", "is", "not", "None", "and", "depth", "<", "1", ":", "msg", "=", "(", "\"CategoryTree: Parameter 'depth' must be either None \"", "\"(for the full tree) or be greater than 0\"", ")", "raise", "ValueError", "(", "msg", ")", "results", "=", "dict", "(", ")", "categories", "=", "dict", "(", ")", "links", "=", "dict", "(", ")", "for", "cat", "in", "cats", ":", "if", "cat", "is", "None", "or", "cat", "==", "\"\"", ":", "continue", "__cat_tree_rec", "(", "cat", ",", "depth", ",", "results", ",", "0", ",", "categories", ",", "links", ")", 
"return", "results" ]
Generate the Category Tree for the given categories Args: category(str or list of strings): Category name(s) depth(int): Depth to traverse the tree Returns: dict: Category tree structure Note: Set depth to **None** to get the whole tree Note: Return Data Structure: Subcategory contains the same \ recursive structure >>> { 'category': { 'depth': Number, 'links': list, 'parent-categories': list, 'sub-categories': dict } } .. versionadded:: 0.3.10
[ "Generate", "the", "Category", "Tree", "for", "the", "given", "categories" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L662-L770
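A sketch of walking the returned structure; the category name is an example, and depth=2 keeps the traversal small:

from mediawiki import MediaWiki

wiki = MediaWiki()
tree = wiki.categorytree("Chess", depth=2)  # category name is illustrative

def walk(node, indent=0):
    # Each value carries depth/links/parent-categories/sub-categories;
    # sub-categories below the depth cutoff are left as None placeholders.
    for name, info in node.items():
        print(" " * indent + name)
        if info is not None:
            walk(info["sub-categories"], indent + 2)

walk(tree)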
8,087
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.page
def page( self, title=None, pageid=None, auto_suggest=True, redirect=True, preload=False ): """ Get MediaWiki page based on the provided title or pageid Args: title (str): Page title pageid (int): MediaWiki page identifier auto_suggest (bool): **True:** Allow page title auto-suggest redirect (bool): **True:** Follow page redirects preload (bool): **True:** Load most page properties Raises: ValueError: when title is blank or None and no pageid is \ provided Raises: :py:func:`mediawiki.exceptions.PageError`: if page does \ not exist Note: Title takes precedence over pageid if both are provided """ if (title is None or title.strip() == "") and pageid is None: raise ValueError("Either a title or a pageid must be specified") elif title: if auto_suggest: temp_title = self.suggest(title) if temp_title is None: # page doesn't exist raise PageError(title=title) else: title = temp_title return MediaWikiPage(self, title, redirect=redirect, preload=preload) else: # must be pageid return MediaWikiPage(self, pageid=pageid, preload=preload)
python
def page( self, title=None, pageid=None, auto_suggest=True, redirect=True, preload=False ): """ Get MediaWiki page based on the provided title or pageid Args: title (str): Page title pageid (int): MediaWiki page identifier auto_suggest (bool): **True:** Allow page title auto-suggest redirect (bool): **True:** Follow page redirects preload (bool): **True:** Load most page properties Raises: ValueError: when title is blank or None and no pageid is \ provided Raises: :py:func:`mediawiki.exceptions.PageError`: if page does \ not exist Note: Title takes precedence over pageid if both are provided """ if (title is None or title.strip() == "") and pageid is None: raise ValueError("Either a title or a pageid must be specified") elif title: if auto_suggest: temp_title = self.suggest(title) if temp_title is None: # page doesn't exist raise PageError(title=title) else: title = temp_title return MediaWikiPage(self, title, redirect=redirect, preload=preload) else: # must be pageid return MediaWikiPage(self, pageid=pageid, preload=preload)
[ "def", "page", "(", "self", ",", "title", "=", "None", ",", "pageid", "=", "None", ",", "auto_suggest", "=", "True", ",", "redirect", "=", "True", ",", "preload", "=", "False", ")", ":", "if", "(", "title", "is", "None", "or", "title", ".", "strip", "(", ")", "==", "\"\"", ")", "and", "pageid", "is", "None", ":", "raise", "ValueError", "(", "\"Either a title or a pageid must be specified\"", ")", "elif", "title", ":", "if", "auto_suggest", ":", "temp_title", "=", "self", ".", "suggest", "(", "title", ")", "if", "temp_title", "is", "None", ":", "# page doesn't exist", "raise", "PageError", "(", "title", "=", "title", ")", "else", ":", "title", "=", "temp_title", "return", "MediaWikiPage", "(", "self", ",", "title", ",", "redirect", "=", "redirect", ",", "preload", "=", "preload", ")", "else", ":", "# must be pageid", "return", "MediaWikiPage", "(", "self", ",", "pageid", "=", "pageid", ",", "preload", "=", "preload", ")" ]
Get MediaWiki page based on the provided title or pageid Args: title (str): Page title pageid (int): MediaWiki page identifier auto_suggest (bool): **True:** Allow page title auto-suggest redirect (bool): **True:** Follow page redirects preload (bool): **True:** Load most page properties Raises: ValueError: when title is blank or None and no pageid is \ provided Raises: :py:func:`mediawiki.exceptions.PageError`: if page does \ not exist Note: Title takes precedence over pageid if both are provided
[ "Get", "MediaWiki", "page", "based", "on", "the", "provided", "title", "or", "pageid" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L772-L802
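Either identifier works, with title winning when both are passed; a minimal sketch (the pageid value is a placeholder):

from mediawiki import MediaWiki

wiki = MediaWiki()
p = wiki.page("Chess")       # by title (auto-suggest on by default)
q = wiki.page(pageid=23254)  # by pageid; the id here is a made-up example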
8,088
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki.wiki_request
def wiki_request(self, params): """ Make a request to the MediaWiki API using the given search parameters Args: params (dict): Request parameters Returns: A parsed dict of the JSON response Note: Useful when wanting to query the MediaWiki site for some \ value that is not part of the wrapper API """ params["format"] = "json" if "action" not in params: params["action"] = "query" limit = self._rate_limit last_call = self._rate_limit_last_call if limit and last_call and last_call + self._min_wait > datetime.now(): # call time too quick for rate limited api requests, wait wait_time = (last_call + self._min_wait) - datetime.now() time.sleep(int(wait_time.total_seconds())) req = self._get_response(params) if self._rate_limit: self._rate_limit_last_call = datetime.now() return req
python
def wiki_request(self, params): """ Make a request to the MediaWiki API using the given search parameters Args: params (dict): Request parameters Returns: A parsed dict of the JSON response Note: Useful when wanting to query the MediaWiki site for some \ value that is not part of the wrapper API """ params["format"] = "json" if "action" not in params: params["action"] = "query" limit = self._rate_limit last_call = self._rate_limit_last_call if limit and last_call and last_call + self._min_wait > datetime.now(): # call time too quick for rate limited api requests, wait wait_time = (last_call + self._min_wait) - datetime.now() time.sleep(int(wait_time.total_seconds())) req = self._get_response(params) if self._rate_limit: self._rate_limit_last_call = datetime.now() return req
[ "def", "wiki_request", "(", "self", ",", "params", ")", ":", "params", "[", "\"format\"", "]", "=", "\"json\"", "if", "\"action\"", "not", "in", "params", ":", "params", "[", "\"action\"", "]", "=", "\"query\"", "limit", "=", "self", ".", "_rate_limit", "last_call", "=", "self", ".", "_rate_limit_last_call", "if", "limit", "and", "last_call", "and", "last_call", "+", "self", ".", "_min_wait", ">", "datetime", ".", "now", "(", ")", ":", "# call time to quick for rate limited api requests, wait", "wait_time", "=", "(", "last_call", "+", "self", ".", "_min_wait", ")", "-", "datetime", ".", "now", "(", ")", "time", ".", "sleep", "(", "int", "(", "wait_time", ".", "total_seconds", "(", ")", ")", ")", "req", "=", "self", ".", "_get_response", "(", "params", ")", "if", "self", ".", "_rate_limit", ":", "self", ".", "_rate_limit_last_call", "=", "datetime", ".", "now", "(", ")", "return", "req" ]
Make a request to the MediaWiki API using the given search parameters Args: params (dict): Request parameters Returns: A parsed dict of the JSON response Note: Useful when wanting to query the MediaWiki site for some \ value that is not part of the wrapper API
[ "Make", "a", "request", "to", "the", "MediaWiki", "API", "using", "the", "given", "search", "parameters" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L804-L832
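wiki_request is the documented escape hatch for raw API calls; format and action are filled in automatically, so a raw query can be as small as:

from mediawiki import MediaWiki

wiki = MediaWiki()
# 'action' defaults to 'query' and 'format' is forced to 'json' by wiki_request.
res = wiki.wiki_request({"list": "random", "rnlimit": 2})
print(res["query"]["random"])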
8,089
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._get_site_info
def _get_site_info(self): """ Parse out the Wikimedia site information including API Version and Extensions """ response = self.wiki_request( {"meta": "siteinfo", "siprop": "extensions|general"} ) # parse what we need out here! query = response.get("query", None) if query is None or query.get("general", None) is None: raise MediaWikiException("Missing query in response") gen = query.get("general", None) api_version = gen["generator"].split(" ")[1].split("-")[0] major_minor = api_version.split(".") for i, item in enumerate(major_minor): major_minor[i] = int(item) self._api_version = tuple(major_minor) self._api_version_str = ".".join([str(x) for x in self._api_version]) # parse the base url out tmp = gen.get("server", "") if tmp == "": raise MediaWikiException("Unable to parse base url") if tmp.startswith("http://") or tmp.startswith("https://"): self._base_url = tmp elif gen["base"].startswith("https:"): self._base_url = "https:{}".format(tmp) else: self._base_url = "http:{}".format(tmp) self._extensions = [ext["name"] for ext in query["extensions"]] self._extensions = sorted(list(set(self._extensions)))
python
def _get_site_info(self): """ Parse out the Wikimedia site information including API Version and Extensions """ response = self.wiki_request( {"meta": "siteinfo", "siprop": "extensions|general"} ) # parse what we need out here! query = response.get("query", None) if query is None or query.get("general", None) is None: raise MediaWikiException("Missing query in response") gen = query.get("general", None) api_version = gen["generator"].split(" ")[1].split("-")[0] major_minor = api_version.split(".") for i, item in enumerate(major_minor): major_minor[i] = int(item) self._api_version = tuple(major_minor) self._api_version_str = ".".join([str(x) for x in self._api_version]) # parse the base url out tmp = gen.get("server", "") if tmp == "": raise MediaWikiException("Unable to parse base url") if tmp.startswith("http://") or tmp.startswith("https://"): self._base_url = tmp elif gen["base"].startswith("https:"): self._base_url = "https:{}".format(tmp) else: self._base_url = "http:{}".format(tmp) self._extensions = [ext["name"] for ext in query["extensions"]] self._extensions = sorted(list(set(self._extensions)))
[ "def", "_get_site_info", "(", "self", ")", ":", "response", "=", "self", ".", "wiki_request", "(", "{", "\"meta\"", ":", "\"siteinfo\"", ",", "\"siprop\"", ":", "\"extensions|general\"", "}", ")", "# parse what we need out here!", "query", "=", "response", ".", "get", "(", "\"query\"", ",", "None", ")", "if", "query", "is", "None", "or", "query", ".", "get", "(", "\"general\"", ",", "None", ")", "is", "None", ":", "raise", "MediaWikiException", "(", "\"Missing query in response\"", ")", "gen", "=", "query", ".", "get", "(", "\"general\"", ",", "None", ")", "api_version", "=", "gen", "[", "\"generator\"", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", ".", "split", "(", "\"-\"", ")", "[", "0", "]", "major_minor", "=", "api_version", ".", "split", "(", "\".\"", ")", "for", "i", ",", "item", "in", "enumerate", "(", "major_minor", ")", ":", "major_minor", "[", "i", "]", "=", "int", "(", "item", ")", "self", ".", "_api_version", "=", "tuple", "(", "major_minor", ")", "self", ".", "_api_version_str", "=", "\".\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "self", ".", "_api_version", "]", ")", "# parse the base url out", "tmp", "=", "gen", ".", "get", "(", "\"server\"", ",", "\"\"", ")", "if", "tmp", "==", "\"\"", ":", "raise", "MediaWikiException", "(", "\"Unable to parse base url\"", ")", "if", "tmp", ".", "startswith", "(", "\"http://\"", ")", "or", "tmp", ".", "startswith", "(", "\"https://\"", ")", ":", "self", ".", "_base_url", "=", "tmp", "elif", "gen", "[", "\"base\"", "]", ".", "startswith", "(", "\"https:\"", ")", ":", "self", ".", "_base_url", "=", "\"https:{}\"", ".", "format", "(", "tmp", ")", "else", ":", "self", ".", "_base_url", "=", "\"http:{}\"", ".", "format", "(", "tmp", ")", "self", ".", "_extensions", "=", "[", "ext", "[", "\"name\"", "]", "for", "ext", "in", "query", "[", "\"extensions\"", "]", "]", "self", ".", "_extensions", "=", "sorted", "(", "list", "(", "set", "(", "self", ".", "_extensions", ")", ")", ")" ]
Parse out the Wikimedia site information including API Version and Extensions
[ "Parse", "out", "the", "Wikimedia", "site", "information", "including", "API", "Version", "and", "Extensions" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L835-L869
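The generator-to-version parsing above is easy to sanity-check in isolation; the `generator` string below is an illustrative value, not one taken from a live response.

# Standalone check of the generator -> version-tuple parsing used above
generator = "MediaWiki 1.33.0-wmf.2"  # illustrative "generator" value
api_version = generator.split(" ")[1].split("-")[0]
version = tuple(int(part) for part in api_version.split("."))
print(version, ".".join(str(x) for x in version))  # (1, 33, 0) 1.33.0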
8,090
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._check_error_response
def _check_error_response(response, query):
    """ check for default error messages and throw correct exception """
    if "error" in response:
        http_error = ["HTTP request timed out.", "Pool queue is full"]
        geo_error = [
            "Page coordinates unknown.",
            "One of the parameters gscoord, gspage, gsbbox is required",
            "Invalid coordinate provided",
        ]
        err = response["error"]["info"]
        if err in http_error:
            raise HTTPTimeoutError(query)
        elif err in geo_error:
            raise MediaWikiGeoCoordError(err)
        else:
            raise MediaWikiException(err)
python
def _check_error_response(response, query):
    """ check for default error messages and throw correct exception """
    if "error" in response:
        http_error = ["HTTP request timed out.", "Pool queue is full"]
        geo_error = [
            "Page coordinates unknown.",
            "One of the parameters gscoord, gspage, gsbbox is required",
            "Invalid coordinate provided",
        ]
        err = response["error"]["info"]
        if err in http_error:
            raise HTTPTimeoutError(query)
        elif err in geo_error:
            raise MediaWikiGeoCoordError(err)
        else:
            raise MediaWikiException(err)
[ "def", "_check_error_response", "(", "response", ",", "query", ")", ":", "if", "\"error\"", "in", "response", ":", "http_error", "=", "[", "\"HTTP request timed out.\"", ",", "\"Pool queue is full\"", "]", "geo_error", "=", "[", "\"Page coordinates unknown.\"", ",", "\"One of the parameters gscoord, gspage, gsbbox is required\"", ",", "\"Invalid coordinate provided\"", ",", "]", "err", "=", "response", "[", "\"error\"", "]", "[", "\"info\"", "]", "if", "err", "in", "http_error", ":", "raise", "HTTPTimeoutError", "(", "query", ")", "elif", "err", "in", "geo_error", ":", "raise", "MediaWikiGeoCoordError", "(", "err", ")", "else", ":", "raise", "MediaWikiException", "(", "err", ")" ]
check for default error messages and throw correct exception
[ "check", "for", "default", "error", "messages", "and", "throw", "correct", "exception" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L874-L889
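A small self-contained illustration of the dispatch: only the "info" string decides which exception class is raised. The stand-in exception classes below are placeholders for the library's own, and the sketch assumes _check_error_response above is pasted into the same module.

# Stand-ins for the library's exception classes (illustration only)
class HTTPTimeoutError(Exception): pass
class MediaWikiGeoCoordError(Exception): pass
class MediaWikiException(Exception): pass

response = {"error": {"info": "Pool queue is full"}}
try:
    _check_error_response(response, query="chess")  # function defined above
except HTTPTimeoutError:
    print("timeout bucket")  # "Pool queue is full" maps here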
8,091
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._get_response
def _get_response(self, params):
    """ wrap the call to the requests package """
    return self._session.get(
        self._api_url, params=params, timeout=self._timeout
    ).json(encoding="utf8")
python
def _get_response(self, params):
    """ wrap the call to the requests package """
    return self._session.get(
        self._api_url, params=params, timeout=self._timeout
    ).json(encoding="utf8")
[ "def", "_get_response", "(", "self", ",", "params", ")", ":", "return", "self", ".", "_session", ".", "get", "(", "self", ".", "_api_url", ",", "params", "=", "params", ",", "timeout", "=", "self", ".", "_timeout", ")", ".", "json", "(", "encoding", "=", "\"utf8\"", ")" ]
wrap the call to the requests package
[ "wrap", "the", "call", "to", "the", "requests", "package" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L897-L901
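An equivalent call with plain requests, for readers who want the behavior without the wrapper. Note that on modern Python the underlying json.loads no longer accepts an encoding argument, so the sketch omits the encoding="utf8" kwarg; the endpoint and timeout values are assumptions.

import requests

session = requests.Session()
data = session.get(
    "https://en.wikipedia.org/w/api.php",
    params={"action": "query", "meta": "siteinfo", "format": "json"},
    timeout=15,
).json()
print(data["query"]["general"]["sitename"])  # e.g. "Wikipedia"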
8,092
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._post_response
def _post_response(self, params):
    """ wrap a post call to the requests package """
    return self._session.post(
        self._api_url, data=params, timeout=self._timeout
    ).json(encoding="utf8")
python
def _post_response(self, params):
    """ wrap a post call to the requests package """
    return self._session.post(
        self._api_url, data=params, timeout=self._timeout
    ).json(encoding="utf8")
[ "def", "_post_response", "(", "self", ",", "params", ")", ":", "return", "self", ".", "_session", ".", "post", "(", "self", ".", "_api_url", ",", "data", "=", "params", ",", "timeout", "=", "self", ".", "_timeout", ")", ".", "json", "(", "encoding", "=", "\"utf8\"", ")" ]
wrap a post call to the requests package
[ "wrap", "a", "post", "call", "to", "the", "requests", "package" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L903-L907
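The POST variant matters when the parameter set is too large for a URL (for example, many titles at once); requests form-encodes the data mapping into the request body. A plain-requests sketch mirroring the GET one above, with the same assumed endpoint and timeout:

import requests

session = requests.Session()
data = session.post(
    "https://en.wikipedia.org/w/api.php",
    data={"action": "query", "meta": "siteinfo", "format": "json"},
    timeout=15,
).json()
print("query" in data)  # True on success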
8,093
barrust/mediawiki
mediawiki/utilities.py
parse_all_arguments
def parse_all_arguments(func):
    """ determine all positional and named arguments as a dict """
    args = dict()
    if sys.version_info < (3, 0):
        func_args = inspect.getargspec(func)
        if func_args.defaults is not None:
            val = len(func_args.defaults)
            for i, itm in enumerate(func_args.args[-val:]):
                args[itm] = func_args.defaults[i]
    else:
        func_args = inspect.signature(func)
        for itm in list(func_args.parameters)[1:]:
            param = func_args.parameters[itm]
            if param.default is not param.empty:
                args[param.name] = param.default
    return args
python
def parse_all_arguments(func):
    """ determine all positional and named arguments as a dict """
    args = dict()
    if sys.version_info < (3, 0):
        func_args = inspect.getargspec(func)
        if func_args.defaults is not None:
            val = len(func_args.defaults)
            for i, itm in enumerate(func_args.args[-val:]):
                args[itm] = func_args.defaults[i]
    else:
        func_args = inspect.signature(func)
        for itm in list(func_args.parameters)[1:]:
            param = func_args.parameters[itm]
            if param.default is not param.empty:
                args[param.name] = param.default
    return args
[ "def", "parse_all_arguments", "(", "func", ")", ":", "args", "=", "dict", "(", ")", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "func_args", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "func_args", ".", "defaults", "is", "not", "None", ":", "val", "=", "len", "(", "func_args", ".", "defaults", ")", "for", "i", ",", "itm", "in", "enumerate", "(", "func_args", ".", "args", "[", "-", "val", ":", "]", ")", ":", "args", "[", "itm", "]", "=", "func_args", ".", "defaults", "[", "i", "]", "else", ":", "func_args", "=", "inspect", ".", "signature", "(", "func", ")", "for", "itm", "in", "list", "(", "func_args", ".", "parameters", ")", "[", "1", ":", "]", ":", "param", "=", "func_args", ".", "parameters", "[", "itm", "]", "if", "param", ".", "default", "is", "not", "param", ".", "empty", ":", "args", "[", "param", ".", "name", "]", "=", "param", ".", "default", "return", "args" ]
determine all positional and named arguments as a dict
[ "determine", "all", "positional", "and", "named", "arguments", "as", "a", "dict" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/utilities.py#L11-L26
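A quick check of the Python 3 branch; note the [1:] slice deliberately skips the first parameter (the library applies this to methods, where that slot is self). The sketch assumes parse_all_arguments above is in scope, and the sample function is hypothetical.

import inspect  # required by parse_all_arguments
import sys

def sample(self, results=10, suggestion=False):
    return results

print(parse_all_arguments(sample))  # {'results': 10, 'suggestion': False}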
8,094
barrust/mediawiki
mediawiki/utilities.py
str_or_unicode
def str_or_unicode(text):
    """ handle python 3 unicode and python 2.7 byte strings """
    encoding = sys.stdout.encoding
    if sys.version_info > (3, 0):
        return text.encode(encoding).decode(encoding)
    return text.encode(encoding)
python
def str_or_unicode(text):
    """ handle python 3 unicode and python 2.7 byte strings """
    encoding = sys.stdout.encoding
    if sys.version_info > (3, 0):
        return text.encode(encoding).decode(encoding)
    return text.encode(encoding)
[ "def", "str_or_unicode", "(", "text", ")", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "if", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ":", "return", "text", ".", "encode", "(", "encoding", ")", ".", "decode", "(", "encoding", ")", "return", "text", ".", "encode", "(", "encoding", ")" ]
handle python 3 unicode and python 2.7 byte strings
[ "handle", "python", "3", "unicode", "and", "python", "2", ".", "7", "byte", "strings" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/utilities.py#L78-L83
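Because the text is round-tripped through sys.stdout.encoding, the function can raise UnicodeEncodeError on consoles that cannot represent the input. A quick probe, assuming str_or_unicode above is in scope:

import sys

print(sys.stdout.encoding)        # encoding the round trip will use
print(str_or_unicode(u"Zürich"))  # may raise UnicodeEncodeError on
                                  # consoles without a Unicode encoding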
8,095
barrust/mediawiki
mediawiki/utilities.py
is_relative_url
def is_relative_url(url):
    """ simple method to determine if a url is relative or absolute """
    if url.startswith("#"):
        return None
    if url.find("://") > 0 or url.startswith("//"):
        # either 'http(s)://...' or '//cdn...' and therefore absolute
        return False
    return True
python
def is_relative_url(url):
    """ simple method to determine if a url is relative or absolute """
    if url.startswith("#"):
        return None
    if url.find("://") > 0 or url.startswith("//"):
        # either 'http(s)://...' or '//cdn...' and therefore absolute
        return False
    return True
[ "def", "is_relative_url", "(", "url", ")", ":", "if", "url", ".", "startswith", "(", "\"#\"", ")", ":", "return", "None", "if", "url", ".", "find", "(", "\"://\"", ")", ">", "0", "or", "url", ".", "startswith", "(", "\"//\"", ")", ":", "# either 'http(s)://...' or '//cdn...' and therefore absolute", "return", "False", "return", "True" ]
simple method to determine if a url is relative or absolute
[ "simple", "method", "to", "determine", "if", "a", "url", "is", "relative", "or", "absolute" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/utilities.py#L86-L93
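The three-way return (None / False / True) is easiest to see as a small table; this assumes is_relative_url above is in scope and the URLs are illustrative.

for url in ("#cite_note-1",           # fragment-only       -> None
            "https://example.org/x",  # absolute            -> False
            "//cdn.example.org/x",    # protocol-relative   -> False
            "/wiki/Chess"):           # relative            -> True
    print(url, "->", is_relative_url(url))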
8,096
barrust/mediawiki
setup.py
read_file
def read_file(filepath):
    """ read the file """
    with io.open(filepath, "r") as filepointer:
        res = filepointer.read()
    return res
python
def read_file(filepath):
    """ read the file """
    with io.open(filepath, "r") as filepointer:
        res = filepointer.read()
    return res
[ "def", "read_file", "(", "filepath", ")", ":", "with", "io", ".", "open", "(", "filepath", ",", "\"r\"", ")", "as", "filepointer", ":", "res", "=", "filepointer", ".", "read", "(", ")", "return", "res" ]
read the file
[ "read", "the", "file" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/setup.py#L14-L18
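io.open keeps the text-mode behavior identical on Python 2.7 and 3 (on 3 it is an alias of the builtin open). A round-trip check, assuming read_file above is in scope; the file path is a throwaway temp file.

import io
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "read_file_demo.txt")
with io.open(path, "w") as fp:
    fp.write(u"hello")
print(read_file(path))  # hello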
8,097
barrust/mediawiki
mediawiki/mediawikipage.py
MediaWikiPage._pull_content_revision_parent
def _pull_content_revision_parent(self):
    """ combine the pulling of these three properties """
    if self._revision_id is None:
        query_params = {
            "prop": "extracts|revisions",
            "explaintext": "",
            "rvprop": "ids",
        }
        query_params.update(self.__title_query_param())
        request = self.mediawiki.wiki_request(query_params)
        page_info = request["query"]["pages"][self.pageid]
        self._content = page_info["extract"]
        self._revision_id = page_info["revisions"][0]["revid"]
        self._parent_id = page_info["revisions"][0]["parentid"]
    return self._content, self._revision_id, self._parent_id
python
def _pull_content_revision_parent(self):
    """ combine the pulling of these three properties """
    if self._revision_id is None:
        query_params = {
            "prop": "extracts|revisions",
            "explaintext": "",
            "rvprop": "ids",
        }
        query_params.update(self.__title_query_param())
        request = self.mediawiki.wiki_request(query_params)
        page_info = request["query"]["pages"][self.pageid]
        self._content = page_info["extract"]
        self._revision_id = page_info["revisions"][0]["revid"]
        self._parent_id = page_info["revisions"][0]["parentid"]
    return self._content, self._revision_id, self._parent_id
[ "def", "_pull_content_revision_parent", "(", "self", ")", ":", "if", "self", ".", "_revision_id", "is", "None", ":", "query_params", "=", "{", "\"prop\"", ":", "\"extracts|revisions\"", ",", "\"explaintext\"", ":", "\"\"", ",", "\"rvprop\"", ":", "\"ids\"", ",", "}", "query_params", ".", "update", "(", "self", ".", "__title_query_param", "(", ")", ")", "request", "=", "self", ".", "mediawiki", ".", "wiki_request", "(", "query_params", ")", "page_info", "=", "request", "[", "\"query\"", "]", "[", "\"pages\"", "]", "[", "self", ".", "pageid", "]", "self", ".", "_content", "=", "page_info", "[", "\"extract\"", "]", "self", ".", "_revision_id", "=", "page_info", "[", "\"revisions\"", "]", "[", "0", "]", "[", "\"revid\"", "]", "self", ".", "_parent_id", "=", "page_info", "[", "\"revisions\"", "]", "[", "0", "]", "[", "\"parentid\"", "]", "return", "self", ".", "_content", ",", "self", ".", "_revision_id", ",", "self", ".", "_parent_id" ]
combine the pulling of these three properties
[ "combine", "the", "pulling", "of", "these", "three", "properties" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L127-L142
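This private helper lazily backs three public page properties in a single request; a sketch of the public surface (assumes the package is installed and network access, and the page title is illustrative):

from mediawiki import MediaWiki

page = MediaWiki().page("Chess")
print(page.revision_id, page.parent_id)  # one request fills both, plus content
print(page.content[:60])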
8,098
barrust/mediawiki
mediawiki/mediawikipage.py
MediaWikiPage.section
def section(self, section_title):
    """ Plain text section content

        Args:
            section_title (str): Name of the section to pull
        Returns:
            str: The content of the section
        Note:
            Returns **None** if section title is not found; only text \
            between title and next section or sub-section title is returned
        Note:
            Side effect is to also pull the content which can be slow
        Note:
            This is a parsing operation and not part of the standard API """
    section = "== {0} ==".format(section_title)
    try:
        content = self.content
        index = content.index(section) + len(section)

        # ensure we have the full section header...
        while True:
            if content[index + 1] == "=":
                index += 1
            else:
                break
    except ValueError:
        return None
    except IndexError:
        pass

    try:
        next_index = self.content.index("==", index)
    except ValueError:
        next_index = len(self.content)

    return self.content[index:next_index].lstrip("=").strip()
python
def section(self, section_title):
    """ Plain text section content

        Args:
            section_title (str): Name of the section to pull
        Returns:
            str: The content of the section
        Note:
            Returns **None** if section title is not found; only text \
            between title and next section or sub-section title is returned
        Note:
            Side effect is to also pull the content which can be slow
        Note:
            This is a parsing operation and not part of the standard API """
    section = "== {0} ==".format(section_title)
    try:
        content = self.content
        index = content.index(section) + len(section)

        # ensure we have the full section header...
        while True:
            if content[index + 1] == "=":
                index += 1
            else:
                break
    except ValueError:
        return None
    except IndexError:
        pass

    try:
        next_index = self.content.index("==", index)
    except ValueError:
        next_index = len(self.content)

    return self.content[index:next_index].lstrip("=").strip()
[ "def", "section", "(", "self", ",", "section_title", ")", ":", "section", "=", "\"== {0} ==\"", ".", "format", "(", "section_title", ")", "try", ":", "content", "=", "self", ".", "content", "index", "=", "content", ".", "index", "(", "section", ")", "+", "len", "(", "section", ")", "# ensure we have the full section header...", "while", "True", ":", "if", "content", "[", "index", "+", "1", "]", "==", "\"=\"", ":", "index", "+=", "1", "else", ":", "break", "except", "ValueError", ":", "return", "None", "except", "IndexError", ":", "pass", "try", ":", "next_index", "=", "self", ".", "content", ".", "index", "(", "\"==\"", ",", "index", ")", "except", "ValueError", ":", "next_index", "=", "len", "(", "self", ".", "content", ")", "return", "self", ".", "content", "[", "index", ":", "next_index", "]", ".", "lstrip", "(", "\"=\"", ")", ".", "strip", "(", ")" ]
Plain text section content

Args:
    section_title (str): Name of the section to pull
Returns:
    str: The content of the section
Note:
    Returns **None** if section title is not found; only text \
    between title and next section or sub-section title is returned
Note:
    Side effect is to also pull the content which can be slow
Note:
    This is a parsing operation and not part of the standard API
[ "Plain", "text", "section", "content" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L412-L447
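Because this is pure string parsing over self.content, the result depends on the "== Title ==" convention surviving in the plain-text extract. A usage sketch (network access assumed; page and section titles are illustrative):

from mediawiki import MediaWiki

page = MediaWiki().page("Chess")
history = page.section("History")
print(history[:80] if history is not None else "section not found")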
8,099
barrust/mediawiki
mediawiki/mediawikipage.py
MediaWikiPage.parse_section_links
def parse_section_links(self, section_title):
    """ Parse all links within a section

        Args:
            section_title (str): Name of the section to pull
        Returns:
            list: List of (title, url) tuples
        Note:
            Returns **None** if section title is not found
        Note:
            Side effect is to also pull the html which can be slow
        Note:
            This is a parsing operation and not part of the standard API """
    soup = BeautifulSoup(self.html, "html.parser")
    headlines = soup.find_all("span", {"class": "mw-headline"})
    tmp_soup = BeautifulSoup(section_title, "html.parser")
    tmp_sec_title = tmp_soup.get_text().lower()
    id_tag = None
    for headline in headlines:
        tmp_id = headline.text
        if tmp_id.lower() == tmp_sec_title:
            id_tag = headline.get("id")
            break

    if id_tag is not None:
        return self._parse_section_links(id_tag)
    return None
python
def parse_section_links(self, section_title):
    """ Parse all links within a section

        Args:
            section_title (str): Name of the section to pull
        Returns:
            list: List of (title, url) tuples
        Note:
            Returns **None** if section title is not found
        Note:
            Side effect is to also pull the html which can be slow
        Note:
            This is a parsing operation and not part of the standard API """
    soup = BeautifulSoup(self.html, "html.parser")
    headlines = soup.find_all("span", {"class": "mw-headline"})
    tmp_soup = BeautifulSoup(section_title, "html.parser")
    tmp_sec_title = tmp_soup.get_text().lower()
    id_tag = None
    for headline in headlines:
        tmp_id = headline.text
        if tmp_id.lower() == tmp_sec_title:
            id_tag = headline.get("id")
            break

    if id_tag is not None:
        return self._parse_section_links(id_tag)
    return None
[ "def", "parse_section_links", "(", "self", ",", "section_title", ")", ":", "soup", "=", "BeautifulSoup", "(", "self", ".", "html", ",", "\"html.parser\"", ")", "headlines", "=", "soup", ".", "find_all", "(", "\"span\"", ",", "{", "\"class\"", ":", "\"mw-headline\"", "}", ")", "tmp_soup", "=", "BeautifulSoup", "(", "section_title", ",", "\"html.parser\"", ")", "tmp_sec_title", "=", "tmp_soup", ".", "get_text", "(", ")", ".", "lower", "(", ")", "id_tag", "=", "None", "for", "headline", "in", "headlines", ":", "tmp_id", "=", "headline", ".", "text", "if", "tmp_id", ".", "lower", "(", ")", "==", "tmp_sec_title", ":", "id_tag", "=", "headline", ".", "get", "(", "\"id\"", ")", "break", "if", "id_tag", "is", "not", "None", ":", "return", "self", ".", "_parse_section_links", "(", "id_tag", ")", "return", "None" ]
Parse all links within a section

Args:
    section_title (str): Name of the section to pull
Returns:
    list: List of (title, url) tuples
Note:
    Returns **None** if section title is not found
Note:
    Side effect is to also pull the html which can be slow
Note:
    This is a parsing operation and not part of the standard API
[ "Parse", "all", "links", "within", "a", "section" ]
292e0be6c752409062dceed325d74839caf16a9b
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L449-L475
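A closing usage sketch: the call returns None when no matching "mw-headline" span exists, otherwise a list of (title, url) tuples. Network access is assumed and the page and section titles are illustrative.

from mediawiki import MediaWiki

page = MediaWiki().page("Chess")
links = page.parse_section_links("External links")
if links:
    for title, url in links[:3]:
        print(title, url)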