blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7a2225fb76bf9459f66776aa69b4cf0239c4723 | 64d4e61c73d158a81300b4c43767971a512f66e9 | /KingPhisherServer | bfe4504a4c49194e930b70de7a8939fcf7654b47 | [
"BSD-3-Clause"
] | permissive | nebooben/king-phisher | 82f384da8686149f270d0a117a5536fc56bc949a | 23ea1f2749cd7af031025802557e9b84d5c74ece | refs/heads/master | 2021-01-18T12:17:15.088018 | 2015-09-22T20:37:42 | 2015-09-22T20:37:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,103 | #!/usr/bin/python -B
# -*- coding: utf-8 -*-
#
# KingPhisherServer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pylint: disable=R0914
import argparse
import logging
import os
import pwd
import signal
import sys
import threading
from king_phisher import color
from king_phisher import errors
from king_phisher import find
from king_phisher import geoip
from king_phisher import its
from king_phisher import utilities
from king_phisher import version
from king_phisher.server import server
from boltons import strutils
from smoke_zephyr.configuration import Configuration
from smoke_zephyr.requirements import check_requirements
# Minimum third-party package versions the server needs; validated at startup
# by smoke_zephyr's check_requirements() before anything else runs.
__requirements__ = [
    'alembic>=0.6.7',
    'boltons>=0.6.4',
    # dnspython publishes separate packages for Python 2 and Python 3
    "dns{0}>=1.12.0".format('python' if its.py_v2 else 'python3'),
    'geoip2>=2.1.0',
    'Jinja2>=2.7.3',
    'markupsafe>=0.23',
    'msgpack-python>=0.4.5',
    'psycopg2>=2.6',
    'PyYAML>=3.11',
    'requests>=2.7.0',
    'SQLAlchemy>=1.0.2'
]
if its.py_v2:
    # the ipaddress module is only part of the stdlib on Python 3
    __requirements__.append('py2-ipaddress>=3.4')
def main():
    """CLI entry point for the King Phisher server.

    Validates package requirements, privileges and the configuration file,
    optionally forks into the background, drops privileges, and runs the
    server until shutdown.  Returns an os.EX_* exit code.
    """
    parser = argparse.ArgumentParser(description='King Phisher Server', conflict_handler='resolve')
    utilities.argp_add_args(parser)
    parser.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='run in the foreground (do not fork)')
    parser.add_argument('--verify-config', dest='verify_config', action='store_true', default=False, help='verify the configuration and exit')
    parser.add_argument('--update-geoip-db', dest='update_geoip_db', action='store_true', default=False, help='update the geoip database and exit')
    parser.add_argument('config_file', action='store', type=argparse.FileType('r'), help='configuration file to use')
    arguments = parser.parse_args()

    console_log_handler = utilities.configure_stream_logger(arguments.loglvl, arguments.logger)
    config_file = arguments.config_file
    del parser
    logger = logging.getLogger('KingPhisher.Server.CLI')

    # refuse to start when any third-party requirement is missing or too old
    missing_requirements = check_requirements(__requirements__)
    if missing_requirements:
        color.print_error('the following package requirements are missing or incompatible:')
        for missing_req in missing_requirements:
            color.print_error(' - ' + missing_req)
        color.print_error('please install the missing requirements with pip')
        return os.EX_SOFTWARE

    # root is required (privileged ports etc.); privileges can be dropped
    # later via the server.setuid_username option
    if os.getuid():
        color.print_error('the server must be started as root, configure the')
        color.print_error('\'server.setuid_username\' option in the config file to drop privileges')
        return os.EX_NOPERM

    try:
        config = Configuration(config_file.name)
    except Exception as error:
        color.print_error('an error occurred while parsing the server configuration file')
        error_name = "{0}.{1}".format(error.__module__, error.__class__.__name__)
        # only YAML syntax errors are rendered nicely; anything else is
        # unexpected and should propagate with a traceback
        if error_name != 'yaml.parser.ParserError':
            raise
        for line in str(error).split('\n'):
            color.print_error(line.rstrip())
        return os.EX_CONFIG

    # configure environment variables
    # register data search paths: next to this script, under the current
    # working directory, and any directory named by server.data_path
    data_path = os.path.dirname(__file__)
    data_path = os.path.join(data_path, 'data', 'server')
    data_path = os.path.abspath(data_path)
    find.data_path_append(data_path)

    data_path = os.getcwd()
    data_path = os.path.join(data_path, 'data', 'server')
    data_path = os.path.abspath(data_path)
    find.data_path_append(data_path)

    if config.has_option('server.data_path'):
        find.data_path_append(config.get('server.data_path'))

    # check the configuration for missing and incompatible options
    verify_config = find.find_data_file('server_config_verification.yml')
    if not verify_config:
        color.print_error('could not load server config verification data')
        return os.EX_NOINPUT
    missing_options = config.get_missing(verify_config)
    if missing_options:
        if 'missing' in missing_options:
            color.print_error('the following required options are missing from the server configuration:')
            for option in missing_options['missing']:
                color.print_error(' - ' + option)
        if 'incompatible' in missing_options:
            color.print_error('the following options are of an incompatible data type in the server configuration:')
            for option in missing_options['incompatible']:
                color.print_error(" - {0} (type: {1})".format(option[0], option[1]))
        return os.EX_CONFIG
    if arguments.verify_config:
        color.print_good('configuration verification passed')
        return os.EX_OK

    if arguments.update_geoip_db:
        color.print_status('downloading a new geoip database')
        size = geoip.download_geolite2_city_db(config.get('server.geoip.database'))
        color.print_good("download complete, file size: {0}".format(strutils.bytes2human(size)))
        return os.EX_OK

    # setup logging based on the configuration
    log_file_path = None
    if config.has_section('logging'):
        # honor whichever of the CLI and config-file levels is more verbose
        log_level = min(getattr(logging, arguments.loglvl), getattr(logging, config.get('logging.level').upper()))
        if config.has_option('logging.file') and config.get('logging.file'):
            log_file_path = config.get('logging.file')
            file_handler = logging.FileHandler(log_file_path)
            file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)-50s %(levelname)-8s %(message)s"))
            logging.getLogger('').addHandler(file_handler)
            file_handler.setLevel(log_level)
        if config.has_option('logging.console') and config.get('logging.console'):
            console_log_handler.setLevel(log_level)
    logger.debug("king phisher version: {0} python version: {1}.{2}.{3}".format(version.version, sys.version_info[0], sys.version_info[1], sys.version_info[2]))

    # fork into the background
    should_fork = True
    if arguments.foreground:
        should_fork = False
    elif config.has_option('server.fork'):
        should_fork = bool(config.get('server.fork'))
    if should_fork and os.fork():
        # parent process exits immediately; the child continues as the daemon
        return os.EX_OK

    try:
        king_phisher_server = server.build_king_phisher_server(config)
    except errors.KingPhisherError as error:
        logger.critical(error.message)
        return os.EX_SOFTWARE
    server_pid = os.getpid()
    logger.info("server running in process: {0} main tid: 0x{1:x}".format(server_pid, threading.current_thread().ident))

    if should_fork and config.has_option('server.pid_file'):
        pid_file = open(config.get('server.pid_file'), 'w')
        pid_file.write(str(server_pid))
        pid_file.close()

    if config.has_option('server.setuid_username'):
        # drop root privileges after the server has been built (sockets bound)
        setuid_username = config.get('server.setuid_username')
        try:
            user_info = pwd.getpwnam(setuid_username)
        except KeyError:
            logger.critical('an invalid username was specified as \'server.setuid_username\'')
            king_phisher_server.shutdown()
            return os.EX_NOUSER
        if log_file_path:
            # the unprivileged account must still be able to write the log file
            os.chown(log_file_path, user_info.pw_uid, user_info.pw_gid)
        os.setregid(user_info.pw_gid, user_info.pw_gid)
        os.setreuid(user_info.pw_uid, user_info.pw_uid)
        logger.info("dropped privileges to the {0} account".format(setuid_username))
    else:
        logger.warning('running with root privileges is dangerous, drop them by configuring \'server.setuid_username\'')

    db_engine_url = king_phisher_server.database_engine.url
    if db_engine_url.drivername == 'sqlite':
        logger.warning('sqlite is no longer fully supported, see https://github.com/securestate/king-phisher/wiki/Database#sqlite for more details')
        database_dir = os.path.dirname(db_engine_url.database)
        # sqlite needs to create journal files beside the database file
        if not os.access(database_dir, os.W_OK):
            logger.critical('sqlite requires write permissions to the folder containing the database')
            king_phisher_server.shutdown()
            return os.EX_NOPERM

    # SIGHUP triggers a graceful shutdown from a separate thread so the
    # signal handler itself returns promptly
    sighup_handler = lambda: threading.Thread(target=king_phisher_server.shutdown).start()
    signal.signal(signal.SIGHUP, lambda signum, frame: sighup_handler())

    try:
        king_phisher_server.serve_forever(fork=False)
    except KeyboardInterrupt:
        pass
    king_phisher_server.shutdown()
    logging.shutdown()
    return os.EX_OK
if __name__ == '__main__':
    # propagate main()'s os.EX_* return value as the process exit status
    sys.exit(main())
| [
"zeroSteiner@gmail.com"
] | zeroSteiner@gmail.com | |
5fc50f121069b89a5ab8f5583eb3199e86306300 | bd2fcd9d9ca5808af8c13c557094fd6e5ac33984 | /09_Exam/RevIT.py | 2df7816b69d6a28ffc717228efbc34f06252b0e8 | [] | no_license | danslavov/Programming-Basics-Python-2017-MAY | 586a57ea586bd8e34d7b182116207cd1d2ec112a | 2ed6256664f336e59f1941784f2c10fa9a561341 | refs/heads/master | 2021-01-23T07:37:43.259225 | 2017-09-05T17:49:40 | 2017-09-05T17:49:40 | 102,511,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | n = int(input())
print(n, end="")
while n > 0:
print(n % 10, end="")
n //= 10
print() | [
"noreply@github.com"
] | noreply@github.com |
2f7e54e0c60605a77ca218f06a13af39dfdcf37d | 7d24cbc5da5f38ee4f70159f19e2a50e5fb1208a | /delete_docker_registry_image.py | 37ba692609d59511e3463a139d67fca607f62f07 | [] | no_license | donniezhanggit/demo-kubernetes-yml | c4aa3c8544f4b07f5e616bcf71cc84e550fd8117 | ee9faae0cf05f7ea924dbda6bda5218d2d55d0d3 | refs/heads/master | 2023-07-19T06:21:44.027653 | 2021-08-18T08:23:54 | 2021-08-18T08:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,693 | py | #!/usr/bin/env python
"""
Usage:
Shut down your registry service to avoid race conditions and possible data loss
and then run the command with an image repo like this:
delete_docker_registry_image.py --image awesomeimage --dry-run
"""
import argparse
import json
import logging
import os
import sys
import shutil
import glob
logger = logging.getLogger(__name__)
def del_empty_dirs(s_dir, top_level):
    """Recursively remove empty directories beneath *s_dir*.

    A directory counts as empty when it contains nothing but (recursively)
    empty directories.  The top-level directory itself is reported but never
    removed.  Returns True when *s_dir* was empty in this sense.
    """
    is_empty = True
    for entry in os.listdir(s_dir):
        entry_path = os.path.join(s_dir, entry)
        if not os.path.isdir(entry_path):
            # a regular file keeps this directory (and its ancestors) alive
            is_empty = False
        elif not del_empty_dirs(entry_path, False):
            is_empty = False
    if is_empty:
        logger.debug("Deleting empty directory '%s'", s_dir)
        if not top_level:
            os.rmdir(s_dir)
    return is_empty
def get_layers_from_blob(path):
    """Parse a manifest blob file and return the set of digests it references.

    Schema-1 manifests list layers under ``fsLayers``/``blobSum``; schema-2
    manifests list them under ``layers``/``digest`` and also reference a
    ``config`` blob.  Only the hex portion after the ':' is kept.  Any
    failure is logged and yields an empty set rather than raising.
    """
    try:
        with open(path, "r") as blob:
            manifest = json.loads(blob.read())
        if manifest["schemaVersion"] == 1:
            digests = {entry["blobSum"].split(":")[1] for entry in manifest["fsLayers"]}
        else:
            digests = {entry["digest"].split(":")[1] for entry in manifest["layers"]}
            if "config" in manifest:
                digests.add(manifest["config"]["digest"].split(":")[1])
        return digests
    except Exception as error:
        logger.critical("Failed to read layers from blob:%s", error)
        return set()
def get_digest_from_blob(path):
    """Read a registry link file and return the digest it stores.

    Link files hold a single ``sha256:<hex>`` value; the portion after the
    colon is returned.  On any failure (unreadable file, malformed content)
    the error is logged and an empty string is returned instead of raising.
    """
    try:
        with open(path, "r") as blob:
            raw = blob.read()
            return raw.split(":")[1]
    except Exception as error:
        logger.critical("Failed to read digest from blob:%s", error)
        return ""
def get_links(path, _filter=None):
    """Walk *path* recursively and collect the digest of every ``link`` file.

    When *_filter* is given, only link files whose full path contains that
    substring are considered (e.g. ``"current"`` to restrict the walk to
    tagged manifests).
    """
    digests = []
    for dirpath, _dirs, filenames in os.walk(path):
        if "link" not in filenames:
            continue
        link_path = os.path.join(dirpath, "link")
        if _filter and _filter not in link_path:
            continue
        digests.append(get_digest_from_blob(link_path))
    return digests
class RegistryCleanerError(Exception):
    """Raised for fatal, user-facing errors during registry cleanup."""
    pass
class RegistryCleaner(object):
    """Clean registry.

    Operates directly on a Docker registry v2 storage directory on disk
    (``repositories/`` and ``blobs/sha256/``), deleting manifest revisions,
    per-repository layer links and shared blobs while preserving anything
    still referenced by another tag or repository.  With ``dry_run=True``
    every deletion is only logged, never performed.
    """

    def __init__(self, registry_data_dir, dry_run=False):
        # registry_data_dir is the ".../docker/registry/v2" root
        self.registry_data_dir = registry_data_dir
        if not os.path.isdir(self.registry_data_dir):
            raise RegistryCleanerError("No repositories directory found inside "
                                       "REGISTRY_DATA_DIR '{0}'.".format(self.registry_data_dir))
        self.dry_run = dry_run

    def _delete_layer(self, repo, digest):
        """remove blob directory from filesystem"""
        # this is the per-repository _layers link, not the shared blob data
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_layers/sha256", digest)
        self._delete_dir(path)

    def _delete_blob(self, digest):
        """remove blob directory from filesystem"""
        # blobs are sharded by the first two hex characters of the digest
        path = os.path.join(self.registry_data_dir, "blobs/sha256", digest[0:2], digest)
        self._delete_dir(path)

    def _blob_path_for_revision(self, digest):
        """where we can find the blob that contains the json describing this digest"""
        return os.path.join(self.registry_data_dir, "blobs/sha256",
                            digest[0:2], digest, "data")

    def _blob_path_for_revision_is_missing(self, digest):
        """for each revision, there should be a blob describing it"""
        return not os.path.isfile(self._blob_path_for_revision(digest))

    def _get_layers_from_blob(self, digest):
        """get layers from blob by digest"""
        return get_layers_from_blob(self._blob_path_for_revision(digest))

    def _delete_dir(self, path):
        """remove directory from filesystem (or just log it in dry-run mode)"""
        if self.dry_run:
            logger.info("DRY_RUN: would have deleted %s", path)
        else:
            logger.info("Deleting %s", path)
            try:
                shutil.rmtree(path)
            except Exception as error:
                # best-effort: log and continue so one failure does not
                # abort the whole cleanup
                logger.critical("Failed to delete directory:%s", error)

    def _delete_from_tag_index_for_revision(self, repo, digest):
        """delete revision from tag indexes"""
        # every tag keeps an index entry per manifest digest it has pointed at
        paths = glob.glob(
            os.path.join(self.registry_data_dir, "repositories", repo,
                         "_manifests/tags/*/index/sha256", digest)
        )
        for path in paths:
            self._delete_dir(path)

    def _delete_revisions(self, repo, revisions, blobs_to_keep=None):
        """delete revisions from list of directories

        Digests listed in *blobs_to_keep* have their revision/tag-index
        entries removed but their blob data preserved (still referenced
        elsewhere).
        """
        if blobs_to_keep is None:
            blobs_to_keep = []
        for revision_dir in revisions:
            digests = get_links(revision_dir)
            for digest in digests:
                self._delete_from_tag_index_for_revision(repo, digest)
                if digest not in blobs_to_keep:
                    self._delete_blob(digest)

            self._delete_dir(revision_dir)

    def _get_tags(self, repo):
        """get all tags for given repository (None when the repo is missing)"""
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags")
        if not os.path.isdir(path):
            logger.critical("No repository '%s' found in repositories directory %s",
                            repo, self.registry_data_dir)
            return None
        result = []
        for each in os.listdir(path):
            filepath = os.path.join(path, each)
            if os.path.isdir(filepath):
                result.append(each)
        return result

    def _get_repositories(self):
        """get all repository repos"""
        result = []
        root = os.path.join(self.registry_data_dir, "repositories")
        for each in os.listdir(root):
            filepath = os.path.join(root, each)
            if os.path.isdir(filepath):
                inside = os.listdir(filepath)
                if "_layers" in inside:
                    # a plain repository like "name"
                    result.append(each)
                else:
                    # a namespace directory: repositories are "namespace/name"
                    for inner in inside:
                        result.append(os.path.join(each, inner))
        return result

    def _get_all_links(self, except_repo=""):
        """get links for every repository, optionally skipping one repo"""
        result = []
        repositories = self._get_repositories()
        for repo in [r for r in repositories if r != except_repo]:
            path = os.path.join(self.registry_data_dir, "repositories", repo)
            for link in get_links(path):
                result.append(link)
        return result

    def prune(self):
        """delete all empty directories in registry_data_dir"""
        del_empty_dirs(self.registry_data_dir, True)

    def _layer_in_same_repo(self, repo, tag, layer):
        """check if layer is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            manifest = get_digest_from_blob(path)
            try:
                layers = self._get_layers_from_blob(manifest)
                if layer in layers:
                    return True
            except IOError:
                if self._blob_path_for_revision_is_missing(manifest):
                    # dangling tag: its manifest blob is already gone, so the
                    # tag directory itself is removed to repair the registry
                    logger.warn("Blob for digest %s does not exist. Deleting tag manifest: %s", manifest, other_tag)
                    tag_dir = os.path.join(self.registry_data_dir, "repositories", repo,
                                           "_manifests/tags", other_tag)
                    self._delete_dir(tag_dir)
                else:
                    raise
        return False

    def _manifest_in_same_repo(self, repo, tag, manifest):
        """check if manifest is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            other_manifest = get_digest_from_blob(path)
            if other_manifest == manifest:
                return True

        return False

    def delete_entire_repository(self, repo):
        """delete all blobs for given repository repo"""
        logger.debug("Deleting entire repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".format(repo, self.registry_data_dir))
        links = set(get_links(repo_dir))
        all_links_but_current = set(self._get_all_links(except_repo=repo))
        for layer in links:
            if layer in all_links_but_current:
                # blob data is shared; only this repo's references go away
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)
        self._delete_dir(repo_dir)

    def delete_repository_tag(self, repo, tag):
        """delete all blobs only for given tag of repository"""
        logger.debug("Deleting repository '%s' with tag '%s'", repo, tag)
        tag_dir = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags", tag)
        if not os.path.isdir(tag_dir):
            raise RegistryCleanerError("No repository '{0}' tag '{1}' found in repositories "
                                       "directory {2}/repositories".format(repo, tag, self.registry_data_dir))
        manifests_for_tag = set(get_links(tag_dir))
        revisions_to_delete = []
        blobs_to_keep = []
        layers = []
        all_links_not_in_current_repo = set(self._get_all_links(except_repo=repo))
        for manifest in manifests_for_tag:
            logger.debug("Looking up filesystem layers for manifest digest %s", manifest)
            if self._manifest_in_same_repo(repo, tag, manifest):
                logger.debug("Not deleting since we found another tag using manifest: %s", manifest)
                continue
            else:
                revisions_to_delete.append(
                    os.path.join(self.registry_data_dir, "repositories", repo,
                                 "_manifests/revisions/sha256", manifest)
                )
                if manifest in all_links_not_in_current_repo:
                    logger.debug("Not deleting the blob data since we found another repo using manifest: %s", manifest)
                    blobs_to_keep.append(manifest)
                layers.extend(self._get_layers_from_blob(manifest))
        layers_uniq = set(layers)
        for layer in layers_uniq:
            if self._layer_in_same_repo(repo, tag, layer):
                logger.debug("Not deleting since we found another tag using digest: %s", layer)
                continue
            # remove this repo's layer link; the shared blob itself is only
            # removed when no other repository still links to it
            self._delete_layer(repo, layer)
            if layer in all_links_not_in_current_repo:
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)
        self._delete_revisions(repo, revisions_to_delete, blobs_to_keep)
        self._delete_dir(tag_dir)

    def delete_untagged(self, repo):
        """delete all untagged data from repo"""
        logger.debug("Deleting utagged data from repository '%s'", repo)
        repositories_dir = os.path.join(self.registry_data_dir, "repositories")
        repo_dir = os.path.join(repositories_dir, repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".format(repo, self.registry_data_dir))
        # "current" links identify the manifests that tags (in any repo)
        # still point at; everything they reference must survive
        tagged_links = set(get_links(repositories_dir, _filter="current"))
        layers_to_protect = []
        for link in tagged_links:
            layers_to_protect.extend(self._get_layers_from_blob(link))
        unique_layers_to_protect = set(layers_to_protect)
        for layer in unique_layers_to_protect:
            logger.debug("layer_to_protect: %s", layer)
        tagged_revisions = set(get_links(repo_dir, _filter="current"))
        revisions_to_delete = []
        layers_to_delete = []
        dir_for_revisions = os.path.join(repo_dir, "_manifests/revisions/sha256")
        for rev in os.listdir(dir_for_revisions):
            if rev not in tagged_revisions:
                revisions_to_delete.append(os.path.join(dir_for_revisions, rev))
                for layer in self._get_layers_from_blob(rev):
                    if layer not in unique_layers_to_protect:
                        layers_to_delete.append(layer)
        unique_layers_to_delete = set(layers_to_delete)
        self._delete_revisions(repo, revisions_to_delete)
        for layer in unique_layers_to_delete:
            self._delete_blob(layer)
            self._delete_layer(repo, layer)

    def get_tag_count(self, repo):
        """Return the number of tags in *repo*, or -1 when the tags directory is absent."""
        logger.debug("Get tag count of repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        tags_dir = os.path.join(repo_dir, "_manifests/tags")

        if os.path.isdir(tags_dir):
            tags = os.listdir(tags_dir)
            return len(tags)
        else:
            logger.info("Tags directory does not exist: '%s'", tags_dir)
            return -1
def main():
    """cli entrypoint

    Parses the command line, selects the registry data directory (the
    REGISTRY_DATA_DIR environment variable overrides the built-in default),
    and dispatches to the appropriate RegistryCleaner operation.  Exits
    with status 1 on a RegistryCleanerError.
    """
    parser = argparse.ArgumentParser(description="Cleanup docker registry")
    parser.add_argument("-i", "--image",
                        dest="image",
                        required=True,
                        help="Docker image to cleanup")
    parser.add_argument("-v", "--verbose",
                        dest="verbose",
                        action="store_true",
                        help="verbose")
    parser.add_argument("-n", "--dry-run",
                        dest="dry_run",
                        action="store_true",
                        help="Dry run")
    parser.add_argument("-f", "--force",
                        dest="force",
                        action="store_true",
                        help="Force delete (deprecated)")
    parser.add_argument("-p", "--prune",
                        dest="prune",
                        action="store_true",
                        help="Prune")
    parser.add_argument("-u", "--untagged",
                        dest="untagged",
                        action="store_true",
                        help="Delete all untagged blobs for image")
    args = parser.parse_args()

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(u'%(levelname)-8s [%(asctime)s] %(message)s'))
    logger.addHandler(handler)

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # make sure not to log before logging is setup. that'll hose your logging config.
    if args.force:
        logger.info(
            "You supplied the force switch, which is deprecated. It has no effect now, and the script defaults to doing what used to be only happen when force was true")

    # split "repo:tag" into its parts; with no colon the whole string is the
    # repository and every tag of it is targeted
    splitted = args.image.split(":")
    if len(splitted) == 2:
        image = splitted[0]
        tag = splitted[1]
    else:
        image = args.image
        tag = None

    if 'REGISTRY_DATA_DIR' in os.environ:
        registry_data_dir = os.environ['REGISTRY_DATA_DIR']
    else:
        # default location of the registry v2 storage root on this host
        registry_data_dir = "/opt/registry/docker/registry/v2"

    try:
        cleaner = RegistryCleaner(registry_data_dir, dry_run=args.dry_run)
        if args.untagged:
            cleaner.delete_untagged(image)
        else:
            if tag:
                # deleting the only tag is equivalent to deleting the repo
                tag_count = cleaner.get_tag_count(image)
                if tag_count == 1:
                    cleaner.delete_entire_repository(image)
                else:
                    cleaner.delete_repository_tag(image, tag)
            else:
                cleaner.delete_entire_repository(image)
        if args.prune:
            cleaner.prune()
    except RegistryCleanerError as error:
        logger.fatal(error)
        sys.exit(1)
if __name__ == "__main__":
    # run the CLI when executed directly (exit status handled inside main)
    main()
| [
"fangang@LAPTOP-8BACI9IA"
] | fangang@LAPTOP-8BACI9IA |
db123b88102b476b1f7aa06f89b3742fe4ef29c6 | 805a795ea81ca8b5cee1dec638585011da3aa12f | /MAIN/2.79/python/lib/test/test_asyncio/test_events.py | 28d92a9f4e3eac21e2aed7993f0e60dbd7ff1e89 | [
"Apache-2.0"
] | permissive | josipamrsa/Interactive3DAnimation | 5b3837382eb0cc2ebdee9ee69adcee632054c00a | a4b7be78514b38fb096ced5601f25486d2a1d3a4 | refs/heads/master | 2022-10-12T05:48:20.572061 | 2019-09-26T09:50:49 | 2019-09-26T09:50:49 | 210,919,746 | 0 | 1 | Apache-2.0 | 2022-10-11T01:53:36 | 2019-09-25T19:03:51 | Python | UTF-8 | Python | false | false | 102,515 | py | """Tests for events.py."""
import collections.abc
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
    """Locate a test data file.

    Searches the test home directory (when the support module provides one)
    and then the directory containing this module.  Raises FileNotFoundError
    when the file exists in neither location.
    """
    candidates = []
    if hasattr(support, 'TEST_HOME_DIR'):
        candidates.append(os.path.join(support.TEST_HOME_DIR, filename))
    candidates.append(os.path.join(os.path.dirname(__file__), filename))
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise FileNotFoundError(filename)
def osx_tiger():
    """Return True if the platform is Mac OS 10.4 or older."""
    if sys.platform != 'darwin':
        return False
    release = platform.mac_ver()[0]
    parts = tuple(int(piece) for piece in release.split('.'))
    return parts < (10, 5)
# TLS fixture files used by the SSL-enabled tests; data_file() raises
# FileNotFoundError at import time if any of them cannot be located.
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
# Expected peer-certificate dictionary for SIGNED_CERTFILE, in the format
# returned by SSLSocket.getpeercert(); compared against handshake results.
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
            'version': 1,
            'subject': ((('countryName', 'XY'),),
                        (('localityName', 'Castle Anthrax'),),
                        (('organizationName', 'Python Software Foundation'),),
                        (('commonName', 'localhost'),)),
            'issuer': ((('countryName', 'XY'),),
                       (('organizationName', 'Python Software Foundation CA'),),
                       (('commonName', 'our-ca-server'),)),
            'notAfter': 'Nov 13 19:47:07 2022 GMT',
            'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
    """Stream protocol stub that records its own life cycle.

    ``state`` walks INITIAL -> CONNECTED -> (EOF) -> CLOSED and ``nbytes``
    accumulates the total payload received.  When built with an event loop,
    the ``connected`` and ``done`` futures resolve on connect/close so tests
    can await those transitions.
    """
    connected = None
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        self.transport = None
        if loop is not None:
            self.connected = asyncio.Future(loop=loop)
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        if self.connected:
            self.connected.set_result(None)

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        # track only the byte count; the payload itself is irrelevant here
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        # closing is legal both with and without a preceding EOF
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto variant that writes a canned HTTP request on connect."""

    def connection_made(self, transport):
        super().connection_made(transport)
        # exercise the transport's write path as soon as the connection exists
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram protocol stub tracking state and received byte counts.

    ``state`` walks INITIAL -> INITIALIZED -> CLOSED; with an event loop the
    ``done`` future resolves when the connection is lost.
    """
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        # only the size matters to the tests, not the sender or payload
        self.nbytes = self.nbytes + len(data)

    def error_received(self, exc):
        # errors may only arrive once the transport is set up
        assert self.state == 'INITIALIZED', self.state

    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Protocol stub for the read end of a pipe.

    ``state`` is a list so tests can assert the exact transition history,
    which always ends as ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'].
    """
    done = None

    def __init__(self, loop=None):
        self.state = ['INITIAL']
        self.nbytes = 0
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == ['INITIAL'], self.state
        self.state += ['CONNECTED']

    def data_received(self, data):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.state += ['EOF']

    def connection_lost(self, exc):
        # some transports drop the connection without delivering EOF first;
        # record it here so the final history is always identical
        if 'EOF' not in self.state:
            self.state += ['EOF']  # It is okay if EOF is missed.
        assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
        self.state += ['CLOSED']
        if self.done:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Protocol stub for the write end of a pipe.

    Tracks a minimal INITIAL -> CONNECTED -> CLOSED state machine; with an
    event loop the ``done`` future resolves when the connection is lost.
    """
    done = None

    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess protocol stub exposing futures/events for test flow control.

    # connected/completed: futures resolved on connect and on final close
    # disconnects: per-fd futures resolved when each pipe closes
    # data: bytes accumulated from stdout (fd 1) and stderr (fd 2)
    # got_data: events set whenever new data arrives on fd 1 / fd 2
    # returncode: child's exit status, captured in process_exited()
    """

    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.connected = asyncio.Future(loop=loop)
        self.completed = asyncio.Future(loop=loop)
        # fds 0-2: stdin, stdout, stderr
        self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
        self.data = {1: b'', 2: b''}
        self.returncode = None
        self.got_data = {1: asyncio.Event(loop=loop),
                         2: asyncio.Event(loop=loop)}

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        self.connected.set_result(None)

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        self.completed.set_result(None)

    def pipe_data_received(self, fd, data):
        assert self.state == 'CONNECTED', self.state
        self.data[fd] += data
        self.got_data[fd].set()

    def pipe_connection_lost(self, fd, exc):
        assert self.state == 'CONNECTED', self.state
        # resolve the per-fd future with the error, or None on clean close
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)

    def process_exited(self):
        assert self.state == 'CONNECTED', self.state
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
    def setUp(self):
        super().setUp()
        # each test gets a fresh loop from the concrete subclass's factory
        self.loop = self.create_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        # just in case if we have transport close callbacks
        if not self.loop.is_closed():
            test_utils.run_briefly(self.loop)

        self.loop.close()
        # collect now so transport/protocol finalizers run inside the test
        gc.collect()
        super().tearDown()
    def test_run_until_complete_nesting(self):
        # run_until_complete() must refuse to re-enter an already-running loop
        @asyncio.coroutine
        def coro1():
            yield

        @asyncio.coroutine
        def coro2():
            self.assertTrue(self.loop.is_running())
            self.loop.run_until_complete(coro1())

        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, coro2())
    # Note: because of the default Windows timing granularity of
    # 15.6 msec, we use fairly long sleep times here (~100 msec).

    def test_run_until_complete(self):
        t0 = self.loop.time()
        self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
        t1 = self.loop.time()
        # generous 0.8s upper bound keeps this stable on slow/loaded hosts
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_run_until_complete_stopped(self):
        # stopping the loop before the awaited future finishes must raise
        @asyncio.coroutine
        def cb():
            self.loop.stop()
            yield from asyncio.sleep(0.1, loop=self.loop)
        task = cb()
        self.assertRaises(RuntimeError,
                          self.loop.run_until_complete, task)
    def test_call_later(self):
        results = []

        def callback(arg):
            results.append(arg)
            self.loop.stop()

        self.loop.call_later(0.1, callback, 'hello world')
        t0 = time.monotonic()
        self.loop.run_forever()
        t1 = time.monotonic()
        self.assertEqual(results, ['hello world'])
        # the callback must not fire before its delay has elapsed
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
    def test_call_soon_threadsafe(self):
        """call_soon_threadsafe() schedules callbacks from another thread."""
        results = []
        lock = threading.Lock()

        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()

        def run_in_thread():
            self.loop.call_soon_threadsafe(callback, 'hello')
            lock.release()

        # Hold the lock until the thread has scheduled 'hello'; only then
        # is 'world' scheduled, so the expected order is deterministic.
        lock.acquire()
        t = threading.Thread(target=run_in_thread)
        t.start()

        with lock:
            self.loop.call_soon(callback, 'world')
            self.loop.run_forever()
        t.join()
        self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
    def test_reader_callback(self):
        """add_reader() delivers readable events until the fd is removed."""
        r, w = test_utils.socketpair()
        r.setblocking(False)
        bytes_read = bytearray()

        def reader():
            try:
                data = r.recv(1024)
            except BlockingIOError:
                # Spurious readiness notifications are possible
                # at least on Linux -- see man select.
                return
            if data:
                bytes_read.extend(data)
            else:
                # EOF: remove_reader() must report the fd was registered.
                self.assertTrue(self.loop.remove_reader(r.fileno()))
                r.close()

        self.loop.add_reader(r.fileno(), reader)
        self.loop.call_soon(w.send, b'abc')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
        self.loop.call_soon(w.send, b'def')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
        self.loop.call_soon(w.close)
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(bytes_read, b'abcdef')
    def test_writer_callback(self):
        """add_writer() fires when writable; remove_writer() reports state."""
        r, w = test_utils.socketpair()
        w.setblocking(False)

        def writer(data):
            w.send(data)
            self.loop.stop()

        data = b'x' * 1024
        self.loop.add_writer(w.fileno(), writer, data)
        self.loop.run_forever()

        # First removal returns True (was registered), second returns False.
        self.assertTrue(self.loop.remove_writer(w.fileno()))
        self.assertFalse(self.loop.remove_writer(w.fileno()))

        w.close()
        read = r.recv(len(data) * 2)
        r.close()
        self.assertEqual(read, data)
    def _basetest_sock_client_ops(self, httpd, sock):
        """Exercise sock_connect/sock_sendall/sock_recv against a test server.

        In debug mode (selector loops only) the sock_* methods must reject a
        blocking socket with ValueError; in non-blocking mode a simple HTTP
        request/response round-trip must succeed.
        """
        if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
            # in debug mode, socket operations must fail
            # if the socket is not in blocking mode
            self.loop.set_debug(True)
            sock.setblocking(True)
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_connect(sock, httpd.address))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_recv(sock, 1024))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_accept(sock))

        # test in non-blocking mode
        sock.setblocking(False)
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, httpd.address))
        self.loop.run_until_complete(
            self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        data = self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        # consume data
        self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        sock.close()
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
    def test_sock_client_ops(self):
        """Low-level socket operations against a TCP test server."""
        with test_utils.run_test_server() as httpd:
            sock = socket.socket()
            self._basetest_sock_client_ops(httpd, sock)
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_unix_sock_client_ops(self):
        """Low-level socket operations against a UNIX-domain test server."""
        with test_utils.run_test_unix_server() as httpd:
            sock = socket.socket(socket.AF_UNIX)
            self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
    def test_sock_accept(self):
        """sock_accept() yields a non-blocking connection plus peer address."""
        listener = socket.socket()
        listener.setblocking(False)
        listener.bind(('127.0.0.1', 0))
        listener.listen(1)
        client = socket.socket()
        client.connect(listener.getsockname())

        f = self.loop.sock_accept(listener)
        conn, addr = self.loop.run_until_complete(f)

        # The accepted socket must be non-blocking (timeout of 0).
        self.assertEqual(conn.gettimeout(), 0)
        self.assertEqual(addr, client.getsockname())
        self.assertEqual(client.getpeername(), listener.getsockname())
        client.close()
        conn.close()
        listener.close()
    @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
    def test_add_signal_handler(self):
        """add/remove_signal_handler(): argument validation and delivery."""
        caught = 0

        def my_handler():
            nonlocal caught
            caught += 1

        # Check error behavior first.
        self.assertRaises(
            TypeError, self.loop.add_signal_handler, 'boom', my_handler)
        self.assertRaises(
            TypeError, self.loop.remove_signal_handler, 'boom')
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, signal.NSIG+1,
            my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, 0, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, 0)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, -1, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, -1)
        self.assertRaises(
            RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
            my_handler)
        # Removing SIGKILL doesn't raise, since we don't call signal().
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
        # Now set a handler and handle it.
        self.loop.add_signal_handler(signal.SIGINT, my_handler)

        os.kill(os.getpid(), signal.SIGINT)
        test_utils.run_until(self.loop, lambda: caught)

        # Removing it should restore the default handler.
        self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
        self.assertEqual(signal.getsignal(signal.SIGINT),
                         signal.default_int_handler)
        # Removing again returns False.
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_while_selecting(self):
        # Test with a signal actually arriving during a select() call.
        caught = 0

        def my_handler():
            nonlocal caught
            caught += 1
            self.loop.stop()

        self.loop.add_signal_handler(signal.SIGALRM, my_handler)

        signal.setitimer(signal.ITIMER_REAL, 0.01, 0)  # Send SIGALRM once.
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_args(self):
        """Extra positional args given to add_signal_handler() are forwarded."""
        some_args = (42,)
        caught = 0

        def my_handler(*args):
            nonlocal caught
            caught += 1
            self.assertEqual(args, some_args)

        self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)

        signal.setitimer(signal.ITIMER_REAL, 0.1, 0)  # Send SIGALRM once.
        self.loop.call_later(0.5, self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    def _basetest_create_connection(self, connection_fut, check_sockname=True):
        """Await a connection future and sanity-check transport/protocol wiring."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertIs(pr.transport, tr)
        if check_sockname:
            self.assertIsNotNone(tr.get_extra_info('sockname'))
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def test_create_connection(self):
        """create_connection() to a TCP test server."""
        with test_utils.run_test_server() as httpd:
            conn_fut = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), *httpd.address)
            self._basetest_create_connection(conn_fut)
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_connection(self):
        """create_unix_connection() to a UNIX-domain test server."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server() as httpd:
            conn_fut = self.loop.create_unix_connection(
                lambda: MyProto(loop=self.loop), httpd.address)
            self._basetest_create_connection(conn_fut, check_sockname)
    def test_create_connection_sock(self):
        """create_connection() accepts a pre-connected socket via sock=."""
        with test_utils.run_test_server() as httpd:
            sock = None
            infos = self.loop.run_until_complete(
                self.loop.getaddrinfo(
                    *httpd.address, type=socket.SOCK_STREAM))
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    self.loop.run_until_complete(
                        self.loop.sock_connect(sock, address))
                except:
                    # Deliberate bare except: try the next addrinfo entry.
                    pass
                else:
                    break
            else:
                assert False, 'Can not create socket.'

            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), sock=sock)
            tr, pr = self.loop.run_until_complete(f)
            self.assertIsInstance(tr, asyncio.Transport)
            self.assertIsInstance(pr, asyncio.Protocol)
            self.loop.run_until_complete(pr.done)
            self.assertGreater(pr.nbytes, 0)
            tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
    def _basetest_create_ssl_connection(self, connection_fut,
                                        check_sockname=True,
                                        peername=None):
        """Await an SSL connection future and verify transport/extra info."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        # The transport class name should identify it as an SSL transport.
        self.assertTrue('ssl' in tr.__class__.__name__.lower())
        self.check_ssl_extra_info(tr, check_sockname, peername)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def _test_create_ssl_connection(self, httpd, create_connection,
                                    check_sockname=True, peername=None):
        """Run SSL connection checks with a dummy context, ssl=True (mocked),
        and ssl=True with real certificate validation (must fail)."""
        conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
        self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                             peername)

        # ssl.Purpose was introduced in Python 3.4
        if hasattr(ssl, 'Purpose'):
            def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                          cafile=None, capath=None,
                                          cadata=None):
                """
                A ssl.create_default_context() replacement that doesn't enable
                cert validation.
                """
                self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
                return test_utils.dummy_ssl_context()

            # With ssl=True, ssl.create_default_context() should be called
            with mock.patch('ssl.create_default_context',
                            side_effect=_dummy_ssl_create_context) as m:
                conn_fut = create_connection(ssl=True)
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
                self.assertEqual(m.call_count, 1)

        # With the real ssl.create_default_context(), certificate
        # validation will fail
        with self.assertRaises(ssl.SSLError) as cm:
            conn_fut = create_connection(ssl=True)
            # Ignore the "SSL handshake failed" log in debug mode
            with test_utils.disable_logger():
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)

        self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_connection(self):
        """create_connection() with SSL against an HTTPS test server."""
        with test_utils.run_test_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_connection,
                lambda: MyProto(loop=self.loop),
                *httpd.address)
            self._test_create_ssl_connection(httpd, create_connection,
                                             peername=httpd.address)
    def test_legacy_create_ssl_connection(self):
        """Same as test_create_ssl_connection, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_connection()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_ssl_unix_connection(self):
        """create_unix_connection() with SSL against a UNIX-domain server."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_unix_connection,
                lambda: MyProto(loop=self.loop), httpd.address,
                server_hostname='127.0.0.1')

            self._test_create_ssl_connection(httpd, create_connection,
                                             check_sockname,
                                             peername=httpd.address)
    def test_legacy_create_ssl_unix_connection(self):
        """UNIX SSL connection test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_unix_connection()
    def test_create_connection_local_addr(self):
        """local_addr= binds the client socket to the requested local port."""
        with test_utils.run_test_server() as httpd:
            port = support.find_unused_port()
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=(httpd.address[0], port))
            tr, pr = self.loop.run_until_complete(f)
            expected = pr.transport.get_extra_info('sockname')[1]
            self.assertEqual(port, expected)
            tr.close()
    def test_create_connection_local_addr_in_use(self):
        """Binding local_addr to an in-use address raises EADDRINUSE."""
        with test_utils.run_test_server() as httpd:
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=httpd.address)
            with self.assertRaises(OSError) as cm:
                self.loop.run_until_complete(f)
            self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
            # The failing address should appear in the error message.
            self.assertIn(str(httpd.address), cm.exception.strerror)
    def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
        """connect_accepted_socket() wraps an already-accepted connection.

        A client thread connects to a plain listening socket; the accepted
        connection is then handed to the loop, which must drive the protocol
        (echoing ``expected_response``) to completion.
        """
        loop = self.loop

        class MyProto(MyBaseProto):

            def connection_lost(self, exc):
                super().connection_lost(exc)
                loop.call_soon(loop.stop)

            def data_received(self, data):
                super().data_received(data)
                self.transport.write(expected_response)

        lsock = socket.socket()
        lsock.bind(('127.0.0.1', 0))
        lsock.listen(1)
        addr = lsock.getsockname()

        message = b'test data'
        response = None
        expected_response = b'roger'

        def client():
            nonlocal response
            try:
                csock = socket.socket()
                if client_ssl is not None:
                    csock = client_ssl.wrap_socket(csock)
                csock.connect(addr)
                csock.sendall(message)
                response = csock.recv(99)
                csock.close()
            except Exception as exc:
                # Report, don't raise: an exception in a daemon thread would
                # otherwise be silently lost.
                print(
                    "Failure in client thread in test_connect_accepted_socket",
                    exc)

        thread = threading.Thread(target=client, daemon=True)
        thread.start()

        conn, _ = lsock.accept()
        proto = MyProto(loop=loop)
        proto.loop = loop
        loop.run_until_complete(
            loop.connect_accepted_socket(
                (lambda: proto), conn, ssl=server_ssl))
        loop.run_forever()
        proto.transport.close()
        lsock.close()

        thread.join(1)
        self.assertFalse(thread.is_alive())
        self.assertEqual(proto.state, 'CLOSED')
        self.assertEqual(proto.nbytes, len(message))
        self.assertEqual(response, expected_response)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_ssl_connect_accepted_socket(self):
        """connect_accepted_socket() with SSL contexts on both ends."""
        if (sys.platform == 'win32' and
            sys.version_info < (3, 5) and
            isinstance(self.loop, proactor_events.BaseProactorEventLoop)
            ):
            raise unittest.SkipTest(
                'SSL not supported with proactor event loops before Python 3.5'
                )

        server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        server_context.load_cert_chain(ONLYCERT, ONLYKEY)
        if hasattr(server_context, 'check_hostname'):
            server_context.check_hostname = False
        server_context.verify_mode = ssl.CERT_NONE

        client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        # hasattr probed on server_context: both contexts are the same class,
        # so the capability check applies to client_context as well.
        if hasattr(server_context, 'check_hostname'):
            client_context.check_hostname = False
        client_context.verify_mode = ssl.CERT_NONE

        self.test_connect_accepted_socket(server_context, client_context)
    @mock.patch('asyncio.base_events.socket')
    def create_server_multiple_hosts(self, family, hosts, mock_sock):
        """Helper: create_server() over several hosts binds each unique host.

        Networking is fully mocked: getaddrinfo is stubbed and the socket
        module inside base_events is replaced by mock_sock.
        """
        @asyncio.coroutine
        def getaddrinfo(host, port, *args, **kw):
            if family == socket.AF_INET:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
            else:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]

        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

        unique_hosts = set(hosts)

        # NOTE(review): 'getsockbyname' is not a real socket method; it only
        # works here because both the side_effect setup and the assertion
        # below use the same mocked attribute name -- confirm intentional.
        if family == socket.AF_INET:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80) for host in unique_hosts]
        else:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80, 0, 0) for host in unique_hosts]
        self.loop.getaddrinfo = getaddrinfo_task
        self.loop._start_serving = mock.Mock()
        self.loop._stop_serving = mock.Mock()
        f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
        server = self.loop.run_until_complete(f)
        self.addCleanup(server.close)
        server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
        self.assertEqual(server_hosts, unique_hosts)
    def test_create_server_multiple_hosts_ipv4(self):
        """Duplicate IPv4 hosts are bound only once."""
        self.create_server_multiple_hosts(socket.AF_INET,
                                          ['1.2.3.4', '5.6.7.8', '1.2.3.4'])
    def test_create_server_multiple_hosts_ipv6(self):
        """Duplicate IPv6 hosts are bound only once."""
        self.create_server_multiple_hosts(socket.AF_INET6,
                                          ['::1', '::2', '::1'])
    def test_create_server(self):
        """create_server() serves a TCP client end-to-end."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
        self.assertEqual('127.0.0.1',
                         proto.transport.get_extra_info('peername')[0])

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
    def test_create_server_reuse_port(self):
        """SO_REUSEPORT is off by default and on with reuse_port=True."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()

        test_utils.run_briefly(self.loop)

        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0, reuse_port=True)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server(self):
        """create_unix_server() serves a UNIX-domain client end-to-end."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_unix_server(lambda: proto)
        self.assertEqual(len(server.sockets), 1)

        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_path_socket_error(self):
        """Passing both path= and sock= must raise ValueError."""
        proto = MyProto(loop=self.loop)
        sock = socket.socket()
        with sock:
            f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'path and sock can not be specified '
                                        'at the same time'):
                self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
    def _make_ssl_server(self, factory, certfile, keyfile=None):
        """Start an SSL TCP server on 127.0.0.1; return (server, host, port)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)

        f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
        server = self.loop.run_until_complete(f)

        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '127.0.0.1')
        return server, host, port
    def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
        """Start an SSL UNIX-domain server; return (server, path)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)
        return self._make_unix_server(factory, ssl=sslcontext)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl(self):
        """SSL server end-to-end with a non-validating client context."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, ONLYCERT, ONLYKEY)

        f_c = self.loop.create_connection(MyBaseProto, host, port,
                                          ssl=test_utils.dummy_ssl_context())
        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.check_ssl_extra_info(client, peername=(host, port))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()
    def test_legacy_create_server_ssl(self):
        """SSL server test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl(self):
        """SSL UNIX-domain server end-to-end with a non-validating client."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, ONLYCERT, ONLYKEY)

        f_c = self.loop.create_unix_connection(
            MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
            server_hostname='')

        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()
    def test_legacy_create_unix_server_ssl(self):
        """SSL UNIX-domain server test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl()
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verify_failed(self):
        """Client with CERT_REQUIRED but no CA must fail the handshake."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            '(?i)certificate.verify.failed'):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()
    def test_legacy_create_server_ssl_verify_failed(self):
        """Verify-failure test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_verify_failed()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl_verify_failed(self):
        """UNIX-domain variant: CERT_REQUIRED without a CA must fail."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='invalid')
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            '(?i)certificate.verify.failed'):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()
    def test_legacy_create_unix_server_ssl_verify_failed(self):
        """UNIX verify-failure test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl_verify_failed()
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_match_failed(self):
        """Hostname mismatch must raise CertificateError despite a valid CA."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(
            cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # incorrect server_hostname
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(
                        ssl.CertificateError,
                        "hostname '127.0.0.1' doesn't match 'localhost'"):
                    self.loop.run_until_complete(f_c)

        # close connection
        proto.transport.close()
        server.close()
    def test_legacy_create_server_ssl_match_failed(self):
        """Hostname-mismatch test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_match_failed()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl_verified(self):
        """UNIX-domain SSL handshake succeeds with correct CA and hostname."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)

        # close connection
        proto.transport.close()
        client.close()
        server.close()
        self.loop.run_until_complete(proto.done)
    def test_legacy_create_unix_server_ssl_verified(self):
        """UNIX verified-handshake test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client,peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
    def test_legacy_create_server_ssl_verified(self):
        """TCP verified-handshake test, forcing the pre-sslproto path."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_verified()
    def test_create_server_sock(self):
        """create_server() accepts a pre-bound socket via sock= and uses it."""
        proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                proto.set_result(self)

        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))

        f = self.loop.create_server(TestMyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        # The server must serve on the exact socket object we passed in.
        self.assertIs(sock, sock_ob)

        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()
        server.close()
    def test_create_server_addr_in_use(self):
        """A second create_server() on a bound port raises EADDRINUSE."""
        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))

        f = self.loop.create_server(MyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()

        f = self.loop.create_server(MyProto, host=host, port=port)
        with self.assertRaises(OSError) as cm:
            self.loop.run_until_complete(f)
        self.assertEqual(cm.exception.errno, errno.EADDRINUSE)

        server.close()
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_create_server_dual_stack(self):
        """host=None binds both IPv4 and IPv6; both stacks accept clients."""
        f_proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                # Closure over f_proto: rebinding f_proto below points new
                # connections at the new future.
                f_proto.set_result(self)

        try_count = 0
        while True:
            try:
                port = support.find_unused_port()
                f = self.loop.create_server(TestMyProto, host=None, port=port)
                server = self.loop.run_until_complete(f)
            except OSError as ex:
                if ex.errno == errno.EADDRINUSE:
                    # Another process grabbed the port between probing and
                    # binding; retry up to 5 times.
                    try_count += 1
                    self.assertGreaterEqual(5, try_count)
                    continue
                else:
                    raise
            else:
                break
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()

        f_proto = asyncio.Future(loop=self.loop)
        client = socket.socket(socket.AF_INET6)
        client.connect(('::1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()

        server.close()
    def test_server_close(self):
        """After server.close() new connections must be refused."""
        f = self.loop.create_server(MyProto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()

        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()

        server.close()

        client = socket.socket()
        self.assertRaises(
            ConnectionRefusedError, client.connect, ('127.0.0.1', port))
        client.close()
    def test_create_datagram_endpoint(self):
        """UDP endpoints: server echoes b'resp:' + payload back to the client."""
        class TestMyDatagramProto(MyDatagramProto):
            def __init__(inner_self):
                # 'inner_self' avoids shadowing the test case's 'self',
                # which is captured by the closure for self.loop.
                super().__init__(loop=self.loop)

            def datagram_received(self, data, addr):
                super().datagram_received(data, addr)
                self.transport.sendto(b'resp:'+data, addr)

        coro = self.loop.create_datagram_endpoint(
            TestMyDatagramProto, local_addr=('127.0.0.1', 0))
        s_transport, server = self.loop.run_until_complete(coro)
        host, port = s_transport.get_extra_info('sockname')

        self.assertIsInstance(s_transport, asyncio.Transport)
        self.assertIsInstance(server, TestMyDatagramProto)
        self.assertEqual('INITIALIZED', server.state)
        self.assertIs(server.transport, s_transport)

        coro = self.loop.create_datagram_endpoint(
            lambda: MyDatagramProto(loop=self.loop),
            remote_addr=(host, port))
        transport, client = self.loop.run_until_complete(coro)

        self.assertIsInstance(transport, asyncio.Transport)
        self.assertIsInstance(client, MyDatagramProto)
        self.assertEqual('INITIALIZED', client.state)
        self.assertIs(client.transport, transport)

        transport.sendto(b'xxx')
        test_utils.run_until(self.loop, lambda: server.nbytes)
        self.assertEqual(3, server.nbytes)
        test_utils.run_until(self.loop, lambda: client.nbytes)

        # received: b'resp:xxx' is 8 bytes
        self.assertEqual(8, client.nbytes)

        # extra info is available
        self.assertIsNotNone(transport.get_extra_info('sockname'))

        # close connection
        transport.close()
        self.loop.run_until_complete(client.done)
        self.assertEqual('CLOSED', client.state)
        server.transport.close()
def test_create_datagram_endpoint_sock(self):
    """create_datagram_endpoint() accepts a pre-configured UDP socket."""
    if (sys.platform == 'win32' and
            isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
        raise unittest.SkipTest(
            'UDP is not supported with proactor event loops')

    sock = None
    local_address = ('127.0.0.1', 0)
    infos = self.loop.run_until_complete(
        self.loop.getaddrinfo(
            *local_address, type=socket.SOCK_DGRAM))
    # Bind a non-blocking UDP socket on the first usable address family.
    for family, type, proto, cname, address in infos:
        try:
            sock = socket.socket(family=family, type=type, proto=proto)
            sock.setblocking(False)
            sock.bind(address)
        except OSError:
            # This address family is unusable on this host; close the
            # half-configured socket (avoid a ResourceWarning) and try
            # the next candidate.  The bare 'except:' used here before
            # would also have hidden real programming errors.
            if sock is not None:
                sock.close()
                sock = None
        else:
            break
    else:
        # self.fail() instead of 'assert False': asserts are stripped
        # when Python runs with -O, silently passing the test.
        self.fail('Can not create socket.')

    f = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop), sock=sock)
    tr, pr = self.loop.run_until_complete(f)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, MyDatagramProto)
    tr.close()
    self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
    """Closing a selector loop releases its internal self-pipe sockets."""
    event_loop = self.create_event_loop()
    if not isinstance(event_loop, selector_events.BaseSelectorEventLoop):
        event_loop.close()
        self.skipTest('loop is not a BaseSelectorEventLoop')

    # The self-pipe used to wake the selector counts as one internal fd.
    self.assertEqual(1, event_loop._internal_fds)
    event_loop.close()
    self.assertEqual(0, event_loop._internal_fds)
    self.assertIsNone(event_loop._csock)
    self.assertIsNone(event_loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_read_pipe(self):
    """connect_read_pipe() delivers bytes written to an os.pipe()."""
    proto = MyReadPipeProto(loop=self.loop)

    rpipe, wpipe = os.pipe()
    pipeobj = io.open(rpipe, 'rb', 1024)

    @asyncio.coroutine
    def connect():
        t, p = yield from self.loop.connect_read_pipe(
            lambda: proto, pipeobj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(wpipe, b'1')
    # Spin the loop until the protocol has seen the first byte.
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
    self.assertEqual(1, proto.nbytes)

    os.write(wpipe, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    # Closing the write end produces EOF, which closes the transport.
    os.close(wpipe)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
    """repr() of pipe transports must not crash after loop.close()."""
    # This test reproduces the issue #314 on GitHub
    loop = self.create_event_loop()
    read_proto = MyReadPipeProto(loop=loop)
    write_proto = MyWritePipeProto(loop=loop)

    rpipe, wpipe = os.pipe()
    rpipeobj = io.open(rpipe, 'rb', 1024)
    wpipeobj = io.open(wpipe, 'w', 1024)

    @asyncio.coroutine
    def connect():
        read_transport, _ = yield from loop.connect_read_pipe(
            lambda: read_proto, rpipeobj)
        write_transport, _ = yield from loop.connect_write_pipe(
            lambda: write_proto, wpipeobj)
        return read_transport, write_transport

    # Run and close the loop without closing the transports
    read_transport, write_transport = loop.run_until_complete(connect())
    loop.close()

    # These 'repr' calls used to raise an AttributeError
    # See Issue #314 on GitHub
    self.assertIn('open', repr(read_transport))
    self.assertIn('open', repr(write_transport))

    # Clean up (avoid ResourceWarning)
    rpipeobj.close()
    wpipeobj.close()
    # Detach the pipes so the transports don't try to close them again.
    read_transport._pipe = None
    write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
    """connect_read_pipe() works with the master side of a PTY."""
    proto = MyReadPipeProto(loop=self.loop)

    master, slave = os.openpty()
    master_read_obj = io.open(master, 'rb', 0)

    @asyncio.coroutine
    def connect():
        t, p = yield from self.loop.connect_read_pipe(lambda: proto,
                                                      master_read_obj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(slave, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes)
    self.assertEqual(1, proto.nbytes)

    os.write(slave, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    # Closing the slave end hangs up the PTY -> EOF on the master side.
    os.close(slave)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe(self):
    """Data written via connect_write_pipe() appears on the read end."""
    rpipe, wpipe = os.pipe()
    pipeobj = io.open(wpipe, 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # Drain the pipe's read end; returns the total bytes seen so far.
        chunk = os.read(rpipe, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(rpipe)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
    """Closing the read end disconnects the write-pipe protocol."""
    read_sock, write_sock = test_utils.socketpair()
    read_sock.setblocking(False)
    # Hand the write half to the transport as a raw binary file object.
    write_file = io.open(write_sock.detach(), 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, write_file)
    transport, returned_proto = self.loop.run_until_complete(connect)
    self.assertIs(returned_proto, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')
    received = self.loop.run_until_complete(
        self.loop.sock_recv(read_sock, 1024))
    self.assertEqual(b'1', received)

    # Dropping the reader must surface as a disconnect on the writer.
    read_sock.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
    """connect_write_pipe() works with the slave side of a PTY."""
    master, slave = os.openpty()
    slave_write_obj = io.open(slave, 'wb', 0)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # Drain the master side; returns the total bytes seen so far.
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    # PTYs can be slow on loaded machines, hence the generous timeout.
    test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                         timeout=10)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                         timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(master)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
    """Read and write transports can share one PTY pair concurrently."""
    master, read_slave = os.openpty()
    write_slave = os.dup(read_slave)
    # Raw mode: no echo/newline translation, so byte counts are exact.
    tty.setraw(read_slave)

    slave_read_obj = io.open(read_slave, 'rb', 0)
    read_proto = MyReadPipeProto(loop=self.loop)
    read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                               slave_read_obj)
    read_transport, p = self.loop.run_until_complete(read_connect)
    self.assertIs(p, read_proto)
    self.assertIs(read_transport, read_proto.transport)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(0, read_proto.nbytes)

    slave_write_obj = io.open(write_slave, 'wb', 0)
    write_proto = MyWritePipeProto(loop=self.loop)
    write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                 slave_write_obj)
    write_transport, p = self.loop.run_until_complete(write_connect)
    self.assertIs(p, write_proto)
    self.assertIs(write_transport, write_proto.transport)
    self.assertEqual('CONNECTED', write_proto.state)

    data = bytearray()

    def reader(data):
        # Drain the master side; returns total bytes seen so far.
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    # slave -> master direction (write transport).
    write_transport.write(b'1')
    test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
    self.assertEqual(b'1', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    # master -> slave direction (read transport).
    os.write(master, b'a')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(1, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    write_transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    os.write(master, b'bcde')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(5, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    # Closing the master hangs up both slave-side transports.
    os.close(master)

    read_transport.close()
    self.loop.run_until_complete(read_proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)

    write_transport.close()
    self.loop.run_until_complete(write_proto.done)
    self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
    """Cancelling a pending sock_recv() future must not stall the loop."""
    r, w = test_utils.socketpair()
    r.setblocking(False)
    f = self.loop.sock_recv(r, 1)
    # On proactor loops the future exposes its overlapped I/O object.
    ov = getattr(f, 'ov', None)
    if ov is not None:
        self.assertTrue(ov.pending)

    @asyncio.coroutine
    def main():
        try:
            self.loop.call_soon(f.cancel)
            yield from f
        except asyncio.CancelledError:
            res = 'cancelled'
        else:
            res = None
        finally:
            self.loop.stop()
        return res

    start = time.monotonic()
    t = asyncio.Task(main(), loop=self.loop)
    self.loop.run_forever()
    elapsed = time.monotonic() - start

    # Cancellation must be prompt, not wait out some internal timeout.
    self.assertLess(elapsed, 0.1)
    self.assertEqual(t.result(), 'cancelled')
    self.assertRaises(asyncio.CancelledError, f.result)
    if ov is not None:
        self.assertFalse(ov.pending)
    self.loop._stop_serving(r)

    r.close()
    w.close()
def test_timeout_rounding(self):
    """The loop must not busy-spin on timeouts below clock resolution."""
    def _run_once():
        # Count iterations of the event loop's inner step.
        self.loop._run_once_counter += 1
        orig_run_once()

    # Wrap the private _run_once so every loop iteration is counted.
    orig_run_once = self.loop._run_once
    self.loop._run_once_counter = 0
    self.loop._run_once = _run_once

    @asyncio.coroutine
    def wait():
        loop = self.loop
        # Sleeps from well above to well below any realistic clock
        # resolution; the tiny ones must still complete in few steps.
        yield from asyncio.sleep(1e-2, loop=loop)
        yield from asyncio.sleep(1e-4, loop=loop)
        yield from asyncio.sleep(1e-6, loop=loop)
        yield from asyncio.sleep(1e-8, loop=loop)
        yield from asyncio.sleep(1e-10, loop=loop)

    self.loop.run_until_complete(wait())
    # The ideal number of call is 12, but on some platforms, the selector
    # may sleep at little bit less than timeout depending on the resolution
    # of the clock used by the kernel. Tolerate a few useless calls on
    # these platforms.
    self.assertLessEqual(self.loop._run_once_counter, 20,
        {'clock_resolution': self.loop._clock_resolution,
         'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
    """remove_reader()/remove_writer() return False on a closed loop."""
    event_loop = self.create_event_loop()
    cb = lambda: None
    rsock, wsock = test_utils.socketpair()
    self.addCleanup(rsock.close)
    self.addCleanup(wsock.close)

    event_loop.add_reader(rsock, cb)
    event_loop.add_writer(wsock, cb)
    event_loop.close()

    # Removal after close is a no-op reported via a False return value.
    self.assertFalse(event_loop.remove_reader(rsock))
    self.assertFalse(event_loop.remove_writer(wsock))
def test_add_fds_after_closing(self):
    """add_reader()/add_writer() raise RuntimeError on a closed loop."""
    event_loop = self.create_event_loop()
    cb = lambda: None
    rsock, wsock = test_utils.socketpair()
    self.addCleanup(rsock.close)
    self.addCleanup(wsock.close)

    event_loop.close()

    with self.assertRaises(RuntimeError):
        event_loop.add_reader(rsock, cb)
    with self.assertRaises(RuntimeError):
        event_loop.add_writer(wsock, cb)
def test_close_running_event_loop(self):
    """Calling close() from inside a running coroutine must fail."""
    @asyncio.coroutine
    def shutdown(loop):
        self.loop.close()

    pending = shutdown(self.loop)
    with self.assertRaises(RuntimeError):
        self.loop.run_until_complete(pending)
def test_close(self):
    """Every scheduling API must raise RuntimeError on a closed loop."""
    self.loop.close()

    @asyncio.coroutine
    def test():
        pass

    func = lambda: False
    coro = test()
    # Close the never-awaited coroutine to avoid a RuntimeWarning.
    self.addCleanup(coro.close)

    # operation blocked when the loop is closed
    with self.assertRaises(RuntimeError):
        self.loop.run_forever()
    with self.assertRaises(RuntimeError):
        fut = asyncio.Future(loop=self.loop)
        self.loop.run_until_complete(fut)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon_threadsafe(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_later(1.0, func)
    with self.assertRaises(RuntimeError):
        self.loop.call_at(self.loop.time() + .0, func)
    with self.assertRaises(RuntimeError):
        self.loop.run_in_executor(None, func)
    with self.assertRaises(RuntimeError):
        self.loop.create_task(coro)
    with self.assertRaises(RuntimeError):
        self.loop.add_signal_handler(signal.SIGTERM, func)
class SubprocessTestsMixin:
    """Subprocess transport/protocol tests mixed into the per-loop classes.

    Each test starts a child process (the echo*.py helpers or a shell
    command) through the event loop supplied by the host test class.
    """

    def check_terminated(self, returncode):
        """Assert *returncode* is consistent with a SIGTERM termination."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    def check_killed(self, returncode):
        """Assert *returncode* is consistent with a SIGKILL termination."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_subprocess_exec(self):
        """subprocess_exec() pipes stdin through the echo helper."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        self.assertEqual(b'Python The Winner', proto.data[1])

    def test_subprocess_interactive(self):
        """Writes and echoed reads can be interleaved on the same child."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python ')
        self.loop.run_until_complete(proto.got_data[1].wait())
        proto.got_data[1].clear()
        self.assertEqual(b'Python ', proto.data[1])

        stdin.write(b'The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'Python The Winner', proto.data[1])

        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_shell(self):
        """subprocess_shell() captures a shell command's stdout."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'echo Python')
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.get_pipe_transport(0).close()
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(0, proto.returncode)
        self.assertTrue(all(f.done() for f in proto.disconnects.values()))
        # rstrip: the shell appends a platform-dependent line ending.
        self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
        self.assertEqual(proto.data[2], b'')
        transp.close()

    def test_subprocess_exitcode(self):
        """The child's exit status is reported as the protocol returncode."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_close_after_finish(self):
        """close() after the child has exited returns None and is safe."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        # No pipes were requested, so no pipe transports exist.
        self.assertIsNone(transp.get_pipe_transport(0))
        self.assertIsNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        self.assertIsNone(transp.close())

    def test_subprocess_kill(self):
        """kill() terminates the child with SIGKILL semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.kill()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        transp.close()

    def test_subprocess_terminate(self):
        """terminate() terminates the child with SIGTERM semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.terminate()
        self.loop.run_until_complete(proto.completed)
        self.check_terminated(proto.returncode)
        transp.close()

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_subprocess_send_signal(self):
        """send_signal() delivers an arbitrary signal to the child."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.send_signal(signal.SIGHUP)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(-signal.SIGHUP, proto.returncode)
        transp.close()

    def test_subprocess_stderr(self):
        """stdout and stderr are captured on their own pipes."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'test')

        self.loop.run_until_complete(proto.completed)

        transp.close()
        self.assertEqual(b'OUT:test', proto.data[1])
        self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
        self.assertEqual(0, proto.returncode)

    def test_subprocess_stderr_redirect_to_stdout(self):
        """stderr=subprocess.STDOUT merges both streams onto fd 1."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog, stderr=subprocess.STDOUT)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        self.assertIsNotNone(transp.get_pipe_transport(1))
        # With the redirect, no separate stderr pipe transport exists.
        self.assertIsNone(transp.get_pipe_transport(2))

        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
                        proto.data[1])
        self.assertEqual(b'', proto.data[2])

        transp.close()
        self.assertEqual(0, proto.returncode)

    def test_subprocess_close_client_stream(self):
        """Closing the child's stdout pipe is seen as a disconnect."""
        prog = os.path.join(os.path.dirname(__file__), 'echo3.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdout = transp.get_pipe_transport(1)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'OUT:test', proto.data[1])

        stdout.close()
        self.loop.run_until_complete(proto.disconnects[1])
        stdin.write(b'xxx')
        self.loop.run_until_complete(proto.got_data[2].wait())
        if sys.platform != 'win32':
            self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
        else:
            # After closing the read-end of a pipe, writing to the
            # write-end using os.write() fails with errno==EINVAL and
            # GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
            # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
            self.assertEqual(b'ERR:OSError', proto.data[2])
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_wait_no_same_group(self):
        """A child in a new session is still waited on and reaped."""
        # start the new process in a new session
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None,
            start_new_session=True)
        # Bug fix: a stray 'yield' here used to turn this test method
        # into a generator function, so its body never actually ran.
        _, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)

    def test_subprocess_exec_invalid_args(self):
        """subprocess.Popen-only keyword arguments are rejected."""
        @asyncio.coroutine
        def connect(**kwds):
            yield from self.loop.subprocess_exec(
                asyncio.SubprocessProtocol,
                'pwd', **kwds)

        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=True))

    def test_subprocess_shell_invalid_args(self):
        """subprocess_shell() rejects non-string commands and Popen args."""
        @asyncio.coroutine
        def connect(cmd=None, **kwds):
            if not cmd:
                cmd = 'pwd'
            yield from self.loop.subprocess_shell(
                asyncio.SubprocessProtocol,
                cmd, **kwds)

        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(['ls', '-l']))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=False))
# Concrete event-loop test classes: instantiate the mixins above once per
# selector/proactor implementation available on this platform.
if sys.platform == 'win32':

    class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop()

    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.ProactorEventLoop()

        # IOCP-based loops lack several selector-loop features; override
        # the corresponding inherited tests with explicit skips.
        if not sslproto._is_sslproto_available():
            def test_create_ssl_connection(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")

            def test_create_server_ssl(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")

            def test_create_server_ssl_verify_failed(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")

            def test_create_server_ssl_match_failed(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")

            def test_create_server_ssl_verified(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")

        def test_legacy_create_ssl_connection(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")

        def test_legacy_create_server_ssl(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")

        def test_legacy_create_server_ssl_verify_failed(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")

        def test_legacy_create_server_ssl_match_failed(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")

        def test_legacy_create_server_ssl_verified(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")

        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_create_datagram_endpoint(self):
            raise unittest.SkipTest(
                "IocpEventLoop does not have create_datagram_endpoint()")

        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    from asyncio import selectors

    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
        # A child watcher is required on Unix for the subprocess tests.
        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()

    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()

    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())

    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())

    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Callback placeholder: accepts anything, does nothing, returns None."""
class HandleTests(test_utils.TestCase):
    """Tests for asyncio.Handle: creation, cancellation, repr() and the
    debug-mode source traceback."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        """A Handle stores its callback/args and can be cancelled."""
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h._cancelled)

        h.cancel()
        self.assertTrue(h._cancelled)

    def test_callback_with_exception(self):
        """An exception in the callback is routed to the loop's handler."""
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        """Handles are weak-referenceable."""
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        """repr() shows the callback, partial args and source location."""
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        cb = asyncio.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        if sys.version_info >= (3, 4):
            method = HandleTests.test_handle_repr
            cb = functools.partialmethod(method)
            filename, lineno = test_utils.get_function_source(method)
            h = asyncio.Handle(cb, (), self.loop)

            cb_regex = r'<function HandleTests.test_handle_repr .*>'
            cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
            regex = (r'^<Handle %s at %s:%s>$'
                     % (cb_regex, re.escape(filename), lineno))
            self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        """In debug mode repr() also shows where the handle was created."""
        self.loop.get_debug.return_value = True

        # simple function; 'create_lineno' must refer to the Handle()
        # call on the very next line, so keep these statements adjacent.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        """In debug mode each scheduling API records its call site."""
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # The handle must point at the line just above the check call.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)

        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)

        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)

        # call_at  (bug fix: this used to invoke call_later() a second
        # time, so call_at()'s traceback recording was never exercised)
        h = loop.call_at(loop.time() + 0, noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).

        class Coro:
            def send(self, v):
                pass

            def throw(self, *exc):
                pass

            def close(self):
                pass

            def __await__(self):
                pass

        coro = Coro()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro = Coro()
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
class TimerTests(unittest.TestCase):
    """Tests for asyncio.TimerHandle: hashing, cancellation, repr() and
    ordering comparisons."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()

    def test_hash(self):
        # A timer handle hashes by its scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_timer(self):
        """A TimerHandle stores callback/args and drops them on cancel."""
        def callback(*args):
            return args

        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h._cancelled)

        # cancel: releases the callback/args references
        h.cancel()
        self.assertTrue(h._cancelled)
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

        # when cannot be None
        self.assertRaises(AssertionError,
                          asyncio.TimerHandle, None, callback, args,
                          self.loop)

    def test_timer_repr(self):
        """repr() shows the deadline plus callback and source location."""
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        """In debug mode repr() also shows where the handle was created."""
        self.loop.get_debug.return_value = True

        # simple function; 'create_lineno' must refer to the TimerHandle()
        # call on the very next line, so keep these statements adjacent.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        """Timer handles order by deadline; equality requires same state."""
        def callback(*args):
            return args

        when = time.monotonic()

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        # Cancellation breaks equality with an otherwise-identical timer.
        h2.cancel()
        self.assertFalse(h1 == h2)

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # Comparing against a plain (non-timer) Handle is unsupported.
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
    """asyncio.AbstractEventLoop is an abstract base class: every API
    method must raise NotImplementedError on a bare instance."""

    def test_not_implemented(self):
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        # Each entry is (bound method, *call args).  The duplicated
        # remove_signal_handler entry mirrors the historical test exactly.
        invocations = (
            (loop.run_forever,),
            (loop.run_until_complete, None),
            (loop.stop,),
            (loop.is_running,),
            (loop.is_closed,),
            (loop.close,),
            (loop.create_task, None),
            (loop.call_later, None, None),
            (loop.call_at, f, f),
            (loop.call_soon, None),
            (loop.time,),
            (loop.call_soon_threadsafe, None),
            (loop.run_in_executor, f, f),
            (loop.set_default_executor, f),
            (loop.getaddrinfo, 'localhost', 8080),
            (loop.getnameinfo, ('localhost', 8080)),
            (loop.create_connection, f),
            (loop.create_server, f),
            (loop.create_datagram_endpoint, f),
            (loop.add_reader, 1, f),
            (loop.remove_reader, 1),
            (loop.add_writer, 1, f),
            (loop.remove_writer, 1),
            (loop.sock_recv, f, 10),
            (loop.sock_sendall, f, 10),
            (loop.sock_connect, f, f),
            (loop.sock_accept, f),
            (loop.add_signal_handler, 1, f),
            (loop.remove_signal_handler, 1),
            (loop.remove_signal_handler, 1),
            (loop.connect_read_pipe, f, mock.sentinel.pipe),
            (loop.connect_write_pipe, f, mock.sentinel.pipe),
            (loop.subprocess_shell, f, mock.sentinel),
            (loop.subprocess_exec, f),
            (loop.set_exception_handler, f),
            (loop.default_exception_handler, f),
            (loop.call_exception_handler, f),
            (loop.get_debug,),
            (loop.set_debug, f),
        )
        for method, *call_args in invocations:
            self.assertRaises(NotImplementedError, method, *call_args)
class ProtocolsAbsTests(unittest.TestCase):
    """The abstract protocol base classes are pure no-ops: every callback
    must be callable and return None."""

    def test_empty(self):
        arg = mock.Mock()

        stream_proto = asyncio.Protocol()
        for outcome in (stream_proto.connection_made(arg),
                        stream_proto.connection_lost(arg),
                        stream_proto.data_received(arg),
                        stream_proto.eof_received()):
            self.assertIsNone(outcome)

        datagram_proto = asyncio.DatagramProtocol()
        for outcome in (datagram_proto.connection_made(arg),
                        datagram_proto.connection_lost(arg),
                        datagram_proto.error_received(arg),
                        datagram_proto.datagram_received(arg, arg)):
            self.assertIsNone(outcome)

        subprocess_proto = asyncio.SubprocessProtocol()
        for outcome in (subprocess_proto.connection_made(arg),
                        subprocess_proto.connection_lost(arg),
                        subprocess_proto.pipe_data_received(1, arg),
                        subprocess_proto.pipe_connection_lost(1, arg),
                        subprocess_proto.process_exited()):
            self.assertIsNone(outcome)
class PolicyTests(unittest.TestCase):
    """Tests for the event-loop policy machinery."""

    def test_event_loop_policy(self):
        # The abstract policy base implements nothing.
        policy = asyncio.AbstractEventLoopPolicy()
        self.assertRaises(NotImplementedError, policy.get_event_loop)
        self.assertRaises(NotImplementedError, policy.set_event_loop, object())
        self.assertRaises(NotImplementedError, policy.new_event_loop)
        self.assertRaises(NotImplementedError, policy.get_child_watcher)
        self.assertRaises(NotImplementedError, policy.set_child_watcher,
                          object())

    def test_get_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        self.assertIsNone(policy._local._loop)

        # The first call lazily creates and caches the loop...
        loop = policy.get_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        self.assertIs(policy._local._loop, loop)
        # ...and later calls return the very same object.
        self.assertIs(loop, policy.get_event_loop())
        loop.close()

    def test_get_event_loop_calls_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        with mock.patch.object(
                policy, "set_event_loop",
                wraps=policy.set_event_loop) as m_set_event_loop:

            loop = policy.get_event_loop()

            # policy._local._loop must be set through .set_event_loop()
            # (the unix DefaultEventLoopPolicy needs this call to attach
            # the child watcher correctly)
            m_set_event_loop.assert_called_with(loop)
        loop.close()

    def test_get_event_loop_after_set_none(self):
        # Explicitly clearing the loop makes get_event_loop() fail.
        policy = asyncio.DefaultEventLoopPolicy()
        policy.set_event_loop(None)
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):
        # With current_thread patched (i.e. "not the main thread"),
        # no loop is created implicitly.
        def f():
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)
        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Only real event loops (or None) are accepted.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        # The process-global policy is created lazily and cached.
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        # Only policy instances (or None) are accepted.
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()
        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)

    def test_get_event_loop_returns_running_loop(self):
        # Inside a running loop, asyncio.get_event_loop() must return that
        # loop without consulting the (deliberately broken) policy.
        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise NotImplementedError
        loop = None
        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())
        finally:
            # Always restore the global policy and close the loop.
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()
        self.assertIs(asyncio._get_running_loop(), None)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"jmrsa21@gmail.com"
] | jmrsa21@gmail.com |
7d7604b235b0937ca199f553e1f4b073945d9c89 | b10a759a2fd1b80ee810720d0f10bebe4706b84e | /ZenPacks/community/EMCIsilon/EMCIsilonQuota.py | 393cb03d3000a81f99c76aa196b23a90e259bbd2 | [
"MIT"
] | permissive | linkslice/ZenPacks.community.EMCIsilon | afbe41fb30cb08501bc3349dfa2bad99617835fe | 5f0483f3785ee6cbcb6034b3682fcb3f105df8c5 | refs/heads/master | 2021-01-17T00:45:24.505466 | 2019-10-08T16:53:49 | 2019-10-08T16:53:49 | 61,917,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | from Products.ZenModel.DeviceComponent import DeviceComponent
from Products.ZenModel.ManagedEntity import ManagedEntity
from Products.ZenModel.ZenossSecurity import ZEN_CHANGE_DEVICE
from Products.ZenRelations.RelSchema import ToManyCont, ToOne
class EMCIsilonQuota(DeviceComponent, ManagedEntity):
    """Zenoss model component representing a single EMC Isilon SmartQuota."""

    # NOTE(review): 'EMCIsilonIsilonQuota' doubles "Isilon" -- likely a typo,
    # but renaming would break already-persisted component objects; confirm
    # before changing.
    meta_type = portal_type = 'EMCIsilonIsilonQuota'

    # Modeled quota attributes (defaults; presumably populated by the
    # ZenPack's modeler plugin -- TODO confirm).
    quota_type = None
    quota_includes_snapshot_usage = None
    quota_path = None
    quota_hard_threshold_defined = None
    quota_hard_threshold = None
    quota_soft_threshold_defined = None
    quota_soft_threshold = None
    quota_advisory_threshold_defined = None
    quota_advisory_threshold = None
    quota_grace_period = None
    quota_usage = None
    quota_usage_with_overhead = None
    quota_inode_usage = None
    quota_includes_overhead = None

    # Expose the modeled attributes through Zenoss' property machinery.
    _properties = ManagedEntity._properties + (
        {'id': 'quota_type','type': 'string'},
        {'id': 'quota_includes_snapshot_usage','type': 'string'},
        {'id': 'quota_path','type': 'string'},
        {'id': 'quota_hard_threshold_defined','type': 'string'},
        {'id': 'quota_hard_threshold','type': 'string'},
        {'id': 'quota_soft_threshold_defined','type': 'string'},
        {'id': 'quota_soft_threshold','type': 'string'},
        {'id': 'quota_advisory_threshold_defined','type': 'string'},
        {'id': 'quota_advisory_threshold','type': 'string'},
        {'id': 'quota_grace_period','type': 'string'},
        {'id': 'quota_usage','type': 'string'},
        {'id': 'quota_usage_with_overhead','type': 'string'},
        {'id': 'quota_inode_usage','type': 'string'},
        {'id': 'quota_includes_overhead','type': 'string'},
    )

    # Containment: each quota belongs to one EMCIsilon device via the
    # device-side 'emcisilon_quotas' relation.
    _relations = ManagedEntity._relations + (
        ('emcisilon_quota', ToOne(ToManyCont,
            'ZenPacks.community.EMCIsilon.EMCIsilon',
            'emcisilon_quotas',
        )),
    )

    # UI wiring: adds the "Template" action to the component detail view.
    factory_type_information = ({
        'actions': ({
            'id': 'perfConf',
            'name': 'Template',
            'action': 'objTemplates',
            'permissions': (ZEN_CHANGE_DEVICE,),
        },),
    },)

    def device(self):
        # The parent device is reached through the containment relation.
        return self.emcisilon_quota()

    def getRRDTemplateName(self):
        # Monitoring template bound to this component type.
        return 'EMCIsilonQuotas'
| [
"sparctacus@gmail.com"
] | sparctacus@gmail.com |
7c60e64a4ec9b2e945c59909b072566c91cf039a | 608ff019d76ed0cc3cb6e6c19e94d65fd33e5273 | /bin/epylint | 647663eaf5ef526902bf8740db548876e61463a6 | [] | no_license | nwoodr94/4runner | 2975a0cf4fda416b634739d459bf51bb693a4666 | 0ad33beffedd5560c4a50edfad76732a249272b4 | refs/heads/master | 2021-05-22T02:26:10.405300 | 2020-08-11T18:59:33 | 2020-08-11T18:59:33 | 252,927,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/nwoodr94/code/4runner/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
    # setuptools console-script shim: strip a trailing '-script.pyw' or
    # '.exe' suffix so pylint sees a clean program name, then hand control
    # to epylint and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
| [
"nwoodr94@gmail.com"
] | nwoodr94@gmail.com | |
b349afad698bff5c9b153392fe731f23da2933db | db8d9e2be7a1c73678b1450da652939c0bcc5a70 | /manage.py | ac6853ebb230107da82e9b63d8277b4a335c9ffc | [] | no_license | KarthikRP05/Employee_read-operation | c015658e77bd72dc918d16142bf1ef5dcfb39a58 | 31b0709fa6dd7a60c01dd1744a0e3db75626c4c6 | refs/heads/master | 2023-02-17T19:02:17.361919 | 2021-01-15T10:45:52 | 2021-01-15T10:45:52 | 329,883,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility for administrative tasks."""
    # Point Django at this project's settings module unless the caller
    # already configured one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'employeetable.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original error
        # chained as the cause.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"karthikrprpk@gmail.com"
] | karthikrprpk@gmail.com |
8f8ecbbb79cad83b056e79575e51f056efcdfaf3 | 8f7f36acd739835a8d901a4be60929c7156557fb | /MyMain.py | a8c926100fa63114d34f3681f3442219c3dfe33e | [] | no_license | qingyue2014/PytorchNMT | 7013b9e8ab1f95b9f726f78d70ff5e524a1ec1fc | 5b02574693ad31a0d893b54a0c9373780baec282 | refs/heads/master | 2020-04-16T06:13:46.209884 | 2019-09-28T07:27:12 | 2019-09-28T07:27:12 | 165,337,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,647 | py | from __future__ import unicode_literals, print_function, division
from io import open
import torch
import torch.nn as nn
from torch import optim
import MyClass
import MyData, pickle
import datetime
from nltk.translate.bleu_score import sentence_bleu
# Run on the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_LENGTH = 50  # maximum sentence length (tokens) handled by the models
SOS_token = 0    # start-of-sentence token id
EOS_token = 1    # end-of-sentence token id
teacher_forcing_ratio = 0.5  # probability of feeding the gold token during training
import os, time, random
import argparse
## hyperparameters
parser = argparse.ArgumentParser(description='MT for Chinese to English')
parser.add_argument('--train_data', type=str, default='data', help='train data source')
parser.add_argument('--test_data', type=str, default='data', help='test data source')
parser.add_argument('--epoch_num', type=int, default=10, help='#epoch of training')
parser.add_argument('--hidden_size', type=int, default=256, help='#dim of hidden state')
parser.add_argument('--embedding_size', type=int, default=256, help='random init char embedding_dim')
parser.add_argument('--mode', type=str, default='test', help='train/test')
args = parser.parse_args()
import logging
from logging import handlers
class Logger(object):
    """Convenience wrapper around the stdlib ``logging`` module.

    Builds a logger that writes both to the console and to a time-rotated
    file (one rotation per *when* interval, keeping *backCount* backups).
    """

    # Mapping from level name to the stdlib logging level constant.
    level_relations = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'crit': logging.CRITICAL
    }

    def __init__(self, filename, level='info', when='D', backCount=3,
                 fmt='%(asctime)s - %(levelname)s: %(message)s'):
        """
        :param filename: log file path (also used as the logger name)
        :param level: one of the keys of ``level_relations``
        :param when: rotation interval unit for TimedRotatingFileHandler
                     (S=seconds, M=minutes, H=hours, D=days,
                     W=week day (interval==0 means Monday),
                     midnight=daily at midnight)
        :param backCount: number of rotated backup files to keep
        :param fmt: log record format string
        """
        self.logger = logging.getLogger(filename)
        format_str = logging.Formatter(fmt)
        self.logger.setLevel(self.level_relations.get(level))
        # BUG FIX: logging.getLogger() returns a per-name singleton, so
        # constructing Logger('all.log') repeatedly (as trainIters does)
        # used to stack duplicate handlers and emit every record N times.
        # Only attach handlers the first time this name is wrapped.
        if not self.logger.handlers:
            sh = logging.StreamHandler()  # console output
            sh.setFormatter(format_str)
            th = handlers.TimedRotatingFileHandler(
                filename=filename, when=when, backupCount=backCount,
                encoding='utf-8')  # rotated file output
            th.setFormatter(format_str)
            self.logger.addHandler(sh)
            self.logger.addHandler(th)
def tensorFromSentence(sent, vocab):
    """Convert a sentence into a (seq_len, 1) tensor of token ids on `device`.

    The EOS marker is appended and the text converted to ids by
    MyData.sentence2id (per the original comment -- TODO confirm).
    """
    indexes = MyData.sentence2id(sent, vocab)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
    """Turn a (chinese_sentence, english_sentence) pair into id tensors,
    using the module-level cn2id / en2id vocabularies."""
    input_tensor = tensorFromSentence(pair[0], cn2id)
    target_tensor = tensorFromSentence(pair[1], en2id)
    return (input_tensor, target_tensor)
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """Run one optimizer step on a single (source, target) pair.

    input_tensor / target_tensor: (seq_len, 1) id tensors from tensorsFromPair.
    Returns the loss averaged over the target length.
    """
    encoder_hidden = encoder.initHidden()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)
    # One slot per source position, width hidden_size*2 (a bidirectional
    # encoder is assumed -- TODO confirm against MyClass.EncoderRNN).
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size*2, device=device)
    loss = 0
    # Encode the source sentence token by token.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]
    decoder_input = torch.tensor([[SOS_token]], device=device)  # shape (1, 1)
    # The encoder's hidden state at the final time step seeds the decoder.
    decoder_hidden = encoder_hidden[1,:,:].view(1,1,-1)
    # Randomly decide whether to apply teacher forcing for this pair.
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)  # topk(n): the n largest values (unordered)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            # Stop early once the model emits the end-of-sentence token.
            if decoder_input.item() == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    return loss.item() / target_length
def trainIters(encoder, decoder, pairs, epoch_num, print_every=10, learning_rate=0.01):
    """Train encoder/decoder on `pairs` for `epoch_num` epochs with plain
    SGD, logging the average loss every `print_every` iterations."""
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [tensorsFromPair(pair) for pair in pairs]
    n_iters = len(training_pairs)
    criterion = nn.NLLLoss()  # negative log-likelihood loss
    log = Logger('all.log', level='debug')
    for epoch in range(1, epoch_num+1):
        # Fresh shuffle of the training pairs each epoch.
        random.shuffle(training_pairs)
        print_loss_total = 0  # Reset every print_every
        for iter in range(1, n_iters + 1):
            training_pair = training_pairs[iter - 1]
            input_tensor = training_pair[0]
            target_tensor = training_pair[1]
            # train() itself randomly chooses whether to use teacher forcing.
            loss = train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)
            print_loss_total += loss
            # Report the running average loss every `print_every` iterations.
            if iter % print_every == 0:
                # NOTE(review): now_time is computed but never used.
                now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                string1 = 'Epoch: {}/{} iter: {}/{} loss: {:.4}' .format(epoch, epoch_num,
                                                                         iter, n_iters, print_loss_avg)
                log.logger.info(string1)
def evaluate(encoder, decoder, sentence, word2id, id2word, max_length=MAX_LENGTH):
    """Greedy-decode the translation of `sentence` (no gradients).

    Returns (decoded_words, attentions): the produced target tokens
    (ending with '<EOS>' when emitted) and the attention rows that were
    actually generated.
    """
    with torch.no_grad():
        input_tensor = tensorFromSentence(sentence, word2id)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()
        encoder_outputs = torch.zeros(max_length, encoder.hidden_size*2, device=device)
        # Encode the source sentence token by token.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]
        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS
        # Seed the decoder with the encoder's final hidden state.
        decoder_hidden = encoder_hidden[1,:,:].view(1,1,-1)
        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)
        # Greedy decoding: always follow the single most likely token.
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(id2word[topi.item()])
            decoder_input = topi.squeeze().detach()
        return decoded_words, decoder_attentions[:di + 1]
def evaluateTest(encoder, decoder, pairs, vocab, id2tag):
    """Translate every source sentence in `pairs`, write the results to
    result.txt and print the corpus-average BLEU score.

    :param encoder: trained encoder network
    :param decoder: trained attention decoder network
    :param pairs: list of (source_sentence, reference_sentence) pairs
    :param vocab: source-language word -> id mapping
    :param id2tag: target-language id -> word mapping
    """
    total_score = 0
    # 'with' guarantees the file is closed even if translation raises
    # (the original leaked the handle on any exception).
    with open('result.txt', 'w', encoding='utf-8') as fw1:
        for pair in pairs:
            print('>', pair[0])
            print('=', pair[1])
            output_words, attentions = evaluate(encoder, decoder, pair[0], vocab, id2tag)
            output_sentence = ' '.join(output_words)
            print('<', output_sentence)
            print('')
            total_score += sentence_bleu([pair[1]], output_words)
            s = '> {}\nT: {}\nP: {}\n'.format(pair[0], pair[1], output_sentence)
            fw1.write(s)
    # Guard against ZeroDivisionError on an empty test set.
    avg_score = total_score * 1.0 / len(pairs) if pairs else 0.0
    result_s = "BLEU: {:.2}".format(avg_score)
    print(result_s)
# Resolve the corpus file locations from the command-line arguments.
train_data_path = os.path.join('.', args.train_data, 'cn_train.txt')
train_label_path = os.path.join('.', args.train_data, 'en_train.txt')
test_data_path = os.path.join('.', args.test_data, 'cn_test.txt')
test_label_path = os.path.join('.', args.test_data, 'en_test.txt')
# Parse the raw corpora on first run; afterwards reuse the pickled copies.
# NOTE(review): presumably MyData.read_corpus also writes train_data.pkl /
# test_data.pkl -- confirm, otherwise the cache branch can never trigger.
if not os.path.exists('train_data.pkl'):
    train_data = MyData.read_corpus(train_data_path, train_label_path, 'train')
    test_data = MyData.read_corpus(test_data_path, test_label_path, 'test')
    test_size = len(test_data)
else:
    print('loading existing data...')
    with open('train_data.pkl', 'rb') as fr:
        train_data = pickle.load(fr)
    with open('test_data.pkl', 'rb') as fr:
        test_data = pickle.load(fr)
    test_size = len(test_data)
# Build the vocabularies on first run, then load them from disk.
vocab_path = os.path.join('.', args.train_data, 'word2id.pkl')
tag_path = os.path.join('.', args.train_data, 'tag2id.pkl')
id2tag_path = os.path.join('.', args.train_data, 'id2tag.pkl')
if not os.path.exists(vocab_path):
    MyData.vocab_build(vocab_path, tag_path, id2tag_path, train_data, 5)
cn2id, en2id, id2en = MyData.read_dictionary(vocab_path, tag_path, id2tag_path)
if args.mode == 'train':
    print('start training...')
    encoder1 = MyClass.EncoderRNN(len(cn2id), args.embedding_size, args.hidden_size).to(device)
    attn_decoder1 = MyClass.AttnDecoderRNN(args.hidden_size, len(en2id), dropout_p=0.1).to(device)
    # print_every controls how often the running loss is reported.
    trainIters(encoder1, attn_decoder1, train_data, args.epoch_num, print_every=64)
    torch.save(encoder1,'model/encoder.pkl')
    torch.save(attn_decoder1,'model/decoder.pkl')
else:
    # Evaluation mode: load the previously trained networks.
    encoder = torch.load('model/encoder.pkl')
    decoder = torch.load('model/decoder.pkl')
evaluateTest(encoder, decoder, test_data, cn2id, id2en) | [
"noreply@github.com"
] | noreply@github.com |
e9529238cbc47916e001451674d12f106fbd8037 | 4dd5dbebc7b7f6dbfcbd6cc662311c91ad6d47e9 | /AtCoder/AGC030A.py | 641bf990c39cb0b7b2f7bfded4df6569c61e550e | [] | no_license | sourjp/programming_contest | aa6925b3317bd3aeb646df93a611af1199bfc7aa | 2a50e1be45441789e81eb49bfdfc0c598d2a534b | refs/heads/master | 2021-04-01T05:08:44.097226 | 2020-08-20T13:01:55 | 2020-08-20T13:01:55 | 248,158,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | a, b, c = map(int, input().split())
# a, b and c are read from stdin on the preceding line.
cnt = 0  # NOTE(review): never used after initialization
if a + b + 1>= c:
    ans = b + c
else:
    ans = b + a + b + 1
print(ans) | [
"hiro0209.ki@gmail.com"
] | hiro0209.ki@gmail.com |
3918393da646b4a519205518bd050446fa00d018 | 2452a1a846ec675cc6eb8b24261e3c0c8ec0dac5 | /bin/git-try | 62ce1bdc422cd74c69e3983a9501fe3c2ad000ca | [] | no_license | michaelbernstein/homedir | e5d5709c994361e34e929a351c12de3893274052 | d91a1e203eaf20d0a20e75204622cbe35f8ef579 | refs/heads/master | 2021-01-16T20:00:27.479753 | 2012-04-19T18:59:14 | 2012-04-19T18:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,731 | #!/usr/bin/env python
import argparse
import os
import shutil
import StringIO
import subprocess
import sys
import tempfile
# Legal values for the try-server selection flags (validated by argparse
# in GitTryer._compute_args_and_revs).
PLATFORMS = ('all', 'none', 'linux', 'linuxqt', 'linux64', 'macosx64', 'win32',
             'linux-android', 'linux-maemo5-gtk', 'linux-maemo5-qt',
             'linux-mobile', 'win32-mobile', 'macosx-mobile', 'macosx')
# The individual mochitest chunks, kept separate so UNITTESTS can both
# offer them and forbid mixing them with the aggregate 'mochitests'.
MOCHITESTS = ('mochitest-1', 'mochitest-2', 'mochitest-3', 'mochitest-4',
              'mochitest-5', 'mochitest-o')
UNITTESTS = ('all', 'none', 'reftest', 'reftest-ipc', 'reftest-no-accel',
             'crashtest', 'crashtest-ipc', 'xpcshell', 'jsreftest', 'jetpack',
             'mozmill-all', 'opengl', 'mochitests') + MOCHITESTS
TALOS = ('all', 'none', 'chrome', 'nochrome', 'dirty', 'tp', 'tp4', 'cold',
         'v8', 'svg', 'scroll', 'dromaeo', 'a11y', 'paint', 'remote-ts',
         'remote-tdhtml', 'remote-tsvg', 'remote-tsspider', 'remote-tpan',
         'remote-tp4m', 'remote-tp4m_nochrome', 'remote-twinopen',
         'remote-zoom')
class GitTryerException(Exception):
    """Raised for any user-visible failure while preparing or pushing a try run."""
    pass
class GitTryer(object):
    """Push the diff between two git revisions to a Mozilla try server.

    Python 2 code.  Workflow (see run()): export the commits as a patch,
    clone the configured hg mirror into a temp dir, import the patch, add
    an empty "try: ..." mq commit encoding the requested build/test
    selections, and push to the try URL.
    """

    def __init__(self, args):
        """Read git config (user.name/email, try.url, try.repo,
        try.defaults) and parse the command-line arguments."""
        self.rev_from = 'master'
        self.rev_to = 'HEAD'
        # Effective try selections; None means "not requested".
        self.args = {'build':None, 'platform':None, 'unittests':None,
                     'talos':None, 'email':None}
        self.tmpdir = None
        self.oldcwd = None
        name = self._get_conf('user.name')
        if not name:
            raise GitTryerException, 'Missing user name'
        email = self._get_conf('user.email')
        if not email:
            raise GitTryerException, 'Missing user email'
        self.userstr = '%s <%s>' % (name, email)
        self.url = self._get_conf('try.url')
        if not self.url:
            raise GitTryerException, 'Missing URL for try server'
        self.repo = self._get_conf('try.repo')
        if not self.repo:
            raise GitTryerException, 'Missing path to HG repo'
        # Defaults from git config are applied first; explicit CLI
        # arguments then override them.
        default_args = self._get_conf('try.defaults')
        if default_args:
            self._parse_tryargs(default_args)
        self._compute_args_and_revs(args)

    def _get_conf(self, var):
        """Return the value of git config key `var` ('' when unset)."""
        p = subprocess.Popen(['git', 'config', '--get', var],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        val = p.stdout.read().strip()
        p.wait()
        return val

    def _parse_tryargs(self, argstr):
        """Parse a try.defaults string (getopt-style) into self.args."""
        args = argstr.split()
        i = 0
        while i < len(args):
            skip = 1  # flags with a value advance by 2 instead
            if args[i] in ('-b', '--build'):
                skip = 2
                self.args['build'] = args[i + 1]
            if args[i] in ('-p', '--platform'):
                skip = 2
                self.args['platform'] = args[i + 1]
            if args[i] in ('-u', '--unittests'):
                skip = 2
                self.args['unittests'] = args[i + 1]
            if args[i] in ('-t', '--talos'):
                skip = 2
                self.args['talos'] = args[i + 1]
            if args[i] in ('-e', '--all-emails'):
                self.args['email'] = '-e'
            if args[i] in ('-n', '--no-emails'):
                self.args['email'] = '-n'
            i += skip

    def _create_args(self, atype, args, check_mochitests=False):
        """Join a list of selections into the comma string the try server
        expects, enforcing that 'all'/'none' stand alone and (optionally)
        that 'mochitests' is not mixed with individual mochitest chunks."""
        if 'all' in args:
            if len(args) != 1:
                raise GitTryerException, 'all can not be used with any other %s' % atype
            return 'all'
        if 'none' in args:
            if len(args) != 1:
                raise GitTryerException, 'none can not be used with any other %s' % atype
            return 'none'
        if check_mochitests and 'mochitests' in args:
            for m in MOCHITESTS:
                if m in args:
                    raise GitTryerException, 'mochitests can not be used with any other mochitest'
        return ','.join(args)

    def _compute_args_and_revs(self, args):
        """Parse the command line proper and fold it into self.args."""
        p = argparse.ArgumentParser(description='Do a try run on a patch',
                                    prog='git try')
        p.add_argument('-b', '--build', choices=('d', 'o', 'do'))
        p.add_argument('-p', '--platform', choices=PLATFORMS,
                       help='Platforms to build', action='append')
        p.add_argument('-u', '--unittests', choices=UNITTESTS,
                       help='Unit tests to run', action='append')
        p.add_argument('-t', '--talos', choices=TALOS,
                       help='Talos tests to run', action='append')
        p.add_argument('-e', '--all-emails', help='Send all email',
                       action='store_true')
        p.add_argument('-n', '--no-emails', help='Send no email',
                       action='store_true')
        p.add_argument('-m', '--mozilla-central', action='store_true',
                       help='Use mozilla-central configuration')
        p.add_argument('-f', '--force', action='store_true', default=False,
                       help='Force operation even with uncommitted changes')
        p.add_argument('rev', nargs='*', help='Revisions to push')
        args = p.parse_args(args)
        self.args['force'] = args.force
        if args.build:
            self.args['build'] = args.build
        if args.platform:
            self.args['platform'] = self._create_args('platform', args.platform)
        if args.unittests:
            self.args['unittests'] = self._create_args('unittests',
                                                       args.unittests, True)
        if args.talos:
            self.args['talos'] = self._create_args('talos', args.talos)
        if args.all_emails and args.no_emails:
            raise GitTryerException, '-e and -n are exclusive'
        if args.all_emails:
            self.args['email'] = '-e'
        if args.no_emails:
            self.args['email'] = '-n'
        # -m is a shorthand for a full mozilla-central style run and is
        # therefore incompatible with the fine-grained flags.
        if args.mozilla_central:
            if args.build or args.platform or args.unittests or args.talos:
                raise GitTryerException, '-m may not be used with -b, -p, -u or -t'
            self.args['build'] = 'do'
            self.args['platform'] = 'all'
            self.args['unittests'] = 'all'
            self.args['talos'] = 'all'
        self._get_revs(args.rev)

    def _get_revs(self, args):
        """Resolve the optional [from [to]] positional revisions and verify
        both against git rev-parse."""
        if len(args) > 2:
            raise GitTryerException, 'At most 2 revisions are allowed (from to)'
        if len(args) == 2:
            self.rev_from, self.rev_to = args
        elif args:
            self.rev_from = args[0]
        p = subprocess.Popen(['git', 'rev-parse', '-q', '--verify', self.rev_from],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.wait():
            raise GitTryerException, 'Invalid revision %s' % self.rev_from
        p = subprocess.Popen(['git', 'rev-parse', '-q', '--verify', self.rev_to],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.wait():
            raise GitTryerException, 'Invalid revision %s' % self.rev_to

    def _build_try(self):
        """Build the 'try: ...' commit message encoding the selections."""
        trylist = ['try:']
        if not any((self.args['build'], self.args['platform'], self.args['unittests'], self.args['talos'])):
            raise GitTryerException, 'Empty try configuration'
        if self.args['build']:
            trylist.extend(['-b', self.args['build']])
        if self.args['platform']:
            trylist.extend(['-p', self.args['platform']])
        if self.args['unittests']:
            trylist.extend(['-u', self.args['unittests']])
        if self.args['talos']:
            trylist.extend(['-t', self.args['talos']])
        if self.args['email']:
            trylist.append(self.args['email'])
        return ' '.join(trylist)

    def _cleanup(self):
        """Restore the original cwd and delete the temp working tree."""
        if self.oldcwd:
            os.chdir(self.oldcwd)
        if self.tmpdir:
            shutil.rmtree(self.tmpdir)

    def _save_output(self, proc, fname):
        """Dump a failed subprocess's stdout/stderr into tmpdir/fname for
        post-mortem inspection."""
        with file(os.path.join(self.tmpdir, fname), 'w') as f:
            f.write('STDOUT\n')
            f.write(proc.stdout.read())
            f.write('\n\nSTDERR\n')
            f.write(proc.stderr.read())

    def run(self):
        """Execute the whole export -> clone -> import -> qnew -> push
        pipeline.  On failure the temp dir is kept for inspection."""
        # Make our working directory
        self.tmpdir = tempfile.mkdtemp(prefix='try', dir='/tmp')
        self.oldcwd = os.getcwd()
        patchfile = os.path.join(self.tmpdir, 'patch')
        repodir = os.path.join(self.tmpdir, 'repo')

        # Ensure there aren't any outstanding changes
        if not self.args['force']:
            p = subprocess.Popen(['git', 'status', '--porcelain', '-z'],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            p.wait()
            if p.stdout.read():
                raise GitTryerException, 'You have uncommitted changes'

        # Get the contents of the patch
        # NOTE(review): 'git mkpatch' is presumably a local alias/script --
        # confirm it exists in the target environment.
        sys.stdout.write('making patch...\n')
        with file(patchfile, 'w') as f:
            p = subprocess.Popen(['git', 'mkpatch', self.rev_from, self.rev_to],
                                 stdout=f, stderr=subprocess.PIPE)
            p.wait()

        # Go to our hg repo and get a working copy
        sys.stdout.write('cloning source repository...\n')
        p = subprocess.Popen(['hg', 'clone', self.repo, repodir], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        if p.wait():
            self._save_output(p, 'clone.out')
            raise GitTryerException, 'Could not check out hg repository'

        # Apply the patch
        sys.stdout.write('applying patch...\n')
        os.chdir(repodir)
        p = subprocess.Popen(['hg', 'import', '--config', 'defaults.import=',
                              patchfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.wait():
            self._save_output(p, 'import.out')
            raise GitTryerException, 'Could not apply patch to hg repository'

        # Make our try selections
        sys.stdout.write('setting try selections...\n')
        p = subprocess.Popen(['hg', '--config', 'ui.editor=true', 'qnew',
                              '-m', self._build_try(), '-u', self.userstr, 'try_config'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.wait():
            self._save_output(p, 'qnew.out')
            raise GitTryerException, 'Failed to set try configuration'

        # Push to the try server
        sys.stdout.write('pushing to try...\n')
        if os.system('hg push -f %s' % self.url):
            raise GitTryerException, 'Failed to push!'

        # Get rid of our working copy if everything went well
        sys.stdout.write('cleaning up...\n')
        self._cleanup()
        sys.stdout.write('done!\n')
if __name__ == '__main__':
    try:
        t = GitTryer(sys.argv[1:])
        t.run()
    except GitTryerException, e:
        # Point the user at the preserved working directory, if one exists.
        try:
            sys.stderr.write('Working directory at %s\n' % t.tmpdir)
        except:
            # NOTE(review): bare except -- here it papers over `t` not
            # existing when the constructor itself failed; consider
            # narrowing to NameError/AttributeError.
            sys.stderr.write('No working directory created\n')
        sys.stderr.write('%s\n' % str(e))
        sys.exit(1)
| [
"hurley@todesschaf.org"
] | hurley@todesschaf.org | |
72459fc22c1d18cd64c343211bbff66bc5053b8f | e5a6cdbb0c228a9ceafb4128b28ffc7e79be8e40 | /Математика и Python для анализа данных/4 Неделя 2 - Векторы, Матрицы/4.2 ПРАКТИКУМ - Сходство текстов (кошачья задача) в Python/4.2.3.py | 449e07f6e24b1a420a56cada325339f3b6892a42 | [] | no_license | SergioKulyk/Stepic | 2ef43f9dd037e43edd586d06ca24f6251152cb4d | 51070979f3ac2578244e07ae83a8d81c937be63c | refs/heads/master | 2021-04-26T23:10:30.297175 | 2018-03-15T09:30:01 | 2018-03-15T09:30:01 | 123,879,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Шаг 2.1 - проход по списку
# На вход подаётся 1 строка.
#
# Напечатайте в 1 строку все 1-е символы из каждого слова через пробел.
#
# Для разделения строки на слова можно использоваться функцию split():
#
# S = 'Some string'
# L = S.split() # ['Some', 'string']
#
# Sample Input:
#
# Hello my friends
# Sample Output:
#
# H m f
import re
S = input()
for word in re.split('[^A-z]', S):
if word:
print(word[0], end=' ')
| [
"sergiokulyk98@gmail.com"
] | sergiokulyk98@gmail.com |
99d58cfffec18317f497271c87e04c101c9d5fbf | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/rdbms/azure-mgmt-rdbms/generated_samples/mysql/server_security_alerts_create_max.py | 702f9e0bb6a8a7da00508fb08c8a992824a0c71c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,227 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.mysql import MySQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python server_security_alerts_create_max.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MySQLManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
response = client.server_security_alert_policies.begin_create_or_update(
resource_group_name="securityalert-4799",
server_name="securityalert-6440",
security_alert_policy_name="Default",
parameters={
"properties": {
"disabledAlerts": ["Access_Anomaly", "Usage_Anomaly"],
"emailAccountAdmins": True,
"emailAddresses": ["testSecurityAlert@microsoft.com"],
"retentionDays": 5,
"state": "Enabled",
"storageAccountAccessKey": "sdlfkjabc+sdlfkjsdlkfsjdfLDKFTERLKFDFKLjsdfksjdflsdkfD2342309432849328476458/3RSD==",
"storageEndpoint": "https://mystorage.blob.core.windows.net",
}
},
).result()
print(response)
# x-ms-original-file: specification/mysql/resource-manager/Microsoft.DBforMySQL/legacy/stable/2017-12-01/examples/ServerSecurityAlertsCreateMax.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
6fa69e13fb0b3ab5027f35b52980589e1d1dd5eb | 6a6a6e5ce10726a51a82e4dc4ee6c327cfd9dfea | /home/urls.py | 23afcb73dca61c2ae3446ae75b750f9fbf235112 | [] | no_license | Artemka-dev/authsys | dbdd36d621065f5d6e33f538efc8a0f0ffbc23d9 | f45639f331ac29b859d4c338ed963403a824418d | refs/heads/master | 2022-04-10T10:38:42.482916 | 2020-03-23T14:08:01 | 2020-03-23T14:08:01 | 238,509,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from django.urls import path, include
from .views import HomePage, redirect_view, PostsView, DetailPage, CreatePost, ChangePost
urlpatterns = [
path("home/", HomePage.as_view(), name="home_page"),
path("", redirect_view, name="redirect"),
path("posts/", PostsView.as_view(), name="posts_page"),
path("detail/<int:id>/", DetailPage.as_view(), name="detail_page"),
path("create_post/", CreatePost.as_view(), name="create_post_page"),
path("change_post/<int:id>", ChangePost.as_view(), name="change_post_page")
] | [
"artem@MacBook-Pro-Artem.local"
] | artem@MacBook-Pro-Artem.local |
9b69e612ce1735e3297a37f0e864bb92f94d2547 | 84cc47b288292a890f115ed93e3ad1437f803ed8 | /TAL2.py | 7bfd3479b4e930425e2d2779ef7e161c1a777413 | [] | no_license | ClaireLozano/TAL_TD2 | effb6a828f2610c8eb245924cd18c9cf66286e18 | c88a26e19737eb3a2e0bff836cdc52794ecbf1be | refs/heads/master | 2021-05-10T13:34:15.318794 | 2018-01-22T15:43:16 | 2018-01-22T15:43:16 | 118,477,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | import os
import sys
import matplotlib.pyplot as plt
import json
def getDataFromTextFile(folder):
dictionnary = {}
for f in folder:
pathSplit = f.split('/')
language = pathSplit[1]
data = []
with open(f) as inp:
for line in inp:
words = splitByWord(line)
for w in words:
data.append(w)
if dictionnary.get(language) is not None:
resToAppend = dictionnary[language]
dictionnary[language] = dictionnary[language] + data
else :
dictionnary[language] = data
return dictionnary
def splitByWord(line):
wordsList = []
words = line.split()
for word in words:
wordsList.append(word)
return wordsList
def sortByWord(words):
dictionnary = {}
for w in words:
if len(dictionnary):
if w in dictionnary.keys():
dictionnary[w] = dictionnary.get(w) + 1
else:
dictionnary[w] = 1
else :
dictionnary[w] = 1
l = sorted([[y, x] for x,y in dictionnary.items()], reverse=True)
return l[0:8]
# python TAL2.py corpus_multi2/*/appr/*.html
# Get input
folderAppr = sys.argv[1:]
# get words
words = getDataFromTextFile(folderAppr)
# Write in a json file
file = open('frequence.json', 'w+')
file.write("{")
for language, element in words.items():
listSortedWords = sortByWord(element)
file.write('\n\t"' + language + '"' + " : ")
json.dump(listSortedWords, file)
file.write(",")
file.seek(-1, os.SEEK_END)
file.truncate()
file.write("\n}" + "\n")
file.close()
| [
"claire.lozano@live.fr"
] | claire.lozano@live.fr |
ae0dbc246333dba25047afcdcb3eaac9c9418de1 | ade4a86d59f06f9fb9defa0d91ef31ca331e00bc | /ANGELICA/asgi.py | 8481ae10bf7a401465e6fe244385737d8b351d05 | [] | no_license | norma22/practica | 67e4d86b88ed451e9ac04ed0898714165a02e187 | b58ba4d639e7d12a0ad684871b19d176b235b4ac | refs/heads/master | 2023-04-01T05:39:12.153231 | 2021-04-08T01:51:01 | 2021-04-08T01:51:01 | 355,734,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for ANGELICA project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ANGELICA.settings')
application = get_asgi_application()
| [
"masayadelcielo@gmail.com"
] | masayadelcielo@gmail.com |
a182ec951bb86beb1378570c2541409aaf661687 | 7ded0e91c75f9b93ce3cc6df01aa336dc09002c9 | /erg3/erg3.py | b414c01e344537aff7afd12e2b4c9b92b45e5680 | [] | no_license | ArmandoDomi/Machine-Learning-Python | 650a9a7e04752d019fdee735d53e1d867458796c | 6edbd582852b1e9ddd39323623f04ca089f44d00 | refs/heads/master | 2022-02-10T17:11:29.660579 | 2019-06-30T17:05:28 | 2019-06-30T17:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,170 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import perceptron as pr
def evaluate(t,predict,criterion):
tn=fn=tp=fp=float(0)
'''
tn = sum(predict[predict== False] & (t[t==0]))
tp = sum(predict[predict==True] & (t[t==1]))
fn = sum(predict[predict==True] & (t[t==0]))
fp = sum(predict[predict==False] & (t[t==1]))
'''
for i in range(0,len(t)):
if(predict[i]== False and t[i]==0):
tn=tn+1
if(predict[i]== True and t[i] == 1):
tp=tp+1
if(predict[i]== True and t[i]==0):
fn=fn+1
if(predict[i]==False and t[i]==1):
fp=fp+1
# Xrhsh try-catch. Se periptwsh pou ginei diairesh me to 0 tote apla epistrefw thn timh 0
try:
mydict = {'accuracy':(tp+tn)/(tp+tn+fp+fn), 'precision':(tp)/(tp+fp), 'recall':(tp)/(tp+fn), 'fmeasure':((tp)/(tp+fp))/((tp)/(tp+fn)), 'sensitivity':(tp)/(tp+fn), 'specificity':(tn)/(tn+fp)}
except ZeroDivisionError:
return 0
return mydict[criterion]
#diavasma apo to arxeio
data=pd.read_csv("iris.data",delimiter=',',header=None).values;
#plithis protipwn kai dianismaton
NumberOfAttributes=len(data[0,:])
NumberOfPatterns=len(data)
accuracy = precision = recall = fmeasure = sensitivity = specificity = float(0)
map_dict={"Iris-setosa":0,"Iris-versicolor":1,"Iris-virginica":0}
#arxikopoisi kai toy dianismatos x kai t
x=data[:,0:NumberOfAttributes-1]
t=np.zeros(shape=(NumberOfPatterns),dtype=float)
myClass=np.zeros(shape=(NumberOfPatterns),dtype=float)
option=0
while option !=4:
print("1.Diaxwrismos Iris-setosa apo Iris-virginica - Iris-versicolor\n");
print("2.Diaxwrismos Iris-versicolor apo Iris-setosa - Iris-virginica\n");
print("3.Diaxwrismos Iris-virginica apo Iris-setosa - Iris-versicolor\n");
print('4.exodos\n');
option=input("Dwse epilogi : \n");
if option == 1:
t[0:49]=1
elif option == 2:
t[50:99]=1
elif option==3:
t[100:]=1
elif option ==4:
print "bye"
break
else:
print "** Lathos epilogi ** "
break
epochs=input("Give the number of max epochs: ")
b=input("Give the number of learning rate: ")
#epauxismos toy pinaka protipwn x
X=np.hstack((x,np.ones((len(data),1))))
#start_Of_folds
fig,subplt=plt.subplots(3,3);
n_folds=9;
for folds in range(0,n_folds):
xtrain,xtest,ttrain,ttest=train_test_split(X,t,test_size=0.25)
numberOfTrain=len(xtrain)
numberOfTest=len(xtest)
ttrain1 = 2*ttrain - 1; # metatropi twn 0 se -1 kai to 1 paramenei 1
ttest1 = 2*ttest - 1; # metatropi twn 0 se -1 kai to 1 paramenei 1
xtrain = np.array(xtrain, dtype=float)
xtest = np.array(xtest, dtype=float)
w=pr.perceptron(np.transpose(xtrain),ttrain1,epochs,b)
y=np.dot(xtest,np.transpose(w))
predict=(y>0)
accuracy+=evaluate(ttest,predict,'accuracy')
precision+=evaluate(ttest,predict,'precision')
recall+=evaluate(ttest,predict,'recall')
fmeasure+=evaluate(ttest,predict,'fmeasure')
sensitivity+=evaluate(ttest,predict,'sensitivity')
specificity+=evaluate(ttest,predict,'specificity')
#plots
subplt[(folds)/3, (folds)%3].plot(ttest, "ro")
subplt[(folds)/3, (folds)%3].plot(predict, "b.")
print('Mean accuracy for all folds is : %f\n',np.mean(accuracy))
print('Mean precision for all folds is : %f\n',np.mean(precision))
print('Mean recall for all folds is : %f\n',np.mean(recall))
print('Mean f-measure for all folds is : %f\n',np.mean(fmeasure))
print('Mean sensitivity for all folds is : %f\n',np.mean(sensitivity))
print('Mean specificity for all folds is : %f\n',np.mean(specificity))
print('\n');
accuracy = precision = recall = fmeasure = sensitivity = specificity = float(0)
| [
"noreply@github.com"
] | noreply@github.com |
f1dfe353f054cafbfba858bdbd4445953f5aa987 | 315e14423cc1ef8167ce3727eeead0baa7687e7c | /courseDesignProject/miniLibrary/admin.py | 2688b6cb5e830956ae3eccf67a1641b1a7762d56 | [] | no_license | tombsmrliu/django | 61d26ae19ee079555d112c5898ea5eb29ffa5884 | 255e0ea1071d418c72887c51e5b9b855c4783d73 | refs/heads/master | 2021-01-20T04:09:04.231350 | 2017-04-27T14:56:09 | 2017-04-27T14:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | from django.contrib import admin
from .models import Category, Topic, TopicProfile, Comment
# Register your models here.
@admin.register(TopicProfile)
class TopicProfileAdmin(admin.ModelAdmin):
pass
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
pass
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'color')
fields = ('name', 'color')
@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
def change_select_to_published(self, request, queryset):
opera_num = queryset.update(status='published')
self.message_user(request, '{}篇主题已更新为\'审核通过\'!'.format(opera_num))
def change_select_to_ban(self, request, queryset):
opera_num = queryset.update(status='ban')
self.message_user(request, '{}篇主题已更新为\'审核未通过\'!'.format(opera_num))
change_select_to_published.short_description = '通过审核所选主题'
change_select_to_ban.short_description = '审核不通过所选主题'
actions = [change_select_to_published, change_select_to_ban]
list_display = ('title', 'auth', 'status', 'created')
list_fields = ('title', 'auth', 'description', 'files', 'status')
list_filter = ('status',)
list_editable = ('status',)
| [
"c2720043432@gmail.com"
] | c2720043432@gmail.com |
6f5f64793115b2302e947c9f8ce5be419d5740e1 | 7f4da36c8a5a8f771de4ca51a9a8ba1c64574d69 | /Homework-4/Homework_4_12.7.py | 9bff18588c648c7c462ef7bbf36703c7bfb414ff | [] | no_license | johnrick10234/hello-world | 37576d80636294f3c60db1989649757b1743cdde | 90d090645ddb2e605f89319eec5089a4e5dc7d3b | refs/heads/main | 2023-04-21T03:57:44.943841 | 2021-05-07T02:23:28 | 2021-05-07T02:23:28 | 331,312,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | #John Rick Santillan #1910045
def get_age():
age=int(input())
if age<18 or age>75:
raise ValueError('Invalid age.')
return age
def fat_burning_heart_rate(age):
h_rate=(220-age)*.70
return h_rate
if __name__=='__main__':
try:
age=get_age()
print('Fat burning heart rate for a',age,'year-old:',"{:.1f}".format(fat_burning_heart_rate(age)),'bpm')
except ValueError as ex:
print(ex)
print('Could not calculate heart rate info.\n') | [
"jcsantil@cougarnet.uh.edu"
] | jcsantil@cougarnet.uh.edu |
9641f3a1d9b171033732295dad72e3d86a747a41 | d1ea082d5704cc8f16ff9ad01f5a28d9b287e7db | /run.py | 6a6dd0bb9899e3875183b54fb6a376c8ce97ec7f | [] | no_license | YaoChungLiang/CSE571_HW2 | a87d43b1fcde4ef86da024f226bae93845059aa1 | 8cddce39e535ead2753c78d3b04ab11bf7c57887 | refs/heads/master | 2020-04-26T06:24:41.786754 | 2020-01-08T15:48:38 | 2020-01-08T15:48:38 | 173,363,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | #!/usr/bin/env python
import argparse, numpy, time
from MapEnvironment import MapEnvironment
from RRTPlanner import RRTPlanner
from RRTStarPlanner import RRTStarPlanner
from AStarPlanner import AStarPlanner
from IPython import embed
def main(planning_env, planner, start, goal):
# Notify.
#raw_input('Press any key to begin planning')
input('Press any key to begin planning')
# Plan.
start=tuple(start)
goal=tuple(goal)
plan = planner.Plan(start, goal)
# Shortcut the path.
# TODO (student): Do not shortcut when comparing the performance of algorithms.
# Comment this line out when collecting data over performance metrics.
plan_short = planner.ShortenPath(plan)
# Visualize the final path.
#print("in main")
#print(plan)
planning_env.visualize_plan(plan)
#embed()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='script for testing planners')
parser.add_argument('-m', '--map', type=str, default='map1.txt',
help='The environment to plan on')
parser.add_argument('-p', '--planner', type=str, default='rrt',
help='The planner to run (star, rrt, rrtstar)')
parser.add_argument('-s', '--start', nargs='+', type=int, required=True)
parser.add_argument('-g', '--goal', nargs='+', type=int, required=True)
args = parser.parse_args()
# First setup the environment and the robot.
planning_env = MapEnvironment(args.map, args.start, args.goal)
# Next setup the planner
if args.planner == 'astar':
planner = AStarPlanner(planning_env)
elif args.planner == 'rrt':
planner = RRTPlanner(planning_env)
elif args.planner == 'rrt*':
planner = RRTStarPlanner(planning_env)
else:
print('Unknown planner option: %s' % args.planner)
exit(0)
main(planning_env, planner, args.start, args.goal)
| [
"noreply@github.com"
] | noreply@github.com |
5374279c48d6c2393d43f2cabec46806407affec | c6ba762b22bf2b6c700bdec991cef5fa75bf7bad | /config.py | 28fc04c83eecc352bc7657925b87ed26d1f5a342 | [] | no_license | jcarlos20/00_appbuilder | 9974cfcd2508206aca4613773e1da2bc16da237e | 2cddefa01e04757664eeed1c3bd4636e9fbfae13 | refs/heads/master | 2023-08-18T13:09:04.045068 | 2021-09-22T23:27:07 | 2021-09-22T23:27:07 | 409,384,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"
OPENID_PROVIDERS = [
{"name": "Google", "url": "https://www.google.com/accounts/o8/id"},
{"name": "Yahoo", "url": "https://me.yahoo.com"},
{"name": "AOL", "url": "http://openid.aol.com/<username>"},
{"name": "Flickr", "url": "http://www.flickr.com/<username>"},
{"name": "MyOpenID", "url": "https://www.myopenid.com"},
]
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
BABEL_DEFAULT_LOCALE = "en"
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
"en": {"flag": "gb", "name": "English"},
"pt": {"flag": "pt", "name": "Portuguese"},
"es": {"flag": "es", "name": "Spanish"},
"de": {"flag": "de", "name": "German"},
"zh": {"flag": "cn", "name": "Chinese"},
"ru": {"flag": "ru", "name": "Russian"},
}
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
AUTH_TYPE = 1
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
# APP_NAME = "My App Name"
# APP_ICON = "static/img/logo.jpg"
APP_THEME = "" # default
# APP_THEME = "cerulean.css"
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"
# APP_THEME = "spacelab.css"
# APP_THEME = "united.css"
# APP_THEME = "yeti.css" | [
"jtorres@noven.io"
] | jtorres@noven.io |
64fe194f6f6f6b4fdd89e79f2467a83037405210 | 49fd63987c06bb07c016bea4e4a42b4f6209a1d2 | /hash.py | c26e3f5d5f501165283ec9c054b5b8fdf380a663 | [] | no_license | gonzalpetraglia/finger-4 | 65f885ce9ec0e8ecd70a977b865f0a10fcbd2371 | 14caffc483871a9156b54ed1d333b75862aab16d | refs/heads/master | 2020-12-26T01:12:15.231148 | 2016-09-17T15:45:28 | 2016-09-17T15:45:28 | 68,461,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | def myhash(s):
h=1
for c in s:
intC=ord(c)
h=ord(c)*h
h=(h>>32)+(h<<32)
h=h^((intC)or(intC<<16)or(intC<<32)or(intC<<48)or(intC<<8)or(intC<<24)or(intC<<56)or(intC<<40))
h=h%7205759403792700000
return h
| [
"noreply@github.com"
] | noreply@github.com |
c56d89a5521271887607380dcd98129d793d979f | 3708c3ae5aee629b0ca563e7796e77b9ada61c12 | /Lib/site-packages/pygments/lexers/dotnet.py | dda0230893a23b9b5ec9ab190cf4a0e830d9ee4d | [] | no_license | Lilyonegithub/venv | 5cec8ef4e62056137cbf8f9fa727a73321c10e43 | ae8ba5e0f224d5990d53b7203ec6cae7e7462faf | refs/heads/master | 2020-04-19T16:27:20.329731 | 2019-01-30T08:19:14 | 2019-01-30T08:19:14 | 168,305,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,694 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
# Public names exported by this module (the Aspx/FSharp lexers are defined
# further down in the file).
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
           'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.
    Additional options accepted:
    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:
      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!
      The default value is ``basic``.
    .. versionadded:: 0.8
    """
    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp'] # inferred
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    # One identifier regex per `unicodelevel` option value; 'basic' and
    # 'full' are assembled from Unicode category classes at import time.
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }
    tokens = {}
    # token_variants signals that `tokens` holds several alternative state
    # machines (keyed by level name) rather than a single one; the variant
    # to compile is selected per-instance in __init__ below.
    token_variants = True
    # Build one token table per identifier level; the tables differ only in
    # the identifier regex bound to `cs_ident`.
    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
                 r'(' + cs_ident + ')' # method name
                 r'(\s*)(\()', # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text), # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                # verbatim @"..." strings ("" is the escaped quote)
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|as|async|await|base|break|by|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|let|lock|new|null|on|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|thenby|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            # after `class`/`struct`: highlight the declared name
            'class': [
                (cs_ident, Name.Class, '#pop'),
                default('#pop'),
            ],
            # after `namespace`/`using`: dotted name, or nothing for a
            # `using (resource)` statement
            'namespace': [
                (r'(?=\()', Text, '#pop'), # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
            ]
        }
    def __init__(self, **options):
        # Pick the identifier variant requested by the user ('basic' by
        # default) and compile its regexes lazily, reusing the table when a
        # previous instance already compiled that level into _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
    """
    For `Nemerle <http://nemerle.org>`_ source code.
    Additional options accepted:
    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:
      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!
      The default value is ``basic``.
    .. versionadded:: 1.5
    """
    name = 'Nemerle'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle'] # inferred
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    # One identifier regex per `unicodelevel` option value (same scheme as
    # CSharpLexer above).
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }
    tokens = {}
    # Several alternative state machines in `tokens`, keyed by level name;
    # the one to compile is selected per-instance in __init__ below.
    token_variants = True
    # Build one token table per identifier level; the tables differ only in
    # the identifier regex bound to `cs_ident`.
    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
                 r'(' + cs_ident + ')' # method name
                 r'(\s*)(\()', # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text), # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                # interpolated ("spliced") and recursive string forms
                (r'\$\s*"', String, 'splice-string'),
                (r'\$\s*<#', String, 'splice-string2'),
                (r'<#', String, 'recursive-string'),
                # quasiquotation brackets <[ ... ]>
                (r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
                (r'\]\>', Keyword),
                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                # type ascriptions / coercions like `x : T` or `x :> T`
                (r'(:>?)\s*(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text),
                 'namespace'),
                (cs_ident, Name),
            ],
            # after a type-declaration keyword: highlight the declared name
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            # after `namespace`/`using`: dotted name, or nothing for a
            # `using (resource)` statement
            'namespace': [
                (r'(?=\()', Text, '#pop'), # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            # inside $"...": plain chars, $ident and $( ... ) splices
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            # inside $<# ... #>: like splice-string but <#/#> nest
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            # inside <# ... #> (no splicing, but nesting is tracked)
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            # inside a $( ... ) splice; parentheses push/pop for nesting
            'splice-string-content': [
                (r'if|match', Keyword),
                # NOTE(review): inside this character class, `?-\\` forms a
                # range (0x3F-0x5C) that also matches '@', 'A'-'Z' and '[',
                # so lone uppercase letters become Punctuation here --
                # possibly unintended; confirm before changing the regex.
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }
    def __init__(self, **options):
        # Pick the identifier variant requested by the user ('basic' by
        # default) and compile its regexes lazily, reusing the table when a
        # previous instance already compiled that level into _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """
    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text), # line continuation
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # regex literals: /.../ and the @/.../ form (which may contain
            # whitespace)
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            # anonymous callable: `def (args)` without a name
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            # builtins, only when not preceded by a dot (attribute access)
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\"|[^"]*?)"', String.Double),
            (r"'(\\\\|\\'|[^']*?)'", String.Single),
            (r'[a-zA-Z_]\w*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            # timespan literals like 100ms, 2d, 3h, 4s
            (r'[0-9][0-9.]*(ms?|d|h|s)', Number),
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        # /* ... */ comments nest: each inner /* pushes, each */ pops
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        # single identifier following `def` / `class` / `namespace`
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """
    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
    # Identifier pattern assembled from Unicode categories: a leading letter
    # (or underscore) followed by letters/digits/connector punctuation etc.,
    # per the VB.NET language spec.
    uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
               '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                 'Cf', 'Mn', 'Mc') + ']*'
    flags = re.MULTILINE | re.IGNORECASE
    # NOTE: RegexLexer tries rules top to bottom within a state, so the order
    # of entries below is significant.
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            # comments: "rem" keyword form and apostrophe form
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            # compiler directives (#If / #Region / ...)
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[(){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            # reserved words; the (?<!\.) prefix keeps member accesses like
            # obj.Case from being highlighted as keywords
            (words((
                'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
                'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
                'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
                'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
                'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
                'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
                'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
                'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
                # NOTE(review): 'Lib1' below looks like a corruption of the
                # VB keyword 'Lib' -- confirm against upstream before fixing.
                'Lib1', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
                'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
                'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
                'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
                'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
                'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
                'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
                'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
                'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
                'Widening', 'With', 'WithEvents', 'WriteOnly'),
                   prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
            # "End" and "Dim"/"Const" push dedicated states to classify what
            # follows them
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            # builtin type names
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            # word-form operators
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
             Operator),
            ('"', String, 'string'),
            (r'_\n', Text), # Line continuation (must be before Name)
            # identifier with optional legacy type-character suffix (%&@!#$)
            (uni_name + '[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
        ],
        'string': [
            (r'""', String),          # doubled quote = escaped quote
            (r'"C?', String, '#pop'), # closing quote, optional Char suffix
            (r'[^"]+', String),
        ],
        'dim': [
            (uni_name, Name.Variable, '#pop'),
            default('#pop'), # any other syntax
        ],
        'funcname': [
            (uni_name, Name.Function, '#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'namespace': [
            # dotted namespace path; pops on anything else
            (uni_name, Name.Namespace),
            (r'\.', Name.Namespace),
            default('#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            default('#pop'),
        ]
    }
    def analyse_text(text):
        # Heuristic used by Pygments' lexer guessing: VB-ish file-level
        # constructs at line start suggest VB.NET. Implicitly returns None
        # (no opinion) when nothing matches.
        if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
            return 0.5
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.

    Language-agnostic: server-side code regions are emitted as ``Other`` so
    that a :class:`DelegatingLexer` subclass can re-lex them with the page's
    actual language (see the aspx-cs / aspx-vb lexers below).
    """
    name = 'aspx-gen'
    filenames = []
    mimetypes = []
    flags = re.DOTALL
    tokens = {
        'root': [
            # <% ... %> server blocks: delimiters as tags, contents as Other
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            # <script> blocks: markup via the XML lexer, body as Other
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            # everything else is plain markup handled by the XML lexer
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.

    Delegates: C# lexes the server-side code regions, the generic ASPX
    lexer handles the surrounding markup.
    """
    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    def __init__(self, **options):
        super(CSharpAspxLexer, self).__init__(
            CSharpLexer, GenericAspxLexer, **options)
    def analyse_text(text):
        # Guessing heuristic: look for an explicit C# language declaration,
        # first on the Page directive, then on a <script> tag.
        page_decl = re.search(r'Page\s*Language="C#"', text, re.I)
        if page_decl is not None:
            return 0.2
        script_decl = re.search(r'script[^>]+language=["\']C#', text, re.I)
        if script_decl is not None:
            return 0.15
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.

    Delegates: VB.NET lexes the server-side code regions, the generic ASPX
    lexer handles the surrounding markup.
    """
    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    def __init__(self, **options):
        super(VbNetAspxLexer, self).__init__(
            VbNetLexer, GenericAspxLexer, **options)
    def analyse_text(text):
        # Guessing heuristic: look for an explicit VB language declaration,
        # first on the Page directive, then on a <script> tag.
        page_decl = re.search(r'Page\s*Language="Vb"', text, re.I)
        if page_decl is not None:
            return 0.2
        script_decl = re.search(r'script[^>]+language=["\']vb', text, re.I)
        if script_decl is not None:
            return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).
    AAAAACK Strings
    http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
    .. versionadded:: 1.5
    """
    name = 'FSharp'
    aliases = ['fsharp']
    filenames = ['*.fs', '*.fsi']
    mimetypes = ['text/x-fsharp']
    # F# 3.0 keywords
    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    # symbolic keywords / punctuation (already regex-escaped where needed);
    # joined with '|' below, so longer alternatives are listed first
    keyopts = [
        '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
        '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
        '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]
    # character classes for user-definable operators
    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]
    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.
    # NOTE: rules within each state are tried in order -- do not reorder.
    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            # capitalized identifier followed by '.' starts a dotted path
            (r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            (r'///.*?\n', String.Doc),
            (r'//.*?\n', Comment.Single),
            (r'\(\*(?!\))', Comment, 'comment'),
            # verbatim, triple-quoted and plain strings
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'\b(open|module)(\s+)([\w.]+)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'\b(let!?)(\s+)(\w+)',
             bygroups(Keyword, Text, Name.Variable)),
            (r'\b(type)(\s+)(\w+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
             bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
             Comment.Preproc),
            (r"[^\W\d][\w']*", Name),
            # numeric literals; integers are tried before the float rule
            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
            # NOTE(review): this float rule requires an exponent part and the
            # '.' is unescaped -- confirm against the F# spec / upstream.
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword), # a stray quote is another syntax element
            (r'@?"', String.Double, 'string'),
            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            # consume Namespace.Parts.Like.This; pop on the final component
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            # e.g. dictionary index access
            default('#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),  # (* ... *) comments nest
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String), # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            # verbatim @"..." string: "" is the only escape
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            # triple-quoted string: no escapes at all
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }
| [
"2362143268@qq.com"
] | 2362143268@qq.com |
83a95913c2ac03d2345d00ae0bd4d376da222c85 | 8b83842b1f6a5594c0a3e43c3edd6b5162dde512 | /src/preprocess/convert_and_pad_data.py | d455debf12454e3b524bea3cde9887b2a734f864 | [] | no_license | petersiemen/sagemaker-deployment | 357acc0350a19a8d1c874557dafa23b5d8280ff6 | be90d605cc861d08b65ab23ef5926b9ec01bd90a | refs/heads/master | 2022-02-05T01:31:37.869645 | 2019-06-21T10:40:28 | 2019-06-21T10:40:28 | 190,742,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from serve.utils import convert_and_pad
import numpy as np
def convert_and_pad_data(word_dict, data, pad=500):
    """Convert each sentence in *data* to its padded index representation.

    Returns a pair of numpy arrays: the converted/padded sentences and the
    original (pre-padding) length of each sentence.
    """
    pairs = [convert_and_pad(word_dict, sentence, pad) for sentence in data]
    if pairs:
        converted, lengths = zip(*pairs)
    else:
        converted, lengths = (), ()
    return np.array(converted), np.array(lengths)
| [
"peter.siemen@googlemail.com"
] | peter.siemen@googlemail.com |
248fd13fb4eaab0035922b523cc263c22c4e30dd | cc0b10f5037f38fbb12d47a55fbab1bcbf90996d | /main/migrations/0006_auto_20191022_1428.py | 54ece5ca3ae4e37808e80bca58fda17fbf349f5a | [] | no_license | sreebhargava143/researchtoolproject | fe92fdfa7533110e2bdf4f7f9fa661e2fb528d96 | 3a19fa423d19053fa8baa395c5b9753948e3eecb | refs/heads/master | 2022-12-22T19:47:29.202845 | 2019-10-22T21:47:54 | 2019-10-22T21:47:54 | 215,212,305 | 0 | 0 | null | 2022-07-06T20:18:58 | 2019-10-15T05:15:06 | CSS | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.2.6 on 2019-10-22 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: changes StoryCard.card_data's
    # user-facing label (verbose_name) to "Write your story". Column type
    # stays TextField, so no data migration is involved.
    dependencies = [
        ('main', '0005_auto_20191021_1130'),
    ]
    operations = [
        migrations.AlterField(
            model_name='storycard',
            name='card_data',
            field=models.TextField(verbose_name='Write your story'),
        ),
    ]
| [
"sreebhargava143@gmail.com"
] | sreebhargava143@gmail.com |
9edd138ebcf348533c62d8526ba75efddf77b4bc | 2876fa06e8bf1812526ec888367d626029a9f0f9 | /anatomy/migrations/0001_initial.py | c95642771e10e52abe90a3b3a8505e15f11b0e12 | [] | no_license | Ashwary-Jharbade/anatomy | af9e2c33e60c397b8cab63cfcc4d900e7789d531 | ea4534bac582b044c00c8592e3b8f7057177e165 | refs/heads/main | 2023-07-17T15:32:47.503613 | 2021-08-29T17:39:45 | 2021-08-29T17:39:45 | 382,641,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # Generated by Django 3.0.4 on 2021-06-20 15:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the UserProfile table with a
    # mobile number and a one-to-one link to the auth user model (profile is
    # deleted together with its user via CASCADE).
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile', models.CharField(max_length=10)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"ashwary.jharbade999@gmail.com"
] | ashwary.jharbade999@gmail.com |
fb8cdf8a313e492be527551aa492bf665d7a5c79 | 11c2e1b6fada746b71e0bd9575f5936a352f14df | /Box.py | 534a08beada9fdc035eded1c97ca9c76bcb8ef29 | [] | no_license | Eglopez/Python-GUI | 6e3e49f25ebb9f60b41d1981c46d6852f5a5eb51 | 05d7c71206d293aea2e8a32a21809f06d9fdcb2c | refs/heads/master | 2020-07-05T08:34:26.338755 | 2019-08-20T00:55:45 | 2019-08-20T00:55:45 | 202,592,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # Auto-generated (PyQt5 Designer-style) UI class for a small "Agregar"
    # dialog: one line edit plus an OK/Cancel button box. Avoid hand-editing
    # beyond comments -- regenerating the .ui file would overwrite changes.
    def setupUi(self, Dialog):
        """Create the dialog's widgets, lay them out, and wire the standard
        accept/reject signals of the button box."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 104)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(20, 40, 371, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(20, 44, 171, 31))
        self.lineEdit.setObjectName("lineEdit")
        self.retranslateUi(Dialog)
        # OK -> accept(), Cancel -> reject()
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set all translatable strings (called once from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Agregar"))
if __name__ == "__main__":
    # Manual test harness: show the dialog standalone and run the Qt event
    # loop until it is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
| [
"eduardolopezlainez2001@gmail.com"
] | eduardolopezlainez2001@gmail.com |
f32df982f5733226b7805e1b39d44e5cdad86efa | d4da2760dd011facd24ecbb6955cbedf04dd1a1d | /src/bfvae1/solver_test.py | 216d4953f3163c0a52ea2bd31501fee2d35e8eba | [] | no_license | entn-at/bfvae | 658f59bfbdb8223a1dbe335a5605394328f3045d | 6308734b195d1d937f5b72ed7f8535f0bcca96b7 | refs/heads/master | 2023-01-06T21:48:42.905720 | 2020-11-06T16:51:25 | 2020-11-06T16:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,652 | py | import os
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.utils import save_image
#-----------------------------------------------------------------------------#
from utils import DataGather, mkdirs, grid2gif
from model import *
from dataset import create_dataloader
###############################################################################
class Solver(object):
####
    def __init__(self, args):
        """Build the test-time solver.

        Resolves per-dataset metadata (channel count, ground-truth factor
        labels where available), restores the trained networks from the
        checkpoint given by ``args.ckpt_load_iter``, moves them to GPU if
        requested, and prepares the records/outputs directories.
        """
        self.args = args
        # experiment name: encodes the main hyperparameters + run id; used
        # for checkpoint, record and output paths
        self.name = ( '%s_gamma_%s_zDim_%s' + \
            '_lrVAE_%s_lrD_%s_rseed_%s' ) % \
            ( args.dataset, args.gamma, args.z_dim,
              args.lr_VAE, args.lr_D, args.rseed )
        self.name = self.name + '_run_' + str(args.run_id)
        self.use_cuda = args.cuda and torch.cuda.is_available()
        # data info
        self.dset_dir = args.dset_dir
        self.dataset = args.dataset
        # number of image channels (grayscale vs RGB)
        if args.dataset.endswith('dsprites'):
            self.nc = 1
        elif args.dataset == '3dfaces':
            self.nc = 1
        else:
            self.nc = 3
        # groundtruth factor labels (only available for "dsprites")
        if self.dataset=='dsprites':
            # latent factor = (color, shape, scale, orient, pos-x, pos-y)
            # color = {1} (1)
            # shape = {1=square, 2=oval, 3=heart} (3)
            # scale = {0.5, 0.6, ..., 1.0} (6)
            # orient = {2*pi*(k/39)}_{k=0}^39 (40)
            # pos-x = {k/31}_{k=0}^31 (32)
            # pos-y = {k/31}_{k=0}^31 (32)
            # (number of variations = 1*3*6*40*32*32 = 737280)
            latent_values = np.load( os.path.join( self.dset_dir,
                'dsprites-dataset', 'latents_values.npy'), encoding='latin1' )
            # drop the constant "color" factor (column 0)
            self.latent_values = latent_values[:, [1,2,3,4,5]]
              # latent values (actual values);(737280 x 5)
            latent_classes = np.load( os.path.join( self.dset_dir,
                'dsprites-dataset', 'latents_classes.npy'), encoding='latin1' )
            self.latent_classes = latent_classes[:, [1,2,3,4,5]]
              # classes ({0,1,...,K}-valued); (737280 x 5)
            self.latent_sizes = np.array([3, 6, 40, 32, 32])
            self.N = self.latent_values.shape[0]
        # groundtruth factor labels
        elif self.dataset=='oval_dsprites':
            latent_classes = np.load( os.path.join( self.dset_dir,
                'dsprites-dataset', 'latents_classes.npy'), encoding='latin1' )
            idx = np.where(latent_classes[:,1]==1)[0]  # "oval" shape only
            self.latent_classes = latent_classes[idx,:]
            # shape is now constant, so keep only (scale, orient, pos-x, pos-y)
            self.latent_classes = self.latent_classes[:,[2,3,4,5]]
              # classes ({0,1,...,K}-valued); (245760 x 4)
            latent_values = np.load( os.path.join( self.dset_dir,
                'dsprites-dataset', 'latents_values.npy'), encoding='latin1' )
            self.latent_values = latent_values[idx,:]
            self.latent_values = self.latent_values[:,[2,3,4,5]]
              # latent values (actual values);(245760 x 4)
            self.latent_sizes = np.array([6, 40, 32, 32])
            self.N = self.latent_values.shape[0]
        # groundtruth factor labels
        elif self.dataset=='3dfaces':
            # latent factor = (id, azimuth, elevation, lighting)
            #   id = {0,1,...,49} (50)
            #   azimuth = {-1.0,-0.9,...,0.9,1.0} (21)
            #   elevation = {-1.0,0.8,...,0.8,1.0} (11)
            #   lighting = {-1.0,0.8,...,0.8,1.0} (11)
            # (number of variations = 50*21*11*11 = 127050)
            latent_classes, latent_values = np.load( os.path.join(
                self.dset_dir, '3d_faces/rtqichen/gt_factor_labels.npy' ) )
            self.latent_values = latent_values
              # latent values (actual values);(127050 x 4)
            self.latent_classes = latent_classes
              # classes ({0,1,...,K}-valued); (127050 x 4)
            self.latent_sizes = np.array([50, 21, 11, 11])
            self.N = self.latent_values.shape[0]
        elif self.dataset=='celeba':
            # no ground-truth factors available; metrics are disabled
            self.N = 202599
            self.eval_metrics = False
        elif self.dataset=='edinburgh_teapots':
            # latent factor = (azimuth, elevation, R, G, B)
            #   azimuth = [0, 2*pi]
            #   elevation = [0, pi/2]
            #   R, G, B = [0,1]
            #
            #   "latent_values" = original (real) factor values
            #   "latent_classes" = equal binning into K=10 classes
            #
            # (refer to "data/edinburgh_teapots/my_make_split_data.py")
            K = 10
            val_ranges = [2*np.pi, np.pi/2, 1, 1, 1]
            bins = []
            for j in range(5):
                bins.append(np.linspace(0, val_ranges[j], K+1))
            # concatenate train/val/test ground-truth factor files
            latent_values = np.load( os.path.join( self.dset_dir,
                'edinburgh_teapots', 'gtfs_tr.npz' ) )['data']
            latent_values = np.concatenate( ( latent_values,
                np.load( os.path.join( self.dset_dir,
                    'edinburgh_teapots', 'gtfs_va.npz' ) )['data'] ),
                axis = 0 )
            latent_values = np.concatenate( ( latent_values,
                np.load( os.path.join( self.dset_dir,
                    'edinburgh_teapots', 'gtfs_te.npz' ) )['data'] ),
                axis = 0 )
            self.latent_values = latent_values
            # discretize each continuous factor into its bin index
            latent_classes = np.zeros(latent_values.shape)
            for j in range(5):
                latent_classes[:,j] = np.digitize(latent_values[:,j], bins[j])
            self.latent_classes = latent_classes-1  # {0,...,K-1}-valued
            self.latent_sizes = K*np.ones(5, 'int64')
            self.N = self.latent_values.shape[0]
        # networks and optimizers
        self.batch_size = args.batch_size
        self.z_dim = args.z_dim
        self.gamma = args.gamma
        # what to do in this test
        self.num_recon = args.num_recon
        self.num_synth = args.num_synth
        self.num_trvsl = args.num_trvsl
        self.losses = args.losses
        self.num_eval_metric1 = args.num_eval_metric1
        self.num_eval_metric2 = args.num_eval_metric2
        # checkpoints
        self.ckpt_dir = os.path.join("ckpts", self.name)
        # create dirs: "records", "ckpts", "outputs" (if not exist)
        mkdirs("records");  mkdirs("outputs")
        # records
        self.record_file = 'records/%s.txt' % ("test_" + self.name)
        # outputs
        self.output_dir_recon = os.path.join( "outputs",
                                              "test_" + self.name + '_recon' )
        self.output_dir_synth = os.path.join( "outputs",
                                              "test_" + self.name + '_synth' )
        self.output_dir_trvsl = os.path.join( "outputs",
                                              "test_" + self.name + '_trvsl' )
        # load a previously saved model
        self.ckpt_load_iter = args.ckpt_load_iter
        print('Loading saved models (iter: %d)...' % self.ckpt_load_iter)
        self.load_checkpoint()
        print('...done')
        if self.use_cuda:
            print('Models moved to GPU...')
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()
            self.prior_alpha = self.prior_alpha.cuda()
            self.post_alpha = self.post_alpha.cuda()
            self.D = self.D.cuda()
            print('...done')
        # evaluation only: freeze batch-norm/dropout behavior
        self.set_mode(train=False)
####
def test(self):
ones = torch.ones( self.batch_size, dtype=torch.long )
zeros = torch.zeros( self.batch_size, dtype=torch.long )
if self.use_cuda:
ones = ones.cuda()
zeros = zeros.cuda()
# prepare dataloader (iterable)
print('Start loading data...')
self.data_loader = create_dataloader(self.args)
print('...done')
# iterator from dataloader
iterator = iter(self.data_loader)
iter_per_epoch = len(iterator)
#----#
# image synthesis
if self.num_trvsl > 0:
prn_str = 'Start doing image synthesis...'
print(prn_str)
self.dump_to_record(prn_str)
for ii in range(self.num_synth):
self.save_synth( str(self.ckpt_load_iter) + '_' + str(ii),
howmany=100 )
# latent traversal
if self.num_trvsl > 0:
prn_str = 'Start doing latent traversal...'
print(prn_str)
self.dump_to_record(prn_str)
# self.save_traverse_new( self.ckpt_load_iter, self.num_trvsl,
# limb=-4, limu=4, inter=0.1 )
self.save_traverse_new( self.ckpt_load_iter, self.num_trvsl,
limb=-16, limu=16, inter=0.2 )
# metric1
if self.num_eval_metric1 > 0:
prn_str = 'Start evaluating metric1...'
print(prn_str)
self.dump_to_record(prn_str)
#
metric1s = np.zeros(self.num_eval_metric1)
C1s = np.zeros([ self.num_eval_metric1,
self.z_dim, len(self.latent_sizes) ])
for ii in range(self.num_eval_metric1):
metric1s[ii], C1s[ii] = self.eval_disentangle_metric1()
prn_str = 'eval metric1: %d/%d done' % \
(ii+1, self.num_eval_metric1)
print(prn_str)
self.dump_to_record(prn_str)
#
prn_str = 'metric1:\n' + str(metric1s)
prn_str += '\nC1:\n' + str(C1s)
print(prn_str)
self.dump_to_record(prn_str)
# metric2
if self.num_eval_metric2 > 0:
prn_str = 'Start evaluating metric2...'
print(prn_str)
self.dump_to_record(prn_str)
#
metric2s = np.zeros(self.num_eval_metric2)
C2s = np.zeros([ self.num_eval_metric2,
self.z_dim, len(self.latent_sizes) ])
for ii in range(self.num_eval_metric2):
metric2s[ii], C2s[ii] = self.eval_disentangle_metric2()
prn_str = 'eval metric2: %d/%d done' % \
(ii+1, self.num_eval_metric2)
print(prn_str)
self.dump_to_record(prn_str)
#
prn_str = 'metric2:\n' + str(metric2s)
prn_str += '\nC2:\n' + str(C2s)
print(prn_str)
self.dump_to_record(prn_str)
#----#
if self.losses or self.num_recon>0:
num_adds = 0
loss_kl_inds = np.zeros(self.z_dim)
losses = {}
losses['vae_loss'] = 0.0
losses['dis_loss'] = 0.0
losses['recon'] = 0.0
losses['kl'] = 0.0
losses['tc'] = 0.0
losses['kl_alpha'] = 0.0
cntdn = self.num_recon
else:
return
prn_str = 'Start going through the entire data...'
print(prn_str)
self.dump_to_record(prn_str)
for iteration in range(1, 100000000):
# reset data iterators for each epoch
if iteration % iter_per_epoch == 0:
# inidividual kls
loss_kl_inds /= num_adds
prn_str = "Individual kl's:\n" + str(loss_kl_inds)
print(prn_str)
self.dump_to_record(prn_str)
# losses
losses['vae_loss'] /= num_adds
losses['dis_loss'] /= num_adds
losses['recon'] /= num_adds
losses['kl'] /= num_adds
losses['tc'] /= num_adds
losses['kl_alpha'] /= num_adds
prn_str = "losses:\n" + str(losses)
print(prn_str)
self.dump_to_record(prn_str)
break
with torch.no_grad():
# sample a mini-batch
X, ids = next(iterator) # (n x C x H x W)
if self.use_cuda:
X = X.cuda()
# enc(X)
mu, std, logvar = self.encoder(X)
# prior alpha params
a, b = self.prior_alpha()
# posterior alpha params
ah, bh = self.post_alpha()
# kl loss
kls = 0.5 * ( \
(ah/bh)*(mu**2+std**2) - 1.0 + \
bh.log() - ah.digamma() - logvar ) # (n x z_dim)
loss_kl = kls.sum(1).mean()
# reparam'ed samples
if self.use_cuda:
Eps = torch.cuda.FloatTensor(mu.shape).normal_()
else:
Eps = torch.randn(mu.shape)
Z = mu + Eps*std
# dec(Z)
X_recon = self.decoder(Z)
# recon loss
loss_recon = F.binary_cross_entropy_with_logits(
X_recon, X, reduction='sum' ).div(X.size(0))
# dis(Z)
DZ = self.D(Z)
# tc loss
loss_tc = (DZ[:,0] - DZ[:,1]).mean()
# kl loss on alpha
kls_alpha = ( \
(ah-a)*ah.digamma() - ah.lgamma() + a.lgamma() + \
a*(bh.log()-b.log()) + (ah/bh)*(b-bh) ) # z_dim-dim
loss_kl_alpha = kls_alpha.sum() / self.N
# total loss for vae
vae_loss = loss_recon + loss_kl + self.gamma*loss_tc + \
loss_kl_alpha
# dim-wise permutated Z over the mini-batch
perm_Z = []
for zj in Z.split(1, 1):
idx = torch.randperm(Z.size(0))
perm_zj = zj[idx]
perm_Z.append(perm_zj)
Z_perm = torch.cat(perm_Z, 1)
Z_perm = Z_perm.detach()
# dis(Z_perm)
DZ_perm = self.D(Z_perm)
# discriminator loss
dis_loss = 0.5*( F.cross_entropy(DZ, zeros) +
F.cross_entropy(DZ_perm, ones) )
if self.losses:
loss_kl_ind = 0.5 * ( \
(ah/bh)*(mu**2+std**2) - 1.0 + \
bh.log() - ah.digamma() - logvar ).mean(0)
loss_kl_inds += loss_kl_ind.cpu().detach().numpy()
#
losses['vae_loss'] += vae_loss.item()
losses['dis_loss'] += dis_loss.item()
losses['recon'] += loss_recon.item()
losses['kl'] += loss_kl.item()
losses['tc'] += loss_tc.item()
losses['kl_alpha'] += loss_kl_alpha.item()
#
num_adds += 1
# print the losses
if iteration % 100 == 0:
prn_str = ( '[%d/%d] vae_loss: %.3f | dis_loss: %.3f\n' + \
' (recon: %.3f, kl: %.3f, tc: %.3f, kl_alpha: %.3f)' \
) % \
( iteration, iter_per_epoch,
vae_loss.item(), dis_loss.item(),
loss_recon.item(), loss_kl.item(), loss_tc.item(),
loss_kl_alpha.item() )
prn_str += '\n a = {}'.format(
a.detach().cpu().numpy().round(2) )
prn_str += '\n b = {}'.format(
b.detach().cpu().numpy().round(2) )
prn_str += '\n ah = {}'.format(
ah.detach().cpu().numpy().round(2) )
prn_str += '\n bh = {}'.format(
bh.detach().cpu().numpy().round(2) )
print(prn_str)
self.dump_to_record(prn_str)
# save reconstructed images
if cntdn>0:
self.save_recon(iteration, X, torch.sigmoid(X_recon).data)
cntdn -= 1
if cntdn==0:
prn_str = 'Completed image reconstruction'
print(prn_str)
self.dump_to_record(prn_str)
if not self.losses:
break
####
def eval_disentangle_metric1(self):
# some hyperparams
num_pairs = 800 # # data pairs (d,y) for majority vote classification
bs = 50 # batch size
nsamps_per_factor = 100 # samples per factor
nsamps_agn_factor = 5000 # factor-agnostic samples
# 1) estimate variances of latent points factor agnostic
dl = DataLoader(
self.data_loader.dataset, batch_size=bs,
shuffle=True, num_workers=self.args.num_workers, pin_memory=True )
iterator = iter(dl)
M = []
for ib in range(int(nsamps_agn_factor/bs)):
# sample a mini-batch
Xb, _ = next(iterator) # (bs x C x H x W)
if self.use_cuda:
Xb = Xb.cuda()
# enc(Xb)
mub, _, _ = self.encoder(Xb) # (bs x z_dim)
M.append(mub.cpu().detach().numpy())
M = np.concatenate(M, 0)
# estimate sample vairance and mean of latent points for each dim
vars_agn_factor = np.var(M, 0)
# 2) estimatet dim-wise vars of latent points with "one factor fixed"
factor_ids = range(0, len(self.latent_sizes)) # true factor ids
vars_per_factor = np.zeros([num_pairs,self.z_dim])
true_factor_ids = np.zeros(num_pairs, np.int) # true factor ids
# prepare data pairs for majority-vote classification
i = 0
for j in factor_ids: # for each factor
# repeat num_paris/num_factors times
for r in range(int(num_pairs/len(factor_ids))):
# a true factor (id and class value) to fix
fac_id = j
fac_class = np.random.randint(self.latent_sizes[fac_id])
# randomly select images (with the fixed factor)
indices = np.where(
self.latent_classes[:,fac_id]==fac_class )[0]
np.random.shuffle(indices)
idx = indices[:nsamps_per_factor]
M = []
for ib in range(int(nsamps_per_factor/bs)):
Xb, _ = dl.dataset[ idx[(ib*bs):(ib+1)*bs] ]
if Xb.shape[0]<1: # no more samples
continue;
if self.use_cuda:
Xb = Xb.cuda()
mub, _, _ = self.encoder(Xb) # (bs x z_dim)
M.append(mub.cpu().detach().numpy())
M = np.concatenate(M, 0)
# estimate sample var and mean of latent points for each dim
if M.shape[0]>=2:
vars_per_factor[i,:] = np.var(M, 0)
else: # not enough samples to estimate variance
vars_per_factor[i,:] = 0.0
# true factor id (will become the class label)
true_factor_ids[i] = fac_id
i += 1
# 3) evaluate majority vote classification accuracy
# inputs in the paired data for classification
smallest_var_dims = np.argmin(
vars_per_factor / (vars_agn_factor + 1e-20), axis=1 )
# contingency table
C = np.zeros([self.z_dim,len(factor_ids)])
for i in range(num_pairs):
C[ smallest_var_dims[i], true_factor_ids[i] ] += 1
num_errs = 0 # # misclassifying errors of majority vote classifier
for k in range(self.z_dim):
num_errs += np.sum(C[k,:]) - np.max(C[k,:])
metric1 = (num_pairs - num_errs) / num_pairs # metric = accuracy
return metric1, C
####
def eval_disentangle_metric2(self):
# some hyperparams
num_pairs = 800 # # data pairs (d,y) for majority vote classification
bs = 50 # batch size
nsamps_per_factor = 100 # samples per factor
nsamps_agn_factor = 5000 # factor-agnostic samples
# 1) estimate variances of latent points factor agnostic
dl = DataLoader(
self.data_loader.dataset, batch_size=bs,
shuffle=True, num_workers=self.args.num_workers, pin_memory=True )
iterator = iter(dl)
M = []
for ib in range(int(nsamps_agn_factor/bs)):
# sample a mini-batch
Xb, _ = next(iterator) # (bs x C x H x W)
if self.use_cuda:
Xb = Xb.cuda()
# enc(Xb)
mub, _, _ = self.encoder(Xb) # (bs x z_dim)
M.append(mub.cpu().detach().numpy())
M = np.concatenate(M, 0)
# estimate sample vairance and mean of latent points for each dim
vars_agn_factor = np.var(M, 0)
# 2) estimatet dim-wise vars of latent points with "one factor varied"
factor_ids = range(0, len(self.latent_sizes)) # true factor ids
vars_per_factor = np.zeros([num_pairs,self.z_dim])
true_factor_ids = np.zeros(num_pairs, np.int) # true factor ids
# prepare data pairs for majority-vote classification
i = 0
for j in factor_ids: # for each factor
# repeat num_paris/num_factors times
for r in range(int(num_pairs/len(factor_ids))):
# randomly choose true factors (id's and class values) to fix
fac_ids = list(np.setdiff1d(factor_ids,j))
fac_classes = \
[ np.random.randint(self.latent_sizes[k]) for k in fac_ids ]
# randomly select images (with the other factors fixed)
if len(fac_ids)>1:
indices = np.where(
np.sum(self.latent_classes[:,fac_ids]==fac_classes,1)
== len(fac_ids)
)[0]
else:
indices = np.where(
self.latent_classes[:,fac_ids]==fac_classes
)[0]
np.random.shuffle(indices)
idx = indices[:nsamps_per_factor]
M = []
for ib in range(int(nsamps_per_factor/bs)):
Xb, _ = dl.dataset[ idx[(ib*bs):(ib+1)*bs] ]
if Xb.shape[0]<1: # no more samples
continue;
if self.use_cuda:
Xb = Xb.cuda()
mub, _, _ = self.encoder(Xb) # (bs x z_dim)
M.append(mub.cpu().detach().numpy())
M = np.concatenate(M, 0)
# estimate sample var and mean of latent points for each dim
if M.shape[0]>=2:
vars_per_factor[i,:] = np.var(M, 0)
else: # not enough samples to estimate variance
vars_per_factor[i,:] = 0.0
# true factor id (will become the class label)
true_factor_ids[i] = j
i += 1
# 3) evaluate majority vote classification accuracy
# inputs in the paired data for classification
largest_var_dims = np.argmax(
vars_per_factor / (vars_agn_factor + 1e-20), axis=1 )
# contingency table
C = np.zeros([self.z_dim,len(factor_ids)])
for i in range(num_pairs):
C[ largest_var_dims[i], true_factor_ids[i] ] += 1
num_errs = 0 # # misclassifying errors of majority vote classifier
for k in range(self.z_dim):
num_errs += np.sum(C[k,:]) - np.max(C[k,:])
metric2 = (num_pairs - num_errs) / num_pairs # metric = accuracy
return metric2, C
####
def save_recon(self, iters, true_images, recon_images):
# make a merge of true and recon, eg,
# merged[0,...] = true[0,...],
# merged[1,...] = recon[0,...],
# merged[2,...] = true[1,...],
# merged[3,...] = recon[1,...], ...
n = true_images.shape[0]
perm = torch.arange(0,2*n).view(2,n).transpose(1,0)
perm = perm.contiguous().view(-1)
merged = torch.cat([true_images, recon_images], dim=0)
merged = merged[perm,:].cpu()
# save the results as image
fname = os.path.join(self.output_dir_recon, 'recon_%s.jpg' % iters)
mkdirs(self.output_dir_recon)
save_image(
tensor=merged, filename=fname, nrow=2*int(np.sqrt(n)),
pad_value=1
)
####
def save_synth(self, iters, howmany=100):
decoder = self.decoder
Z = torch.randn(howmany, self.z_dim)
if self.use_cuda:
Z = Z.cuda()
# do synthesis
X = torch.sigmoid(decoder(Z)).data.cpu()
# save the results as image
fname = os.path.join(self.output_dir_synth, 'synth_%s.jpg' % iters)
mkdirs(self.output_dir_synth)
save_image(
tensor=X, filename=fname, nrow=int(np.sqrt(howmany)),
pad_value=1
)
####
def save_traverse_new( self, iters, num_reps,
limb=-3, limu=3, inter=2/3, loc=-1 ):
encoder = self.encoder
decoder = self.decoder
interpolation = torch.arange(limb, limu+0.001, inter)
np.random.seed(123)
rii = np.random.randint(self.N, size=num_reps)
#--#
prn_str = '(TRAVERSAL) random image IDs = {}'.format(rii)
print(prn_str)
self.dump_to_record(prn_str)
#--#
random_imgs = [0]*num_reps
random_imgs_zmu = [0]*num_reps
for i, i2 in enumerate(rii):
random_imgs[i] = self.data_loader.dataset.__getitem__(i2)[0]
if self.use_cuda:
random_imgs[i] = random_imgs[i].cuda()
random_imgs[i] = random_imgs[i].unsqueeze(0)
random_imgs_zmu[i], _, _ = encoder(random_imgs[i])
if self.dataset.lower() == 'dsprites':
fixed_idx1 = 87040 # square
fixed_idx2 = 332800 # ellipse
fixed_idx3 = 578560 # heart
fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
if self.use_cuda:
fixed_img1 = fixed_img1.cuda()
fixed_img1 = fixed_img1.unsqueeze(0)
fixed_img_zmu1, _, _ = encoder(fixed_img1)
fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
if self.use_cuda:
fixed_img2 = fixed_img2.cuda()
fixed_img2 = fixed_img2.unsqueeze(0)
fixed_img_zmu2, _, _ = encoder(fixed_img2)
fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
if self.use_cuda:
fixed_img3 = fixed_img3.cuda()
fixed_img3 = fixed_img3.unsqueeze(0)
fixed_img_zmu3, _, _ = encoder(fixed_img3)
IMG = { 'fixed_square': fixed_img1, 'fixed_ellipse': fixed_img2,
'fixed_heart': fixed_img3 }
for i in range(num_reps):
IMG['random_img'+str(i)] = random_imgs[i]
Z = { 'fixed_square': fixed_img_zmu1,
'fixed_ellipse': fixed_img_zmu2,
'fixed_heart': fixed_img_zmu3 }
for i in range(num_reps):
Z['random_img'+str(i)] = random_imgs_zmu[i]
elif self.dataset.lower() == 'oval_dsprites':
fixed_idx1 = 87040 # oval1
fixed_idx2 = 220045 # oval2
fixed_idx3 = 178560 # oval3
fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
if self.use_cuda:
fixed_img1 = fixed_img1.cuda()
fixed_img1 = fixed_img1.unsqueeze(0)
fixed_img_zmu1, _, _ = encoder(fixed_img1)
fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
if self.use_cuda:
fixed_img2 = fixed_img2.cuda()
fixed_img2 = fixed_img2.unsqueeze(0)
fixed_img_zmu2, _, _ = encoder(fixed_img2)
fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
if self.use_cuda:
fixed_img3 = fixed_img3.cuda()
fixed_img3 = fixed_img3.unsqueeze(0)
fixed_img_zmu3, _, _ = encoder(fixed_img3)
IMG = { 'fixed1': fixed_img1, 'fixed2': fixed_img2,
'fixed3': fixed_img3 }
for i in range(num_reps):
IMG['random_img'+str(i)] = random_imgs[i]
Z = { 'fixed1': fixed_img_zmu1, 'fixed2': fixed_img_zmu2,
'fixed3': fixed_img_zmu3}
for i in range(num_reps):
Z['random_img'+str(i)] = random_imgs_zmu[i]
elif self.dataset.lower() == '3dfaces':
fixed_idx1 = 6245
fixed_idx2 = 10205
fixed_idx3 = 68560
fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
if self.use_cuda:
fixed_img1 = fixed_img1.cuda()
fixed_img1 = fixed_img1.unsqueeze(0)
fixed_img_zmu1, _, _ = encoder(fixed_img1)
fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
if self.use_cuda:
fixed_img2 = fixed_img2.cuda()
fixed_img2 = fixed_img2.unsqueeze(0)
fixed_img_zmu2, _, _ = encoder(fixed_img2)
fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
if self.use_cuda:
fixed_img3 = fixed_img3.cuda()
fixed_img3 = fixed_img3.unsqueeze(0)
fixed_img_zmu3, _, _ = encoder(fixed_img3)
IMG = { 'fixed1': fixed_img1, 'fixed2': fixed_img2,
'fixed3': fixed_img3 }
for i in range(num_reps):
IMG['random_img'+str(i)] = random_imgs[i]
Z = { 'fixed1': fixed_img_zmu1, 'fixed2': fixed_img_zmu2,
'fixed3': fixed_img_zmu3}
for i in range(num_reps):
Z['random_img'+str(i)] = random_imgs_zmu[i]
elif self.dataset.lower() == 'celeba':
fixed_idx1 = 191282
fixed_idx2 = 143308
fixed_idx3 = 101535
fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
if self.use_cuda:
fixed_img1 = fixed_img1.cuda()
fixed_img1 = fixed_img1.unsqueeze(0)
fixed_img_zmu1, _, _ = encoder(fixed_img1)
fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
if self.use_cuda:
fixed_img2 = fixed_img2.cuda()
fixed_img2 = fixed_img2.unsqueeze(0)
fixed_img_zmu2, _, _ = encoder(fixed_img2)
fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
if self.use_cuda:
fixed_img3 = fixed_img3.cuda()
fixed_img3 = fixed_img3.unsqueeze(0)
fixed_img_zmu3, _, _ = encoder(fixed_img3)
IMG = { 'fixed1': fixed_img1, 'fixed2': fixed_img2,
'fixed3': fixed_img3 }
for i in range(num_reps):
IMG['random_img'+str(i)] = random_imgs[i]
Z = { 'fixed1': fixed_img_zmu1, 'fixed2': fixed_img_zmu2,
'fixed3': fixed_img_zmu3}
for i in range(num_reps):
Z['random_img'+str(i)] = random_imgs_zmu[i]
# elif self.dataset.lower() == '3dchairs':
#
# fixed_idx1 = 40919 # 3DChairs/images/4682_image_052_p030_t232_r096.png
# fixed_idx2 = 5172 # 3DChairs/images/14657_image_020_p020_t232_r096.png
# fixed_idx3 = 22330 # 3DChairs/images/30099_image_052_p030_t232_r096.png
#
# fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
# fixed_img1 = fixed_img1.to(self.device).unsqueeze(0)
# fixed_img_z1 = encoder(fixed_img1)[:, :self.z_dim]
#
# fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
# fixed_img2 = fixed_img2.to(self.device).unsqueeze(0)
# fixed_img_z2 = encoder(fixed_img2)[:, :self.z_dim]
#
# fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
# fixed_img3 = fixed_img3.to(self.device).unsqueeze(0)
# fixed_img_z3 = encoder(fixed_img3)[:, :self.z_dim]
#
# Z = {'fixed_1':fixed_img_z1, 'fixed_2':fixed_img_z2,
# 'fixed_3':fixed_img_z3, 'random':random_img_zmu}
#
elif self.dataset.lower() == 'edinburgh_teapots':
fixed_idx1 = 7040
fixed_idx2 = 32800
fixed_idx3 = 78560
fixed_img1 = self.data_loader.dataset.__getitem__(fixed_idx1)[0]
if self.use_cuda:
fixed_img1 = fixed_img1.cuda()
fixed_img1 = fixed_img1.unsqueeze(0)
fixed_img_zmu1, _, _ = encoder(fixed_img1)
fixed_img2 = self.data_loader.dataset.__getitem__(fixed_idx2)[0]
if self.use_cuda:
fixed_img2 = fixed_img2.cuda()
fixed_img2 = fixed_img2.unsqueeze(0)
fixed_img_zmu2, _, _ = encoder(fixed_img2)
fixed_img3 = self.data_loader.dataset.__getitem__(fixed_idx3)[0]
if self.use_cuda:
fixed_img3 = fixed_img3.cuda()
fixed_img3 = fixed_img3.unsqueeze(0)
fixed_img_zmu3, _, _ = encoder(fixed_img3)
IMG = { 'fixed1': fixed_img1, 'fixed2': fixed_img2,
'fixed3': fixed_img3 }
for i in range(num_reps):
IMG['random_img'+str(i)] = random_imgs[i]
Z = { 'fixed1': fixed_img_zmu1, 'fixed2': fixed_img_zmu2,
'fixed3': fixed_img_zmu3}
for i in range(num_reps):
Z['random_img'+str(i)] = random_imgs_zmu[i]
else:
raise NotImplementedError
# do traversal and collect generated images
gifs = []
for key in Z:
z_ori = Z[key]
for row in range(self.z_dim):
if loc != -1 and row != loc:
continue
z = z_ori.clone()
for val in interpolation:
z[:,row] = val
sample = torch.sigmoid(decoder(z)).data
gifs.append(sample)
# save the generated files, also the animated gifs
out_dir = os.path.join(self.output_dir_trvsl, str(iters))
mkdirs(self.output_dir_trvsl)
mkdirs(out_dir)
gifs = torch.cat(gifs)
gifs = gifs.view(
len(Z), self.z_dim, len(interpolation), self.nc, 64, 64
).transpose(1,2)
for i, key in enumerate(Z.keys()):
for j, val in enumerate(interpolation):
I = torch.cat([IMG[key], gifs[i][j]], dim=0)
save_image(
tensor=I.cpu(),
filename=os.path.join(out_dir, '%s_%03d.jpg' % (key,j)),
nrow=1+self.z_dim, pad_value=1 )
# make animated gif
grid2gif(
out_dir, key, str(os.path.join(out_dir, key+'.gif')), delay=10
)
####
def set_mode(self, train=True):
if train:
self.encoder.train()
self.decoder.train()
self.D.train()
else:
self.encoder.eval()
self.decoder.eval()
self.D.eval()
####
def load_checkpoint(self):
encoder_path = os.path.join( self.ckpt_dir,
'iter_%s_encoder.pt' % self.ckpt_load_iter )
decoder_path = os.path.join( self.ckpt_dir,
'iter_%s_decoder.pt' % self.ckpt_load_iter )
prior_alpha_path = os.path.join( self.ckpt_dir,
'iter_%s_prior_alpha.pt' % self.ckpt_load_iter )
post_alpha_path = os.path.join( self.ckpt_dir,
'iter_%s_post_alpha.pt' % self.ckpt_load_iter )
D_path = os.path.join( self.ckpt_dir,
'iter_%s_D.pt' % self.ckpt_load_iter )
if self.use_cuda:
self.encoder = torch.load(encoder_path)
self.decoder = torch.load(decoder_path)
self.prior_alpha = torch.load(prior_alpha_path)
self.post_alpha = torch.load(post_alpha_path)
self.D = torch.load(D_path)
else:
self.encoder = torch.load(encoder_path, map_location='cpu')
self.decoder = torch.load(decoder_path, map_location='cpu')
self.prior_alpha = torch.load(prior_alpha_path, map_location='cpu')
self.post_alpha = torch.load(post_alpha_path, map_location='cpu')
self.D = torch.load(D_path, map_location='cpu')
####
def dump_to_record(self, prn_str):
record = open(self.record_file, 'a')
record.write('%s\n' % (prn_str,))
record.close()
| [
"ps851@scarletmail.rutgers.edu"
] | ps851@scarletmail.rutgers.edu |
199e2c73709ba89644fe25f22ee2a014ca03fcca | d3410127c2fb05a2312c8861214e789fccaba615 | /doctor/admin.py | 0198981b371281c242794bfdc1411d22af6128f1 | [] | no_license | Vitalio44/Dol_plastic | 8f8ef222badc9aaf5807da78c644396209752f93 | 21c6e62e51ecb3343d288c4a1c75bb01388fcf8a | refs/heads/master | 2021-01-19T02:46:09.838145 | 2017-05-29T10:18:35 | 2017-05-29T10:18:35 | 87,291,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.contrib import admin
from .models import Doctor, Specialization
class DoctorAdmin(admin.ModelAdmin):
    """Admin configuration for Doctor entries.

    Changelist shows title and creation timestamp, can be filtered by
    timestamp and searched by title; the slug is auto-filled from the
    title while typing in the admin form.
    """
    list_display = ["title", "timestamp"]
    list_display_links = ["title"]
    list_filter = ["timestamp"]
    search_fields = ["title"]
    prepopulated_fields = {'slug': ('title',)}
    # NOTE: the inner `class Meta: model = Doctor` was removed - ModelAdmin
    # never reads an inner Meta; the model is bound via admin.site.register.
class SpecializationAdmin(admin.ModelAdmin):
    """Admin configuration for Specialization entries.

    Changelist shows name and creation timestamp, can be filtered by
    timestamp and searched by name.
    """
    list_display = ["name", "timestamp"]
    list_display_links = ["name"]
    list_filter = ["timestamp"]
    search_fields = ["name"]
    # NOTE: the inner `class Meta: model = Specialization` was removed -
    # ModelAdmin never reads an inner Meta; the model is bound at register.
# Bind each ModelAdmin to its model in the default admin site.
admin.site.register(Doctor, DoctorAdmin)
admin.site.register(Specialization, SpecializationAdmin)
| [
"Vitalii"
] | Vitalii |
98d4b751487188eba562b6871a2298cb5ea68deb | 34d5ebe9e6de9d6742c234dabfa9b38f0adb7774 | /carriage_return/ui.py | 6b4e0f2958482b03ef044f9c62842f0bd045a463 | [] | no_license | campagnola/return-to-carriage | f37acaf8e41ccf04e7089018574732a1fdcd2a64 | eeb7f31b16e2c236c875c867a0295173fa6f4b0a | refs/heads/master | 2021-08-05T14:02:49.988526 | 2021-07-31T08:38:17 | 2021-07-31T08:38:17 | 84,014,684 | 0 | 2 | null | 2021-07-30T02:48:13 | 2017-03-06T00:55:55 | Python | UTF-8 | Python | false | false | 4,518 | py | import numpy as np
import vispy.scene, vispy.app
import vispy.util.ptime as ptime
from .input import InputDispatcher, CommandInputHandler
from .graphics import TextBox
from .console import CommandInterpreter
class MainWindow:
    """Implements the game's user interface on a vispy SceneCanvas.

    Builds the graphical panels (stats bar, info box, command console),
    dispatches key input, and smoothly scrolls the camera to follow an
    entity as it moves around the map.
    """
    def __init__(self):
        self.canvas = vispy.scene.SceneCanvas()
        self.canvas.show()
        self.canvas.size = 1400, 900
        self.debug_line_of_sight = False
        self.debug_los_tex = False
        # Setup input event handling
        self.input_dispatcher = InputDispatcher(self.canvas)
        self.command_mode = False
        # setup UI
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = 'panzoom'
        self.view.camera.rect = [0, -5, 120, 60]
        self.view.camera.aspect = 0.6
        # disconnect the camera's default key handling; keys go through
        # InputDispatcher instead
        self.view.events.key_press.disconnect()
        # camera_target is where the camera should settle; _scroll_camera
        # eases the actual camera rect toward it every timer tick
        self.camera_target = self.view.camera.rect
        self._last_camera_update = ptime.time()
        self.scroll_timer = vispy.app.Timer(start=True, connect=self._scroll_camera, interval=0.016)
        self.console_grid = self.canvas.central_widget.add_grid()
        # top stats bar (2 rows x 160 characters)
        self.stats_box = TextBox((2, 160))
        self.console_grid.add_widget(self.stats_box.view, 1, 0, 1, 2)
        self.stats_box.write(
            "HP:17/33 Food:56% Water:34% Sleep:65% Weight:207(45) Level:3 Int:12 Str:9 Wis:11 Cha:2")
        self.stats_box.view.height_max = 30
        self.stats_box.view.stretch = (1, 10)
        # left info panel (messages about the current tile)
        self.info_box = TextBox((15, 80))
        self.console_grid.add_widget(self.info_box.view, 2, 0)
        self.info_box.write("There is a scroll of infinite recursion here.")
        self.info_box.view.height_max = 200
        self.stats_box.view.stretch = (1, 1)
        # right command console panel
        self.console = TextBox((15, 80))
        self.console_grid.add_widget(self.console.view, 2, 1)
        self.console.view.stretch = (1, 10)
        # self.console.view.parent = self.canvas.scene
        self.console.view.rect = vispy.geometry.Rect(30, 620, 1350, 250)
        self.console.transform = vispy.visuals.transforms.STTransform((0, 0, -0.5))
        # self.console.view.camera.aspect = 0.6
        self.console.view.height_max = 200
        self.console.write('Hello?')
        self.console.write('Is anybody\n there?')
        self.console.write(''.join([chr(i) for i in range(0x20, 128)]))
        # self.console.view.camera.rect = [-1, -1, 30, 3]
        self.command = CommandInterpreter(self)
        self.cmd_input_handler = CommandInputHandler(self.console, self.command)
        self._follow_entity = None
    def follow_entity(self, entity):
        """Make the camera follow *entity*, re-targeting on every move.

        Disconnects the previously followed entity (if any) first.
        """
        if self._follow_entity is not None:
            self._follow_entity.location.global_changed.disconnect(self._update_camera_target)
        self._follow_entity = entity
        entity.location.global_changed.connect(self._update_camera_target)
        self._update_camera_target()
    def toggle_command_mode(self):
        """Switch keyboard focus between game input and the console."""
        # todo: visual cue
        self.command_mode = not self.command_mode
        if self.command_mode:
            self.cmd_input_handler.activate()
        else:
            self.cmd_input_handler.deactivate()
    def _scroll_camera(self, ev):
        """Timer callback: exponentially ease the camera toward its target."""
        now = ptime.time()
        dt = now - self._last_camera_update
        self._last_camera_update = now
        cr = vispy.geometry.Rect(self.view.camera.rect)
        tr = self.camera_target
        # pack (x, y, w, h) of current and target rects into vectors
        crv = np.array(cr.pos + cr.size, dtype='float32')
        trv = np.array(tr.pos + tr.size, dtype='float32')
        if not np.any(abs(trv - crv) > 1e-2):
            return
        s = np.exp(-dt / 0.4)  # 400 ms settling time constant
        nrv = crv * s + trv * (1.0 - s)
        cr.pos = nrv[:2]
        cr.size = nrv[2:]
        self.view.camera.rect = cr
    def _update_camera_target(self, event=None):
        """Recompute the camera target from the followed entity's position.

        The camera only moves when the entity strays more than 10% of the
        view size from the center (a dead-zone), which avoids jitter.
        """
        location = self._follow_entity.location
        pp = np.array(location.global_location.slot)
        cr = vispy.geometry.Rect(self.view.camera.rect)
        cc = np.array(cr.center)
        cs = np.array(cr.size)
        cp = np.array(cr.pos)
        dif = pp - cc
        maxdif = 0.1 * cs  # start correcting camera at 10% width from center
        for ax in (0, 1):
            if dif[ax] < -maxdif[ax]:
                cp[ax] += dif[ax] + maxdif[ax]
            elif dif[ax] > maxdif[ax]:
                cp[ax] += dif[ax] - maxdif[ax]
        cr.pos = cp
        self.camera_target = cr
    def quit(self):
        """Close the main canvas (ends the application loop)."""
        self.canvas.close()
| [
"luke.campagnola@gmail.com"
] | luke.campagnola@gmail.com |
2a0eb33ad087fd4330fbe9400b31fb58715ae5dd | 6581783e94c0474ace8a945ce6c11dac1c4d60f8 | /bozor/apps/shop/migrations/0001_initial.py | a38712f8ba1322f8b44db839b8708fa9a79d3955 | [
"BSD-3-Clause"
] | permissive | mirjalolbahodirov/bozor | 22367c89c746eeab12334bb3901a0d1b9884f9ff | c76b7b7dee4b6a80957602a10a900f7548347642 | refs/heads/master | 2021-08-23T20:09:04.971753 | 2017-12-06T10:25:33 | 2017-12-06T10:25:33 | 113,212,879 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | # Generated by Django 2.0 on 2017-12-05 16:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('stock', models.PositiveIntegerField()),
('image', models.ImageField(upload_to='product')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.Category')),
],
),
]
| [
"mirjalol.bahodirov@gmail.com"
] | mirjalol.bahodirov@gmail.com |
c99ad622603cbf9f4ff8ce3a42ef74bed108324b | 1d6c3055cbbd6bf23d272353a3187409b5d0ed78 | /Grafo.py | dd4dacc5e8352013c7503fe37d03e841d82e22f5 | [] | no_license | BASDiniz/GrafoPython | 158c4af2a0e519b44a6b33e7dbc13ebf6f3bbea3 | 5823e4042f05c9a3a9a7c37f7b459d5786fc9edc | refs/heads/master | 2020-08-08T05:24:09.856506 | 2019-10-07T15:20:48 | 2019-10-07T15:20:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,505 | py | from Vertice import *
from Aresta import *
class Grafo:
    """Directed or undirected weighted graph over Vertice/Aresta objects.

    Vertices live in `lista_Vertices`, edges in `lista_Arestas`; an
    undirected graph stores each edge twice (once per direction).

    Bug fixes vs. the original:
      * busca_Vertice / busca_Adjacente returned None as soon as the FIRST
        list element did not match (a `for`/`if`/`else: return` pattern),
        so only element 0 was ever inspected.
      * nova_Aresta appended the reverse edge of an undirected graph even
        when a vertex lookup failed, inserting Aresta(None, None, peso).
    """

    def __init__(self, direcionado=True):
        self.lista_Vertices = []   # Vertice objects
        self.lista_Arestas = []    # Aresta objects (origin -> destination)
        self.direcionado = direcionado
        self.tempo = 0             # discovery/finish clock for traversals

    def novo_Vertice(self, identificador):
        """Add a new vertex with the given identifier."""
        self.lista_Vertices.append(Vertice(identificador))

    def busca_Aresta(self, u, v):
        """Return the edge from vertex u to vertex v, or None.

        Args:
            u, v: Vertice objects.
        """
        for w in self.lista_Arestas:
            if (w.getOrigem().getId() == u.getId()
                    and w.getDestino().getId() == v.getId()):
                return w
        return None

    def busca_Vertice(self, identificador):
        """Return the vertex with this identifier, or None if absent.

        Fixed: the whole vertex list is now scanned instead of only the
        first element.
        """
        for vertice in self.lista_Vertices:
            if identificador == vertice.getId():
                return vertice
        return None

    def nova_Aresta(self, origem, destino, peso):
        """Create an edge between two existing vertex identifiers.

        For undirected graphs the reverse edge is stored as well, but only
        when both endpoints were actually found.
        """
        origem_aux = self.busca_Vertice(origem)
        destino_aux = self.busca_Vertice(destino)
        if (origem_aux is not None) and (destino_aux is not None):
            self.lista_Arestas.append(Aresta(origem_aux, destino_aux, peso))
            if self.direcionado == False:
                # store Aresta(u,v) and Aresta(v,u)
                self.lista_Arestas.append(
                    Aresta(destino_aux, origem_aux, peso))
        else:
            print("Um do Vertice ou ambos são invalidos")

    def esta_Vazio(self):
        """Return True when the graph has no vertices."""
        return len(self.lista_Vertices) == 0

    def busca_Adjacente(self, u):
        """Return one unvisited neighbor of u (marking it visited), or None.

        Fixed: every edge is now inspected instead of only the first one.
        Marking the returned vertex visited prevents yielding it twice.
        """
        for aresta in self.lista_Arestas:
            destino = aresta.getDestino()
            if (u.getId() == aresta.getOrigem().getId()
                    and destino.getVisitado() == False):
                destino.setVisitado(True)
                return destino
        return None

    ####################################################################
    def Depth_first_search(self):
        """Depth-first traversal recording discovery/finish times."""
        self.tempo = 0
        for v in self.lista_Vertices:
            v.setVisitado(False)
            v.input = 0
            v.output = 0
        for v in self.lista_Vertices:
            if not v.getVisitado():
                self.visita(v)

    def visita(self, u):
        """Recursive DFS visit from u, stamping input/output times."""
        print("Visitando o vertice: %s" % u.getId())
        u.setVisitado(True)
        self.tempo += 1
        u.setImput(self.tempo)
        v = self.busca_Adjacente(u)  # only returns unvisited neighbors
        while v is not None:
            v.predecessor.append(u.getId())
            self.visita(v)
            v = self.busca_Adjacente(u)
        self.tempo += 1
        u.setOutput(self.tempo)
        print("Voltando para: ", u.predecessor)

    ####################################################################
    def inicializa_Fonte(self, fonte):
        """Reset estimates/visited flags; used by BFS and Dijkstra.

        Args:
            fonte: source Vertice object.
        """
        for v in self.lista_Vertices:
            v.setEstimativa(99999)
            v.setVisitado(False)
        fonte.setVisitado(True)
        fonte.setEstimativa(0)

    ####################################################################
    def Breadth_first_search(self, identificador):
        """Breadth-first traversal from the vertex with this identifier."""
        flag = 0
        fonte = self.busca_Vertice(identificador)
        if fonte is None:
            return "Vertice Nulo"
        self.inicializa_Fonte(fonte)
        lista = [fonte]
        while 0 != len(lista):
            u = lista[0]
            # skip printing the source itself on the first pass
            if flag == 1:
                print('Visitando Vertice: ' + u.getId())
            flag = 1
            v = self.busca_Adjacente(u)  # returns an unvisited neighbor
            if v is None:
                lista.pop(0)  # u has no unvisited neighbors left
            else:
                self.tempo += 1
                v.setImput(self.tempo)
                v.predecessor.append(u.getId())
                v.setVisitado(True)
                lista.append(v)
            u.setVisitado(True)

    def imprime_Grafo_com_Destino(self, origem, destino):
        """Print the predecessor path ending at `destino`, if one exists."""
        destino_aux = self.busca_Vertice(destino)
        if len(destino_aux.predecessor) == 0:
            print("Não ha caminho")
        else:
            print(destino)
            self.imprime_Grafo(origem, destino)

    def imprime_Grafo(self, origem, destino):
        """Recursively print predecessors from `destino` back to `origem`."""
        if origem == destino:
            print("Fim")
        else:
            destino_aux = self.busca_Vertice(destino)
            if len(destino_aux.predecessor) == 0:
                print("Não ha caminho")
            else:
                print(destino_aux.predecessor[0])
                self.imprime_Grafo(origem, destino_aux.predecessor[0])

    ####################################################################
    def relaxa_Vertice(self, u, v, w):
        """Relax edge w = (u, v): improve v's distance estimate via u."""
        if v.getEstimativa() > (u.getEstimativa() + w.getPeso()):
            v.setEstimativa(u.getEstimativa() + w.getPeso())
            v.predecessor.append(u.getId())  # store only the id

    def Dijkstra(self, origem):
        """Single-source shortest paths from the vertex id `origem`."""
        fonte = self.busca_Vertice(origem)
        if fonte is None:
            return "Vertce Nulo"
        self.inicializa_Fonte(fonte)
        lista = list(self.lista_Vertices)
        resposta = []  # settled vertices, in settlement order
        while len(lista) != 0:
            lista.sort()  # order by current estimate (Vertice ordering)
            u = lista[0]
            v = self.busca_Adjacente(u)
            if v is None:
                # u marked its neighbors visited while scanning; clear the
                # flags so other vertices can still find their neighbors
                for i in self.lista_Vertices:
                    i.setVisitado(False)
                self.tempo += 1
                u.setImput(self.tempo)  # records the visitation order only
                resposta.append(lista[0])
                lista.pop(0)  # u is settled
            else:
                w = self.busca_Aresta(u, v)
                if w is not None:
                    self.relaxa_Vertice(u, v, w)
        print("Estimativas: ")
        for i in resposta:
            print(i)

    def Minimum_spanning_tree(self, origem):
        """Prim's minimum spanning tree from the vertex id `origem`."""
        fonte = self.busca_Vertice(origem)
        if fonte is None:
            return "Vertice Nulo"
        self.inicializa_Fonte(fonte)
        lista = list(self.lista_Vertices)
        lista.sort()
        while len(lista) != 0:
            # list stays ordered by current estimate
            u = lista[0]
            v = self.busca_Adjacente(u)
            if v is None:
                # clear visited flags set while scanning u's neighbors
                for i in lista:
                    i.setVisitado(False)
                lista.sort()
                self.tempo += 1
                u.setImput(self.tempo)
                lista.remove(u)  # u has joined the tree
            else:
                w = self.busca_Aresta(u, v)
                if lista.count(v) > 0:
                    if v.getEstimativa() > w.getPeso():
                        v.predecessor = [u.getId()]
                        v.setEstimativa(w.getPeso())
        for u in self.lista_Vertices:
            if len(u.predecessor) > 0:
                print(u.predecessor, "------", u.getId())
        self.lista_Vertices.sort(key=lambda u: u.input, reverse=False)
        for i in self.lista_Vertices:
            print(i)

    ####################################################################
    def is_Cyclic(self):
        """Crude cycle check: more edges than |V| - 1 implies a cycle.

        NOTE(review): this is only a necessary condition for acyclicity in
        connected graphs, not a full cycle detection.
        """
        if (len(self.lista_Arestas) > len(self.lista_Vertices) - 1):
            print("Grafo Cíclico por Nº Aresta : %i > Nº Vértices: %i" % (
                len(self.lista_Arestas), len(self.lista_Vertices)))
        else:
            print("Grafo Acíclico")
    ####################################################################
"noreply@github.com"
] | noreply@github.com |
629f28a4b56dfbfcff989d4212e45fd579c89c74 | dc36e9910af16b86657e052dd5335878d734d2f8 | /Part2/venv/Scripts/pip3.6-script.py | daa7481da3e56188b556e6e2fe49748aaa4ea869 | [] | no_license | JLevy97/redhorse2020_interview | e68f785bb115fd4cf0b0bb8983dbff39dd179c9b | 411fccaf02de29bd5d80acf29a2314bccad34dcb | refs/heads/master | 2021-05-18T07:08:33.629580 | 2020-03-30T01:32:08 | 2020-03-30T01:32:08 | 251,172,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!"D:\Documents\Job Applications\RedHorse 2019\Part2\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix setuptools appends to argv[0]
    # so pip sees its canonical program name, then run pip's console
    # entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
    )
| [
"39770656+JLevy97@users.noreply.github.com"
] | 39770656+JLevy97@users.noreply.github.com |
65eb9b374f06fae0a98bf2b502ad7a15ff4d00ee | 212266251f6fad18d293dcd752240f90867319a0 | /fibanocci.py | 85014ec7c389ea2e93cb1366cb0fbb2acee92592 | [] | no_license | manojkumarmc/python-examples | b5e41a2c621ad1b2e54002f1c4b96c6b833eb455 | 2eb474f998d6212586a8a2a3723d7471ab565c08 | refs/heads/master | 2021-01-19T00:58:56.397735 | 2019-06-10T17:03:29 | 2019-06-10T17:03:29 | 38,770,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from itertools import islice
def fib():
    """Yield the Fibonacci sequence 1, 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b
output = islice(fib(), 0, 100)
for o in output:
print o
| [
"manoj.kumar.myaliputhenpurayi@oracle.com"
] | manoj.kumar.myaliputhenpurayi@oracle.com |
03617b3129dcf8a670eaf2276f34c38807321a83 | 0dc2b8f5b553f72f72a41f61e63b633dca4f9923 | /HICO-DET_Benchmark/Generate_HICO_detection_nis.py | 4bf265cae449fdd5292591073dc10436ae47c90e | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | DirtyHarryLYL/Transferable-Interactiveness-Network | 84e877352bdf5f0654294a6522e6ab0a0e16346e | 3ab905bb95817cbc52b5306323fa9a9ddc3cd86d | refs/heads/master | 2023-04-06T14:08:55.494243 | 2023-02-22T17:47:11 | 2023-02-22T17:47:11 | 177,267,363 | 247 | 51 | MIT | 2023-03-24T22:42:31 | 2019-03-23T09:05:23 | Python | UTF-8 | Python | false | false | 24,084 | py | # --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""
Change the HICO-DET detection results to the right format.
input arg: python Generate_HICO_detection_nis.py (1:pkl_path) (2:hico_dir) (3:rule_inter) (4:threshold_x) (5:threshold_y)
"""
import pickle
import shutil
import numpy as np
import scipy.io as sio
import os
import sys
import matplotlib
import matplotlib.pyplot as plth
import random
import HICO_Benchmark_Binary as rank
# all the no-interaction HOI index in HICO dataset
hoi_no_inter_all = [10,24,31,46,54,65,76,86,92,96,107,111,129,146,160,170,174,186,194,198,208,214,224,232,235,239,243,247,252,257,264,273,283,290,295,305,313,325,330,336,342,348,352,356,363,368,376,383,389,393,397,407,414,418,429,434,438,445,449,453,463,474,483,488,502,506,516,528,533,538,546,550,558,562,567,576,584,588,595,600]
# all HOI index range corresponding to different object id in HICO dataset
hoi_range = [(161, 170), (11, 24), (66, 76), (147, 160), (1, 10), (55, 65), (187, 194), (568, 576), (32, 46), (563, 567), (326, 330), (503, 506), (415, 418), (244, 247), (25, 31), (77, 86), (112, 129), (130, 146), (175, 186), (97, 107), (314, 325), (236, 239), (596, 600), (343, 348), (209, 214), (577, 584), (353, 356), (539, 546), (507, 516), (337, 342), (464, 474), (475, 483), (489, 502), (369, 376), (225, 232), (233, 235), (454, 463), (517, 528), (534, 538), (47, 54), (589, 595), (296, 305), (331, 336), (377, 383), (484, 488), (253, 257), (215, 224), (199, 208), (439, 445), (398, 407), (258, 264), (274, 283), (357, 363), (419, 429), (306, 313), (265, 273), (87, 92), (93, 96), (171, 174), (240, 243), (108, 111), (551, 558), (195, 198), (384, 389), (394, 397), (435, 438), (364, 368), (284, 290), (390, 393), (408, 414), (547, 550), (450, 453), (430, 434), (248, 252), (291, 295), (585, 588), (446, 449), (529, 533), (349, 352), (559, 562)]
# all image index in test set without any pair
all_remaining = set([20, 25, 54, 60, 66, 71, 74, 94, 154, 155, 184, 200, 229, 235, 242, 249, 273, 280, 289, 292, 315, 323, 328, 376, 400, 421, 432, 436, 461, 551, 554, 578, 613, 626, 639, 641, 642, 704, 705, 768, 773, 776, 796, 809, 827, 845, 850, 855, 862, 886, 901, 947, 957, 963, 965, 1003, 1011, 1014, 1028, 1042, 1044, 1057, 1090, 1092, 1097, 1099, 1119, 1171, 1180, 1231, 1241, 1250, 1346, 1359, 1360, 1391, 1420, 1450, 1467, 1495, 1498, 1545, 1560, 1603, 1605, 1624, 1644, 1659, 1673, 1674, 1677, 1709, 1756, 1808, 1845, 1847, 1849, 1859, 1872, 1881, 1907, 1910, 1912, 1914, 1953, 1968, 1979, 2039, 2069, 2106, 2108, 2116, 2126, 2142, 2145, 2146, 2154, 2175, 2184, 2218, 2232, 2269, 2306, 2308, 2316, 2323, 2329, 2390, 2397, 2406, 2425, 2463, 2475, 2483, 2494, 2520, 2576, 2582, 2591, 2615, 2624, 2642, 2646, 2677, 2703, 2707, 2712, 2717, 2763, 2780, 2781, 2818, 2830, 2833, 2850, 2864, 2873, 2913, 2961, 2983, 3021, 3040, 3042, 3049, 3057, 3066, 3082, 3083, 3111, 3112, 3122, 3157, 3200, 3204, 3229, 3293, 3309, 3328, 3341, 3373, 3393, 3423, 3439, 3449, 3471, 3516, 3525, 3537, 3555, 3616, 3636, 3653, 3668, 3681, 3709, 3718, 3719, 3733, 3737, 3744, 3756, 3762, 3772, 3780, 3784, 3816, 3817, 3824, 3855, 3865, 3885, 3891, 3910, 3916, 3918, 3919, 3933, 3949, 3980, 4009, 4049, 4066, 4089, 4112, 4143, 4154, 4200, 4222, 4243, 4254, 4257, 4259, 4266, 4269, 4273, 4308, 4315, 4320, 4331, 4343, 4352, 4356, 4369, 4384, 4399, 4411, 4424, 4428, 4445, 4447, 4466, 4477, 4482, 4492, 4529, 4534, 4550, 4566, 4596, 4605, 4606, 4620, 4648, 4710, 4718, 4734, 4771, 4773, 4774, 4801, 4807, 4811, 4842, 4845, 4849, 4874, 4886, 4887, 4907, 4926, 4932, 4948, 4960, 4969, 5000, 5039, 5042, 5105, 5113, 5159, 5161, 5174, 5183, 5197, 5214, 5215, 5216, 5221, 5264, 5273, 5292, 5293, 5353, 5438, 5447, 5452, 5465, 5468, 5492, 5498, 5520, 5543, 5551, 5575, 5581, 5605, 5617, 5623, 5671, 5728, 5759, 5766, 5777, 5799, 5840, 5853, 5875, 5883, 5886, 5898, 5919, 5922, 5941, 5948, 5960, 5962, 5964, 6034, 6041, 6058, 
6080, 6103, 6117, 6134, 6137, 6138, 6163, 6196, 6206, 6210, 6223, 6228, 6232, 6247, 6272, 6273, 6281, 6376, 6409, 6430, 6438, 6473, 6496, 6595, 6608, 6635, 6678, 6687, 6692, 6695, 6704, 6712, 6724, 6757, 6796, 6799, 6815, 6851, 6903, 6908, 6914, 6948, 6957, 7065, 7071, 7073, 7089, 7099, 7102, 7114, 7147, 7169, 7185, 7219, 7226, 7232, 7271, 7285, 7315, 7323, 7341, 7378, 7420, 7433, 7437, 7467, 7489, 7501, 7513, 7514, 7523, 7534, 7572, 7580, 7614, 7619, 7625, 7658, 7667, 7706, 7719, 7727, 7752, 7813, 7826, 7829, 7868, 7872, 7887, 7897, 7902, 7911, 7936, 7942, 7945, 8032, 8034, 8042, 8044, 8092, 8101, 8156, 8167, 8175, 8176, 8205, 8234, 8237, 8244, 8301, 8316, 8326, 8350, 8362, 8385, 8441, 8463, 8479, 8534, 8565, 8610, 8623, 8651, 8671, 8678, 8689, 8707, 8735, 8761, 8763, 8770, 8779, 8800, 8822, 8835, 8923, 8942, 8962, 8970, 8984, 9010, 9037, 9041, 9122, 9136, 9140, 9147, 9164, 9165, 9166, 9170, 9173, 9174, 9175, 9185, 9186, 9200, 9210, 9211, 9217, 9218, 9246, 9248, 9249, 9250, 9254, 9307, 9332, 9337, 9348, 9364, 9371, 9376, 9379, 9389, 9404, 9405, 9408, 9415, 9416, 9417, 9418, 9419, 9421, 9424, 9433, 9434, 9493, 9501, 9505, 9519, 9520, 9521, 9522, 9526, 9529, 9531, 9637, 9654, 9655, 9664, 9686, 9688, 9701, 9706, 9709, 9712, 9716, 9717, 9718, 9731, 9746, 9747, 9748, 9753, 9765])
# Upper bound on the number of candidate human-object pairs tracked below.
pair_total_num = 999999
# Per-pair binary interactiveness scores (D[no-interaction], D[interaction])
# and per-pair category flags from the benchmark ranking module.
# NOTE(review): a_pair/b_pair/c_pair look like per-pair rank-category
# indicator arrays - verify against HICO_Benchmark_Binary.cal_rank_600.
binary_score_nointer, binary_score_inter, a_pair, b_pair, c_pair = rank.cal_rank_600()
# 1.0 at index i once pair i has been suppressed by NIS, else 0.0.
pair_is_del = np.zeros(pair_total_num, dtype = 'float32')
pair_in_the_result = np.zeros(9999, dtype = 'float32')
def getSigmoid(b, c, d, x, a=6):
    """Scaled logistic curve: a / (1 + e**(b - c*x)) + d.

    Used to re-weight HOI detection scores by the human/object
    detection confidence x.
    """
    e = 2.718281828459  # same hard-coded base as the original scoring code
    exponent = b - c * x
    return a / (1 + e ** exponent) + d
def save_HICO(HICO, HICO_dir, thres_no_inter, thres_inter, classid, begin, finish):
    """Write NIS-filtered detections for one object class to a .mat file.

    HICO maps an image id to a list of candidate pairs; each element is
    [human_box, object_box, object_class, per-HOI scores, human_score,
    object_score].  HOI ids ``begin``..``finish`` (1-based, 1-600) belong
    to ``classid``.  A pair is suppressed (Non-Interaction Suppression)
    when its binary no-interaction score exceeds ``thres_no_inter`` while
    its interaction score stays below ``thres_inter``, unless the image is
    whitelisted in ``all_remaining`` or the HOI id is one of the
    no-interaction categories in ``hoi_no_inter_all``.

    Returns (num_delete_inter, num_delete_no_inter): counts of suppressed
    interactive (a+b) and non-interactive (c) pairs.
    """
    # Hand-tuned per-class threshold overrides.  Hoisted out of the loops:
    # the original recomputed this if/elif chain for every candidate pair
    # even though it only depends on classid, which is fixed per call.
    overrides = {
        63: (0.95, 0.15), 43: (0.85, 0.1), 57: (0.85, 0.2),
        48: (0.85, 0.2), 41: (0.85, 0.15), 2: (0.85, 0.2),
        4: (0.85, 0.15), 31: (0.85, 0.1), 19: (0.85, 0.2),
        1: (0.85, 0.05), 11: (0.85, 0.15),
    }
    if classid in overrides:
        thres_no_inter, thres_inter = overrides[classid]
    all_boxes = []
    num_delete_pair_a = 0
    num_delete_pair_b = 0
    num_delete_pair_c = 0
    for i in range(finish - begin + 1):  # one pass per HOI verb of this class
        total = []
        score = []
        pair_id = 0
        for key, value in HICO.iteritems():
            for element in value:
                if element[2] == classid:
                    temp = []
                    temp.append(element[0].tolist())  # human box
                    temp.append(element[1].tolist())  # object box
                    temp.append(int(key))             # image id
                    temp.append(int(i))               # action id within this class
                    hoi_num = begin - 1 + i           # global 0-based HOI index
                    d_score = binary_score_inter[pair_id]
                    d_score_noi = binary_score_nointer[pair_id]
                    # NIS sigmoid re-weighting; (10, 1.4, 0) is the default.
                    score_new = element[3][hoi_num] * getSigmoid(10, 1.4, 0, element[4]) * getSigmoid(10, 1.4, 0, element[5])
                    # Suppress low-interactiveness pairs; whitelisted images
                    # and the no-interaction HOI ids are always kept.
                    if (d_score_noi > thres_no_inter) and (d_score < thres_inter) and not (int(key) in all_remaining):
                        if not ((hoi_num + 1) in hoi_no_inter_all):
                            # Book-keeping: count each suppressed pair once,
                            # attributed to its a/b/c partition.
                            if (a_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_a += 1
                                pair_is_del[pair_id] = 1
                            elif (b_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_b += 1
                                pair_is_del[pair_id] = 1
                            elif (c_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_c += 1
                                pair_is_del[pair_id] = 1
                            pair_id += 1
                            continue
                    temp.append(score_new)
                    total.append(temp)
                    score.append(score_new)
                # Advance the global pair index for every non-whitelisted
                # pair (whitelisted images are absent from the ranking
                # arrays).  NOTE(review): indentation was lost in this dump;
                # confirm this increment sits at element level.
                if not (int(key) in all_remaining):
                    pair_id += 1
        # Keep the top-scoring detections for this verb.
        idx = np.argsort(score, axis=0)[::-1]
        for i_idx in range(min(len(idx), 19999)):
            all_boxes.append(total[idx[i_idx]])
    # Save the detection result in a .mat file, overwriting any stale copy.
    savefile = os.path.join(HICO_dir, 'detections_' + str(classid).zfill(2) + '.mat')
    if os.path.exists(savefile):
        os.remove(savefile)
    sio.savemat(savefile, {'all_boxes': all_boxes})
    print('class', classid, 'finished')
    # a+b pairs count as "interaction" suppressions, c as "no interaction".
    num_delete_inter = num_delete_pair_a + num_delete_pair_b
    return num_delete_inter, num_delete_pair_c
def Generate_HICO_detection(output_file, HICO_dir, thres_no_inter, thres_inter):
    """Run NIS post-processing over all 80 HICO-DET object classes.

    Loads the pickled detection dict from ``output_file``, writes one
    ``detections_<NN>.mat`` per class into ``HICO_dir``, and prints the
    total numbers of suppressed interaction / no-interaction pairs.
    """
    if not os.path.exists(HICO_dir):
        os.makedirs(HICO_dir)
    # Fix: close the pickle file (was leaked via pickle.load(open(...))).
    with open(output_file, "rb") as f:
        HICO = pickle.load(f)
    # (classid, first HOI id, last HOI id) for each object class; HOI ids
    # are 1-based indices into the 600 HICO-DET categories.  Replaces 80
    # copy-pasted save_HICO call blocks.
    hoi_ranges = [
        (1, 161, 170),   # person
        (2, 11, 24),     # bicycle
        (3, 66, 76),     # car
        (4, 147, 160),   # motorcycle
        (5, 1, 10),      # airplane
        (6, 55, 65),     # bus
        (7, 187, 194),   # train
        (8, 568, 576),   # truck
        (9, 32, 46),     # boat
        (10, 563, 567),  # traffic light
        (11, 326, 330),  # fire_hydrant
        (12, 503, 506),  # stop_sign
        (13, 415, 418),  # parking_meter
        (14, 244, 247),  # bench
        (15, 25, 31),    # bird
        (16, 77, 86),    # cat
        (17, 112, 129),  # dog
        (18, 130, 146),  # horse
        (19, 175, 186),  # sheep
        (20, 97, 107),   # cow
        (21, 314, 325),  # elephant
        (22, 236, 239),  # bear
        (23, 596, 600),  # zebra
        (24, 343, 348),  # giraffe
        (25, 209, 214),  # backpack
        (26, 577, 584),  # umbrella
        (27, 353, 356),  # handbag
        (28, 539, 546),  # tie
        (29, 507, 516),  # suitcase
        (30, 337, 342),  # Frisbee
        (31, 464, 474),  # skis
        (32, 475, 483),  # snowboard
        (33, 489, 502),  # sports_ball
        (34, 369, 376),  # kite
        (35, 225, 232),  # baseball_bat
        (36, 233, 235),  # baseball_glove
        (37, 454, 463),  # skateboard
        (38, 517, 528),  # surfboard
        (39, 534, 538),  # tennis_racket
        (40, 47, 54),    # bottle
        (41, 589, 595),  # wine_glass
        (42, 296, 305),  # cup
        (43, 331, 336),  # fork
        (44, 377, 383),  # knife
        (45, 484, 488),  # spoon
        (46, 253, 257),  # bowl
        (47, 215, 224),  # banana
        (48, 199, 208),  # apple
        (49, 439, 445),  # sandwich
        (50, 398, 407),  # orange
        (51, 258, 264),  # broccoli
        (52, 274, 283),  # carrot
        (53, 357, 363),  # hot_dog
        (54, 419, 429),  # pizza
        (55, 306, 313),  # donut
        (56, 265, 273),  # cake
        (57, 87, 92),    # chair
        (58, 93, 96),    # couch
        (59, 171, 174),  # potted_plant
        (60, 240, 243),  # bed
        (61, 108, 111),  # dining_table
        (62, 551, 558),  # toilet
        (63, 195, 198),  # TV
        (64, 384, 389),  # laptop
        (65, 394, 397),  # mouse
        (66, 435, 438),  # remote
        (67, 364, 368),  # keyboard
        (68, 284, 290),  # cell_phone
        (69, 390, 393),  # microwave
        (70, 408, 414),  # oven
        (71, 547, 550),  # toaster
        (72, 450, 453),  # sink
        (73, 430, 434),  # refrigerator
        (74, 248, 252),  # book
        (75, 291, 295),  # clock
        (76, 585, 588),  # vase
        (77, 446, 449),  # scissors
        (78, 529, 533),  # teddy_bear
        (79, 349, 352),  # hair_drier
        (80, 559, 562),  # toothbrush
    ]
    del_i = 0   # total suppressed interaction pairs (a + b)
    del_ni = 0  # total suppressed no-interaction pairs (c)
    for classid, begin, finish in hoi_ranges:
        num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter, thres_inter, classid, begin, finish)
        del_i += num_del_i
        del_ni += num_del_no_i
    print('num_del_inter', del_i, 'num_del_no_inter', del_ni)
def main():
    """CLI entry point: <output_pkl> <HICO_dir> <thres_no_inter> <thres_inter>."""
    output_file, HICO_dir = sys.argv[1], sys.argv[2]
    thres_no_inter, thres_inter = float(sys.argv[3]), float(sys.argv[4])
    print("the output file is", output_file)
    print("the threshold of no interaction score is", thres_no_inter)
    print("the threshold of interaction score is", thres_inter)
    Generate_HICO_detection(output_file, HICO_dir, thres_no_inter, thres_inter)


if __name__ == '__main__':
    main()
| [
"790327070@qq.com"
] | 790327070@qq.com |
8d533a2e1e17a9542fe1eeacefeee17b3e6fde53 | c25785eefec6a6aadeb215902d672345ffdf8873 | /EDA/check.py | 080b2913fa5f886a091ded500c71a2c0fdecb02b | [] | no_license | ayush237/Phising-Detector | 77e29ed996a1d9a5da0a75d27dd282c6561fe117 | 9c5cf887fe1b2cb5e9578968aed01bfff3af0e40 | refs/heads/master | 2023-01-06T08:44:56.151058 | 2020-11-04T18:25:19 | 2020-11-04T18:25:19 | 296,503,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py |
import re
def URLBreakdown(url):
    """Extract lexical phishing-detection features from *url*.

    Returns [has_ip, lenurl, atsymbol, redirect_check, hyphen, dotcount,
    httpsorhttp, https].  Graded features use 1 = legitimate,
    0 = suspicious, -1 = phishy; the rest are 1/0 presence flags.
    """
    # Dotted-quad IPv4 anywhere in the URL -- a raw IP in place of a
    # hostname is a classic phishing trait.  Fix: the original searched a
    # hard-coded sample string (never the input), and its triple-quoted
    # pattern contained literal newlines so it could never match anyway.
    ip_pattern = re.compile(
        r'\b(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
        r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
        r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
        r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\b')
    has_ip = 1 if ip_pattern.search(url) else 0
    # URL length buckets: <54 legit, 54-75 suspicious, >75 phishy.
    if len(url) < 54:
        lenurl = 1
    elif len(url) <= 75:
        lenurl = 0
    else:
        lenurl = -1
    # Fix: str.find() returns -1 (truthy) when '@' is absent, so the
    # original flagged '@' for almost every URL.
    atsymbol = 1 if "@" in url else 0
    # A '//' after the scheme (index > 7) suggests an embedded redirect.
    # Fix: rfind instead of rindex so URLs without '//' don't raise
    # ValueError.
    redirect_check = 1 if url.rfind("//") > 7 else 0
    hyphen = 1 if "-" in url else 0
    # Count dots after 'www.' to gauge subdomain depth.  Fix: the original
    # counted dots *before* 'www.' (despite the variable name) and had a
    # duplicated, unreachable `elif count == 2` branch.  Presumably one
    # dot = plain domain (legit), two = one subdomain (suspicious) --
    # TODO confirm against the training-data convention.
    afterwwwdot = url.partition("www.")[2]
    count = afterwwwdot.count('.')
    if count == 1:
        dotcount = 1
    elif count == 2:
        dotcount = 0
    else:
        dotcount = -1
    # Fix: url[4] raised IndexError on strings shorter than 5 characters.
    https = 1 if url.startswith("https") else 0
    httpsorhttp = 1 if url.startswith("http") else 0
    return [has_ip, lenurl, atsymbol, redirect_check, hyphen, dotcount, httpsorhttp, https]
| [
"50947002+dhruvstan@users.noreply.github.com"
] | 50947002+dhruvstan@users.noreply.github.com |
f8917466e4a192e6642d00bf1a4285c37b838b7f | 52725f5e0b5cd1c258c22a8b14979c32b010463f | /setup.py | 3719dcbde5fc902b4035eec5da685152fa4e077b | [
"MIT"
] | permissive | ehershey/penn-sdk-python | 0fd902cc88ac7f722ce3440ae3192f01a94250ed | 30f7c975e76db85c5c365ba0aff81ff0fe2ce58a | refs/heads/master | 2021-01-21T08:38:19.898861 | 2013-10-30T20:55:45 | 2013-10-30T20:55:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from distutils.core import setup
import penn
# Package metadata for distribution on PyPI.
setup(
    name='PennSDK',
    description='Python tools for building Penn-related applications',
    url='https://github.com/pennappslabs/penn-sdk-python',
    author='PennApps Labs',
    author_email='pennappslabs@gmail.com',
    # Single-source the version from the package itself.
    version=penn.__version__,
    packages=['penn'],
    license='MIT',
    # Reuse the README as the PyPI long description.
    long_description=open('README.md').read(),
    # Pinned HTTP client used by the API wrappers.
    install_requires=[
        'requests==1.2.3'
    ]
)
| [
"kyle@kylehardgrave.com"
] | kyle@kylehardgrave.com |
0c3188d7fce7c05801cd688411919a2258081394 | a4a9ceccc22c47b1bdeb4903665f926893c11b1f | /parties.py | 37b4cef22d99fb6746fbccb1847df3114d0378c0 | [] | no_license | dominiquetheodore/projet_parlement | 21229ef25d9013f728480a9179f8e8038880c5c3 | ff7465254e68cc1804d36a7e80c0acf3f9e2cbb4 | refs/heads/master | 2021-01-19T22:15:17.254437 | 2017-08-05T14:12:29 | 2017-08-05T14:12:29 | 88,780,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import PQ, Deputy, Base, Party
from os import remove, listdir
from os.path import isfile, join, exists
from datetime import datetime, timedelta
engine = create_engine('sqlite:///PQs.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Seed the Party table from the CSV export (header row skipped).
with open('csv/parties.csv', 'rb') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',')
    rows = list(spamreader)
    for row in rows[1:]:
        # Python 2: decode the raw bytes as UTF-8 before storing.
        party = Party(name=unicode(row[0], "utf-8"), color=unicode(row[1], "utf-8"))
        session.add(party)
        print 'Successfully added a party: %s'%(party.name)
    # Single commit after all inserts.  NOTE(review): indentation was lost
    # in this dump -- confirm the commit sat outside the per-row loop.
    session.commit()
| [
"dominique.theodore86@gmail.com"
] | dominique.theodore86@gmail.com |
1f1e521ed713f698e4e1be29a5625ab970c1060f | a270c429e0228936e3fcf9879c5db4586de086f4 | /testpy.py | 304e245571c84bf59d6692d171f06287e725e405 | [] | no_license | MorijyobiNogyo/cameratest | 0e0fb711ae8616ddeb49ebf481ff2d7583cbcbac | 13b0318afb968b6ae40db67c60f352896c112d79 | refs/heads/master | 2016-09-05T14:10:31.459854 | 2014-04-07T00:52:33 | 2014-04-07T00:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | import test
test.test1()  # smoke-call the local test module's entry point
| [
"kamatuka3@gmail.com"
] | kamatuka3@gmail.com |
89dbf6c60e02fa4811b4747ec6b0e32ad8f7d785 | b505e15a2d697f50081eb1d9e01be81213fee51d | /MotionControl.py | 317798a5075de6faf96fba9c36537c8d6cd30652 | [] | no_license | adamanderson/he10_fridge_control | 0dd2af8e9c19752d91901eb3b9adddf83715f537 | 9cb6c0747760b79dc2feef750062798b70d81736 | refs/heads/master | 2021-01-17T00:33:36.053486 | 2020-12-05T21:24:40 | 2020-12-05T21:24:40 | 41,942,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,646 | py | # -*- coding: utf-8 -*-
"""
Last version of 7/24/17
@author: Antony Simonoff
for SPT-3G
This file prints position, velocity, and time to a text file or spreadsheet
"""
from __future__ import print_function
import time
import serial
import sys
#from datetime import datetime
#from Tkinter import *
#import xlsxwriter
reload(sys) #sets default encoding due to ascii encoding issues
sys.setdefaultencoding('utf8')
class NewportSMC100CC:
    """Minimal serial driver for a Newport SMC100CC motion controller.

    Speaks the controller's ASCII protocol over /dev/ttyUSB1 at 57600
    baud; every command targets controller address 01.  Command
    mnemonics: OR = home (origin search), VA = velocity, PA = absolute
    move, TP = position query.  NOTE(review): command semantics assumed
    from the mnemonics and surrounding code -- confirm against the
    SMC100 manual.
    """

    def __init__(self):
        # Open the port, then home the stage.
        self.serial_interface = serial.Serial(port='/dev/ttyUSB1', baudrate=57600,
            bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE, xonxoff=True)
        self.serial_interface.write('01OR\r\n')
        time.sleep(.3)  # longer wait because initialization (homing)
        self.serial_interface.write('01VA10\r\n')  # set slow default velocity

    def set_velocity(self, velocityInput):
        # Set velocity (degrees/second); no reply expected.
        velocityWrite = "01VA" + str(velocityInput) + "\r\n"
        self.serial_interface.write(velocityWrite)

    def set_position(self, goToDegrees):
        # Command an absolute move to goToDegrees.
        positionWrite = "01PA" + str(goToDegrees) + "\r\n"
        self.serial_interface.write(positionWrite)

    def get_velocity(self):
        # Query the velocity; the short pause lets the reply buffer fill.
        self.serial_interface.write('01VA?\r\n')
        time.sleep(.1)
        velocity = self.serial_interface.read(self.serial_interface.inWaiting())
        return velocity

    def get_position(self):
        # Query the current position (raw reply string, e.g. '01TP12.5').
        self.serial_interface.write('01TP?\r\n')
        time.sleep(.1)
        position = self.serial_interface.read(self.serial_interface.inWaiting())
        return position
def is_number(testStr):
    """Return True when *testStr* parses as a float, else False."""
    try:
        float(testStr)
    except ValueError:
        return False
    return True
def formatter(inputStr):
    """Parse a controller reply such as '01TP12.5\\r\\n' into the float 12.5.

    Trims surrounding whitespace, drops the 4-character command echo
    (e.g. '01TP'), and converts the remainder to a float.
    """
    trimmed = str(inputStr).strip()
    return float(trimmed[4:])
newport = NewportSMC100CC()
#interacting with user:
#set velocity at beginning of file run:
global velocityInput
velocityInput = raw_input("What should the velocity be, in degrees per second? Leave blank for 10deg/s\n\n")
if is_number(velocityInput) == True:
newport.set_velocity(velocityInput)
if velocityInput == "":
veloctyInput = 10
#set pause at beginning of file run
pauseBetweenLoops = input("How often should data output? Each loop takes about 0.2s\n\n")
"""def show_entry_fields(): #Tkinter input/output, but I haven't figured out how to do this yet
print("Loops: %s\nSeconds between loops: %s" % (e1.get(), e2.get()))
pauseBetweenLoops = e2.get()
numberOfLoops = e1.get()
master = Tk()
Label(master, text="How many loops?").grid(row=0)
Label(master, text="Time between loops?").grid(row=1)
e1 = Entry(master)
e2 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
Button(master, text='Close', command=master.quit).grid(row=3, column=0, sticky=W, pady=4)
Button(master, text='Confirm', command=show_entry_fields).grid(row=3, column=1, sticky=W, pady=4)
#mainloop()
wait_variable(numberOfLoops)"""
#make spreadsheet as output
#workbook = xlsxwriter.Workbook('Output.xlsx')
#worksheet = workbook.add_worksheet()
#bold = workbook.add_format({'bold': 1})
#headers
#worksheet.write('A1', 'Position (degrees)', bold)
#worksheet.write('B1', 'Velocity', bold)
#worksheet.write('C1', 'Time in UTC', bold)
#worksheet.set_column('A:C', 15)
presetDegrees = 45 #sets preset degrees around origin; + and - presetDegrees used
outputFile = open('Output.txt', 'a')
def inputFunc():
    """Prompt for a target angle and return it.

    Accepts any numeric string, or 'A'/'B' to pick +/- presetDegrees.
    Fix: on unrecognised input the recursive retry previously discarded
    the recursive call's result and fell through returning None.
    """
    goToDegrees = raw_input("Input a degree value to go to. Leave blank to choose presets: " + str(presetDegrees) + " deg, or -" + str(presetDegrees) + " deg\n\n")
    if is_number(goToDegrees) == True:
        return goToDegrees
    presetInput = raw_input("Enter (A) for " + str(presetDegrees) + " deg, (B) for -" + str(presetDegrees) + " deg\n\n")
    if presetInput == "A" or presetInput == "a":
        return presetDegrees
    elif presetInput == "B" or presetInput == "b":
        return presetDegrees * -1
    # Fix: propagate the retry's result instead of dropping it.
    return inputFunc()
def rotateFunc():
    """Drive the stage to a user-chosen angle while logging samples.

    Reads the target angle from inputFunc(), commands the move, then
    polls the controller about every ``pauseBetweenLoops`` seconds and
    appends position/velocity/time records to the open ``outputFile``.
    """
    currentPosition = formatter(newport.get_position())
    print(currentPosition)
    currentLoop = 0
    goToDegrees = inputFunc()
    # Estimated number of samples needed to cover the move.
    # NOTE(review): operator precedence makes this
    # (|cur| + |goal|/v) / pause, not (|cur| + |goal|) / v / pause --
    # confirm which formula was intended.
    numberOfLoops = int((abs(int(currentPosition)) + abs(int(goToDegrees))/int(velocityInput))/pauseBetweenLoops)
    print(numberOfLoops)
    newport.set_position(goToDegrees)
    for currentLoop in range(0, numberOfLoops):  # loop for data output
        currentLoop = currentLoop + 1
        response_position = newport.get_position()
        print(numberOfLoops)
        """if is_number(response_position) == True :
            positionPrint = stringer("Position:", response_position)
            outputFile.write(positionPrint)
        else:
            positionPrint = stringer("Position Error:", errString_position)
            outputFile.write(positionPrint)"""
        positionPrint = "Position:" + " " + str(formatter(response_position)) +"\n"
        outputFile.write(positionPrint)
        velocityPrint = "Velocity:" + " " + str(velocityInput) + "\n"
        outputFile.write(velocityPrint)
        timePrint = "Time in UTC:" + " " + str(time.time()) +"\n"
        outputFile.write(timePrint)
        currentLoopPrint = "Current Loop:" + " " + str(currentLoop) +"\n"
        outputFile.write(currentLoopPrint)
        outputFile.write("\n")
        # Each poll cycle already takes roughly 200 ms of serial I/O on
        # top of this pause.
        time.sleep(pauseBetweenLoops)
def continueFunc():
    """Ask whether to run another move; 'N' closes the log and exits."""
    continueInput = raw_input("Continue Y/N? ")
    if continueInput == "Y" or continueInput == "y":
        continueInput = "Yes"
        rotateFunc()
    elif continueInput == "N" or continueInput == "n":
        continueInput = "No"
        outputFile.close()
        sys.exit()
    else:
        # Unrecognised answer: re-prompt.
        continueFunc()
continueFunc()
outputFile.close()
#workbook.close()
| [
"noreply@github.com"
] | noreply@github.com |
13b3e86381b9d83ab0f7bc356fcac495f9962c13 | 2cf698dab041796369c137404bc01da055ccfe33 | /P1即时标记/markup.py | d308e100e690b4ddd8e2f497099756507f094142 | [] | no_license | DFEViA/Python2.7Project | f698d8bcd1b175b75b939597b885ceb267da18ef | 2608eb6257c8183e4dec948d7d9b35b3a4469ba9 | refs/heads/master | 2020-04-05T14:33:08.941046 | 2017-07-06T06:04:18 | 2017-07-06T06:04:18 | 94,674,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-06-18 21:34:54
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import sys, re
from handlers import *
from util import *
from rules import *
class Parser:
"""
语法分析器读取文本文件、应用规则并且控制处理程序
"""
def __init__(self, handler):
self.handler = handler
self.rules = []# 规则器
self.filters = []# 过滤器
def addRule(self, rule):
self.rules.append(rule)
def addFilter(self, pattern, name):
def filter(block, handler):
return re.sub(pattern, handler.sub(name), block)
"""
然后仔细看书,书上在前面有这样一句话,[re.sub函数可以将第一个函数作为第二个参数]。至少笔者觉得这句话写的很奇怪,’第一个函数‘明明要写成第一个参数啊有木有。好吧,不吐槽这些。
大概意思就是,re.sub的第二个参数可以是一个函数作为替换式,替换式的参数就是re.sub的第一个参数匹配后返回的正则对象。
这下就可以看懂了,我们会去调用sub_emphasis(self,match),然后match.group(1)表示的实际上是is。关于group(1)大家去看一下,re模块的内容,在这里我就直接告诉你他的内容,就是匹配式(.+?)中的内容。
"""
self.filters.append(filter)
def parse(self, file):
self.handler.start('document')
for block in blocks(file):
for filter in self.filters:
block = filter(block, self.handler)#重新绑定到block
for rule in self.rules:
if rule.condition(block):
last = rule.action(block, self.handler)
if last:break
self.handler.end('document')
class BasicTestParser(Parser):
"""
在构造函数中增加规则和过滤器的具体语法分析器
"""
def __init__(self, handler):
Parser.__init__(self, handler)
self.addRule(ListRule())
self.addRule(ListItemRule())
self.addRule(TitleRule())
self.addRule(HeadingRule())
self.addRule(ParagraphRule())
self.addFilter(r'\*(.+?)\*', 'emphasis')
self.addFilter(r'(http://[\.a-zA-Z/]+)', 'url')
self.addFilter(r'([\.a-zA-z]+@[\.a-zA-Z]+[a-zA-Z]+)', 'mail')
handler = HTMLRenderer()
parser = BasicTestParser(handler)
parser.parse(sys.stdin) | [
"2653239255@qq.com"
] | 2653239255@qq.com |
180d7e63bd9cdeaa048ef5fceb023abdcaffe85e | 07120adf02a2fc58a9d3ee85ecc7f6abb2a0a4c7 | /UdPject/Original_bot.py | 7d20218c56a92073d8066ac86013869fc36f0fdf | [
"MIT"
] | permissive | Mbonea-Mjema/Telegram | 53d22896cbd462df355dde75b36abd4c050e032c | b6c6910e646816a2985e29f6b9dfd514e02e14c8 | refs/heads/master | 2021-06-24T12:57:46.095595 | 2017-09-12T15:45:32 | 2017-09-12T15:45:32 | 103,290,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,929 | py |
from run import getData #gets data from the zigbee(xbee) connected in the raspberry pi
#stikers
Stiker ={"Thinking": "CAADAgADNwADyIsGAAFOgk-EJUfH-gI","Albert_Einstein":"CAADAgADIQADyIsGAAHaCFln7THl9QI" ,"Goodnight":"CAADAgADxgUAAvoLtgjbqjaBr05-YgI","Waiting":"CAADAgADxQUAAvoLtgipmNsAAd08atYC"}
#emoji
Emojies={"SMILEY FACE": u'\U0001F600',"LINUX" :u'\U00001427',"MOVIE":" u'\U0001F3AC"}
#wikiquote api
from wikiquote import quote_of_the_day #wikiquote module for quotes
#extensions
from Extensions.GoodReads import Gquote as Good # Goodreads check the extension folder
from Extensions.Quotes import links
from Extensions.DSE import init as DSE # dar es salaam stock exchange
import re # regular expressions for searching strings
import subprocess # for running linux commands in python
from serial import SerialException
# wikipedia api funtions importation
from wikipedia import summary as ans
from wikipedia import exceptions as ex
from wikipedia import page as Page
#telegram bot api functions importation
from telegram import Bot
import telegram
from telegram.ext import Updater # checks for updates like new messages etc
#useful functions and modules
import schedule # for
from functools import lru_cache as Cache #for storing data in cache
import time
#imdb
import tmdbsimple as tmdb
tmdb.API_KEY = 'e542e5e9c5f014dd493e1a65710bbd18'
search=tmdb.Search()
#test the internet connection
def is_connected():
    """Return True when a TCP connection to www.google.com:80 succeeds.

    Fix: the test socket was leaked (never closed) and a bare ``except``
    swallowed every exception, including KeyboardInterrupt; only network
    errors are caught now.
    """
    import socket
    REMOTE_SERVER = "www.google.com"
    try:
        host = socket.gethostbyname(REMOTE_SERVER)
        s = socket.create_connection((host, 80), 2)
        s.close()
        return True
    except socket.error:
        return False
#wikipedia fuction
@Cache(maxsize=100)  # memoize repeated lookups of the same query
def wiki(received, u):
    """Answer a 'wiki <topic>' query with a Wikipedia summary and image.

    Returns (summary, image_url_or_None).  Errors are reported to the
    chat; on failure (None, None) is returned.  Fix: ``wiki_summary`` and
    ``image`` were previously unbound on the exception paths, so the
    trailing ``return`` raised NameError right after the error handler.
    """
    # NOTE(review): r'\wiki (.*)' -- '\w' matches one word char, so this
    # effectively matches '<char>iki <topic>'; confirm the intended trigger.
    keyWord = re.compile(r'\wiki (.*)')
    topic = keyWord.findall(received)
    wiki_summary = None
    image = None
    try:
        # Pick the first raster image on the page, if any.
        for img in Page(topic).images:
            if ("jpg" in img or "jpeg" in img or "png" in img):
                image = img
                break
        wiki_summary = ans(topic, sentences=3)
    except ex.PageError:
        bot.send_message(chat_id=u.message.chat_id, text="Sorry check your spelling")
    except ex.DisambiguationError as e:
        bot.send_message(chat_id=u.message.chat_id, text="try being more specific \n" + str(e))
    except Exception:
        # NOTE(review): other errors are silently swallowed, as before.
        pass
    return wiki_summary, image
def Movie(item):
    """Google for open-directory ('index of') .mkv listings matching *item*.

    Returns the result links as a list of strings.
    """
    from google.google import search
    return [hit.link for hit in search("intitle:index.of? mkv " + item)]
#quote of the day from wikiquote
# NOTE(review): lru_cache on a side-effecting sender means a repeated call
# with the same update object silently sends nothing -- confirm intended.
@Cache(maxsize=100)
def Quotes(u):
    """Send wikiquote's quote of the day (text, then author) to the chat."""
    quote, person = (quote_of_the_day())
    bot.send_message(chat_id=u.message.chat_id, text=quote)
    bot.send_message(chat_id=u.message.chat_id, text=person)
#this fuction is scheduled to run every morning
def morning(t):
    # Scheduled daily job: send "brainy jim rohn" quote images to a
    # hard-coded chat id. The parameter `t` is unused.
    # NOTE(review): `links` and `bot` are defined elsewhere (not visible in
    # this file) -- confirm they exist at runtime.
    keyWord=re.compile(r'\wrainy (.*)')
    images=links(keyWord.findall("brainy jim rohn"))
    for img in images:
        bot.send_photo(chat_id=431226183, photo=img)
# ---------------------------------------------------------------------------
# Main script: wait for connectivity, start the bot, then poll for updates
# forever and dispatch on keywords found in each incoming message.
# NOTE(review): `getData`, `DSE`, `Good`, `links`, `Emojies` and `Stiker`
# are not defined in this file's visible code -- confirm they exist.
# NOTE(review): the bot token below is committed to source control; it
# should be revoked and loaded from the environment.
# ---------------------------------------------------------------------------
#checks if connected to the internet
while not is_connected():
    print("status: offline")
#bot initializations
updater = Updater(token="399340337:AAG-yD4Xpfo1cCOfoLJLI9lDSE7_fbEtqiA") #put the token you got from "THE BOT FATHER"
bot=Bot(token="399340337:AAG-yD4Xpfo1cCOfoLJLI9lDSE7_fbEtqiA")#put the token you got from "THE BOT FATHER"
print(bot.get_me()) #prints the bots details
updater.start_polling()#starts the updater
print("status: online")
# this will run a certain task at a particular time everyday
schedule.every().day.at("06:00").do(morning,"ok") # daily 06:00 "morning" job; "ok" fills the unused t argument
while True:
    schedule.run_pending()
    try :
        for u in bot.get_updates(): #u stands for update
            # extract sender and text, then dispatch on keywords below
            user=u.message.from_user
            received=u.message.text
            print(str(user["first_name"])+" texted : " + received)
            if "gas" in received.lower():# if you received gas
                # read the gas-sensor value over serial (xbee)
                try:
                    text=str(getData("GAS\n"))
                    bot.send_message(chat_id=u.message.chat_id, text=text+" kg")
                except SerialException as e:
                    bot.send_message(chat_id=u.message.chat_id, text="check your ports please and connect the xbee")
            elif "water" in received.lower(): # if you received water
                try:
                    text=str(getData("WATER\n"))
                    bot.send_message(chat_id=u.message.chat_id, text=text+" LITERS")
                except SerialException as e:
                    bot.send_message(chat_id=u.message.chat_id, text="check your ports please and connect the xbee")
            elif "ip" in received.lower():
                # report the host's IP by parsing `ifconfig` output
                inet=str(subprocess.check_output(["ifconfig | grep inet"], shell=True)).split(" ")
                ip=inet[4].split(" ")
                text=ip[0]
                bot.send_message(chat_id=u.message.chat_id, text=text)
            #for brainy quotes (images) text brainy (name of the person)
            elif "brainy" in received.lower():
                keyWord=re.compile(r'\wrainy (.*)')
                print(keyWord.findall(received.lower()))
                images=links(keyWord.findall(received.lower()))
                for img in images:
                    bot.sendPhoto(chat_id=u.message.chat_id, photo=img)
            #for goodreads quotes text good (person) (number of quotes you want)
            elif "movie" in received.lower():
                # look the title up on TMDb, then attach up to 3 download links
                keyWord=re.compile(r'\wvie (.*)')
                item=(keyWord.findall(received.lower())[0])
                imdb_result=search.movie(query=item)
                if(len(imdb_result["results"])!=0):
                    data=imdb_result["results"][0]
                    title=data["title"]
                    poster="https://image.tmdb.org/t/p/w300_and_h450_bestv2"+data["poster_path"]
                    print("done imdb")
                    Movies=Movie(title)
                    keyboard=[]
                    # NOTE(review): `keyboard` is unused; each button is wrapped
                    # in its own list, giving one button per keyboard row --
                    # confirm that layout is intended.
                    row=[]
                    for link in Movies:
                        if(Movies.index(link)!= 3):
                            row.append([telegram.InlineKeyboardButton(text=" link :"+ str(Movies.index(link)+1),url=str(link))])
                        else:
                            break
                    reply_markup = telegram.InlineKeyboardMarkup(row)
                    bot.sendPhoto(chat_id=u.message.chat_id,photo=poster,caption=title)
                    bot.send_message(chat_id=u.message.chat_id, text= "overview : \n" +data["overview"] ,reply_markup=reply_markup)
                else:
                    bot.send_message(chat_id=u.message.chat_id, text="Nothing found")
                print("sent movie details")
            elif "good" in received.lower():
                keyWord=re.compile(r'\wod(.*) (\d?\d)?')
                qoute , number=keyWord.findall(received.lower())[0]
                if number != None:
                    number=int(number)
                Gq=Good(qoute , number)
                for quote in Gq :
                    bot.send_message(chat_id=u.message.chat_id, text=quote[0])
                    bot.send_message(chat_id=u.message.chat_id, text=quote[1])
            #commands in linux (sudo)"note the "s" in sudo should be a small letter" text sudo (command)
            elif "sudo" in received:
                # SECURITY: this executes arbitrary chat text in a shell
                # (shell=True) -- command injection by design; restrict to
                # trusted chat ids or remove.
                received=str(received)
                text=(subprocess.check_output([received], shell=True)).decode().strip()
                bot.send_message(chat_id=u.message.chat_id, text=text)
            #dar es salaam stock exchange text "dse"
            elif "dse" in received.lower():
                bot.send_message(chat_id=u.message.chat_id, text=DSE())
            #get music from the music folder text "Music (song name)"
            elif "Music" in received:
                keyWord=re.compile(r'\wusic (.*)')
                word=str(keyWord.findall(received)[0])
                audio=str(subprocess.check_output(["ls /home/pi/Music/"+word], shell=True).decode()).strip()
                bot.send_audio(chat_id=u.message.chat_id, audio=open(audio,"rb"))
                bot.send_chat_action(chat_id=u.message.chat_id,action=telegram.ChatAction.UPLOAD_AUDIO)
            #get wikiquote of the day text "quote"
            elif "quote" in received.lower():
                Quotes(u)
            #search wikipedia text "wiki (something)" example "wiki University of Dar es salaam"
            elif "wiki" in received.lower():
                bot.send_message(chat_id=u.message.chat_id, text="ok")
                text,image = wiki(received,u)
                print(image)
                print("sending photo")
                bot.sendPhoto(chat_id=u.message.chat_id, photo=image)
                bot.send_message(chat_id=u.message.chat_id, text=text)
            #the bot will send your first name
            elif("name" in received.lower() ):
                text="Your name is"+user["first_name"]
                bot.send_message(chat_id=u.message.chat_id, text=text)
            #The bot will send you a document
            elif("document" in received.lower()):
                bot.send_document(chat_id=u.message.chat_id, document=open("/home/pi/UdPject/Records.xlsx","rb"))
            else:
                #default message
                bot.send_message(chat_id=u.message.chat_id, text="Hi "+user["first_name"]+" "+user["last_name"]+" "+Emojies["SMILEY FACE"] +" Try \n(1)dse \n \
                (2)quotes\n (3) document\n (4) wiki obama or wiki 'something'\n (5) sudo 'command' \n (6) water \n (7) gas\n(8)Movie eg movie avengers")
                time.sleep(1)
            bot.send_sticker(chat_id=u.message.chat_id,sticker=Stiker["Waiting"])
    except Exception as e:
        # Any failure (typically lost connectivity) drops us here: busy-wait
        # for the network, restart polling, and report the error to the chat.
        print("status: offline")
        while not is_connected():
            pass
        updater.start_polling()
        print("status: online")
        #for debugging purposes it will text you the exception that occured
        text=str(e)
        bot.send_message(chat_id=u.message.chat_id, text=text)
| [
"noreply@github.com"
] | noreply@github.com |
e2df404f234ab3d108cbda675d9190679f716fdd | feccf7588777becba68921c0bfade3e21f5210ce | /airflow/providers/google/ads/_vendor/googleads/v12/services/services/feed_mapping_service/client.py | 47bde342b33c5b65eb11046e4f5b390987d25d06 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | francescomucio/airflow | f17ed9abd8b41d8a2227deca052508edf12f1cbf | c199b1a10563a11cf24436e38cb167ae82c01601 | refs/heads/master | 2023-04-14T17:44:53.438246 | 2023-04-06T06:44:23 | 2023-04-06T06:44:23 | 217,327,641 | 0 | 0 | Apache-2.0 | 2020-09-09T13:26:47 | 2019-10-24T15:06:52 | Python | UTF-8 | Python | false | false | 20,872 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from airflow.providers.google.ads._vendor.googleads.v12.services.types import feed_mapping_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedMappingServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedMappingServiceGrpcTransport
class FeedMappingServiceClientMeta(type):
"""Metaclass for the FeedMappingService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedMappingServiceTransport]]
_transport_registry["grpc"] = FeedMappingServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedMappingServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedMappingServiceClient(metaclass=FeedMappingServiceClientMeta):
"""Service to manage feed mappings."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedMappingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedMappingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedMappingServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedMappingServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def feed_mapping_path(
customer_id: str, feed_id: str, feed_mapping_id: str,
) -> str:
"""Returns a fully-qualified feed_mapping string."""
return "customers/{customer_id}/feedMappings/{feed_id}~{feed_mapping_id}".format(
customer_id=customer_id,
feed_id=feed_id,
feed_mapping_id=feed_mapping_id,
)
@staticmethod
def parse_feed_mapping_path(path: str) -> Dict[str, str]:
"""Parses a feed_mapping path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feedMappings/(?P<feed_id>.+?)~(?P<feed_mapping_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedMappingServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed mapping service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedMappingServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedMappingServiceTransport):
# transport is a FeedMappingServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feed_mappings(
self,
request: Union[
feed_mapping_service.MutateFeedMappingsRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[feed_mapping_service.FeedMappingOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_mapping_service.MutateFeedMappingsResponse:
r"""Creates or removes feed mappings. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__
`DistinctError <>`__ `FeedMappingError <>`__ `FieldError <>`__
`HeaderError <>`__ `IdError <>`__ `InternalError <>`__
`MutateError <>`__ `NotEmptyError <>`__
`OperationAccessDeniedError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v12.services.types.MutateFeedMappingsRequest, dict]):
The request object. Request message for
[FeedMappingService.MutateFeedMappings][google.ads.googleads.v12.services.FeedMappingService.MutateFeedMappings].
customer_id (str):
Required. The ID of the customer
whose feed mappings are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v12.services.types.FeedMappingOperation]):
Required. The list of operations to
perform on individual feed mappings.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v12.services.types.MutateFeedMappingsResponse:
Response message for a feed mapping
mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_mapping_service.MutateFeedMappingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, feed_mapping_service.MutateFeedMappingsRequest
):
request = feed_mapping_service.MutateFeedMappingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_feed_mappings
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedMappingServiceClient",)
| [
"noreply@github.com"
] | noreply@github.com |
ee82711c75670f7d87554edba44e7e7b2e7f4bb1 | 7c0cb3fd9bb95df51470271446f917c3b92f1d07 | /Ch12_Window-Program/12-06-img_button.py | 54e06368940c3b13032506c2d71c533fa8e1eeac | [] | no_license | ShinHyoHaeng/Learn-Python | daf5b9e761427abe328faa651d179dceaa5291fe | 52c1cf4df4be66321b9c977743f75a75f6d675f5 | refs/heads/master | 2023-08-28T10:57:11.790102 | 2021-10-18T01:22:17 | 2021-10-18T01:22:17 | 403,936,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py |
## Basic widgets for a windowed program: buttons - an image button
from tkinter import *
# Clicking the button raises an error (NameError: name 'messagebox' is not defined)
from tkinter import messagebox # workaround for the messagebox error in Python 3.6+
# Function definitions
def myFunc() :
    messagebox.showinfo("강아지 버튼", "강아지는 언제나 옳다")
# Main code
window = Tk()
window.title("이미지 버튼 연습") # show a title on the window
# Put an image on the button
# step 1. create the image that goes on the button
photo = PhotoImage(file="gif/dog3.gif")
# step 2. create the button
button = Button(window, image=photo, command=myFunc) # must be myFunc, not myFunc() (that would call it immediately)
# step 3. display the button
button.pack()
window.mainloop()
| [
"hyohaeng.shin@gmail.com"
] | hyohaeng.shin@gmail.com |
8596ef3beed2acbd192211891b7e6ff558450fe7 | ce3e499774c45b9cce4c7b9b1b7ea762a1a7c0c1 | /src/foodify/settings.py | 7b69f33b4b45e62938a070cf4b80c8f508fd8790 | [] | no_license | aminaloui/Foodify-Praktijk-2- | 6d6776a09f9ecaaebf54cedaf7431f7af5071951 | 3184beb38804cdc6def4ec2a3c20d868f6320a42 | refs/heads/master | 2020-08-08T06:55:04.770012 | 2019-11-01T18:02:02 | 2019-11-01T18:02:02 | 213,767,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,938 | py | """
Django settings for foodify project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '=qwqcn*t(z!6)_%)x^l0c64tge=8n0*4mia6iu!2masl1-y8up'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'foods',
    'tags',
)
# Pre-Django-1.10 middleware setting (this project targets 1.8 per the
# module docstring); newer Django versions use MIDDLEWARE instead.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'foodify.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'foodify.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "staticfiles")
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "mediafiles")
| [
"amin@Amins-MacBook-Pro-2.local"
] | amin@Amins-MacBook-Pro-2.local |
f58dcee5cb758e78ff6bf7dcf3b58455a48bf188 | 88dfd5f8ff788378305b832813f44ac6c3939065 | /MxOnline/urls.py | 314156940afc1209329e74b256b420371714b839 | [] | no_license | jixiaoxin666/MxOnline | 27ebb0041a038c95158baa3e97dd812a13f9f8a0 | 22b88a8a35899fedec19e64eeb15eb809b038772 | refs/heads/master | 2021-04-03T08:16:04.363410 | 2018-04-14T06:53:37 | 2018-04-14T06:53:37 | 124,881,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # _*_encoding:utf-8_*_
"""MxOnline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
import xadmin
urlpatterns = [
    # default Django admin, intentionally superseded by xadmin below
    # url(r'^admin/', admin.site.urls),
    url(r'^xadmin/', xadmin.site.urls), # admin replaced with xadmin
]
| [
"jihuixin@xjgreat.com"
] | jihuixin@xjgreat.com |
89a83059cc975cbb899bcbf35c4ce9000b7da5e0 | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_watermill_api.py | 9b7b62efa75df5f6b212c5921dfa2cb31da4fd6a | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.watermill_api import WatermillApi # noqa: E501
from dbpedia.rest import ApiException
class TestWatermillApi(unittest.TestCase):
"""WatermillApi unit test stubs"""
def setUp(self):
self.api = dbpedia.api.watermill_api.WatermillApi() # noqa: E501
def tearDown(self):
pass
def test_watermills_get(self):
"""Test case for watermills_get
List all instances of Watermill # noqa: E501
"""
pass
def test_watermills_id_get(self):
"""Test case for watermills_id_get
Get a single Watermill by its id # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"maxiosorio@gmail.com"
] | maxiosorio@gmail.com |
82d2fa47943004945d86fbcb57953c0a89b36722 | 6326fa61ea28764a430a3910a162ef9861977db3 | /src/spec/genvk.py | 7fb67200e0cb76a26cca2f9636ba3699c76024f9 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"LicenseRef-scancode-other-permissive",
"CC-BY-4.0"
] | permissive | hanaa-mohamed/Vulkan-Docs | ec4426a9ed837322337e2d18191dcf4369d49ff8 | 64fa8ef4df3bff37214e717abe490f7ea7ea44b0 | refs/heads/1.0 | 2021-03-22T04:49:46.239044 | 2017-11-27T09:07:06 | 2017-11-27T09:07:06 | 112,636,243 | 1 | 0 | null | 2017-11-30T16:48:47 | 2017-11-30T16:48:47 | null | UTF-8 | Python | false | false | 13,612 | py | #!/usr/bin/python3
#
# Copyright (c) 2013-2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, cProfile, pdb, string, sys, time
from reg import *
from generator import write
from cgenerator import CGeneratorOptions, COutputGenerator
from docgenerator import DocGeneratorOptions, DocOutputGenerator
from extensionmetadocgenerator import ExtensionMetaDocGeneratorOptions, ExtensionMetaDocOutputGenerator
from pygenerator import PyOutputGenerator
from validitygenerator import ValidityOutputGenerator
from hostsyncgenerator import HostSynchronizationOutputGenerator
from extensionStubSource import ExtensionStubSourceOutputGenerator
# Simple timer functions
startTime = None

def startTimer(timeit):
    """Record the current time as the start of a timed section.

    The `timeit` flag is accepted for symmetry with endTimer but unused.
    """
    global startTime
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; perf_counter() is the documented replacement for interval timing.
    startTime = time.perf_counter()

def endTimer(timeit, msg):
    """If `timeit` is set, report `msg` and the elapsed time since the
    matching startTimer() call, then clear the shared start time."""
    global startTime
    endTime = time.perf_counter()
    if (timeit):
        write(msg, endTime - startTime, file=sys.stderr)
    startTime = None
# Turn a list of strings into a regexp string matching exactly those strings
def makeREstring(strings):
    """Build a regexp string matching exactly the names in *strings*.

    The result anchors the whole string and alternates the given names,
    e.g. ['a', 'b'] -> '^(a|b)$'.  Names are deliberately not escaped:
    the command line documents that they may themselves be regexps.
    """
    # Parameter renamed from 'list' to avoid shadowing the builtin; all
    # callers in this file pass it positionally.
    return '^(' + '|'.join(strings) + ')$'
# Returns a directory of [ generator function, generator options ] indexed
# by specified short names. The generator options incorporate the following
# parameters:
#
# extensions - list of extension names to include.
# protect - True if re-inclusion protection should be added to headers
# directory - path to directory in which to generate the target(s)
def makeGenOpts(extensions = [], removeExtensions = [], protect = True, directory = '.'):
    """Populate the global genOpts dict mapping target names to [generator, options] pairs.

    :param extensions: extension names (or regexps) to add to generated targets
    :param removeExtensions: extension names (or regexps) to remove from targets
    :param protect: True if re-inclusion protection should be added to headers
    :param directory: path to directory in which to generate the target(s)

    NOTE(review): the mutable default arguments ([]) are shared across calls;
    harmless here because they are only read, but worth confirming.
    """
    global genOpts
    genOpts = {}
    # Descriptive names for various regexp patterns used to select
    # versions and extensions
    allVersions = allExtensions = '.*'
    noVersions = noExtensions = None
    addExtensions = makeREstring(extensions)
    removeExtensions = makeREstring(removeExtensions)
    # Copyright text prefixing all headers (list of strings).
    prefixStrings = [
        '/*',
        '** Copyright (c) 2015-2017 The Khronos Group Inc.',
        '**',
        '** Licensed under the Apache License, Version 2.0 (the "License");',
        '** you may not use this file except in compliance with the License.',
        '** You may obtain a copy of the License at',
        '**',
        '** http://www.apache.org/licenses/LICENSE-2.0',
        '**',
        '** Unless required by applicable law or agreed to in writing, software',
        '** distributed under the License is distributed on an "AS IS" BASIS,',
        '** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
        '** See the License for the specific language governing permissions and',
        '** limitations under the License.',
        '*/',
        ''
    ]
    # Text specific to Vulkan headers
    vkPrefixStrings = [
        '/*',
        '** This header is generated from the Khronos Vulkan XML API Registry.',
        '**',
        '*/',
        ''
    ]
    # Defaults for generating re-inclusion protection wrappers (or not)
    protectFile = protect
    protectFeature = protect
    protectProto = protect
    # Header for core API + extensions.
    # To generate just the core API,
    # change to 'defaultExtensions = None' below.
    genOpts['vulkan.h'] = [
        COutputGenerator,
        CGeneratorOptions(
            filename = 'vulkan.h',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = allVersions,
            defaultExtensions = 'vulkan',
            addExtensions = None,
            removeExtensions = None,
            prefixText = prefixStrings + vkPrefixStrings,
            genFuncPointers = True,
            protectFile = protectFile,
            protectFeature = False,
            protectProto = '#ifndef',
            protectProtoStr = 'VK_NO_PROTOTYPES',
            apicall = 'VKAPI_ATTR ',
            apientry = 'VKAPI_CALL ',
            apientryp = 'VKAPI_PTR *',
            alignFuncParam = 48)
    ]
    # API include files for spec and ref pages
    # Overwrites include subdirectories in spec source tree
    # The generated include files do not include the calling convention
    # macros (apientry etc.), unlike the header files.
    # Because the 1.0 core branch includes ref pages for extensions,
    # all the extension interfaces need to be generated, even though
    # none are used by the core spec itself.
    genOpts['apiinc'] = [
        DocOutputGenerator,
        DocGeneratorOptions(
            filename = 'timeMarker',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = allVersions,
            defaultExtensions = None,
            addExtensions = addExtensions,
            removeExtensions = removeExtensions,
            prefixText = prefixStrings + vkPrefixStrings,
            apicall = '',
            apientry = '',
            apientryp = '*',
            alignFuncParam = 48,
            expandEnumerants = False)
    ]
    # API names to validate man/api spec includes & links
    genOpts['vkapi.py'] = [
        PyOutputGenerator,
        DocGeneratorOptions(
            filename = 'vkapi.py',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = allVersions,
            defaultExtensions = None,
            addExtensions = addExtensions,
            removeExtensions = removeExtensions)
    ]
    # API validity files for spec
    genOpts['validinc'] = [
        ValidityOutputGenerator,
        DocGeneratorOptions(
            filename = 'timeMarker',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = allVersions,
            defaultExtensions = None,
            addExtensions = addExtensions,
            removeExtensions = removeExtensions)
    ]
    # API host sync table files for spec
    genOpts['hostsyncinc'] = [
        HostSynchronizationOutputGenerator,
        DocGeneratorOptions(
            filename = 'timeMarker',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = allVersions,
            defaultExtensions = None,
            addExtensions = addExtensions,
            removeExtensions = removeExtensions)
    ]
    # Extension stub source dispatcher
    # Note: emits all extensions (addExtensions = '.*'), no core versions.
    genOpts['vulkan_ext.c'] = [
        ExtensionStubSourceOutputGenerator,
        CGeneratorOptions(
            filename = 'vulkan_ext.c',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = None,
            defaultExtensions = None,
            addExtensions = '.*',
            removeExtensions = removeExtensions,
            prefixText = prefixStrings + vkPrefixStrings,
            alignFuncParam = 48)
    ]
    # Extension metainformation for spec extension appendices
    genOpts['extinc'] = [
        ExtensionMetaDocOutputGenerator,
        ExtensionMetaDocGeneratorOptions(
            filename = 'timeMarker',
            directory = directory,
            apiname = 'vulkan',
            profile = None,
            versions = allVersions,
            emitversions = None,
            defaultExtensions = 'vulkan',
            addExtensions = None,
            removeExtensions = None)
    ]
# Generate a target based on the options in the matching genOpts{} object.
# This is encapsulated in a function so it can be profiled and/or timed.
# The args parameter is an parsed argument object containing the following
# fields that are used:
# target - target to generate
# directory - directory to generate it in
# protect - True if re-inclusion wrappers should be created
# extensions - list of additional extensions to include in generated
# interfaces
def genTarget(args):
    """Generate the target named by args.target using its genOpts entry.

    Relies on the module globals `reg`, `errWarn` and `diag` being set up by
    the __main__ block before this is called.  Prints an error to stderr when
    args.target has no registered generator.
    """
    global genOpts
    # Create generator options with specified parameters
    makeGenOpts(extensions = args.extension,
                removeExtensions = args.removeExtension,
                protect = args.protect,
                directory = args.directory)
    if (args.target in genOpts.keys()):
        # Each genOpts entry is a [generator class, options] pair.
        createGenerator = genOpts[args.target][0]
        options = genOpts[args.target][1]
        if not args.quiet:
            write('* Building', options.filename, file=sys.stderr)
        startTimer(args.time)
        gen = createGenerator(errFile=errWarn,
                              warnFile=errWarn,
                              diagFile=diag)
        reg.setGenerator(gen)
        reg.apiGen(options)
        if not args.quiet:
            write('* Generated', options.filename, file=sys.stderr)
        endTimer(args.time, '* Time to generate ' + options.filename + ' =')
    else:
        write('No generator options for unknown target:',
              args.target, file=sys.stderr)
# -extension name - may be a single extension name, a a space-separated list
# of names, or a regular expression.
if __name__ == '__main__':
    # Command-line driver: parse options, load the XML registry, then
    # dispatch to genTarget() (optionally under pdb or cProfile).
    parser = argparse.ArgumentParser()
    parser.add_argument('-extension', action='append',
                        default=[],
                        help='Specify an extension or extensions to add to targets')
    parser.add_argument('-removeExtension', action='append',
                        default=[],
                        help='Specify an extension or extensions to remove from targets')
    parser.add_argument('-debug', action='store_true',
                        help='Enable debugging')
    parser.add_argument('-dump', action='store_true',
                        help='Enable dump to stderr')
    parser.add_argument('-diagfile', action='store',
                        default=None,
                        help='Write diagnostics to specified file')
    parser.add_argument('-errfile', action='store',
                        default=None,
                        help='Write errors and warnings to specified file instead of stderr')
    parser.add_argument('-noprotect', dest='protect', action='store_false',
                        help='Disable inclusion protection in output headers')
    parser.add_argument('-profile', action='store_true',
                        help='Enable profiling')
    parser.add_argument('-registry', action='store',
                        default='vk.xml',
                        help='Use specified registry file instead of vk.xml')
    parser.add_argument('-time', action='store_true',
                        help='Enable timing')
    parser.add_argument('-validate', action='store_true',
                        help='Enable group validation')
    parser.add_argument('-o', action='store', dest='directory',
                        default='.',
                        help='Create target and related files in specified directory')
    parser.add_argument('target', metavar='target', nargs='?',
                        help='Specify target')
    parser.add_argument('-quiet', action='store_true', default=False,
                        help='Suppress script output during normal execution.')
    args = parser.parse_args()
    # This splits arguments which are space-separated lists
    args.extension = [name for arg in args.extension for name in arg.split()]
    # Load & parse registry
    # NOTE(review): `etree` is assumed to be re-exported by `from reg import *`
    # (presumably xml.etree.ElementTree) — confirm against reg.py.
    reg = Registry()
    startTimer(args.time)
    tree = etree.parse(args.registry)
    endTimer(args.time, '* Time to make ElementTree =')
    startTimer(args.time)
    reg.loadElementTree(tree)
    endTimer(args.time, '* Time to parse ElementTree =')
    if (args.validate):
        reg.validateGroups()
    if (args.dump):
        write('* Dumping registry to regdump.txt', file=sys.stderr)
        reg.dumpReg(filehandle = open('regdump.txt', 'w', encoding='utf-8'))
    # create error/warning & diagnostic files
    if (args.errfile):
        errWarn = open(args.errfile, 'w', encoding='utf-8')
    else:
        errWarn = sys.stderr
    if (args.diagfile):
        diag = open(args.diagfile, 'w', encoding='utf-8')
    else:
        diag = None
    if (args.debug):
        # Run under the interactive debugger.
        pdb.run('genTarget(args)')
    elif (args.profile):
        # Profile the run and print the 50 most expensive functions.
        import cProfile, pstats
        cProfile.run('genTarget(args)', 'profile.txt')
        p = pstats.Stats('profile.txt')
        p.strip_dirs().sort_stats('time').print_stats(50)
    else:
        genTarget(args)
| [
"oddhack@sonic.net"
] | oddhack@sonic.net |
99bd11439e2e2f52ed637262e97a8b95926c9cdb | 83043d89093004fb87b133e1affe17a713ace225 | /greatNumberGame/server.py | 8f6f220757e84a40082293baa111e6c457aee612 | [] | no_license | tsicroxe/flaskprojects | f3afa4fd0c3486c59209ae402ac19076a9547e54 | f4955ec4b977460f44562ed3181b5dcdc24c283c | refs/heads/master | 2021-01-12T12:36:20.107323 | 2016-10-15T01:34:34 | 2016-10-15T01:34:34 | 69,612,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | from flask import Flask, session, render_template, redirect, request
import random
app = Flask(__name__)
app.secret_key = "asdf1234"
#Routes to index.html
@app.route('/')
def index():
    """Render the guessing-game page (Python 2 code, kept as-is).

    Ensures a random target number exists in the session, then compares the
    player's last guess (if any) against it and builds feedback for the
    template.
    """
    data = {}
    try:
        # Raises KeyError on a fresh session, which the except handles.
        print 'session number is already set: ' + str(session['number'])
    except:
        session['number'] = random.randint(0,101)
        print "this got excepted and set"
    try:
        # Raises KeyError/ValueError when no guess has been submitted yet.
        guess = int(session['guess'])
        if session['number'] > guess:
            data = {'event':'Your guess is too low'}
            data['color'] = 'red'
            print "Guess is too low"
        elif session['number'] < guess:
            data = {'event':'Your guess is too high'}
            data['color'] = 'red'
            print "guess is too high"
        elif session['number'] == guess:
            data = {'event':"You guessed right!"}
            data['color'] = 'green'
            data['again'] = 'Press reset to play again!'
            print "You guessed right!"
    except:
        print "couldn't get form"
    # NOTE(review): `reset` passes the view function object to the template;
    # presumably only used for url building — confirm against index.html.
    return render_template('index.html', data=data, reset=reset)
@app.route('/guess', methods=["POST"])
def guess():
    """Store the submitted guess in the session and redirect back to the index page."""
    session['guess'] = request.form['guess']
    print 'Session number is ' + str(session['number'])
    print 'guess is ' + str(session['guess'])
    return redirect('/')
@app.route('/reset', methods=["POST"])
def reset():
    """Clear the whole session (target number and guess) so a fresh game starts."""
    session.clear()
    return redirect('/')
# Start the development server; debug=True enables the reloader and tracebacks.
app.run(debug=True)
| [
"tsicroxes@gmail.com"
] | tsicroxes@gmail.com |
f2f17f58ba386a695c6117de316226c5144ad71d | 4d47afc8d51fb5c4b6f02039e809c110c9c6178c | /PrimeNumbers.py | c032b674d6fbb61758282bee6ac3578fda2f4d1c | [] | no_license | SVovk2012/Python | 4bbd93720f14ee1341dae1a4e20a133a462b2508 | 691d3481db3ed0fbc37f47ac701add84e3918532 | refs/heads/master | 2020-03-19T10:23:01.034505 | 2018-11-04T19:29:51 | 2018-11-04T19:29:51 | 136,365,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | #function returns the list of prime numbers up to the 'num' and including 'num'
def count_primes(num):
    """Return the list of primes p with 2 <= p <= num, in increasing order.

    Fixes the original implementation, which wrongly listed 1 as a prime
    (contradicting its own description), and limits trial division to odd
    divisors up to sqrt(a) instead of every b < a.
    """
    if num < 2:
        return []
    primenumberslist = [2]
    # Even numbers greater than 2 are never prime, so step through odd candidates.
    for a in range(3, num + 1, 2):
        isprime = True
        # A composite odd a must have an odd divisor no larger than sqrt(a).
        for b in range(3, int(a ** 0.5) + 1, 2):
            if a % b == 0:
                isprime = False
                break
        if isprime:
            primenumberslist.append(a)
    return primenumberslist

count_primes(100)
| [
"noreply@github.com"
] | noreply@github.com |
0c924d1161b97dbe606ec60a4a52647ccc876ffd | 74e5a5b47a479b92c4e8f69a2a1c5018cd02e4de | /ex29_What_if.py | dc4f560c49c05465e95c2c22341c739c21f0c7ff | [] | no_license | heron2014/Learning_Python_The_Hard_Way | 3fc80ee4b0a8a4be26a2b40c7b66c1cc333d4bb2 | 7300ba821a76556a39d2de32be18aa61fe50c9b1 | refs/heads/master | 2020-05-17T00:00:23.732103 | 2015-01-30T15:58:24 | 2015-01-30T15:58:24 | 29,782,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | people = 20
cats = 30
dogs = 15
# If the boolean expression evaluates to True, run the code under it; otherwise skip it.
if people < cats:
    print "Too many cats! The world is doomed!"
# NOTE(review): this condition repeats 'people < cats'; the message suggests
# 'people > cats' was intended — confirm against the exercise text.
if people < cats:
    print 'Not many cats. The world is save!'
if people < dogs:
    print "The world is drooled on!"
if people > dogs:
    print 'The worlds is dry'
dogs += 5
if people >= dogs:
    print "People are greater than or equal to dogs"
if people <= dogs:
    print "People are less than or equal to dogs"
if people == dogs:
    print "People are dogs"
# An if-statement checks whether the condition is met (evaluates to True or False).
# The if-statement tells your script 'If this boolean expression is True, then run the code under it,
# otherwise skip it.'
# Also an if-statement creates what is called a 'branch' in the code.
"a.nita@hotmail.co.uk"
] | a.nita@hotmail.co.uk |
ce66f81dd62ef4c454b93bada3202dfdabc764a2 | adbb2b958296815f9485bab60c0d38827befeeeb | /build/lib.linux-i686-2.7/gdrivefs/change.py | 394f5bedbdc47e5902688e014679cddbd2e96977 | [
"MIT"
] | permissive | gryphius/GDriveFS | 4b4619e1eefceb562ded6ae13dcc9a2c5b4c0a1b | fadfbdea019cfa4c2a821f4636380edbc8be32bc | refs/heads/master | 2021-01-18T14:14:32.028542 | 2013-04-24T06:17:03 | 2013-04-24T06:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | import logging
from threading import Lock, Timer
from gdrivefs.gdtool import AccountInfo, drive_proxy
from gdrivefs.conf import Conf
from gdrivefs.cache import PathRelations, EntryCache
from gdrivefs.timer import Timers
def _sched_check_changes():
    """Run one change-poll pass, then re-arm a Timer for the next pass.

    Each invocation schedules its successor, so this effectively polls Google
    Drive every Conf 'change_check_frequency_s' seconds once started.
    """
    logging.debug("Doing scheduled check for changes.")
    get_change_manager().process_updates()
    # Schedule next invocation.
    t = Timer(Conf.get('change_check_frequency_s'), _sched_check_changes)
    t.start()
    # Register with the global timer registry so it can be cancelled on unmount.
    Timers.get_instance().register_timer('change', t)
class _ChangeManager(object):
    """Tracks the Google Drive change feed and applies updates to local caches.

    Python 2 code (uses dict.iteritems).  Instances remember the largest
    change-ID already processed (at_change_id) and advance it as changes from
    the Drive API are applied.
    """
    # Class-level defaults; both are overwritten per-instance in __init__.
    __log = None
    at_change_id = None
    def __init__(self):
        self.__log = logging.getLogger().getChild('ChangeMan')
        try:
            self.at_change_id = AccountInfo.get_instance().largest_change_id
        except:
            self.__log.exception("Could not get largest change-ID.")
            raise
        self.__log.info("Latest change-ID at startup is (%d)." %
                        (self.at_change_id))
    def mount_init(self):
        """Called when filesystem is first mounted."""
        self.__log.debug("Change init.")
        # Kick off the recurring change-poll timer loop.
        _sched_check_changes()
    def mount_destroy(self):
        """Called when the filesystem is unmounted."""
        self.__log.debug("Change destroy.")
    def process_updates(self):
        """Process any changes to our files. Return True if everything is up to
        date or False if we need to be run again.
        """
        start_at_id = (self.at_change_id + 1)
        try:
            result = drive_proxy('list_changes', start_change_id=start_at_id)
        except:
            # Best-effort: report "up to date" so the poll loop simply retries later.
            self.__log.exception("Could not retrieve updates. Skipped.")
            return True
        (largest_change_id, next_page_token, changes) = result
        self.__log.debug("The latest reported change-ID is (%d) and we're "
                         "currently at change-ID (%d)." % (largest_change_id,
                                                           self.at_change_id))
        if largest_change_id == self.at_change_id:
            self.__log.debug("No entries have changed.")
            return True
        self.__log.info("(%d) changes will now be applied." % (len(changes)))
        # NOTE(review): iteration order of this dict is assumed to be
        # oldest-to-newest change-ID — confirm drive_proxy returns an ordered
        # mapping, since at_change_id is advanced per iteration below.
        for change_id, change_tuple in changes.iteritems():
            # Apply the changes. We expect to be running them from oldest to
            # newest.
            self.__log.info("Change with ID (%d) will now be applied." %
                            (change_id))
            try:
                self.__apply_change(change_id, change_tuple)
            except:
                self.__log.exception("There was a problem while processing change"
                                     " with ID (%d). No more changes will be "
                                     "applied." % (change_id))
                return False
            self.at_change_id = change_id
        return (next_page_token == None)
    def __apply_change(self, change_id, change_tuple):
        """Apply changes to our filesystem reported by GD. All we do is remove
        the current record components, if it's valid, and then reload it with
        what we were given. Note that since we don't necessarily know
        about the entries that have been changed, this also allows us to slowly
        increase our knowledge of the filesystem (of, obviously, only those
        things that change).
        """
        (entry_id, was_deleted, entry) = change_tuple
        is_visible = entry.is_visible if entry else None
        self.__log.info("Applying change with change-ID (%d), entry-ID [%s], and "
                        "is-visible of [%s]" % (change_id, entry_id, is_visible))
        # First, remove any current knowledge from the system.
        self.__log.debug("Removing all trace of entry with ID [%s]." % (entry_id))
        try:
            PathRelations.get_instance().remove_entry_all(entry_id)
        except:
            self.__log.exception("There was a problem remove entry with ID [%s] "
                                 "from the caches." % (entry_id))
            raise
        # If it wasn't deleted, add it back.
        self.__log.debug("Registering changed entry with ID [%s]." % (entry_id))
        if is_visible:
            path_relations = PathRelations.get_instance()
            try:
                path_relations.register_entry(entry)
            except:
                self.__log.exception("Could not register changed entry with ID "
                                     "[%s] with path-relations cache." %
                                     (entry_id))
                raise
raise
def get_change_manager():
    """Return the process-wide _ChangeManager, creating it on first use.

    Creation is guarded by a lock so concurrent first callers get one instance.
    """
    with get_change_manager.lock:
        if not get_change_manager.instance:
            get_change_manager.instance = _ChangeManager()
        return get_change_manager.instance
# Singleton state is stored as attributes on the function object itself.
get_change_manager.instance = None
get_change_manager.lock = Lock()
| [
"myselfasunder@gmail.com"
] | myselfasunder@gmail.com |
8f15048573ae6cf53c784fe29bb50ef7345fb154 | 99701affb7ae46c42c55484f3301d59f79294a10 | /project/Examples/Examples/PP2E/Dstruct/Basic/inter2.py | 200364cc5828b3f08ae4bba0989169e3e39861b8 | [] | no_license | inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall | 1050b9e9bddb335bf42b7debf2abebe51dd9f9e0 | 0f650a97d8fbaa576142e5bb1745f136b027bc73 | refs/heads/master | 2021-01-21T21:48:21.326372 | 2016-04-06T20:05:19 | 2016-04-06T20:05:19 | 42,902,523 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | def intersect(*args):
    """Return items of the first sequence that appear in every other sequence.

    Duplicates in the first sequence are kept; order follows the first sequence.
    With a single argument, a copy of that sequence is returned (the inner loop
    never runs, so the for/else always appends).
    """
    res = []
    for x in args[0]:                  # scan the first list
        for other in args[1:]:         # for all other arguments
            if x not in other: break   # this item in each one?
        else:
            res.append(x)              # add common items to the end
    return res
def union(*args):
    """Return the ordered union of the given sequences, without duplicates.

    Items appear in first-seen order; later occurrences are skipped.
    """
    combined = []
    for sequence in args:
        for element in sequence:
            # Keep only the first occurrence of each value.
            if element in combined:
                continue
            combined.append(element)
    return combined
| [
"inteljack2008@gmail.com"
] | inteljack2008@gmail.com |
7bd764e443bcc2a8bebbfbb11b929320013f5a61 | de0824b62a51d44e5031dbc9f2cd45d9e8de45ed | /tests/test_manager_implemented.py | bbbca642c1ad3de56ad36b28baacfe6f3cb524a1 | [
"MIT"
] | permissive | SecuPlus/tomcatmanager | 6447e51882a54cf7b75d50c62fdce399cad54044 | 597f3e37a520b66fc6c05984c69ec22111ef1f11 | refs/heads/main | 2023-07-15T09:58:06.387805 | 2021-08-26T18:50:34 | 2021-08-26T18:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,816 | py | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007 Jared Crapo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# pylint: disable=protected-access, missing-function-docstring
# pylint: disable=missing-module-docstring, unused-variable
from unittest import mock
import pytest
import requests
import tomcatmanager as tm
def test_implemented_by_invalid(mocker):
    """_implemented_by decorator raises the right exception in each state."""
    # - this test makes sure the _implemented_by decorator throws the proper exceptions
    # - it depends on ssl_reload() being decorated as not implemented in TomcatMajor.V7
    # - this does not attempt to test whether various methods are decorated with the
    #   proper versions of Tomcat
    tomcat = tm.TomcatManager()
    # Not connected yet: must raise TomcatNotConnected before anything else.
    with pytest.raises(tm.TomcatNotConnected):
        response = tomcat.ssl_reload()
    # pretend we are connected and use an invalid version
    cmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.is_connected",
        new_callable=mock.PropertyMock,
    )
    cmock.return_value = True
    vmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.tomcat_major_minor",
        new_callable=mock.PropertyMock,
    )
    vmock.return_value = tm.TomcatMajorMinor.V8_0
    # Connected, but ssl_reload is not implemented for 8.0.
    with pytest.raises(tm.TomcatNotImplementedError):
        response = tomcat.ssl_reload()
def test_implemented_by_decorations8_0(mocker):
    """Every Tomcat 8.0 method passes the decorator and attempts its HTTP call.

    ValueError (for the methods requiring arguments) or requests.HTTPError
    (from the patched requests.get) proves the decorator let the call through.
    """
    tomcat = tm.TomcatManager()
    cmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.is_connected",
        new_callable=mock.PropertyMock,
    )
    cmock.return_value = True
    vmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.tomcat_major_minor",
        new_callable=mock.PropertyMock,
    )
    vmock.return_value = tm.TomcatMajorMinor.V8_0
    # don't care if this errors because all we care is that the decorator
    # allowed us to try and make a HTTP request. Functionality of the
    # decorated method is tested elsewhere
    gmock = mocker.patch("requests.get")
    gmock.side_effect = requests.HTTPError
    with pytest.raises(ValueError):
        tomcat.deploy_localwar(None, None)
    with pytest.raises(ValueError):
        tomcat.deploy_serverwar(None, None)
    with pytest.raises(ValueError):
        tomcat.deploy_servercontext(None, None)
    with pytest.raises(ValueError):
        tomcat.undeploy(None)
    with pytest.raises(ValueError):
        tomcat.start(None)
    with pytest.raises(ValueError):
        tomcat.stop(None)
    with pytest.raises(ValueError):
        tomcat.reload(None)
    with pytest.raises(ValueError):
        tomcat.sessions(None)
    with pytest.raises(ValueError):
        tomcat.expire(None)
    # The call_count assertions confirm each method actually issued a request.
    with pytest.raises(requests.HTTPError):
        response = tomcat.list()
    assert gmock.call_count == 1
    with pytest.raises(requests.HTTPError):
        response = tomcat.ssl_connector_ciphers()
    assert gmock.call_count == 2
    with pytest.raises(requests.HTTPError):
        response = tomcat.server_info()
    assert gmock.call_count == 3
    with pytest.raises(requests.HTTPError):
        response = tomcat.status_xml()
    assert gmock.call_count == 4
    with pytest.raises(requests.HTTPError):
        response = tomcat.vm_info()
    assert gmock.call_count == 5
    with pytest.raises(requests.HTTPError):
        response = tomcat.thread_dump()
    assert gmock.call_count == 6
    with pytest.raises(requests.HTTPError):
        response = tomcat.resources()
    assert gmock.call_count == 7
    with pytest.raises(requests.HTTPError):
        response = tomcat.find_leakers()
    assert gmock.call_count == 8
# Tomcat versions exercised by the parametrized decorator test below.
TOMCAT_MAJORS = [
    tm.TomcatMajorMinor.V8_5,
    tm.TomcatMajorMinor.V9_0,
    tm.TomcatMajorMinor.V10_0,
    tm.TomcatMajorMinor.VNEXT,
]
# Each row drives one parametrized case: how to call the method and which
# exception proves the decorator allowed the call through.
METHOD_MATRIX = [
    # ( method name, number of arguments, expected exception )
    ("deploy_localwar", 2, ValueError),
    ("deploy_serverwar", 2, ValueError),
    ("deploy_servercontext", 2, ValueError),
    ("undeploy", 1, ValueError),
    ("start", 1, ValueError),
    ("stop", 1, ValueError),
    ("reload", 1, ValueError),
    ("sessions", 1, ValueError),
    ("expire", 1, ValueError),
    ("list", 0, requests.HTTPError),
    ("ssl_connector_ciphers", 0, requests.HTTPError),
    ("ssl_connector_certs", 0, requests.HTTPError),
    ("ssl_connector_trusted_certs", 0, requests.HTTPError),
    ("ssl_reload", 0, requests.HTTPError),
    ("server_info", 0, requests.HTTPError),
    ("status_xml", 0, requests.HTTPError),
    ("vm_info", 0, requests.HTTPError),
    ("thread_dump", 0, requests.HTTPError),
    ("resources", 0, requests.HTTPError),
    ("find_leakers", 0, requests.HTTPError),
]
@pytest.mark.parametrize("tomcat_major_minor", TOMCAT_MAJORS)
@pytest.mark.parametrize("method, arg_count, exc", METHOD_MATRIX)
def test_implemented_by_decorations_short(
    mocker, tomcat_major_minor, arg_count, method, exc
):
    """Cross-product of METHOD_MATRIX x TOMCAT_MAJORS: decorator must allow each call."""
    tomcat = tm.TomcatManager()
    cmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.is_connected",
        new_callable=mock.PropertyMock,
    )
    cmock.return_value = True
    vmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.tomcat_major_minor",
        new_callable=mock.PropertyMock,
    )
    vmock.return_value = tomcat_major_minor
    # don't care if this errors because all we care is that the decorator
    # allowed us to try and make a HTTP request. Functionality of the
    # decorated method is tested elsewhere
    gmock = mocker.patch("requests.get")
    gmock.side_effect = requests.HTTPError
    with pytest.raises(exc):
        # Dispatch by name with the right arity for this matrix row.
        method = getattr(tomcat, method)
        if arg_count == 2:
            method(None, None)
        elif arg_count == 1:
            method(None)
        else:
            method()
###
#
# validate the implements() and implemented_by() methods
#
###
def test_implements(tomcat):
    """implements() accepts either a bound method or its name (connected fixture)."""
    assert tomcat.implements(tomcat.list)
    assert tomcat.implements("list")
def test_implements_not_decorated(tomcat):
    """implements() is False for methods without the _implemented_by decorator."""
    # see what happens if we passed an undecorated method
    assert not tomcat.implements("connect")
def test_implements_not_connected(tomcat, mocker):
    """implements() requires a connection and raises TomcatNotConnected otherwise."""
    cmock = mocker.patch(
        "tomcatmanager.tomcat_manager.TomcatManager.is_connected",
        new_callable=mock.PropertyMock,
    )
    cmock.return_value = False
    with pytest.raises(tm.TomcatNotConnected):
        assert tomcat.implements(tomcat.list)
def test_implemented_by_method():
    """implemented_by() answers for an explicit version without a connection."""
    tomcat = tm.TomcatManager()
    assert tomcat.implemented_by(tomcat.list, tm.TomcatMajorMinor.V9_0)
    assert tomcat.implemented_by("list", tm.TomcatMajorMinor.VNEXT)
def test_implemented_by_method_invalid():
    """implemented_by() is False for unsupported versions and unknown method names."""
    tomcat = tm.TomcatManager()
    assert not tomcat.implemented_by("ssl_reload", tm.TomcatMajorMinor.V8_0)
    assert not tomcat.implemented_by("notamethod", tm.TomcatMajorMinor.V9_0)
| [
"kotfu@kotfu.net"
] | kotfu@kotfu.net |
5260e5f6e9e62dff2851c2a69b0d9942a5673c04 | ccbb7fb8fda4d936e765263f05a435058b397bd9 | /src/guiltytargets/ppi_network_annotation/pipeline.py | 4556892fb1e8316cdaac58aa4319506234f86649 | [
"MIT"
] | permissive | GuiltyTargets/guiltytargets | 5a5d3ba9e45867a64c81a91529ae6689f8be447f | c20a5cae6c9cc71c2ca73080a862abe986bc34c0 | refs/heads/master | 2022-02-13T03:30:49.705239 | 2021-12-22T12:51:20 | 2021-12-22T12:51:20 | 154,318,881 | 10 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | # -*- coding: utf-8 -*-
"""Functions to easily set up the network."""
import logging
from typing import List, Optional
from .model.gene import Gene
from .model.network import Network
from .parsers import parse_csv, parse_disease_associations, parse_disease_ids, parse_excel, parse_ppi_graph
__all__ = [
'generate_ppi_network',
'parse_dge',
]
logger = logging.getLogger(__name__)
def generate_ppi_network(
    ppi_graph_path: str,
    dge_list: List[Gene],
    max_adj_p: float,
    max_log2_fold_change: float,
    min_log2_fold_change: float,
    ppi_edge_min_confidence: Optional[float] = None,
    current_disease_ids_path: Optional[str] = None,
    disease_associations_path: Optional[str] = None,
) -> Network:
    """Generate the protein-protein interaction network.

    :param ppi_graph_path: Path to the PPI graph file (e.g. HIPPIE).
    :param dge_list: Differential-expression results used to annotate the network.
    :param max_adj_p: Adjusted p-value threshold passed to :class:`Network`.
    :param max_log2_fold_change: Upper log2 fold-change threshold passed to :class:`Network`.
    :param min_log2_fold_change: Lower log2 fold-change threshold passed to :class:`Network`.
    :param ppi_edge_min_confidence: Minimum confidence for keeping a PPI edge, if any.
    :param current_disease_ids_path: Optional path to the disease ids of interest.
    :param disease_associations_path: Optional path to gene-disease associations;
        only used when *current_disease_ids_path* is also given.
    :return Network: Protein-protein interaction network with information on differential expression.
    """
    # Compilation of a protein-protein interaction (PPI) graph (HIPPIE)
    protein_interactions = parse_ppi_graph(ppi_graph_path, ppi_edge_min_confidence)
    # simplify() removes duplicate edges and self-loops from the graph.
    protein_interactions = protein_interactions.simplify()
    if disease_associations_path is not None and current_disease_ids_path is not None:
        current_disease_ids = parse_disease_ids(current_disease_ids_path)
        disease_associations = parse_disease_associations(disease_associations_path,
                                                          current_disease_ids)
    else:
        disease_associations = None
    # Build an undirected weighted graph with the remaining interactions based on Entrez gene IDs
    network = Network(
        protein_interactions,
        max_adj_p=max_adj_p,
        max_l2fc=max_log2_fold_change,
        min_l2fc=min_log2_fold_change,
    )
    network.set_up_network(dge_list, disease_associations=disease_associations)
    return network
def parse_dge(
    dge_path: str,
    entrez_id_header: str,
    log2_fold_change_header: str,
    adj_p_header: str,
    entrez_delimiter: str,
    base_mean_header: Optional[str] = None,
) -> List[Gene]:
    """Parse a differential expression file (.xlsx, .csv, or .tsv).

    :param dge_path: Path to the file; the extension selects the parser.
    :param entrez_id_header: Header for the Entrez identifier column.
    :param log2_fold_change_header: Header for the log2 fold change column.
    :param adj_p_header: Header for the adjusted p-value column.
    :param entrez_delimiter: Delimiter between Entrez ids.
    :param base_mean_header: Header for the base mean column.
    :return: A list of genes.
    :raises ValueError: If the file extension is not supported.
    """
    # All parsers take the same keyword arguments; build them once so the
    # per-format branches cannot drift apart.
    kwargs = dict(
        entrez_id_header=entrez_id_header,
        log_fold_change_header=log2_fold_change_header,
        adjusted_p_value_header=adj_p_header,
        entrez_delimiter=entrez_delimiter,
        base_mean_header=base_mean_header,
    )
    if dge_path.endswith('.xlsx'):
        return parse_excel(dge_path, **kwargs)
    if dge_path.endswith('.csv'):
        return parse_csv(dge_path, **kwargs)
    if dge_path.endswith('.tsv'):
        # Tab-separated files need an explicit field separator.
        return parse_csv(dge_path, sep="\t", **kwargs)
    raise ValueError(f'Unsupported extension: {dge_path}')
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
5582e0c04ffcb5fecce6af3812ec4c05c1be9fb2 | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/thnhatrangvn.py | 220429db599deaabf7822d301bccd557a783a259 | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='home-center']/div[@class='product-info']/div[@class='product-name']/h1",
'price' : "//div[@class='product-right']/div[@class='product-price']/p[@class='cssPriceSpecial']/b",
'category' : "//div[@class='wrap']/div[@class='home-content']/div[@class='category-path']/a",
'description' : "//div[@id='pro_content_desc']/div//span",
'images' : "//div[@id='pro_big']/a/@href",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'thnhatrang.vn'
allowed_domains = ['thnhatrang.vn']
start_urls = ['http://thnhatrang.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-p\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-c\d+\.html']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
c3270fdaccc023a16cea14d71a33d0ce8194251a | 3c3d978649517493745d9ce9bb44362a0fb53dbb | /get_pairs_chinese/get_text_pair_lcqmc.py | bb3c6f672230313c6e12e068fe0113ac2e4f2fff | [
"Apache-2.0"
] | permissive | fighting41love/text_data_enhancement_with_LaserTagger | 4679b37a416758521f732438bd91b9269217973e | 64b7b2a136820a52b40646bd71495c8e865a9827 | refs/heads/master | 2022-10-13T12:25:42.641144 | 2020-06-08T10:07:19 | 2020-06-08T10:07:19 | 271,198,161 | 2 | 0 | Apache-2.0 | 2020-06-10T06:34:53 | 2020-06-10T06:34:53 | null | UTF-8 | Python | false | false | 2,417 | py | # coding=utf-8
# 利用文本匹配的语料,从正例中采样得到句子对(A,B),然后训练模型把A改写成B
# 当前是针对LCQMC 改了几条语料的标注
import random
import os
import sys
sys.path.append("..")
from compute_lcs import _compute_lcs
from curLine_file import curLine
def process(corpus_folder, raw_file_name, save_folder):
corpus_list = []
for name in raw_file_name:
raw_file = os.path.join(corpus_folder, name)
with open(raw_file, "r") as fr:
lines = fr.readlines()
for i ,line in enumerate(lines):
source, target, label = line.strip().split("\t")
if label=="0" or source==target:
continue
if label != "1":
input(curLine()+line.strip())
length = float(len(source) + len(target))
source_length = len(source)
if source_length > 8 and source_length<38 and (i+1)%2>0: # 对50%的长句构造交换操作
rand = random.uniform(0.4, 0.9)
source_pre = source
swag_location = int(source_length*rand)
source = "%s%s" % (source[swag_location:], source[:swag_location])
lcs1 = _compute_lcs(source, target)
lcs_rate= len(lcs1)/length
if (lcs_rate<0.4):# 差异大,换回来
source = source_pre
else:
print(curLine(), "source_pre:%s, source:%s, lcs_rate=%f" % (source_pre, source, lcs_rate))
lcs1 = _compute_lcs(source, target)
lcs_rate = len(lcs1) / length
if (lcs_rate<0.2):
continue # 变动过大,忽略
# if (lcs_rate<0.4):
# continue # 变动过大,忽略
# if len(source)*1.15 < len(target):
# new_t = source
# source = target
# target = new_t
# print(curLine(), source, target, ",lcs1:",lcs1 , ",lcs_rate=", lcs_rate)
corpus = "%s\t%s\t%f\n" % (source, target, lcs_rate)
corpus_list.append(corpus)
print(curLine(), len(corpus_list), "from %s" % raw_file)
save_file = os.path.join(save_folder, "lcqmc.txt")
with open(save_file, "w") as fw:
fw.writelines(corpus_list)
print(curLine(), "have save %d to %s" % (len(corpus_list), save_file))
if __name__ == "__main__":
corpus_folder = "/home/cloudminds/Mywork/corpus/Chinese_QA/LCQMC"
raw_file_name = ["train.txt", "dev.txt", "test.txt"]
save_folder = "/home/cloudminds/Mywork/corpus/rephrase_corpus"
process(corpus_folder, raw_file_name, save_folder)
| [
"785092099@qq.com"
] | 785092099@qq.com |
f181b10e9cc80527a418bb758860d3196cdc888e | 013d9f5ae8f5c35e31fae89520813e61450e9b94 | /webdevops/settings.py | 0ad36c3ca90004f0d7e09fb62b123bcf57a22cec | [] | no_license | daysunshine/webdevops | f83ae6aa2feec10ee02cdff339d8c71cc0c4e9ce | 3787bd65e54250e11eb02918df6ceb389010ecfa | refs/heads/master | 2022-03-20T21:22:29.246769 | 2019-12-23T08:16:17 | 2019-12-23T08:16:17 | 226,041,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | py | """
Django settings for webdevops project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=0k)v8(78v^8dkwjsfdui*tsah1-$ctxd8&upexx&f=_-pk+09'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webuser',
'app',
# 'webserver',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webdevops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webdevops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST':'192.168.5.254',
'PORT':3308,
'NAME': 'webdevops',
'USER':'webdev',
'PASSWORD':'webdev123456',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
# 缓存配置
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://192.168.5.254:6379/5",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PASSWORD":"12345678",
}
}
}
#
# #作为session backend 使用配置
# # 将session缓存在Redis中
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
SESSION_COOKIE_NAME = "sessionid" # session 的 cookie 保存在浏览器上时的 key,即:sessionid=随机字符串(默认)
SESSION_COOKIE_PATH = "/" # session 的 cookie 保存的路径(默认)
SESSION_COOKIE_DOMAIN = None # session 的 cookie 保存的域名(默认)
SESSION_COOKIE_SECURE = False # 是否用 https 传输 cookie(默认)
SESSION_COOKIE_HTTPONLY = True # 是否 session 的 cookie 只支持 http 传输(默认)
# SESSION_COOKIE_AGE = 1209600 # session 的 cookie 失效日期(2 个星期)(默认)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # 是否关闭浏览器的时候让 session 过期(默认)
SESSION_SAVE_EVERY_REQUEST = False # 是否每次请求都保存 session,默认修改之后才保存(默认) | [
"1558526652@qq.com"
] | 1558526652@qq.com |
638342b991b3abab3fc9f44d4cec5456f14da836 | fc3546dae25e362728d6c832d20387b2888b9c98 | /aes.py | 4c5b46de2e8566b36ff81cda1bf146fc96f103f3 | [] | no_license | clefru/mypyaes | bbd3a9804941a700e4e4c6be6832d5acf441ef92 | dfbee6061bd76709db223df30cd99ab22d56b87c | refs/heads/master | 2020-04-15T02:14:05.388155 | 2019-01-29T09:50:09 | 2019-01-29T09:50:09 | 164,307,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,309 | py | #!/usr/bin/python
# Copyright 2004, 2019, Clemens Fruhwirth <clemens@endorphin.org>
# My pedagogicial Rijndael implementation in Python.
# It uses a minimal amount of magic values (no predefined lookup
# tables) and if so, they are marked with MAGIC
# DO NOT USE IN PRODUCTION OR WITH ANY PRODUCTION SECRETS. THIS IS MY
# TOY IMPLEMENTATION OF AES, WHICH I WROTE IN 2004 TO LEARN PYTHON AND
# AES.
from tmath import *
from marshal import *
import copy
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("aes.py")
# These are the basic fields we're using in Rijndael
# Residue class 2 field
Z2 = Z(2)
# Polynomial over field Z2
POFZ2 = POF(Z2)
# Reduction polynomial in POFZ2 as defined by Page 36 of the Rijndael book. MAGIC
rp = POFZ2.fromInt(0x11b)
# Galois field over Z2 with reduction polynomial
GFPOFZ2 = GFPOF(Z2, rp)
def debug(msg, state):
logger.debug(msg + " " + dumpStateHex(state))
def fGen(a, mask):
"""This implements the vector multiplication on Page 36 of the Rijndael book.
Unfortunately this function conflates the large box generation with
the multiplication itself. Refactor.
"""
def rol(val, shift, length):
if val > (1 << length):
raise Error
return ((val << shift) & ((1 << length)-1)) | (val >> (length-shift))
def XORsum(a, length):
r = 0
while not a == 0:
r = r ^ (a&1)
a = a >> 1
return r
res = []
for i in range(0, 8):
res.append(XORsum(a & mask, 8))
mask = rol(mask, 1, 8)
return fromBin(res)
def f(a):
"""f, as defined by the Rijndael book Page 36."""
# MAGIC
return fGen(a, 0xF1) ^ 0x63
def fInv(a):
"""f^-1, as defined by the Rijndael book Page 37."""
# MAGIC
return fGen(a, 0xA4) ^ 0x05
def g(a):
"""g, as defined by the Rijndael book Page 36.
This is just the multiplicative inverse under GFPOFZ2.
"""
if a == 0: return 0
return fromBin(POL2L(GFPOFZ2.fromInt(a).mulInv()))
def SR(a):
return STable[a]
def SRInv(a):
return SInvTable[a]
STable = bytearray()
SInvTable = bytearray()
for i in range(0, 0x100):
STable.append(f(g(i)))
SInvTable.append(g(fInv(i)))
RCCache = [0x00, 0x01]
def RC(a):
if a >= len(RCCache):
newval = xtime(RC(a-1))
RCCache.append(newval)
return RCCache[a]
def xtime(a):
pol = GFPOFZ2.fromInt(a)
newpol = pol.xtime()
return fromBin(EL2L(newpol.toEL()))
def keyExpansion(cipherKey, nr, nk, nb):
expandedKey = []
for j in range(0, nk):
expandedKey.append(cipherKey[j])
for j in range(nk, nb*(nr+1)):
sub = bytearray()
if j % nk == 0:
sub.append(expandedKey[j-nk][0] ^ SR(expandedKey[j-1][1]) ^ RC(j/nk))
for i in range(1, 4):
sub.append(expandedKey[j-nk][i] ^ SR(expandedKey[j-1][(i+1)%4]))
elif j % nk == 4 and nk > 6:
for i in range(0, 4):
sub.append(expandedKey[j-nk][i] ^ SR(expandedKey[j-1][i]))
else:
for i in range(0, 4):
sub.append(expandedKey[j-nk][i] ^ expandedKey[j-1][i])
expandedKey.append(sub)
return expandedKey
def SubBytes(state, function):
"""Sec 3.4.1 of the Rijndael book."""
r = []
for i in state:
r.append(map(function, i))
return r
# MAGIC
ShiftRowsOffsets = [
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 3, 4]
]
def ShiftRows(state, amp):
"""Sec 3.4.2 of the Rijndael book."""
offsets = ShiftRowsOffsets[len(state) - 4]
newstate = copy.deepcopy(state)
for j in range(0, len(state)):
for i in range(0, 4):
newstate[j][i] = state[(j + offsets[i] * amp) % len(state)][i]
return newstate
def RORRay(array, amount):
new = bytearray()
for i in array[-amount:]:
new.append(i)
for i in array[0:-amount]:
new.append(i)
return new
def SingleMixColumn(stateSub, coeffs):
resStateSub = bytearray()
localcoeffs = RORRay(coeffs, 0)
for j in range(0, 4):
res = GFPOFZ2.plusID()
# print "LC: ", localcoeffs
for i in range(0, 4):
pol1 = GFPOFZ2.fromInt(stateSub[i])
pol2 = GFPOFZ2.fromInt(localcoeffs[i])
mulres = GFPOFZ2.mul(pol1, pol2)
# print "pol1:", pol1, "pol2:", pol2, "mulres: ", mulres
res = GFPOFZ2.plus(res, mulres)
fb = fromBin(EL2L(res.toEL()))
resStateSub.append(fb)
localcoeffs = RORRay(localcoeffs, 1)
return resStateSub
def MixColumns(state, coeffs):
"""Sec 3.4.3 of the Rijndael book."""
return map(lambda x: SingleMixColumn(x, coeffs), state)
def AddRoundKey(state, subkey):
"""Sec 3.4.4 of the Rijndael book."""
return map(
lambda stateSL, keySL: map(
lambda stateE, keyE: stateE^keyE, stateSL, keySL),
state, subkey)
def rnd(state, subkey, nr):
logger.debug("R[%02d].start %s" % (nr, dumpStateHex(state)))
state = SubBytes(state, SR)
logger.debug("R[%02d].s_box %s" % (nr, dumpStateHex(state)))
state = ShiftRows(state, 1)
logger.debug("R[%02d].s_row %s" % (nr, dumpStateHex(state)))
# MAGIC
state = MixColumns(state, [0x02, 0x03, 0x01, 0x01])
logger.debug("R[%02d].m_col %s" % (nr, dumpStateHex(state)))
state = AddRoundKey(state, subkey)
logger.debug("R[%02d].k_sch %s" % (nr, dumpStateHex(subkey)))
return state
def invRnd(state, subkey, nr):
state = AddRoundKey(state, subkey)
# MAGIC
state = MixColumns(state, [0x0E, 0x0B, 0x0D, 0x09])
state = ShiftRows(state, -1)
state = SubBytes(state, SRInv)
return state
def finalRnd(state, key, nr):
logger.debug("R[%02d].start %s" % (nr, dumpStateHex(state)))
state = SubBytes(state, SR)
logger.debug("R[%02d].s_box %s" % (nr, dumpStateHex(state)))
state = ShiftRows(state, 1)
logger.debug("R[%02d].s_row %s" % (nr, dumpStateHex(state)))
state = AddRoundKey(state, key)
logger.debug("R[%02d].k_sch %s" % (nr, dumpStateHex(key)))
return state
def invFinalRnd(state, key, nr):
state = AddRoundKey(state, key)
state = ShiftRows(state, -1)
state = SubBytes(state, SRInv)
return state
def rijndael(msg, key):
state = arrayToState(msg)
cipherKey = arrayToState(key)
nb = len(state)
nk = len(cipherKey)
nr = max(nb, nk)+6
expandedKey = keyExpansion(cipherKey, nr, nk, nb)
logger.debug("R[00].input %s" % dumpStateHex(state))
state = AddRoundKey(state, expandedKey[0:nb])
logger.debug("R[%02d].k_sch %s" % (nr, dumpStateHex(expandedKey[0:nb])))
for i in range(1, nr):
subkey = expandedKey[nb*i:nb*(i+1)]
state = rnd(state, expandedKey[nb*i:nb*(i+1)], i)
state = finalRnd(state, expandedKey[nb*(nr):nb*(nr+1)], nr)
logger.debug("R[%02d].output %s" % (nr, dumpStateHex(state)))
return stateToArray(state)
def invRijndael(msg, key):
state = arrayToState(msg)
cipherKey = arrayToState(key)
nb = len(state)
nk = len(cipherKey)
nr = max(nb, nk)+6
expandedKey = keyExpansion(cipherKey, nr, nk, nb)
state = invFinalRnd(state, expandedKey[nb * nr:nb*(nr + 1)], nr)
for i in range(nr-1, 0, -1):
subkey = expandedKey[nb * i:nb * (i + 1)]
state = invRnd(state, expandedKey[nb * i:nb*(i + 1)], i)
state = AddRoundKey(state, expandedKey[0:nb])
return stateToArray(state)
def arrayToState(array):
state = []
if len(array)%4 != 0:
raise StandardError
for i in range(0, len(array) / 4):
state.append(bytearray(array[i * 4:(i + 1) * 4]))
return state
def stateToArray(state):
array = bytearray()
for i in state:
for j in i:
array.append(j)
return array
def dumpStateHex(state):
s = "["
for i in state:
s += "["
for j in i:
s += "%02X" % j
s += "],"
s += "]"
return s
if __name__ == '__main__':
# D.2 Rijndael test vectors
# This enc test vector comes from Page 216 in the Rijndael
key = bytearray([
0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c])
plaintext = bytearray([
0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d,
0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34])
# Labelled R[10].output in output
ciphertext = bytearray([ 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb,
0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32])
logger.setLevel(logging.DEBUG)
enc = rijndael(plaintext, key)
print "Test vector check: ", enc == ciphertext
dec = invRijndael(enc, key)
print "Encryption symmetry:", plaintext == dec
| [
"clemens@endorphin.org"
] | clemens@endorphin.org |
2e557a9d2e7e6a2d371e64ee7999559e8bea6d75 | d62caf4015dbb6017c1078d59b36b9d5c90a13f0 | /education_system_a/manage.py | 0e9ed335cb8aae5777ae24b6bda1137560ff384a | [] | no_license | WuChuYi/education | 3e007932daae212a36923a04b0fc79550cf3ab05 | 5a9f431e22f61b27b730fafba521e78b045eec81 | refs/heads/master | 2020-03-19T03:11:04.516257 | 2018-06-01T11:19:24 | 2018-06-01T11:19:24 | 135,702,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "education_system_a.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"chuyiwu@foxmail.com"
] | chuyiwu@foxmail.com |
c507865b007f982465904baf2c3b60f92f089965 | d15b12580c59ad3c742ebd7cc8994d61ec54bdd7 | /application/app/migrations/0002_auto_20180327_2104.py | 2dd7bc73c0bfd98e17a9a10885c07a4eacddde8d | [] | no_license | Nick1994209/wordknow | 6443f6a50858d367bcb86d8657bcf3cdcf7aef8e | 56d10e32125b9875c95acfbbca07d082493472be | refs/heads/master | 2023-01-18T15:14:11.299101 | 2020-09-18T07:18:36 | 2020-09-18T07:18:36 | 126,932,851 | 0 | 0 | null | 2022-12-08T05:45:35 | 2018-03-27T05:20:56 | Python | UTF-8 | Python | false | false | 434 | py | # Generated by Django 2.0.3 on 2018-03-27 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='chat_id',
field=models.CharField(max_length=100, unique=True, verbose_name='ID чата в телеграме'),
),
]
| [
"NVKorolkov@domclick.ru"
] | NVKorolkov@domclick.ru |
b187b3dd4ec4a595ec4a5689b6746f1d8a35d782 | ed520426baa0cb046ed1d5b59fc5b387cae966ee | /plotData.py | 0131b14f8aac05139cc117ddb861e1de162183b5 | [] | no_license | juanfvallejo125/MiniSegway_Data_Collection | 983eb73e66bcaa28a38f4c284dee0c6dcbbb9e33 | f5de229ddd4169156b5d46e0749d67b1bcbf6dcf | refs/heads/master | 2023-02-21T15:21:40.668386 | 2021-01-22T21:26:32 | 2021-01-22T21:26:32 | 323,231,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | import numpy as np
import matplotlib.pyplot as plt
def main():
file = open('test_1_1_21_21_5.txt')
all_arguments = []
all_values = np.array([], dtype = float)
line = file.readline()
while(line): # Parse through the entire file
if(line[0] == '$'): # Data lines are marked with a dollar sign at the start
line = line[1:-1]
data_list = line.split()
arguments = data_list[0::2]
values = data_list[1::2]
for arg in arguments:
all_arguments.append(arg)
for value in values:
all_values = np.append(all_values, value)
line = file.readline()
# print(len(all_arguments))
# print(all_values.size)
arg_set = set(all_arguments)
unique_args = list(arg_set)
all_arguments = np.array(all_arguments)
data_mat = np.array([]);
data_dict = {};
for arg in unique_args:
mask = all_arguments == arg
# if(len(data_mat) > 0):
# data_mat = np.vstack((data_mat, all_values[mask]))
# else:
# data_mat = np.append(data_mat, all_values[mask])
data_dict[arg] = np.array(all_values[mask], dtype = float)
# print(data_dict)
# Normalize Ms data to get x_values
x_values = data_dict['Ms'] - data_dict['Ms'].min()
# print(data_dict['Inner_Setpoint'])
# print(data_dict['Inner_Setpoint'].dtype)
i = 1;
# Outer PID Figure
plt.figure(1)
plt.plot(x_values, data_dict['Speed'], x_values, data_dict['Outer_Setpoint'], 'k.')
plt.title('Outer PID')
plt.legend(['Speed', 'Outer Setpoint'])
# # Inner PID Figure
# plt.figure(2)
# plt.title('Inner PID')
# plt.plot( x_values, data_dict['Inner_Setpoint'],x_values, data_dict['Angle'])
# plt.legend(['Inner Setpoint', 'Angle'])
# # Encoder figure
# plt.figure(3)
# plt.title('Encoders')
# plt.plot(x_values, data_dict['Right_encoder'], x_values, data_dict['Left_encoder'])
# plt.legend(['Right Encoder', 'Left Encoder'])
# # Odometry figure
# plt.figure(4)
# plt.title('Odometry')
# plt.plot(x_values, data_dict['Speed'], x_values, data_dict['Left_velocity']*2*np.pi*50.25/240, x_values, data_dict['Right_velocity']*2*np.pi*50.25/240)
# plt.legend(['Speed', 'Left speed', 'Right speed'])
# Turning rate figure
plt.figure(5)
plt.title('Turning Rate')
plt.plot(x_values, data_dict['Turning_Rate'], x_values, data_dict["Turn_Setpoint"], 'k.')
plt.legend(['Turning Rate', 'Turning Setpoint'])
# plt.figure(6)
# plt.title("Millis difference")
# plt.plot(np.diff(data_dict['Ms']))
# #PWM Values
# plt.figure(7)
# plt.title("PWM and Inner Loop Output")
# plt.plot(x_values, data_dict['PWM_Right'], x_values, data_dict['PWM_Left'], x_values, data_dict['Inner_PID_Output'])
# plt.legend(['Right PWM', 'Left PWM', 'Inner PID Output'])
# #Inner PID Output components
# plt.figure(8)
# plt.title("Inner PID Output")
# plt.plot(x_values, data_dict['Inner_PID_Output'], x_values, data_dict['Inner_Out_D'], x_values, data_dict['Inner_Out_P'])
# plt.legend(['PID Output', 'D output', 'P Output'])
# #Outer PID Output components
# plt.figure(10)
# plt.title("Outer PID Output")
# plt.plot(x_values, data_dict['Outer_PID_Output'], x_values, data_dict['Outer_Out_I'], x_values, data_dict['Outer_Out_P'])
# plt.legend(['PID Output', 'I output', 'P Output'])
# #IMU data
# plt.figure(9)
# plt.title("IMU")
# plt.plot(x_values, data_dict['Angle'], x_values, data_dict['Angular_Rate'], x_values, data_dict['Angle_Accel'])
# plt.legend(['Filtered Angle', 'Angular Rate', 'Accelerometer Angle'])
# for arg in unique_args:
# # if(arg == 'Right_encoder' or arg == 'Left_encoder' or arg == 'Right_velocity' or arg == 'Left_velocity'):
# plt.figure(i)
# plt.plot(data_dict[arg])
# plt.title(arg)
# i = i+1
plt.show()
# print(data_mat.shape)
# print(unique_args)
# print(len(unique_args))
if __name__ == '__main__':
main() | [
"juanfvallejo125@gmail.com"
] | juanfvallejo125@gmail.com |
6f3448d29f55f76cbc5b23cf246a84cbabe6592b | 148b6d9b43f1096960effc0bb4264686914f3c21 | /new_folder/deyisiklik.py | 1e6b0dfe99f5fdbb214ed48500cb333364f36fcc | [] | no_license | farxad91/GitDersleri | cada4a9a5e2283fbeb29be83f5d786295bc3e9d6 | f7a1c4209e96a56e26e022ebeb7d986490156528 | refs/heads/master | 2021-05-18T02:20:58.007757 | 2020-03-29T14:30:27 | 2020-03-29T14:30:27 | 251,063,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | def ustegel(x + y);
return x + y
def salam ();
print("hello")
| [
"farxadgambarov91.fg@gmail.com"
] | farxadgambarov91.fg@gmail.com |
11416eba932d3ceb66c0fb690e4d5d2c5b45fb53 | 37430b0cdf24f602e16a81b344f32a05bd6e2251 | /monitoring/trademark_monitor.py | e8e6a9f90202ddddbcdf7294667219a354094e68 | [
"MIT"
] | permissive | evelyn9191/trademark_monitor | 626be61f4823384d09358008ccf40d896925d77c | c82a70532e0e92a29e8f0f4a35c8ce71b2b76859 | refs/heads/master | 2023-03-29T20:40:05.758313 | 2018-12-19T21:56:18 | 2020-02-29T08:41:29 | 165,064,717 | 0 | 1 | MIT | 2021-03-20T20:03:19 | 2019-01-10T13:28:52 | Python | UTF-8 | Python | false | false | 9,451 | py | # Trademark monitor fills out search form at www.tmnd.org with provided data,
# and sends the results as html text within an email to receiver's email address.
# Script is then run once a month via Cron.
#
# There has to be module :email_data with the variable named 'receiver'.
# The variable contains email address of the monitoring receiver.
import platform
import re
import os
import time
from pathlib import Path
from selenium.webdriver.chrome.webdriver import WebDriver
from seleniumwire import webdriver
from bs4 import BeautifulSoup
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import glob
from monitoring import email_data
class OSNotRecognized(Exception):
"""Raise if OS is not recognized."""
def check_os() -> WebDriver:
"""Check OS to install the correct driver for Chrome."""
operation_system = platform.system()
chromedriver_folder = Path(__file__).absolute().parents[1] / "chromedriver"
if 'linux' in operation_system.lower():
options = webdriver.ChromeOptions()
driver = webdriver.Chrome(executable_path="/usr/bin/chromedriver", chrome_options=options)
elif 'darwin' in operation_system.lower():
driver = webdriver.Chrome(executable_path=chromedriver_folder / "chromedriver_mac")
elif 'win' in operation_system.lower():
driver = webdriver.Chrome(executable_path=(chromedriver_folder / "chromedriver.exe"))
else:
raise OSNotRecognized('Couldn\'t find out your operation system. Program will stop.')
return driver
def access_search_form(driver):
"""Open the website TrademarkView and access its advanced search form."""
driver.get('https://www.tmdn.org/tmview/welcome')
# Break captcha before proceeding
driver.find_element_by_id('lnkAdvancedSearch').click() # Access advanced search form
def search_trademarks(driver, trademark_name, nice_class, searched_id, vienna_class):
"""Fill out the advance search form with trademark parameters and download web page."""
driver.find_element_by_class_name('DesignatedTerritoriesControl').click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Select all EU member states')]").click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Select all Non-EU member states')]").click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Trade mark offices')]").click() # Click away
driver.find_element_by_class_name('SelectedOfficesControl').click()
driver.find_element_by_xpath(".//*[contains(text(), 'Select All')]").click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Trade mark offices')]").click() # Click away
driver.find_element_by_id('TrademarkName').send_keys(trademark_name)
driver.find_element_by_class_name('TrademarkStatusControl').click()
driver.find_element_by_xpath(".//*[contains(text(), 'Filed')]").click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Trade mark offices')]").click() # Click away
driver.find_element_by_id('NiceClass').send_keys(nice_class)
if vienna_class is not None:
driver.find_element_by_id('ViennaClass').send_keys(vienna_class)
driver.find_element_by_class_name('SortControl').click()
driver.find_element_by_xpath("//select[@name='cmbSortField']"
"/option[text()='Application date']").click()
driver.find_element_by_xpath(".//*[contains(text(), "
"'Trade mark offices')]").click() # Click away
driver.find_element_by_class_name('cmbOrderControl').click()
driver.find_element_by_xpath(".//*[contains(text(), 'Descending')]").click()
driver.find_element_by_id('SearchCopy').click() # Start searching
time.sleep(10) # Wait till the database content loads
print('Searched through successfully for TM {}'.format(searched_id))
html_source = driver.page_source
searched_name = searched_id
with open('tm_{}.html'.format(searched_name), 'w', encoding='utf-8') as f:
downloaded_doc = f.write(html_source)
driver.find_element_by_id('btnClear').click()
driver.find_element_by_id('lnkAdvancedSearch').click() # Get back to advanced search form
def edit_downloaded_html() -> str:
"""Delete files with no search results and remove unnecessary code from downloaded html."""
path = glob.glob('tm_*.html')
for tm_file in path:
html = open(tm_file, 'r', encoding='utf-8')
soup = BeautifulSoup(html, 'lxml')
no_results = soup.find('span', class_='noresults')
if no_results is not None:
html.close()
os.remove(tm_file)
else:
whole_table = soup.find_all('div', id="table_of_results")
to_be_kept = re.compile(('(?=<table border="0" cellpadding="0" cellspacing="0" '
'class="ui-pg-table" ).*(?<=id="rs_mgrid")'), flags=re.DOTALL
).findall(str(whole_table))
string_to_be_kept = ''.join(to_be_kept)
with open(tm_file, 'w', encoding='utf-8') as f:
f.write(string_to_be_kept)
path = glob.glob('tm_*.html')
return path
def get_trademark_url(downloaded_htmls) -> list:
"""Parse the data, extract trademark application ID and url and save all in a dictionary."""
tm_name_url_list = []
for clean_tm_file in downloaded_htmls:
html = open(clean_tm_file, 'r', encoding='utf-8')
soup = BeautifulSoup(html, 'lxml')
results_ids = soup.find_all('div', id=re.compile("flag_rowId_"))
all_clean_ids = list(re.compile('(?=flag_rowId_).*?\"', flags=re.MULTILINE | re.IGNORECASE)
.findall(str(results_ids)))
map(lambda value: re.sub('(\"|flag_rowId)', '', value), all_clean_ids)
for i, value in enumerate(all_clean_ids):
no_quotation = value.replace('\"', '')
no_id = no_quotation.replace('flag_rowId_', '')
include_name = 'for code ' + no_id + ' in file ' + clean_tm_file + ': '
new_value = include_name
all_clean_ids[i] = value.replace(value, new_value)
created_url = 'https://www.tmdn.org/tmview/get-detail?st13=%s' % no_id
one_link_url_dict = dict()
one_link_url_dict[new_value] = created_url
tm_name_url_list.append(one_link_url_dict)
return tm_name_url_list
def send_email(get_trademark_url, email_data):
"""Send email with web page with search results in email attachment
and urls of each trademark."""
urls_list = get_trademark_url
tm_database_files = glob.glob('tm_*.html')
fromaddr = email_data.sender
toaddr = email_data.receiver
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Trademark monitoring results"
msg_intro = MIMEText("Dears,\n\nbelow see the results from the trademark monitoring "
"made after a month. Attached find the tables of results for "
"particular keywords. In case you would like to investigate "
"suspicious applications, click on the relevant link depending "
"on the trademark application number:\n", 'plain')
msg.attach(msg_intro)
msg_urls = MIMEText(('\n'.join('{}\n'.format(value) for value in urls_list))
.replace('{', '').replace('}', '').replace('\'', ''), 'plain')
msg.attach(msg_urls)
for file in tm_database_files:
with open(file, "rb") as f:
msg_attachments = MIMEApplication(f.read(), name=os.path.basename(file))
msg_attachments['Content-Disposition'] = 'attachment; filename="%s"' % \
os.path.basename(file)
msg.attach(msg_attachments)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.connect('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(fromaddr, email_data.openkeyword)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
print("Email sent!")
def finish_search(driver):
"""Close browser."""
driver.close()
driver.quit()
def run_trademark_check():
driver = check_os()
access_search_form(driver=driver)
trademarks_to_check = [
('*trez*r*', '9,36,38,42', 'trezor', None),
('*tres*r*', '9,36,38,42', 'tresor', None),
('*satoshi*', '9,35,36,38,42', 'satoshi', None),
('*trez*r*', '9,35,36,38,42', 'trezor logo', '14.05.21,14.05.23'),
('*tres*r*', '9,35,36,38,42', 'tresor logo', '14.05.21,14.05.23'),
('*satoshi*', '9,35,36,38,42', 'satoshi logo', '14.05.21,14.05.23')
]
for trademark in trademarks_to_check:
trademark_name, nice_class, searched_id, vienna_class = trademark
search_trademarks(driver, trademark_name, nice_class, searched_id, vienna_class)
edited_html = edit_downloaded_html()
trademark_url = get_trademark_url(edited_html)
send_email(trademark_url, email_data)
finish_search(driver)
if __name__ == '__main__':
run_trademark_check()
| [
"ockova.michaela@gmail.com"
] | ockova.michaela@gmail.com |
15800cdc98e5923c97a8260ad85c32ff54666a97 | d07a6b31a74b50da39b77f79c084ddc4a8c4a23d | /jia_django/jia/jia/settings.py | 8b4fee60668da7981781ff7f9bdb3764f172ccec | [] | no_license | fribble186/jia | 5d3224c83c6beddd422d08136bd9b73db467a603 | e93a064fb43404f38a017546366bb9ae59efcfca | refs/heads/master | 2023-01-02T00:46:48.834861 | 2020-10-26T09:20:33 | 2020-10-26T09:20:33 | 305,665,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,057 | py | """
Django settings for jia project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jia.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jia.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"psj124@qq.com"
] | psj124@qq.com |
61e32bfd56df75f25d13318c5a7a8e3483ac5f47 | afc3dc5769fbd3803d4ea126acea927fd784a706 | /student.py | 1fe2def62713c281db224fe7834bb554b9728299 | [
"MIT"
] | permissive | seanmacb/COMP-115-Exercises | 1a33cddb6cb7a66efb4aa85914f65dfe1991cec9 | fbe7e5b158f2db785b886b6c600f1a8beb19ab1f | refs/heads/master | 2022-12-05T09:16:31.205481 | 2020-08-31T01:09:00 | 2020-08-31T01:09:00 | 287,665,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | # student.py
#
# Class to store info for one student; similar to program on p. 327
class Student:
def __init__ (self, lastName, firstName, idNum, grades):
self.firstName = firstName
self.lastName = lastName
self.id = idNum
# make a list for all the HW grades
self.hwGrade = []
for i in range (5):
self.hwGrade.append (grades [i]) # deep copy of grades list
# the following methods allow access to the class's data members
def getLastName (self):
return self.lastName
def getName (self):
return self.lastName + ", " + self.firstName
def getId (self):
return self.id
def getGrades (self):
return self.hwGrade | [
"seanmacbride@icloud.com"
] | seanmacbride@icloud.com |
ea8ca2060f2262c3ecaf0c88506fad93bb81a001 | eb54d732b5f14f03d9bf2988c6157605c80bbdd5 | /bubble_sort.py | e599bb7065016d2e01b3e67d5e93e3dc4947d828 | [] | no_license | tngo0508/practice_coding | 2e60519fed83a9b3c28b52c2d5ec1ee1d2a609ed | 453c9a7b9a8aa80f37b245f9df447525a9b0a2d1 | refs/heads/master | 2022-03-27T01:44:56.589650 | 2020-01-05T18:58:31 | 2020-01-05T18:58:31 | 225,294,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | def bubble_sort(nums):
for i in range(len(nums) - 1, 0, -1):
for j in range(i):
if nums[j] > nums[j + 1]:
nums[j], nums[j+1] = nums[j+1], nums[j]
print(nums)
return nums
print(bubble_sort([4, 1, 0, 3, 5, 1, 2, 6]))
| [
"tngo0508@gmail.com"
] | tngo0508@gmail.com |
30f7edb35eef5cce6d855b50be7fff21042a064c | 39257f596d4ec7840e39c8267b3444443e89ebba | /src/pyff/pipes.py | b76c3853b4c2b50602eb6f904f16bc38eee2d586 | [
"BSD-2-Clause"
] | permissive | lhoekenga/pyFF | ff6921410d46687528d84e416cbdafa6af46b164 | a0413d34744ddbf95904d0d933524589a039c025 | refs/heads/master | 2021-08-22T09:49:56.160558 | 2017-11-27T09:58:08 | 2017-11-27T09:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,028 | py | """
Pipes and plumbing. Plumbing instances are sequences of pipes. Each pipe is called in order to load, select,
transform, sign or output SAML metadata.
"""
import traceback
try:
from cStringIO import StringIO
except ImportError: # pragma: no cover
print(" *** install cStringIO for better performance")
from StringIO import StringIO
import os
import yaml
from .utils import resource_string, PyffException
from .logs import log
__author__ = 'leifj'
registry = dict()
def pipe(*args, **kwargs):
"""
Register the decorated function in the pyff pipe registry
:param name: optional name - if None, use function name
"""
def deco_none(f):
return f
def deco_pipe(f):
f_name = kwargs.get('name', f.__name__)
registry[f_name] = f
return f
if 1 == len(args):
f = args[0]
registry[f.__name__] = f
return deco_none
else:
return deco_pipe
class PipeException(PyffException):
pass
class PluginsRegistry(dict):
"""
The plugin registry uses pkg_resources.iter_entry_points to list all EntryPoints in the group 'pyff.pipe'. All pipe
entry_points must have the following prototype:
def the_something_func(req,*opts):
pass
Referencing this function as an entry_point using something = module:the_somethig_func in setup.py allows the
function to be referenced as 'something' in a pipeline.
"""
# def __init__(self):
# for entry_point in iter_entry_points('pyff.pipe'):
# if entry_point.name in self:
# log.warn("Duplicate entry point: %s" % entry_point.name)
# else:
# log.debug("Registering entry point: %s" % entry_point.name)
# self[entry_point.name] = entry_point.load()
def load_pipe(d):
"""Return a triple callable,name,args of the pipe specified by the object d.
:param d: The following alternatives for d are allowed:
- d is a string (or unicode) in which case the pipe is named d called with None as args.
- d is a dict of the form {name: args} (i.e one key) in which case the pipe named *name* is called with args
- d is an iterable (eg tuple or list) in which case d[0] is treated as the pipe name and d[1:] becomes the args
"""
def _n(_d):
lst = _d.split()
_name = lst[0]
_opts = lst[1:]
return _name, _opts
name = None
args = None
opts = []
if type(d) is str or type(d) is unicode:
name, opts = _n(d)
elif hasattr(d, '__iter__') and not type(d) is dict:
if not len(d):
raise PipeException("This does not look like a length of pipe... \n%s" % repr(d))
name, opts = _n(d[0])
elif type(d) is dict:
k = d.keys()[0]
name, opts = _n(k)
args = d[k]
else:
raise PipeException("This does not look like a length of pipe... \n%s" % repr(d))
if name is None:
raise PipeException("Anonymous length of pipe... \n%s" % repr(d))
func = None
if name in registry:
func = registry[name]
if func is None or not hasattr(func, '__call__'):
raise PipeException('No pipe named %s is installed' % name)
return func, opts, name, args
class PipelineCallback(object):
"""
A delayed pipeline callback used as a post for parse_metadata
"""
def __init__(self, entry_point, req):
self.entry_point = entry_point
self.plumbing = Plumbing(req.plumbing.pipeline, "%s-via-%s" % (req.plumbing.id, entry_point))
self.req = req
def __call__(self, *args, **kwargs):
t = args[0]
if t is None:
raise ValueError("PipelineCallback must be called with a parse-tree argument")
try:
return self.plumbing.process(self.req.md, state={self.entry_point: True}, t=t)
except Exception as ex:
traceback.print_exc(ex)
raise ex
class Plumbing(object):
"""
A plumbing instance represents a basic processing chain for SAML metadata. A simple, yet reasonably complete example:
.. code-block:: yaml
- load:
- /var/metadata/registry
- http://md.example.com
- select:
- #md:EntityDescriptor[md:IDPSSODescriptor]
- xslt:
stylesheet: tidy.xsl
- fork:
- finalize:
Name: http://example.com/metadata.xml
cacheDuration: PT1H
validUntil: PT1D
- sign:
key: signer.key
cert: signer.crt
- publish: /var/metadata/public/metadata.xml
Running this plumbing would bake all metadata found in /var/metadata/registry and at http://md.example.com into an
EntitiesDescriptor element with @Name http://example.com/metadata.xml, @cacheDuration set to 1hr and @validUntil
1 day from the time the 'finalize' command was run. The tree woud be transformed using the "tidy" stylesheets and
would then be signed (using signer.key) and finally published in /var/metadata/public/metadata.xml
"""
def __init__(self, pipeline, pid):
self._id = pid
self.pipeline = pipeline
@property
def id(self):
return self._id
@property
def pid(self):
return self._id
def __iter__(self):
return self.pipeline
def __str__(self):
out = StringIO()
yaml.dump(self.pipeline, stream=out)
return out.getvalue()
class Request(object):
"""
Represents a single request. When processing a set of pipelines a single request is used. Any part of the pipeline
may modify any of the fields.
"""
def __init__(self, pl, md, t, name=None, args=None, state=None):
if not state:
state = dict()
if not args:
args = []
self.plumbing = pl
self.md = md
self.t = t
self.name = name
self.args = args
self.state = state
self.done = False
def process(self, pl):
"""The inner request pipeline processor.
:param pl: The plumbing to run this request through
"""
log.debug('Processing \n%s' % pl)
for p in pl.pipeline:
cb, opts, name, args = load_pipe(p)
# log.debug("traversing pipe %s,%s,%s using %s" % (pipe,name,args,opts))
if type(args) is str or type(args) is unicode:
args = [args]
if args is not None and type(args) is not dict and type(args) is not list and type(args) is not tuple:
raise PipeException("Unknown argument type %s" % repr(args))
self.args = args
self.name = name
ot = cb(self, *opts)
if ot is not None:
self.t = ot
if self.done:
break
return self.t
def process(self, md, state=None, t=None):
"""
The main entrypoint for processing a request pipeline. Calls the inner processor.
:param md: The current metadata repository
:param state: The active request state
:param t: The active working document
:return: The result of applying the processing pipeline to t.
"""
if not state:
state = dict()
# req = Plumbing.Request(self, md, t, state=state)
# self._process(req)
# return req.t
return Plumbing.Request(self, md, t, state=state).process(self)
def _process(self, req):
"""The inner request pipeline processor.
:param req: The request to run through the pipeline
"""
log.debug('Processing \n%s' % self)
for p in self.pipeline:
try:
pipe, opts, name, args = load_pipe(p)
# log.debug("traversing pipe %s,%s,%s using %s" % (pipe,name,args,opts))
if type(args) is str or type(args) is unicode:
args = [args]
if args is not None and type(args) is not dict and type(args) is not list and type(args) is not tuple:
raise PipeException("Unknown argument type %s" % repr(args))
req.args = args
req.name = name
ot = pipe(req, *opts)
if ot is not None:
req.t = ot
if req.done:
break
except PipeException as ex:
log.error(ex)
break
return req.t
def plumbing(fn):
"""
Create a new plumbing instance by parsing yaml from the filename.
:param fn: A filename containing the pipeline.
:return: A plumbing object
This uses the resource framework to locate the yaml file which means that pipelines can be shipped as plugins.
"""
pid = os.path.splitext(fn)[0]
ystr = resource_string(fn)
if ystr is None:
raise PipeException("Plumbing not found: %s" % fn)
pipeline = yaml.safe_load(ystr)
return Plumbing(pipeline=pipeline, pid=pid)
| [
"leifj@sunet.se"
] | leifj@sunet.se |
eb79ae9da300d2daa7953889856eb0652be4ea4c | d08c8bcbe0f67cb3ff2a01afc9fad390dafc4f0a | /test/Search.py | 160ad7cdc68be1100fd06521f140ffba64b3240b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | Dibyadarshan/mezzanine | 6e6a1cafdba03660a18df29238b271a83357a9b0 | 6bc046f086c70c1f6bda3458eafbbe2da54df0c8 | refs/heads/master | 2020-07-22T07:29:35.759238 | 2019-11-06T17:14:04 | 2019-11-06T17:14:04 | 207,116,582 | 0 | 0 | BSD-2-Clause | 2019-09-08T13:27:44 | 2019-09-08T13:27:44 | null | UTF-8 | Python | false | false | 2,564 | py | import unittest
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
driver = webdriver.Firefox(executable_path="/home/anmol/PycharmProjects/prg1/geckodriver-v0.24.0-linux64/geckodriver")
wait = WebDriverWait(driver, 10)
# driver.implicitly_wait(2)
driver.get("http://127.0.0.1:8000/blog/test-blog-post/")
driver.maximize_window()
wait = WebDriverWait(driver, 10)
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/div[1]/input')))
search_filter = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'select.form-control')))
select_element = Select(search_filter);
select_element.select_by_index(1);
search_key = "heroku"
search_element.clear()
search_element.send_keys(search_key)
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/input')))
search_element.click()
bodyText = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'body')))
assert search_key in bodyText.text
time.sleep(2)
wait = WebDriverWait(driver, 10)
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/div[1]/input')))
search_key = "pneumonoultramicroscopicsilicovolcanoconiosis"
search_filter = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'select.form-control')))
select_element = Select(search_filter);
select_element.select_by_index(0);
search_element.clear()
search_element.send_keys(search_key)
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/input')))
search_element.click()
bodyText = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'body')))
assert 'No results' in bodyText.text
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/div[1]/input')))
search_filter = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'select.form-control')))
select_element = Select(search_filter);
select_element.select_by_index(2);
search_key = "heroku"
search_element.clear()
search_element.send_keys(search_key)
search_element = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div[2]/form/input')))
search_element.click()
bodyText = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'body')))
assert search_key in bodyText.text
driver.quit()
| [
"hanmol123@outlook.com"
] | hanmol123@outlook.com |
86a2d304179a0d4d021966bafce213f4365d57c2 | 84290c584128de3e872e66dc99b5b407a7a4612f | /Statistical Thinking in Python (Part 2)/Bootstrap confidence intervals/Visualizing bootstrap samples.py | 325418de26f528e09ecafe5c6554c241dae959c8 | [] | no_license | BautizarCodigo/DataAnalyticEssentials | 91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789 | 7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57 | refs/heads/main | 2023-04-11T04:42:17.977491 | 2021-03-21T19:05:17 | 2021-03-21T19:05:17 | 349,784,608 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | for _ in range(50):
# Generate bootstrap sample: bs_sample
bs_sample = np.random.choice(rainfall, size=len(rainfall))
# Compute and plot ECDF from bootstrap sample
x, y = ecdf(bs_sample)
_ = plt.plot(x, y, marker='.', linestyle='none',
color='gray', alpha=0.1)
# Compute and plot ECDF from original data
x, y = ecdf(rainfall)
_ = plt.plot(x, y, marker='.')
# Make margins and label axes
plt.margins(0.02)
_ = plt.xlabel('yearly rainfall (mm)')
_ = plt.ylabel('ECDF')
# Show the plot
plt.show() | [
"78171986+BautizarCodigo@users.noreply.github.com"
] | 78171986+BautizarCodigo@users.noreply.github.com |
7bd0879f9babbc70ad3e7b46acda567a0352685e | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/chunk/util.py | 0027fab667b850ac00ae34418c66c68f13313f1e | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 21,251 | py | # Natural Language Toolkit: Chunk format conversions
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals, division
import re
from nltk.tree import Tree
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.compat import python_2_unicode_compatible
##//////////////////////////////////////////////////////
## EVALUATION
##//////////////////////////////////////////////////////
from nltk.metrics import accuracy as _accuracy
def accuracy(chunker, gold):
"""
Score the accuracy of the chunker against the gold standard.
Strip the chunk information from the gold standard and rechunk it using
the chunker, then compute the accuracy score.
:type chunker: ChunkParserI
:param chunker: The chunker being evaluated.
:type gold: tree
:param gold: The chunk structures to score the chunker on.
:rtype: float
"""
gold_tags = []
test_tags = []
for gold_tree in gold:
test_tree = chunker.parse(gold_tree.flatten())
gold_tags += tree2conlltags(gold_tree)
test_tags += tree2conlltags(test_tree)
# print 'GOLD:', gold_tags[:50]
# print 'TEST:', test_tags[:50]
return _accuracy(gold_tags, test_tags)
# Patched for increased performance by Yoav Goldberg <yoavg@cs.bgu.ac.il>, 2006-01-13
# -- statistics are evaluated only on demand, instead of at every sentence evaluation
#
# SB: use nltk.metrics for precision/recall scoring?
#
class ChunkScore(object):
"""
A utility class for scoring chunk parsers. ``ChunkScore`` can
evaluate a chunk parser's output, based on a number of statistics
(precision, recall, f-measure, misssed chunks, incorrect chunks).
It can also combine the scores from the parsing of multiple texts;
this makes it significantly easier to evaluate a chunk parser that
operates one sentence at a time.
Texts are evaluated with the ``score`` method. The results of
evaluation can be accessed via a number of accessor methods, such
as ``precision`` and ``f_measure``. A typical use of the
``ChunkScore`` class is::
>>> chunkscore = ChunkScore() # doctest: +SKIP
>>> for correct in correct_sentences: # doctest: +SKIP
... guess = chunkparser.parse(correct.leaves()) # doctest: +SKIP
... chunkscore.score(correct, guess) # doctest: +SKIP
>>> print('F Measure:', chunkscore.f_measure()) # doctest: +SKIP
F Measure: 0.823
:ivar kwargs: Keyword arguments:
- max_tp_examples: The maximum number actual examples of true
positives to record. This affects the ``correct`` member
function: ``correct`` will not return more than this number
of true positive examples. This does *not* affect any of
the numerical metrics (precision, recall, or f-measure)
- max_fp_examples: The maximum number actual examples of false
positives to record. This affects the ``incorrect`` member
function and the ``guessed`` member function: ``incorrect``
will not return more than this number of examples, and
``guessed`` will not return more than this number of true
positive examples. This does *not* affect any of the
numerical metrics (precision, recall, or f-measure)
- max_fn_examples: The maximum number actual examples of false
negatives to record. This affects the ``missed`` member
function and the ``correct`` member function: ``missed``
will not return more than this number of examples, and
``correct`` will not return more than this number of true
negative examples. This does *not* affect any of the
numerical metrics (precision, recall, or f-measure)
- chunk_label: A regular expression indicating which chunks
should be compared. Defaults to ``'.*'`` (i.e., all chunks).
:type _tp: list(Token)
:ivar _tp: List of true positives
:type _fp: list(Token)
:ivar _fp: List of false positives
:type _fn: list(Token)
:ivar _fn: List of false negatives
:type _tp_num: int
:ivar _tp_num: Number of true positives
:type _fp_num: int
:ivar _fp_num: Number of false positives
:type _fn_num: int
:ivar _fn_num: Number of false negatives.
"""
def __init__(self, **kwargs):
self._correct = set()
self._guessed = set()
self._tp = set()
self._fp = set()
self._fn = set()
self._max_tp = kwargs.get('max_tp_examples', 100)
self._max_fp = kwargs.get('max_fp_examples', 100)
self._max_fn = kwargs.get('max_fn_examples', 100)
self._chunk_label = kwargs.get('chunk_label', '.*')
self._tp_num = 0
self._fp_num = 0
self._fn_num = 0
self._count = 0
self._tags_correct = 0.0
self._tags_total = 0.0
self._measuresNeedUpdate = False
def _updateMeasures(self):
if (self._measuresNeedUpdate):
self._tp = self._guessed & self._correct
self._fn = self._correct - self._guessed
self._fp = self._guessed - self._correct
self._tp_num = len(self._tp)
self._fp_num = len(self._fp)
self._fn_num = len(self._fn)
self._measuresNeedUpdate = False
def score(self, correct, guessed):
"""
Given a correctly chunked sentence, score another chunked
version of the same sentence.
:type correct: chunk structure
:param correct: The known-correct ("gold standard") chunked
sentence.
:type guessed: chunk structure
:param guessed: The chunked sentence to be scored.
"""
self._correct |= _chunksets(correct, self._count, self._chunk_label)
self._guessed |= _chunksets(guessed, self._count, self._chunk_label)
self._count += 1
self._measuresNeedUpdate = True
# Keep track of per-tag accuracy (if possible)
try:
correct_tags = tree2conlltags(correct)
guessed_tags = tree2conlltags(guessed)
except ValueError:
# This exception case is for nested chunk structures,
# where tree2conlltags will fail with a ValueError: "Tree
# is too deeply nested to be printed in CoNLL format."
correct_tags = guessed_tags = ()
self._tags_total += len(correct_tags)
self._tags_correct += sum(1 for (t,g) in zip(guessed_tags,
correct_tags)
if t==g)
def accuracy(self):
"""
Return the overall tag-based accuracy for all text that have
been scored by this ``ChunkScore``, using the IOB (conll2000)
tag encoding.
:rtype: float
"""
if self._tags_total == 0: return 1
return self._tags_correct/self._tags_total
def precision(self):
"""
Return the overall precision for all texts that have been
scored by this ``ChunkScore``.
:rtype: float
"""
self._updateMeasures()
div = self._tp_num + self._fp_num
if div == 0: return 0
else: return self._tp_num / div
def recall(self):
"""
Return the overall recall for all texts that have been
scored by this ``ChunkScore``.
:rtype: float
"""
self._updateMeasures()
div = self._tp_num + self._fn_num
if div == 0: return 0
else: return self._tp_num / div
def f_measure(self, alpha=0.5):
"""
Return the overall F measure for all texts that have been
scored by this ``ChunkScore``.
:param alpha: the relative weighting of precision and recall.
Larger alpha biases the score towards the precision value,
while smaller alpha biases the score towards the recall
value. ``alpha`` should have a value in the range [0,1].
:type alpha: float
:rtype: float
"""
self._updateMeasures()
p = self.precision()
r = self.recall()
if p == 0 or r == 0: # what if alpha is 0 or 1?
return 0
return 1/(alpha/p + (1-alpha)/r)
def missed(self):
"""
Return the chunks which were included in the
correct chunk structures, but not in the guessed chunk
structures, listed in input order.
:rtype: list of chunks
"""
self._updateMeasures()
chunks = list(self._fn)
return [c[1] for c in chunks] # discard position information
def incorrect(self):
"""
Return the chunks which were included in the guessed chunk structures,
but not in the correct chunk structures, listed in input order.
:rtype: list of chunks
"""
self._updateMeasures()
chunks = list(self._fp)
return [c[1] for c in chunks] # discard position information
def correct(self):
"""
Return the chunks which were included in the correct
chunk structures, listed in input order.
:rtype: list of chunks
"""
chunks = list(self._correct)
return [c[1] for c in chunks] # discard position information
def guessed(self):
"""
Return the chunks which were included in the guessed
chunk structures, listed in input order.
:rtype: list of chunks
"""
chunks = list(self._guessed)
return [c[1] for c in chunks] # discard position information
def __len__(self):
self._updateMeasures()
return self._tp_num + self._fn_num
def __repr__(self):
"""
Return a concise representation of this ``ChunkScoring``.
:rtype: str
"""
return '<ChunkScoring of '+repr(len(self))+' chunks>'
def __str__(self):
"""
Return a verbose representation of this ``ChunkScoring``.
This representation includes the precision, recall, and
f-measure scores. For other information about the score,
use the accessor methods (e.g., ``missed()`` and ``incorrect()``).
:rtype: str
"""
return ("ChunkParse score:\n" +
(" IOB Accuracy: %5.1f%%\n" % (self.accuracy()*100)) +
(" Precision: %5.1f%%\n" % (self.precision()*100)) +
(" Recall: %5.1f%%\n" % (self.recall()*100))+
(" F-Measure: %5.1f%%" % (self.f_measure()*100)))
# extract chunks, and assign unique id, the absolute position of
# the first word of the chunk
def _chunksets(t, count, chunk_label):
pos = 0
chunks = []
for child in t:
if isinstance(child, Tree):
if re.match(chunk_label, child.label()):
chunks.append(((count, pos), child.freeze()))
pos += len(child.leaves())
else:
pos += 1
return set(chunks)
def tagstr2tree(s, chunk_label="NP", root_label="S", sep='/',
source_tagset=None, target_tagset=None):
"""
Divide a string of bracketted tagged text into
chunks and unchunked tokens, and produce a Tree.
Chunks are marked by square brackets (``[...]``). Words are
delimited by whitespace, and each word should have the form
``text/tag``. Words that do not contain a slash are
assigned a ``tag`` of None.
:param s: The string to be converted
:type s: str
:param chunk_label: The label to use for chunk nodes
:type chunk_label: str
:param root_label: The label to use for the root of the tree
:type root_label: str
:rtype: Tree
"""
WORD_OR_BRACKET = re.compile(r'\[|\]|[^\[\]\s]+')
stack = [Tree(root_label, [])]
for match in WORD_OR_BRACKET.finditer(s):
text = match.group()
if text[0] == '[':
if len(stack) != 1:
raise ValueError('Unexpected [ at char %d' % match.start())
chunk = Tree(chunk_label, [])
stack[-1].append(chunk)
stack.append(chunk)
elif text[0] == ']':
if len(stack) != 2:
raise ValueError('Unexpected ] at char %d' % match.start())
stack.pop()
else:
if sep is None:
stack[-1].append(text)
else:
word, tag = str2tuple(text, sep)
if source_tagset and target_tagset:
tag = map_tag(source_tagset, target_tagset, tag)
stack[-1].append((word, tag))
if len(stack) != 1:
raise ValueError('Expected ] at char %d' % len(s))
return stack[0]
### CONLL
_LINE_RE = re.compile('(\S+)\s+(\S+)\s+([IOB])-?(\S+)?')
def conllstr2tree(s, chunk_types=('NP', 'PP', 'VP'), root_label="S"):
"""
Return a chunk structure for a single sentence
encoded in the given CONLL 2000 style string.
This function converts a CoNLL IOB string into a tree.
It uses the specified chunk types
(defaults to NP, PP and VP), and creates a tree rooted at a node
labeled S (by default).
:param s: The CoNLL string to be converted.
:type s: str
:param chunk_types: The chunk types to be converted.
:type chunk_types: tuple
:param root_label: The node label to use for the root.
:type root_label: str
:rtype: Tree
"""
stack = [Tree(root_label, [])]
for lineno, line in enumerate(s.split('\n')):
if not line.strip(): continue
# Decode the line.
match = _LINE_RE.match(line)
if match is None:
raise ValueError('Error on line %d' % lineno)
(word, tag, state, chunk_type) = match.groups()
# If it's a chunk type we don't care about, treat it as O.
if (chunk_types is not None and
chunk_type not in chunk_types):
state = 'O'
# For "Begin"/"Outside", finish any completed chunks -
# also do so for "Inside" which don't match the previous token.
mismatch_I = state == 'I' and chunk_type != stack[-1].label()
if state in 'BO' or mismatch_I:
if len(stack) == 2: stack.pop()
# For "Begin", start a new chunk.
if state == 'B' or mismatch_I:
chunk = Tree(chunk_type, [])
stack[-1].append(chunk)
stack.append(chunk)
# Add the new word token.
stack[-1].append((word, tag))
return stack[0]
def tree2conlltags(t):
"""
Return a list of 3-tuples containing ``(word, tag, IOB-tag)``.
Convert a tree to the CoNLL IOB tag format.
:param t: The tree to be converted.
:type t: Tree
:rtype: list(tuple)
"""
tags = []
for child in t:
try:
category = child.label()
prefix = "B-"
for contents in child:
if isinstance(contents, Tree):
raise ValueError("Tree is too deeply nested to be printed in CoNLL format")
tags.append((contents[0], contents[1], prefix+category))
prefix = "I-"
except AttributeError:
tags.append((child[0], child[1], "O"))
return tags
def conlltags2tree(sentence, chunk_types=('NP','PP','VP'),
root_label='S', strict=False):
"""
Convert the CoNLL IOB format to a tree.
"""
tree = Tree(root_label, [])
for (word, postag, chunktag) in sentence:
if chunktag is None:
if strict:
raise ValueError("Bad conll tag sequence")
else:
# Treat as O
tree.append((word,postag))
elif chunktag.startswith('B-'):
tree.append(Tree(chunktag[2:], [(word,postag)]))
elif chunktag.startswith('I-'):
if (len(tree)==0 or not isinstance(tree[-1], Tree) or
tree[-1].label() != chunktag[2:]):
if strict:
raise ValueError("Bad conll tag sequence")
else:
# Treat as B-*
tree.append(Tree(chunktag[2:], [(word,postag)]))
else:
tree[-1].append((word,postag))
elif chunktag == 'O':
tree.append((word,postag))
else:
raise ValueError("Bad conll tag %r" % chunktag)
return tree
def tree2conllstr(t):
"""
Return a multiline string where each line contains a word, tag and IOB tag.
Convert a tree to the CoNLL IOB string format
:param t: The tree to be converted.
:type t: Tree
:rtype: str
"""
lines = [" ".join(token) for token in tree2conlltags(t)]
return '\n'.join(lines)
### IEER
_IEER_DOC_RE = re.compile(r'<DOC>\s*'
r'(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?'
r'(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?'
r'(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?'
r'<BODY>\s*'
r'(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?'
r'<TEXT>(?P<text>.*?)</TEXT>\s*'
r'</BODY>\s*</DOC>\s*', re.DOTALL)
_IEER_TYPE_RE = re.compile('<b_\w+\s+[^>]*?type="(?P<type>\w+)"')
def _ieer_read_text(s, root_label):
stack = [Tree(root_label, [])]
# s will be None if there is no headline in the text
# return the empty list in place of a Tree
if s is None:
return []
for piece_m in re.finditer('<[^>]+>|[^\s<]+', s):
piece = piece_m.group()
try:
if piece.startswith('<b_'):
m = _IEER_TYPE_RE.match(piece)
if m is None: print('XXXX', piece)
chunk = Tree(m.group('type'), [])
stack[-1].append(chunk)
stack.append(chunk)
elif piece.startswith('<e_'):
stack.pop()
# elif piece.startswith('<'):
# print "ERROR:", piece
# raise ValueError # Unexpected HTML
else:
stack[-1].append(piece)
except (IndexError, ValueError):
raise ValueError('Bad IEER string (error at character %d)' %
piece_m.start())
if len(stack) != 1:
raise ValueError('Bad IEER string')
return stack[0]
def ieerstr2tree(s, chunk_types=('LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
                 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'), root_label="S"):
    """
    Return a chunk structure containing the chunked tagged text that is
    encoded in the given IEER style string.
    Convert a string of chunked tagged text in the IEER named
    entity format into a chunk structure. Chunks are of several
    types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
    PERCENT, MONEY, and MEASURE.

    :param chunk_types: accepted for API compatibility (the implementation
        currently does not consult it).  Fixed: the default is now a tuple
        rather than a mutable list, so the shared default object cannot be
        accidentally mutated across calls.
    :rtype: Tree
    """
    # Try looking for a single document.  If that doesn't work, then just
    # treat everything as if it was within the <TEXT>...</TEXT>.
    m = _IEER_DOC_RE.match(s)
    if m:
        return {
            'text': _ieer_read_text(m.group('text'), root_label),
            'docno': m.group('docno'),
            'doctype': m.group('doctype'),
            'date_time': m.group('date_time'),
            # we want to capture NEs in the headline too!
            'headline': _ieer_read_text(m.group('headline'), root_label),
        }
    else:
        return _ieer_read_text(s, root_label)
def demo():
    """Demonstrate the chunk-string conversion helpers on small fixtures."""
    # Bracketed tagged-token format -> Tree.
    s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
    import nltk
    t = nltk.chunk.tagstr2tree(s, chunk_label='NP')
    t.pprint()
    print()
    # CoNLL IOB column format -> Tree (one "word tag iob" triple per line).
    s = """
These DT B-NP
research NN I-NP
protocols NNS I-NP
offer VBP B-VP
to TO B-PP
the DT B-NP
patient NN I-NP
not RB O
only RB O
the DT B-NP
very RB I-NP
best JJS I-NP
therapy NN I-NP
which WDT B-NP
we PRP B-NP
have VBP B-VP
established VBN I-VP
today NN B-NP
but CC B-NP
also RB I-NP
the DT B-NP
hope NN I-NP
of IN B-PP
something NN B-NP
still RB B-ADJP
better JJR I-ADJP
. . O
"""
    conll_tree = conllstr2tree(s, chunk_types=('NP', 'PP'))
    conll_tree.pprint()
    # Demonstrate CoNLL output
    print("CoNLL output:")
    print(nltk.chunk.tree2conllstr(conll_tree))
    print()
if __name__ == '__main__':
demo()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
fb28b34d5e92d85a2bb4c9bb7c813b765175571d | 98a6a4f99a170cb96f6d2a620029bbcfc6775c6c | /blogengine/settings.py | 67706dd834e09268201ed0080747ecda00dab64b | [] | no_license | OttoSteel/blogengine | f367dc4003fd3c229c0f3a2562790c597973156e | 09f5e90a907471918b9a8b31f74bd70a04730a1d | refs/heads/master | 2023-01-05T10:38:36.440501 | 2020-11-02T21:01:00 | 2020-11-02T21:01:00 | 309,198,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | """
Django settings for blogengine project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bxb2%-bpj2@e_^fe624tn&k-%d6bjh_)c_+0(x^@d5ac%qb=nz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogengine.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogengine.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"polshinski@gmail.com"
] | polshinski@gmail.com |
a68a0d4543824377684445082a309e04204bc1e7 | 7e9b04b9e4212b58cd57ecc6e1f3c8dc06bf83ef | /OLD_BAK/data_structure2/practice/hotpotato.py | 6c8d515b882fca2c17f8a088b88adf633a663ed9 | [] | no_license | onionzhou/Note | 1766cf8ac45e9fd39b7eb79816c01e8ab709a52d | ed008913a2ce51b6c26718d09461e126b71fc0e1 | refs/heads/master | 2022-11-14T18:34:42.166244 | 2022-10-18T13:38:50 | 2022-10-18T13:38:50 | 132,616,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author:onion
# datetime:2019/10/6 13:28
# software: PyCharm
from data_structure2.basic.my_queue import Queue
from random import randint
'''
传土豆游戏
接收一个名字列表 和一个用于计数的常亮num
程序将这个孩子的名字移出队列,然后立即将他插入队列得尾部,
随后该孩子会一直等待,直到再次达到队列得头部,在出队列和入队列num次后,
位于队列头部的孩子出局,如此反复,直到队列只剩下一个名字(队列大小为1)
'''
def hot_potato(namelist, num):
    """Simulate the hot-potato (Josephus-style) elimination game.

    Every player from ``namelist`` joins a queue.  The potato is passed
    ``num`` times (the player at the front moves to the back), and whoever
    then holds it is eliminated.  This repeats until one player remains;
    that player's name is returned.
    """
    circle = Queue()
    for player in namelist:
        circle.enqueue(player)
    while circle.size() > 1:
        passes = 0
        while passes < num:
            circle.enqueue(circle.dequeue())
            passes += 1
        circle.dequeue()  # the player holding the potato is out
    return circle.dequeue()
if __name__ == '__main__':
namelist=['a','b','c','d','e','f']
num= randint(1,10)
tmp =hot_potato(namelist,num)
print(tmp) | [
"dkonion@foxmail.com"
] | dkonion@foxmail.com |
9cbdd8a6c6170a9d1d5a9ca37e428a2e16bc6c22 | 309d17b81cea038713ba67bee72a41d2df4d6869 | /Python/Python_basic/Python_OOP/OOP21_composition2.py | 86f25cef9fcfcf5256d11e83738ff6e7e74ed70b | [] | no_license | Bongkot-Kladklaen/Programming_tutorial_code | ac07e39da2bce396e670611884436b360536cdc5 | cda7508c15c3e3d179c64b9aac163b6173ef3519 | refs/heads/master | 2023-06-20T13:14:17.077809 | 2021-07-18T04:41:04 | 2021-07-18T04:41:04 | 387,081,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | class Printer:
def print_page(self, data):
print("printing {}".format(data))
class Scanner:
    """Component device: scans a page."""

    def scan_page(self):
        """Announce that a scan is in progress."""
        print("scanning...")
class Fax:
    """Component device: faxes a page to a phone number."""

    def fax_page(self, number):
        """Announce a fax being sent to ``number``."""
        message = "faxing to {}".format(number)
        print(message)
class Aio:
    """All-in-one printer: composes a Printer, a Scanner and a Fax."""

    def __init__(self, p, s, f):
        """Store the three component devices as attributes p, s and f."""
        self.p = p
        self.s = s
        self.f = f
if __name__ == '__main__':
a = Aio(Printer(), Scanner(), Fax())
a.p.print_page("hello")
a.s.scan_page()
a.f.fax_page("02848248") | [
"bongkot.klad@gmail.com"
] | bongkot.klad@gmail.com |
e8e6486efd70d55478ed082df7d817b27d8eed70 | 5c0ca1a0c9c057a3ffc328a45e66b5a0bcbadd5a | /a.py | 5350b1468de4ae0e15c59023c556f6b57932215d | [] | no_license | 190330126/Project0202 | 516d75c4256704130bf6238afc6ff1e736bdb512 | e0b6b7629fc2e4eb9267ea01bc5bac9ac16b2293 | refs/heads/master | 2023-02-26T11:18:10.367685 | 2021-02-02T05:02:03 | 2021-02-02T05:02:03 | 335,174,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | def do():
print("I can not do")
def play():
    """Print the cricket-playing message."""
    message = "I play cricket"
    print(message)
| [
"190330126@klh.edu.in"
] | 190330126@klh.edu.in |
71ae82df16f921dba43e0a9720d746745210aa0c | dcb2700636da30ca8e2a86e8a76fb5e796650922 | /app/geocode.py | 64843a6543b1d571817e8df242917048df3b7cd4 | [] | no_license | Tepau/GrandPyBot | 17820200595d1f7de4da4eb920b7a56cd7eeb62b | 63bbc85b6fa9e945819ecc45fd318b842ebdfd89 | refs/heads/master | 2022-12-13T16:06:11.440161 | 2020-05-04T13:21:23 | 2020-05-04T13:21:23 | 201,489,329 | 0 | 0 | null | 2022-12-08T06:10:18 | 2019-08-09T15:02:07 | Python | UTF-8 | Python | false | false | 1,486 | py | import googlemaps
import os
class GoogleMap:
    """Thin wrapper around the Google Maps geocoding API (``googlemaps``
    client) that extracts the pieces of a geocode result this app needs."""
    def __init__(self):
        # The API key is read from the KEY environment variable.
        self.gmaps = googlemaps.Client(key=os.environ.get('KEY'))
    def find_adress(self, search):
        """Return the formatted address of the best geocode match for
        ``search``.  (Method name spelling kept as-is: callers use it.)"""
        geocode_result = self.gmaps.geocode(search)
        adress = geocode_result[0]["formatted_address"]
        return adress
    def find_location(self, search):
        """Return the ``(latitude, longitude)`` of the best match for
        ``search``."""
        geocode_result = self.gmaps.geocode(search)
        latitude = geocode_result[0]["geometry"]["location"]["lat"]
        longitude = geocode_result[0]["geometry"]["location"]["lng"]
        return (latitude, longitude)
    def wiki_search(self, search):
        """Build a "place, city[, country]" string for a Wikipedia lookup.

        The indexes into address_components assume the layout Google returns
        for the queries this app makes (1 = place, 2 = city, 5 = country) --
        NOTE(review): confirm these positions hold for all inputs.
        """
        geocode_result = self.gmaps.geocode(search)
        location = geocode_result[0]["address_components"][1]["long_name"]
        ville = geocode_result[0]["address_components"][2]["long_name"]
        if len(geocode_result[0]["address_components"]) > 5:
            pays = geocode_result[0]["address_components"][5]["long_name"]
            return location + ", " + ville + ", " + pays
        return location + ", " + ville
if __name__ == '__main__':
app = GoogleMap()
print(app.find_adress('openclassrooms paris'))
print(app.find_location('openclassrooms paris'))
print(app.wiki_search('openclassrooms paris'))
| [
"malaury.lemaitresalmon@gmail.com"
] | malaury.lemaitresalmon@gmail.com |
991cbc95070acdf5c80c693df59fb131362feff3 | b7c538adde4712a5be459b3c7cf39c574fdb30c8 | /macro1/matlab/Sugarkhuu_Macroeconomics_PS2/compute_medians.py | 984cfa199dedb74cd41427a2981ca5347c23fdb9 | [] | no_license | Sugarkhuu/bonn_phd | b3f9351628270a55d01204e8210c94cdba1bcde4 | 3b45b1c8462ab1e8c09ba3df9a90d57a74bc70cf | refs/heads/main | 2023-09-02T17:27:39.681537 | 2021-11-12T09:15:28 | 2021-11-12T09:15:28 | 420,656,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,491 | py | ### Code to compute income and networth medians from SCF data
# =============================================================================
# IMPORT PACKAGES
# =============================================================================
import pandas as pd
import numpy as np
import os
# =============================================================================
# FUNCTIONS
# =============================================================================
def process_data(df, age0, age1):
    """Prepare the raw SCF frame for the median computation.

    Keeps only the columns used downstream, tags each row with its SCF
    imputation replicate (rows cycle through replicates 1..5), scales the
    weights by 5 so each replicate sums to the full US population, and
    pre-filters to ages [age0-2, age1+2] (the extra 2 years on each side
    feed the centered 5-year moving average later on).
    """
    keep = ['WGT', 'AGE', 'INCOME', 'NETWORTH']  # no need of other variables
    df = df[keep].copy()
    df['impute_id'] = [1, 2, 3, 4, 5] * (len(df) // 5)
    for rep in range(5):
        total = df[df['impute_id'] == rep + 1]['WGT'].sum()
        print('Sum of weight in Replicate ' + str(rep) + ': ' + str(total))
    df['WGT'] = df['WGT'] * 5
    return df[(df['AGE'] >= age0 - 2) & (df['AGE'] <= age1 + 2)]
def calc_median(df):
    """Per-age weighted medians of INCOME and NETWORTH, smoothed with a
    centered 5-year moving average and averaged over the 5 SCF imputation
    replicates.

    Fixes over the original:
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``;
    - ages are explicitly ``sorted`` before the rolling mean -- the original
      relied on the incidental iteration order of ``set`` being ascending,
      which the moving average silently requires.

    Returns a DataFrame indexed by AGE with columns INCOME and NETWORTH.
    """
    # Ages present anywhere in the sample, in ascending order (rolling MA
    # assumes consecutive rows are consecutive ages).
    age_list = sorted(set(df['AGE']))
    per_replicate = []
    for rep in range(5):
        df_rep = df[df['impute_id'] == rep + 1]
        rows = []
        for age in age_list:
            # NOTE(review): assumes every age occurs in every replicate;
            # an empty age slice would make median_ fail on cdf[-1].
            df_age = df_rep[df_rep['AGE'] == age]
            rows.append({
                'AGE': age,
                'INCOME': median_(df_age['INCOME'].values, df_age['WGT'].values),
                'NETWORTH': median_(df_age['NETWORTH'].values, df_age['WGT'].values),
            })
        df_median = pd.DataFrame(rows, columns=['AGE', 'INCOME', 'NETWORTH'])
        # Centered 5-year moving average; the edge ages become NaN and are
        # dropped (process_data widened the age window by 2 for this reason).
        df_median['INCOME'] = df_median['INCOME'].rolling(5, center=True).mean()
        df_median['NETWORTH'] = df_median['NETWORTH'].rolling(5, center=True).mean()
        df_median.dropna(inplace=True)
        per_replicate.append(df_median)
    df_median_tot = pd.concat(per_replicate, ignore_index=True)
    # Average the five replicate estimates for each age.
    return df_median_tot.groupby('AGE')[['INCOME', 'NETWORTH']].mean()
def median_(val, freq):
    """Weighted median of ``val`` with weights ``freq``.

    Credit: https://stackoverflow.com/questions/46086663/how-to-get-mean-and-standard-deviation-from-a-frequency-distribution-table-in-py/46090291
    """
    order = np.argsort(val)
    cdf = np.cumsum(freq[order])
    # Value at the first position where the cumulative weight reaches half
    # the total (integer-halved, as in the original).
    return val[order][np.searchsorted(cdf, cdf[-1] // 2)]
def avg_inc(df):
    """Mean INCOME over the working ages 25-64 (inclusive)."""
    working = df[(df['AGE'] >= 25) & (df['AGE'] <= 64)]
    return working['INCOME'].mean()
# =============================================================================
# MAIN PART
# =============================================================================
# mydir = r"C:\Users\sugarkhuu\Documents\phd\bonn\bonn_phd\macro1\matlab\ps2"
df_all = pd.read_csv('SCFP2019.csv')
age0 = 25
T_years = 60
age1 = age0 + T_years # age to die
df = process_data(df_all,age0,age1)
df_median = calc_median(df)
df_median.reset_index(drop=False, inplace=True)
avg_income = avg_inc(df_median)
df_median['y_norm'] = df_median['INCOME']/avg_income # normalize income to 1
df_median['a2y'] = df_median['NETWORTH']/df_median['INCOME'] # wealth to income
# save [AGE,INCOME,NETWORTH,y_norm,a2y]
df_median.to_csv('inc_wl_us_2019.csv',index=False)
| [
"py4econ@gmail.com"
] | py4econ@gmail.com |
381cbb9843c8e8a7b93fece9104cde8dc161688a | af373596f14347bb7a3483a14ead3004efd45aa1 | /particle.py | 5a99e874dc274413c9daecaac20f208b440ea466 | [] | no_license | bharath-r/2D-SLAM | 7ed1b519aa38b92bf99fdfcea53a29cd3f20ff41 | f1116f16cbaaaea6c08962c91843e7eade23c989 | refs/heads/master | 2020-05-03T00:34:05.574200 | 2019-03-29T01:56:21 | 2019-03-29T01:56:21 | 178,312,968 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,353 | py | import numpy as np
import math as m
import cv2
class particle(object):
    """One particle-filter hypothesis for 2-D SLAM: an importance weight
    ``w`` and a robot pose ``x`` stored as a 1x3 array [[x, y, theta]]."""
    # Particle has weight w and the current state x
    def __init__(self, w,x):
        # w: scalar importance weight; x: 1x3 pose array [[x, y, theta]].
        self.w = w
        self.x = x
    # Update the map by finding hit coordinates in the world frame using the head/neck angles, local frame
    # coordinates and robot distance coordinates
    def update_map(self, MAP, head, neck, dist, ps, roll, pitch):
        """Fold one lidar scan into the occupancy grid.

        MAP is a dict with keys 'map' (uint8 grid), 'l_odds' (float log-odds
        grid), 'xmin', 'ymin', 'res', 'sizex', 'sizey'.  ``ps`` holds scan
        points in the lidar frame (homogeneous coordinates -- presumably
        4xN; confirm with the caller).  Returns the mutated MAP.
        """
        # Transform points from the world frame to the local frame
        pw = self.loc2wld(head, neck, dist, ps, roll, pitch)
        # Check if the ray is striking the ground plane using the world Z coordinate
        indValid = (pw[2, :] > 0.1)
        pw = pw[:, indValid]
        # Convert from physical to map coordinates
        # xs remain in the same scale. For ys the axis needs to be inverted and distances measured from ymin.
        xis = np.ceil((pw[0, :] - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yis = np.ceil((-pw[1, :] - MAP['ymin']) / MAP['res']).astype(np.int16) - 1
        # Position of the robot from physical to map coordinates
        xrob = np.ceil((self.x[0, 0] - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yrob = np.ceil((-self.x[0, 1] - MAP['ymin']) / MAP['res']).astype(np.int16) - 1
        # Pick only those coordinates which are within the image limits
        indGood = np.logical_and(np.logical_and(np.logical_and((xis > 1), (yis > 1)),
                                                (xis < MAP['sizex'])), (yis < MAP['sizey']))
        xis = xis[indGood]
        yis = yis[indGood]
        # Make a contour of the hit points that begins and ends with the robot pose
        cnt = np.vstack((xis, yis)).T
        cnt = np.vstack((np.array([[xrob, yrob]]), cnt, np.array([[xrob, yrob]])))
        # Mask the scan region using the hit cells as polygon vertices and fill with log(1/9)
        mask = np.zeros(MAP['map'].shape, dtype=np.float64)
        cv2.drawContours(image=mask, contours=[cnt], contourIdx=0, color=(0.5 * m.log(1./9.)), thickness=-1)
        # add mask to log-odds map to accumulate log(1/9) in the free region
        MAP['l_odds'] += mask
        # Add (2 * log 9) to the points where it is hit (since it is set as log(1/9) during the draw contours function)
        MAP['l_odds'][yis, xis] += (2 * m.log(9.))
        # Clip the function so that it does not get too confident about a particular reading
        MAP['l_odds'] = np.clip(MAP['l_odds'], -100, 100)
        # Find values where the log odds is greater than 0 (probability more than half) and set the map values there as
        # 1. Note reinitialize MAP before setting the values. Never reset the log odds
        # NOTE(review): the threshold actually used below is 10*log(9), not 0.
        mask = MAP['l_odds'] > 10. * m.log(9)
        # Set the points where log odds is greater than 0 as 1
        MAP['map'] = np.zeros((MAP['sizey'], MAP['sizex']), dtype=np.uint8) # DATA TYPE: char or int8
        if np.any(mask):
            # Scale confident cells into 0..100 relative to the strongest cell.
            MAP['map'][mask] = MAP['l_odds'][mask] * 100 / np.amax(MAP['l_odds'][mask])
        return MAP
    # Convert the points from the local frame of the scanner to the world frame
    def loc2wld(self, head, neck, dist, ps, roll, pitch):
        """Chain the lidar->head->base->world homogeneous transforms and
        apply them to the scan points ``ps``.

        ``dist`` supplies the fixed offsets 'g2com', 'com2h' and 'h2lid'
        (ground-to-COM, COM-to-head, head-to-lidar heights).
        """
        # Robot pose
        x = self.x[0, 0]
        y = self.x[0, 1]
        th = self.x[0, 2]
        # Transformation from robot base to world: yaw(th) * pitch * roll.
        Tr2w = np.dot(np.array([[m.cos(th), -m.sin(th), 0, x],
                                [m.sin(th), m.cos(th), 0, y],
                                [0., 0., 1., 0.],
                                [0., 0., 0., 1.]]),
                      np.dot(np.array([[m.cos(pitch), 0, m.sin(pitch), 0],
                                       [0, 1., 0, 0],
                                       [-m.sin(pitch), 0., m.cos(pitch), 0.],
                                       [0., 0., 0., 1.]]),
                             np.array([[1., 0, 0, 0],
                                       [0, m.cos(roll), -m.sin(roll), 0],
                                       [0., m.sin(roll), m.cos(roll), 0.],
                                       [0., 0., 0., 1.]])))
        # Transformation from the robot head to the base
        Th2r = np.array([[1., 0., 0., 0.],
                         [0., 1., 0., 0.],
                         [0., 0., 1., dist['g2com'] + dist['com2h']],
                         [0., 0., 0., 1.]])
        # Transformation from lidar to head = R_yaw * R_pitch * R_trans
        Tl2h = np.dot(np.dot(np.array([[m.cos(neck), -m.sin(neck), 0., 0.],
                                       [m.sin(neck), m.cos(neck), 0., 0.],
                                       [0., 0., 1., 0.],
                                       [0., 0., 0., 1.]]),
                             np.array([[m.cos(head), 0., m.sin(head), 0.],
                                       [0., 1., 0., 0.],
                                       [-m.sin(head), 0., m.cos(head), 0.],
                                       [0., 0., 0., 1.]])),
                      np.array([[1., 0., 0., 0.],
                                [0., 1., 0., 0.],
                                [0., 0., 1., dist['h2lid']],
                                [0., 0., 0., 1.]]))
        # Transform from local coordinates to global coordinates
        pw = np.dot(Tr2w, np.dot(Th2r, np.dot(Tl2h, ps)))
        return pw
    def predict(self, noise, c_act):
        """Motion-model step: apply the odometry increment ``c_act``
        ([[dx, dy, dtheta]] in the local frame) plus additive ``noise``
        ([[nx, ny, ntheta]]) to the pose, rotating the translation into
        the world frame."""
        # Use current theta
        th = self.x[0,2]
        # Find rotation matrix to convert from local to global
        R = np.array([[m.cos(th), -m.sin(th)], [m.sin(th), m.cos(th)]])
        a = np.zeros((1,3))
        # Convert from local to global frame and add the noise
        a[0, 0:2] = self.x[0,0:2] + (np.dot(R,c_act[0, 0:2].T).T + noise[0, 0:2])
        a[0, 2] = self.x[0,2] + (noise[0, 2] + c_act[0, 2])
        self.x = a
    def get_corr(self, MAP, head, neck, dist, ps, roll, pitch):
        """Map-correlation score for this particle's pose.

        Projects the scan into map cells, then searches a 5x5 cell window
        around the pose for the best overlap with MAP['map']; the pose is
        nudged to the best offset and the maximum correlation is returned.
        """
        # Transform points from the world frame to the local frame
        pw = self.loc2wld(head, neck, dist, ps, roll, pitch)
        # Check if the ray is striking the ground plane
        indValid = (pw[2, :] > 0.1)
        pw = pw[:, indValid]
        # Convert from physical to map coordinates
        xis = np.ceil((pw[0, :] - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yis = np.ceil((-pw[1, :] - MAP['ymin']) / MAP['res']).astype(np.int16) - 1
        # Pick only those coordinates which are within the image limits
        indGood = np.logical_and(np.logical_and(np.logical_and((xis > 1), (yis > 1)),
                                                (xis < MAP['sizex'] - 1 )), (yis < MAP['sizey'] -1))
        xis = xis[indGood]
        yis = yis[indGood]
        cmax = 0.
        xreq = 0.
        yreq = 0.
        x_range = np.arange(-2, 3, 1)
        y_range = np.arange(-2, 3, 1)
        # NOTE(review): `xrange` is a Python 2 builtin -- this raises
        # NameError on Python 3; confirm the target interpreter.
        for i in xrange(x_range.shape[0]):
            for j in xrange(y_range.shape[0]):
                # Map cells are 0..100, so /100 turns the sum into hit counts.
                c = np.sum(MAP['map'][yis+y_range[j],xis + x_range[i]])/100
                if c > cmax:
                    cmax = c
                    xreq = float(x_range[i]) * MAP['res']
                    yreq = -float(y_range[j]) * MAP['res']
        x_cur = self.x[0,0] + xreq
        y_cur = self.x[0,1] + yreq
        a = np.array([[x_cur, y_cur, self.x[0,2]]])
        self.x = a
        return cmax
| [
"noreply@github.com"
] | noreply@github.com |
13200077a023a7b18121b8ac2256a596ef327e45 | 03dc722fea38f25844a45cd9606db2ccdc92dbaf | /projekt.py | 39428f9dbb1d7cf4b0ee6302f8e234309e614b66 | [] | no_license | Marcin8899/Grafika | 1c0a5fa5d911db215349ca6e7eb38ba2245398be | cce118609bd502da1f50945fe8691d4613de083b | refs/heads/master | 2023-04-21T09:05:42.527155 | 2021-04-20T09:15:59 | 2021-04-20T09:15:59 | 359,212,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,341 | py | import pygame
from pygame.locals import *
import keyboard
import numpy as np
from math import *
import sys
width = 800
height = 500
screen=pygame.display.set_mode((width,height))
screen.fill((0,0,0))
camera = 1000
zoom = 1
step = 20
angle = 0.02
#wczytanine pliku z danymi
f = open("bryly.txt", "r")
tekst = f.read()
i = 0
tmp = None
V = []
E = []
e = False
while (i < len(tekst)):
if(tekst[i] == '\n'):
e = not(e)
if(tekst[i].isdigit()):
if(tmp == None):
tmp = 0
tmp = tmp *10 + int(tekst[i])
else:
x = tmp
tmp = None
if(x != None):
if(e):
E.append(x)
else:
V.append(x)
i+=1
verticies = []
point = []
edges = []
for i in range(len(V)):
point.append(V[i])
if(i%3 == 2):
verticies.append(point)
point = []
for i in range(len(E)):
point.append(E[i])
if(i%2 == 1):
edges.append(point)
point = []
# print(verticies)
# print(edges)
#tuuuu
number_of_blocks = int(len(edges)/12)
number_of_walls = number_of_blocks*6
def get_straight(p1, p2):
    """Return coefficients (A, B, C) of the line A*x + B*y + C = 0 through
    the 2-D points ``p1`` and ``p2``."""
    ax, ay = p1[0], p1[1]
    bx, by = p2[0], p2[1]
    return (ay - by, bx - ax, (by - ay) * ax - (bx - ax) * ay)
def is_in_straight(A, B, C, x, y):
    """True when (x, y) satisfies A*x + B*y + C = 0 within tolerance 1."""
    return abs(A * x + B * y + C) < 1
def get_x(y, A, B, C):
    """Solve A*x + B*y + C = 0 for x at the given y.

    Returns False for a horizontal line (A == 0), where x is undetermined.
    """
    if A == 0:
        return False
    return -(B * y + C) / A
def get_y(x, A, B, C):
    """Solve A*x + B*y + C = 0 for y at the given x.

    Returns False for a vertical line (B == 0), where y is undetermined.
    """
    if B == 0:
        return False
    return -(A * x + C) / B
def get_z (x1,y1,z1,x2,y2,z2,x,y):
    """Linearly interpolate depth z at (x, y) along the segment from
    (x1, y1, z1) to (x2, y2, z2).

    Interpolates over x when the segment is not vertical on screen,
    otherwise over y.  Fixed: when both endpoints project to the same
    screen point but carry different depths, the original divided by zero;
    we now fall back to z1.
    """
    if (z1 == z2):
        return z1
    if (x1 != x2):
        return z1 + (x - x1) / (x2 - x1) * (z2 - z1)
    if (y1 != y2):
        return z1 + (y - y1) / (y2 - y1) * (z2 - z1)
    # Degenerate segment: identical screen coordinates, differing depth.
    return z1
def calculate_ratio(point, min, max):
    """Return the fraction of the way ``point`` lies between ``min`` and
    ``max`` (0.0 at min, 1.0 at max).

    Fixed: returns 0.0 for a degenerate interval (min == max) instead of
    raising ZeroDivisionError -- not every call site pre-checks the width.
    (Parameter names shadow the builtins but are kept for compatibility.)
    """
    if max == min:
        return 0.0
    return (point - min) / (max - min)
def draw_linie(screen,p1,p2,index):
    """Draw one horizontal scanline segment between p1 and p2.

    The two endpoints are plotted in white; the interior is drawn one pixel
    shorter at each end (so the white edge pixels stay visible) in a colour
    chosen by the solid ``index`` (0..3).
    NOTE(review): for index > 3 ``color`` is never assigned and the final
    draw raises UnboundLocalError -- upstream only ever passes 0..3.
    """
    white = (255, 255, 255)
    pygame.draw.line(screen, white, [p1[0],p1[1]], [p1[0],p1[1]])
    pygame.draw.line(screen, white, [p2[0],p2[1]], [p2[0],p2[1]])
    if (index == 0):
        color = (255,0,0)
    elif (index == 1):
        color = (50,255,50)
    elif (index == 2):
        color = (0,100,200)
    elif (index == 3):
        color = (255,0,127)
    if(p1[0] > p2[0]):
        pygame.draw.line(screen, color, [p1[0]-1,p1[1]], [p2[0]+1,p2[1]])
    else:
        pygame.draw.line(screen, color, [p1[0]+1,p1[1]], [p2[0]-1,p2[1]])
WALL = [[0,4],[1,4],[0,1],[3,4],[0,3],[4,5],[1,5],[3,5],[0,2],[1,2],[2,3],[2,5]]
#translacje
def _shift(verticies, axis, delta):
    # Shared in-place translation: add `delta` to component `axis`
    # (0=x, 1=y, 2=z) of every vertex, then return the list.
    for vertex in verticies:
        vertex[axis] += delta
    return verticies


def translateUp(verticies):
    """Shift the whole scene by +step along y."""
    return _shift(verticies, 1, step)


def translateDown(verticies):
    """Shift the whole scene by -step along y."""
    return _shift(verticies, 1, -step)


def translateLeft(verticies):
    """Shift the whole scene by +step along x.

    NOTE(review): Left adds +step while Right subtracts -- the naming
    matches the key bindings in the main loop, kept as-is.
    """
    return _shift(verticies, 0, step)


def translateRight(verticies):
    """Shift the whole scene by -step along x."""
    return _shift(verticies, 0, -step)


def translateForward(verticies):
    """Shift the whole scene by -step along z."""
    return _shift(verticies, 2, -step)


def translateBack(verticies):
    """Shift the whole scene by +step along z."""
    return _shift(verticies, 2, step)
#obroty
def _mat_x(a):
    # 3x3 rotation matrix about the x axis by angle a (radians).
    return ([1, 0, 0],
            [0, cos(a), -sin(a)],
            [0, sin(a), cos(a)])


def _mat_y(a):
    # 3x3 rotation matrix about the y axis by angle a (radians).
    return ([cos(a), 0, sin(a)],
            [0, 1, 0],
            [-sin(a), 0, cos(a)])


def _mat_z(a):
    # 3x3 rotation matrix about the z axis by angle a (radians).
    return ([cos(a), -sin(a), 0],
            [sin(a), cos(a), 0],
            [0, 0, 1])


def _rotate(verticies, M):
    # Rotate every vertex in place about the camera position: the scene is
    # shifted by +camera along z so the point (x, y, z) sits relative to the
    # origin exactly as (x, y, z + camera) sits relative to the projection
    # centre, rotated, then shifted back.  Each list entry becomes a numpy
    # array after the first rotation.
    for i in range(len(verticies)):
        verticies[i][2] += camera
        verticies[i] = np.dot(M, verticies[i])
        verticies[i][2] -= camera
    return verticies


def rotateX(verticies):
    """Rotate the scene about the x axis by the global `angle`."""
    return _rotate(verticies, _mat_x(angle))


def rotateBackX(verticies):
    """Rotate the scene about the x axis by -`angle`."""
    return _rotate(verticies, _mat_x(-angle))


def rotateY(verticies):
    """Rotate the scene about the y axis by the global `angle`."""
    return _rotate(verticies, _mat_y(angle))


def rotateYBack(verticies):
    """Rotate the scene about the y axis by -`angle`."""
    return _rotate(verticies, _mat_y(-angle))


def rotateZ(verticies):
    """Rotate the scene about the z axis by the global `angle`."""
    return _rotate(verticies, _mat_z(angle))


def rotateZBack(verticies):
    """Rotate the scene about the z axis by -`angle`."""
    return _rotate(verticies, _mat_z(-angle))
#zoom
def enlarging(zoom):
    """Increase the zoom factor by 0.1, capped at 10."""
    return zoom + 0.1 if zoom < 10 else zoom
def reducing(zoom):
    """Decrease the zoom factor by 0.1, floored at 1."""
    return zoom - 0.1 if zoom > 1 else zoom
#rzutowanie
def projection(verticies, zoom):
    """Perspective-project the 3-D vertices onto screen coordinates.

    Uses the global `camera` distance for the perspective divide and the
    global `width`/`height` to recentre: pygame's origin is the top-left
    corner with the y axis pointing down, so y is negated and the origin
    moved to the screen centre.
    """
    projected = []
    for vertex in verticies:
        scale = camera / (camera + vertex[2])
        sx = vertex[0] * scale * zoom + width / 2
        sy = -(vertex[1] * scale * zoom) + height / 2
        projected.append((sx, sy))
    return projected
while True:
#rysowanie
screen.fill((0,0,0))
two_dimensional = projection(verticies, zoom)
#stare rysowanie
# for i in range(len(edges)):
# vertex_1 = edges[i][0]
# vertex_2 = edges[i][1]
# if(verticies[vertex_1][2] > -camera and verticies[vertex_2][2] > -camera ):
# x1 = two_dimensional[vertex_1][0]
# y1 = two_dimensional[vertex_1][1]
# x2 = two_dimensional[vertex_2][0]
# y2 = two_dimensional[vertex_2][1]
# pygame.draw.line(screen,(255,255,255),(x1,y1),(x2,y2))
#nowe rysowanie
for y in range(0,height):
points = []
distance = []
walls = []
added_distance = []
added_points = []
min = []
max = []
min_distance = []
max_distance = []
blocks = []
for i in range (number_of_blocks):
points.append(None)
distance.append(None)
added_points.append(None)
added_distance.append(None)
walls.append(None)
points[i] = []
distance[i] = []
added_points[i] = []
added_distance[i] = []
walls[i] = []
for i in range(len(edges)):
vertex_1 = edges[i][0]
vertex_2 = edges[i][1]
if(verticies[vertex_1][2] > -camera and verticies[vertex_2][2] > -camera ):
x1 = two_dimensional[vertex_1][0]
y1 = two_dimensional[vertex_1][1]
x2 = two_dimensional[vertex_2][0]
y2 = two_dimensional[vertex_2][1]
if not((y > y1 and y > y2) or (y < y1 and y < y2)):
A,B,C = get_straight(two_dimensional[vertex_1], two_dimensional[vertex_2])
x = get_x(y, A, B, C)
if(x != False):
plus = True
minus = True
x_tmp = x + 1
while(plus):
if (abs(get_y(float(x_tmp),A,B,C) - y) <0.5 ):
block_indeks = (int(i/12))
added_points[block_indeks].append([x_tmp,y])
z = get_z(x1,y1,verticies[vertex_1][2],x2,y2,verticies[vertex_2][2],x,y)
added_distance[block_indeks].append(z)
x_tmp = x_tmp + 1
if(x_tmp > x1 and x_tmp > x2):
plus = False
else:
plus = False
x_tmp = x - 1
while(minus):
if (abs(get_y(float(x_tmp),A,B,C) - y) <=0.5 ):
block_indeks = (int(i/12))
added_points[block_indeks].append([x_tmp,y])
z = get_z(x1,y1,verticies[vertex_1][2],x2,y2,verticies[vertex_2][2],x,y)
added_distance[block_indeks].append(z)
x_tmp = x_tmp - 1
if(x_tmp < x1 and x_tmp < x2):
minus = False
else:
minus = False
block_indeks = (int(i/12))
points[block_indeks].append([x,y])
z = get_z(x1,y1,verticies[vertex_1][2],x2,y2,verticies[vertex_2][2],x,y)
distance[block_indeks].append(z)
walls[block_indeks].append(WALL[i%12])
#distance[block_indeks].append((verticies[vertex_1][2]+verticies[vertex_2][2])/2)
for block in range (number_of_blocks):
for p1 in range(len(points[block])):
for p2 in range(len(points[block])):
if (p2 > p1):
if(walls[block][p1][0] == walls[block][p2][0] or walls[block][p1][1] == walls[block][p2][0] or walls[block][p1][0] == walls[block][p2][1] or walls[block][p1][1] == walls[block][p2][1]):
if(points[block][p1][0] < points[block][p2][0]):
min.append(points[block][p1][0])
min_distance.append(distance[block][p1])
max.append(points[block][p2][0])
max_distance.append(distance[block][p2])
blocks.append(block)
else:
min.append(points[block][p2][0])
min_distance.append(distance[block][p2])
max.append(points[block][p1][0])
max_distance.append(distance[block][p1])
blocks.append(block)
for i in range (len(min)):
block = blocks[i]
begin = 100000000
end = -1000000000
for j in range (len(min)):
if(i != j):
if(not(max[i] <= min[j] or min[i] >= max[j])):
if(min_distance[i] + max_distance[i] > min_distance[j] + max_distance[j]):
if(min[j] < begin):
begin = min[j]
if(max[j] > end):
end = max[j]
if(begin != 100000000 or end != -1000000000):
if (min[i] < begin):
draw_linie(screen,[min[i],y],[begin,y],block)
for p in range(len(added_points[block])):
if(added_points[block][p][0] < begin):
ratio = calculate_ratio(added_points[block][p][0], min[i], max[i])
if(added_distance[block][p] <= ratio * max_distance[i] + (1-ratio) * min_distance[i]):
pygame.draw.line(screen,(255,255,255),added_points[block][p],added_points[block][p])
if (max[i] > end):
draw_linie(screen,[end,y],[max[i],y],block)
for p in range(len(added_points[block])):
if(added_points[block][p][0] > end):
ratio = calculate_ratio(added_points[block][p][0], min[i], max[i])
if(added_distance[block][p] <= ratio * max_distance[i] + (1-ratio) * min_distance[i]):
pygame.draw.line(screen,(255,255,255),added_points[block][p],added_points[block][p])
else:
if(max[i] != -1):
draw_linie(screen,[min[i],y],[max[i],y],block)
for p in range(len(added_points[block])):
if(added_points[block][p][0] <= max[i] and added_points[block][p][0] >= min[i]):
if((max[i]-min[i]) != 0):
ratio = calculate_ratio(added_points[block][p][0], min[i], max[i])
else:
ratio = 0
if(added_distance[block][p] <= ratio * max_distance[i] + (1-ratio) * min_distance[i]):
pygame.draw.line(screen,(255,255,255),added_points[block][p],added_points[block][p])
for events in pygame.event.get():
if events.type == QUIT:
sys.exit(0)
#sprawdzanie użycia klawiatury
if keyboard.is_pressed("up arrow"):
translateUp(verticies)
if keyboard.is_pressed("down arrow"):
translateDown(verticies)
if keyboard.is_pressed("left arrow"):
translateLeft(verticies)
if keyboard.is_pressed("right arrow"):
translateRight(verticies)
if keyboard.is_pressed('r'):
translateForward(verticies)
if keyboard.is_pressed('f'):
translateBack(verticies)
if keyboard.is_pressed('w'):
rotateX(verticies)
if keyboard.is_pressed('s'):
rotateBackX(verticies)
if keyboard.is_pressed('a'):
rotateY(verticies)
if keyboard.is_pressed('d'):
rotateYBack(verticies)
if keyboard.is_pressed('q'):
rotateZ(verticies)
if keyboard.is_pressed('e'):
rotateZBack(verticies)
if keyboard.is_pressed('p'):
zoom = enlarging(zoom)
if keyboard.is_pressed('l'):
zoom = reducing(zoom)
pygame.display.flip()
pygame.time.wait(1) | [
"marcinbaranowski8899@gmail.com"
] | marcinbaranowski8899@gmail.com |
c0339fb350d2f4299dcc7b234f1dd422618bc1ba | 6218864c9b51d9f8abd8c1478660fdadf9e41911 | /mms-basket-optimizer/optimizer/MKMAPI.py | 3fb63abaed841f6799b067f0761a8e94a71cdf2c | [] | no_license | wynautt/mms | f152c482e5b6d51c99af1248bc0c54c557168aca | 879663b0ac13f4ccd1a605aaceb328f9fa6c902e | refs/heads/master | 2021-05-11T09:06:27.492564 | 2018-01-21T20:14:23 | 2018-01-21T20:14:23 | 118,061,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,654 | py | import sys
sys.path.insert(0, "libs")
import requests
import logging
import httplib
import pickle
import json
import hashlib
import os
httplib.HTTPConnection.debuglevel = 0
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.CRITICAL)
requests_log.propagate = True
stream_handler_simple = logging.StreamHandler()
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log = logging.getLogger("basic_MKMAPI")
log.setLevel(logging.DEBUG)
log.addHandler(stream_handler_simple)
from requests_oauthlib import OAuth1
API_KEYS = {
'awaken': dict(
APP_TOKEN='xxx',
APP_SECRET='xxx',
ACCESS_TOKEN='xxx',
ACCESS_SECRET='xxx'
)
}
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Iron/32.0.1750.1 Chrome/32.0.1750.1 Safari/537.36'
headers = {'User-Agent': USER_AGENT}
api_keys = API_KEYS['awaken']
APP_TOKEN = api_keys['APP_TOKEN']
APP_SECRET = api_keys['APP_SECRET']
ACCESS_TOKEN = api_keys['ACCESS_TOKEN']
ACCESS_SECRET = api_keys['ACCESS_SECRET']
MKM_API_BASE_URL = "https://www.mkmapi.eu/ws/v1.1/output.json/"


def get_full_url(*args):
    """Build a full MKM API URL by joining ``args`` as path segments."""
    return MKM_API_BASE_URL + "/".join(str(part) for part in args)
def mkm_get(key):
    """Decorator factory for MKM API GET endpoints.

    The decorated function returns URL path segments; the wrapper issues
    the OAuth1-signed GET, parses the JSON body and, when *key* is truthy,
    returns only that top-level entry of the response.
    """
    def _decorator(endpoint):
        def _wrapper(*args, **kwargs):
            url = get_full_url(*endpoint(*args, **kwargs))
            auth = OAuth1(APP_TOKEN, APP_SECRET, ACCESS_TOKEN, ACCESS_SECRET, realm=url)
            result = requests.get(url, auth=auth)
            log.debug("Getting %s with result %s" % (url, result))
            payload = json.loads(result.content)
            if key:
                return payload[key]
            return payload
        return _wrapper
    return _decorator
def save_object(obj, filename):
    """Serialize *obj* to *filename* using pickle's highest protocol."""
    with open(filename, 'wb') as output:
        # The with-block closes the file; the original's explicit
        # output.close() inside the block was redundant.
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
    """Deserialize and return the pickled object stored in *filename*."""
    # 'fh' instead of the original 'input', which shadowed the builtin.
    with open(filename, 'rb') as fh:
        obj = pickle.load(fh)
    return obj
# Thin MKM endpoint wrappers: each returns only URL path segments; the
# mkm_get decorator performs the HTTP request and JSON key extraction.
@mkm_get('product')
def get_products(name):
    # Product search by name; the trailing 1, 1, 'true' are query path
    # flags — presumably game/language/exact-match; confirm against the
    # MKM API v1.1 docs.
    return 'products', name, 1, 1, 'true'
@mkm_get('product')
def get_product(id):
    # Fetch a single product by its numeric id.
    return 'product', id
@mkm_get('article')
def get_articles(id):
    # List the articles (sale offers) for the product with the given id.
    return 'articles', id
def refresh_articles(articles_list):
    # Fetch fresh product + article data from the MKM API for every
    # (card name, set name, quantity) triple in articles_list.
    # Returns {name: {'pretty_name': ..., 'articles': ..., 'quant': ...}}.
    log.info("Refresing articles from: %s" % articles_list)
    articles_map = {}
    for name, sets, quant in articles_list:
        # Crude URL escaping of the search term (space, comma, apostrophe).
        mkm_search_name = name.replace(' ', '+').replace(',', '%2C').replace("'", "")
        products = get_products(mkm_search_name)
        products = [x for x in products if x['expansion'] == sets]
        #TODO save data from all sets and filter when fetching from filesystem
        # NOTE(review): assumes the API returned at least one product in the
        # requested set — IndexError otherwise; confirm upstream validation.
        p = products[0]
        p_name = p['name']['1']['productName']
        p_id = p['idProduct']
        articles = get_articles(p_id)
        articles_map[name] = dict(pretty_name=p_name, articles=articles, quant=quant)
    return articles_map
def get_input_data(articles_list, refresh=False):
    # Return article data for articles_list, cached on disk under
    # input/<md5 of the sorted (name, set) pairs>.pkl; hit the network
    # only on a cache miss or when refresh=True.
    articles_list_hashable = map(lambda x: (x[0], x[1]), articles_list)
    articles_list_hashable = sorted(articles_list_hashable)
    # NOTE: Python 2 code — md5() is fed the str from json.dumps directly;
    # under Python 3 this would need an explicit .encode().
    filename = hashlib.md5(json.dumps(articles_list_hashable, sort_keys=True)).hexdigest()
    if refresh or not os.path.isfile("input/%s.pkl" % filename):
        r = refresh_articles(articles_list)
        save_object(r, "input/%s.pkl" % filename)
        return r
    log.info("Getting articles from %s" % filename)
    r = load_object("input/%s.pkl" % filename)
    # Quantities always come from the caller, even on a cache hit
    # (quantity is not part of the cache key).
    for name, sets, quant in articles_list:
        r[name]['quant'] = quant
    return r
| [
"wynautt@undefined.com"
] | wynautt@undefined.com |
95c11fc532001a60bcf89f67ab4b82459082e79d | 21bb5da0c2bbb840edd893539acffa35b04c614c | /tvshow_folder.py | e8039b06f46f6c8ecb4597c79343e1055c48a636 | [] | no_license | guiszk/tvshow | adef03e7859cb4efc46f028d2a6041797de8464d | 85f4b4ecfcb06bf8ff77043589e91f277dacdd05 | refs/heads/master | 2023-03-04T23:11:07.523762 | 2021-02-07T02:40:29 | 2021-02-07T02:40:29 | 337,578,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | #!/usr/bin/env python3.7
import os, sys, re, requests
from bs4 import BeautifulSoup as bs
# CLI: tvshow_folder.py <series-slug-on-thetvdb> <path> <output-name-prefix>
# Renames episode files in <path> to <prefix>.SxxEyy.<Episode.Title>.<ext>
# using episode titles scraped from thetvdb.com.
if(len(sys.argv) != 4):
    sys.stderr.write("{0} <name> <path> <*name>\n".format(sys.argv[0]))
    sys.exit(1)
if not (os.path.isdir(sys.argv[2])):
    sys.stderr.write("<path> must be a directory\n")
    sys.exit(1)
# NOTE(review): 'allowed' is defined but never used to filter files.
allowed = [".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpg", ".mpeg"]
sname = sys.argv[3]
pathdir = os.path.dirname(sys.argv[2])
files = [f for f in os.listdir(pathdir) if not f.startswith('.')]
# Extension taken from the first listed file; assumes all files share it.
ext = files[0].split(".")[::-1][0]
# NOTE(review): this loop only keeps the season of the LAST matching file;
# mixed-season folders would fetch the wrong season page.
for f in files:
    pindex = re.search(r"[sS][0-9]+[eE][0-9]+", f)
    if(pindex != None):
        pathindex = pindex.group(0)
        pathseason = re.search(r"(?<=[sS])[0-9]+", pathindex).group(0)
url = 'https://www.thetvdb.com/series/' + sys.argv[1] + '/seasons/official/' + str(int(pathseason))
page = requests.get(url)
soup = bs(page.text, 'html.parser')
table = soup.find(class_='table table-condensed table-bordered')
eptable = table.find_all('tr')
nametable = table.find_all('a')
index = []
name = []
assoc = {}
n=0
# Collect SxxEyy codes from the table rows...
for i in eptable:
    s = re.search(r"[sS][0-9]+[eE][0-9]+", str(i).strip())
    if(s != None):
        index.append(s.group(0))
n = 0
# ...and episode titles from the table links, then pair them positionally.
for i in nametable:
    name.append(i.contents[0].strip())
for i,j in zip(index,name):
    assoc[i] = j
# Rename every file whose name contains an SxxEyy code.
for f in files:
    f = pathdir + "/" + str(f)
    pindex = re.search(r"[sS][0-9]+[eE][0-9]+", f)
    if(pindex != None):
        pathindex = pindex.group(0).upper()
        pathseason = re.search(r"(?<=[sS])[0-9]+", f).group(0)
        newname = str(pathdir) + "/" + str(sname) + "." + str(pathindex) + "." + assoc[pathindex].replace(" ", ".") + "." + ext
        os.rename(f, newname)
| [
"guiszk@protonmail.com"
] | guiszk@protonmail.com |
391a306f78fe5c96c880603c95534afa317eb828 | 874f8db726d5ce5da971dbd54aac58f0b3176d78 | /aa通用的工具类或方法/一个通用的mongodb类.py | 688e8d6c4c304fd0c6613395dc49c4fed7d13fcf | [] | no_license | Social-Engineering-OrigData/python | a8442ab5b3a772ddfc568eb5e386b11074c5bf93 | 6dde78f75e2a3306bccdc0085a44751cf2b901ca | refs/heads/master | 2021-09-09T12:56:09.781127 | 2018-03-16T09:34:17 | 2018-03-16T09:34:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@contact: wersonliugmail.com
@File : 一个通用的mongodb类.py
'''
from pymongo import MongoClient
"""
在爬虫存数据时使用,不需要事先建立数据库,直接存字典
"""
class MyMongo:
    """Minimal MongoDB helper for crawlers: insert dicts directly, without
    pre-creating the database or collection."""
    def __init__(self, dbname, colname):
        """
        :param dbname: name of the database to use
        :param colname: name of the collection (table) to use
        """
        # Adjust host/port here if MongoDB runs elsewhere.
        self.host = "127.0.0.1"
        self.port = 27017
        # self.dbname = dbname
        # self.colname = colname
        self.client = MongoClient(host=self.host, port=self.port)
        self.db = self.client[dbname]
        self.col = self.db[colname]
    def process_data(self, data):
        # NOTE(review): Collection.insert() is removed in modern pymongo;
        # insert_one()/insert_many() is the current API — check the
        # installed pymongo version.
        self.col.insert(data)
        print("成功插入%s" % data)
    def close_mongo(self):
        # Release the client's connection pool.
        self.client.close()
    # Other CRUD operations go here.
# Demo (runs on import): insert one document and close the client.
my = MyMongo("wnagyi", "info")
my.process_data({"姓名": "刘伟", "工资": 1800})
my.close_mongo()
| [
"wersonliu@gmail.com"
] | wersonliu@gmail.com |
55ebb1748fdf42a82be27411918d1825e97a3dba | 3a88c807b9343d7e609edf6c5c77386ff258af8c | /day24/day24.py | 8fd81017776ae4f24962663b96e1f9a56adfebda | [] | no_license | cdmaher/adv_of_code_2019 | 0e9d66fd852207744d5da1ce8c6d6a9e81af71dd | 4051daeda28d64be771bce312355b3a92bffa962 | refs/heads/master | 2022-12-17T06:39:37.570316 | 2020-09-18T18:47:02 | 2020-09-18T18:47:02 | 296,703,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,525 | py | import sys
import copy
import math
from functools import reduce
from sets import Set
# Read the puzzle grid from the file named on the command line into the
# middle level (index 150) of a stack of 300 empty 5x5 levels:
# erisArea[depth][row][col], '.' = empty, '#' = bug.  (Python 2 script.)
file = open(sys.argv[1], 'r')
lines = file.readlines()
erisArea = [[['.' for i in range(5)] for j in range(5)]
            for depth in range(300)]
lineN = 0
firstArea = erisArea[150]
for line in lines:
    splitL = list(line.rstrip('\r\n'))
    splitN = 0
    for cell in splitL:
        firstArea[lineN][splitN] = cell
        splitN += 1
    lineN += 1
layouts = {}
def printArea(area):
    # Print one grid, one row per line (Python 2 print statements).
    print ''
    for i in range(0, len(area)):
        for j in range(0, len(area[i])):
            print area[i][j],
        print ''
def printDimensionRange(dimensions, start, end):
    # Print levels start..end inclusive, labelling each depth relative to
    # the midpoint of the printed range.
    mid = (start + end) / 2
    for i in range(start, end + 1):
        print 'Depth ' + str(i - mid)
        printArea(dimensions[i])
        print '\n'
def numBugsAdjacent(area, pos):
    # Count '#' cells orthogonally adjacent to pos=(row, col) on a flat
    # (non-recursive) grid; out-of-bounds neighbours count as empty.
    numBugs = 0
    if pos[0] - 1 >= 0 and area[pos[0] - 1][pos[1]] == '#':
        numBugs += 1
    if pos[0] + 1 < len(area) and area[pos[0] + 1][pos[1]] == '#':
        numBugs += 1
    if pos[1] + 1 < len(area[0]) and area[pos[0]][pos[1] + 1] == '#':
        numBugs += 1
    if pos[1] - 1 >= 0 and area[pos[0]][pos[1] - 1] == '#':
        numBugs += 1
    return numBugs
def runGeneration(area):
    # One life step on a flat grid: a bug ('#') dies unless it has exactly
    # one adjacent bug; an empty cell becomes infested with 1 or 2.
    newArea = copy.deepcopy(area)
    for i in range(0, len(area)):
        for j in range(0, len(area[0])):
            adjacent = numBugsAdjacent(area, (i, j))
            if area[i][j] == '#' and adjacent != 1:
                newArea[i][j] = '.'
            elif area[i][j] == '.' and (adjacent == 1 or adjacent == 2):
                newArea[i][j] = '#'
    return newArea
def numBugsAdjacentDimensions(dimensions, pos):
    # Count adjacent bugs for pos=(depth, row, col) on the recursive grid.
    # Stepping onto the centre cell (2,2) descends into the matching edge
    # row/column of the inner level (depth+1, five cells); stepping off the
    # 5x5 edge ascends to the cell next to the centre of the outer level
    # (depth-1).  (Debug print code from the original has been removed.)
    numBugs = 0
    # Neighbour above.
    if pos[1] - 1 == 2 and pos[2] == 2:
        for i in range(0, 5):
            numBugs += 1 if dimensions[pos[0] + 1][4][i] == '#' else 0
    elif pos[1] - 1 >= 0 and dimensions[pos[0]][pos[1] - 1][pos[2]] == '#':
        numBugs += 1
    elif pos[1] - 1 < 0 and dimensions[pos[0] - 1][1][2] == '#':
        numBugs += 1
    # Neighbour below.
    if pos[1] + 1 == 2 and pos[2] == 2:
        for i in range(0, 5):
            numBugs += 1 if dimensions[pos[0] + 1][0][i] == '#' else 0
    elif pos[1] + 1 < len(
            dimensions[0]) and dimensions[pos[0]][pos[1] + 1][pos[2]] == '#':
        numBugs += 1
    elif pos[1] + 1 >= len(
            dimensions[0]) and dimensions[pos[0] - 1][3][2] == '#':
        numBugs += 1
    # Neighbour to the left.
    if pos[2] - 1 == 2 and pos[1] == 2:
        for i in range(0, 5):
            numBugs += 1 if dimensions[pos[0] + 1][i][4] == '#' else 0
    elif pos[2] - 1 >= 0 and dimensions[pos[0]][pos[1]][pos[2] - 1] == '#':
        numBugs += 1
    elif pos[2] - 1 < 0 and dimensions[pos[0] - 1][2][1] == '#':
        numBugs += 1
    # Neighbour to the right.
    if pos[2] + 1 == 2 and pos[1] == 2:
        for i in range(0, 5):
            numBugs += 1 if dimensions[pos[0] + 1][i][0] == '#' else 0
    elif pos[2] + 1 < len(
            dimensions[0][0]) and dimensions[pos[0]][pos[1]][pos[2]
                                                            + 1] == '#':
        numBugs += 1
    elif pos[2] + 1 >= len(
            dimensions[0][0]) and dimensions[pos[0] - 1][2][3] == '#':
        numBugs += 1
    return numBugs
def runGenerationDimension(dimensions):
    # One life step across all levels of the recursive grid.  The first and
    # last levels are left untouched as buffer space, and the centre cell
    # (2,2) is skipped — it represents the nested inner grid.
    newDims = copy.deepcopy(dimensions)
    for dim in range(1, len(dimensions) - 1):
        area = dimensions[dim]
        newArea = newDims[dim]
        for i in range(0, len(area)):
            for j in range(0, len(area[0])):
                if i == 2 and j == 2:
                    continue
                adjacent = numBugsAdjacentDimensions(dimensions, (dim, i, j))
                if area[i][j] == '#' and adjacent != 1:
                    newArea[i][j] = '.'
                elif area[i][j] == '.' and (adjacent == 1 or adjacent == 2):
                    newArea[i][j] = '#'
    return newDims
def getBioRating(area):
    # Biodiversity rating of a flat grid: sum of 2**(5*row + col) over
    # every infested cell.
    rating = 0
    for i in range(0, len(area)):
        for j in range(0, len(area[i])):
            if area[i][j] == '#':
                rating += pow(2, i * 5 + j)
    return rating
def countAllBugs(dimensions):
    # Total number of '#' cells across every level of the recursive grid.
    bugs = 0
    for dim in range(0, len(dimensions)):
        for i in range(0, len(dimensions[dim])):
            for j in range(0, len(dimensions[dim][i])):
                if dimensions[dim][i][j] == '#':
                    bugs += 1
    return bugs
# Part 2: run 200 generations on the recursive grid, dump the levels around
# the middle for inspection, and print the total bug count.
count = 200
minute = 0
printDimensionRange(erisArea, 145, 155)
while minute < count:
    erisArea = runGenerationDimension(erisArea)
    minute += 1
printDimensionRange(erisArea, 145, 155)
print 'CHEck 0'
printArea(erisArea[1])
print 'CHEck last'
printArea(erisArea[298])
print 'ANS: ' + str(countAllBugs(erisArea))
# Part 1 (flat grid, first repeated biodiversity rating via runGeneration/
# getBioRating/layouts) was left here commented out in the original; the
# dead code has been dropped for clarity.
| [
"cdmaher@fb.com"
] | cdmaher@fb.com |
b663d23bffb53cf3aed33855ca26ce68dc91cab8 | e9b513db9bfcd9cbc8e59b3bc18498d03fbc9a3c | /preprocessing.py | 9cfa7ba7e590b24951a769d5e694f9684e250c8a | [] | no_license | ruirae123/appliedML-group_project | 97398107bda83604560b562b598a320079c5ca1d | 9c56c31403854a67229f8fe8711d22addc657776 | refs/heads/main | 2023-04-21T14:05:41.315758 | 2021-05-10T23:24:42 | 2021-05-10T23:24:42 | 352,131,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | import ast
import os
import csv
import argparse
import constant
import pickle
import numpy as np
def _commandline_parser():
"""Commandline parser."""
parser = argparse.ArgumentParser()
parser.add_argument('--input_filename', type=str, default='framingham.csv')
parser.add_argument('--num_evals', type=int, default=500)
return parser
def write_csv(filepath, column_names, data):
    """Write *column_names* as a header row followed by one CSV row per
    entry of *data* (a 2-D array-like with .shape and per-row .tolist(),
    e.g. a numpy array) to *filepath*."""
    # newline='' is required by the csv module so the writer controls line
    # endings; without it Windows output gets blank lines between rows.
    with open(filepath, 'w', newline='') as fd:
        CSVwriter = csv.writer(fd, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        CSVwriter.writerow(column_names)
        for i in range(data.shape[0]):
            CSVwriter.writerow(data[i].tolist())
def _curate_data(data):
"""Removing rows that contains NA."""
new_data = []
for row in data:
flag = True
new_row = []
for col in row:
if col == 'NA':
flag = False
break
else:
new_row.append(ast.literal_eval(col))
if flag:
new_data.append(new_row)
return np.asarray(new_data)
if __name__ == '__main__':
    parser = _commandline_parser()
    args = parser.parse_args()
    # Load the raw CSV (header + data rows) from the raw-data directory.
    input_filepath = os.path.join(constant.raw_data_path, args.input_filename)
    with open(input_filepath, 'r') as fd:
        raw_data = list(csv.reader(fd))
    # The last column is dropped from both the header and the data.
    column_names = raw_data[0][:-1]
    data = _curate_data(raw_data[1:])[:, :-1]
    N, D = data.shape
    print('Original: \t{} data points.'.format(len(raw_data)))
    print('Remove NA: \t{} data points ({:.2f}%).'.format(N, N * 100.0 / len(raw_data)))
    # Fixed seed makes the train/val/test split reproducible; do not
    # reorder these RNG calls or the split changes.
    np.random.seed(199)
    data_ids = np.random.permutation(range(N))
    # Last 2*num_evals points are held out: num_evals each for val/test.
    train_ids = data_ids[:-args.num_evals*2]
    val_ids = data_ids[-args.num_evals*2:-args.num_evals]
    test_ids = data_ids[-args.num_evals:]
    train_data = np.stack([data[idx] for idx in train_ids])
    val_data = np.stack([data[idx] for idx in val_ids])
    test_data = np.stack([data[idx] for idx in test_ids])
    # Persist the whole split as one pickled bundle...
    output_filepath = os.path.join(constant.processed_data_path, args.input_filename + '.pkl')
    with open(output_filepath, 'wb') as fd:
        pickle.dump(
            dict(
                column_names=column_names,
                train_data=train_data,
                val_data=val_data,
                test_data=test_data), fd)
    # ...and additionally as one CSV per split.
    output_dirpath = os.path.join(constant.processed_data_path, args.input_filename.split('.')[0])
    if not os.path.exists(output_dirpath):
        os.makedirs(output_dirpath)
    # NOTE(review): '.cvs' looks like a typo for '.csv' — confirm before
    # changing, downstream readers may expect the current names.
    output_data_loops = [
        (train_data, 'train_data.cvs'),
        (val_data, 'val_data.cvs'),
        (test_data, 'test_data.cvs'),
    ]
    for data, data_filepath in output_data_loops:
        write_csv(os.path.join(output_dirpath, data_filepath), column_names, data)
| [
"tianqi.kiko.zhou@gmail.com"
] | tianqi.kiko.zhou@gmail.com |
93059852d29c8eae57585fb1868438b234082be1 | 18aedf3de6badbdab79c7c1de632f7cc75b6540a | /livedata/forms.py | b7c03b74d8cc499c0b7a81f4ce97467071619d0e | [] | no_license | nrjadkry/covid19 | e4fada0ba1fa12088a8aa4a6594298d774174617 | 1f165a0820af9b3fa584aa2cbfade582801401c9 | refs/heads/master | 2022-09-05T12:03:53.675421 | 2020-05-27T11:00:06 | 2020-05-27T11:00:06 | 266,482,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django import forms
class CovidForm(forms.Form):
    """Form with a single required free-text 'country' field."""
    country=forms.CharField(required=True)
| [
"nrjadkry@gmail.com"
] | nrjadkry@gmail.com |
5c97cdd91470bad1c6f42403f56fa17b169b7aa4 | e50a4820db6f5230c852ba3ef1b7fd12bbce9aac | /file/favority_number.py | 34448be22d245de0db738ce9497cfbca4f9d10a2 | [] | no_license | LeoSilvaGomes/pythonPlayground | 078773b6a3cf67e0e87d68c8c008733e40a29261 | bfb65f5cf4d60a3c1240be028a1d7ece3f127d77 | refs/heads/master | 2022-04-24T02:32:36.311181 | 2020-04-29T02:32:13 | 2020-04-29T02:32:13 | 259,804,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | import json
def write_number():
    '''Prompt the user for their favourite number and store it as JSON.'''
    # NOTE(review): input() returns a str, so the file stores the raw
    # string (e.g. "7"), not a number — convert with int() if a number
    # is actually wanted.
    number = input("Tell me your favority number: ")
    filename = 'favority_number.json'
    with open(filename, 'w') as f_obj:
        json.dump(number, f_obj) | [
"leonardodasigomes@gmail.com"
] | leonardodasigomes@gmail.com |
a7f70dfb73aff910024e9703786cfdab163c284e | 9231d5ab3c99f328f6134df4982e7910e1db69b2 | /gui/wx/panel.py | 06cef3a5ef40d3c28afdfa79c6d64478a1a7b114 | [] | no_license | liviaerxin/python-tutorials | 6a908a93b894b29022bba15bac345e6594665454 | 5cb84861d9d616973637312711e23c5eb44f4fb5 | refs/heads/master | 2021-02-21T11:50:18.418465 | 2020-03-06T07:55:08 | 2020-03-06T07:55:08 | 245,357,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | import wx
class ExamplePanel(wx.Panel):
    """Demo wx panel: a form (text, combobox, checkbox, radio box, button)
    next to a read-only log that echoes every widget event."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # create some sizers
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        grid = wx.GridBagSizer(hgap=5, vgap=5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.quote = wx.StaticText(self, label="Your quote: ")
        grid.Add(self.quote, pos=(0,0))
        # A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it
        self.logger = wx.TextCtrl(self, size=(200,300), style=wx.TE_MULTILINE | wx.TE_READONLY)
        # A button
        self.button =wx.Button(self, label="Save")
        self.Bind(wx.EVT_BUTTON, self.OnClick,self.button)
        # the edit control - one line version.
        self.lblname = wx.StaticText(self, label="Your name :")
        grid.Add(self.lblname, pos=(1,0))
        self.editname = wx.TextCtrl(self, value="Enter here your name", size=(140,-1))
        grid.Add(self.editname, pos=(1,1))
        self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
        self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
        # the combobox Control
        self.sampleList = ['friends', 'advertising', 'web search', 'Yellow Pages']
        self.lblhear = wx.StaticText(self, label="How did you hear from us ?")
        grid.Add(self.lblhear, pos=(3,0))
        self.edithear = wx.ComboBox(self, size=(95, -1), choices=self.sampleList, style=wx.CB_DROPDOWN)
        grid.Add(self.edithear, pos=(3,1))
        self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
        self.Bind(wx.EVT_TEXT, self.EvtText,self.edithear)
        # add a spacer to the sizer
        grid.Add((10, 40), pos=(2,0))
        # Checkbox
        self.insure = wx.CheckBox(self, label="Do you want Insured Shipment ?")
        grid.Add(self.insure, pos=(4,0), span=(1,2), flag=wx.BOTTOM, border=5)
        self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
        # Radio Boxes
        radioList = ['blue', 'red', 'yellow', 'orange', 'green', 'purple', 'navy blue', 'black', 'gray']
        rb = wx.RadioBox(self, label="What color would you like ?", pos=(20, 210), choices=radioList, majorDimension=3,
                         style=wx.RA_SPECIFY_COLS)
        grid.Add(rb, pos=(5,0), span=(1,2))
        self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
        # Lay out the form grid and the logger side by side.
        hSizer.Add(grid, 0, wx.ALL, 5)
        hSizer.Add(self.logger)
        mainSizer.Add(hSizer, 0, wx.ALL, 5)
        mainSizer.Add(self.button, 0, wx.CENTER)
        self.SetSizerAndFit(mainSizer)
    # Event handlers: each appends a line describing the event to the log.
    def EvtRadioBox(self, event):
        self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
    def EvtComboBox(self, event):
        self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
    def OnClick(self,event):
        self.logger.AppendText(" Click on object with Id %d\n" %event.GetId())
    def EvtText(self, event):
        self.logger.AppendText('EvtText: %s\n' % event.GetString())
    def EvtChar(self, event):
        self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
        # Skip() lets the key event continue to default processing.
        event.Skip()
    def EvtCheckBox(self, event):
        self.logger.AppendText('EvtCheckBox: %d\n' % event.IsChecked())
# Build the app, host the panel in a frame, and enter the wx event loop.
app = wx.App(False)
frame = wx.Frame(None)
panel = ExamplePanel(frame)
frame.Show()
app.MainLoop() | [
"1yue8haogaoqi@gmail.com"
] | 1yue8haogaoqi@gmail.com |
01dbe7ced71ab19257a6164a20f22849cf0e5720 | 20cf7f9fdc5a9fa32a7332017a46dcf3d15dd3ad | /Lesson_10 (Classes)/Task3.py | 7777218dea95f3a1efdde46abd8f6ea895656f8f | [] | no_license | AlexSmolko/bitroot_repo | ee426e266946b5495f390817f305ead1901f6d0d | f8e767167f7a5dfe71d3e84ceba54fda54ba48cd | refs/heads/master | 2022-11-21T18:12:33.207560 | 2020-07-01T10:10:16 | 2020-07-01T10:10:16 | 254,702,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # TV controller
# Channel names in channel-number order (the controller is 1-based).
Channels = ["Discovery", "A1", "EuroSport", "MTV"]
class TVController:
    """Simple TV controller over an ordered list of channel names.

    Channels are addressed with 1-based numbers; next/previous wrap
    around circularly.  Display methods print the channel name to stdout
    (and return None, mirroring the original contract).
    """

    def __init__(self, channels):
        self.channels = channels      # ordered channel names
        self.num_of_channel = 1       # 1-based index of the current channel

    def first_channel(self):
        """Print the first channel's name."""
        print(self.channels[0])

    def last_channel(self):
        """Print the last channel's name."""
        print(self.channels[-1])

    def turn_channel(self, num_of_channel):
        """Switch to the given 1-based channel number and print it."""
        self.num_of_channel = num_of_channel
        return self.show_channel()

    def next_channel(self):
        """Step forward one channel, wrapping from the last to the first."""
        self.num_of_channel += 1
        if self.num_of_channel > len(self.channels):
            self.num_of_channel = 1
        return self.show_channel()

    def previous_channel(self):
        """Step back one channel, wrapping from the first to the last.

        Bug fix: the original reset the index to len(channels) - 1 when
        the decrement landed on 1 (skipping a channel) and relied on
        Python's negative indexing when the index reached 0.
        """
        self.num_of_channel -= 1
        if self.num_of_channel < 1:
            self.num_of_channel = len(self.channels)
        return self.show_channel()

    def current_channel(self):
        """Print the current channel's name."""
        return self.show_channel()

    def is_exist(self, name_of_channel):
        """Return "Yes" if the named channel exists, otherwise "No"."""
        return "Yes" if name_of_channel in self.channels else "No"

    def show_channel(self):
        """Print the current channel's name (returns None)."""
        return print(self.channels[self.num_of_channel - 1])

    def __repr__(self):
        # Bug fix: __repr__ must return a str; the original returned the
        # None from show_channel(), making repr() raise TypeError.
        return self.channels[self.num_of_channel - 1]
# Demo run: exercise the controller (prints channel names to stdout).
tv_controller = TVController(Channels)
tv_controller.first_channel()
tv_controller.last_channel()
tv_controller.turn_channel(4)
tv_controller.next_channel()
tv_controller.previous_channel()
tv_controller.current_channel()
print(tv_controller.is_exist("A1"))
print(tv_controller.is_exist("UT-2"))
| [
"34611935+Aresnadro@users.noreply.github.com"
] | 34611935+Aresnadro@users.noreply.github.com |
36fc39d1390c434d3e34fec2e3c7d1f93053f360 | 7ae140103459547a514e11b0f51adec173843dce | /Administrador.py | d14f276cb56d09500a14e3d92126601375ae9239 | [] | no_license | Mintic-Team7-Inventario/Inventario_App | 94af71fd874ca8145a536778b6800502ce434b88 | 548bf122be82809359de5ede343a09b02fd9d0ab | refs/heads/master | 2023-08-20T02:34:50.931717 | 2021-10-28T15:16:17 | 2021-10-28T15:16:17 | 416,175,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py |
from UsuarioFinal import UsuarioFinal
from db import get_db
from db import close_db
class Administrador(UsuarioFinal):
    """Administrator user: a UsuarioFinal that can look up other users."""

    def __init__(self, codigo, name, apellido, contraseña, celular, email, rol):
        super().__init__(codigo, name, apellido, contraseña, celular, email, rol)

    def buscarUsuario(self, label, value):
        """Search users in the Usuario table.

        :param label: SQL condition text appended after WHERE, containing
            placeholders for the bound parameters.
        :param value: iterable of parameter values for those placeholders.
        :return: list of matching rows, or None on error.
        """
        try:
            db = get_db()
            cursor = db.cursor()
            # SECURITY: *label* is concatenated into the SQL statement and
            # must never contain untrusted input — only the items in
            # *value* are passed as bound parameters.
            cursor.execute(
                "SELECT Codigo, Nombre, Apellido, Celular, Email, Rol FROM Usuario WHERE" + label,
                tuple(value))
            query = cursor.fetchall()
            close_db()
            return query
        except Exception as ex:
            # Best-effort error reporting, as in the original; the caller
            # receives None when the lookup fails.
            print(ex)
            return None
| [
"egarciasolano@outlook.com"
] | egarciasolano@outlook.com |
c084b8ef3203190d108bbb035c3c5f0dd2efccc3 | 48bccafbc774f82a7598ec514ccadf8962ec06d0 | /10/jacksyntax.py | 8415ac34c62438730f1d73e67bce9216533a304b | [] | no_license | waddlepon/nand2tetris | 9378be45b05205b94169b47aa5efa1c84846668d | 963fc5be3c1192f189b55db63a8a5466aca8dd5e | refs/heads/master | 2020-03-30T11:47:17.884896 | 2018-10-02T03:01:16 | 2018-10-02T03:01:16 | 151,192,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,427 | py | import argparse
import tempfile
import os
from enum import Enum
def isInt(s):
    """Return True if *s* parses as a base-10 integer."""
    try:
        int(s)
        return True
    except ValueError:
        return False

class TokenType(Enum):
    """Lexical categories of Jack tokens."""
    KEYWORD = 0
    SYMBOL = 1
    IDENTIFIER = 2
    INT_CONST = 3
    STRING_CONST = 4

class JackTokenizer:
    """Tokenizer for the Jack language (nand2tetris project 10).

    Strips // and /* */ comments, then splits the remaining text into
    keyword / symbol / identifier / integer / string tokens.

    Fixes over the original:
    * a token running to the very end of the input is no longer dropped;
    * writeTokens() escapes <, > and & so the emitted XML is well formed
      (symbol() itself still returns the raw character, which the
      CompilationEngine relies on for its operator comparisons).
    """
    SYMBOLS = ['{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~']
    KEYWORDS = [
        'class', 'constructor', 'function', 'method', 'field', 'static',
        'var', 'int', 'char', 'boolean', 'void', 'true', 'false', 'null',
        'this', 'let', 'do', 'if', 'else', 'while', 'return']
    # Characters whose raw form would corrupt the XML token file.
    XML_ESCAPES = {'<': '&lt;', '>': '&gt;', '&': '&amp;'}

    def __init__(self, source_file):
        """Read *source_file*, strip comments, and tokenize its contents."""
        self.tokens = []
        lines = []
        with open(source_file) as f:
            in_comment = False
            for line in f:
                # Collapse whitespace runs to single spaces.
                l = ' '.join(line.split())
                # Drop // line comments.
                comment_position = l.find("//")
                if comment_position != -1:
                    l = l[0:comment_position]
                if in_comment == False:
                    start_comment_position = l.find("/*")
                    if start_comment_position != -1:
                        in_comment = True
                if in_comment:
                    end_comment_position = l.find("*/")
                    if end_comment_position != -1:
                        # Keep whatever follows the closing */.
                        l = l[end_comment_position+2:]
                        in_comment = False
                    else:
                        start_comment_position = l.find("/*")
                        if start_comment_position != -1:
                            l = l[0:start_comment_position]
                        else:
                            l = ""
                lines.append(l)
        lines = filter(None, lines)
        self.tokenizeLines('\n'.join(lines))

    def addToken(self, token):
        """Record *token* unless it is empty or pure whitespace."""
        if token.strip() != "":
            self.tokens.append(token)

    def tokenizeLines(self, lines):
        """Split the comment-free source text into tokens.

        Lexer states:
        0 = searching, 1 = inside a string, 2 = inside an int,
        3 = inside an identifier/keyword.
        """
        state = 0
        current_token = ""
        for c in lines:
            if state == 1:
                # Inside a string constant: consume until the closing quote.
                if c == '"':
                    state = 0
                    self.addToken(current_token + c)
                    current_token = ""
                elif c == '\n':
                    print("Your string has a newline in it")
                    exit()
                else:
                    current_token += c
                continue
            if c == ' ' or c == '\n':
                # Whitespace terminates the current token.
                state = 0
                self.addToken(current_token)
                current_token = ""
                continue
            if c in self.SYMBOLS:
                # A symbol both terminates the current token and is one itself.
                state = 0
                self.addToken(current_token)
                self.addToken(c)
                current_token = ""
                continue
            if state == 3:
                current_token += c
                continue
            if c == "\"":
                if state != 0:
                    print("Your int is touching the string")
                    exit()
                state = 1
                self.addToken(current_token)
                current_token = c
                continue
            if state == 2:
                if not c.isdigit():
                    print("Your int isn't an int")
                    exit()
                current_token += c
                continue
            if c.isdigit():
                state = 2
                self.addToken(current_token)
                current_token = ""
            # Intentionally no continue above (mirrors the original): a
            # digit also falls through here, so numbers accumulate in the
            # identifier state and are classified later by tokenType().
            state = 3
            current_token += c
        # Fix: flush a token that runs to the end of the input; the original
        # silently dropped it when the source did not end with a delimiter.
        self.addToken(current_token)

    def hasMoreTokens(self):
        """True while unconsumed tokens remain."""
        return len(self.tokens) > 0

    def advance(self):
        """Pop the next token into self.current_token."""
        if self.hasMoreTokens():
            self.current_token = self.tokens.pop(0)

    def tokenType(self):
        """Classify self.current_token as a TokenType."""
        if self.current_token[0] == '"' and self.current_token[-1] == '"':
            return TokenType.STRING_CONST
        if self.current_token in self.SYMBOLS:
            return TokenType.SYMBOL
        if isInt(self.current_token):
            return TokenType.INT_CONST
        if self.current_token in self.KEYWORDS:
            return TokenType.KEYWORD
        return TokenType.IDENTIFIER

    def keyword(self):
        """Current token as a keyword, or None if it is not one."""
        if self.tokenType() != TokenType.KEYWORD:
            return
        return self.current_token

    def symbol(self):
        """Current token as a raw symbol character, or None.

        No XML escaping here: the CompilationEngine compares the result
        against literal characters such as '<' and '&'.  (The original's
        three branches for >, < and & returned the same raw character and
        have been removed as dead code.)
        """
        if self.tokenType() != TokenType.SYMBOL:
            return
        return self.current_token

    def identifier(self):
        """Current token as an identifier, or None if it is not one."""
        if self.tokenType() != TokenType.IDENTIFIER:
            return
        return self.current_token

    def intVal(self):
        """Current token as an int, or None if it is not an integer constant."""
        if self.tokenType() != TokenType.INT_CONST:
            return
        return int(self.current_token)

    def stringVal(self):
        """Current string constant without its surrounding quotes, or None."""
        if self.tokenType() != TokenType.STRING_CONST:
            return
        return self.current_token[1:-1]

    def writeTokens(self, output_file):
        """Consume all tokens and write them to *output_file* as a flat
        <tokens> XML file; returns the (closed) file object as before."""
        with open(output_file, "w") as f:
            f.write("<tokens>\n")
            while self.hasMoreTokens():
                self.advance()
                token_type = self.tokenType()
                if token_type == TokenType.KEYWORD:
                    f.write("<keyword> " + self.keyword() + " </keyword>\n")
                elif token_type == TokenType.SYMBOL:
                    # Fix: escape XML-special symbols (<, >, &) so the
                    # emitted token file is well-formed XML.
                    sym = self.symbol()
                    f.write("<symbol> " + self.XML_ESCAPES.get(sym, sym) + " </symbol>\n")
                elif token_type == TokenType.IDENTIFIER:
                    f.write("<identifier> " + self.identifier() + " </identifier>\n")
                elif token_type == TokenType.INT_CONST:
                    f.write("<integerConstant> " + str(self.intVal()) + " </integerConstant>\n")
                elif token_type == TokenType.STRING_CONST:
                    f.write("<stringConstant> " + self.stringVal() + " </stringConstant>\n")
            f.write("</tokens>")
        return f
class CompilationEngine:
OPERATORS = ["+", "-", "*", "/", "&", "|", "<", ">", "=",]
def __init__(self, tokenizer, output_file_name):
self.output_file = open(output_file_name, "w")
self.tokenizer = tokenizer
self.indentation = 0
self.compileClass()
self.output_file.close()
def write(self, line):
self.output_file.write(" "*self.indentation + line + '\n')
def indent(self):
self.indentation += 1
def unindent(self):
if self.indentation != 0:
self.indentation -= 1
def write_token(self):
token_type = self.tokenizer.tokenType()
if token_type == TokenType.KEYWORD:
self.write("<keyword> " + self.tokenizer.keyword() + " </keyword>")
elif token_type == TokenType.IDENTIFIER:
self.write("<identifier> " + self.tokenizer.identifier() + " </identifier>")
elif token_type == TokenType.SYMBOL:
self.write("<symbol> " + self.tokenizer.symbol() + " </symbol>")
elif token_type == TokenType.INT_CONST:
self.write("<integerConstant> " + str(self.tokenizer.intVal()) + " </integerConstant>")
elif token_type == TokenType.STRING_CONST:
self.write("<stringConstant> " + self.tokenizer.stringVal() + " </stringConstant>")
def next(self):
self.tokenizer.advance()
def compileClass(self):
self.write("<class>")
self.indent()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
while self.tokenizer.keyword() == "field" or self.tokenizer.keyword() == "static":
self.compileClassVarDec()
while self.tokenizer.keyword() == "function" or self.tokenizer.keyword() == "method" or self.tokenizer.keyword() == "constructor":
self.compileSubroutine()
self.write_token()
self.unindent()
self.write("</class>")
def compileClassVarDec(self):
self.write("<classVarDec>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
while self.tokenizer.symbol() != ";":
self.write_token()
self.next()
self.write_token()
self.next()
self.unindent()
self.write("</classVarDec>")
def compileSubroutine(self):
self.write("<subroutineDec>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileParameterList()
self.write_token()
self.next()
self.write("<subroutineBody>")
self.indent()
self.write_token()
self.next()
while self.tokenizer.keyword() == "var":
self.compileVarDec()
self.compileStatements()
self.write_token()
self.next()
self.unindent()
self.write("</subroutineBody>")
self.unindent()
self.write("</subroutineDec>")
def compileParameterList(self):
self.write("<parameterList>")
self.indent()
if self.tokenizer.symbol() == ")":
self.unindent()
self.write("</parameterList>")
return;
else:
self.write_token()
self.next()
self.write_token()
self.next()
while self.tokenizer.symbol() != ")":
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
self.unindent()
self.write("</parameterList>")
def compileVarDec(self):
self.write("<varDec>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
self.write_token()
self.next()
while self.tokenizer.symbol() != ";":
self.write_token()
self.next()
self.write_token()
self.next()
self.unindent()
self.write("</varDec>")
def compileStatements(self):
self.write("<statements>")
self.indent()
while self.tokenizer.symbol() != "}":
if self.tokenizer.keyword() == "do":
self.compileDo()
elif self.tokenizer.keyword() == "let":
self.compileLet()
elif self.tokenizer.keyword() == "while":
self.compileWhile()
elif self.tokenizer.keyword() == "return":
self.compileReturn()
elif self.tokenizer.keyword() == "if":
self.compileIf()
self.unindent()
self.write("</statements>")
def compileDo(self):
self.write("<doStatement>")
self.indent()
self.write_token()
self.next()
while self.tokenizer.symbol() != "(":
self.write_token()
self.next()
self.write_token()
self.next()
self.compileExpressionList()
self.write_token()
self.next()
self.write_token()
self.next()
self.unindent()
self.write("</doStatement>")
def compileLet(self):
self.write("<letStatement>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
if self.tokenizer.symbol() == "[":
self.write_token()
self.next()
self.compileExpression()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileExpression()
self.write_token()
self.next()
self.unindent()
self.write("</letStatement>")
def compileWhile(self):
self.write("<whileStatement>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileExpression()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileStatements()
self.write_token()
self.next()
self.unindent()
self.write("</whileStatement>")
def compileReturn(self):
self.write("<returnStatement>")
self.indent()
self.write_token()
self.next()
if self.tokenizer.symbol() != ";":
self.compileExpression()
self.write_token()
self.next()
self.unindent()
self.write("</returnStatement>")
def compileIf(self):
self.write("<ifStatement>")
self.indent()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileExpression()
self.write_token()
self.next()
self.write_token()
self.next()
self.compileStatements()
self.write_token()
self.next()
if self.tokenizer.keyword() == "else":
self.write_token()
self.next()
self.write_token()
self.next()
self.compileStatements()
self.write_token()
self.next()
self.unindent()
self.write("</ifStatement>")
def compileExpressionList(self):
self.write("<expressionList>")
self.indent()
if self.tokenizer.symbol() != ")":
self.compileExpression()
while self.tokenizer.symbol() == ",":
self.write_token()
self.next()
self.compileExpression()
self.unindent()
self.write("</expressionList>")
def compileExpression(self):
    """Compile a Jack expression: a term followed by (operator term)* pairs."""
    self.write("<expression>")
    self.indent()
    self.compileTerm()
    while self.tokenizer.symbol() in self.OPERATORS:
        self.write_token()  # binary operator
        self.next()
        self.compileTerm()
    self.unindent()
    self.write("</expression>")
def compileTerm(self):
    """Compile a Jack term to XML.

    Handles unary operators, parenthesized sub-expressions, plain
    constants/identifiers, array entries, and subroutine calls
    (both ``name(...)`` and ``obj.name(...)`` forms).
    """
    self.write("<term>")
    self.indent()

    def emit():
        # Write the current token and advance the tokenizer.
        self.write_token()
        self.next()

    sym = self.tokenizer.symbol()
    if sym == "-" or sym == "~":
        emit()  # unary operator
        self.compileTerm()
    elif sym == "(":
        emit()  # '('
        self.compileExpression()
        emit()  # ')'
    else:
        emit()  # constant or identifier
        follow = self.tokenizer.symbol()
        if follow == "(":
            emit()  # '('
            self.compileExpressionList()
            emit()  # ')'
        elif follow == ".":
            emit()  # '.'
            emit()  # subroutine name
            emit()  # '('
            self.compileExpressionList()
            emit()  # ')'
        elif follow == "[":
            emit()  # '['
            self.compileExpression()
            emit()  # ']'
    self.unindent()
    self.write("</term>")
# Command-line entry point: compile one .jack file, or every .jack file
# found in a directory, writing a sibling "<name>C.xml" for each source.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("source")
args = arg_parser.parse_args()
output_file = ""
sources = []
src = args.source
if os.path.isdir(src):
    for entry in os.listdir(src):
        candidate = os.path.join(src, entry)
        if os.path.isfile(candidate) and entry.endswith('.jack'):
            sources.append(candidate)
elif src.endswith('.jack'):
    sources.append(src)
else:
    print("Wrong File Extension")
    exit()
for jack_file in sources:
    tokenizer = JackTokenizer(jack_file)
    compilation_engine = CompilationEngine(tokenizer, jack_file[:-5] + "C.xml")
| [
"tylpoon@bigcity.local"
] | tylpoon@bigcity.local |
c8cb7b527e6e5bd43755eaa0f580c076ec82f12e | f3bbfe35555030afce1fd6d430f0b90e02e96ced | /Directory Scan/dirScan.py | 9e448afcc1b3304e844393c5d35a9cc94477b88a | [] | no_license | git618/py-scripts | cb42e871fa2e085c81172d8678292bb6798a315b | b24404b393c2e7d8480435b70cbee1c3114b51a3 | refs/heads/master | 2022-01-09T16:49:40.129004 | 2018-09-13T03:19:36 | 2018-09-13T03:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # -*- coding:utf-8 -*-
import requests
from threading import Thread, activeCount
import queue
def dir_scan(url):
    """Probe a single URL with a HEAD request.

    If the response status is one of the accepted codes (200), the URL is
    printed and appended to ``exist_url.txt``; otherwise 'no url' is printed.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'http://www.baidu.com'
    }
    status_code = [200]
    try:
        req = requests.head(url.strip(), timeout=8, headers=headers)
        if req.status_code in status_code:
            print('status_code: %s url: %s' % (req.status_code, url.strip('\n')))
            # Fix: close the log file deterministically instead of leaking the
            # handle returned by a bare open(...).write(...).
            with open('exist_url.txt', 'a') as hits:
                hits.write(url)
    except requests.RequestException:
        # Fix: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; only network/HTTP failures should be treated as 'no url'.
        print('no url')
def open_pathfile(url, file):
    """Read a wordlist file and queue one candidate URL per line.

    Joins *url* and each path with exactly one '/' between them, regardless
    of whether either side already carries one.  Lines keep their trailing
    newline, exactly as read.
    """
    scan_queue = queue.Queue()
    for line in open(file, 'r').readlines():
        base_has_slash = url.endswith('/')
        path_has_slash = line.startswith('/')
        if base_has_slash and path_has_slash:
            candidate = url + line[1:]
        elif base_has_slash or path_has_slash:
            candidate = url + line
        else:
            candidate = url + '/' + line
        scan_queue.put(candidate)
    return scan_queue
def check_url(url):
    """Return *url* with an http(s) scheme, prepending 'http://' if missing."""
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://' + url
def main():
    """Interactive entry point: ask for a target URL, a thread count and a
    wordlist, then scan each candidate path with up to that many threads.
    """
    print('''
 ____  _      ____
|  _ \(_)_ __/ ___|  ___ __ _ _ __
| | | | | '__\___ \ / __/ _` | '_ \
| |_| | | | ___) | (_| (_| | | | |
|____/|_|_| |____/ \___\__,_|_| |_|
''')
    url = input('[*] Please input your target url: ')
    thread_num = input('[*] Please input your threadnum: ')
    pathfile = input('[*] Please input your dictionary: ')
    # Fix: the normalized URL was previously computed (as `_url`) and then
    # discarded — the raw input was scanned instead.  Use the checked URL.
    target = check_url(url)
    print('The number of threads is %s '% thread_num)
    print('[*] scanning...')
    scan_queue = open_pathfile(target, pathfile)
    while scan_queue.qsize() > 0:
        # Throttle: only spawn a new worker while under the requested limit.
        if activeCount() <= int(thread_num):
            Thread(target=dir_scan, args=(scan_queue.get(),)).start()
if __name__ == '__main__':
    main()
"1162248131@qq.com"
] | 1162248131@qq.com |
2c940a23b570ef8251077537155455093097c2d3 | d1d366fe205dd8bb0469df4c9f80df488bccfa22 | /script/model/type.py | 8fc98bc7607cc6d5bd86dbab07e1c7e377b98f35 | [
"MIT"
] | permissive | kjobanputra/noisepage | 0403ed062350ad657178e943635ed2092c67a432 | e675b0befb55fcfaa78c23628e32800374a10a24 | refs/heads/master | 2023-04-01T07:49:09.715971 | 2020-10-30T00:56:01 | 2020-10-30T00:56:01 | 294,547,389 | 0 | 0 | MIT | 2020-12-01T04:50:46 | 2020-09-10T23:45:03 | C++ | UTF-8 | Python | false | false | 2,379 | py | """All the types (defined using Enum).
This should be the only module that you directly import classes, instead of the higher-level module.
"""
import enum
class Target(enum.IntEnum):
    """The output targets for the operating units."""

    START_TIME = 0
    CPU_ID = 1
    CPU_CYCLES = 2
    INSTRUCTIONS = 3
    CACHE_REF = 4
    CACHE_MISS = 5
    REF_CPU_CYCLES = 6
    BLOCK_READ = 7
    BLOCK_WRITE = 8
    MEMORY_B = 9
    ELAPSED_US = 10
class OpUnit(enum.IntEnum):
    """The enum for all the operating units.

    The upper-case member name is used in the codebase; the numeric values
    match the identifiers in the csv data files.
    """

    GC = 0
    LOG_SERIALIZER_TASK = 1
    DISK_LOG_CONSUMER_TASK = 2
    TXN_BEGIN = 3
    TXN_COMMIT = 4

    # Execution engine opunits.
    # NOTE(review): value 5 is unused in the original definition; the gap is
    # preserved so values stay aligned with the recorded data.
    OUTPUT = 6
    OP_INTEGER_PLUS_OR_MINUS = 7
    OP_INTEGER_MULTIPLY = 8
    OP_INTEGER_DIVIDE = 9
    OP_INTEGER_COMPARE = 10
    OP_DECIMAL_PLUS_OR_MINUS = 11
    OP_DECIMAL_MULTIPLY = 12
    OP_DECIMAL_DIVIDE = 13
    OP_DECIMAL_COMPARE = 14
    SEQ_SCAN = 15
    IDX_SCAN = 16
    HASHJOIN_BUILD = 17
    HASHJOIN_PROBE = 18
    AGG_BUILD = 19
    AGG_ITERATE = 20
    SORT_BUILD = 21
    SORT_ITERATE = 22
    INSERT = 23
    UPDATE = 24
    DELETE = 25
    CREATE_INDEX = 26
    CREATE_INDEX_MAIN = 27
    PARALLEL_MERGE_HASHJOIN = 28
    PARALLEL_MERGE_AGGBUILD = 29
    PARALLEL_SORT_STEP = 30
    PARALLEL_SORT_MERGE_STEP = 31
class ExecutionFeature(enum.IntEnum):
    """Column layout of the execution-engine feature records."""

    # Debugging information.
    QUERY_ID = 0
    PIPELINE_ID = 1
    # Feature bookkeeping.
    NUM_FEATURES = 2
    FEATURES = 3
    # Input features.
    EXEC_MODE = 4
    NUM_ROWS = 5
    KEY_SIZES = 6
    NUM_KEYS = 7
    EST_CARDINALITIES = 8
    MEM_FACTOR = 9
    NUM_LOOPS = 10
    NUM_CONCURRENT = 11
    # Interval input features.
    TXNS_DEALLOCATED = 12
    TXNS_UNLINKED = 13
    BUFFER_UNLINKED = 14
    READONLY_UNLINKED = 15
    INTERVAL = 16
class ArithmeticFeature(enum.Enum):
    """The input fields of the arithmetic operating units."""

    # NOTE: the original trailing commas make each member's .value a 1-tuple;
    # that is preserved verbatim here for backward compatibility.
    EXEC_NUMBER = (0,)
    EXEC_MODE = (1,)
class ConcurrentCountingMode(enum.Enum):
    """How to identify the concurrent running operations (for a GroupedOpUnitData)."""

    # NOTE: the original trailing commas make each member's .value a 1-tuple;
    # that is preserved verbatim here for backward compatibility.
    EXACT = (0,)
    ESTIMATED = (1,)
    INTERVAL = (2,)
| [
"noreply@github.com"
] | noreply@github.com |
def break_words(stuff):
    """Split *stuff* on single spaces and return the resulting word list."""
    return stuff.split(' ')
def sort_words(words):
    """Return a new list with the words in ascending sorted order."""
    ordered = sorted(words)
    return ordered
def print_first_word(words):
    """Pop the first word off *words* (mutating it) and print it."""
    print(words.pop(0))
def print_last_word(words):
    """Pop the last word off *words* (mutating it) and print it."""
    print(words.pop())
def sort_sentence(sentence):
    """Break *sentence* into words and return them sorted."""
    return sort_words(break_words(sentence))
def print_first_and_last(sentence):
    """Print the first and last words of *sentence* (in original order)."""
    remaining = break_words(sentence)
    print_first_word(remaining)
    print_last_word(remaining)
def print_first_and_last_sorted(sentence):
    """Sort the words of *sentence*, then print the first and last one."""
    remaining = sort_sentence(sentence)
    print_first_word(remaining)
    print_last_word(remaining)
# NOTE(review): Python 2 print statements — this exercise file predates py3.
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
# Multiline string exercising embedded tab/newline escapes.
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
# Operator-precedence exercise: 10*2 - 3*5 == 5.
five = 10 * 2 - 3 * 5
print "This should be five: %s" % five
def secret_formula(started):
    """Derive (jelly bean, jar, crate) counts from a starting amount."""
    total_beans = started * 500
    jar_count = total_beans / 1000
    crate_count = jar_count / 100
    return total_beans, jar_count, crate_count
# Call secret_formula two ways: via unpacking, then by splatting the tuple
# straight into the format string.
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d jeans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crabapples." % secret_formula(start_point)
# Exercise the word helpers; note that print_first_word/print_last_word
# mutate the lists they are given (they pop).
sentence = "All good\tthings come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
"spencerward@hotmail.com"
] | spencerward@hotmail.com |
233285c17f75cb0cf8903cbacdeb74bbe001281d | 8fcdcec1bf0f194d23bba4acd664166a04dc128f | /packages/gcTool.py | bd22a08d82b189ff60330613fa6b6795e709fd48 | [] | no_license | grid-control/grid-control | e51337dd7e5d158644a8da35923443fb0d232bfb | 1f5295cd6114f3f18958be0e0618ff6b35aa16d7 | refs/heads/master | 2022-11-13T13:29:13.226512 | 2021-10-01T14:37:59 | 2021-10-01T14:37:59 | 13,805,261 | 32 | 30 | null | 2023-02-19T16:22:47 | 2013-10-23T14:39:28 | Python | UTF-8 | Python | false | false | 19 | py | grid_control_api.py | [
"stober@cern.ch"
] | stober@cern.ch |
35380b0997d3dc37aa77773fe400ca9768d179f3 | 9c05ec071dda2aa98ea1b12d9703dd91df19c87d | /quantum/hooks.py | 2c6a587a6d593503d2bbf9fee3977197c254c5db | [
"Apache-2.0"
] | permissive | DestinyOneSystems/quantum | af6ff44dd5e8cff944e53946f60adb11efb47bd5 | d7eafd8ffa719d91108b230221ecf27531a3530d | refs/heads/master | 2020-04-05T18:59:00.584768 | 2013-10-21T02:41:15 | 2013-10-21T02:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
def setup_hook(config):
    """Filter config parsed from a setup.cfg to inject our defaults.

    On win32 the requirement list is rewritten: pywin32/wmi are added and
    pyudev removed.  On other platforms the metadata passes through untouched.
    """
    metadata = config['metadata']
    if sys.platform == 'win32':
        # Fix: 'requires_dist' is a newline-separated *string*; the previous
        # default of list() crashed on .split('\n') when the key was missing.
        requires = metadata.get('requires_dist', '').split('\n')
        requires.append('pywin32')
        requires.append('wmi')
        # Fix: list.remove raises ValueError when the entry is absent; only
        # drop pyudev if it is actually listed.
        if 'pyudev' in requires:
            requires.remove('pyudev')
        metadata['requires_dist'] = "\n".join(requires)
    config['metadata'] = metadata
| [
"mordred@inaugust.com"
] | mordred@inaugust.com |
30d846d37eefcc314ee9a952bf9d18e7cb2cc2b1 | f3e1c6702610b3104efaca3c177a050838f25e05 | /script/flask/bin/flask | 3da26d547b5f391ac0e9ae7f928b472ef7fd3456 | [] | no_license | cycfsabo/thesis | 5153aac15e81bfbe2d90af82d26dbdb495aba634 | d9124d97cc16896a343e43d55bdc9d41bad15067 | refs/heads/master | 2023-01-03T00:20:17.746874 | 2020-10-31T06:30:06 | 2020-10-31T06:30:06 | 308,819,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | #!/home/hungcao/flask/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"huuhungf@gmail.com"
] | huuhungf@gmail.com | |
f138655f1c273477db99f1f85129ea718053c624 | 1a2cbc44bfcda1eafe4e8513de8541d8cd49bd08 | /fts/test_t1_amend_user2.py | f8bebf459f343d1e016841a1993e789e179cfd24 | [
"LicenseRef-scancode-public-domain"
] | permissive | DonaldMc/gdms | d62d34585a3914330cc933476dcb0d3ab750b7d8 | 7bfdf40d929afab2e204256c781c3700f6e24443 | refs/heads/master | 2021-01-18T12:38:55.798638 | 2016-05-30T18:59:55 | 2016-05-30T18:59:55 | 56,460,151 | 0 | 0 | null | 2016-05-30T20:59:22 | 2016-04-17T21:44:40 | Python | UTF-8 | Python | false | false | 3,486 | py | from functional_tests import FunctionalTest, ROOT, USERS
from ddt import ddt, data, unpack
from selenium.webdriver.support.ui import WebDriverWait
import time
from selenium.webdriver.support.ui import Select
# Testuser1 - stays as unspecified
# Testuser2 - specifies Africa and unspecified country and subdivision
# Testuser3 - specifies Africa and South Africa and unspecified subdivision
# Testuser4 - specifies Europe and unspecifoed country
# Testuser5 - specifies Europe and Switzerland and unspecified Subdivision
# Testuser6 - specifies North America and Unspeccified country
# Testuser7 - specifies North America, Canada and unspecified subdivision
# Testuser8 - specifies North America, Canada and Alberta
# Testuser9 - specifies North America, Canada and Saskatchewan
@ddt
class TestRegisterPage (FunctionalTest):
    # Logs each ddt-supplied user in, amends the profile's region dropdowns
    # (continent / country / subdivision), verifies the flash message, and
    # logs out so the next data row starts from a clean session.

    def setUp(self):
        # Every case starts from the application's login page; the browser
        # instance comes from the FunctionalTest base class.
        self.url = ROOT + '/default/user/login'
        get_browser=self.browser.get(self.url)

    # setup below for user7 being set twice seems stupid however for reasons that escape me the
    # setting of unspecified subdivision isn't working if done in a single step hence Manitoba
    # temporarily wheeled into play
    @data((USERS['USER7'], USERS['PASSWORD7'], 'North America (NA)', 'Canada (NA)', 'Manitoba'),
          (USERS['USER6'], USERS['PASSWORD6'], 'North America (NA)', 'Unspecified', 'Unspecified'),
          (USERS['USER8'], USERS['PASSWORD8'], 'North America (NA)', 'Canada (NA)', 'Alberta'),
          (USERS['USER9'], USERS['PASSWORD9'], 'North America (NA)', 'Canada (NA)', 'Saskatchewan'),
          (USERS['USER7'], USERS['PASSWORD7'], 'North America (NA)', 'Canada (NA)', 'Unspecified'))
    @unpack
    def test_put_values_in_register_form(self, user, passwd, continent, country, subdivision):
        """Log in as *user*, set the three region dropdowns on the profile
        page, apply the change, and confirm the resulting page."""
        # Log in with the ddt-supplied credentials.
        mailstring = user + '@user.com'
        email = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_name("email"))
        email.send_keys(mailstring)
        password = self.browser.find_element_by_name("password")
        password.send_keys(passwd)
        time.sleep(1)
        submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
        time.sleep(1)
        submit_button.click()
        time.sleep(1)
        # Navigate to the profile page and pick continent first; the country
        # and subdivision lists appear to be repopulated after each parent
        # selection, hence the sleeps before touching the next dropdown.
        self.url = ROOT + '/default/user/profile'
        get_browser=self.browser.get(self.url)
        time.sleep(1)
        select = Select(self.browser.find_element_by_id("auth_user_continent"))
        time.sleep(1)
        select.select_by_visible_text(continent)
        time.sleep(1)
        select = Select(self.browser.find_element_by_id("countryopt"))
        time.sleep(2)
        select.select_by_visible_text(country)
        time.sleep(3)
        select = Select(self.browser.find_element_by_id("subdivopt"))
        time.sleep(3)
        select.select_by_visible_text(subdivision)
        time.sleep(3)
        self.browser.find_element_by_xpath("//input[@value='Apply changes']").click()
        # TODO get this changed to changes applied after working
        resultstring = 'Welcome'
        time.sleep(2)
        body = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_tag_name('body'))
        self.assertIn(resultstring, body.text)
        #welcome_message = self.browser.find_element_by_css_selector(".flash")
        #self.assertEqual(resultstring, welcome_message.text)
        # Log out so the next ddt data row starts unauthenticated.
        self.url = ROOT + '/default/user/logout'
        get_browser = self.browser.get(self.url)
        time.sleep(1)
"donaldm2020@gmail.com"
] | donaldm2020@gmail.com |
bbbfb80122a53747480b05910a3ddbde0647a7ed | 0ad2bcd3881c691ffdc8ba83d9af7611e7827220 | /pad2drums.py | 587c3b5c2e75052f1b5107f7daff50ccc664b1f7 | [] | no_license | SimmaLimma/pad2drums | e028691e4fd9562d935576d4d0941637aac2ad25 | f7d6b95681f65c94337c28e012fcfeadab9170a3 | refs/heads/master | 2020-04-10T04:42:46.297097 | 2018-12-25T21:24:30 | 2018-12-25T21:24:30 | 160,806,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from DrumGenerator import DrumGenerator
from detection import detect_sound
from utils import load_wav, save_wav
def pad2drums(read_from_fname, save_to_fname):
    """Convert a drum-pad recording into a drum-sound track.

    Reads ``raw_audio/<read_from_fname>`` (a .wav recorded from a drum pad),
    detects the pad hits, synthesizes drum sounds at the detected positions,
    and writes the result to ``results/<save_to_fname>``.
    """
    sample_rate, pad_audio = load_wav('raw_audio/' + read_from_fname)
    # Locate pad hits and how hard each one was struck.
    hit_indices, hit_strengths = detect_sound(pad_audio, stereo=True)
    generator = DrumGenerator(fs=sample_rate)
    drum_audio = generator.generate_drum_audio(hit_indices, hit_strengths, pad_audio.size)
    save_wav('results/' + save_to_fname, drum_audio, sample_rate)
| [
"simonmaartensson@gmail.com"
] | simonmaartensson@gmail.com |
2f05d2d6c9b5c603f7c63f6fd86869c5371b1baa | 374615e05818a861b077253a484dd73e27ed65d3 | /batch/batch_learners.py | f4f0422b4b7396559881e73bb690b13924ff2ce3 | [] | no_license | wdbronac/m_car_python | bcdb4d6042b966ec855a77c901ec02a7e235b29f | 21e8ce7a6cadca6fa2774dc3b1847a7ff105be5c | refs/heads/master | 2021-05-31T11:23:26.666237 | 2016-05-24T09:52:25 | 2016-05-24T09:52:25 | 58,363,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,063 | py | # -*- coding: utf-8 -*-
"""
Module batch_learners
"""
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LinearRegression
class fittedQ:
    """The fitted-Q iteration algorithm.

    Parameters
    ----------
    regressor : a scikit-learn style regressor (fit/predict), optional
        the base regressor; defaults to an extra-trees forest
    gamma : float in (0, 1)
        the discount factor (default 0.99)
    """

    def __init__(self, regressor=None, gamma=0.99):
        if regressor is None:
            regressor = ExtraTreesRegressor(n_estimators=50, min_samples_leaf=5)
        self.Q = regressor        # regression model for Q(s, a)
        self.gamma = gamma
        self.na = 0               # number of actions; set on first update()
        self.t = 0                # current iteration

    def update(self, states, actions, next_states, rewards, eoes):
        """Perform one fitted-Q regression step on the batch.

        Parameters
        ----------
        states : (n, 2) array
        actions : (n,) array
        next_states : (n, 2) array
        rewards : (n,) array
        eoes : (n,) array of end-of-episode flags (1 terminates bootstrapping)
        """
        n = np.shape(states)[0]
        self.na = len(np.unique(actions))
        print("fittedQ: iteration " + str(self.t))
        X = np.concatenate((states, actions.reshape((n, 1))), 1)
        if self.t == 0:
            # First iteration: no model fitted yet, regress on raw rewards.
            Y = rewards
        else:
            # Bellman targets: r + gamma * max_a Q(s', a), zeroed at episode ends.
            qvals = np.zeros((n, self.na))
            for a in range(self.na):
                qvals[:, a] = self.Q.predict(
                    np.concatenate((next_states, a * np.ones((n, 1))), 1))
            Y = rewards + self.gamma * (1 - eoes) * np.max(qvals, 1)
        self.Q.fit(X, Y)
        self.t += 1

    def predict(self, states):
        """Return values (max_a Q(s, a)) and greedy actions for *states*.

        Returns
        -------
        values : (n,) array
        gactions : (n,) array of greedy action indices
        """
        n = np.shape(states)[0]
        qvals = np.zeros((n, self.na))
        for a in range(self.na):
            qvals[:, a] = self.Q.predict(
                np.concatenate((states, a * np.ones((n, 1))), 1))
        gactions = np.argmax(qvals, 1)
        values = qvals[(range(n), gactions)]
        return values, gactions
class LSPI:
    """The LSPI algorithm, with hand-coded Gaussian RBF features.

    Parameters
    ----------
    gamma : float in (0, 1)
        the discount factor (default 0.99)
    """

    def __init__(self, gamma=0.99):
        self.nf = 3                        # Gaussians per state dimension
        self.na = 3                        # number of discrete actions
        self.d = (self.nf * self.nf + 1)   # features per action (+1 for bias)
        self.theta = np.zeros(self.d * self.na)  # linear parameter vector
        self.t = 0                         # current iteration
        self.gamma = gamma

    def features(self, states, actions):
        """Return the feature matrix for state-action couples.

        Parameters
        ----------
        states : (n, 2) array of (position, velocity) pairs
        actions : (n,) array of action indices (may be float-valued)

        Returns
        -------
        features : (n, na * d) array; only the block of the taken action
            is non-zero for each row.
        """
        pmin = -1.2
        pmax = .5
        vmin = -.07
        vmax = .07
        sigmap = (pmax - pmin) / (self.nf - 1)
        sigmav = (vmax - vmin) / (self.nf - 1)
        na = self.na
        n = np.shape(states)[0]
        d = self.d
        features = np.zeros((n, na * d))
        for i in range(n):                 # fix: xrange is Python-2 only
            # Fix: actions often arrive as floats (e.g. a * np.ones(n));
            # modern NumPy rejects float array indices, so cast explicitly.
            a = int(actions[i])
            features[i, (a + 1) * d - 1] = 1   # last feature of the block is the bias
            for jp in range(self.nf):
                for jv in range(self.nf):
                    cp = (states[i, 0] - (pmin + jp * sigmap)) / sigmap
                    cv = (states[i, 1] - (vmin + jv * sigmav)) / sigmav
                    features[i, a * d + jp + jv * self.nf] = np.exp(-.5 * (cp * cp + cv * cv))
        return features

    def update(self, states, actions, next_states, rewards, eoes):
        """Perform one LSPI policy-evaluation/improvement step.

        Parameters
        ----------
        states : (n, 2) array
        actions : (n,) array
        next_states : (n, 2) array
        rewards : (n,) array
        eoes : (n,) array of end-of-episode flags (1 terminates bootstrapping)
        """
        n = np.shape(states)[0]
        print("LSPI: iteration " + str(self.t))
        # Greedy policy w.r.t. the current Q_theta (all-zero before first fit).
        gactions = np.zeros(n)
        if self.t > 0:
            qvals = self.get_qvals(next_states)
            gactions = np.argmax(qvals, 1)
        phi = self.features(states, actions)
        next_phi = self.features(next_states, gactions)
        for i in range(n):                 # fix: xrange is Python-2 only
            if eoes[i]:
                next_phi[i, :] = 0         # no bootstrapping past episode ends
        A = np.dot(np.transpose(phi), phi - self.gamma * next_phi)
        b = np.dot(np.transpose(phi), rewards)
        self.theta = np.linalg.solve(A, b)
        self.t += 1

    def get_qvals(self, states):
        """Return the (n, na) matrix of Q-values for *states*."""
        n = np.shape(states)[0]
        qvals = np.zeros((n, self.na))
        for a in range(self.na):
            qvals[:, a] = np.dot(self.features(states, a * np.ones(n)), self.theta)
        return qvals

    def predict(self, states):
        """Return values (max_a Q(s, a)) and greedy actions for *states*.

        Returns
        -------
        values : (n,) array
        gactions : (n,) array of greedy action indices
        """
        qvals = self.get_qvals(states)
        gactions = np.argmax(qvals, 1)
        n = np.shape(states)[0]
        values = qvals[(range(n), gactions)]
        return values, gactions
| [
"ubuntu@ip-172-31-1-251.eu-west-1.compute.internal"
] | ubuntu@ip-172-31-1-251.eu-west-1.compute.internal |
fb43ed68a6f884a2cf5bfcfb9b67d3a2702633cb | 3f27c94566dc5ef905fb8a13a84f8a4801d2dc78 | /dataStructures/arrays/find_summing_numbers/find_summing_numbers.py | 1cb124da676efb54dba96fe58ecbc7762fa7fd90 | [] | no_license | trotrem/algo-rythmes | 26a9ac2735e94d8bd9b33ee56c9f536f9f8ec24a | fc9a183225be889d2aa0220cce6d12637ade3e93 | refs/heads/master | 2020-03-17T22:19:48.121377 | 2018-07-27T17:05:35 | 2018-07-27T17:05:35 | 133,999,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | #!/usr/bin/env python
#Given an array of integers, return indices of the two numbers such that they add up to a specific target.
#You may assume that each input would have exactly one solution, and you may not use the same element twice.
class Solution:
    """Two Sum: return the indices of the two numbers adding up to target.

    Exactly one solution is assumed to exist, and an element may not be
    used twice.
    """

    def twoSum_set(self, nums, target):
        """Single hash-map pass: O(n) time, O(n) space.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        seen = {}  # maps needed complement -> index of the number expecting it
        for idx, value in enumerate(nums):
            if value in seen:
                return [seen[value], idx]
            seen[target - value] = idx

    def twoSum(self, nums, target):
        """Sort plus two pointers: O(n log n) time, O(n) space."""
        indexed = sorted(zip(nums, range(len(nums))))
        lo, hi = 0, len(indexed) - 1
        while lo < hi:
            total = indexed[lo][0] + indexed[hi][0]
            if total == target:
                return [indexed[lo][1], indexed[hi][1]]
            if total > target:
                hi -= 1
            else:
                lo += 1
| [
"jeanchristophe.buteau@gmail.com"
] | jeanchristophe.buteau@gmail.com |
d1c1bbb2db6a84b96958926b348b6b210bbea91b | 31bee02c2f76b7c541007856659d0df7ba8d5b01 | /image_resize.py | 84a5f0265db2c5fe4c0b060a5c43b56267995240 | [] | no_license | hikaru7719/Image-Preprocessing | 6473b7b947b61407654bec4c3dc5521bdd4f57e1 | d6532ce0deead523e6f1451daf1254dceae44075 | refs/heads/master | 2021-04-09T16:26:32.824014 | 2018-03-25T02:57:06 | 2018-03-25T02:57:06 | 125,794,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | from google.cloud import vision
from google.cloud.vision import types
import io
import os
from PIL import Image
import argparse
def image_function(image, size, file_path):
    """Resize *image* to *size*, rotate 90 degrees clockwise, save to *file_path*."""
    resized = Image.open(image).resize(size)
    resized.rotate(-90).save(str(file_path))
def file_enums(dir_name):
    """Return the entry names contained in *dir_name* (files and directories)."""
    return os.listdir(dir_name)
def main(dir_name, size):
    """Resize and rotate up to the first 31 images in *dir_name* in place.

    The counter starts at 32 and each file decrements it before the
    ``number > 0`` check, so exactly the first 31 listed files are processed.
    """
    file_list = file_enums(dir_name)
    number = 32
    print(file_list)
    for file_name in file_list:
        file_path = dir_name + '/' + file_name
        number -= 1
        if number > 0:
            # Fix: previously the whole file was read (into an unused
            # variable) before the handle was passed to PIL, leaving the
            # file pointer at EOF so Image.open could not read the data.
            with io.open(file_path, 'rb') as image_file:
                image_function(image_file, size, file_path)
if __name__ == '__main__':
    # CLI entry point: -d names the directory of images to process.
    cli = argparse.ArgumentParser(description="description goes here")
    cli.add_argument("-d", type=str, help="Write Directory Path", required=True)
    parsed = cli.parse_args()
    main(parsed.d, (1600, 1200))
| [
"j148016s@st.u-gakugei.ac.jp"
] | j148016s@st.u-gakugei.ac.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.