text stringlengths 8 6.05M |
|---|
from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import secrets
import subprocess
import sys
from typing import IO, Literal, Sequence
import uuid
import click
from humanize import naturalsize
from tabulate import tabulate
from .main import main
from .pretty import print_wait, print_done, print_error, print_fail, print_info, print_warn
from .ssh import container_ssh_ctx
from .run import format_stats, prepare_env_arg, prepare_resource_arg, prepare_mount_arg
from ..compat import asyncio_run
from ..exceptions import BackendAPIError
from ..session import Session, AsyncSession
from ..types import Undefined, undefined
from .params import CommaSeparatedListType
# Reusable click parameter type that parses "a,b,c" into a list.
list_expr = CommaSeparatedListType()


@main.group()
def session():
    """Set of compute session operations"""
def _create_cmd(docs: str | None = None):
    """Build the ``create`` command.

    The same implementation is registered both as a top-level command and as
    a ``session`` subcommand; *docs*, when given, replaces the help text.
    """
    @click.argument('image')
    @click.option('-t', '--name', '--client-token', metavar='NAME',
                  help='Specify a human-readable session name. '
                       'If not set, a random hex string is used.')
    @click.option('-o', '--owner', '--owner-access-key', metavar='ACCESS_KEY',
                  help='Set the owner of the target session explicitly.')
    # job scheduling options
    @click.option('--type', metavar='SESSTYPE',
                  type=click.Choice(['batch', 'interactive']),
                  default='interactive',
                  help='Either batch or interactive')
    @click.option('--starts-at', metavar='STARTS_AT', type=str, default=None,
                  help='Let session to be started at a specific or relative time.')
    @click.option('-c', '--startup-command', metavar='COMMAND',
                  help='Set the command to execute for batch-type sessions.')
    @click.option('--enqueue-only', is_flag=True,
                  help='Enqueue the session and return immediately without waiting for its startup.')
    @click.option('--max-wait', metavar='SECONDS', type=int, default=0,
                  help='The maximum duration to wait until the session starts.')
    @click.option('--no-reuse', is_flag=True,
                  help='Do not reuse existing sessions but return an error.')
    @click.option('--depends', metavar='SESSION_ID', type=str, multiple=True,
                  help="Set the list of session ID or names that the newly created session depends on. "
                       "The session will get scheduled after all of them successfully finish.")
    @click.option('--callback-url', metavar='CALLBACK_URL', type=str, default=None,
                  help="Callback URL which will be called upon session lifecycle events.")
    # execution environment
    @click.option('-e', '--env', metavar='KEY=VAL', type=str, multiple=True,
                  help='Environment variable (may appear multiple times)')
    # extra options
    @click.option('--bootstrap-script', metavar='PATH', type=click.File('r'), default=None,
                  help='A user-defined script to execute on startup.')
    @click.option('--tag', type=str, default=None,
                  help='User-defined tag string to annotate sessions.')
    # resource spec
    @click.option('-v', '--volume', '-m', '--mount', 'mount',
                  metavar='NAME[=PATH]', type=str, multiple=True,
                  help='User-owned virtual folder names to mount. '
                       'If path is not provided, virtual folder will be mounted under /home/work. '
                       'When the target path is relative, it is placed under /home/work '
                       'with auto-created parent directories if any. '
                       'Absolute paths are mounted as-is, but it is prohibited to '
                       'override the predefined Linux system directories.')
    @click.option('--scaling-group', '--sgroup', type=str, default=None,
                  help='The scaling group to execute session. If not specified, '
                       'all available scaling groups are included in the scheduling.')
    @click.option('-r', '--resources', metavar='KEY=VAL', type=str, multiple=True,
                  help='Set computation resources used by the session '
                       '(e.g: -r cpu=2 -r mem=256 -r gpu=1).'
                       '1 slot of cpu/gpu represents 1 core. '
                       'The unit of mem(ory) is MiB.')
    @click.option('--cluster-size', metavar='NUMBER', type=int, default=1,
                  help='The size of cluster in number of containers.')
    @click.option('--cluster-mode', metavar='MODE',
                  type=click.Choice(['single-node', 'multi-node']), default='single-node',
                  help='The mode of clustering.')
    @click.option('--resource-opts', metavar='KEY=VAL', type=str, multiple=True,
                  help='Resource options for creating compute session '
                       '(e.g: shmem=64m)')
    @click.option('--preopen', default=None, type=list_expr,
                  help='Pre-open service ports')
    # resource grouping
    @click.option('-d', '--domain', metavar='DOMAIN_NAME', default=None,
                  help='Domain name where the session will be spawned. '
                       'If not specified, config\'s domain name will be used.')
    @click.option('-g', '--group', metavar='GROUP_NAME', default=None,
                  help='Group name where the session is spawned. '
                       'User should be a member of the group to execute the code.')
    @click.option('--assign-agent', default=None, type=list_expr,
                  help='Show mapping list of tuple which mapped containers with agent. '
                       'When user role is Super Admin. '
                       '(e.g., --assign-agent agent_id_1,agent_id_2,...)')
    def create(
        # base args
        image: str,
        name: str | None,
        owner: str | None,
        # job scheduling options
        type: Literal['batch', 'interactive'],
        starts_at: str | None,
        startup_command: str | None,
        enqueue_only: bool,
        max_wait: int,  # fixed: option is declared with type=int, not bool
        no_reuse: bool,
        depends: Sequence[str],
        callback_url: str | None,  # fixed: option default is None
        # execution environment
        env: Sequence[str],
        # extra options
        bootstrap_script: IO | None,
        tag: str | None,
        # resource spec
        mount: Sequence[str],
        scaling_group: str | None,
        resources: Sequence[str],
        cluster_size: int,
        cluster_mode: Literal['single-node', 'multi-node'],
        resource_opts: Sequence[str],
        preopen: str | None,
        assign_agent: str | None,
        # resource grouping
        domain: str | None,
        group: str | None,
    ) -> None:
        """
        Prepare and start a single compute session without executing codes.
        You may use the created session to execute codes using the "run" command
        or connect to an application service provided by the session using the "app"
        command.

        \b
        IMAGE: The name (and version/platform tags appended after a colon) of session
               runtime or programming language.
        """
        if name is None:
            # Generate a random but recognizable session name when not given.
            name = f'pysdk-{secrets.token_hex(5)}'
        envs = prepare_env_arg(env)
        resources = prepare_resource_arg(resources)
        resource_opts = prepare_resource_arg(resource_opts)
        mount, mount_map = prepare_mount_arg(mount)
        with Session() as session:
            try:
                compute_session = session.ComputeSession.get_or_create(
                    image,
                    name=name,
                    type_=type,
                    starts_at=starts_at,
                    enqueue_only=enqueue_only,
                    max_wait=max_wait,
                    no_reuse=no_reuse,
                    dependencies=depends,
                    callback_url=callback_url,
                    cluster_size=cluster_size,
                    cluster_mode=cluster_mode,
                    mounts=mount,
                    mount_map=mount_map,
                    envs=envs,
                    startup_command=startup_command,
                    resources=resources,
                    resource_opts=resource_opts,
                    owner_access_key=owner,
                    domain_name=domain,
                    group_name=group,
                    scaling_group=scaling_group,
                    bootstrap_script=(
                        bootstrap_script.read() if bootstrap_script is not None else None
                    ),
                    tag=tag,
                    preopen_ports=preopen,
                    assign_agent=assign_agent,
                )
            except Exception as e:
                print_error(e)
                sys.exit(1)
            else:
                # Report how far the session got; statuses other than SCHEDULED
                # fall through and return normally.
                if compute_session.status == 'PENDING':
                    print_info('Session ID {0} is enqueued for scheduling.'
                               .format(compute_session.id))
                elif compute_session.status == 'SCHEDULED':
                    print_info('Session ID {0} is scheduled and about to be started.'
                               .format(compute_session.id))
                    return
                elif compute_session.status == 'RUNNING':
                    if compute_session.created:
                        print_info('Session ID {0} is created and ready.'
                                   .format(compute_session.id))
                    else:
                        print_info('Session ID {0} is already running and ready.'
                                   .format(compute_session.id))
                    if compute_session.service_ports:
                        print_info('This session provides the following app services: ' +
                                   ', '.join(sport['name']
                                             for sport in compute_session.service_ports))
                elif compute_session.status == 'TERMINATED':
                    print_warn('Session ID {0} is already terminated.\n'
                               'This may be an error in the compute_session image.'
                               .format(compute_session.id))
                elif compute_session.status == 'TIMEOUT':
                    print_info('Session ID {0} is still on the job queue.'
                               .format(compute_session.id))
                elif compute_session.status in ('ERROR', 'CANCELLED'):
                    print_fail('Session ID {0} has an error during scheduling/startup or cancelled.'
                               .format(compute_session.id))
    if docs is not None:
        create.__doc__ = docs
    return create


main.command(aliases=['start'])(_create_cmd(docs="Alias of \"session create\""))
session.command()(_create_cmd())
def _create_from_template_cmd(docs: str | None = None):
    """Build the ``create-from-template`` command.

    Registered both as a top-level command and a ``session`` subcommand;
    *docs*, when given, replaces the help text.  Options defaulting to the
    ``undefined`` sentinel are left out of the request so that the values
    stored in the task template win.
    """
    @click.argument('template_id')
    @click.option('-t', '--name', '--client-token', metavar='NAME',
                  default=undefined,
                  help='Specify a human-readable session name. '
                       'If not set, a random hex string is used.')
    @click.option('-o', '--owner', '--owner-access-key', metavar='ACCESS_KEY',
                  default=undefined,
                  help='Set the owner of the target session explicitly.')
    # job scheduling options
    @click.option('--type', 'type_', metavar='SESSTYPE',
                  type=click.Choice(['batch', 'interactive', undefined]),  # type: ignore
                  default=undefined,
                  help='Either batch or interactive')
    # NOTE(review): this flag is spelled "--starts_at" (underscore) unlike the
    # "--starts-at" of the plain create command; renaming it now would break
    # existing scripts, so the inconsistency is only flagged here.
    @click.option('--starts_at', metavar='STARTS_AT', type=str, default=None,
                  help='Let session to be started at a specific or relative time.')
    @click.option('-i', '--image', default=undefined,
                  help='Set compute_session image to run.')
    @click.option('-c', '--startup-command', metavar='COMMAND', default=undefined,
                  help='Set the command to execute for batch-type sessions.')
    @click.option('--enqueue-only', is_flag=True,
                  help='Enqueue the session and return immediately without waiting for its startup.')
    @click.option('--max-wait', metavar='SECONDS', type=int, default=undefined,
                  help='The maximum duration to wait until the session starts.')
    @click.option('--no-reuse', is_flag=True,
                  help='Do not reuse existing sessions but return an error.')
    @click.option('--depends', metavar='SESSION_ID', type=str, multiple=True,
                  help="Set the list of session ID or names that the newly created session depends on. "
                       "The session will get scheduled after all of them successfully finish.")
    @click.option('--callback-url', metavar='CALLBACK_URL', type=str, default=None,
                  help="Callback URL which will be called upon session lifecycle events.")
    # execution environment
    @click.option('-e', '--env', metavar='KEY=VAL', type=str, multiple=True,
                  help='Environment variable (may appear multiple times)')
    # extra options
    @click.option('--tag', type=str, default=undefined,
                  help='User-defined tag string to annotate sessions.')
    # resource spec
    @click.option('-m', '--mount', metavar='NAME[=PATH]', type=str, multiple=True,
                  help='User-owned virtual folder names to mount. '
                       'When the target path is relative, it is placed under /home/work '
                       'with auto-created parent directories if any. '
                       'Absolute paths are mounted as-is, but it is prohibited to '
                       'override the predefined Linux system directories.')
    @click.option('--scaling-group', '--sgroup', type=str, default=undefined,
                  help='The scaling group to execute session. If not specified, '
                       'all available scaling groups are included in the scheduling.')
    @click.option('-r', '--resources', metavar='KEY=VAL', type=str, multiple=True,
                  help='Set computation resources used by the session '
                       '(e.g: -r cpu=2 -r mem=256 -r gpu=1).'
                       '1 slot of cpu/gpu represents 1 core. '
                       'The unit of mem(ory) is MiB.')
    @click.option('--cluster-size', metavar='NUMBER', type=int, default=undefined,
                  help='The size of cluster in number of containers.')
    @click.option('--resource-opts', metavar='KEY=VAL', type=str, multiple=True,
                  help='Resource options for creating compute session '
                       '(e.g: shmem=64m)')
    # resource grouping
    @click.option('-d', '--domain', metavar='DOMAIN_NAME', default=None,
                  help='Domain name where the session will be spawned. '
                       'If not specified, config\'s domain name will be used.')
    @click.option('-g', '--group', metavar='GROUP_NAME', default=None,
                  help='Group name where the session is spawned. '
                       'User should be a member of the group to execute the code.')
    # template overrides
    @click.option('--no-mount', is_flag=True,
                  help='If specified, client.py will tell server not to mount '
                       'any vFolders specified at template,')
    @click.option('--no-env', is_flag=True,
                  help='If specified, client.py will tell server not to add '
                       'any environs specified at template,')
    @click.option('--no-resource', is_flag=True,
                  help='If specified, client.py will tell server not to add '
                       'any resource specified at template,')
    def create_from_template(
        # base args
        template_id: str,
        name: str | Undefined,
        owner: str | Undefined,
        # job scheduling options
        type_: Literal['batch', 'interactive'] | Undefined,
        starts_at: str | None,
        image: str | Undefined,
        startup_command: str | Undefined,
        enqueue_only: bool,
        max_wait: int | Undefined,
        no_reuse: bool,
        depends: Sequence[str],
        callback_url: str | None,  # fixed: option default is None
        # execution environment
        env: Sequence[str],
        # extra options
        tag: str | Undefined,
        # resource spec
        mount: Sequence[str],
        scaling_group: str | Undefined,
        resources: Sequence[str],
        cluster_size: int | Undefined,
        resource_opts: Sequence[str],
        # resource grouping
        domain: str | None,
        group: str | None,
        # template overrides
        no_mount: bool,
        no_env: bool,
        no_resource: bool,
    ) -> None:
        """
        Prepare and start a single compute session without executing codes.
        You may use the created session to execute codes using the "run" command
        or connect to an application service provided by the session using the "app"
        command.

        \b
        IMAGE: The name (and version/platform tags appended after a colon) of session
               runtime or programming language.
        """
        if name is undefined:
            # Generate a random but recognizable session name when not given.
            name = f'pysdk-{secrets.token_hex(5)}'
        # Keep `undefined` (not None) when the user did not override anything,
        # so the template-provided values are preserved server-side; the
        # --no-* flags force an (empty) override to suppress template values.
        envs = prepare_env_arg(env) if len(env) > 0 or no_env else undefined
        resources = prepare_resource_arg(resources) if len(resources) > 0 or no_resource else undefined
        resource_opts = (
            prepare_resource_arg(resource_opts)
            if len(resource_opts) > 0 or no_resource else undefined
        )
        prepared_mount, prepared_mount_map = (
            prepare_mount_arg(mount)
            if len(mount) > 0 or no_mount else (undefined, undefined)
        )
        with Session() as session:
            try:
                compute_session = session.ComputeSession.create_from_template(
                    template_id,
                    image=image,
                    name=name,
                    type_=type_,
                    starts_at=starts_at,
                    enqueue_only=enqueue_only,
                    max_wait=max_wait,
                    no_reuse=no_reuse,
                    dependencies=depends,
                    callback_url=callback_url,
                    cluster_size=cluster_size,
                    mounts=prepared_mount,
                    mount_map=prepared_mount_map,
                    envs=envs,
                    startup_command=startup_command,
                    resources=resources,
                    resource_opts=resource_opts,
                    owner_access_key=owner,
                    domain_name=domain,
                    group_name=group,
                    scaling_group=scaling_group,
                    tag=tag,
                )
            except Exception as e:
                print_error(e)
                sys.exit(1)
            else:
                # Report how far the session got; statuses other than SCHEDULED
                # fall through and return normally.
                if compute_session.status == 'PENDING':
                    print_info('Session ID {0} is enqueued for scheduling.'
                               .format(name))
                elif compute_session.status == 'SCHEDULED':
                    print_info('Session ID {0} is scheduled and about to be started.'
                               .format(name))
                    return
                elif compute_session.status == 'RUNNING':
                    if compute_session.created:
                        print_info('Session ID {0} is created and ready.'
                                   .format(name))
                    else:
                        print_info('Session ID {0} is already running and ready.'
                                   .format(name))
                    if compute_session.service_ports:
                        print_info('This session provides the following app services: ' +
                                   ', '.join(sport['name']
                                             for sport in compute_session.service_ports))
                elif compute_session.status == 'TERMINATED':
                    print_warn('Session ID {0} is already terminated.\n'
                               'This may be an error in the compute_session image.'
                               .format(name))
                elif compute_session.status == 'TIMEOUT':
                    print_info('Session ID {0} is still on the job queue.'
                               .format(name))
                elif compute_session.status in ('ERROR', 'CANCELLED'):
                    print_fail('Session ID {0} has an error during scheduling/startup or cancelled.'
                               .format(name))
    if docs is not None:
        create_from_template.__doc__ = docs
    return create_from_template


main.command(aliases=['start-from-template'])(
    _create_from_template_cmd(docs="Alias of \"session create-from-template\""),
)
session.command()(_create_from_template_cmd())
def _destroy_cmd(docs: str | None = None):
    """Build the ``destroy`` command; *docs*, when given, replaces its help text."""
    @click.argument('session_names', metavar='SESSID', nargs=-1)
    @click.option('-f', '--forced', is_flag=True,
                  help='Force-terminate the errored sessions (only allowed for admins)')
    @click.option('-o', '--owner', '--owner-access-key', metavar='ACCESS_KEY',
                  help='Specify the owner of the target session explicitly.')
    @click.option('-s', '--stats', is_flag=True,
                  help='Show resource usage statistics after termination')
    def destroy(session_names, forced, owner, stats):
        """
        Terminate and destroy the given session.

        SESSID: session ID given/generated when creating the session.
        """
        if len(session_names) == 0:
            print_warn('Specify at least one session ID. Check usage with "-h" option.')
            sys.exit(1)
        print_wait('Terminating the session(s)...')
        with Session() as session:
            has_failure = False
            for session_name in session_names:
                try:
                    compute_session = session.ComputeSession(session_name, owner)
                    ret = compute_session.destroy(forced=forced)
                except BackendAPIError as e:
                    print_error(e)
                    if e.status == 404:
                        print_info(
                            'If you are an admin, use "-o" / "--owner" option '
                            'to terminate other user\'s session.')
                    has_failure = True
                except Exception as e:
                    print_error(e)
                    has_failure = True
                else:
                    # NOTE(review): this runs once per successfully destroyed
                    # session (not once after the loop), and is skipped for all
                    # later sessions once any earlier one has failed.
                    if not has_failure:
                        print_done('Done.')
                        if stats:
                            # `stats` (the flag) is rebound to the stats payload
                            # of the *current* destroy response here.
                            stats = ret.get('stats', None) if ret else None
                            if stats:
                                print(format_stats(stats))
                            else:
                                print('Statistics is not available.')
            if has_failure:
                sys.exit(1)
    if docs is not None:
        destroy.__doc__ = docs
    return destroy


main.command(aliases=['rm', 'kill'])(_destroy_cmd(docs="Alias of \"session destroy\""))
session.command(aliases=['rm', 'kill'])(_destroy_cmd())
def _restart_cmd(docs: str | None = None):
    """Build the ``restart`` command; *docs*, when given, replaces its help text."""
    @click.argument('session_refs', metavar='SESSION_REFS', nargs=-1)
    def restart(session_refs):
        """
        Restart the compute session.

        \b
        SESSION_REF: session ID or name
        """
        if len(session_refs) == 0:
            print_warn('Specify at least one session ID. Check usage with "-h" option.')
            sys.exit(1)
        print_wait('Restarting the session(s)...')
        with Session() as session:
            has_failure = False
            for session_ref in session_refs:
                try:
                    compute_session = session.ComputeSession(session_ref)
                    compute_session.restart()
                except BackendAPIError as e:
                    print_error(e)
                    if e.status == 404:
                        # Fixed: the previous hint advertised a "-o"/"--owner"
                        # option that this command does not define (copy-pasted
                        # from the destroy command).
                        print_info(
                            'The session may not exist or you may not have '
                            'the permission to access it.')
                    has_failure = True
                except Exception as e:
                    print_error(e)
                    has_failure = True
                else:
                    if not has_failure:
                        print_done('Done.')
            if has_failure:
                sys.exit(1)
    if docs is not None:
        restart.__doc__ = docs
    return restart


main.command()(_restart_cmd(docs="Alias of \"session restart\""))
session.command()(_restart_cmd())
@session.command()
@click.argument('session_id', metavar='SESSID')
@click.argument('files', type=click.Path(exists=True), nargs=-1)
def upload(session_id, files):
    """
    Upload the files to a compute session's home directory.
    If the target directory is in a storage folder mount, the operation is
    effectively same to uploading files to the storage folder.
    It is recommended to use storage folder commands for large file transfers
    to utilize the storage proxy.
    For cluster sessions, the files are only uploaded to the main container.

    \b
    SESSID: Session ID or name.
    FILES: One or more paths to upload.
    """
    if not files:
        # Nothing to do without at least one path.
        print_warn("Please specify one or more file paths after session ID or name.")
        return
    with Session() as api_session:
        try:
            print_wait('Uploading files...')
            compute_session = api_session.ComputeSession(session_id)
            compute_session.upload(files, show_progress=True)
            print_done('Uploaded.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@session.command()
@click.argument('session_id', metavar='SESSID')
@click.argument('files', nargs=-1)
@click.option('--dest', type=Path, default='.',
              help='Destination path to store downloaded file(s)')
def download(session_id, files, dest):
    """
    Download files from a compute session's home directory.
    If the source path is in a storage folder mount, the operation is
    effectively same to downloading files from the storage folder.
    It is recommended to use storage folder commands for large file transfers
    to utilize the storage proxy.
    For cluster sessions, the files are only downloaded from the main container.

    \b
    SESSID: Session ID or name.
    FILES: One or more paths inside compute session.
    """
    if not files:
        # Nothing to do without at least one path.
        print_warn("Please specify one or more file paths after session ID or name.")
        return
    with Session() as api_session:
        try:
            print_wait('Downloading file(s) from {}...'
                       .format(session_id))
            compute_session = api_session.ComputeSession(session_id)
            compute_session.download(files, dest, show_progress=True)
            print_done('Downloaded to {}.'.format(dest.resolve()))
        except Exception as e:
            print_error(e)
            sys.exit(1)
@session.command()
@click.argument('session_id', metavar='SESSID')
@click.argument('path', metavar='PATH', nargs=1, default='/home/work')
def ls(session_id, path):
    """
    List files in a path of a running compute session.
    For cluster sessions, it lists the files of the main container.

    \b
    SESSID: Session ID or name.
    PATH: Path inside container.
    """
    with Session() as session:
        try:
            print_wait('Retrieving list of files in "{}"...'.format(path))
            kernel = session.ComputeSession(session_id)
            result = kernel.list_files(path)
            if 'errors' in result and result['errors']:
                print_fail(result['errors'])
                sys.exit(1)
            files = json.loads(result['files'])
            # Render one row per file: name, human-readable size, mtime, mode.
            table = []
            headers = ['File name', 'Size', 'Modified', 'Mode']
            for file in files:
                mdt = datetime.fromtimestamp(file['mtime'])
                fsize = naturalsize(file['size'], binary=True)
                mtime = mdt.strftime('%b %d %Y %H:%M:%S')
                row = [file['filename'], fsize, mtime, file['mode']]
                table.append(row)
            print_done('Retrieved.')  # fixed typo: was 'Retrived.'
            print(tabulate(table, headers=headers))
        except Exception as e:
            print_error(e)
            sys.exit(1)
@session.command()
@click.argument('session_id', metavar='SESSID')
def logs(session_id):
    '''
    Shows the full console log of a compute session.

    \b
    SESSID: Session ID or its alias given when creating the session.
    '''
    with Session() as session:
        try:
            print_wait('Retrieving live container logs...')
            kernel = session.ComputeSession(session_id)
            result = kernel.get_logs().get('result')
            # Fixed: the previous `'logs' in result` raised TypeError when the
            # response had no 'result' key; default to an empty string instead.
            logs = result.get('logs', '') if result else ''
            print(logs)
            print_done('End of logs.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@session.command()
@click.argument('session_id', metavar='SESSID')
@click.argument('new_id', metavar='NEWID')
def rename(session_id, new_id):
    '''
    Renames session name of running session.

    \b
    SESSID: Session ID or its alias given when creating the session.
    NEWID: New Session ID to rename to.
    '''
    with Session() as api_session:
        try:
            compute_session = api_session.ComputeSession(session_id)
            compute_session.rename(new_id)
            print_done(f'Session renamed to {new_id}.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
def _ssh_cmd(docs: str | None = None):
    """Build the ``ssh`` command; *docs*, when given, replaces its help text."""
    @click.argument("session_ref", type=str, metavar='SESSION_REF')
    @click.option('-p', '--port', type=int, metavar='PORT', default=9922,
                  help="the port number for localhost")
    @click.pass_context
    def ssh(ctx: click.Context, session_ref: str, port: int) -> None:
        """Execute the ssh command against the target compute session.

        \b
        SESSION_REF: The user-provided name or the unique ID of a running compute session.

        All remaining options and arguments not listed here are passed to the ssh command as-is.
        """
        try:
            # container_ssh_ctx yields the path of the private key to reach
            # the session's container through the local forwarded port.
            with container_ssh_ctx(session_ref, port) as key_path:
                ssh_proc = subprocess.run(
                    [
                        "ssh",
                        "-o", "StrictHostKeyChecking=no",
                        "-o", "UserKnownHostsFile=/dev/null",
                        "-o", "NoHostAuthenticationForLocalhost=yes",
                        "-i", key_path,
                        "work@localhost",
                        "-p", str(port),
                        *ctx.args,  # unknown options/args forwarded verbatim
                    ],
                    shell=False,
                    check=False,  # be transparent against the main command
                )
                sys.exit(ssh_proc.returncode)
        except Exception as e:
            # NOTE(review): on exception the command still exits with status 0
            # after printing the error — confirm whether a non-zero exit code
            # is wanted here.
            print_error(e)
    if docs is not None:
        ssh.__doc__ = docs
    return ssh


# Click context settings that let unknown options and extra args pass
# through to the wrapped ssh/scp subprocess untouched.
_ssh_cmd_context_settings = {
    "ignore_unknown_options": True,
    "allow_extra_args": True,
    "allow_interspersed_args": True,
}
# Make it available as:
# - backend.ai ssh
# - backend.ai session ssh
main.command(
    context_settings=_ssh_cmd_context_settings,
)(_ssh_cmd(docs="Alias of \"session ssh\""))
session.command(
    context_settings=_ssh_cmd_context_settings,
)(_ssh_cmd())
def _scp_cmd(docs: str | None = None):
    """Build the ``scp`` command; *docs*, when given, replaces its help text."""
    @click.argument("session_ref", type=str, metavar='SESSION_REF')
    @click.argument("src", type=str, metavar='SRC')
    @click.argument("dst", type=str, metavar='DST')
    # Fixed: the option was declared with type=str while the handler annotates
    # and uses it as an int; align with the ssh command's type=int.
    @click.option('-p', '--port', type=int, metavar='PORT', default=9922,
                  help="the port number for localhost")
    @click.option('-r', '--recursive', default=False, is_flag=True,
                  help="recursive flag option to process directories")
    @click.pass_context
    def scp(
        ctx: click.Context,
        session_ref: str,
        src: str,
        dst: str,
        port: int,
        recursive: bool,
    ) -> None:
        """
        Execute the scp command against the target compute session.

        \b
        The SRC and DST have the same format with the original scp command,
        either a remote path as "work@localhost:path" or a local path.

        SESSION_REF: The user-provided name or the unique ID of a running compute session.
        SRC: the source path
        DST: the destination path

        All remaining options and arguments not listed here are passed to the ssh command as-is.

        Examples:

        * Uploading a local directory to the session:

          > backend.ai scp mysess -p 9922 -r tmp/ work@localhost:tmp2/

        * Downloading a directory from the session:

          > backend.ai scp mysess -p 9922 -r work@localhost:tmp2/ tmp/
        """
        recursive_args = []
        if recursive:
            recursive_args.append("-r")
        try:
            # container_ssh_ctx yields the path of the private key to reach
            # the session's container through the local forwarded port.
            with container_ssh_ctx(session_ref, port) as key_path:
                scp_proc = subprocess.run(
                    [
                        "scp",
                        "-o", "StrictHostKeyChecking=no",
                        "-o", "UserKnownHostsFile=/dev/null",
                        "-o", "NoHostAuthenticationForLocalhost=yes",
                        "-i", key_path,
                        "-P", str(port),
                        *recursive_args,
                        src, dst,
                        *ctx.args,  # unknown options/args forwarded verbatim
                    ],
                    shell=False,
                    check=False,  # be transparent against the main command
                )
                sys.exit(scp_proc.returncode)
        except Exception as e:
            print_error(e)
    if docs is not None:
        scp.__doc__ = docs
    return scp


# Make it available as:
# - backend.ai scp
# - backend.ai session scp
main.command(
    context_settings=_ssh_cmd_context_settings,
)(_scp_cmd(docs="Alias of \"session scp\""))
session.command(
    context_settings=_ssh_cmd_context_settings,
)(_scp_cmd())
def _events_cmd(docs: str | None = None):
    """Build the ``events`` command; *docs*, when given, replaces its help text."""
    @click.argument('session_name_or_id', metavar='SESSION_ID_OR_NAME')
    @click.option('-o', '--owner', '--owner-access-key', 'owner_access_key', metavar='ACCESS_KEY',
                  help='Specify the owner of the target session explicitly.')
    @click.option('--scope', type=click.Choice(['*', 'session', 'kernel']), default='*',
                  help='Filter the events by kernel-specific ones or session-specific ones.')
    def events(session_name_or_id, owner_access_key, scope):
        """
        Monitor the lifecycle events of a compute session.

        SESSID: session ID or its alias given when creating the session.
        """
        async def _run_events():
            async with AsyncSession() as session:
                try:
                    # Treat the argument as a UUID session ID first...
                    session_id = uuid.UUID(session_name_or_id)
                    compute_session = session.ComputeSession.from_session_id(session_id)
                except ValueError:
                    # ...and fall back to a name lookup when it is not a UUID.
                    compute_session = session.ComputeSession(session_name_or_id, owner_access_key)
                # Stream server-sent lifecycle events until the connection closes.
                async with compute_session.listen_events(scope=scope) as response:
                    async for ev in response:
                        print(click.style(ev.event, fg='cyan', bold=True), json.loads(ev.data))
        try:
            asyncio_run(_run_events())
        except Exception as e:
            print_error(e)
    if docs is not None:
        events.__doc__ = docs
    return events


# Make it available as:
# - backend.ai events
# - backend.ai session events
main.command()(_events_cmd(docs="Alias of \"session events\""))
session.command()(_events_cmd())
|
# Generated by Django 2.1.2 on 2018-12-23 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters EmailWebService.email_use_tls field options.

    dependencies = [
        ('systemoptions', '0008_auto_20181223_1630'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emailwebservice',
            name='email_use_tls',
            # NOTE(review): max_length is ignored by BooleanField; it is kept
            # as-is because editing an already-applied migration is unsafe.
            field=models.BooleanField(choices=[(True, 'True'), (False, 'False')], default=True, max_length=5, verbose_name='EMAIL_USE_TLS'),
        ),
    ]
|
class First:
    """Toy class that stores two numbers and can print their sum.

    Construction and destruction are traced with print statements.
    """

    def __init__(self, a, b):
        self.a = a
        self.b = b
        print("iam constructor")

    def sum(self):
        """Print the sum of the two stored values and return it.

        Returning the value (previously implicit None) also makes the
        method usable in expressions, backward-compatibly.
        """
        res = self.a + self.b
        print(res)
        return res

    # Removed a commented-out __str__ that printed instead of returning a
    # string (dead, broken code).

    def __del__(self):
        print("iam destructor")
# Demo: construct an instance (prints the constructor trace), show its
# default repr, and print the sum of its fields.
obj=First(10,20)
print(obj)
obj.sum()
import unittest2
from Trees.bst import BinarySearchTree, Node
"""
Test for binary search tree
"""
class BstTestClass(unittest2.TestCase):
    """Tests for BinarySearchTree insert/search/delete.

    NOTE(review): uses the third-party `unittest2` backport; the stdlib
    `unittest` provides the same API on modern Pythons.
    """

    def setUp(self):
        # Fresh empty tree for each test.
        self.tree = BinarySearchTree()

    def testInsert(self):
        # Insert keys in mixed order and verify each is retrievable by key.
        node = Node()
        node.key = 10
        node.value = "tor"
        self.tree.insert(node)
        result = self.tree.search_by_key(10)
        self.assertEqual("tor", result.value)
        node2 = Node()
        node2.key = 20
        node2.value = "malin"
        self.tree.insert(node2)
        result = self.tree.search_by_key(20)
        self.assertEqual("malin", result.value)
        node3 = Node()
        node3.key = 30
        node3.value = "eira"
        self.tree.insert(node3)
        result = self.tree.search_by_key(30)
        self.assertEqual("eira", result.value)
        node4 = Node()
        node4.key = 25
        node4.value = "truls"
        self.tree.insert(node4)
        result = self.tree.search_by_key(25)
        self.assertEqual("truls", result.value)
        # NOTE(review): `node4` is reused for a different key here (shadowing
        # the 25-key node variable); harmless but confusing.
        node4 = Node()
        node4.key = 6
        node4.value = "glenn"
        self.tree.insert(node4)
        result = self.tree.search_by_key(6)
        self.assertEqual("glenn", result.value)
        self.tree.printer()

    def testDelete(self):
        # NOTE(review): this test only exercises delete() without asserting the
        # post-delete state — the expected search_by_key() result for a removed
        # key is not visible here, so assertions should be added once confirmed.
        node = Node()
        node.key = 10
        node.value = "tor"
        self.tree.insert(node)
        node2 = Node()
        node2.key = 20
        node2.value = "malin"
        self.tree.insert(node2)
        node3 = Node()
        node3.key = 30
        node3.value = "eira"
        self.tree.insert(node3)
        node4 = Node()
        node4.key = 25
        node4.value = "truls"
        self.tree.insert(node4)
        self.tree.delete(30)
        self.tree.delete(10)
        self.tree.printer()
|
# Generated by Django 3.2.7 on 2021-09-07 03:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the stock app: Category and Product tables.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Nombre')),
                ('description', models.TextField(max_length=150, verbose_name='Descripcion')),
            ],
            options={
                # NOTE(review): 'Nombre'/'Nombres' as the verbose names of
                # *both* models looks copy-pasted; since editing an applied
                # migration is unsafe, this is only flagged for a follow-up
                # migration.
                'verbose_name': 'Nombre',
                'verbose_name_plural': 'Nombres',
                'db_table': 'category',
                'ordering': ['id'],
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Nombre')),
                ('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=9, verbose_name='Precio')),
                ('amount', models.PositiveIntegerField(default=0, verbose_name='cantidad')),
                ('description', models.TextField(max_length=150, verbose_name='Descripcion')),
                ('state', models.BooleanField(default=True, verbose_name='Estado')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.category')),
            ],
            options={
                'verbose_name': 'Nombre',
                'verbose_name_plural': 'Nombres',
                'db_table': 'products',
                'ordering': ['id'],
            },
        ),
    ]
|
from fastapi import FastAPI
from calc import Calc
# Application and shared calculator instance used by all endpoints.
app = FastAPI()
calc = Calc()


@app.get("/")
def read_root():
    """Return the author's identification info."""
    return {"Nombre": "ELADIO JUNIOR RODRIGUEZ RODRIGUEZ", "Matricula": "1085776"}


@app.get("/sumar")
def read_sumar(num1: int = 0, num2: int = 0):
    """Add two integers via Calc.sumar."""
    return {
        "total": calc.sumar(num1, num2)
    }


# Fixed: the three handlers below were all named `read_sumar`, shadowing one
# another (flake8 F811). Routes still work under FastAPI, but the duplicate
# names break introspection and the generated operation IDs; they are now
# named after their routes. The URL paths are unchanged.
@app.get("/restar")
def read_restar(num1: int = 0, num2: int = 0):
    """Subtract num2 from num1 via Calc.restar."""
    return {
        "total": calc.restar(num1, num2)
    }


@app.get("/multiplicar")
def read_multiplicar(num1: int = 0, num2: int = 0):
    """Multiply two integers via Calc.multiplicar."""
    return {
        "total": calc.multiplicar(num1, num2)
    }


@app.get("/dividir")
def read_dividir(num1: int = 0, num2: int = 0):
    """Divide num1 by num2 via Calc.dividir.

    NOTE(review): Calc.dividir's behavior for num2 == 0 is not visible here —
    confirm it handles division by zero.
    """
    return {
        "total": calc.dividir(num1, num2)
    }
"""
Constants of services that can be discovered.
"""
BELKIN_WEMO = "belkin_wemo"
DLNA = "DLNA"
GOOGLE_CAST = "google_cast"
PHILIPS_HUE = "philips_hue"
PMS = 'plex_mediaserver'
NETGEAR_ROUTER = "netgear_router"
SONOS = "sonos"
|
# Build an HTML view of the user-by-movie rating matrix and open it in a browser.
import pandas as pd
import numpy as np
import os
import webbrowser

# Read the dataset into a data table using Pandas
# NOTE(review): dtype np.uint8 for 'rating' truncates fractional ratings
# (e.g. 4.5 -> 4) — confirm ratings.csv only holds whole-number ratings.
df = pd.read_csv("ratings.csv", dtype={'userId': np.int32, 'movieId': np.int32, 'rating': np.uint8})

# Convert the running list of user ratings into a matrix using the 'pivot table' function
# (np.max keeps the highest rating if a user rated the same movie more than once).
ratings_df = pd.pivot_table(df, index='userId', columns='movieId', aggfunc=np.max)

# Create a web page view of the data for easy viewing
html = ratings_df.to_html(na_rep="")

# Save the html to a temporary file
with open("review_matrix.html", "w", encoding='utf-8') as f:
    f.write(html)

# Open the web page in our web browser
full_filename = os.path.abspath("review_matrix.html")
webbrowser.open("file://{}".format(full_filename))
import unittest
from katas.kyu_7.simple_template import create_template
class TemplateTestCase(unittest.TestCase):
    """Behavioural tests for create_template()."""

    def setUp(self):
        # Two independent templates exercised by the tests below.
        self.template = create_template('{{name}} likes {{animalType}}')
        self.template2 = create_template('{{first}} {{last}}')

    def test_equals(self):
        rendered = self.template(name='John', animalType='dogs')
        self.assertEqual(rendered, 'John likes dogs')

    def test_equals_2(self):
        rendered = self.template2(first='Smitty', last='Bacall')
        self.assertEqual(rendered, 'Smitty Bacall')

    def test_equals_3(self):
        # Unknown keywords are ignored; the missing placeholder renders empty.
        rendered = self.template2(first='Smitty', other='other')
        self.assertEqual(rendered, 'Smitty ')
|
from enum import Enum, unique
import subprocess
import platform
import sys
import restic.parser
from restic.core import version
from restic.config import restic_bin
from restic.snapshot import Snapshot
from restic.key import Key
@unique
class RepoKind(Enum):
    """Back-end types a restic repository can live on.

    Values are arbitrary unique integers.  Per ``Repo.init`` below, only
    Local, SFTP, REST and S3 are currently accepted for repo creation; the
    remaining kinds are declared for completeness.
    """
    Local = 0
    SFTP = 1
    REST = 2
    S3 = 3
    Swift = 4
    B2 = 5
    Azure = 6
    GoogleStorage = 7
    Rclone = 8
class Repo(object):
    """Thin wrapper around the ``restic`` command-line binary for one repository.

    Every public method shells out to restic via :mod:`subprocess`, feeding
    the repository password on stdin.
    """

    # Class-level defaults; real values are assigned per instance in __init__.
    kind = None
    path = None
    password = None
    is_open = False

    def __init__(self, path, password, kind=RepoKind.Local):
        """Remember repository location/credentials and sanity-check restic.

        :param path: repository URL/path as understood by ``restic -r``
        :param password: repository password, written to restic's stdin
        :param kind: one of :class:`RepoKind`
        """
        self.path = path
        self.kind = kind
        self.password = password
        self.is_open = False
        try:
            version()
        except Exception:
            print('restic is not in env or it has not been installed')

    def _run_command(self, cmd):
        """Run *cmd*, feeding the repository password on stdin; return stdout.

        :raises RuntimeError: when restic is missing or exits non-zero.
        """
        out = ''
        err = ''
        try:
            with subprocess.Popen(
                    cmd,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    encoding='utf-8',
                    text=True) as proc:
                # communicate() waits for the process, so the old proc.wait()
                # call was redundant and has been dropped.
                out, err = proc.communicate(self.password)
                # NOTE: stderr is not piped, so `err` is always None here; the
                # check is kept defensively should stderr ever be captured.
                if err is not None:
                    raise RuntimeError('Command runtime failure')
                if proc.returncode != 0:
                    raise RuntimeError(f'Return code {proc.returncode} is not zero')
        except FileNotFoundError:
            raise RuntimeError('Cannot find restic installed')
        # restic echoes this prompt when the password arrives on stdin; strip
        # it (the old code hard-coded the slice length 25).
        prompt = 'read password from stdin\n'
        if out.startswith(prompt):
            out = out[len(prompt):]
        return out

    def _build_command_internal(self):
        """Return the base ``restic -r <path>`` argv list."""
        cmd = [restic_bin, '-r', self.path]
        return cmd

    def _build_command(self, cacert=None, cache_dir=None, no_lock=False,
                       limit_download=None, limit_upload=None, quiet=False,
                       verbose=False, json=False):
        """Build a restic argv list carrying the requested global flags.

        :raises ValueError: when an option has the wrong type.
        """
        cmd = [restic_bin, '-r', self.path]
        # `isinstance` replaces the old `type(x) == T` comparisons throughout.
        if cacert is not None:
            if isinstance(cacert, str):
                cmd.extend(['--cacert', cacert])
            else:
                raise ValueError('cacert shall be type of str or None')
        if cache_dir is not None:
            if isinstance(cache_dir, str):
                cmd.extend(['--cache-dir', cache_dir])
            else:
                raise ValueError('cache_dir shall be type of str or None')
        if no_lock:
            cmd.append('--no-lock')
        if limit_download is not None:
            if isinstance(limit_download, int):
                cmd.extend(['--limit-download', str(limit_download)])
            else:
                # BUG FIX: the message previously said "str" for an int option.
                raise ValueError('limit_download shall be type of int or None')
        if limit_upload is not None:
            if isinstance(limit_upload, int):
                cmd.extend(['--limit-upload', str(limit_upload)])
            else:
                # BUG FIX: the message previously said "str" for an int option.
                raise ValueError('limit_upload shall be type of int or None')
        if quiet:
            cmd.append('--quiet')
        if verbose:
            cmd.append('--verbose')
        if json:
            cmd.append('--json')
        return cmd

    @staticmethod
    def init(url, password, repo_kind=RepoKind.Local):
        """Create a new repository at *url* and return a :class:`Repo` for it.

        :raises NotImplementedError: for unsupported repository kinds.
        """
        if repo_kind not in [RepoKind.Local, RepoKind.SFTP, RepoKind.REST, RepoKind.S3]:
            raise NotImplementedError('This kind of repo is not implemented now.')
        # check url valid(TODO)
        # Normalize the URL with the scheme prefix restic expects.
        if repo_kind == RepoKind.Local:
            repo_url = url
        elif repo_kind == RepoKind.SFTP:
            repo_url = url if url.startswith('sftp:') else 'sftp:' + url
        elif repo_kind == RepoKind.REST:
            repo_url = url if url.startswith('rest:') else 'rest:' + url
        elif repo_kind == RepoKind.S3:
            repo_url = url if url.startswith('s3:') else 's3:' + url
        else:
            raise NotImplementedError('This kind of repo is not implemented now.')
        # BUG FIX: the normalized repo_url was previously computed but never
        # used -- the raw url was passed both to Repo() and to `restic init`,
        # breaking sftp/rest/s3 repositories.
        repo = Repo(repo_url, password, repo_kind)
        repo._run_command([restic_bin, 'init', '--repo', repo_url])
        return repo

    # ------------------------------------------------------------------
    # Internal command implementations
    # ------------------------------------------------------------------
    def _run_snapshots_command(self):
        """Run ``restic snapshots --json`` and return parsed snapshots."""
        cmd = self._build_command(json=True)
        cmd.append('snapshots')
        ret = self._run_command(cmd)
        if ret is None:
            return
        return restic.parser.parse_snapshots(self, ret)

    def _run_stats_command(self):
        """Run ``restic stats --json`` and return parsed statistics."""
        cmd = self._build_command(json=True)
        cmd.append('stats')
        ret = self._run_command(cmd)
        if ret is None:
            return
        return restic.parser.parse_stats(self, ret)

    # (Misspelled name kept for backward compatibility with existing callers.)
    def _run_key_list_commond(self):
        """Run ``restic key list --json`` and return parsed keys."""
        cmd = self._build_command(json=True)
        cmd.extend(['key', 'list'])
        ret = self._run_command(cmd)
        if ret is None:
            return
        return restic.parser.parse_key(self, ret)

    # ------------------------------------------------------------------
    # Public repository API
    # ------------------------------------------------------------------
    def backup(self, file_path, exclude=None, tags=None):
        """Back up *file_path*, optionally excluding patterns and tagging.

        :param exclude: list of glob patterns to skip
        :param tags: str or list of str tags to attach to the snapshot
        :raises ValueError: when *tags* has the wrong type
        """
        # check url valid(TODO)
        cmd = self._build_command()
        cmd.extend(['backup', file_path])
        if exclude is not None and isinstance(exclude, list):
            # BUG FIX: restic takes one --exclude flag per pattern; the old
            # code joined all patterns into a single argument with literal
            # double quotes and commas, which restic would treat as one
            # (nonsensical) pattern.
            for each_file in exclude:
                cmd.extend(['--exclude', each_file])
        if tags is not None:
            if isinstance(tags, str):
                cmd.extend(['--tag', tags])
            elif isinstance(tags, list):
                for each_tag in tags:
                    cmd.extend(['--tag', each_tag])
            else:
                raise ValueError('tags shall be type of str or list')
        self._run_command(cmd)

    def check(self, read_data=False):
        """Verify repository integrity; return True when errors were found.

        :param read_data: also verify pack file contents (slow)
        """
        cmd = self._build_command()
        cmd.append('check')
        if read_data:
            cmd.append('--read-data')
        ret_text = self._run_command(cmd)
        if ret_text is None:
            return
        lines = ret_text.splitlines()
        # restic prints this exact sentence as the last line on success.
        has_errors = lines[-1].strip() != 'no errors were found'
        if has_errors:
            for each_line in lines:
                if each_line.startswith('error') or each_line.startswith('Fatal'):
                    print(each_line)
        return has_errors

    def mount(self, target, snapshot='latest'):
        """Mount the repository at *target* via FUSE (Linux only).

        :raises RuntimeError: on non-Linux platforms
        :raises ValueError: when *snapshot* has the wrong type
        """
        if 'Linux' not in platform.system():
            raise RuntimeError('Mounting repositories via FUSE is not possible on OpenBSD, Solaris/illumos and Windows.')
        if isinstance(snapshot, Snapshot):
            snapshot = snapshot.snapshot_id
        elif not isinstance(snapshot, str):
            raise ValueError('snapshot shall be type of str or Snapshot')
        cmd = self._build_command()
        # BUG FIX: the argv was previously [snapshot, 'mount', target], which
        # made restic parse the snapshot id as the subcommand.  restic mounts
        # the whole repository; individual snapshots (including the requested
        # one) are browsable under the mount point.
        cmd.extend(['mount', target])
        self._run_command(cmd)

    def restore(self, target, snapshot='latest'):
        """Restore *snapshot* into directory *target*.

        :raises ValueError: when *snapshot* has the wrong type
        """
        if isinstance(snapshot, Snapshot):
            snapshot = snapshot.snapshot_id
        elif not isinstance(snapshot, str):
            raise ValueError('snapshot shall be type of str or Snapshot')
        cmd = self._build_command()
        cmd.extend(['restore', snapshot, '--target', target])
        self._run_command(cmd)

    def snapshots(self):
        """Return the list of snapshots stored in the repository."""
        snapshots_list = self._run_snapshots_command()
        return snapshots_list

    def stats(self):
        """Return repository statistics."""
        return self._run_stats_command()

    @staticmethod
    def _append_tag_flags(cmd, flag, value, arg_name):
        """Append one *flag* per tag in *value* (str or list) to *cmd*.

        Helper extracted from tag(): the add/remove/set branches were three
        copies of the same logic.
        """
        if value is None:
            return
        if isinstance(value, str):
            cmd.extend([flag, value])
        elif isinstance(value, list):
            for each_tag in value:
                if ',' not in each_tag:
                    cmd.extend([flag, each_tag])
                else:
                    raise ValueError('the `,` charactor in tag may make PyRestic wrong')
        else:
            raise ValueError(f'{arg_name} shall be type of str or list')

    def tag(self, add_tags=None, remove_tags=None, set_tags=None, snapshot='latest'):
        """Add, remove and/or replace tags on *snapshot*.

        :raises ValueError: when an argument has the wrong type
        """
        if isinstance(snapshot, Snapshot):
            snapshot = snapshot.snapshot_id
        elif not isinstance(snapshot, str):
            raise ValueError('snapshot shall be type of str or Snapshot')
        cmd = self._build_command()
        cmd.append('tag')
        self._append_tag_flags(cmd, '--add', add_tags, 'add_tags')
        self._append_tag_flags(cmd, '--remove', remove_tags, 'remove_tags')
        self._append_tag_flags(cmd, '--set', set_tags, 'set_tags')
        cmd.append(snapshot)
        self._run_command(cmd)
|
#-*-coding:utf-8-*-
#__author__='maxiaohui'
import subprocess
import time,random,os
import datetime
from config import config
# Timestamp pieces (MMDD and HHMMSS, captured at import time) used to build
# unique log and screenshot file names throughout this module.
fail_date=datetime.date.today().strftime('%m%d')
fail_time=time.strftime('%H%M%S')
timeTag=fail_date+fail_time
def getLogcat(deviceID=config.deviceId,deviceName='',keyword=''):
    """Start `adb logcat` for *deviceID* in the background, writing to a file.

    Returns (process, log_filename); stop the capture later with stopPro().
    """
    # Log file: <log_path>\<device>_<keyword>_<MMDDHHMMSS>.txt
    filename = config.log_path+"\\"+deviceName+"_"+keyword+"_"+fail_date+fail_time+".txt"
    # print(filename)
    # NOTE(review): this handle is never closed explicitly; it stays open for
    # the lifetime of the spawned process.
    logcat_file = open(filename, 'w')
    if keyword=='':
        logcmd = "adb -s %s logcat -v time" % (deviceID)
    else:
        # NOTE(review): the `|grep` pipe only works when the command runs
        # through a shell, but shell=True is not set here -- confirm on the
        # target platform.
        logcmd = "adb -s %s logcat -v time |grep %s" % (deviceID,keyword)
    # print("adb command to execute: "+logcmd)
    pro = subprocess.Popen(logcmd, stdout=logcat_file, stderr=subprocess.PIPE)
    return pro,filename
def stopPro(pro):
    """Terminate a background capture process started by getLogcat/captureCPU."""
    pro.terminate()
#adb -s 192.168.29.248:5555 shell top -d 5 | grep -E 'box'
# BUG FIX: a stray pasted `sinanews://...` URL preceded this `def` on the same
# line, which made the whole module a SyntaxError; the junk text was removed.
def captureCPU(gap):
    """Sample CPU usage via `adb shell top` every *gap* seconds in background.

    Returns (process, log_filename); stop the capture with stopPro().
    """
    filename = config.log_path+"\\"+"cpu_"+fail_date+fail_time+".txt"
    cpulog_file = open(filename, 'w')
    cmd_cpu = "adb -s %s shell top -d %d | grep -E 'box|case'" % (config.deviceId,gap )
    pro = subprocess.Popen(cmd_cpu, stdout=cpulog_file, stderr=subprocess.PIPE)
    return pro,filename
# Collect memory information
#adb shell dumpsys meminfo | findstr "RAM"
def captureMemory():
    """Sample `dumpsys meminfo` 14 times, once every 10s; returns the log path."""
    filename = config.log_path + "\\" + "memory_" + fail_date + fail_time + ".txt"
    cpulog_file = open(filename, 'w')
    cmd_meminfo = "adb -s %s shell dumpsys meminfo | grep -E 'box|case'" % config.deviceId
    # print(cmd_meminfo)
    times=1
    # NOTE(review): each iteration spawns a new adb process without waiting on
    # it, and the file handle is never closed explicitly.
    while times<15:
        subprocess.Popen(cmd_meminfo, stdout=cpulog_file, stderr=subprocess.PIPE)
        times+=1
        time.sleep(10)
    return filename
# Capture the device screen
def captureScreen(filename):
    """Screenshot the device, pull the PNG into log_path, then delete it on-device."""
    cmd_capture = "adb -s %s shell screencap /sdcard/DCIM/%s.png" % (config.deviceId, filename+"_"+fail_date+"_"+fail_time)
    #print(cmd_capture)
    pic_name=filename+"_"+fail_date+"_"+fail_time
    print("生成了错误图片:"+pic_name+" 文件路径:"+config.log_path)
    # capture -> pull -> rm; the sleeps give adb time to finish each step.
    subprocess.Popen(cmd_capture, shell=True)
    time.sleep(3)
    cmd_pull = "adb -s %s pull /sdcard/DCIM/%s.png %s" % (config.deviceId, pic_name,config.log_path)
    #print("running pull command: "+cmd_pull)
    subprocess.Popen(cmd_pull, shell=True)
    time.sleep(1)
    cmd_rm = "adb -s %s shell rm /sdcard/DCIM/%s.png" % (config.deviceId, pic_name)
    #print("running rm command: "+cmd_rm)
    subprocess.Popen(cmd_rm, shell=True)
# Decorator that wraps a test function: start capturing adb logcat before the
# test runs and stop the capture ~5 seconds after it finishes.
def getAdbLog(test):
    """Decorator: capture adb logcat while the wrapped test function runs."""
    import functools

    @functools.wraps(test)  # preserve the wrapped function's name/docstring
    def logResult(*args, **kwargs):  # args/kwargs are forwarded unchanged
        pro, filename = getLogcat(config.deviceId, config.deviceSN, config.logKeyWord)
        try:
            # BUG FIX: the wrapped function's return value was discarded.
            return test(*args, **kwargs)
        finally:
            # BUG FIX: logcat was never stopped when the test raised; the
            # sleep lets logcat flush before termination.
            time.sleep(5)
            stopPro(pro)
    return logResult
@getAdbLog
def runMonkey(timeHour):
    """Run an adb monkey stress test for *timeHour* hours under a fresh log dir."""
    # time.sleep below is measured in seconds
    # ~13.96 monkey events per second is assumed when sizing the event count.
    sendTimes=int(timeHour*3600*13.96)
    seed=random.choice(range(100))
    timeTagMonkey = datetime.date.today().strftime('%m%d') + time.strftime('%H%M%S')
    os.chdir(config.log_path)
    os.mkdir(timeTagMonkey)
    monkeyLog=config.log_path+'\\'+timeTagMonkey
    cmd='adb -s %s shell monkey -v -v -v --ignore-crashes --ignore-timeouts --monitor-native-crashes --throttle 300 -s %d %d 1>%s\info%s.txt 2>%s\error%s.txt'%(config.deviceId,seed,sendTimes,monkeyLog,timeTag,monkeyLog,timeTag)
    print(cmd)
    subprocess.Popen(cmd, shell=True)
    # Block until the requested run time has elapsed.
    time.sleep(timeHour*3600)
if __name__=="__main__": #当前脚本运行实例
#runMonkey(0.1)
# pro,filename=captureCPU(5)
# time.sleep(60)
# stopPro(pro)
# captureMemory()
pro,file=getLogcat(keyword="resultType")
time.sleep(20)
stopPro(pro)
|
#!/usr/bin/env python3
"""
desc: demonstration of write functions for chunks with sample scraped data
"""
from chunk import Chunk
if __name__ == '__main__':
    # Sample scraped pages as (link, title, html) triples, in append order.
    scraped_pages = [
        ("http://samplelink00.com",
         "Hello 0",
         "<html>"
         "<body><h1>Enter the main heading, usually the same as the title.</h1>"
         "<p>Be <b>bold</b> in stating your key points. Put them in a list: </p>"
         "</body>"
         "</html>"),
        ("https://somelink1.com",
         "This is title 1",
         "<html>"
         "<head>Hello</head>"
         "<body><h1>Enter the main heading, usually the same as the title.</h1>"
         "<p>Be <b>bold</b> in stating your key points. Put them in a list: </p>"
         "</body>"
         "</html>"),
        ("http://anotherlink2.net",
         "Page 2",
         "<html>"
         "<head>Hello</head>"
         "<body><h1>Enter the main heading, usually the same as the title.</h1>"
         "<ul>"
         "</ul>"
         "</body>"
         "</html>"),
        ("http://link3.org",
         "Number 3333",
         "<html>"
         "<head>This is the third page scraped</head>"
         "<body><h1>Enter the main heading, usually the same as the title.</h1>"
         "<p>Be <b>bold</b> in stating your key points. Put them in a list: </p>"
         "</body>"
         "</html>"),
        ("http://lastdoc.com/04/chunk",
         "Document 04",
         "<html>"
         "<head>This is the last document</head>"
         "<body><h1>Enter the main heading, usually the same as the title.</h1>"
         "<p>Be <b>bold</b> in stating your key points. Put them in a list: </p>"
         "</body>"
         "</html>"),
    ]

    chunk_123 = Chunk('123')
    chunk_123.create_chunk()
    # For each page: record its header value first, then append the page.
    for position, (link, title, html) in enumerate(scraped_pages):
        chunk_123.compute_file_header_value(position)
        chunk_123.append_to_chunk(link, title, html)
    print(chunk_123.header)
    chunk_123.append_header_to_chunk()
|
import unittest
from katas.kyu_7.sum_factorial import sum_factorial
class SumFactorialTestCase(unittest.TestCase):
    """Tests for sum_factorial()."""

    def test_equals(self):
        result = sum_factorial([4, 6])
        self.assertEqual(result, 744)

    def test_equals_2(self):
        result = sum_factorial([5, 4, 1])
        self.assertEqual(result, 145)
|
import PySimpleGUI as sg
import pyaudio
import numpy as np
"""PyAudio PySimpleGUI Blocking Stream for Microphone"""
# VARS CONSTS:
# We hold a reference to the PySimpleGUI window
# so we can update it later.
_VARS = {'window': False}
# pysimpleGUI INIT:
AppFont = 'Any 16'
sg.theme('DarkTeal3')
# One progress bar (acting as a level meter) plus Listen/Exit buttons.
layout = [[sg.ProgressBar(10000, orientation='h',
                          size=(20, 20), key='-PROG-')],
          [sg.Button('Listen', font=AppFont),
           sg.Button('Exit', font=AppFont)]]
_VARS['window'] = sg.Window('Mic Max Data', layout, finalize=True)
# PyAudio INIT:
CHUNK = 1024  # Samples: 1024, 512, 256, 128
RATE = 44100  # Equivalent to Human Hearing at 40 kHz
INTERVAL = 4  # Sampling Interval in Seconds ie Interval to listen
# We moved all the PyAudio part to a new function:
def listen():
    """Record INTERVAL seconds from the default microphone, driving the
    progress bar with each chunk's peak amplitude, then release the stream."""
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
    # Loop through chunks: blocking reads of CHUNK 16-bit mono samples.
    for i in range(int(INTERVAL*RATE/CHUNK)):
        data = np.frombuffer(stream.read(CHUNK), dtype=np.int16)
        chunkMax = np.amax(data)
        # print(chunkMax)
        # Update the progressBar via the window reference.
        _VARS['window']['-PROG-'].update(chunkMax)
    # reset the progress bar after listening.
    _VARS['window']['-PROG-'].update(0)
    # Tidy up -- stop/close the stream before terminating PyAudio.
    stream.stop_stream()
    stream.close()
    print('closing stream')
    p.terminate()
    print('terminating PyAudio')
# MAIN LOOP: block on GUI events until the window closes or Exit is clicked;
# 'Listen' triggers one INTERVAL-second capture via listen().
while True:
    event, values = _VARS['window'].read()
    if event == sg.WIN_CLOSED or event == 'Exit':
        break
    if event == 'Listen':
        listen()
_VARS['window'].close()
|
first_name = ["John","Jason","Gerry","Mark"]
last_name = ["Snow", "White","Henry","Waugh"]
# Build every first/last combination, echoing each full name as it appears.
name_generated = []
for given in first_name:
    for family in last_name:
        full_name = f"{given} {family}"
        name_generated.append(full_name)
        print(full_name)
print(name_generated)
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        Merge nums2 into nums1 in place (nums1 has m + n slots).

        :type nums1: List[int]  first m entries are valid, rest are fill values
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: None Do not return anything, modify nums1 in-place instead.
        """
        # BUG FIX: the previous insert/pop approach ignored m, compared the
        # zero fill values of nums1 as if they were real data, and raised
        # IndexError in its final copy loop on the sample input.  Merging from
        # the back writes each slot exactly once with no shifting: O(m + n).
        i, j, k = m - 1, n - 1, m + n - 1
        while j >= 0:
            # Take the larger tail element; when nums1 is exhausted (i < 0),
            # the remainder of nums2 is copied in.
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
# Demo driver for Solution.merge.
nums1 = [1, 2, 3, 0, 0, 0]
nums2 = [2, 5, 6]
m = 3
n = 3
obj = Solution()
# merge() mutates nums1 in place and returns None, so `result` is None here;
# the merged values live in nums1 after the call.
result = obj.merge(nums1, m, nums2, n)
print(result)
# Expected nums1 after the call: [1, 2, 2, 3, 5, 6]
|
BITS = 16  # width of the two's-complement representation

def to_bin_str(n):
    """Return *n* as a zero-padded BITS-wide two's-complement binary string."""
    mask = (1 << BITS) - 1          # keep only the low BITS bits
    return format(n & mask, "0{}b".format(BITS))
|
from django.shortcuts import render, redirect
from plotly.offline import plot
import plotly.graph_objects as go
from django.contrib import messages
# Create your views here.
def home(request):
    """Render the welcome page with a Plotly calories-over-time scatter plot."""
    def scatter():
        """Build the calorie/time chart for the current user; returns a plotly div."""
        #x1 = [1,2,3,4]
        #y1 = [30, 35, 25, 45]
        import pymongo
        from pymongo import MongoClient
        import ssl
        import datetime
        import pandas as pd
        from .dash_apps.finished_apps.nutrienti import dataframe_nutrimenti, calcola_nutrienti
        # TODO: clean all of this up ("Sistemare tutto")
        # SECURITY NOTE(review): MongoDB credentials are hard-coded in source
        # and certificate checks are disabled (CERT_NONE) -- move to settings/
        # environment variables and rotate the credential.
        client = pymongo.MongoClient(
            "mongodb+srv://armonia_amministrazione:uanrimcoantitao2l0i2c1a@clusterarmonia.ukpoq.mongodb.net/local?retryWrites=true&w=majority",
            ssl_cert_reqs=ssl.CERT_NONE)
        # db = client.test
        db = client['ArmoniaBot']
        col = db['AlimentiDB']
        username = None
        if request.user.is_authenticated:
            username = request.user.email#username
            print(username)
        if(username == 'aabeltino'):
            prove = col.find({'Nome': 'AlessioNone'})
        else:
            prove = col.find({'Nome': 'GiuseppeMaulucci'})
        data = pd.DataFrame()
        # Aggregate per-day nutrient totals for each meal into one dataframe.
        for prova in prove:
            date = prova['Data']
            colazione = calcola_nutrienti(prova, 'Colazione')
            pranzo = calcola_nutrienti(prova, 'Pranzo')
            cena = calcola_nutrienti(prova, 'Cena')
            merenda = calcola_nutrienti(prova, 'Merenda')
            dff, cola, pra, cen, mere = dataframe_nutrimenti(colazione, pranzo, cena, merenda, date)
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0 --
            # confirm the pinned pandas version.
            data = data.append(dff)
        data['Data'] = data.index
        data["Data"] = pd.to_datetime(data["Data"], format="%Y-%m-%d")
        data.sort_values("Data", inplace=True)
        x1=data['Data']
        y1=data['Calorie']
        trace = go.Scatter(
            x=x1,
            y=y1
        )
        layout = dict(
            title=username,#'Simple Graph',
            xaxis=dict(range=[min(x1), max(x1)]),
            yaxis = dict(range=[min(y1), max(y1)])
        )
        fig = go.Figure(data=[trace], layout=layout)
        plot_div = plot(fig, output_type='div', include_plotlyjs=False)
        return plot_div
    context ={
        'plot1': scatter()
    }
    return render(request, 'home/welcome.html', context)
def login(request):
    """Render the standalone login page."""
    return render(request, 'home/login.html')
def dashboard(request):
    """Render the dashboard page."""
    return render(request, 'home/dashboard.html')
from .dash_apps.finished_apps.forms import NameForm, SearchFood
#def get_name(request):
# print('Sono entrato')
# # if this is a POST request we need to process the form data
# if request.method == 'POST':
# # create a form instance and populate it with data from the request:
# form = NameForm(request.POST)
# # check whether it's valid:
# if form.is_valid():
# # process the data in form.cleaned_data as required
# # ...
# # redirect to a new URL:
# return HttpResponseRedirect('/thanks/')
# if a GET (or any other method) we'll create a blank form
# else:
# form = NameForm()
# return render(request, 'name.html', {'form': form})
def index(request):
    """Landing page with a contact form; emails the request contents on valid POST."""
    def e_mail_message(nome,telefono,mail, messaggio):
        """Send the contact-form content to the site admin mailbox via Gmail SMTP."""
        import email, smtplib, ssl
        from email import encoders
        from email.mime.base import MIMEBase
        from email.mime.multipart import MIMEMultipart
        from email.mime.text import MIMEText
        import pandas as pd
        from colored import fg, bg, attr
        subject = "Richiesta informazioni"
        # body = "This is an email with attachment sent from Python"
        sender_email = "armonia.amministrazione@gmail.com"
        # SECURITY NOTE(review): SMTP password hard-coded in source -- move to
        # settings/environment variables and rotate this credential.
        password = 'Armonia2021!!'
        #receiver_email = "armonia." + gmail + "@gmail.com"
        receiver_email = sender_email
        body = nome + ' ha scritto:\n'+messaggio+'\nContatti:\n'+'Email: '+mail+'\nTelefono: '+telefono
        # Create a multipart message and set headers
        message = MIMEMultipart()
        message["From"] = sender_email
        message["To"] = receiver_email
        message["Subject"] = subject
        message["Bcc"] = receiver_email  # Recommended for mass emails
        # Add body to email
        message.attach(MIMEText(body, "plain"))
        text = message.as_string()
        # Log in to server using secure context and send email
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
            server.login(sender_email, password)
            server.sendmail(sender_email, receiver_email, text)
        print(fg(14) + body + attr(0))
        print('%sInviato%s' % (fg(10), attr(1)))
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = NameForm(request.POST)
        # check whether it's valid:
        #from django.core.mail import send_mail
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # ...
            print(form.cleaned_data['your_name'])
            #print(form.your_phone)
            #print(form.your_email)
            print(form.cleaned_data['your_message'])
            # redirect to a new URL:
            #return HttpResponseRedirect('/thanks/')
            subject = form.cleaned_data['your_name']
            phone = form.cleaned_data['your_phone']
            message = form.cleaned_data['your_message']
            sender = form.cleaned_data['your_email']
            #cc_myself = form.cleaned_data['my_cc']
            recipients = ['armonia.amministrazione@gmail.com']
            #if cc_myself:g
            #    recipients.append(sender)
            e_mail_message(subject, phone, sender, message)
            #send_mail(subject, message, sender, recipients)
            messages.success(request, 'Contact request submitted successfully.')
        else:
            messages.error(request, 'Invalid form submission.')
    # if a GET (or any other method) we'll create a blank form
    else:
        form = NameForm()
    context = {
        'form' : form
    }
    #context = {
    #    'send': e_mail_message('amministrazione',message)
    #}
    return render(request, 'home/landing_page/index.html',context)
from .dash_apps.finished_apps.forms import NewUserForm, LoginForm
from django.contrib.auth import login, authenticate, logout #add this
from django.contrib import messages
#from django.contrib.auth.forms import AuthenticationForm #add this
def register_request(request):
    """Handle signup: on valid POST create the user, log them in and redirect."""
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            login(request, new_user)
            messages.success(request, "Registration successful." )
            return redirect("welcome.html")
        messages.error(request, "Unsuccessful registration. Invalid information.")
    # GET (or failed POST) falls through to an empty form.
    form = NewUserForm()
    return render (request=request, template_name="home/landing_page/register.html", context={"register_form":form})
# def login_request(request):
# if request.method == "POST":
# form = AuthenticationForm(request, data=request.POST)
# if form.is_valid():
# username = form.cleaned_data.get('username')
# password = form.cleaned_data.get('password')
# user = authenticate(username=username, password=password)
# if user is not None:
# login(request, user)
# messages.info(request, f"You are now logged in as {username}.")
# return redirect("welcome.html")
# else:
# messages.error(request,"Invalid username or password.")
# else:
# messages.error(request,"Invalid username or password.")
# form = AuthenticationForm()
# return render(request=request, template_name="home/landing_page/login.html", context={"login_form":form})
def login_request(request):
    """Authenticate via LoginForm; redirect on success, flash an error otherwise."""
    if request.method == "POST":
        form = LoginForm(request.POST)
        if not form.is_valid():
            messages.error(request,"Invalid username or password.")
        else:
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is None:
                messages.error(request,"Invalid username or password.")
            else:
                login(request, user)
                #messages.info(request, f"You are now logged in as {username}.")
                return redirect("welcome.html")
    # GET (or failed POST) falls through to a blank form.
    form = LoginForm()
    return render(request=request, template_name="home/landing_page/login.html", context={"login_form":form})
def logout_request(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    messages.info(request, "You have successfully logged out.")
    return redirect("login")
# def tables(request):
# return redirect("home/landing_page/tables.html")
def form(request):
    """Food-diary page: search a food by name and log it to the user's diary."""
    def search(alimento):
        """Look *alimento* up across every food DB; returns matching names."""
        import pymongo
        from pymongo import MongoClient
        import ssl
        import datetime
        # SECURITY NOTE(review): hard-coded DB credentials and CERT_NONE again
        # (same issue as home()) -- move to settings/env and rotate.
        client = pymongo.MongoClient(
            "mongodb+srv://armonia_amministrazione:uanrimcoantitao2l0i2c1a@clusterarmonia.ukpoq.mongodb.net/local?retryWrites=true&w=majority",
            ssl_cert_reqs=ssl.CERT_NONE)
        db = client['ArmoniaBot']
        col = db['DatabaseDietabit']
        somma = 0  # Calorie
        db2 = []
        print('Cerco in dietabit:', alimento.split()[0])
        nome = alimento.split()[0]
        # Prefix regex on the first word minus its last character -- a crude
        # stemming so singular/plural forms both match.
        query_ricerca = {"Nome": {"$regex": nome[0:len(nome) - 1]}}
        print('Ecco la lista in dietabit')
        ricette = []
        for ric in col.find(query_ricerca):
            print(ric['Nome'])
            ricette.append(ric)
        print('Cerco anche in Crea')
        col = db['DatabaseCibo']
        for ric in col.find(query_ricerca):
            print(ric['Nome'])
            ricette.append(ric)
        query_ricerca = {"Nome": {"$regex": nome[0:len(nome) - 1].lower()}}
        print('Cerco anche in BDA')
        col = db['DatabaseBDA']
        for ric in col.find(query_ricerca):
            print(ric['Nome'])
            ricette.append(ric)
        print('Cerco anche in RicetteCalorie')
        col = db['DatabaseRicetteCalorie']
        for ric in col.find(query_ricerca):
            print(ric['Nome'])
            ricette.append(ric)
        print('Cerco anche in SushiDB')
        col = db['SushiDB']
        query_ricerca = {"Name": {"$regex": nome[0:len(nome) - 1]}}
        for ric in col.find(query_ricerca):
            print(ric['Name'])
            ricette.append(ric)
        # NOTE(review): this loop only evaluates the name expressions and has
        # no visible effect.
        for j in range(0, len(ricette)):
            if ('Nome' in ricette[j].keys()):
                (ricette[j]['Nome'])
            else:
                (ricette[j]['Name'])
        import openfoodfacts
        ricette2 = openfoodfacts.products.get_by_brand(alimento)
        # markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        for j in range(0, len(ricette2)):
            ricette.append(ricette2[j])
            if ('product_name' in ricette2[j].keys()):
                (ricette2[j]['product_name'])
            elif ('product_name_en' in ricette2[j].keys()):
                (ricette2[j]['product_name_en'])
            elif ('product_name_it' in ricette2[j].keys()):
                (ricette2[j]['product_name_it'])
            elif ('product_name_fr' in ricette2[j].keys()):
                (ricette2[j]['product_name_fr'])
        alimenti=[]
        # Flatten every match down to its display name, whichever key it uses.
        for ricetta in ricette:
            if('Nome' in ricetta.keys()):
                alimenti.append(ricetta['Nome'])
            elif ('Name' in ricetta.keys()):
                alimenti.append(ricetta['Name'])
            elif ('product_name' in ricetta.keys()):
                alimenti.append(ricetta['product_name'])
            elif ('product_name_en' in ricetta.keys()):
                alimenti.append(ricetta['product_name_en'])
            elif ('product_name_it' in ricetta.keys()):
                alimenti.append(ricetta['product_name_it'])
            elif ('product_name_fr' in ricetta.keys()):
                alimenti.append(ricetta['product_name_fr'])
        return alimenti
    def inserire(alimento, pasto, data, qty, utente):
        """Insert *qty* of *alimento* into *utente*'s diary for meal *pasto* on *data*."""
        import pymongo
        from pymongo import MongoClient
        import ssl
        import datetime
        client = pymongo.MongoClient(
            "mongodb+srv://armonia_amministrazione:uanrimcoantitao2l0i2c1a@clusterarmonia.ukpoq.mongodb.net/local?retryWrites=true&w=majority",
            ssl_cert_reqs=ssl.CERT_NONE)
        db = client['ArmoniaBot']
        col = db['Prova']
        # Normalize the date to midnight so one document covers a whole day.
        data = datetime.datetime.combine(data, datetime.datetime.min.time())
        col2 = db['DatabaseDietabit']
        ali = col2.find_one({'Nome': alimento})
        # Fall back through the other databases until the food is found.
        if (ali == None):
            col2 = db['DatabaseCibo']
            ali = col2.find_one({'Nome': alimento})
            if (ali == None):
                col2 = db['DatabaseBDA']
                ali = col2.find_one({'Nome': alimento})
                if (ali == None):
                    col2 = db['SushiDB']
                    ali = col2.find_one({'Name': alimento})
        ali['Quantita'] = float(qty)
        qry = {"Nome": utente, "Data": data}
        cerco = col.find_one(qry)
        if (cerco == None):
            col.insert_one({'Nome': utente, 'Data': data, pasto: {'1': ali}})
        else:
            if (pasto in cerco.keys()):
                # Append as the next numbered entry of an existing meal.
                numero = str(len(cerco[pasto].keys()) + 1)
                cerco[pasto][numero] = ali
                new_value = {"$set": {pasto: cerco[pasto]}}
                update = col.update_one(qry, new_value)
            else:
                new_value = {"$set": {pasto: {'1': ali}}}
                update = col.update_one(qry, new_value)
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = SearchFood(request.POST)
        # check whether it's valid:
        #from django.core.mail import send_mail
        insieme = []
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # ...
            print(form.cleaned_data['search_name'])
            nome = form.cleaned_data['search_name']
            data = form.cleaned_data['data']
            pasto = form.cleaned_data['pasto']
            qty = form.cleaned_data['qty']
            if request.user.is_authenticated:
                usern = request.user.username
            else:
                usern = ''
            insieme = []
            insieme = [nome,data,pasto,qty,usern]
            print(insieme)
            inserire(nome,pasto,data,qty,usern)
            # alimenti = search(nome)
            # for i in range(0, len(alimenti)):
            #     print(alimenti[i])
            # #send_mail(subject, message, sender, recipients)
            # ricetta =alimenti
            # ric = alimenti
            # print(len(ric))
            # ric.sort()
            # print(ric)
            # ric2=[]
            # for ri in ric:
            #     ric2.append((ri, ri))
            # from django import forms
            # class Food(forms.Form):
            #     food = forms.MultipleChoiceField(
            #         required=False,
            #         widget=forms.RadioSelect,
            #         choices=ric2,
            #     )
            # form2 = Food(request.POST)
            form = SearchFood()
            ricetta = None
            form2 = None
            insieme = []
            context = {
                'form': form,
                'insieme': insieme,
                # 'ricette' : ricetta,
                # 'form2' : form2
            }
            # NOTE(review): mutating request.method is a hack, and the
            # success message after the return below is unreachable.
            request.method = 'GET'
            return render(request, 'home/landing_page/tables.html', context)
            messages.success(request, 'Contact request submitted successfully.')
        else:
            # NOTE(review): the invalid-POST path falls off the function and
            # returns None, which Django will reject -- confirm intent.
            messages.error(request, 'Invalid form submission.')
    # if a GET (or any other method) we'll create a blank form
    else:
        print('GET')
        form = SearchFood()
        ricetta = None
        form2 = None
        insieme = []
        context = {
            'form' : form,
            'insieme':insieme,
            #'ricette' : ricetta,
            #'form2' : form2
        }
        return render(request, 'home/landing_page/tables.html',context)
# from .dash_apps.finished_apps.forms import FormForm
#
# def country_form(request):
# # instead of hardcoding a list you could make a query of a model, as long as
# # it has a __str__() method you should be able to display it.
# import pymongo
# from pymongo import MongoClient
# import ssl
# import datetime
# import pandas as pd
# # Sistemare tutto
# client = pymongo.MongoClient(
# "mongodb+srv://armonia_amministrazione:uanrimcoantitao2l0i2c1a@clusterarmonia.ukpoq.mongodb.net/local?retryWrites=true&w=majority",
# ssl_cert_reqs=ssl.CERT_NONE)
# # db = client.test
# db = client['ArmoniaBot']
# col = db['DatabaseDietabit']
# lista = col.find()
# FOOD_CHOICES = []
# alimenti = []
# for lis in lista:
# alimenti.append(lis['Nome'])
# col = db['DatabaseCibo']
# lista = col.find()
# #FOOD_CHOICES = []
# #alimenti = []
# for lis in lista:
# alimenti.append(lis['Nome'])
# # col = db['DatabaseBDA']
# # lista = col.find()
# # #FOOD_CHOICES = []
# # #alimenti = []
# # for lis in lista:
# # alimenti.append(lis['Nome'])
# col = db['SushiDB']
# lista = col.find()
# #FOOD_CHOICES = []
# #alimenti = []
# for lis in lista:
# alimenti.append(lis['Name'])
# country_list = tuple(alimenti)#('Mexico', 'USA', 'China', 'France')
# form = FormForm(data_list=country_list)
#
# return render(request, 'home/landing_page/form.html', {
# 'form': form,
#
# })
|
import sys

# BUG FIX / modernization: this block used Python 2 `print` statements and
# str.encode('hex'), neither of which exists on Python 3.  Behavior is
# preserved: every byte of the input file (newlines included) is rendered as
# a \xNN escape sequence.
if len(sys.argv) < 2:
    print('Usage: python %s filename' % sys.argv[0])
    sys.exit(0)
with open(sys.argv[1], 'rb') as f:
    shellcode = ''.join('\\x%02x' % byte for byte in f.read())
print(shellcode)
|
"""
This program is for building a twitter bot . Which can retweet and fav the tweets about COVID meds and essential items.
"""
# Import the necessary modules...
import tweepy
import time
import logging
from random import choice, randint
import sqlite3
import glob
import c
# For logging informations
# For logging informations
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
#credentials
# Twitter API credentials are read from the local `c` module.
CONSUMER_KEY = c.ck
CONSUMER_SECRET = c.cs
ACCESS_KEY = c.ak
ACCESS_SECRET = c.ast
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
# use this object to communicate with twitter
api = tweepy.API(auth, wait_on_rate_limit=True,
                 wait_on_rate_limit_notify=True)
recent_id = c.recent_id
try:
    api.verify_credentials()
    logger.info("Authentication OK...")
# NOTE(review): bare `except` swallows every exception type, including
# KeyboardInterrupt/SystemExit -- prefer `except Exception`.
except:
    logger.error("Error during authentication", exc_info=True)
def _engage(tweet, date_attr):
    """Record the tweet's timestamp on module `c`, then like and retweet it.

    Skips our own account ("art_ideas") and anything already liked or
    retweeted.  `date_attr` names the attribute on `c` to update
    ('recent_date' or 'recent_date1').
    """
    status = api.get_status(tweet.id)
    print("user's name: ", tweet.user.name, " ", tweet.created_at)
    setattr(c, date_attr, tweet.created_at)
    # Like the tweet where it is tweeted(if not faved)
    if (not status.favorited) and (tweet.user.name != "art_ideas"):
        logger.info(f'Liking the tweet of {tweet.user.name}')
        try:
            tweet.favorite()
        except Exception as e:
            logger.error('Error while fav process .The error is :\n{}'.format(e), exc_info=True)
    # Retweet the tweet which includes the hashtag(if not retweeted)
    if (not status.retweeted) and (tweet.user.name != "art_ideas"):
        logger.info(f'retweeting the tweet of {tweet.user.name}')
        try:
            tweet.retweet()
        except Exception:
            # Was a bare except; narrowed so Ctrl-C still stops the bot.
            logger.error('Error while retweeting.', exc_info=True)


def search():
    """Run both search queries and like/retweet every hit.

    Refactor: the two identical 18-line processing loops were deduplicated
    into _engage(), parameterised by which `c.recent_date*` field to update.
    """
    logger.info("Searching ...")
    date_since = c.recent_date
    date_since1 = c.recent_date1
    query = ["covid", "Remdesivir", "help"]
    tweets = tweepy.Cursor(api.search, query, count = 200, since = date_since).items(10)
    q = ["oxygen", "help"]
    tweets1 = tweepy.Cursor(api.search, q, count = 200, since = date_since1).items(10)
    for tweet in tweets:
        _engage(tweet, 'recent_date')
    for tweet in tweets1:
        _engage(tweet, 'recent_date1')
# Poll forever: one search/engage pass, then a short pause between passes
# (tweepy's wait_on_rate_limit handles the actual rate limiting).
while True:
    search()
    time.sleep(5)
#!/usr/bin/env python
import wx
# Bug fix: "import .diffpads_dialog" is a SyntaxError; explicit relative
# imports must use the "from . import name" form.
from . import diffpads_dialog


class DiffPadsApp(wx.App):
    """wx application wrapper that opens the diff-pads dialog for a board."""

    def __init__(self, board):
        # Stash the board before wx.App.__init__ triggers OnInit below.
        self.board = board
        super(DiffPadsApp, self).__init__()

    def OnInit(self):
        # Called by wx once the application object is ready.
        diffpads_dialog.init_diffpads_dialog(self.board)
        return True
|
import aiofiles
from aiofiles import os as async_os
from sanic import Sanic, response
from sanic.response import file_stream

# Single Sanic app exposing file upload, download and log-streaming endpoints.
app = Sanic(__name__)
@app.post('/upload')
async def ProcessUpload(request):
    """Save the uploaded multipart file under ../files/upload/.

    Security fix: the client-supplied filename is reduced to its basename so
    a name like "../../etc/passwd" cannot escape the upload directory.
    """
    import os
    item = request.files.get("file")
    print("name: ", item.name)
    print("type: ", item.type)
    safe_name = os.path.basename(item.name)
    async with aiofiles.open(f"../files/upload/{safe_name}", 'wb') as f:
        await f.write(item.body)
    return response.empty()
@app.route('/download')
async def ProcessDownload(request):
    """Stream ../files/<filename> back as an attachment in 1 MiB chunks.

    Security fix: basename() strips any directory components from the
    client-supplied ?filename= value to prevent path traversal out of
    ../files/.
    """
    import os
    print("sending file...")
    file_name = os.path.basename(request.args['filename'][0])
    file_path = f"../files/{file_name}"
    file_stat = await async_os.stat(file_path)
    headers = {
        "Content-Length": str(file_stat.st_size),
        "Content-Disposition": f"attachment;filename={file_name}"
    }
    return await file_stream(
        file_path,
        headers=headers,
        chunked=None,
        chunk_size=1024 * 1024
    )
@app.websocket('/get_log')
async def GetLog(request, ws):
    """Send ../files/log.txt to the websocket client, one line per message."""
    async with aiofiles.open("../files/log.txt", 'r') as log_file:
        while True:
            entry = await log_file.readline()
            if not entry:
                break
            await ws.send(entry)
if __name__ == '__main__':
    # Listen on all interfaces; debug=True enables verbose logging/auto-reload.
    app.run(host="0.0.0.0", port=8000, debug=True)
|
# coding: utf-8
import os

# Slack API token: on the bot's settings page open "OAuth & Permissions" and
# copy the "Bot User OAuth Access Token" into this environment variable.
API_TOKEN = os.environ["SLACK_API_TOKEN"]

# Reply sent when no handler matches the incoming message.
# Bug fix: the string read "I dont't understand you."
DEFAULT_REPLY = "I don't understand you."

# Directory paths searched for the bot's plugin scripts.
PLUGINS = ['plugins']
#!/usr/bin/env python
# -*- coding::utf-8 -*-
# Author :GG
# 给你一个数组 nums 和一个值 val,你需要 原地 移除所有数值等于 val 的元素,并返回移除后数组的新长度。
#
# 不要使用额外的数组空间,你必须仅使用 O(1) 额外空间并 原地 修改输入数组。
#
# 元素的顺序可以改变。你不需要考虑数组中超出新长度后面的元素。
#
#
#
# 示例 1:
#
# 给定 nums = [3,2,2,3], val = 3,
#
# 函数应该返回新的长度 2, 并且 nums 中的前两个元素均为 2。
#
# 你不需要考虑数组中超出新长度后面的元素。
#
#
# 示例 2:
#
# 给定 nums = [0,1,2,2,3,0,4,2], val = 2,
#
# 函数应该返回新的长度 5, 并且 nums 中的前五个元素为 0, 1, 3, 0, 4。
#
# 注意这五个元素可为任意顺序。
#
# 你不需要考虑数组中超出新长度后面的元素。
#
#
#
#
# 说明:
#
# 为什么返回数值是整数,但输出的答案是数组呢?
#
# 请注意,输入数组是以「引用」方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
#
# 你可以想象内部操作如下:
#
# // nums 是以“引用”方式传递的。也就是说,不对实参作任何拷贝
# int len = removeElement(nums, val);
#
# // 在函数里修改输入数组对于调用者是可见的。
# // 根据你的函数返回的长度, 它会打印出数组中 该长度范围内 的所有元素。
# for (int i = 0; i < len; i++) {
# print(nums[i]);
# }
#
# Related Topics 数组 双指针
# 👍 590 👎 0
from typing import List
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    """LeetCode 27: remove every occurrence of `val` from `nums` in place and
    return the new logical length.  Four equivalent strategies."""

    def removeElement(self, nums: List[int], val: int) -> int:
        """Stable two-pointer compaction: keepers are copied forward."""
        write = 0
        for value in nums:
            if value != val:
                nums[write] = value
                write += 1
        return write

    def removeElement2(self, nums: List[int], val: int) -> int:
        """Unstable variant: overwrite each match with the last live element."""
        live = len(nums)
        idx = 0
        while idx < live:
            if nums[idx] != val:
                idx += 1
            else:
                live -= 1
                nums[idx] = nums[live]
        return live

    def removeElement3(self, nums: List[int], val: int) -> int:
        """Delete in place, walking backwards so removals don't shift
        indices still to be visited."""
        idx = len(nums) - 1
        while idx >= 0:
            if nums[idx] == val:
                del nums[idx]
            idx -= 1
        return len(nums)

    def removeElement4(self, nums: List[int], val: int) -> int:
        """Count the occurrences up front, then remove() that many times."""
        for _ in range(nums.count(val)):
            nums.remove(val)
        return len(nums)
# leetcode submit region end(Prohibit modification and deletion)
|
import os,sys
from numpy import median
from ..FeatureExtractor import FeatureExtractor, InterExtractor
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR")+'/Algorithms'))
from qso_fit import qso_fit
#class qso_extractor(FeatureExtractor): # Using this will add a 'qso' feature in vosource xml whose value is a string representation of the returned od dict. (using internal_use_only=False, active=False)
class qso_extractor(InterExtractor):
    """ calculates the skew of the signal using scipy.stats.skew
    biased skew?"""
    # NOTE(review): the docstring above looks copy-pasted from a skew
    # extractor; this class actually runs qso_fit() on the light curve.
    internal_use_only = False # if set True, then seems to run all qso code for each sub-feature
    active = True # if set False, then seems to run all qso code for each sub-feature
    extname = 'qso' #extractor's name
    def extract(self):
        # Re-centre the flux on a reference magnitude of 19 before fitting.
        #import pdb; pdb.set_trace()
        y0 = 19.
        y = self.flux_data - median(self.flux_data) + y0
        try:
            od = qso_fit(self.time_data,
                         y,
                         self.rms_data,filter='g')
        except:
            # NOTE(review): bare except; if ex_error() returns instead of
            # raising, `od` below is unbound and `return od` raises NameError.
            self.ex_error(text="qso_extractor.qso_extractor()")
        #res = od['chi2_qso/nu'],od['chi2_qso/nu_NULL']
        # QSO-like: res[0]<~2
        # non-QSO: res[1]/res[0]<~2
        return od
class qso_generic(FeatureExtractor):
    """Generic qso extractor: grabs a single value from the dict produced by
    the 'qso' extractor.  Subclasses set extname/qso_key."""
    internal_use_only = False
    active = True
    extname = 'to_be_overloaded' # identifier used in final extracted value dict.
    qso_key = 'to_be_overloaded' # key to pull out of the 'qso' result dict.
    def extract(self):
        """Return the configured key from the 'qso' feature dict, or report
        an error via ex_error() when the key is absent."""
        qso_dict = self.fetch_extr('qso')
        # Bug fix: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent and works in both Python 2 and 3.
        if self.qso_key in qso_dict:
            return qso_dict[self.qso_key]
        else:
            self.ex_error('qso_extractor dictionary does not have key %s' % (self.qso_key))
##### Nat has converged upon the following being the most significant featues,
# Joey believes it is best to jut use these features only (so now the others are disabled in
# __init__.py and qso_extractor.py
class qso_log_chi2_qsonu_extractor(qso_generic):
    """ qso_log_chi2_qsonu """
    # Pulls the 'log_chi2_qsonu' value from the qso_fit result dict
    # (presumably log of the QSO-model chi^2/nu -- confirm in qso_fit).
    extname = "qso_log_chi2_qsonu"
    qso_key = "log_chi2_qsonu"
class qso_log_chi2nuNULL_chi2nu_extractor(qso_generic):
    """ qso_log_chi2nuNULL_chi2nu """
    # Pulls the 'log_chi2nuNULL_chi2nu' value from the qso_fit result dict
    # (presumably log ratio of null-model to QSO-model chi^2/nu -- confirm).
    extname = "qso_log_chi2nuNULL_chi2nu"
    qso_key = "log_chi2nuNULL_chi2nu"
#####
### eventually get rid of:
#class qso_lvar_extractor(qso_generic):
# """ qso_lvar """
# extname = "qso_lvar"
# qso_key = "lvar"
### eventually get rid of:
#class qso_ltau_extractor(qso_generic):
# """ qso_ltau """
# extname = "qso_ltau"
# qso_key = "ltau"
### eventually get rid of: (since is not related to QSO classifier)
#class qso_chi2nu_extractor(qso_generic):
# """ qso_chi2nu """
# extname = "qso_chi2nu"
# qso_key = "chi2/nu"
#class qso_chi2_qsonu_extractor(qso_generic):
# """ qso_chi2_qsonu """
# extname = "qso_chi2_qsonu"
# qso_key = "chi2_qso/nu"
#class qso_chi2_qso_nu_NULL_extractor(qso_generic):
# """ chi2_qso_nu_NULL """
# extname = "qso_chi2_qso_nu_NULL"
# qso_key = "chi2_qso/nu_NULL"
#class qso_signif_qso_extractor(qso_generic):
# """ qso_signif_qso """
# extname = "qso_signif_qso"
# qso_key = "signif_qso"
#class qso_signif_not_qso_extractor(qso_generic):
# """ qso_signif_not_qso """
# extname = "qso_signif_not_qso"
# qso_key = "signif_not_qso"
#class qso_signif_vary_extractor(qso_generic):
# """ qso_signif_vary """
# extname = "qso_signif_vary"
# qso_key = "signif_vary"
#class qso_chi2qso_nu_nuNULL_ratio_extractor(qso_generic):
# """ qso_chi2qso_nu_nuNULL_ratio """
# extname = "qso_chi2qso_nu_nuNULL_ratio"
# qso_key = "chi2qso_nu_nuNULL_ratio"
|
from matplotlib import pyplot as plt
# Sample production metrics: jobs completed vs. average handling time.
jobs = [25,34,43,23,46,13,24,34,45,29]
aht = [15,12,23,8,17,14,7,10,14,12]
# Simple line chart of handling time against completed jobs.
plt.plot(jobs, aht)
plt.title("Production")
plt.xlabel("Jobs-Completed")
plt.ylabel("Average-Handling-Time")
plt.show()
# Leftover experiment data, kept commented out.
#py_dev_x = [10,20,30,40,50,60,70]
#py_dev_y = [12345,14541,14145,98758,74571,11578,11548]
#js_dev_y = [14415,41515,4545121,45845,48412,45121,4584521]
#ages_x = [20,25,24,28,29,31,35]
#plt.plot(dev_x, dev_y)
|
# 7-1 A function that determines whether an input string is in the format
# filename.ext. Returns True if so, and False if not.
def check_filename(string):
    """Return True when *string* has the form "filename.ext" with a 3-char
    extension, False otherwise.

    Fixes: the original raised IndexError for strings shorter than 4
    characters (including "") and ValueError for names containing more than
    one dot (the two-target unpack of split('.')); both now return False.
    """
    if len(string) < 4 or string[-4] != '.':
        return False
    # Exactly one dot is required: "a.b.txt" was never a valid match.
    return string.count('.') == 1
# -*- coding:utf8 -*-
from lxml import etree
import requests
url = "https://www.appannie.com/apps/ios/app/idle-heroes/reviews/?order_by=date&order_type=desc&date=2019-04-29~2019-05-29&translate_selected=false&granularity=weekly&stack&percent=false&series=rating_star_1,rating_star_2,rating_star_3,rating_star_4,rating_star_5"
with open('htm0.txt', 'r') as myfile:
    html = myfile.read()
# Finding 1: a plain request gets "503 not available" -- the first
# anti-scraping wall; set a User-Agent header to imitate a real browser.
header ={'User-Agent': 'Mozilla/5.0 (Windows NT x.y; Win64; x64; rv:10.0) Gecko/20100101 Firefox/10.0 '}
response = requests.get(url,headers = header)
htm0 = response.content
# text_file = open("htm0.txt", "w")
#
# text_file.write(htm0)
#
# text_file.close()
# Finding 2: requests alone still misses the important data (the table comes
# from a separate endpoint); either request that endpoint directly, or scrape
# semi-automatically from a manually saved page element file.
# re=response.content
#
#
#
# selector2=etree.HTML(re)
selector=etree.HTML(html)
#contents=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[5]/div/div[2]/div[2]')
# simple1=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[1]/div/div[2]/div/div[1]/div[2]/div[1]/div[1]/span[1]/a')
# print simple1
# print simple1
simple2=selector.xpath('//*[@id="4229616386"]/div/div[2]/text()')
print simple2
print simple2
# //*[@id="4229616386"]/div/div[2]/text()
# //*[@id="4230292243"]/div/div[2]
# //*[@id="4230292243"]/div
# //*[@id="4237381132"]/div
# Finding 3: the anti-scraping is thorough -- element ids are random numbers.
# Work out the pattern by hand; once one id is found, all rows can be scraped
# in order by iterating the table's trailing <tr> elements.
simple3=selector.xpath('//*[@id="sub-container"]/div[2]/div[2]/div[1]/div[2]/div/div[2]/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[2]/div/table/tbody/tr[1]/td[2]')
# The id has been located; it is an etree element whose printed form looks
# like a dict, so try a forced dict() conversion of its attributes.
print dict(simple3[0].attrib)['id']
# id found -- the semi-automatic scrape works end to end.
# Print one review text.
pingjia_path='//*[@id="'+dict(simple3[0].attrib)['id']+'"]/div/div[2]/text()'
pingjia=selector.xpath(pingjia_path)
print pingjia
#----------- scrape many rows in one pass
results=[]
for i in range(1,101):
    id_path='//*[@id="sub-container"]/div[2]/div[2]/div[1]/div[2]/div/div[2]/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[2]/div/table/tbody/tr['+str(i)+']/td[2]'
    id_selector=selector.xpath(id_path)
    pingjia_path = '//*[@id="' + dict(id_selector[0].attrib)['id'] + '"]/div/div[2]/text()'
    pingjia = selector.xpath(pingjia_path)
    results.append(pingjia)
#print results
#----- handle the text encoding (Python 2 str/unicode round-trip)
results_utf8=[]
for strr in results:
    #print str(strr)
    results_utf8.append(str(strr).decode('unicode-escape').encode('utf-8'))
    str_utf8= str(strr).decode('unicode-escape').encode('utf-8')
    print str_utf8
#print(results_utf8)
ustring = unicode(str(results[9]), "utf-8")
#print ustring.decode('unicode-escape').encode('utf-8')
import numpy as np
np.savetxt("评价(解码).text", results, delimiter=",", fmt='%s')
np.savetxt("评价(未解码).csv", results, delimiter=",", fmt='%s')
# Note: the HTML could be fetched with selenium+phantomjs to emulate a
# browser; that was not used here -- the page element file was copied by hand.
#! /usr/bin/python3
from lyrics import *

if __name__ == "__main__":
    # Interactive loop: search for a song, list matches, show chosen lyrics.
    running = True
    while running:
        searched_song = input(">> Enter a song name : ")
        if searched_song == "" or searched_song is None:
            print("Error : You must enter string ..")
            continue
        searched_song = searched_song.strip()
        print(">>> Connecting ...")
        result = search_song_in_website(searched_song)
        count = len(result.songs)
        if count > 0:
            print(">> This is what we found :\n")
            for i in range(count):
                song = result.songs[i]
                msg = '{0}: {1} - {2}'.format((i + 1), song.song_name, song.singer)
                print(msg)
            number = input(">> Choose number or -1 to exit : ")
            # Bug fix: int() used to raise ValueError on non-numeric input;
            # treat it like an out-of-range choice instead of crashing.
            try:
                number = int(number)
            except ValueError:
                number = 0
            if number == -1:
                exit()
            if number > count or number < 1:
                # Bug fix: 0/negative choices silently indexed from the end
                # of the list (songs[number - 1]); they are rejected now.
                print(">> No such Number !! please read carefully .. \n")
                print("\n" * 20)
            else:
                print("\n\n" + "*" * 30)
                song = result.songs[number - 1]
                msg = '{0}: {1} - {2}'.format(number, song.song_name, song.singer)
                print(msg)
                print(">>> Connecting ...")
                song_lyrics = get_lyrics_from_link(song.link)
                print(song_lyrics.lyrics)
        else:
            print(">> No songs found")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SQL snippets for soft-deleting location_type rows.
__version__ = '1.0.1'

# Soft delete: flags the row (deleted=TRUE, active=FALSE) rather than removing
# it; $1 is the BIGINT row id and RETURNING * hands back the updated row.
delete_location_type_element_query = """
UPDATE public.location_type AS ltp SET deleted = TRUE,
active = FALSE WHERE ltp.id = $1::BIGINT RETURNING *;
"""
|
import numpy as np
def testData():
    """Run the solver on test.txt and compare with the expected answer.txt.

    Returns True when the computed result matches the published answer.
    Fix: both files are now opened with `with`, so the handles are closed
    (the originals were leaked).
    """
    with open('test.txt', 'r') as otest:
        test = otest.readlines()
    with open('answer.txt', 'r') as oanswer:
        answer = oanswer.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    if result == int(answer): #not always int
        status = True
        print("Correct answer: " + answer + "My answer: " + str(result))
    return status
def runCode(data):
    """Advent-of-Code day 6 part 2: groups are separated by blank lines; an
    answer counts only when every member of the group gave it.  Returns the
    grand total over all groups."""
    print("Runs code")
    groups = []
    answers = []
    members = 0
    for raw_line in data:
        if raw_line == '\n':
            # Blank line terminates the current group.
            groups.append((answers, members))
            answers = []
            members = 0
        else:
            answers = answers + list(raw_line.strip())
            members += 1
    # The final group has no trailing blank line; flush it too.
    groups.append((answers, members))
    total = 0
    for answers, members in groups:
        # Count the characters that appear once per member.
        total += sum(1 for ch in set(answers) if answers.count(ch) == members)
    return total
#Runs testdata
# Gate the real puzzle on the sample passing first.  Fix: input.txt is now
# opened with `with` so the handle is closed (the original was leaked).
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    with open('input.txt', 'r') as opuzzle:
        puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the store_data app: GadgetSnap metadata plus lookup
    tables for architectures, releases and screenshot URLs."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Architecture',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='GadgetSnap',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('icon_url', models.URLField(blank=True)),
                ('name', models.CharField(max_length=100)),
                ('title', models.CharField(max_length=250, null=True, blank=True)),
                ('ratings_average', models.DecimalField(max_digits=2, decimal_places=1)),
                ('alias', models.CharField(max_length=100, null=True, blank=True)),
                ('price', models.DecimalField(max_digits=5, decimal_places=2)),
                ('publisher', models.CharField(max_length=100)),
                ('store_url', models.URLField(blank=True)),
                ('version', models.CharField(max_length=25)),
                ('last_updated', models.DateTimeField()),
                ('description', models.TextField(max_length=5000)),
                ('website', models.URLField(blank=True)),
                ('architecture', models.ManyToManyField(to='store_data.Architecture')),
            ],
        ),
        migrations.CreateModel(
            name='Release',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='ScreenshotURL',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.URLField(blank=True)),
            ],
        ),
        # M2M fields added after GadgetSnap so the target models exist first.
        migrations.AddField(
            model_name='gadgetsnap',
            name='release',
            field=models.ManyToManyField(to='store_data.Release'),
        ),
        migrations.AddField(
            model_name='gadgetsnap',
            name='screenshot_url',
            field=models.ManyToManyField(to='store_data.ScreenshotURL'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# metodIsmi(veri)  -- course-note naming example ("methodName(data)")
# alanHesapla(genislik, yukseklik)  -- ("calculateArea(width, height)")
# Read a room's width and height in metres and print its area (output stays
# in Turkish; only the comments are translated).
genislik = float(input('Genişlik?\n'))
yukseklik = float(input('Yükseklik?\n'))
alan = genislik * yukseklik
print("Girdiğiniz genişlik " + str(genislik) + " metredir.\n")
print("Girdiğiniz yükseklik " + str(yukseklik) + " metredir.\n")
print("Odanın alanı "+ str(alan) + " metrekaredir\n")
# Classify data using a basic TF model
print("CLASSIFYING...")
import tensorflow as tf
from tensorflow import keras
# Bug fix: np and plt are used below but were never imported (NameError).
import numpy as np
from matplotlib import pyplot as plt
# NOTE(review): train_dataset/train_labels/test_dataset/test_labels must be
# defined before this script runs -- confirm where they come from.
# Build the Model: flatten 28x28 images, one hidden ReLU layer, softmax out.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
letter_labels = ['a','b','c','d','e','f','g','h','i','j']
# Configure optimizer, loss function, and monitoring metrics.
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train model on dataset
model.fit(train_dataset, train_labels, epochs=5)
# See how trained model performs on test set
test_loss, test_acc = model.evaluate(test_dataset, test_labels)
print('Test accuracy: ', test_acc)
# Make some predictions
predictions = model.predict(test_dataset)
# Plot the first 25 test images, their predicted label, and the true label
# Color correct predictions in green, incorrect predictions in red
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    # Bug fix: plt.grid('off') passes a truthy string, which turns the grid
    # ON; pass False to actually disable it.
    plt.grid(False)
    plt.imshow(test_dataset[i], cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions[i])
    true_label = test_labels[i]
    if predicted_label == true_label:
        color = 'green'
    else:
        color = 'red'
    plt.xlabel("{} ({})".format(letter_labels[predicted_label],
                                letter_labels[true_label]),
               color=color)
plt.show()
import numpy as np
from matplotlib import pyplot as plt
#from Ground_Function_File import Ground_Function
"""
This is a simulation of a six wheel rover diluted to a 2D model with inaccurate values.
"""
__author__="David Canosa Ybarra"
def Ground_Function(z):
    """Terrain height [m] at horizontal position z [m].

    Flat (0) until z = 2, then a superposition of a high-frequency ripple
    (0.05*(cos(5(z-2))-1)) on a broad dip (0.5*(cos(z-2)-1)); both terms are
    0 at z = 2, so the profile is continuous.

    Fix: the original had an unreachable `else: pass` branch after the
    exhaustive z<2 / z>=2 pair, which would have left the result undefined.
    """
    if z < 2:
        return 0
    return 0.05 * (np.cos(5 * (z - 2)) - 1) + 0.5 * (np.cos((z - 2)) - 1)
# Linkage geometry: joint angles [rad] and arm lengths [m].
alpha_1=np.pi/3
alpha_2=np.pi/3
L_1=0.5
L_3=0.2
L_2=L_1-L_3
# Suspension stiffness [N/m], damping [N*s/m], gravity [m/s^2], masses [kg].
k=170*10**3
c=1213
g=9.81
m_wheel=1
mass_of_rover_body=50
rover_body_height=0.2
rover_body_width=0.5
# Body moment of inertia (rectangular plate about its centroid).
J_body=mass_of_rover_body*(rover_body_height**2+rover_body_width**2)/12
# Initial Conditions
z=0
z_dot=0.5
# 3-DOF state vector and derivatives (presumably heave + two rotation
# coordinates of the diluted rocker-bogie model -- confirm with the author).
X=np.mat([[0],
          [0],
          [0]])
X_dot=np.mat([[0],
              [0],
              [0]])
X_dot_dot=np.mat([[0],
                  [0],
                  [0]])
# Ground height sampled under each wheel contact point.
X_base=np.mat([[Ground_Function(z)],
               [Ground_Function(z-L_1*np.sin(alpha_1/2))],
               [Ground_Function(z-L_1*np.sin(alpha_1/2)-2*L_3*np.sin(alpha_2/2))]])
t=0
dt=0.001
# Stiffness, damping, ground-input and mass matrices of the 3-DOF system.
K_matrix=np.mat([[-3*k, 2*k*L_2*np.sin(alpha_1/2)-k*L_1*np.sin(alpha_1/2), 0],
                 [2*k*L_2*np.sin(alpha_1/2)-k*L_1*np.sin(alpha_1/2), -k*(L_1*np.sin(alpha_1/2))**2-2*k*(L_2*np.sin(alpha_2/2))**2, 0],
                 [0, 0, -k*2*(L_3*np.sin(alpha_2/2))**2]])
C_matrix=np.mat([[-3*c, 2*c*L_2*np.sin(alpha_1/2)-c*L_1*np.sin(alpha_1/2), 0],
                 [2*c*L_2*np.sin(alpha_1/2)-c*L_1*np.sin(alpha_1/2), -c*(L_1*np.sin(alpha_1/2))**2-2*c*(L_2*np.sin(alpha_2/2))**2, 0],
                 [0, 0, -c*2*(L_3*np.sin(alpha_2/2))**2]])
B_matrix=np.mat([[k, k, k],
                 [k*L_1*np.sin(alpha_1/2), -k*L_2*np.sin(alpha_1/2), -k*L_2*np.sin(alpha_1/2)],
                 [0, k*L_3*np.sin(alpha_2/2), -k*L_3*np.sin(alpha_2/2)]])
M_matrix=np.mat([[mass_of_rover_body, 0, 0],
                 [0, m_wheel*(2*L_3**2)+2*m_wheel*L_2**2+m_wheel*L_1**2+J_body, 0],
                 [0, 0, m_wheel*(2*L_3**2)]])
M_matrix_inv=np.linalg.inv(M_matrix)
# Time histories collected for plotting.
x_lst=[]
z_lst=[]
x_base_lst=[]
x_dot_dot=[]
t_lst=[]
x_lst.append(np.array(X)[0][0]+L_1*np.cos(alpha_1/2))
x_base_lst.append(np.array(X_base)[0][0])
z_lst.append(z)
t_lst.append(t)
F_on_wheels_1=[]
F_on_wheels_2=[]
F_on_wheels_3=[]
x_dot_dot.append(np.array(X_dot_dot)[0][0])
# Spring force at each wheel minus its one-third share of the static weight.
F_on_wheels_1.append(k*(np.array(X)[0][0]-np.array(X_base)[0][0]+np.array(X)[1][0]*L_1*np.sin(alpha_1/2))-(mass_of_rover_body+3*m_wheel)*g/3)
F_on_wheels_2.append(k*(np.array(X)[0][0]-np.array(X_base)[1][0]-np.array(X)[1][0]*L_2*np.sin(alpha_1/2)+np.array(X)[2][0]*L_3*np.sin(alpha_2/2))-(mass_of_rover_body+3*m_wheel)*g/3)
F_on_wheels_3.append(k*(np.array(X)[0][0]-np.array(X_base)[2][0]-np.array(X)[1][0]*L_2*np.sin(alpha_1/2)-np.array(X)[2][0]*L_3*np.sin(alpha_2/2))-(mass_of_rover_body+3*m_wheel)*g/3)
count=0
# Explicit-Euler time march while the rover traverses 40 m of terrain.
while z<40:
    X_dot_dot=M_matrix_inv*(K_matrix*X+C_matrix*X_dot+B_matrix*X_base)
    z=z+z_dot*dt
    X_base=np.mat([[Ground_Function(z+L_1*np.sin(alpha_1/2))],
                   [Ground_Function(z)],
                   [Ground_Function(z+2*L_3*np.sin(alpha_2/2))]])
    # Finding forces on arms
    F_on_wheels_1.append(k*(np.array(X)[0][0]-np.array(X_base)[0][0]+np.array(X)[1][0]*L_1*np.sin(alpha_1/2))-(mass_of_rover_body+3*m_wheel)*g/3)
    F_on_wheels_2.append(k*(np.array(X)[0][0]-np.array(X_base)[1][0]-np.array(X)[1][0]*L_2*np.sin(alpha_1/2)+np.array(X)[2][0]*L_3*np.sin(alpha_2/2))-(mass_of_rover_body+3*m_wheel)*g/3)
    F_on_wheels_3.append(k*(np.array(X)[0][0]-np.array(X_base)[2][0]-np.array(X)[1][0]*L_2*np.sin(alpha_1/2)-np.array(X)[2][0]*L_3*np.sin(alpha_2/2))-(mass_of_rover_body+3*m_wheel)*g/3)
    X=X+X_dot*dt
    X_dot=X_dot+X_dot_dot*dt
    x_lst.append(np.array(X)[0][0]+L_1*np.cos(alpha_1/2))
    x_base_lst.append(np.array(X_base)[0][0])
    z_lst.append(z)
    t_lst.append(t)
    x_dot_dot.append(np.array(X_dot_dot)[0][0])
    count=count+1
    t=t+dt
plt.plot(z_lst, x_lst, label="Body")
plt.plot(z_lst, x_base_lst, label="Ground")
plt.legend()
plt.title("Graph of the rover body displacement")
plt.xlabel("Displacement (horizontal) [m]")
plt.ylabel("Displacement (vertical) [m]")
plt.axis('equal')
plt.show()
plt.plot(t_lst, x_dot_dot)
plt.title("Rover body acceleration")
plt.xlabel("Time [s]")
plt.ylabel("Force [N]")
plt.show()
plt.plot(t_lst, F_on_wheels_1, label="Force on wheel 1")
plt.plot(t_lst, F_on_wheels_2, label="Force on wheel 2")
plt.plot(t_lst, F_on_wheels_3, label="Force on wheel 3")
#plt.plot("Forces on wheels on the rover")
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Force [N]")
plt.show()
|
def pinta_ala(pituus, leveys = None):
    """Return an area: a square of side `pituus` when `leveys` is omitted,
    otherwise the `pituus` x `leveys` rectangle.

    Args:
        pituus: side length / width.
        leveys: optional second dimension; None means "square".
    """
    # Idiom fix: compare with None using `is`, not `==`.
    if leveys is None:
        return pituus ** 2
    return pituus * leveys
def main():
    # Demo: area of a 3x3 square and a 4x3 rectangle, printed in Finnish.
    print("Neliön pinta-ala on {:.1f}".format(pinta_ala(3)))
    print("Suorakaiteen pinta-ala on {:.1f}".format(pinta_ala(4,3)))
main()
|
import sys
import gym
import os
import tensorflow as tf
os.sys.path.insert(0, os.path.abspath('../../../settings_folder'))
import settings
import msgs
from gym_airsim.envs.airlearningclient import *
import callbacks
from multi_modal_policy import MultiInputPolicy
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from keras.backend.tensorflow_backend import set_session
def setup(difficulty_level='default', env_name = "AirSimEnv-v42"):
    """Create the AirSim gym env and a PPO2 agent bound to it.

    Returns (env, agent).
    """
    # Cap TF GPU memory so AirSim and the learner can share the GPU.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    env = gym.make(env_name)
    # Re-initialise with the randomisation ranges for the chosen difficulty
    # (looks up e.g. settings.default_range_dic by name).
    env.init_again(eval("settings."+difficulty_level+"_range_dic"))
    # Vectorized environments allow to easily multiprocess training
    # we demonstrate its usefulness in the next examples
    vec_env = DummyVecEnv([lambda: env]) # The algorithms require a vectorized environment to run
    agent = PPO2(MultiInputPolicy, vec_env, verbose=1)
    env.set_model(agent)
    return env, agent
def train(env, agent):
    """Train `agent` for the step budget configured in settings."""
    # Train the agent
    agent.learn(total_timesteps=settings.training_steps_cap)
    # NOTE(review): PPO2.save() normally requires a save path argument --
    # confirm this call succeeds with the installed stable-baselines version.
    agent.save()
def test(env, agent, filepath):
    """Load a saved PPO2 model from `filepath` and step `env` with its
    predictions for the configured number of episodes."""
    model = PPO2.load(filepath)
    obs = env.reset()
    for i in range(settings.testing_nb_episodes_per_model):
        action, _states = model.predict(obs)
        obs, rewards, dones, info = env.step(action)
if __name__ == "__main__":
    env, agent = setup()
    # Bug fix: train() takes (env, agent); calling it with no arguments
    # raised TypeError before any learning happened.
    train(env, agent)
|
# created by Ryan Spies
# 2/19/2015
# Python 2.7
# Description: parse through a summary file of NHDS site info obtained from website
# and split out individual cardfiles for each site. Also creates a summary csv file
# with calculated valid data points and percent of total. Used to display in arcmap
import os
import datetime
import dateutil.parser
# Python 2 script: builds per-site NWSRFS datacard files plus a summary CSV
# (and, for temperature, a .taplot card) from NHDS bulk data files.
maindir = os.getcwd()
workingdir = maindir[:-16] + 'Calibration_NWS'+ os.sep +'APRFC_FY2015'+ os.sep +'raw_data'
################### user input #########################
variable = 'ptpx' # choices: 'ptpx', 'tamn', or 'tamx'
timestep = 'daily' # choices: 'hourly' or 'daily'
station_file = workingdir + os.sep + 'nhds_' + timestep +os.sep + 'nhds_site_locations_' + timestep + '.txt'
daily_obs_file = workingdir + os.sep + 'nhds_' + timestep +os.sep + 'site_obs_time.csv'
data_file = workingdir + os.sep + 'nhds_' + timestep +os.sep + os.sep + variable + os.sep + 'sw_ak_' + variable + '_1960_2013.txt'
out_dir = workingdir + os.sep + 'nhds_' + timestep +os.sep + os.sep + variable + os.sep + 'cardfiles' + os.sep
bad_ptpx_file = workingdir + os.sep + 'nhds_' + timestep +os.sep + 'questionable_ptpx_check_' + timestep + '.txt'
########################################################
# Temperature variables also produce a taplot card; precipitation gets a
# questionable-value log and a list of CHPS-flagged suspect dates.
if variable == 'tamn':
    ext = '.tmn'; taplot = 'nhds_' + variable + '.taplot'; tap_open = open(workingdir + os.sep + 'nhds_' + timestep + os.sep + variable + os.sep + taplot, 'wb')
if variable == 'tamx':
    ext = '.tmx'; taplot = 'nhds_' + variable + '.taplot'; tap_open = open(workingdir + os.sep + 'nhds_' + timestep + os.sep + variable + os.sep + taplot, 'wb')
if variable == 'ptpx':
    ext = '.ptp'
    bad_ptpx_summary = open(bad_ptpx_file,'wb')
    check_chps = open(maindir[:-16] + 'Calibration_NWS'+ os.sep +'APRFC_FY2015'+ os.sep + 'CHPS_suspect_map.csv','r')
    set_miss_dates = []
    for line in check_chps: # check csv file with dates of suspect MAP data (from CHPS)
        date_chps = dateutil.parser.parse(line)
        set_miss_dates.append(date_chps.date())
# Observations per year; converts raw counts into years of record below.
if timestep == 'hourly':
    year_factor = float(24*365)
if timestep == 'daily':
    year_factor = float(365)
### parse summary file for station info ###
summary_file = open(workingdir + os.sep + 'nhds_summary_' + variable + '_' + timestep + '.csv','w')
summary_file.write('NAME,SITE_ID,LAT,LON,ELEV,TOTAL_DATA,YEARS_DATA,PCT_AVAIL,YEAR_START,YEAR_END\n')
station_summary = {}; elev_list = []
read_stations = open(station_file,'r')
for line in read_stations:
    if line[0] != '#':
        name = line[13:40].strip() # find the station name
        number = line[40:47].strip() # find the station id num (6 digit)
        site_id = number.split()[1] # find the station id num (4 digit)
        split = filter(None,line[47:].strip().split(' ')) # filter out blank entries in list
        lat = split[0]; lon = '-' +split[1]; elev = split[2].strip(); types = split[5]
        station_summary[site_id] = [name,number,lat,lon,elev]
        elev_list.append(float(elev)) # used to fin max/min for taplot header line
### parse observation time csv file for daily data (taplot card) ###
if timestep == 'daily':
    if variable == 'tamn' or variable == 'tamx':
        daily_obs = {}
        obs_time = open(daily_obs_file,'r')
        for line in obs_time:
            sep = line.split(',')
            if sep[0].strip() != 'LOCATION' and sep[0].strip() != '':
                daily_obs[sep[0]] = sep[8]
        obs_time.close()
### taplot header line ###
if variable == 'tamn' or variable == 'tamx':
    # The taplot header caps the station count at 26.
    if len(station_summary) <= 26:
        total_stations = len(station_summary)
    else:
        total_stations = 26
    units = 'ENGL'
    desc = "'KUSKOKWIM BASINS SW ALASKA'"
    max_elev = max(elev_list); min_elev = min(elev_list)
    tap_open.write('@A ')
    tap_open.write('{:2d} {:4s} {:30s} {:4.0f} {:4.0f}'.format(total_stations,units,desc,max_elev,min_elev))
    tap_open.write('\n')
### parse data and create individual datacard files ###
read_data = open(data_file,'r')
site_check = 'xxxx' # dummy site check to ignore first few empty lines
count_all = 0; count_missing = 0; prev_month = 13; day_count = 0
for each in read_data:
    if each[:8] == 'datacard':
        start_header = each
        header = each.split()
        site_check = header[5]
        site_id_data = header[5].split('-')[1] # find the station id num (4 digit)
    if each[:1] == ' ' or each[:1] == '1': # find the second line of each site's header to start new cardfile
        if len(filter(None,each.split())) <= 7: # ignore station data at end of temp data
            header2 = each.split()
            date_start = header2[0] + header2[1]; date_end = header2[2] + header2[3]
            #cardfile = open(out_dir + header[5] + '_NHDS.' + date_start + '.' + date_end + ext,'wb') # <- name may be too long for MAT input card
            cardfile = open(out_dir + header[5] + '_NHDS' + ext,'wb')
            cardfile.write(start_header)
            cardfile.write(each)
    if variable == 'tamn' or variable == 'tamx': # find the taplot lines for temperature data
        if each[:1] == ' ' or each[:1] == '1' or each[:1] == '@' or each[:1] == '-':
            if each[:2] == '@F':
                name = each[5:25].strip()
                lat_taplot = float(each[29:].split()[0]); lon_taplot = float(each[29:].split()[1]); elev_taplot = int(float(each[29:].split()[3]))
                if name in daily_obs:
                    time_of_obs = int(daily_obs[name])/100
                else:
                    print 'Observation time not available for: ' + name + ' -> using 1700 as estimate'
                    time_of_obs = int(17)
                if len(name) <= 15:
                    tap_open.write('{:2s} {:20s} {:6.2f} {:6.2f} {:2d} {:4d}'.format('@F',"'"+name + ' NHDS'+"'",lat_taplot,lon_taplot,time_of_obs,elev_taplot))
                else:
                    tap_open.write('{:2s} {:20s} {:6.2f} {:6.2f} {:2d} {:4d}'.format('@F',"'"+name +"'",lat_taplot,lon_taplot,time_of_obs,elev_taplot))
                tap_open.write('\n')
            elif each[:2] == '@G' or each[:2] == '@H': # find taplot lines at end of data
                tap_open.write(each.rstrip() + ' ')
            elif len(filter(None,each.split())) >= 7:
                tap_open.write(' '.join(each.split()))
                tap_open.write('\n')
    if each[:1] != '$' and each[:11] == site_check: # find data lines corresponding to the current site id
        parse = each[20:].strip().split() # parse through data in columns 4-9 in each line of data
        parse_month = int((each[:20].split()[1])[:-2])
        parse_year = int((each[:20].split()[1])[-2:])
        # Two-digit year window: <= 15 means 2000s, otherwise 1900s.
        if parse_year <= 15:
            parse_year = parse_year + 2000
        else:
            parse_year = parse_year + 1900
        # Seasonal plausibility threshold (looser April-September).
        if parse_month >=4 and parse_month <= 9:
            thresh = 2.0
        else:
            thresh = 1.5
        for value in parse:
            count_all += 1
            if value == '-999.00':
                count_missing += 1
        if variable == 'ptpx':
            check_list = []
            for value in parse:
                check_list.append(float(value))
            if any(check >= thresh for check in check_list) == True: # check if any values in line are >= 2.0 inches
                bad_ptpx_summary.write(each) # write instance to questionable_ptpx_check_.txt
            if any(check >= thresh for check in check_list) == True: # replace values with new_value = value/10
                cardfile.write(each[:22])
                for value in parse:
                    if parse_month == prev_month:
                        day_count += 1
                    else:
                        day_count = 1
                        prev_month = parse_month
                    day_check = datetime.datetime(parse_year,parse_month,day_count).date()
                    if float(value) >= thresh:
                        new_value = float(value)/10
                        cardfile.write("%7.2f" % new_value)
                        cardfile.write(' ')
                    else:
                        cardfile.write("%7.2f" % float(value))
                        cardfile.write(' ')
                cardfile.write('\n')
            else:
                if timestep == 'daily':
                    cardfile.write(each[:22])
                    for value in parse:
                        if parse_month == prev_month:
                            day_count += 1
                        else:
                            day_count = 1
                            prev_month = parse_month
                        day_check = datetime.datetime(parse_year,parse_month,day_count).date()
                        if day_check in set_miss_dates:
                            # Date flagged suspect by CHPS: void moderately
                            # large values as missing (-999.00) and log them.
                            if float(value) >= (thresh - 1.25):
                                new_value = -999.00
                                cardfile.write("%7.2f" % new_value)
                                cardfile.write(' ')
                                bad_ptpx_summary.write(site_check + ' ' + str(day_check) + ' old_value: ' + str(value) + ' new_value: ' + str(new_value) + '\n') # write instance to questionable_ptpx_check_.txt
                            else:
                                cardfile.write("%7.2f" % float(value))
                                cardfile.write(' ')
                        else:
                            cardfile.write("%7.2f" % float(value))
                            cardfile.write(' ')
                    cardfile.write('\n')
                else:
                    cardfile.write(each)
        else:
            cardfile.write(each)
    if each[:1] == '$' and count_all > 0: # find the break btw station data -> calculate site summary
        percent_data = round(((count_all - count_missing)/float(count_all))*100,1)
        station_summary[site_id_data].append(count_all-count_missing)
        station_summary[site_id_data].append(round((count_all-count_missing)/year_factor,2))
        station_summary[site_id_data].append(percent_data)
        station_summary[site_id_data].append(date_start[-4:])
        station_summary[site_id_data].append(date_end[-4:])
        count_all = 0; count_missing = 0
        print site_id_data + ' -> ' + str(percent_data) + '%'
        prev_month = 13
### populate summary csv file
for site in station_summary:
    for item in station_summary[site]:
        summary_file.write(str(item) + ',')
    summary_file.write('\n')
summary_file.close()
if variable == 'tamn' or variable == 'tamx':
    tap_open.close()
if variable == 'ptpx':
    bad_ptpx_summary.close()
cardfile.close()
read_stations.close()
print 'Completed!'
|
import math
REFERENCE_TEXTS = []
def clean_tokenize_corpus(texts: list) -> list:
    """Lower-case, strip markup/punctuation and tokenize a list of texts.

    Keeps only ascii letters and spaces; '<br />' tags become spaces.
    Returns one token list per valid input string.  Non-string or empty
    entries are skipped.  Prints 'ERROR' and returns [] when `texts` is
    not a non-empty list.

    BUG FIX: the original tested `text != []`, which is always true for a
    str (str never equals a list); the intended empty-string check is
    applied here, so empty strings no longer produce spurious [] entries.
    """
    allowed_chars = set("qwertyuioplkjhgfdsazxcvbnm ")
    if not (isinstance(texts, list) and texts):
        print('ERROR')
        return []
    corpus = []
    for text in texts:
        if not (isinstance(text, str) and text):
            continue
        lowered = text.lower().replace('<br />', ' ')
        cleaned = ''.join(ch for ch in lowered if ch in allowed_chars)
        corpus.append(cleaned.split())
    return corpus
class TfIdfCalculator:
    """Compute TF, IDF and TF-IDF scores for a tokenized corpus.

    The corpus is a list of documents, each a list of string tokens
    (as produced by clean_tokenize_corpus).
    """
    def __init__(self, corpus):
        self.corpus = corpus
        self.tf_values = []      # one {word: tf} dict per document
        self.idf_values = {}     # word -> idf over the whole corpus
        self.tf_idf_values = []  # one {word: tf * idf} dict per document
        self.file_names = []     # optional column labels used by save_to_csv
    def calculate_tf(self):
        """Fill self.tf_values with per-document term frequencies."""
        if not (isinstance(self.corpus, list) and self.corpus):
            return
        for text in self.corpus:
            tf = {}
            # BUG FIX: append a dict for every document; the original skipped
            # invalid documents, desynchronizing indices with self.corpus.
            self.tf_values.append(tf)
            if not (isinstance(text, list) and text):
                continue
            total = sum(1 for word in text if isinstance(word, str))
            for word in text:
                if isinstance(word, str) and word not in tf:
                    tf[word] = text.count(word) / total
    def calculate_idf(self):
        """Fill self.idf_values with log(N / document-frequency)."""
        if not isinstance(self.corpus, list):
            return
        all_texts = sum(1 for text in self.corpus if isinstance(text, list))
        for word in self.__unique_words_extracter__():
            appearances = sum(
                1 for text in self.corpus
                if isinstance(text, list) and text and word in text)
            self.idf_values[word] = math.log(all_texts / appearances)
    def __unique_words_extracter__(self):
        """Return all corpus words, deduplicated, in first-seen order."""
        unique_words = []
        for text in self.corpus:
            if isinstance(text, list) and text:
                for word in text:
                    if isinstance(word, str) and word not in unique_words:
                        unique_words.append(word)
        return unique_words
    def calculate(self):
        """Combine tf and idf into per-document tf-idf dictionaries."""
        if not (isinstance(self.tf_values, list) and self.tf_values
                and isinstance(self.idf_values, dict) and self.idf_values):
            return
        for tf in self.tf_values:
            self.tf_idf_values.append(
                {word: tf[word] * self.idf_values[word] for word in tf})
    def report_on(self, word, document_index):
        """Return (tf-idf score, rank) of `word` in document `document_index`.

        Rank 0 is the highest tf-idf score in the document; tied scores
        share a rank.  Returns an empty tuple for an invalid index or a word
        absent from the document.
        BUG FIXES: the original allowed `document_index == len(...)` (IndexError)
        and tested `word not in self.tf_idf_values` — membership in a list of
        dicts — which let unknown words reach a KeyError.
        """
        if not (isinstance(self.tf_idf_values, list) and self.tf_idf_values):
            return ()
        if not 0 <= document_index < len(self.tf_idf_values):
            return ()
        scores = self.tf_idf_values[document_index]
        if word not in scores:
            return ()
        ranked = sorted(scores.items(), key=lambda item: (-item[1], item[0]))
        positions = {}
        position = 0
        for index, (ranked_word, value) in enumerate(ranked):
            positions[ranked_word] = position
            # Only advance the rank when the next score differs (ties share).
            if index + 1 < len(ranked) and value != ranked[index + 1][1]:
                position += 1
        return (scores[word], positions[word])
    def cosine_distance(self, index_text_1, index_text_2):
        """Return the cosine similarity of two documents' tf-idf vectors.

        Returns 1000 for out-of-range indices (kept from the original API).
        BUG FIXES: the original's `index == index_text_1 or index_text_2` was
        always truthy (vectors built for every document), `vector(0)` was a
        TypeError, `<=` allowed an out-of-range index, and a debug print of
        every vector has been removed.
        """
        size = len(self.tf_idf_values)
        if not (0 <= index_text_1 < size and 0 <= index_text_2 < size):
            return 1000
        vector_words = self.__unique_words_extracter__()
        vectors = []
        for index in (index_text_1, index_text_2):
            scores = self.tf_idf_values[index]
            text = self.corpus[index]
            vectors.append([scores[word] if word in text else 0
                            for word in vector_words])
        numerator = math.fsum(x * y for x, y in zip(vectors[0], vectors[1]))
        denominator = (math.sqrt(math.fsum(x * x for x in vectors[0]))
                       * math.sqrt(math.fsum(y * y for y in vectors[1])))
        return numerator / denominator
    def save_to_csv(self):
        """Write a ';'-separated report of tf, idf and tf-idf per word."""
        with open('report.csv', 'w') as f:
            tf_headline = ['TF(' + name + ')' for name in self.file_names]
            tf_idf_headline = ['TF-IDF(' + name + ')' for name in self.file_names]
            top = ['Слово'] + tf_headline + ['IDF'] + tf_idf_headline
            f.write(';'.join(top) + '\n')
            for word in self.__unique_words_extracter__():
                row = [word]
                row += [str(tf.get(word, 0)) for tf in self.tf_values]
                row.append(str(self.idf_values[word]))
                row += [str(scores.get(word, 0)) for scores in self.tf_idf_values]
                f.write(';'.join(row) + '\n')
if __name__ == '__main__':
    # Load the four reference documents into the module-level corpus.
    for file_name in ['5_7.txt', '15_2.txt', '10547_3.txt', '12230_7.txt']:
        with open(file_name, 'r') as source:
            REFERENCE_TEXTS.append(source.read())
    # scenario to check your work
    tokenized_corpus = clean_tokenize_corpus(REFERENCE_TEXTS)
    tf_idf = TfIdfCalculator(tokenized_corpus)
    tf_idf.calculate_tf()
    tf_idf.calculate_idf()
    tf_idf.calculate()
    print(tf_idf.report_on('good', 0))
    print(tf_idf.report_on('and', 1))
|
import inspect
def get_props(c, filter_values=()):
    """Return (value of the 'key' attribute, names of public data attributes).

    Scans `c.__dict__` (typically a class), skipping dunder names, routines
    (methods/functions) and any name listed in `filter_values`.

    :param c: object/class whose __dict__ is inspected.
    :param filter_values: attribute names to exclude from the result.
    :return: tuple (value of 'key' if present else None, list of kept names).

    BUG FIX: the default for `filter_values` was a mutable list; an immutable
    empty tuple avoids the shared-mutable-default pitfall.
    """
    props = {}
    query_key = None
    for k, v in c.__dict__.items():
        if k == 'key':
            query_key = v
        # Keep public, non-callable attributes that are not filtered out.
        if not k.startswith('__') and not inspect.isroutine(v) and k not in filter_values:
            props[k] = v
    return (query_key, list(props.keys()))
|
from .base import BaseEventTestCase
class QueryEventTestCase(BaseEventTestCase):
    """
    Snapshot tests covering the GraphQL queries of the events endpoint.
    """
    def test_query_deactivated_event(self):
        """Listing events should expose the `active` flag in each node."""
        query = """
        query {
            eventsList {
                edges {
                    node {
                        id
                        description
                        title
                        active
                    }
                }
            }
        }
        """
        self.request.user = self.admin.user
        self.assertMatchSnapshot(
            self.client.execute(query, context_value=self.request))
    def test_query_single_event(self):
        """Fetch a single event by its global relay id."""
        query = """
        query {
            event(id:"RXZlbnROb2RlOjU="){
                id
                description
                title
                active
            }
        }
        """
        self.request.user = self.admin.user
        self.assertMatchSnapshot(
            self.client.execute(query, context_value=self.request))
    def test_get_event_list(self):
        """The full event list including the nested social event."""
        query = """
        query {
            eventsList {
                edges {
                    node {
                        id
                        title
                        description
                        startDate
                        venue
                        socialEvent {
                            id
                            name
                        }
                    }
                    cursor
                }
            }
        }"""
        self.request.user = self.admin.user
        self.assertMatchSnapshot(
            self.client.execute(query, context_value=self.request))
    def test_filter_event_list_by_valid_venue_is_successful(self):
        """Filtering by an empty venue string matches every event."""
        query = """
        query {
            eventsList(venue: "") {
                edges {
                    node {
                        id
                        title
                        description
                        startDate
                        venue
                        socialEvent {
                            id
                            name
                        }
                    }
                    cursor
                }
            }
        }"""
        self.request.user = self.admin.user
        self.assertMatchSnapshot(
            self.client.execute(query, context_value=self.request))
    def test_filter_event_list_by_non_existing_venue_returns_empty_list(self):
        """Filtering by an unknown venue yields an empty edge list."""
        query = """
        query {
            eventsList(venue: "test place") {
                edges {
                    node {
                        id
                        title
                        description
                        startDate
                        venue
                        socialEvent {
                            id
                            name
                        }
                    }
                    cursor
                }
            }
        }"""
        self.request.user = self.admin.user
        self.assertMatchSnapshot(
            self.client.execute(query, context_value=self.request))
|
# coding: utf-8
#same as achybg.py, but if stopped before, can restart from the output files
#of the analytic contiunation.
import numpy as np
from mea import acon
from mea.model import green
import shutil
from mea.tools import kramerskronig as kk
from copy import deepcopy
import json
from scipy import linalg
# Tight-binding parameters: nearest (t) and next-nearest (tp) neighbour hopping.
t = 1.0
tp = 0.4
# Local hopping matrix of the 4-site cluster.
tloc = np.array([
    [0.0, -t, -tp, -t],
    [-t, 0.0, -t, 0.0],
    [-tp, -t, 0.0, -t],
    [-t, 0.0, -t, 0.0],
])

# Chemical potential from the statistics produced by the previous run.
with open("statsparams0.json") as fin:
    mu = json.load(fin)["mu"][0]

# Green's function and hybridization on the real axis (irreducible rep.).
(w_vec2, gfvec_irw) = green.read_green_ir("gf_irtow0.dat")
(w_vec, hybvec_irw) = green.read_green_ir("hyb_irtow0.dat")
# Transform both from the irreducible to the cluster representation.
hybvec_cw = green.ir_to_c(hybvec_irw)
gfvec_cw = green.ir_to_c(gfvec_irw)

# Self-energy from Dyson's equation at every frequency point.
sEvec_cw = np.zeros(hybvec_cw.shape, dtype=complex)
for idx, (hyb, gf) in enumerate(zip(hybvec_cw, gfvec_cw)):
    omega = w_vec[idx] + mu
    sEvec_cw[idx] = -linalg.inv(gf) + (omega * np.eye(4) - tloc - hyb)

green.save_gf_c("self_ctow0.dat", w_vec, sEvec_cw)
|
from django import forms
# Models
from service.models import Category, Service, Comment, Contractor, Rating, CommentContract
class CommentForm(forms.ModelForm):
    """Comment form for a service: a single small text area."""

    class Meta:
        model = Comment
        fields = ['text']
        widgets = {
            'text': forms.Textarea(attrs={'rows': 1, 'cols': 50}),
        }
class ServiceForm(forms.ModelForm):
    """Create/edit form for a Service, with Bootstrap-styled widgets."""

    class Meta:
        model = Service
        fields = ['category', 'title', 'description', 'price', 'address', 'date_active', 'status', 'image']
        widgets = {
            'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),
            'category': forms.Select(attrs={'class': 'form-select', 'cols': 30}),
            'status': forms.Select(attrs={'class': 'form-select'}),
            'title': forms.Textarea(attrs={'class': 'form-control', 'rows': 1}),
            'address': forms.Textarea(attrs={'class': 'form-control', 'rows': 1}),
            'price': forms.Textarea(attrs={'class': 'form-control', 'rows': 1}),
            'date_active': forms.Textarea(attrs={'class': 'form-control', 'rows': 1}),
        }
class ContractorForm(forms.ModelForm):
    """Profile form for a contractor: description, trade and phone number."""

    class Meta:
        model = Contractor
        fields = ['description', 'work', 'phone']
        widgets = {
            'work': forms.Select(attrs={'class': 'form-select'}),
            'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),
            'phone': forms.Textarea(attrs={'class': 'form-control', 'rows': 1}),
        }
class RatingForm(forms.ModelForm):
    """Form for submitting a rating."""

    class Meta:
        model = Rating
        fields = ['rating']
        # BUG FIX: the ModelForm Meta option is `widgets` (plural); the
        # original `widget` attribute was silently ignored by Django, so the
        # custom TextInput was never applied.
        widgets = {'rating': forms.TextInput(attrs={'cols': 10})}
class CommentContractorForm(forms.ModelForm):
    """Comment form for a contractor: a single small text area."""

    class Meta:
        model = CommentContract
        fields = ['text']
        widgets = {
            'text': forms.Textarea(attrs={'rows': 1, 'cols': 50}),
        }
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 21:59:46 2020
@author: thomas
"""
import numpy as np
import pandas as pd
import os, sys
import time as t
import subprocess
from shutil import copyfile
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
# constructs a filepath for the pos data of Re = $Re
def pname(cwd):
    """Return the path of the NOTA_List.txt file inside `cwd`.

    BUG FIX: os.path.join avoids the doubled separator the original string
    concatenation produced when `cwd` already ends with '/' (callers pass
    os.getcwd() + '/').
    """
    return os.path.join(cwd, "NOTA_List.txt")
def GetNOTAData(cwd):
    """Load NOTA_List.txt as a DataFrame sorted by Theta, parHx, parHy.

    The index is reset so rows can be addressed with .loc[0..n-1].
    """
    table = pd.read_csv(pname(cwd), delimiter=' ')
    table = table.sort_values(by=['Theta', 'parHx', 'parHy'])
    return table.reset_index(drop=True)
if __name__ == '__main__':
    #READ NOTAList.txt to get all sims that did not complete
    #Whether through error or through no end state
    #Pull Hx, Hy, Theta parameters for each
    #Change directory to Theta$Theta/Hx$Hx/Hy$Hy
    #Modify 'script_restart.sh and copy to specified directory
    #Copy input2D_restart into directory
    #Submit with subprocess the command "sbatch script_restart.sh"
    cwd_PYTHON = os.getcwd() + '/'
    data = GetNOTAData(cwd_PYTHON)
    #2 choices: Either ran for full 20s seconds or stopped due to error
    #1: Ran full 20s
    data20 = data[data['20s'] == 'y'].copy()
    data20 = data20.reset_index(drop=True)
    for idx in range(len(data20['20s'])):
    #for idx in range(1):
        #Get Parameters Values
        parTheta = data20.loc[idx,'Theta']
        parHx = data20.loc[idx,'parHx']
        parHy = data20.loc[idx,'parHy']
        #Change to the Theta$Theta/Hx$Hx/Hy$Hy directory
        strDir = 'Theta{0}/Hx{1}/Hy{2}/'.format(parTheta,parHx,parHy)
        cwd_SIM = cwd_PYTHON+strDir
        #Copy input2D_restart to the Theta$Theta/Hx$Hx/Hy$Hy directory
        #Create script_restart.sh with Theta$Theta/Hx$Hx/Hy$Hy specifications
        #In Theta$Theta/Hx$Hx/Hy$Hy directory
        # The SLURM script is first written in the Python cwd, then copied
        # into the simulation directory below.
        f = open('script_restart.sh','w')
        f.write('#!/bin/sh\n')
        strSBATCH1 = '#SBATCH --job-name=PD_Theta{0}_Hx{1}_Hy{2}_Re2\n'.format(parTheta,parHx,parHy)
        f.write(strSBATCH1)
        f.write('#SBATCH --ntasks=1\n')
        f.write('#SBATCH --time=7-0\n')
        f.write('#SBATCH --partition=general\n\n')
        f.write('./main2d.exe input2D_restart restart_IB2d 2000000')
        f.close()
        copyfile('script_restart.sh',cwd_SIM+'script_restart.sh')
        copyfile('input2D_restart',cwd_SIM+'input2D_restart')
        # NOTE(review): the log message says Re10 but the job name above says
        # Re2 — confirm which Reynolds number this batch actually targets.
        print('Re10: Restarting Simulation: Theta={0}: Hx={1}: Hy={2}'.format(parTheta,parHx,parHy))
        # Submit from inside the simulation directory, then return.
        os.chdir(cwd_SIM)
        subprocess.call(["sbatch","script_restart.sh"])
        os.chdir(cwd_PYTHON)
    #2) Stopped due to error
    # These runs are resubmitted with the original script.sh (fresh start).
    dataNo = data[data['20s'] == 'n'].copy()
    dataNo = dataNo.reset_index(drop=True)
    for idx in range(len(dataNo['20s'])):
        #Get Parameters Values
        parTheta = dataNo.loc[idx,'Theta']
        parHx = dataNo.loc[idx,'parHx']
        parHy = dataNo.loc[idx,'parHy']
        #Change to the Theta$Theta/Hx$Hx/Hy$Hy directory
        strDir = 'Theta{0}/Hx{1}/Hy{2}/'.format(parTheta,parHx,parHy)
        cwd_SIM = cwd_PYTHON+strDir
        os.chdir(cwd_SIM)
        subprocess.call(["sbatch","script.sh"])
        os.chdir(cwd_PYTHON)
        print('Re10: Starting Simulation: Theta={0}: Hx={1}: Hy={2}'.format(parTheta,parHx,parHy))
|
# @author: Bogdan Hlevca 2012
''' x = gaussElimin(a,b).
Solves [a]{b} = {x} by Gauss elimination.
'''
from numpy import dot
def gaussElimin(a, b):
    """Solve the linear system a @ x = b by Gauss elimination (no pivoting).

    Both `a` and `b` are modified in place; the solution overwrites (and is
    returned as) `b`.
    """
    n = len(b)
    # Forward elimination: zero out the sub-diagonal, column by column.
    for col in range(n - 1):
        for row in range(col + 1, n):
            if a[row, col] != 0.0:
                factor = a[row, col] / a[col, col]
                a[row, col + 1:n] = a[row, col + 1:n] - factor * a[col, col + 1:n]
                b[row] = b[row] - factor * b[col]
    # Back substitution, from the last unknown upwards.
    for row in range(n - 1, -1, -1):
        b[row] = (b[row] - dot(a[row, row + 1:n], b[row + 1:n])) / a[row, row]
    return b
|
import requests
import lxml.html as lh
from bs4 import BeautifulSoup
import pandas as pd
import openpyxl
import time
from others import create_excel_file, print_df_to_excel
# Scrape an IDEAS/RePEc author page, separating working papers from articles
# and discarding duplicate "other version" records.
workingpapers = []
workingpapersauthors = []
articles = []
articlesauthors = []
URL = 'https://ideas.repec.org/f/pba300.html'
#Create a handle, page, to handle the contents of the website
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
#print(soup.prettify())
# The active tab pane holds links naming the publication categories present.
# NOTE(review): soup.find returns None if the page layout changes, which
# would raise AttributeError on the next line — confirm error handling.
div = soup.find('div' ,attrs={'class':'tab-pane fade show active'})
possiblepublications = []
for a in div.find_all('a', recursive=False):
    possiblepublications.append(a.text)
print(possiblepublications)
# "Other version" entries (nested <li> inside a <div>) duplicate primary
# records; they are collected here and removed from the main lists below.
workingpaperotherversion = []
workingpapersauthorsotherversion = []
articleotherversion = []
articlesauthorsotherversion = []
if possiblepublications[0] == 'Working papers':
    table = soup.find('ol' ,attrs={'class':'list-group'})
    for li in table.find_all('li', recursive=False):
        for div in li.findAll('div'):
            for li in div.find_all('li', recursive=True):
                workingpaperotherversion.append(li.b.text)
                workingpapersauthorsotherversion.append(li.contents[0])
    for li in table.find_all('li', recursive=False):
        for b in li.findAll('b'):
            workingpapers.append(b.text)
    for li in table.find_all('li', recursive=True):
        workingpapersauthors.append(li.contents[0])
# NOTE(review): this raises IndexError when the page lists fewer than two
# publication categories — confirm against pages with one pane only.
if possiblepublications[1] == 'Articles':
    table = soup.find('ol', attrs={'class': 'list-group'})
    # Articles live in the <ol> that follows the working-papers list.
    table2 = table.find_next_sibling('ol')
    for li in table2.find_all('li', recursive=False):
        for div in li.findAll('div'):
            for li in div.find_all('li', recursive=True):
                articleotherversion.append(li.b.text)
                articlesauthorsotherversion.append(li.contents[0])
    for li in table2.find_all('li', recursive=False):
        for b in li.findAll('b'):
            articles.append(b.text)
    for li in table2.find_all('li', recursive=True):
        articlesauthors.append(li.contents[0])
# When the author has no working papers, the first pane is already Articles.
if possiblepublications[0] == 'Articles':
    table = soup.find('ol' ,attrs={'class':'list-group'})
    for li in table.find_all('li', recursive=False):
        for div in li.findAll('div'):
            for li in div.find_all('li', recursive=True):
                articleotherversion.append(li.b.text)
                articlesauthorsotherversion.append(li.contents[0])
    for li in table.find_all('li', recursive=False):
        for b in li.findAll('b'):
            articles.append(b.text)
    for li in table.find_all('li', recursive=True):
        articlesauthors.append(li.contents[0])
# Drop the duplicated "other version" records from the primary lists.
for i in workingpaperotherversion:
    workingpapers.remove(i)
for i in articleotherversion:
    articles.remove(i)
for i in workingpapersauthorsotherversion:
    workingpapersauthors.remove(i)
for i in articlesauthorsotherversion:
    articlesauthors.remove(i)
# Build numbered summary strings of authors (A) and titles (B).
B = ''
A = ''
for i in range(0, len(workingpapersauthors)):
    # NOTE(review): unlike B below, no '\n' separator is appended — confirm.
    A += '{})'.format(i + 1) + workingpapersauthors[i]
for i in range(0, len(workingpapers)):
    B += '{})'.format(i + 1) + workingpapers[i] + '\n'
# Extract publication-year tokens (pure digits) from the author strings.
WPyear = ''
WPyearcounter = 1
for i in range(0, len(workingpapersauthors)):
    for word in workingpapersauthors[i].split():
        word = word.replace('.', '')
        if word.isdigit():
            WPyear += '{}) '.format(WPyearcounter) + word + '\n'
            WPyearcounter += 1
print(len(workingpapersauthors))
print(len(workingpapers))
print(len(articlesauthors))
print(len(articles))
class RandomizedSet:
    """Set supporting O(1) average insert, remove and uniform random pick.

    Values live in a list (for random access) plus a dict mapping each value
    to its list position (for O(1) membership and removal).
    """
    def __init__(self):
        self.nums = []      # stored values, arbitrary order
        self.indices = {}   # value -> index of that value in self.nums
    def insert(self, val: int) -> bool:
        """Insert `val`; return False if it is already present."""
        if val in self.indices:
            return False
        self.indices[val] = len(self.nums)
        self.nums.append(val)
        return True
    # Removal swaps the target with the last element, then pops the tail,
    # keeping both structures consistent in O(1).
    def remove(self, val: int) -> bool:
        """Remove `val`; return False if it is not present."""
        if val not in self.indices:
            return False
        pos = self.indices[val]
        last = self.nums[-1]
        # Move the last element into the vacated slot and fix its index.
        self.nums[pos] = last
        self.indices[last] = pos
        self.nums.pop()
        del self.indices[val]
        return True
    def getRandom(self) -> int:
        """Return a uniformly random stored value."""
        # BUG FIX: `choice` was never imported anywhere in this module, so
        # the original raised NameError on first call.
        from random import choice
        return choice(self.nums)
|
# Generated by Django 3.1.3 on 2020-11-16 00:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: attach each Profile to a Django user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('rest_api', '0005_auto_20201116_0323'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='user',
            # NOTE(review): the FK targets the hard-coded 'auth.user' label
            # rather than the swappable settings.AUTH_USER_MODEL declared in
            # dependencies above — confirm this is intentional.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='auth.user', verbose_name='Пользователь'),
            preserve_default=False,
        ),
    ]
|
# coding=utf-8
# @Author: wjn
import requests
import random
import time
class TYRequest():
    """Helpers that POST XML bodies to the system under test.

    Each request sends a randomized X-Forwarded-For header so calls appear
    to come from different client IPs.
    """
    def getInterfaceRes_no_token(self, url, body):
        '''
        Send a POST request that needs no prior cookie/token login.
        Only POST is supported for now; a `method` parameter could be added
        later to support GET as well.
        :return: the requests Response object (e.g. ret.status_code, ret.json())
        '''
        headers = {
            'Content-Type': 'text/xml; charset=UTF-8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            'X-Forwarded-For': CreateData.create_random_ip(),
        }
        return requests.post(url=url, headers=headers, data=body)
    def getInterfaceRes_need_token(self, url, body):
        '''
        Send a POST request to endpoints that require a bearer token.
        Only POST is supported for now; a `method` parameter could be added
        later to support GET as well.
        :return: the requests Response object (e.g. ret.status_code, ret.json())
        '''
        token = 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1ZTQ2Yzk0My1mYjcyLTQxZDMtODk2NS0xMTgzNjVjODQ4NjMiLCJpZCI6IjgxN2QyNjliLWRlNWMtNDQ3Ni1hY2FkLThhOWYwYzI1NzdiNCIsIm5hbWUiOiIxMjE3OThkZS0wOTE1LTRhMjEtYmFlNy0zNDcyZjAwMDA4YzMiLCJjb2RlIjoiNTQ0ZWI1MWQtZmIwZS00NmEwLWJlOGMtNmFhMjEzMWQ1Y2Q3IiwiYWdyZWVtZW50SWQiOiJlMzllNjRjOS04ZDUxLTQ2ZDAtODU5NS1jMTk0OWJiMDY4MTIiLCJ0eXBlIjoiZWI1YTA4NjgtYTkwNy00MjNmLTlhNzctNTcyZmEyNjRlNzE4IiwicmlnaHRSYW5nZSI6Ijg1YTE4MzU2LTM2ODYtNGU5Yy1hMGVlLWMyMWY3MzRhZTk2YSIsInJhbmRvbSI6ImNhNzQ0MTUzLWJkMjgtNGNmOC1hZTM2LTRlYzJjNGUzNDMwNyJ9.ODpHllunYntNit2mqP7MK8_iVbDmVjm4GxhgipBNMaZNpP3nXMp2tXJ1w2S6xr-LzrBfvjj0DHII8o9nfwc0986TDsTXH0L-Sul_FXfBCweVmnGwEkJTNkh9IDk8z-39njlnHernzbjsnYmkYhqLB-noH2WiHxgT98JLv1zisoc'
        headers = {
            'Content-Type': 'text/xml; charset=UTF-8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            'X-Forwarded-For': CreateData.create_random_ip(),
            'Authorization': "Bearer " + token
        }
        return requests.post(url=url, headers=headers, data=body)
    def getInterfaceRes_need_cookie(self, url, body):
        '''
        Send a POST request for endpoints that require a prior cookie login.
        Only POST is supported for now; a `method` parameter could be added
        later to support GET as well.
        :param body: request payload
        :return: the requests Response object
        '''
        headers = {
            'Content-Type': 'text/xml; charset=UTF-8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            'X-Forwarded-For': CreateData.create_random_ip()
        }
        # Keep the session on self: later requests made through self.s will
        # reuse the cookies set by this call.
        self.s = requests.session()
        return self.s.post(url=url, data=body, headers=headers)
'''
方法二:追加cookie:
https://www.cnblogs.com/hanmk/p/8907746.html
第一个接口:
r = requests.get('登录')
r.cookies # 这里就可以获取到登录后返回的cookie
s = requests.Session() # 开启一个会话Session
jar = requests.cookies.RequestsCookieJar() # 创建一个Cookie Jar对象
jar.set('49BAC005-7D5B-4231-8CEA-1XXXXBEACD67','cktXXXX001') # 向Cookie Jar对象中添加cookie值
jar.set('JSESSIONID','F4FFF69B8CXXXX80F0C8DCB4C061C0')
jar.set('JSESSIONIDSSO','9D49C7XXXX448FDF5B0F294242B44A')
s.cookies.update(jar) # 把cookies追加到Session中
整体思路:
比如一共发两个接口,第一个是登录,第二个是发送交易下单,第二个接口中需要第一个接口登录中返回的cookie,
就可以把第一个接口的返回报文cookie取出,之后set进下一个报文的cookie中
方法三:跟方法二思路一样
https://www.cnblogs.com/hz-blog/p/8150719.html
def __init__(self):
self.cookies = requests.cookies.RequestsCookieJar()
def go(self, url, method, post_data):
response = requests.request(method, url
, data=post_data
, headers=info.headers
, cookies=self.cookies) #传递cookie
self.cookies.update(response.cookies) # 保存cookie
'''
class CreateData():
    '''
    Factory for random test data.  Use it when a test case needs randomly
    created names, phone numbers, etc.  Example:
        def setUp(self):
            self.name = CreateData.create_name()
            self.phone = CreateData.create_phone()
    '''
    @staticmethod
    def create_random_ip():
        """Pick a random IP from a fixed pool (used for X-Forwarded-For)."""
        ips = ['182.42.171.88', "113.108.182.52", "1.192.119.149", "183.197.59.179", "218.195.219.255", "1.189.13.41",
               "116.234.222.36", "110.179.228.16", "220.174.166.255"]
        return random.choice(ips)
    @staticmethod
    def create_phone():
        """Return a random 11-digit phone number starting with '9'.

        BUG FIX: random.randint is inclusive on both ends, so the original
        bounds (999999999, 10000000000) produced 9- to 11-digit suffixes and
        therefore phone numbers of varying length; the bounds below always
        yield a 10-digit suffix.
        """
        last = random.randint(1000000000, 9999999999)
        phone = "9{}".format(last)
        return phone
    @staticmethod
    def create_name():
        """Return a random test user name like 'AutoTest123'."""
        # NOTE(review): randint(99, 1000) can give 2- or 4-digit suffixes;
        # kept as-is since name length does not appear to matter to callers.
        last = random.randint(99, 1000)
        name = "AutoTest{}".format(last)
        return name
    @staticmethod
    def get_millis():
        """Return the current time as integer milliseconds since the epoch."""
        millis = int(round(time.time() * 1000))
        return millis
    @staticmethod
    def create_coursename():
        """Return a random course name (Chinese prefix kept intentionally)."""
        last = random.randint(99, 1000)
        course_name = "自动化测试{}".format(last)
        return course_name
if __name__ == '__main__':
    # Quick manual smoke test of the request helper.
    req = TYRequest()
    # NOTE(review): TYRequest defines no method named getInterfaceRes_no_cookie
    # (and getInterfaceRes_no_token requires url/body arguments), so this line
    # raises AttributeError as written — confirm the intended call.
    ret = req.getInterfaceRes_no_cookie()
    print(type(ret))
    print(ret.status_code)
    print(ret.json())
    print(type(ret.json()))
|
def star(pa):
for i in range(pa):
print("*"*pa)
print("*"+(pa-2)*" "+"*")
print("*"+(pa-2)*" "+"*")
print("*"*pa)
# Demo: print the figure with width 5, then a marker line.
star(5)
print("update")
# Generated by Django 3.2.5 on 2021-07-31 05:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: default ordering for category/country and a
    wider, optional Category.parent_id field."""

    dependencies = [
        ('master_file', '0016_product_category'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'ordering': ('level', 'parent_id', 'id'), 'verbose_name_plural': 'Category'},
        ),
        migrations.AlterModelOptions(
            name='country',
            options={'ordering': ('sym',), 'verbose_name_plural': 'Countries'},
        ),
        migrations.AlterField(
            model_name='category',
            name='parent_id',
            # NOTE(review): parent_id is a CharField rather than a ForeignKey
            # to Category — confirm this denormalized design is intended.
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Parent Category'),
        ),
    ]
|
# coding: utf-8
# iprPy imports
from .ElasticConstantsStatic import ElasticConstantsStatic
__all__ = ['ElasticConstantsStatic']
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Prefix prepended to this catkin package's name (empty by default).
CATKIN_PACKAGE_PREFIX = ""
# Include directories exported by the package (';'-separated by CMake).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/jyk/stomp_ws/src/4.1/src/ur5_demo_descartes/include".split(';') if "/home/jyk/stomp_ws/src/4.1/src/ur5_demo_descartes/include" != "" else []
# Space-separated catkin run dependencies of this package.
PROJECT_CATKIN_DEPENDS = "moveit_core;descartes_moveit;pluginlib".replace(';', ' ')
# Linker flags exported for dependent packages.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lur5_demo_descartes".split(';') if "-lur5_demo_descartes" != "" else []
PROJECT_NAME = "ur5_demo_descartes"
# Devel-space install prefix for this package.
PROJECT_SPACE_DIR = "/home/jyk/stomp_ws/devel/.private/ur5_demo_descartes"
PROJECT_VERSION = "0.0.0"
|
def to_base(integer, base_list=None):
    """
    Convert an integer to an arbitrarily-represented arbitrary-base number
    system.
    The base representation must be defined in the `base list` parameter.
    The base is taken from the length of the list.
    :param integer: Base 10 int()
    :type integer: int()
    :param base_list: A list defining the order and number of representations.
    :type base_list: A list-like object. Needs to support len() and indexing.
    :return: A representation of the integer in the defined base.
    :rtype: str()
    :raises ValueError: if base_list is None or integer is negative.
    Example:
    >>> # Hexadecimal
    >>> to_base(255, '0123456789abcdef')
    'ff'
    >>> # Base 62
    >>> to_base(99999999999999999999999999999999999999999999999,
    ...         '0123456789'
    ...         'abcdefghijklmnopqrstuvwxyz'
    ...         'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    '2uXfZBR6CN4F4MmXncUdm8QszFB'
    >>> # This one is not very useful, but possible
    >>> to_base(1337, ['', 'l', '', 'e', '', '', '', 't', '', ''])
    'leet'
    """
    if base_list is None:
        raise ValueError('base_list is not defined')
    if integer < 0:
        # BUG FIX: divmod on a negative number never reaches 0, so the loop
        # below would spin forever; reject negatives explicitly.
        raise ValueError('to_base does not support negative integers')
    new_base = len(base_list)
    if integer == 0:
        # BUG FIX: the loop below never runs for 0 and returned '' instead
        # of the zero digit of the target base.
        return str(base_list[0])
    digits = []
    current = integer
    while current != 0:
        current, remainder = divmod(current, new_base)
        digits.append(base_list[remainder])
    # Digits were produced least-significant first.
    return ''.join(reversed(digits))
def base_words(integer):
    """
    Convert a positive integer to a memorable string based on words from
    /usr/share/dict/words.
    :param integer: positive base 10 number
    :type integer: int()
    :return: ConcatenatedCapitalizedWords
    :rtype: str()
    :raises ValueError: if integer is negative.
    """
    if integer < 0:
        raise ValueError('base_words does not support negative integers')
    # Save the words in an in-memory cache attached to this method.
    if not hasattr(base_words, 'cache'):
        # BUG FIX: the original left the file handle open; the context
        # manager guarantees it is closed.
        with open('/usr/share/dict/words', 'r') as word_file:
            # Filter out plurals, possessive form and conjugations.
            words = [line.strip().capitalize()
                     for line in word_file if '\'' not in line]
        base_words.cache = sorted(set(words))
    return to_base(integer, base_words.cache)
class DigiMap(object):
    """Fixed map of five 'home' regions, each a 600x600 square at the origin.

    For every region the constructor derives a 'constraint' rectangle
    (x1, y1, x2, y2) from its 'pos' rectangle (x, y, width, height) and
    caches the pos/color lists in insertion order.
    """
    def __init__(self):
        region_colors = {
            'koromon home': 'red',
            'tanemon home': 'blue',
            'tsunomon home': 'green',
            'yokomon home': 'yellow',
            'marineangemon home': 'white',
        }
        self.map = {}
        for name, color in region_colors.items():
            x, y, width, height = (0, 0, 600, 600)
            self.map[name] = {
                'pos': (x, y, width, height),
                'color': color,
                'constraint': (x, y, x + width, y + height),
            }
        self.region_size = len(self.map)
        self.rect_list = [entry['pos'] for entry in self.map.values()]
        self.color_list = [entry['color'] for entry in self.map.values()]
    def get_region_colors(self):
        """Return the region colors, in insertion order."""
        return self.color_list
    def get_rect_tuples(self):
        """Return the (x, y, width, height) rectangles, in insertion order."""
        return self.rect_list
    def get_region_size(self):
        """Return the number of regions captured at construction time."""
        return self.region_size
    def clear_regions(self):
        """Empty the region table (cached lists/count are left untouched)."""
        self.map.clear()
    def get_region_ranges(self, region_name):
        """Return the (x1, y1, x2, y2) constraint tuple for `region_name`."""
        return self.map[region_name]['constraint']
|
import os
import sqlalchemy
import string
import oauth2client
import httplib2
from flask import Flask, render_template, request, redirect, url_for, flash, send_from_directory
from datetime import datetime
import sys
import json
import requests
import codecs
from datetime import datetime
from datetime import date
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine, asc, desc
from flask import session as sessionmaker
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from werkzeug.utils import secure_filename
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail, Message
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from random import randint
from flask import make_response
import requests
from werkzeug.utils import secure_filename
from flask_login import LoginManager
from flask_login import current_user, login_user
from flask_login import logout_user
from flask_login import login_required
from flask_mail import Mail, Message
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key — load from env/config instead.
app.secret_key = 'super_secret_key'
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db_up import Base, User, UserMixin, Image,Cmts
# SQLite database shared by all requests through a scoped session.
engine = create_engine('sqlite:///up.db')
Base.metadata.bind = engine
session = scoped_session(sessionmaker(bind = engine))
# Upload location and permitted file extensions per media type.
UPLOAD_FOLDER = os.path.dirname ('static/im/usrs/')
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'png'])
ALLOWED_EXTENSIONS_BOOKS = set(['pdf','epub','txt','epdf'])
ALLOWED_EXTENSIONS_VIDEOS = set(['mpg','mpeg','mp4','mov'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config.update(
    DEBUG=False,
    SESSION_COOKIE_SECURE=True,
    SESSION_COOKIE_HTTPONLY=True,
    SESSION_COOKIE_SAMESITE='Lax',
    #EMAIL SETTINGS
    MAIL_SERVER='smtp.gmail.com',
    MAIL_PORT=465,
    MAIL_USE_SSL=True,
    # NOTE(review): empty mail credentials — supply via environment variables.
    MAIL_USERNAME = '',
    MAIL_PASSWORD = ''
)
mail = Mail(app)
# Flask-Login setup: unauthenticated users are redirected to the 'home' view.
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "home"
@login_manager.user_loader
def load_user(id):
    """Flask-Login user loader: return the User row whose primary key is `id`."""
    return session.query(User).filter_by(id=id).one()
@app.teardown_request
def remove_session(ex=None):
    """Dispose of the scoped SQLAlchemy session at the end of each request."""
    session.remove()
@app.route('/logout')
def logout():
    """Log the current user out and return to the landing page."""
    print("logged out")
    logout_user()
    return redirect(url_for('home'))
@app.route('/', methods = ["GET","POST"])
def home():
    """Landing page carrying the sign-up / sign-in forms."""
    print('home')
    return render_template('index.html')
@app.route('/su', methods = ["GET","POST"])
def upsignUp():
    """Handle sign-up: create the user, its upload folder, and log it in.

    SECURITY NOTE(review): the password is stored in plain text (newUser.p)
    and form fields are not validated — confirm and fix with hashing.
    """
    print('in sign up')
    users = session.query(User).all()
    if request.method == 'POST':
        # Reject duplicate e-mail addresses (case-insensitive).
        for u in users:
            print('checking if user in system already exists')
            if u.email.lower() == request.form['email'].lower():
                return redirect(url_for('home'))
        newUser = User(username = request.form['usr'], email = request.form['email'].lower())
        if request.form['p_word'] == request.form['p_word2']:
            newUser.p = request.form['p_word']
            session.add(newUser)
            session.commit()
            # Per-user directory for uploaded images, keyed by the new row id.
            os.makedirs('static/im/usrs/' + str(newUser.id))
            print('user has been created')
            login_user(newUser, remember=True)
            return redirect(url_for('main',user = current_user.username))
        else:
            print('passwords do not match')
            return redirect(url_for('home'))
    return redirect(url_for('home'))
@app.route('/si', methods = ["GET","POST"])
def sifUsr():
    """Handle sign-in: match e-mail and password, then start a session.

    NOTE(review): the submitted e-mail is compared without .lower(), unlike
    sign-up, so mixed-case input never matches; the success redirect also
    passes the whole User object (not the username) into the URL — confirm.
    """
    users = session.query(User).all()
    if request.method == 'POST':
        for u in users:
            if u.email.lower() == request.form['email']:
                if u.p == request.form['p_word']:
                    login_user(u, remember=True)
                    return redirect(url_for('main', user = u))
        return redirect(url_for('home'))
        # NOTE(review): unreachable — the statement above already returned.
        return redirect(url_for('home'))
    return redirect(url_for('home'))
@app.route('/<string:user>')
@login_required
def main(user):
    """User dashboard: show the current user's images and all comments.

    NOTE(review): the `user` URL segment is ignored; everything is loaded
    for the authenticated user via `current_user` — confirm intent.
    """
    thisUser = session.query(User).filter_by(id = current_user.id).one()
    userImages = session.query(Image).filter_by(user_id = current_user.id).all()
    cmts = session.query(Cmts).all()
    return render_template('main.html',cmts = cmts, folder = str(thisUser.id), user = thisUser, userImages = userImages)
# photoUploads
@app.route('/upload/', methods = ['GET','POST'])
@login_required
def upload_photo():
    """Accept an image upload for the current user and record it in the DB.

    NOTE(review): `allowed_file` is not defined anywhere in this module, so
    reaching that check raises NameError at runtime — presumably it was
    meant to validate against ALLOWED_EXTENSIONS; confirm.
    """
    userinfo = session.query(User).filter_by(id = current_user.id).one()
    allphotos = session.query(Image).filter_by(user_id = current_user.id).all()
    if request.method == 'POST':
        print('inside upload photo request')
        # check if the post requiest has the file part included
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            path = 'static/im/usrs/' + str(userinfo.id)
            print(path)
            # NOTE(review): mutating app.config per request is not
            # thread-safe; concurrent uploads may clobber each other's folder.
            UPLOAD_FOLDER = os.path.dirname ('static/im/usrs/' + str(userinfo.id) + '/')
            app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
            # Avoid overwriting an existing upload with the same filename.
            for a in allphotos:
                if a.path == filename:
                    filename = filename + str("1")
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            newPhoto = Image(path = filename, user_id = userinfo.id, img_desc = request.form['desc'])
            session.add(newPhoto)
            session.commit()
            print('photo added and saved')
            return redirect(url_for('main',user = current_user.username))
        return redirect(url_for('main',user = current_user.username))
    return redirect(url_for('main',user = current_user.username))
if __name__ == '__main__':
    # NOTE(review): this overrides the DEBUG=False set in app.config above —
    # disable before deploying.
    app.debug = True
    app.run(host='0.0.0.0')
    # connect_args=={'check_same_thread': False}
|
#!/usr/bin/env python
from collections import Counter
import gzip

import FileUtilities as fu
ACCEPTED_CHR = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20","21","22", "X", "Y", "MT"]
def count_alt(strng):
    """Tally the base calls in one pileup base-string.

    Returns a tab-joined string of:
        reference matches ('.' or ','), A, C, T, G, deletions ('*'), N,
        and a variant class — 'INDEL' if any '+'/'-' marker is present,
        otherwise 'SNP'.
    """
    bases = strng.upper()
    variant_class = 'INDEL' if ('+' in bases or '-' in bases) else 'SNP'
    counts = (
        bases.count('.') + bases.count(','),  # matches to the reference base
        bases.count('A'),
        bases.count('C'),
        bases.count('T'),
        bases.count('G'),
        bases.count('*'),                     # deletion placeholders
        bases.count('N'),
    )
    return '\t'.join(str(c) for c in counts) + '\t' + variant_class
def get_count_alt_qpileup(pileup):
    """Convert a gzipped pileup file into a per-position allele-count table.

    Reads <pileup>.gz and writes <pileup>.alt_count.gz with columns:
    CHROM POS REF DEPTH REFCOUNT A C T G DEL N VC.

    NOTE(review): this is Python-2-style gzip usage — under Python 3,
    gzip.open(..., 'r'/'w') is binary, so iterating yields bytes and writing
    str would raise; confirm the target interpreter version.
    """
    outfile=pileup+'.alt_count'
    fh = gzip.open(pileup+'.gz', 'r')
    # Remove any stale output before (re)creating it.
    fu.delete(outfile+'.gz')
    fh_out = gzip.open(outfile+'.gz', 'w')
    header = 'CHROM' + '\t' + 'POS' + '\t' + 'REF' + '\t' + 'DEPTH' + '\t' + 'REFCOUNT' + '\t' + 'A' + '\t' + 'C' + '\t' + 'T' + '\t' + 'G' + '\t' + 'DEL' + '\t' + 'N' + '\t' + 'VC'
    fh_out.write(header+'\n')
    for line in fh:
        fields=line.split('\t')
        # Example input line:
        #1      909768  A       41      GGgGgggGGGGGGggggGGggggGggggggggggggggg^]g^]g
        # Pass CHROM/POS/REF/DEPTH through; expand the base string (field 4)
        # into the per-allele counts via count_alt().
        tofile=str(fields[0])+'\t'+str(fields[1])+'\t'+str(fields[2])+'\t'+str(fields[3])+'\t'+count_alt(str(fields[4]))
        fh_out.write(tofile+'\n')
    fh.close()
    fh_out.close()
####Find the maximum allele(s) out of A,T,C,G,DEL
def find_max_alleles(allele_counts, allele_list):
a=allele_counts
b=allele_list
max_a=max(a)
max_b=""
for i in range(5):
if (a[i]==max_a):
max_b=max_b+b[i]
return max_b
def get_macount(altcount):
    """Split a gzipped .alt_count table into two gzipped tables.

    Positions with no alternative alleles go to <altcount>.ref.gz; positions
    with at least one alternative allele go to <altcount>.aap.gz. Both gain
    three columns: AD (adjusted depth), MACOUNT (max alternative-allele
    count) and MA (the max allele name(s)).

    Input columns: CHROM POS REF DEPTH REFCOUNT A C T G DEL N VC

    NOTE(review): written in Python-2 style; under Python 3 gzip.open with
    mode 'r'/'w' is binary, so text reads/writes would need adapting —
    confirm the target interpreter.
    """
    outfile_aap = altcount + '.aap'
    outfile_ref = altcount + '.ref'
    fh = gzip.open(altcount+'.gz', 'r')
    # Remove stale outputs before re-creating them.
    fu.delete(outfile_aap+'.gz')
    fu.delete(outfile_ref+'.gz')
    fh_aapout = gzip.open(outfile_aap+'.gz', 'w')
    fh_refout = gzip.open(outfile_ref + '.gz', 'w')
    header='CHROM'+'\t'+'POS'+'\t'+'REF'+'\t'+'DEPTH'+ '\t' + 'REFCOUNT' + '\t' + 'A' + '\t' + 'C' + '\t' + 'T' + '\t' + 'G' + '\t' + 'DEL' + '\t' + 'N' + '\t' + 'VC' + '\t' + 'AD' + '\t' + 'MACOUNT' + '\t' + 'MA'
    fh_aapout.write(header+'\n')
    fh_refout.write(header+'\n')
    linecount = 0
    alt_alleles = ['A', 'C', 'T', 'G', 'DEL']
    for line in fh:
        line = str(line).strip()
        if linecount > 0:  # skip the input header row
            fields = line.split('\t')
            ichr = str(fields[0])
            # Normalize 'chr1' -> '1' so the ACCEPTED_CHR lookup matches.
            if ichr.startswith('chr'):
                chrom = ichr[3:]
            else:
                chrom = ichr
            depth = int(fields[3])
            ref_count = int(fields[4])
            n_count = int(fields[10])
            # The reference base is not counted in the previous step: in a
            # pileup, bases equal to the reference appear as '.' or ','.
            # BUG FIX: map() returns a one-shot iterator on Python 3, so the
            # original code's sum() exhausted it before max() could run;
            # materialize a list so both aggregations see the counts.
            alleles_counts = list(map(int, fields[5:10]))  # A, C, T, G, DEL
            sum_of_alternative = sum(alleles_counts)
            if fu.find_first_index(ACCEPTED_CHR, chrom.strip()) > -1:
                # Determine max_allele, max_allele_count, and adjusted_depth.
                if sum_of_alternative == 0:
                    # Reference-only position: adjusted depth excludes Ns.
                    adjusted_depth = int(depth - n_count)
                    max_allele_count = 0
                    max_allele = "."
                    tofile = line+'\t'+str(adjusted_depth)+'\t'+str(max_allele_count)+'\t'+str(max_allele)
                    fh_refout.write(tofile+'\n')
                elif sum_of_alternative > 0:
                    max_allele_count = max(alleles_counts)  # max alternative allele count
                    max_allele = find_max_alleles(alleles_counts, alt_alleles)
                    adjusted_depth = int(ref_count + max_allele_count)
                    tofile = line+'\t'+str(adjusted_depth)+'\t'+str(max_allele_count)+'\t'+str(max_allele)
                    fh_aapout.write(tofile+'\n')
        linecount = linecount+1
    fh.close()
    fh_aapout.close()
    fh_refout.close()
|
# pylint: disable=invalid-name,too-few-public-methods
'''
Module contains classes relevant to plotting maps. The Map class handles all the
functionality related to a Basemap, and adding airports to a blank map. The
DataMap class takes as input a Map object and a DataHandler object (e.g.,
UPPData object) and creates a standard plot with shaded fields, contours, wind
barbs, and descriptive annotation.
'''
from math import isnan
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.offsetbox as mpob
import matplotlib.patches as mpatches
from mpl_toolkits.basemap import Basemap
import numpy as np
import adb_graphics.utils as utils
# TILE_DEFS is a dict with predefined tiles specifying the corners of the grid to be plotted.
# Order: [lower left lat, upper right lat, lower left lon, upper right lon]
# Keys are the tile names accepted by Map(tile=...); values are degrees,
# with west longitudes negative.
TILE_DEFS = {
    'NC': [36, 51, -109, -85],
    'NE': [36, 48, -91, -62],
    'NW': [35, 52, -126, -102],
    'SC': [24, 41, -107, -86],
    'SE': [22, 37, -93.5, -72],
    'SW': [24.5, 45, -122, -103],
    'AKZoom': [52, 73, -162, -132],
    'AKRange': [59.722, 65.022, -153.583, -144.289],
    'Anchorage': [58.59, 62.776, -152.749, -146.218],
    'ATL': [31.2, 35.8, -87.4, -79.8],
    'CA-NV': [30, 45, -124, -114],
    'CentralCA': [34.5, 40.5, -124, -118],
    'CHI-DET': [39, 44, -92, -83],
    'DCArea': [36.7, 40, -81, -72],
    'EastCO': [36.5, 41.5, -108, -101.8],
    'Florida': [19.2305, 29.521, -86.1119, -73.8189],
    'GreatLakes': [37, 50, -96, -70],
    'HI': [16.6, 24.6, -157.6, -157.5],
    'Juneau': [55.741, 59.629, -140.247, -129.274],
    'NYC-BOS': [40, 43, -78.5, -68.5],
    'PuertoRico': [15.5257, 24.0976, -74.6703, -61.848],
    'SEA-POR': [43, 50, -125, -119],
    'SouthCA': [31, 37, -120, -114],
    'SouthFL': [24, 28.5, -84, -77],
    'VortexSE': [30, 37, -92.5, -82],
}
class Map():
    '''
    Class includes utilities needed to create a Basemap object, add airport
    locations, and draw the blank map.

    Required arguments:

      airport_fn   full path to airport file (CSV of lat,lon pairs)
      ax           figure axis

    Keyword arguments:

      map_proj     dict describing the map projection to use.
                   The only options currently are for lcc settings in
                   _get_basemap()
      corners      list of values lat and lon of lower left (ll) and upper
                   right (ur) corners:
                       ll_lat, ur_lat, ll_lon, ur_lon
      model        model designation used to trigger higher resolution maps if needed
      tile         a string corresponding to a pre-defined tile in the
                   TILE_DEFS dictionary
    '''

    def __init__(self, airport_fn, ax, **kwargs):
        self.ax = ax
        self.grid_info = kwargs.get('grid_info', {})
        self.model = kwargs.get('model')
        self.tile = kwargs.get('tile', 'full')
        self.airports = self.load_airports(airport_fn)
        # Full-domain tiles use the corners supplied in grid_info; named
        # sub-tiles look theirs up in TILE_DEFS instead. In both cases the
        # 'corners' entry is popped so grid_info can be forwarded verbatim
        # to Basemap in _get_basemap().
        if self.tile in ['full', 'conus', 'AK',]:
            self.corners = self.grid_info.pop('corners')
        else:
            self.corners = self.get_corners()
            self.grid_info.pop('corners')
        # Some of Hawaii's smaller islands and islands in the Caribbean don't
        # show up with a larger threshold.
        area_thresh = 1000
        if self.tile in ['HI', 'Florida', 'PuertoRico'] or self.model in ['hrrrhi', 'hrrrcar']:
            area_thresh = 100
        self.m = self._get_basemap(area_thresh=area_thresh, **self.grid_info)

    def boundaries(self):
        ''' Draws map boundaries - coasts, states, countries. '''
        # drawcoastlines can raise ValueError for some domains; fall back to
        # county outlines when it does, otherwise add counties only on
        # zoomed-in sub-tiles where they are legible.
        try:
            self.m.drawcoastlines(linewidth=0.5)
        except ValueError:
            self.m.drawcounties(color='k',
                                linewidth=0.4,
                                zorder=2,
                                )
        else:
            if self.tile not in ['full', 'conus', 'AK']:
                self.m.drawcounties(antialiased=False,
                                    color='gray',
                                    linewidth=0.1,
                                    zorder=2,
                                    )
        self.m.drawstates()
        self.m.drawcountries()

    def draw(self):
        ''' Draw a map with political boundaries and airports only. '''
        self.boundaries()
        self.draw_airports()

    def draw_airports(self):
        ''' Plot each of the airport locations on the map. '''
        lats = self.airports[:, 0]
        lons = 360 + self.airports[:, 1] # Convert to positive longitude
        x, y = self.m(lons, lats)
        # White-filled circles with a thin black edge.
        self.m.plot(x, y, 'ko',
                    ax=self.ax,
                    color='w',
                    fillstyle='full',
                    markeredgecolor='k',
                    markeredgewidth=0.5,
                    markersize=4,
                    )
        # Release the projected coordinate arrays promptly.
        del x
        del y

    def _get_basemap(self, **get_basemap_kwargs):
        ''' Wrapper around basemap creation.

        Caller-supplied kwargs (projection settings, area_thresh, ...) are
        applied last so they override the defaults assembled here. '''
        basemap_args = dict(
            ax=self.ax,
            resolution='i',
            )
        corners = self.corners
        if corners is not None:
            basemap_args.update(dict(
                llcrnrlat=corners[0],
                llcrnrlon=corners[2],
                urcrnrlat=corners[1],
                urcrnrlon=corners[3],
                ))
        basemap_args.update(get_basemap_kwargs)
        return Basemap(**basemap_args)

    def get_corners(self):
        '''
        Gather the corners for a specific tile. Corners are supplied in the
        following format:

        lat and lon of lower left (ll) and upper right (ur) corners:
             ll_lat, ur_lat, ll_lon, ur_lon
        '''
        return TILE_DEFS[self.tile]

    @staticmethod
    def load_airports(fn):
        ''' Load lat, lon pairs from a text file, return a list of lists. '''
        with open(fn, 'r') as f:
            data = f.readlines()
        return np.array([l.strip().split(',') for l in data], dtype=float)
class DataMap():
    #pylint: disable=too-many-arguments
    '''
    Class that combines the input data and the chosen map to plot both together.

    Input:

        field           datahandler data object for data field to shade
        contour_fields  list of datahandler object fields to contour
        hatch_fields    list of datahandler object fields to hatch over shaded
                        fields
        map             maps object
        model_name      display name used in the plot title
    '''

    def __init__(self, field, map_, contour_fields=None, hatch_fields=None, model_name=None):
        self.field = field
        self.contour_fields = contour_fields
        self.hatch_fields = hatch_fields
        self.map = map_
        self.model_name = model_name

    @staticmethod
    def add_logo(ax):
        ''' Puts the NOAA logo at the bottom left of the matplotlib axes. '''
        logo = mpimg.imread('static/noaa-logo-50x50.png')
        imagebox = mpob.OffsetImage(logo)
        # Negative box_alignment nudges the image just outside the axes corner.
        ab = mpob.AnnotationBbox(
            imagebox,
            (0, 0),
            box_alignment=(-0.2, -0.2),
            frameon=False,
            )
        ax.add_artist(ab)

    def _colorbar(self, cc, ax):
        ''' Internal method that plots the color bar for a contourf field.

        If ticks is set to zero, use a user-defined list of clevs from default_specs.
        If ticks is less than zero, use abs(ticks) as the step for labeling clevs. '''
        if self.field.ticks > 0:
            # Positive ticks: a fixed numeric interval between labels.
            ticks = np.arange(np.amin(self.field.clevs),
                              np.amax(self.field.clevs+1), self.field.ticks)
        elif self.field.ticks == 0:
            ticks = self.field.clevs
        else:
            # Negative ticks: label every abs(ticks)-th contour level.
            ticks = self.field.clevs[0:len(self.field.clevs):-self.field.ticks]
        ticks = np.around(ticks, 4)
        cbar = plt.colorbar(cc,
                            ax=ax,
                            orientation='horizontal',
                            pad=0.02,
                            shrink=1.0,
                            ticks=ticks,
                            )
        # Flight-rules category field gets text labels instead of numbers;
        # rjust(30) pushes each label under its color band.
        if self.field.short_name == 'flru':
            ticks = [label.rjust(30) for label in ['VFR', 'MVFR', 'IFR', 'LIFR']]
            cbar.ax.set_xticklabels(ticks, fontsize=12)

    @utils.timer
    def draw(self, show=False): # pylint: disable=too-many-locals, too-many-branches
        ''' Main method for creating the plot. Set show=True to display the
        figure from the command line. '''
        ax = self.map.ax
        # Draw a map and add the shaded field
        self.map.draw()
        cf = self._draw_field(ax=ax,
                              colors=self.field.colors,
                              extend='both',
                              field=self.field,
                              func=self.map.m.contourf,
                              levels=self.field.clevs,
                              )
        self._colorbar(ax=ax, cc=cf)
        # Fields listed here are excluded from contour labeling below.
        not_labeled = [self.field.short_name]
        if self.hatch_fields:
            not_labeled.extend([h.short_name for h in self.hatch_fields])
        # Contour secondary fields, if requested
        if self.contour_fields:
            self._draw_contours(ax, not_labeled)
        # Add hatched fields, if requested
        if self.hatch_fields:
            self._draw_hatches(ax)
        # Add wind barbs, if requested
        add_wind = self.field.vspec.get('wind', False)
        if add_wind:
            self._wind_barbs(add_wind)
        # Add field values at airports
        annotate = self.field.vspec.get('annotate', False)
        if annotate:
            self._draw_field_values(ax)
        # Finish with the title
        self._title()
        # Create a pop-up to display the figure, if show=True
        if show:
            plt.tight_layout()
            plt.show()
        self.add_logo(ax)
        return cf

    def _draw_contours(self, ax, not_labeled):
        ''' Draw the contour fields requested. '''
        for contour_field in self.contour_fields:
            levels = contour_field.contour_kwargs.pop('levels',
                                                      contour_field.clevs)
            cc = self._draw_field(ax=ax,
                                  field=contour_field,
                                  func=self.map.m.contour,
                                  levels=levels,
                                  **contour_field.contour_kwargs,
                                  )
            if contour_field.short_name not in not_labeled:
                try:
                    # Label every 4th contour level only.
                    clab = plt.clabel(cc, levels[::4],
                                      colors='w',
                                      fmt='%1.0f',
                                      fontsize=10,
                                      inline=1,
                                      )
                    # Set the background color for the line labels to black
                    _ = [txt.set_bbox(dict(color='k')) for txt in clab]
                except ValueError:
                    # NOTE(review): message reports self.field, not the
                    # contour_field that actually failed — confirm intent.
                    print(f'Cannot add contour labels to map for {self.field.short_name} \
                            {self.field.level}')

    def _draw_field(self, ax, field, func, **kwargs):
        '''
        Internal implementation that calls a matplotlib function.

        Input args:
            ax:      Figure axis
            field:   Field to be plotted
            func:    Matplotlib function to be called.

        Keyword args:
            Can be any of the keyword args accepted by original func in
            matplotlib.

        Return:
            The return from the function called.
        '''
        x, y = self._xy_mesh(field)
        vals = field.values()[::]
        ret = func(x, y, vals,
                   ax=ax,
                   **kwargs,
                   )
        # Explicitly release the (potentially large) coordinate arrays.
        del x
        del y
        # values() may return a file-backed object with close(); plain
        # arrays don't have it, so fall back to del and log the case.
        try:
            vals.close()
        except AttributeError:
            del vals
            print(f'CLOSE ERROR: {field.short_name} {field.level}')
        return ret

    def _draw_field_values(self, ax):
        ''' Add the text value of the field at airport locations. '''
        annotate_decimal = self.field.vspec.get('annotate_decimal', 0)
        lats = self.map.airports[:, 0]
        lons = 360 + self.map.airports[:, 1]  # positive-longitude convention
        x, y = self.map.m(lons, lats)
        data_values = self.field.values()
        for i, lat in enumerate(lats):
            # Only annotate airports inside the map corners
            # (corners: ll_lat, ur_lat, ll_lon, ur_lon).
            if self.map.corners[1] > lat > self.map.corners[0] and \
               self.map.corners[3] > lons[i] > self.map.corners[2]:
                xgrid, ygrid = self.field.get_xypoint(lat, lons[i])
                data_value = data_values[xgrid, ygrid]
                # Skip off-grid points and zero/NaN values.
                if xgrid > 0 and ygrid > 0:
                    if (not isnan(data_value)) and (data_value != 0.):
                        ax.annotate(f"{data_value:.{annotate_decimal}f}", \
                                    xy=(x[i], y[i]), fontsize=10)
        data_values.close()

    def _draw_hatches(self, ax):
        ''' Draw the hatched regions requested. '''
        # Levels should be included in the settings dict here since they don't
        # correspond to a full field of contours.
        handles = []
        for field in self.hatch_fields:
            colors = field.contour_kwargs.get('colors', 'k')
            hatches = field.contour_kwargs.get('hatches', '----')
            labels = field.contour_kwargs.get('labels', 'XXXX')
            linewidths = field.contour_kwargs.get('linewidths', 0.1)
            # Legend patch uses the last color/hatch in each spec.
            handles.append(mpatches.Patch(edgecolor=colors[-1], facecolor='lightgrey', \
                           label=labels, hatch=hatches[-1]))
            cf = self._draw_field(ax=ax,
                                  extend='both',
                                  field=field,
                                  func=self.map.m.contourf,
                                  **field.contour_kwargs,
                                  )
            # For each level, we set the color of its hatch; the face stays
            # transparent so the shaded field underneath remains visible.
            for collection in cf.collections:
                collection.set_edgecolor(colors)
                collection.set_facecolor(['None'])
                collection.set_linewidth(linewidths)
        # Create legend for precip type field
        if self.field.short_name == 'ptyp':
            plt.legend(handles=handles, loc=[0.25, 0.03])

    def _title(self):
        ''' Creates the standard annotation for a plot. '''
        f = self.field
        atime = f.date_to_str(f.anl_dt)
        vtime = f.date_to_str(f.valid_dt)
        # Create a descriptor string for the first hatched field, if one exists
        contoured = []
        contoured_units = []
        not_labeled = [f.short_name]
        if self.hatch_fields:
            cf = self.hatch_fields[0]
            not_labeled.extend([h.short_name for h in self.hatch_fields])
            # NOTE(review): set(cf.short_name) is a set of single CHARACTERS,
            # so its intersection with ['pres'] is always empty and this
            # branch always runs — was cf.short_name == 'pres' intended?
            if not any(list(set(cf.short_name).intersection(['pres']))):
                title = cf.vspec.get('title', cf.field.long_name)
                contoured.append(f'{title} ({cf.units}, hatched)')
        # Add descriptor string for the important contoured fields
        if self.contour_fields:
            for cf in self.contour_fields:
                if cf.short_name not in not_labeled:
                    title = cf.vspec.get('title', cf.field.long_name)
                    title = title.replace("Geopotential", "Geop.")
                    contoured.append(f'{title}')
                    contoured_units.append(f'{cf.units}')
        contoured = '\n'.join(contoured) # Make 'contoured' a string with linefeeds
        if contoured_units:
            contoured = f"{contoured} ({', '.join(contoured_units)}, contoured)"
        # Analysis time (top) and forecast hour with Valid Time (bottom) on the left
        plt.title(f"{self.model_name}: {atime}\nFcst Hr: {f.fhr}, Valid Time {vtime}",
                  alpha=None,
                  fontsize=14,
                  loc='left',
                  )
        level, lev_unit = f.numeric_level(index_match=False)
        if f.vspec.get('print_units', True):
            units = f'({f.units}, shaded)'
        else:
            units = f''
        # Title or Atmospheric level and unit in the high center
        if f.vspec.get('title'):
            title = f"{f.vspec.get('title')} {units}"
        else:
            level = level if not isinstance(level, list) else level[0]
            title = f'{level} {lev_unit} {f.field.long_name} {units}'
        plt.title(f"{title}", position=(0.5, 1.08), fontsize=18)
        # Two lines for hatched data (top), and contoured data (bottom) on the right
        plt.title(f"{contoured}",
                  loc='right',
                  fontsize=14,
                  )

    def _wind_barbs(self, level):
        ''' Draws the wind barbs. '''
        u, v = self.field.wind(level)
        model = self.model_name
        tile = self.map.tile
        # Set the stride and size of the barbs to be plotted with a masked array.
        # Larger domains use a coarser stride so barbs stay legible.
        if self.map.m.projection == 'lcc' and tile == 'full':
            if model == 'HRRR-HI':
                stride = 12
                length = 4
            else:
                stride = 30
                length = 5
        elif tile == 'HI':
            stride = 1
            length = 4
        elif len(tile) == 2 or tile in ['full', 'conus', 'GreatLakes', 'CA-NV']:
            stride = 10
            length = 4
        else:
            stride = 4
            length = 4
        # Mask everything except every stride-th grid point.
        mask = np.ones_like(u)
        mask[::stride, ::stride] = 0
        mu, mv = [np.ma.masked_array(c, mask=mask) for c in [u, v]]
        x, y = self._xy_mesh(self.field)
        self.map.m.barbs(x, y, mu, mv,
                         barbcolor='k',
                         flagcolor='k',
                         length=length,
                         linewidth=0.2,
                         sizes={'spacing': 0.25},
                         )

    def _xy_mesh(self, field):
        ''' Helper function to create mesh for various plot. '''
        lat, lon = field.latlons()
        # Shift west longitudes into the 0-360 convention used by the map.
        adjust = 360 if np.any(lon < 0) else 0
        return self.map.m(adjust + lon, lat)
|
def foo(stopwords=None):
    """Print the stop-word list.

    BUG FIX: the *stopwords* parameter was accepted but ignored — the file
    was always read. Now an explicitly supplied list is printed as-is, and
    'stopwords.txt' is only consulted when stopwords is None (the previous
    default behavior, so the existing call site is unaffected).
    """
    if stopwords is None:
        with open('stopwords.txt') as s:
            stopwords = s.readlines()
    print(stopwords)
foo(stopwords=None) |
import discord
from random import shuffle
import re
import emoji
import sys
import json
import string
from copy import deepcopy
from datetime import datetime
# NOTE(review): the bot token is empty — client.run() will fail until one is
# supplied; never commit a real token to source control.
token = ""
client = discord.Client()
@client.event
async def on_message(message):
    """Reply to the literal message "hi" with a picture attachment."""
    if message.content == "hi":
        await message.channel.send(file=discord.File('PNG/2C.png'))
client.run(token) |
from django.conf.urls import patterns, url
from fashion import views
# URL routes for the fashion app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# if the project upgrades, replace with a plain list of url()/path() entries.
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^load_lattes/$', views.load_lattes, name='load_lattes'),
    url(r'^researcher/$', views.researcher, name='researcher'),)
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from projects.models import Project, Person
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.contrib.auth.models import User
from projects.forms import NameForm
from django.core.mail import send_mail
from datetime import datetime, timedelta
from django.utils import timezone
import json
import datetime
# Create your views here.
# Creating Forms: https://docs.djangoproject.com/en/2.1/topics/forms/
def sendMail(request):
    """Send a fixed test email and confirm with a plain-text response."""
    send_mail(
        'Test Email',
        'Hello, this is my test email!',
        'jeffreylamothe@gmail.com',
        ['jeffreylamothe@hotmail.com'],
    )
    return HttpResponse('Message sent')
def projectPage(request):
    """Render the static project page."""
    return render(request, 'projects/projectPage.html')
def addProject(request):
    """Render the new-project form."""
    return render(request, 'projects/addProject.html')
def formSkillsDictFromRequest(request):
    """Collect the skill checkbox values from the request's GET parameters.

    Returns a dict mapping every known skill key to its submitted value, or
    the string "False" when the parameter is absent (unchecked checkbox).
    Replaces fourteen copy-pasted request.GET.get(...) lines with a single
    comprehension over the canonical key list; output is identical (the
    doubled parentheses in the original were no-ops).
    """
    skill_keys = (
        'c', 'cSharp', 'cpp', 'dataScience', 'desktopApplication',
        'java', 'javaScript', 'nodeJs', 'php', 'python', 'ruby',
        'sql', 'swift', 'webDevelopment',
    )
    return {key: request.GET.get(key, "False") for key in skill_keys}
def parseSkillsStringIntoArray(skillsString):
    """Parse a str(dict) skills string and return the keys flagged 'True'.

    SECURITY FIX: the original used eval() on the string; since the string
    can come back from the database (Project.skills), that allowed arbitrary
    code execution. ast.literal_eval accepts the same repr-of-dict input but
    evaluates only Python literals.
    """
    import ast  # local import keeps the module's import block untouched
    skillDict = ast.literal_eval(skillsString)
    return [key for key in skillDict if skillDict[key] == 'True']
def projectAdded(request):
    """Create a project from GET parameters for an authenticated user."""
    if not request.user.is_authenticated:
        return render(request, 'projects/createProjectError.html')
    title = request.GET['title']
    body = request.GET['body']
    skillsString = str(formSkillsDictFromRequest(request))
    university = request.GET['university']
    Project(
        title=title,
        pub_date=datetime.datetime.now(),
        body=body,
        skills=skillsString,
        university=university,
        owner=request.user,
    ).save()
    context = {
        'title': title,
        'body': body,
        'skills': str(parseSkillsStringIntoArray(skillsString)),
        'university': university,
    }
    return render(request, 'projects/projectAdded.html', context)
def home(request):
    """Render the landing page listing every project, newest first."""
    message = ""
    if request.user.is_authenticated:
        message = "Hello " + request.user.username + ", "
    context = {
        'projects': Project.objects.order_by('-pub_date'),
        'message': message,
        'userAuthenticated': request.user.is_authenticated,
    }
    return render(request, 'projects/home.html', context)
def getTimeInDays(time):
    """Translate a posting-window keyword into a day count.

    'week' -> 7, 'month' -> 30; anything else maps to 9999, which acts as
    an effectively unbounded window.
    """
    return {'week': 7, 'month': 30}.get(time, 9999)
def filterProjects(request):
    """Filter the project list by university, skills, and posting age.

    Filters are conjunctive: a project must match the selected university
    (if any), share at least one selected skill (if any were checked), and
    be newer than the selected posting window (if one was chosen).

    BUG FIX: the original removed items from objectList while iterating the
    same list, which skips the element after every removal; the age filter
    now builds a new list instead. The loop-invariant cutoff date is also
    computed once instead of per project.
    """
    university = ''
    timeSincePosting = ''
    skills = ''
    # Skills the user checked in the filter form.
    searchSkillsArray = parseSkillsStringIntoArray(str(formSkillsDictFromRequest(request)))
    objects = Project.objects.order_by('-pub_date')
    if 'university' in request.GET.keys():
        university = request.GET['university']
        objects = Project.objects.filter(university=university)
    if len(searchSkillsArray) > 0:
        # Keep projects advertising at least one of the searched skills.
        matching = []
        for project in objects:
            projectSkills = parseSkillsStringIntoArray(project.skills)
            if any(skill in searchSkillsArray for skill in projectSkills):
                matching.append(project)
        # Intersect with the university-filtered set (also de-duplicates).
        objects = set(objects) & set(matching)
    objectList = list(objects)
    if 'timeSincePosting' in request.GET.keys():
        timeSincePosting = request.GET['timeSincePosting']
        timeInDays = getTimeInDays(timeSincePosting)
        print("Time since posting: " + timeSincePosting)
        compareDate = timezone.now() - timedelta(days=timeInDays)
        objectList = [p for p in objectList if p.pub_date >= compareDate]
    data = {
        'userAuthenticated': request.user.is_authenticated,
        'projects': objectList,
        'university': university,
        'skills': skills,
        'timeSincePosting': timeSincePosting,
    }
    return render(request, 'projects/filteredHome.html', data)
def projectDetails(request, projectId):
    """Show one project's detail page; tolerate a single trailing slash."""
    if projectId[-1] == '/':
        projectId = projectId[:-1]
    project = Project.objects.get(pk=projectId)
    context = {'project': project, 'owner': project.owner}
    return render(request, 'projects/projectDetails.html', context)
def signUp(request):
    """Render the account sign-up form."""
    return render(request, 'projects/signUp.html')
def signedUp(request):
    """Create a User plus its Person profile from GET parameters.

    NOTE(review): credentials arrive via GET, so the password lands in
    server/browser logs — this should be a POST form.
    """
    username = request.GET['username']
    email = request.GET['email']
    user = User.objects.create_user(username, email, request.GET['password'])
    Person(
        user=user,
        skills=request.GET['skills'],
        university=request.GET['university'],
    ).save()
    return render(request, 'projects/signedUp.html',
                  {'username': username, 'email': email})
def login(request):
    """Show the login form, or log out a user who is already signed in."""
    if not request.user.is_authenticated:
        return render(request, 'projects/login.html', {'message': ''})
    auth_logout(request)
    return redirect('home')
def loggedIn(request):
    """Authenticate GET-supplied credentials and start a session."""
    user = authenticate(username=request.GET['username'],
                        password=request.GET['password'])
    if user is None:
        print("We did not find a user")
        return render(request, 'projects/login.html',
                      {'message': 'That is not a valid username/password'})
    auth_login(request, user)
    return redirect('home')
def logUserOut(request):
    """Log the current user out and return to the landing page.

    BUG FIX: the original body only assigned an unused local and implicitly
    returned None, which is not a valid Django view response; per its name
    (and the logout pattern used in login()), it should end the session.
    """
    auth_logout(request)
    return redirect('home')
def userHome(request):
    """Authenticate GET-supplied credentials and show the project list."""
    inputUsername = request.GET['username']
    user = authenticate(username=inputUsername,
                        password=request.GET['password'])
    objects = Project.objects.order_by('pub_date')
    if user is None:
        print("We did not find a user")
        return render(request, 'projects/login.html',
                      {'message': 'That is not a valid username/password'})
    print("We found a user")
    auth_login(request, user)
    message = "Hello " + inputUsername + ", "
    return render(request, 'projects/home.html',
                  {'message': message, 'projects': objects})
def profile(request):
    """Show the logged-in user's profile and the projects they own."""
    username = request.user.username
    person = Person.objects.filter(user__username=username)[0]
    userObject = {
        'username': username,
        'skills': person.skills,
        'university': person.university,
        'email': request.user.email,
    }
    projects = Project.objects.filter(owner=request.user)
    return render(request, 'projects/profile.html',
                  {'user': userObject, 'projects': projects})
def editProfile(request):
    """Render the profile edit form pre-filled with the user's data."""
    username = request.user.username
    person = Person.objects.filter(user__username=username)[0]
    userObject = {
        'username': username,
        'skills': person.skills,
        'university': person.university,
        'email': request.user.email,
    }
    return render(request, 'projects/editProfile.html', {'user': userObject})
def profileEdited(request):
    """Persist the edited username, then delegate to the profile view.

    CLEANUP: the original re-queried Person and Project and built a
    userObject after saving, then discarded all of it and called
    profile(request), which recomputes everything itself — the dead
    database queries are removed.
    """
    user = request.user
    user.username = request.GET["username"]
    user.save()
    return profile(request)
def clearProjects(request):
    """Delete every project row, one at a time (preserves per-instance
    delete behavior, unlike a bulk queryset delete)."""
    for proj in Project.objects.all():
        proj.delete()
    return HttpResponse('All projects have been removed from the database.')
def editProject(request, projectId):
    """Show the edit form for one project; tolerate a single trailing slash."""
    if projectId[-1] == '/':
        projectId = projectId[:-1]
    return render(request, 'projects/editProject.html',
                  {'project': Project.objects.get(pk=projectId)})
def projectEdited(request, projectId):
    """Render the confirmation page shown after a project edit.

    BUG FIX: the template path was misspelled 'proejcts/...'; every other
    view in this module renders from the 'projects/' template directory,
    so the typo produced a TemplateDoesNotExist error.
    """
    return render(request, 'projects/projectEdited.html')
def deleteProject(request, projectId):
    """Delete the given project and confirm with its title."""
    if projectId[-1] == '/':
        projectId = projectId[:-1]
    project = Project.objects.get(pk=projectId)
    title = project.title
    project.delete()
    return render(request, 'projects/deleteProject.html', {'name': title})
|
#Logan Wang 51603232
'''
* Project#3 Try Not to Breathe
* ICS 32A
* 11/13/20
* Handles search and reverse searching with Nominatim API
* @author Logan Wang
'''
import urllib.request
import urllib.parse
import json
import math
import time
class NominatimAPIHandler:
    """Wraps Nominatim forward (location -> coordinates) and reverse
    (coordinates -> location) geocoding over plain urllib requests.

    query_string is either a free-text location or "lat lon", depending on
    which methods are called afterwards.
    """

    def __init__(self, query_string: str):
        self.query_str = query_string
        self.base_url = 'https://nominatim.openstreetmap.org'

    def _convert_location_query(self, query_string: str) -> str:
        """given a query_string, return a formatted string that can be used to do a search on Nominatim's API with the given
        query details"""
        query_list = query_string.split()
        real_query = ''
        # Join words with '+' (dropping the trailing one) and percent-encode
        # commas; other characters are passed through unescaped.
        for element in query_list:
            real_query += element + '+'
        real_query = real_query[:-1]
        real_query = real_query.replace(',', '%2C')
        formatted_query = f'/search?q={real_query}&format=json'
        return formatted_query

    def _convert_coords_query(self, query_string: str) -> str:
        """given a query_string, return a formatted string that can be used to do a reverse search on Nominatim's API
        with the given query latitude and longitude"""
        query_list = query_string.split()
        query_parameters = [
            ('format', 'json'), ('lat', query_list[0]), ('lon', query_list[1])
        ]
        real_query = urllib.parse.urlencode(query_parameters)
        formatted_query = f'/reverse?{real_query}'
        return formatted_query

    def return_data_from_query(self, location_str: str, type: int) -> (int, int) or str:
        """given a location string, return a piece of data with the Nominatim API

        if type == 0, then will return coordinates from a location
        if type == 1, then will return a location from coordinates

        NOTE(review): every failure path only prints and falls through, so
        the method implicitly returns None on any error — callers that index
        the result will then raise. The bare except clauses also swallow
        KeyboardInterrupt; consider narrowing.
        """
        # Crude rate limiting: Nominatim's usage policy caps request rate.
        time.sleep(1)
        try:
            # Makes HTTP request to Nominatim API
            url_resource = ''
            if (type == 0):
                url_resource = self._convert_location_query(location_str)
            else:
                url_resource = self._convert_coords_query(location_str)
            url = self.base_url + url_resource
            request = urllib.request.Request(url, headers={
                'Referer': 'https://www.ics.uci.edu/~thornton/ics32a/ProjectGuide/Project3/loganw1'})
            response = urllib.request.urlopen(request)
            # If Response status not correct, do not even attempt to read file
            if response.status != 200:
                print('FAILED')
                print(response.status)
                print(url)
                print('NOT 200')
            else: # If correct response status, put url's data in a variable to convert into dictionary later
                try:
                    urldata = response.read().decode(encoding='utf-8')
                    try:
                        # tries to load a json file, if not a json file -> failure
                        json_data = json.loads(urldata)
                        if type == 0: # returns tuple of lat,lon
                            # Forward search returns a list; take the top hit.
                            return (float(json_data[0]['lat']), float(json_data[0]['lon']))
                        else: # returns string description of location
                            return json_data['display_name']
                    except:
                        print('FAILED')
                        print(response.status)
                        print(url)
                        print('FORMAT')
                finally:
                    # Always release the connection, even on parse failure.
                    response.close()
        except: # If error in HTTP request, assume network error
            print('FAILED')
            print(url)
            print('NETWORK')

    def make_readable_coords(self, coordinates:str)->str:
        """Given coordinates of latitude and longitude separated by a space,
        return a string coordinate that indicated North,South,East,West directions"""
        coords = coordinates.split()
        readable_coords=''
        # Sign encodes hemisphere: +lat = N, -lat = S; +lon = E, -lon = W.
        if float(coords[0]) >0:
            readable_coords+= str(abs(float(coords[0]))) +'/N '
        else:
            readable_coords += str(abs(float(coords[0]))) +'/S '
        if float(coords[1]) >0:
            readable_coords+= str(abs(float(coords[1]))) +'/E'
        else:
            readable_coords += str(abs(float(coords[1]))) +'/W'
        return readable_coords

    def print_details(self)->None:
        """Prints a nominatim handler class' coordinates and display_name.

        NOTE(review): assumes query_str holds "lat lon" — only valid for
        handlers constructed with coordinates.
        """
        print(self.make_readable_coords(self.query_str))
        print(self.return_data_from_query(self.query_str,1))

    def readable_coords_from_location(self)->str:
        """If a nominatim handler class was constructed with a location, return its coordinates in a readable format"""
        coords = self.return_data_from_query(self.query_str,0)
        coords_str = str(coords[0]) +' ' + str(coords[1])
        return self.make_readable_coords(coords_str)
from browser import document, ajax
import json
def get_input_data():
    """Read the questionnaire fields from the DOM and coerce each to the
    type the prediction endpoint expects."""
    return {
        'age': int(document['age'].value),
        'gender': 0,  # form has no gender input; fixed to 0 as before
        'race': int(document['race'].value),
        'height': float(document['height'].value),
        'weight': float(document['weight'].value),
        'pulse': int(document['pulse'].value),
        'heaviest': float(document['heaviest'].value),
        'smoke': document['smoke'].value,
        'age_smoke': int(document['age_smoke'].value),
        'pressure': document['pressure'].value,
        'salt': document['salt'].value,
        'relative': document['relative'].value,
        'food': int(document['food'].value),
        'milk': document['milk'].value,
        'supps': int(document['supps'].value),
        'tv': float(document['tv'].value),
        'income': float(document['income'].value),
    }
def display_prediction(req):
    """Ajax completion callback: render the predicted risk into the page."""
    payload = json.loads(req.text)
    document['prediction'].html = payload['risk']
def send_input_data(data):
    """POST *data* as JSON to /predict; the asynchronous response is handled
    by display_prediction via the 'complete' binding."""
    req = ajax.Ajax()
    # Bind the callback before open/send so the completion event is caught.
    req.bind('complete', display_prediction)
    req.open('POST', '/predict', True)
    req.set_header('Content-Type', 'application/json')
    req.send(json.dumps(data))
def click(event):
    """Submit handler: gather the form fields and request a prediction."""
    send_input_data(get_input_data())
document['predict'].bind('click', click) |
from django.contrib.auth.models import PermissionsMixin
from django.utils.translation import templatize
from django.views.generic import TemplateView
from django.http import HttpResponse
from django import http
from django.shortcuts import render
from rest_framework import viewsets,permissions
from .models import Producto
from .serializers import ProductoSerializer
from django.core import serializers
import json
# Create your views here.
class ProductoViewSet(viewsets.ModelViewSet):
    """Standard DRF CRUD endpoint over the full Producto table."""
    queryset = Producto.objects.all()
    serializer_class = ProductoSerializer
class ProductoByName(TemplateView):
    """Ad-hoc product search endpoint returning Django-serialized JSON."""

    def get(self, request) -> http.HttpResponse:
        # NOTE(review): request.GET is printed but ignored — the filter is
        # hard-coded to marca 'cocacolacompany'; looks like leftover debug
        # code, confirm before relying on this endpoint.
        print(request.GET)
        query = Producto.objects.filter(marca__iexact='cocacolacompany')
        return HttpResponse(serializers.serialize('json',[item for item in query]))

    def post(self, request) -> http.HttpResponse:
        """Search by JSON body: {'type': <field>, 'str': <term>}.

        Supported types: all, nombre, marca, precio (±10% band), descripcion.
        NOTE(review): an unrecognized 'type' leaves query as None, so the
        list comprehension below raises TypeError (HTTP 500); a missing key
        raises KeyError — consider validating the payload.
        """
        request_json = json.loads(request.body)
        query = None
        if(request_json['type']=='all'):
            query = Producto.objects.all()
        elif(request_json['type']=='nombre'):
            query = Producto.objects.filter(nombre__icontains=request_json['str'])
        elif(request_json['type']=='marca'):
            query = Producto.objects.filter(marca__icontains=request_json['str'])
        elif(request_json['type']=='precio'):
            # Match prices within ±10% of the requested value.
            query = Producto.objects.filter(precio__gte=float(request_json['str'])*0.9,precio__lte=float(request_json['str'])*1.1)
        elif(request_json['type']=='descripcion'):
            query = Producto.objects.filter(descripcion__icontains=request_json['str'])
        return HttpResponse(serializers.serialize('json',[item for item in query]),content_type="application/json")
# @see https://adventofcode.com/2015/day/13
import re
from itertools import permutations
def parse_line(s: str):
    """Parse one AoC day-13 line into (subject, neighbour, happiness_delta).

    Example: 'Alice would gain 54 happiness units by sitting next to Bob.'
    returns ('Alice', 'Bob', 54).
    """
    r = re.match(
        # Bug fix: the trailing '.' was unescaped and matched any character.
        r'([A-Z][a-z]+) would (gain|lose) ([\d]+) happiness units by sitting next to ([A-Z][a-z]+)\.',
        s.strip())
    # 'lose' lines contribute a negative happiness change.
    h = int(r[3]) if r[2] == 'gain' else -int(r[3])
    return r[1], r[4], h
# Build the pairwise happiness map: hmap[person][neighbour] -> delta.
hmap = {}
with open('day13_input.txt', 'r') as puzzle_input:
    for line in puzzle_input:
        person, neighbour, delta = parse_line(line)
        hmap.setdefault(person, {})[neighbour] = delta
def calc_happiness_for_arrangement(a: list, hm: dict):
    """Total happiness change for the circular seating order *a* under *hm*."""
    # The table is circular, so the first and last guests are neighbours too.
    total = hm[a[0]][a[-1]] + hm[a[-1]][a[0]]
    # Add the mutual happiness of each adjacent pair along the row.
    for left, right in zip(a, a[1:]):
        total += hm[left][right] + hm[right][left]
    return total
def calc_happiness_for_optimal_sitting(hm: dict):
    """Best achievable happiness over all distinct circular arrangements."""
    names = set(hm.keys())
    # Each circle appears twice (clockwise / anticlockwise); keep only the
    # lexicographically smaller direction to halve the work.
    return max(
        calc_happiness_for_arrangement(p, hm)
        for p in permutations(names, len(names))
        if p <= p[::-1]
    )
def add_self(hm: dict):
    """Seat 'me' at the table with zero happiness both ways (mutates *hm*)."""
    guests = list(hm.keys())
    for guest in guests:
        hm[guest]['me'] = 0
    hm['me'] = {guest: 0 for guest in guests}
    return hm
# Part 1: optimal seating for the guests parsed from the input file.
print('------------ PART 01 -------------')
print('Total change in happiness:', calc_happiness_for_optimal_sitting(hmap))
print('\n------------ PART 02 -------------')
# Add myself to the list
# Part 2: repeat with an apathetic 'me' (0 happiness both ways) at the table.
hmap = add_self(hmap)
print('Total change in happiness:', calc_happiness_for_optimal_sitting(hmap))
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the codec picker.
This file is also the place for tests that cover several codecs."""
import os
import unittest
import encoder
import pick_codec
class TestPickCodec(unittest.TestCase):
  """Cross-codec invariants for the codec picker."""

  def test_DistinctWorkdirs(self):
    # No two codecs may share a cache workdir, or their artifacts would collide.
    workdirs = {}
    for codec_name in pick_codec.AllCodecNames():
      context = encoder.Context(pick_codec.PickCodec(codec_name))
      workdir = os.path.abspath(context.cache.WorkDir())
      self.assertNotIn(workdir, workdirs,
                       'Duplicate workdir %s for codec %s' %
                       (workdir, codec_name))
      workdirs[workdir] = codec_name
# Allow running this module directly as a test script.
if __name__ == '__main__':
  unittest.main()
|
def sieve(N):
    """Return all primes below N (sieve of Eratosthenes).

    Bug fix: the original used Python-2-only ``N/2`` (float under Python 3,
    which breaks the list multiplication) and ``xrange``; ``//`` and
    ``range`` behave the same on both interpreters.
    """
    # Index i holds 1 while i is still a prime candidate; even slots > 2
    # start at 0, so only 2 and the odd numbers are candidates.
    s = [0, 0, 1] + [1, 0] * (N // 2)
    i = 3
    while i * i < N:
        if s[i]:
            # Strike out every multiple of i from 2*i upwards.
            for itr in range(i * 2, N, i):
                s[itr] = 0
        i += 2
    return [i for i in range(N) if s[i] == 1]
def palidrome(n):
    """Return True when the decimal digits of *n* read the same both ways.

    Bug fix: the original compared every digit against the LAST digit
    (``s[l-1]``) instead of its mirror (``s[l-1-i]``), so e.g. 1221 was
    wrongly reported as not a palindrome.  Also replaces Python-2-only
    ``xrange`` / ``l/2`` with the cross-version ``range`` / ``l//2``.
    """
    s = str(n)
    l = len(s)
    for i in range(l // 2):
        if s[i] != s[l - 1 - i]:
            return False
    return True
# Sum the first 1000 primes produced by the sieve.
# Fixed the Python-2-only `print s` statement: calling print with a single
# argument works identically on Python 2 and 3.
total = sum(sieve(10000)[:1000])
print(total)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 16:11:12 2020
@author: adeela
"""
'''
BFS(G, s)
    for each v in G:
        color[v] = WHITE
        d[v] = INFINITY
    color[s] = GRAY
    d[s] = 0
    Q = empty queue
    ENQUEUE(Q, s)
    while Q is not empty:
        u = DEQUEUE(Q)
        for each v in Adj[u]:
            if color[v] == WHITE:
                color[v] = GRAY
                d[v] = d[u] + 1
                ENQUEUE(Q, v)
        color[u] = BLACK
'''
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add 'skill' (float, default 25.0) plus 'skip' and 'vote'
    (integer counters, default 0) to the Game model."""
    dependencies = [
        ('games', '0008_auto_20141108_1436'),
    ]
    operations = [
        migrations.AddField(
            model_name='game',
            name='skill',
            # NOTE(review): 25.0 looks like a rating prior (e.g. TrueSkill's
            # default mu) — confirm against the game-ranking logic.
            field=models.FloatField(default=25.0),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='game',
            name='skip',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='game',
            name='vote',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
|
import requests
from lxml import etree
# Scrape the quote page; prices are rendered with a custom icon font whose
# private-use codepoints must be mapped back to ASCII digits.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Referer': 'http://i.jzj9999.com/quoteh5/',
}
url = 'http://i.jzj9999.com/quoteh5/'
session = requests.Session()
s = session.get(url, headers=headers)
html = s.text
print(html)
tree = etree.HTML(html)
name = tree.xpath('//span[@class="symbol-name y-middle"]//text()')
print(name)
price = tree.xpath('//*[starts-with(@class,"symbol-price")]//text()')
# Icon-font codepoint -> digit mapping.
d = {
    '\ue1f2': '0',
    '\uefab': '1',
    '\ueba3': '2',
    '\uecfa': '3',
    '\uedfd': '4',
    '\ueffa': '5',
    '\uef3a': '6',
    '\ue6f5': '7',
    '\uecb2': '8',
    '\ue8ae': '9',
}
print(price)
p = ','.join(price)
print(p)
count = 0
for k, v in d.items():
    # Bug fix: str.replace returns a NEW string; the original discarded the
    # result, so the decoded price was never actually produced.
    p = p.replace(k, v)
    count += 1
print(price)
print(count)
|
# Read a point P = (x, y) from the user.
inputX = input("x = ?")
inputY = input("y = ?")
x = float(inputX)
y = float(inputY)
# Result messages (Japanese): which circle(s) contain P.
# R1: radius-10 circle centred at the origin; R2: radius-10 circle centred at (10, 0).
a = "PはR1とR2の両方の円の内側にある。"
b = "PはR1の内側にある。"
c = "PはR2の内側にある。"
d = "PはR1の内側でもなく、R2の内側でもない。"
# Classify P: inside both circles, only R1, only R2, or neither
# (strict inequality, so points exactly on a circle count as outside).
if x**2 + y**2 < 10**2 and (x-10)**2 + y**2 < 10**2:
    print(a)
elif x**2 + y**2 < 10**2:
    print(b)
elif (x-10)**2 + y**2 < 10**2:
    print(c)
else:
    print(d)
"""
実験結果3(2)
(x,y) |領域
-------+--------
(1,1) |A
(1,5) |B
(11,0) |C
(5,9) |D
"""
#
|
from socket import *
import asyncio
import pickle
all_connections = []
all_addresses = []
async def echo_server(address, loop):
    """Accept TCP connections on *address* and spawn a reader task per client.

    Live sockets are tracked in the module-level all_connections /
    all_addresses lists; leftovers from a previous run are closed first.
    """
    sock = socket(AF_INET, SOCK_STREAM)
    sock.bind(address)
    sock.listen(1)
    sock.setblocking(False)
    # Close any connections left over from a previous invocation.
    for c in all_connections:
        c.close()
    del all_connections[:]
    del all_addresses[:]
    while True:
        client, addr = await loop.sock_accept(sock)
        all_connections.append(client)
        # Bug fix: record and log the CLIENT's address (addr); the original
        # appended the server's listen address and always printed
        # all_addresses[0] regardless of which client connected.
        all_addresses.append(addr)
        print('connection from', addr)
        loop.create_task(get_data(client, loop))
async def get_data(client, loop):
    """Read pickled payloads from *client* until the peer disconnects."""
    while True:
        data = await loop.sock_recv(client, 1024)
        # Bug fix: an empty read means the peer closed the connection; the
        # check must happen BEFORE unpickling, since pickle.loads(b'')
        # raises EOFError.
        if not data:
            break
        # SECURITY: unpickling network data executes arbitrary code if the
        # peer is untrusted; consider json or a length-prefixed format.
        message = pickle.loads(data)
        print('recived data is ', message)
    print('connection closing ')
    client.close()
    print('Connection Closed')
if __name__ == '__main__':
    # Run the accept loop forever on port 25000 (all interfaces).
    loop = asyncio.get_event_loop()
    loop.run_until_complete(echo_server(('', 25000), loop))
|
from flask import Flask
from flask_restful import Api
from resources.users import UsersList, Me
from resources.properties import PropertiesList, PropertyResource
# Flask REST API wiring: users (landlords/tenants) and properties.
app = Flask(__name__)
# Development setting; disable in production.
app.debug = True
api = Api(app, catch_all_404s=True)
# The same UsersList resource serves both roles, distinguished by the
# 'type' constructor kwarg.
api.add_resource(UsersList, '/landlords', endpoint='landlords', resource_class_kwargs={'type': 'landlord'})
api.add_resource(UsersList, '/tenants', endpoint='tenants', resource_class_kwargs={'type': 'tenant'})
# NOTE(review): ids are hard-coded to 1 — presumably a placeholder until
# real authentication exists; confirm.
api.add_resource(Me, '/me/', endpoint='me', resource_class_kwargs={'id': 1})
api.add_resource(PropertiesList, '/properties', endpoint='properties', resource_class_kwargs={'landlord_id': 1})
api.add_resource(PropertyResource, '/property/<int:id>', endpoint='property', resource_class_kwargs={'landlord_id': 1})
if __name__ == '__main__':
    app.run()
|
from .Action import Action
# A group of actions that are performed/reversed together, treated as a single action.
class ActionGroup(Action):
    """A set of actions performed and reversed together as one atomic action."""

    def __init__(self, actions):
        Action.__init__(self)
        self.actions = actions

    def add(self, action):
        """Append *action* unless it is already part of the group."""
        if action not in self.actions:
            self.actions.append(action)

    def remove(self, action):
        """Drop *action* from the group if present."""
        if action in self.actions:
            self.actions.remove(action)

    def modifiesState(self):
        """True when any member action modifies state."""
        return any(member.modifiesState() for member in self.actions)

    def cleanup(self):
        for member in self.actions:
            member.cleanup()
        self.actions = None
        Action.cleanup(self)

    def do(self):
        Action.do(self)
        for member in self.actions:
            member.do()

    def undo(self):
        # Reverse the members in the opposite order to how they were performed.
        Action.undo(self)
        for member in reversed(self.actions):
            member.undo()
|
"""
Student: Karina Jonina - 10543032
Module: B8IT110
Module Name: HDIP PROJECT
Project Objective: Time Series Forecasting of Cryptocurrency
Task: Scraping Yahoo Finance so that the user can select the crypto currency
based on Market Cap
"""
#importing important packages
import re
import json
#from io import String10
import requests
import codecs
from bs4 import BeautifulSoup
import pandas as pd
from pandas.io.json import json_normalize
# Page URLs for the Yahoo Finance cryptocurrency screener (count = 0, 25, 50).
url_yahoo_finance = [
    'https://finance.yahoo.com/cryptocurrencies/?offset=25&count=' + str(num)
    for num in range(0, 51, 25)
]
# getting the live page
def get_yahoo_table():
    """Scrape the Yahoo Finance crypto screener and return its first row.

    Locates the '-- Data --' bootstrap JSON embedded in a <script> tag and
    drills into the ScreenerResultsStore payload.
    """
    headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
    url = 'https://finance.yahoo.com/cryptocurrencies/'
    # url = 'https://coinmarketcap.com/'
    response = requests.get(url , headers = headers)
    content = response.content
    soup = BeautifulSoup(content, features="html.parser")
    pattern = re.compile(r'\s--\sData\s--\s')
    script_data = soup.find('script', text = pattern).contents[0]
    # Trim the JS wrapper around the JSON blob; the -2 / -12 offsets are
    # empirical and will break if Yahoo changes the page template.
    start = script_data.find("context")-2
    json_data = json.loads(script_data[start:-12])
    # this is where the data is
    crypto_json = json_data['context']['dispatcher']['stores']['ScreenerResultsStore']['results']['rows']
    return crypto_json[0]
# Smoke call executed at import time (return value discarded).
get_yahoo_table()
import socket
import sys
import select
import subprocess
import argparse
import time
import re, uuid
# Periodically report a coarse WLAN signal-quality code plus this host's MAC
# address to a TCP server.
parser = argparse.ArgumentParser(description='Display WLAN signal strength.')
parser.add_argument(dest='interface', nargs='?', default='wlan0',
                    help='wlan interface (default: wlan0)')
args = parser.parse_args()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('192.168.43.95', 12345))
time.sleep(0.1)
client.send("\n".encode('ascii'))
socket_list = [sys.stdin, client]
while True:
    # This host's MAC address, formatted aa:bb:cc:dd:ee:ff.
    macAd = ':'.join(re.findall('..', '%012x' % uuid.getnode()))
    # Bug fix: universal_newlines=True makes stdout yield str lines; the
    # original iterated bytes under Python 3, so `'Link Quality' in line`
    # raised TypeError.
    cmd = subprocess.Popen('iwconfig %s' % args.interface, shell=True,
                           stdout=subprocess.PIPE, universal_newlines=True)
    b = -1
    for line in cmd.stdout:
        if 'Link Quality' in line:
            # Slice out the signal-level magnitude from the iwconfig line.
            a = line.lstrip(' ')[-10:-7]
            a = abs(int(a))
            # Bucket the signal into a coarse quality code for the server.
            # NOTE(review): values in [50] and (57, 60] leave b unchanged —
            # confirm the gaps are intended.
            if a < 50:
                b = '0'
            if a > 50 and a < 57:
                b = '1'
            if a > 60:
                b = '2'
            client.send(str(b).encode('ascii'))
            client.send(macAd.encode('ascii'))
        elif 'Not-Associated' in line:
            b = 'No signal'
            client.send(str(b).encode('ascii'))
|
from django.db import models
from loginsignup.models import Beaver
# Create your models here.
class Post(models.Model):
    """A user post with optional caption and image, plus a like counter."""
    # Author; deleting the Beaver removes their posts.
    post_creator = models.ForeignKey(
        Beaver,
        related_name="posts",
        related_query_name="post",
        on_delete=models.CASCADE,
    )
    # Set once, when the post is created.
    posted_on = models.DateField(auto_now_add=True)
    caption = models.TextField(null=True)
    picture = models.ImageField(null=True, upload_to="images/post/", blank=True)
    # Denormalised like count (individual likes live in the Like model).
    likes = models.IntegerField(default=0)
    # NOTE(review): the purpose of 'choice' is not evident from this file — confirm.
    choice = models.TextField()
    class Meta:
        verbose_name_plural = "Posts"
    def __str__(self):
        return f"{self.post_creator} {self.posted_on}"
class Comment(models.Model):
    """A comment left by a Beaver on a Post."""
    # Deleting the post removes its comments.
    post = models.ForeignKey(
        Post,
        related_name="comments",
        related_query_name="comment",
        on_delete=models.CASCADE,
    )
    # Deleting the user removes their comments.
    comment_creator = models.ForeignKey(
        Beaver,
        related_name="user_comments",
        related_query_name="user_comment",
        on_delete=models.CASCADE,
    )
    comment = models.TextField()
    posted_on = models.DateField(auto_now_add=True)
    # Denormalised like count for the comment itself.
    likes = models.IntegerField(default=0)
    class Meta:
        verbose_name_plural = "Comments"
    def __str__(self):
        return f"{self.comment_creator} {self.post}"
class Like(models.Model):
    """One Beaver liking one Post (no uniqueness constraint is declared here)."""
    post = models.ForeignKey(Post, related_name="post_likes", on_delete=models.CASCADE)
    liker = models.ForeignKey(
        Beaver,
        related_name="total_likes",
        related_query_name="like",
        on_delete=models.CASCADE,
    )
    class Meta:
        verbose_name_plural = "Likes"
    def __str__(self):
        return f"{self.liker} {self.post}"
class FriendRequest(models.Model):
    """A pending friend request between two Beavers; deleted once accepted."""
    sender = models.ForeignKey(
        Beaver, related_name="request_sends", on_delete=models.CASCADE
    )
    receiver = models.ForeignKey(
        Beaver, related_name="request_receives", on_delete=models.CASCADE
    )
    def __str__(self):
        return f"{self.sender} --> {self.receiver}"
    @classmethod
    def sendRequest(cls, sender, receiver):
        """Create a request from *sender* to *receiver* unless one already exists."""
        request = cls.objects.filter(sender=sender, receiver=receiver).first()
        if request is None:
            cls.objects.create(sender=sender, receiver=receiver)
    # The receiver can only accept the request
    def acceptRequest(self):
        """Turn this request into a friendship, then delete the request row."""
        Beaver.make_friend(creator=self.sender, friend=self.receiver)
        self.delete()
|
from django.shortcuts import render
from Segment.models import forecastol_img
from django.http import HttpResponse
import MySQLdb
import collections
import json
from PIL import Image
from Segment.leaf_predict import predict, count, count2
import os
import base64
import cv2
def get_data(sql):
    """Run *sql* against the local 'lybb' MySQL database and return all rows.

    Robustness fix: the cursor and connection are now closed in ``finally``
    blocks, so an error inside ``execute``/``fetchall`` no longer leaks the
    connection (the original only closed them on the success path).
    """
    conn = MySQLdb.connect(
        host='localhost',
        port=3306,
        user='root',
        passwd='123456',
        db='lybb',
        charset='utf8',
    )
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            results = cur.fetchall()
        finally:
            cur.close()
        conn.commit()
    finally:
        conn.close()
    return results
def forecastol_update_leaf(request):
    """Accept a base64 leaf image via POST, store it, run the disease
    predictor, save the segmented result image, and return the stored
    file name as a JSON array.

    NOTE(review): a non-POST request returns None (a Django error) — confirm
    whether a redirect/405 is wanted.
    """
    if request.method == 'POST':
        # Strip the data-URL prefix so only the raw base64 payload remains.
        imgbase64 = request.POST.get('imgbase64', '')
        #print(imgbase64)
        if "data:image/png;base64,"in imgbase64:
            imgbase64 = imgbase64.replace("data:image/png;base64,", "")
        elif "data:image/jpeg;base64," in imgbase64:
            imgbase64 = imgbase64.replace("data:image/jpeg;base64,", "")
        #print(imgbase64)
        imgdata = base64.b64decode(imgbase64)
        path = os.getcwd()
        newpath = path+"/forecast_img/"
        # Next image id = current max id in the table + 1.
        sql = "select max(id) " + "from Segment_forecastol_img"
        data = get_data(sql)
        #print(data)
        pic_num = int(data[0][0])+1
        pic_root = newpath+'img_'+str(pic_num)+'.png'
        picname = 'img_'+str(pic_num)+'.png'
        with open(pic_root, "wb+")as f:
            f.write(imgdata)
        #print(picname)
        # Record the stored image path via the Django model.
        info = forecastol_img()
        info.id = pic_num
        info.imgroot = pic_root
        info.save()
        oblist = []
        oblist.append(picname)
        data = json.dumps(oblist)
        # Run segmentation/prediction on the stored image.
        result = predict(pic_root)
        result2 = count(result)
        result3 = count2()
        #print(result)
        #print(path)
        # Save the binary image of the segmented lesion spots.
        #cv2.imwrite(path + '/Segment/static/resultimg/' + "img_" + str(pic_num) + ".jpg", result)
        result3.save(path + '/Segment/static/resultimg/' + "img_" + str(pic_num) + ".png")
        #print("ssss:"+data)
        return HttpResponse(data)
def mobile_forecast_leaf(request):
    """Render the mobile leaf-forecast page."""
    return render(request, "mobileforecastleaf.html")
|
import tkinter as tk
import threading
from functools import partial
from app_sub1.service_Student_Member import StudentMember
import time
def remove_ent(app):
    """Clear all four entry widgets on the app's sub frame."""
    for entry in (app.entry1, app.entry2, app.entry3, app.entry4):
        entry.delete(0, 'end')
def btn1_clicked(app,service,service2,event):
    """Save button handler: build a StudentMember from the form fields,
    store it via *service2*, then start the camera capture thread in
    *service* for that student ID."""
    print('sub_btn1 clicked: DB updating')
    print(event)
    # Form fields: grade / class / number / name.
    grade = app.entry1.get()
    Class = app.entry2.get()
    num = app.entry3.get()
    name = app.entry4.get()
    ID = ID_make(grade, Class, num)
    member = StudentMember(ID, name)
    #h = service.select(id)
    # if h != None:
    # print('중복된 학번!!! 다시 입력해주세요')
    # else:
    service2.insert(member)
    service2.update(ID)
    print('DB입력 성공')
    print('sub_btn1 clicked: video_show threading')
    # Show the capture GUI, then stream frames on a background thread so the
    # Tk main loop stays responsive.
    service.video_GUI(ID,"Manager")
    video_showing = threading.Thread(target=service.video_show, args=(ID,"Manager"))
    video_showing.start()
    remove_ent(app)
    time.sleep(2)
#def btn2_clicked(app,event):
# print('sub_btn2 clicked')
def make(app, service,service2):
    """Build the student-info entry form (labels, entries, save button) on
    app.sub_fr and bind the save button to btn1_clicked."""
    app.label0 = tk.Label(app.sub_fr, font=60, text="학생 정보 입력")
    app.label1 = tk.Label(app.sub_fr, font=60, text="학년 : ")
    app.entry1 = tk.Entry(app.sub_fr, width=10)
    app.label2 = tk.Label(app.sub_fr, font=60, text="반 : ")
    app.entry2 = tk.Entry(app.sub_fr, width=10)
    app.label3 = tk.Label(app.sub_fr, font=60, text="번호 : ")
    app.entry3 = tk.Entry(app.sub_fr, width=10)
    app.label4 = tk.Label(app.sub_fr, font=60, text="이름 : ")
    app.entry4 = tk.Entry(app.sub_fr, width=10)
    # Row 0: title; row 1: grade/class/number; row 2: name.
    app.label0.grid(row=0, column=0, columnspan=6)
    app.label1.grid(row=1, column=0)
    app.entry1.grid(row=1, column=1)
    app.label2.grid(row=1, column=2)
    app.entry2.grid(row=1, column=3)
    app.label3.grid(row=1, column=4)
    app.entry3.grid(row=1, column=5)
    app.label4.grid(row=2, column=0)
    app.entry4.grid(row=2, column=1)
    app.btn1 = tk.Button(app.sub_fr, width=15, font=60, text='저장')
    app.btn1.grid(row=3,column=2,columnspan=2)
    # partial pre-binds app/service/service2; Tk appends the event argument.
    app.btn1.bind('<Button-1>', partial(btn1_clicked,app,service,service2))
    #app.btn1['command'] = btn1_clicked
    #app.btn1.bind('<Button-1>', partial(btn1_clicked, app))
    #app.btn2.bind('<Button-1>', partial(btn2_clicked, app))
def ID_make(g, c, n):
    """Build a student ID string: grade + 2-digit class + 2-digit number.

    e.g. ID_make('1', '2', '3') -> '10203'.

    Removed a leftover debug ``print(type(id))`` which printed the type of
    the *builtin* ``id`` function — it never referenced this function's
    result.
    """
    # Zero-pad class and number to two digits.
    if len(c) < 2:
        c = '0' + c
    if len(n) < 2:
        n = '0' + n
    return g + c + n
from datetime import datetime
import logging
import requests
import tweepy
from secrets import A_TOKEN, A_TOKEN_SECRET, C_KEY, C_SECRET
# Daily joke bot: asks sunrise-sunset.org whether the sun rose over the
# configured coordinates and tweets "date: Yes/No".
logging.basicConfig(
    filename='sun_will_rise.log',
    filemode='a',
    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
    datefmt='%H:%M:%S',
    level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info('Starting sun_will_rise_script')
# Tweepy auth
# NOTE(review): the credentials module is named 'secrets', which shadows the
# stdlib `secrets` module — consider renaming it.
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
# Get sunrise info from sunrise-sunset.org
params = {
    'lat': -33.900387,
    'lng': 18.413332,
}
sunrise_response = requests.get('https://api.sunrise-sunset.org/json', params=params).json()
# True whenever the API reports any sunrise time for these coordinates.
did_sunrise = sunrise_response.get('results').get('sunrise') is not None
# Get today's date
today = datetime.today().strftime('%d %b %Y')
# Construct tweet
did_sunrise_str = 'Yes' if did_sunrise else 'No'
did_sunrise_tweet = f'{today}: {did_sunrise_str}'
api.update_status(did_sunrise_tweet)
logger.info(f'Tweeted: {did_sunrise_tweet}')
|
# Generated by Django 2.1.5 on 2019-10-14 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Category hierarchical (self-FK 'parent') and turn
    Product.category into a many-to-many relation."""

    dependencies = [
        ('Product', '0002_auto_20191014_1045'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='parent',
            # Bug fix: on_delete must be a callable such as models.SET_NULL;
            # the original passed the string 'SET_NULL', which raises
            # TypeError when a parent category is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=models.SET_NULL, related_name='subcategory', to='Product.Category'),
        ),
        migrations.RemoveField(
            model_name='product',
            name='category',
        ),
        migrations.AddField(
            model_name='product',
            name='category',
            field=models.ManyToManyField(to='Product.Category'),
        ),
    ]
|
import os
import time
# Mount point of the removable storage where captures are written.
STORAGE_ROOT = '/mnt/usb-sd'
def filename(capture_time):
    """Name a capture by its ISO timestamp, with ':' made filesystem-safe."""
    safe_stamp = capture_time.isoformat().replace(':', '_')
    return safe_stamp + '.jpg'
def image_path(folder, capture_time):
    """Local path for a capture, creating STORAGE_ROOT/<folder> if needed."""
    directory = os.path.join(STORAGE_ROOT, folder)
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, filename(capture_time))
def image_s3_key(folder, capture_time):
    """S3 object key mirroring the local layout: <folder>/<timestamp>.jpg."""
    key_name = filename(capture_time)
    return os.path.join(folder, key_name)
|
import sys
import urllib2
import urllib
def Aggregation_Transit_Centralized(conf,inputs,outputs):
start_point = inputs["StartPoint"]["value"]
walkshed_collection = inputs["WalkshedCollection"]["value"]
walkshed_union = inputs["WalkshedUnion"]["value"]
poi = inputs["POI"]["value"]
crime = inputs["Crime"]["value"]
transit = inputs["Transit"]["value"]
walking_time_period = inputs["WalkingTimePeriod"]["value"]
distance_decay_function = inputs["DistanceDecayFunction"]["value"]
params = urllib.urlencode({'start_point': start_point, 'walkshed_collection': walkshed_collection, 'walkshed_union': walkshed_union, 'poi': poi, 'crime': crime, 'transit': transit, 'walking_time_period': walking_time_period ,'distance_decay_function': distance_decay_function})
aggregation_service_url = "http://127.0.0.1:9364/aggregation"
try:
aggregation_data = urllib2.urlopen(aggregation_service_url, params).read()
except urllib2.HTTPError, e:
print "HTTP error: %d" % e.code
except urllib2.URLError, e:
print "Network error: %s" % e.reason.args[1]
outputs["AggregationResult"]["value"] = aggregation_data
return 3
|
import torch
class CNN(torch.nn.Module):
    """Base classifier: subclasses must define _model_conv, _model_linear
    and _softmax before forward() is used."""

    def __init__(self):
        super(CNN, self).__init__()

    def forward(self, x):
        """Convolve, flatten per sample, classify; returns probabilities."""
        features = self._model_conv(x)
        flat = features.view(x.shape[0], -1)
        logits = self._model_linear(flat)
        return self._softmax(logits)

    def save(self, filename):
        """Write a CPU-mapped copy of the state dict to *filename*."""
        cpu_state = {name: tensor.cpu()
                     for name, tensor in self.state_dict().items()}
        with open(filename, 'wb') as f_model:
            torch.save({'state_dict': cpu_state}, f_model)

    def load(self, filename):
        """Restore parameters previously written by save()."""
        self.load_state_dict(torch.load(filename)['state_dict'])
class SimpleCNN(CNN):
    """Three 5x5 conv blocks followed by a small linear classifier head."""

    def __init__(self):
        super(SimpleCNN, self).__init__()
        self._init_network()

    def _init_network(self):
        # Conv2d args: in_channels, out_channels, kernel, stride, padding.
        # Each MaxPool2d(2) halves the spatial size.
        self._model_conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 16, 5, 1, 2),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
        )
        self._model_conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(16, 32, 5, 1, 2),
            torch.nn.Dropout2d(p=0.2),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
        )
        self._model_conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 5, 1, 2),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
        )
        # Head sized for 6x6x64 features (48x48 inputs after three pools).
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(6 * 6 * 64, 256),
            torch.nn.Linear(256, 64),
            torch.nn.Linear(64, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
        # Chain the three blocks so CNN.forward can run them as one module.
        self._model_conv = torch.nn.Sequential(
            self._model_conv1,
            self._model_conv2,
            self._model_conv3,
        )
class Mobilenet(CNN):
    """MobileNet-inspired classifier built from depthwise-separable
    convolutions (a per-channel 3x3 conv followed by a 1x1 pointwise conv)."""
    def __init__(self):
        super(Mobilenet, self).__init__()
        self._init_network()
    def _init_network(self):
        def _relu():
            return torch.nn.ReLU(inplace=True)
        def _dropout():
            return torch.nn.Dropout()
        def _conv_bn(in_channel, out_channel, stride):
            # Standard 3x3 conv + batch-norm + ReLU.
            conv_bn = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, stride,
                                1, bias=False),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv_bn
        def _conv_dw(in_channel, out_channel, stride):
            # Depthwise 3x3 (groups=in_channel) then pointwise 1x1 conv.
            conv_dw = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, in_channel, 3, stride, 1,
                                groups=in_channel, bias=False),
                torch.nn.BatchNorm2d(in_channel),
                _relu(),
                torch.nn.Conv2d(in_channel, out_channel, 1, 1, 0,
                                bias=False),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv_dw
        self._model_conv = torch.nn.Sequential(
            _conv_bn(  1,  16, 1),
            _conv_bn( 16,  32, 1),
            _conv_bn( 32,  32, 2),
            _conv_dw( 32,  64, 1),
            _conv_dw( 64,  64, 1),
            _conv_dw( 64, 128, 2),
            _conv_dw(128, 256, 2),
            _conv_dw(256, 512, 2),
            #torch.nn.AvgPool2d(2),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512*3*3, 512),
            torch.nn.BatchNorm1d(512),
            _dropout(),
            _relu(),
            torch.nn.Linear(512, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class VGG(CNN):
    """VGG-inspired stack of paired 3x3 convs with pooling between pairs.

    Original author's note: not obtaining great performance on this task.
    """
    def __init__(self):
        super(VGG, self).__init__()
        self._init_network()
    def _init_network(self):
        def _conv(in_channel, out_channel):
            # 3x3 conv keeping spatial size, then batch-norm + ReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, 1, 1),
                torch.nn.BatchNorm2d(out_channel),
                torch.nn.ReLU(inplace=True),
            )
            return conv
        def _max_pool():
            return torch.nn.MaxPool2d(2)
        def _dropout2d():
            return torch.nn.Dropout2d()
        def _dropout():
            return torch.nn.Dropout()
        self._model_conv = torch.nn.Sequential(
            _conv(1, 16),
            _conv(16, 32),
            _max_pool(),
            _conv(32, 64),
            _conv(64, 64),
            _max_pool(),
            #_dropout2d(),
            _conv(64, 128),
            _conv(128, 128),
            _max_pool(),
            _conv(128, 256),
            _conv(256, 256),
            _max_pool(),
            #_dropout2d(),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(256*3*3, 256),
            _dropout(),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(256, 32),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(32, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class TsungHan(CNN):
    """Baseline model suggested by TsungHan, kept for performance
    comparison; reaches about 66% validation accuracy per the author."""
    def __init__(self):
        super(TsungHan, self).__init__()
        self._init_network()
    def _init_network(self):
        def _relu():
            return torch.nn.ReLU(inplace=True)
        def _conv(in_channel, out_channel):
            # 3x3 conv keeping spatial size, then batch-norm + ReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, 1, 1),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv
        def _max_pool():
            return torch.nn.MaxPool2d(2)
        def _dropout():
            return torch.nn.Dropout()
        self._model_conv = torch.nn.Sequential(
            _conv(1, 16),
            _conv(16, 32),
            _conv(32, 64),
            _max_pool(),
            _conv(64, 128),
            _max_pool(),
            _conv(128, 256),
            _max_pool(),
            _conv(256, 512),
            _max_pool(),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512*3*3, 512),
            torch.nn.BatchNorm1d(512),
            _relu(),
            _dropout(),
            torch.nn.Linear(512, 256),
            _relu(),
            torch.nn.Linear(256, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class Kai(CNN):
    """TsungHan variant weighted toward the later convolution stages
    (wider channels late, fewer early), with LeakyReLU activations."""
    def __init__(self):
        super(Kai, self).__init__()
        self._init_network()
    def _init_network(self):
        def _relu():
            return torch.nn.LeakyReLU(inplace=True)
        def _conv(in_channel, out_channel):
            # 3x3 conv keeping spatial size, then batch-norm + LeakyReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, 1, 1),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv
        def _max_pool():
            return torch.nn.MaxPool2d(2)
        def _dropout():
            return torch.nn.Dropout()
        def _dropout2d():
            return torch.nn.Dropout2d()
        self._model_conv = torch.nn.Sequential(
            _conv(1, 16),
            _conv(16, 32),
            _max_pool(),
            _conv(32, 64),
            _conv(64, 128),
            _max_pool(),
            _conv(128, 256),
            _conv(256, 256),
            _max_pool(),
            _dropout2d(),
            _conv(256, 512),
            _conv(512, 512),
            _max_pool(),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512*3*3, 512),
            #torch.nn.BatchNorm1d(512),
            _relu(),
            _dropout(),
            torch.nn.Linear(512, 256),
            _relu(),
            torch.nn.Linear(256, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class Kai2(CNN):
    """Kai variant with varied kernel sizes: 5x5 convs in the early stages,
    3x3 convs later."""
    def __init__(self):
        super(Kai2, self).__init__()
        self._init_network()
    def _init_network(self):
        def _relu():
            return torch.nn.ReLU(inplace=True)
        def _conv5(in_channel, out_channel):
            # 5x5 conv (padding 2 keeps spatial size), batch-norm + ReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 5, 1, 2),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv
        def _conv3(in_channel, out_channel):
            # 3x3 conv (padding 1 keeps spatial size), batch-norm + ReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, 1, 1),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv
        def _max_pool():
            return torch.nn.MaxPool2d(2)
        def _dropout():
            return torch.nn.Dropout()
        self._model_conv = torch.nn.Sequential(
            _conv5(1, 16),
            _conv5(16, 32),
            _max_pool(),
            _conv5(32, 64),
            _conv5(64, 128),
            _max_pool(),
            _conv3(128, 256),
            _conv3(256, 256),
            _max_pool(),
            _conv3(256, 512),
            _conv3(512, 512),
            _max_pool(),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512*3*3, 512),
            torch.nn.BatchNorm1d(512),
            _relu(),
            _dropout(),
            torch.nn.Linear(512, 256),
            _relu(),
            torch.nn.Linear(256, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class Kai3(CNN):
    """Kai variant weighted toward the EARLY convolution stages (three convs
    before the first pool), with spatial dropout after each pooling group."""
    def __init__(self):
        super(Kai3, self).__init__()
        self._init_network()
    def _init_network(self):
        def _relu():
            return torch.nn.ReLU(inplace=True)
        def _conv(in_channel, out_channel):
            # 3x3 conv keeping spatial size, then batch-norm + ReLU.
            conv = torch.nn.Sequential(
                torch.nn.Conv2d(in_channel, out_channel, 3, 1, 1),
                torch.nn.BatchNorm2d(out_channel),
                _relu(),
            )
            return conv
        def _max_pool():
            return torch.nn.MaxPool2d(2)
        def _dropout():
            return torch.nn.Dropout()
        def _dropout2d():
            return torch.nn.Dropout2d()
        self._model_conv = torch.nn.Sequential(
            _conv(1, 16),
            _conv(16, 16),
            _conv(16, 32),
            _max_pool(),
            _dropout2d(),
            _conv(32, 64),
            _conv(64, 64),
            _conv(64, 128),
            _max_pool(),
            _dropout2d(),
            _conv(128, 256),
            _max_pool(),
            _conv(256, 512),
            _max_pool(),
            _dropout2d(),
        )
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512*3*3, 512),
            torch.nn.BatchNorm1d(512),
            _relu(),
            _dropout(),
            torch.nn.Linear(512, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class Kai4(CNN):
    """Compact Kai variant: six 3x3 conv blocks with four pooling steps and
    a single hidden linear layer in the head."""

    def __init__(self):
        super(Kai4, self).__init__()
        self._init_network()

    def _init_network(self):
        def _relu():
            return torch.nn.ReLU(inplace=True)

        def _conv(cin, cout):
            # 3x3 conv keeping spatial size, then batch-norm + ReLU.
            return torch.nn.Sequential(
                torch.nn.Conv2d(cin, cout, 3, 1, 1),
                torch.nn.BatchNorm2d(cout),
                _relu(),
            )

        # Assemble the backbone as a flat list; each MaxPool2d halves H and W.
        stages = [
            _conv(1, 16),
            _conv(16, 32),
            torch.nn.MaxPool2d(2),
            _conv(32, 64),
            _conv(64, 128),
            torch.nn.MaxPool2d(2),
            _conv(128, 256),
            torch.nn.MaxPool2d(2),
            _conv(256, 512),
            torch.nn.MaxPool2d(2),
        ]
        self._model_conv = torch.nn.Sequential(*stages)
        self._model_linear = torch.nn.Sequential(
            torch.nn.Linear(512 * 3 * 3, 512),
            torch.nn.BatchNorm1d(512),
            _relu(),
            torch.nn.Dropout(),
            torch.nn.Linear(512, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)
class SimpleDNN(CNN):
    """Fully-connected baseline: flattens each 48x48 single-channel input
    and applies five linear layers with ReLU between them."""

    def __init__(self):
        super(SimpleDNN, self).__init__()
        self._init_network()

    def _init_network(self):
        self._linear = torch.nn.Sequential(
            torch.nn.Linear(48 * 48 * 1, 1536),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(1536, 512),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(512, 128),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(128, 32),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(32, 7),
        )
        self._softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        """Flatten each sample and return per-class probabilities."""
        flat = x.view(x.shape[0], -1)
        return self._softmax(self._linear(flat))
|
from vcenter_connect import update_virtual_disk_capacity, get_all_disknumbers
# Interactive helper (Python 2): grow one virtual disk of a VM via the
# vcenter_connect module.
virtualmachine_name = 'Nexii2' #raw_input("enter virtual machine name:")
all_disks_numbers = get_all_disknumbers(virtualmachine_name)
print "All disks number avaliable:"
print ",".join(map(str,all_disks_numbers))
selected_disk_number = int(raw_input("Selec a disk number:"))
disk_size = raw_input("Enter Disk size:")
# Only attempt the resize when the user picked one of the listed disks.
if selected_disk_number in all_disks_numbers:
    if update_virtual_disk_capacity(virtualmachine_name,selected_disk_number,disk_size):
        print "Disc mode is changed"
    else:
        print "Something went wrong"
else:
    print "Please select proper disc number"
|
import tweepy
import pymysql
import dotenv
import logging
import json
import string
import os
import time
from config import create_api
from misinformation_model import calculate_validity_score
logging.basicConfig(filename='app.log', filemode='w',
format='%(name)s - %(levelname)s - %(message)s')
# gets 10 most recent tweets according to filter parameters
class TweetListener(tweepy.StreamListener):
    """Tweepy stream listener that scores keyword-matching tweets with the
    misinformation model, persists them to MySQL, and collects responses
    until self.limit tweets have been processed."""
    def __init__(self, api, model, keywords):
        self.api = api
        # Accumulated {'id', 'handle', 'created_at', 'text', 'validity_score'}
        # dicts, one per processed tweet.
        self.responses = []
        self.keywords = keywords
        self.model = model
        self.num_tweets = 0
        # Stop the stream after this many matching tweets.
        self.limit = 1
    def increment_num_tweets(self):
        self.num_tweets += 1
    def on_status(self, tweet):
        """
        Process the tweets and save them to a PyMySQL AWS database
        """
        logging.info(f"Processing tweet id {tweet.id}\n")
        logging.info(f"Tweet: {tweet.text}\n")
        # assume keyword in text due to high frequency usage in crisis event
        if any(word in tweet.text for word in self.keywords):
            # serve this ID and score to the client
            score = calculate_validity_score(tweet, self.model)
            # TODO: extended text
            self.insert_into_database(
                tweet.created_at, tweet.text, tweet.user.screen_name, tweet.id, score)
            response = {'id': tweet.id, 'handle': tweet.user.screen_name,
                        'created_at': tweet.created_at, 'text': tweet.text, 'validity_score': float(score)}
            self.responses.append(response)
            self.increment_num_tweets()
        # Returning False tells tweepy to disconnect the stream.
        if self.num_tweets < self.limit:
            return True
        else:
            return False
    # save to database for data analytics
    def insert_into_database(self, created_at, text, screen_name, tweet_id, validity_score):
        """Insert one scored tweet into the 'twitter' table (new connection per call)."""
        # NOTE(review): passing the host as the first positional argument is
        # only accepted by older pymysql versions — confirm, or use host=...
        db = pymysql.connect(os.getenv("host"), user='admin', passwd=os.getenv(
            "db_pw"), db='twitter', charset="utf8")
        cursor = db.cursor()
        insert_query = "INSERT INTO twitter (tweet_id, user_handle, created_at, text, validity_score) VALUES (%s, %s, %s, %s, %s)"
        # The model returns a nested structure; [0][0] extracts the scalar score.
        final_score = float(validity_score[0][0])
        cursor.execute(insert_query, (tweet_id, screen_name,
                                      created_at, text, final_score))
        db.commit()
        logging.info("Data added to AWS RDBMS succesfuly")
        cursor.close()
        db.close()
        return
    def on_error(self, status):
        logging.error(f"Error: {status}")
# get hashtags
# this may not work because the filter may not be able to get all together
def get_tweets(keywords, languages, locations, model):
    """Open a filtered Twitter stream and return the collected responses."""
    api = create_api()
    listener = TweetListener(api, model, keywords)
    stream = tweepy.Stream(
        api.auth, listener,
        wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    # Blocks until the listener stops the stream (on_status returns False).
    stream.filter(track=keywords, languages=languages, locations=locations)
    return listener.responses
if __name__ == "__main__":
    # test case
    # BUG FIX: get_tweets() takes four arguments (keywords, languages,
    # locations, model); the original call omitted ``model`` and raised
    # TypeError before streaming started.  None is passed explicitly here —
    # supply a real misinformation model for production use.
    get_tweets(["Python"], ["en"], [-6.38, 49.87, 1.77, 55.81], None)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from models.reg import Registry
from models.permissions import Role
from storage.impl.deserialize import Deserializer
from storage.schema.deserialize import SchemaDeserializer
from pprint import pprint
import json
if __name__ == '__main__':
    # Compare the specified API schema against the implementation schema and
    # print CSV-ish reports of every mismatch.  Python 2 script (print
    # statements and ``unicode``).
    impl_reg = Registry()
    resource_reg = Registry()
    type_reg = Registry()
    impl_deserializer = Deserializer('./schema/implementation')
    spec_deserializer = SchemaDeserializer('./schema')
    # Load implementation resources, then spec types and spec resources.
    for resource_name in impl_deserializer.resources():
        impl_reg.add_type(impl_deserializer.decode_resource(resource_name))
    for type_name in spec_deserializer.types():
        type_reg.add_type(spec_deserializer.decode_type(type_name))
    for resource_name in spec_deserializer.resources():
        resource_reg.add_type(spec_deserializer.decode_resource(resource_name))
    spec_resource_names = set(resource_reg.types.keys())
    impl_resource_names = set(impl_reg.types.keys())
    # Resources present on only one side.
    print "\n>>> Unimplemented Resources:"
    pprint(spec_resource_names.difference(impl_resource_names))
    print "\n>>> Unspecified Resources:"
    pprint(impl_resource_names.difference(spec_resource_names))
    spec_methods = []
    spec_fields = {}
    impl_methods = []
    impl_fields = {}
    permissions = {}
    methods = {}
    # For resources on both sides, collect fully-qualified method names
    # ("resource.method"), their fields ("resource.method.field") and their
    # permission tables for the later comparisons.
    for rn in spec_resource_names.intersection(impl_resource_names):
        spec_resource = resource_reg.get_type(rn)
        for mn in spec_resource.methods:
            fmn = rn + '.' + mn
            spec_methods.append(fmn)
        impl_resource = impl_reg.get_type(rn)
        for mn in impl_resource.methods:
            fmn = rn + '.' + mn
            impl_methods.append(fmn)
        for mn in set(spec_resource.methods).intersection(set(impl_resource.methods)):
            spec_method = spec_resource.get_method(mn)
            for fn in spec_method.fields:
                spec_fields[rn + '.' + mn + '.' + fn] = spec_method.get_field(fn)
            impl_method = impl_resource.get_method(mn)
            for fn in impl_method.fields:
                impl_fields[rn + '.' + mn + '.' + fn] = impl_method.get_field(fn)
            permissions[rn + '.' + mn] = {}
            permissions[rn + '.' + mn]['spec'] = spec_method.permissions
            permissions[rn + '.' + mn]['impl'] = impl_method.permissions
    # Methods present on only one side.
    print "\n>>> Unimplemented methods:"
    pprint(set(spec_methods).difference(set(impl_methods)))
    print "\n>>> Unspecified methods:"
    pprint(set(impl_methods).difference(set(spec_methods)))
    # Fields present on only one side, printed as resource,method,field CSV.
    print "\n>>> Unimplemented fields:"
    for field in set(spec_fields.keys()).difference(set(impl_fields.keys())):
        print field.split(u'.')[0] + ',' + field.split(u'.')[1] + u',' + field.split(u'.')[2]
    print "\n>>> Unspecified fields:"
    for field in set(impl_fields.keys()).difference(set(spec_fields.keys())):
        print field.split(u'.')[0] + ',' + field.split(u'.')[1] + u',' + field.split(u'.')[2]
    # Shared fields whose declared type or required flag disagrees.
    print "\n>>> Incorrect field type:"
    for fn in set(spec_fields.keys()).intersection(set(impl_fields.keys())):
        if spec_fields[fn].datatypename != impl_fields[fn].datatypename:
            print fn + ',' + spec_fields[fn].datatypename + ',' + impl_fields[fn].datatypename
    print "\n>>> Incorrect requirement modifier:"
    for fn in set(spec_fields.keys()).intersection(set(impl_fields.keys())):
        if spec_fields[fn].required != impl_fields[fn].required:
            print fn + ',' + str(spec_fields[fn].required) + ',' + str(impl_fields[fn].required)
    # Build a per-method record (keyed by lowercased name) with spec/impl/
    # used/deprecated/private flags, merging spec and impl information.
    print "\n>>> Methods:"
    for rn in spec_resource_names.union(impl_resource_names):
        if rn in resource_reg.types:
            spec_resource = resource_reg.get_type(rn)
            for mn in spec_resource.methods:
                fmn = rn + '.' + mn
                if fmn.lower() in methods:
                    methods[fmn.lower()][u'spec'] = True
                    methods[fmn.lower()][u'deprecated'] = spec_resource.get_method(mn).deprecated
                else:
                    methods[fmn.lower()] = {
                        u'name': fmn,
                        u'spec': True,
                        u'impl': False,
                        u'used': False,
                        u'deprecated': spec_resource.get_method(mn).deprecated,
                        u'private': False
                    }
        if rn in impl_reg.types:
            impl_resource = impl_reg.get_type(rn)
            for mn in impl_resource.methods:
                fmn = rn + '.' + mn
                if fmn.lower() in methods:
                    methods[fmn.lower()][u'impl'] = True
                    methods[fmn.lower()][u'private'] = impl_resource.get_method(mn).is_private
                else:
                    methods[fmn.lower()] = {
                        u'name': fmn,
                        u'spec': False,
                        u'impl': True,
                        u'used': False,
                        u'deprecated': False,
                        u'private': impl_resource.get_method(mn).is_private
                    }
    # Mark methods that appear in the recorded usage log; unknown names get
    # a used-only record.
    methods_usage = []
    with open('./schema/operation/usage.json') as f:
        data = json.load(f)
        methods_usage = []
        for m in data:
            methods_usage.append(m)
    for method in methods_usage:
        if method.lower() in methods:
            methods[method.lower()][u'used'] = True
        else:
            methods[method.lower()] = {
                u'name': method,
                u'spec': False,
                u'impl': False,
                u'used': True,
                u'deprecated': False,
                u'private': False
            }
    # Emit one CSV line per method: resource,method,spec,impl,used,deprecated,private.
    for full_method_name in methods:
        method = methods[full_method_name]
        resource = None
        method_name = None
        if len(method[u'name'].split(u'.')) > 0:
            resource = method[u'name'].split(u'.')[0]
        if len(method[u'name'].split(u'.')) > 1:
            method_name = method[u'name'].split(u'.')[1]
        print unicode(resource) + u',' +\
            unicode(method_name) + u',' +\
            unicode(method[u'spec']) + u',' +\
            unicode(method[u'impl']) + u',' +\
            unicode(method[u'used']) + u',' +\
            unicode(method[u'deprecated']) + u',' +\
            unicode(method[u'private'])
    # Permission comparison: for every shared method and every known role,
    # report roles missing from either side and roles whose access differs.
    print "\n>>> Permissions:"
    not_spec = []
    not_impl = []
    difference = []
    for mn in permissions:
        for role in Role.ROLES:
            if role not in permissions[mn]['spec']:
                not_spec.append((mn, role))
            if role not in permissions[mn]['impl']:
                not_impl.append((mn, role))
            if (role in permissions[mn]['spec'] and
                    role in permissions[mn]['impl']):
                spec_role = permissions[mn]['spec'][role]
                impl_role = permissions[mn]['impl'][role]
                if spec_role.access != impl_role.access:
                    difference.append((mn, role, spec_role.access, impl_role.access))
    print "\n>>>> Not Specified Permissions:"
    for (mn, role) in not_spec:
        print mn + ',' + role
    print "\n>>>> Not Implemented Permissions:"
    for (mn, role) in not_impl:
        print mn + ',' + role
    print "\n>>>> Difference in Permissions:"
    for (mn, role, sa, ia) in difference:
        print mn + ',' + role + ',' + unicode(sa) + ',' + unicode(ia)
|
from urllib.parse import quote_plus

import numpy as np
import pandas as pd
from pandas import DataFrame
from sqlalchemy import create_engine
# Clean the Maoyan movie-comment dump and load it into MySQL.
# BUG FIX: read_csv is given the paths directly — the original opened the
# files with open(..., 'rb') and never closed the handles.
data = pd.read_csv("D:\\python_code\\sample\\猫眼\\movieComments.csv",
                   names=['id', 'city', 'comment', 'ranking', 'time'])
df = data.drop_duplicates()  # drop duplicated rows
df1 = df.dropna(how='any', inplace=False)  # drop rows with any empty field
df1 = df1.reset_index(drop=True)
# Keep the unique, non-null entries of the `city` column.
df2 = df1.drop_duplicates(subset=["city"])
df2['city'].to_csv("D:\\python_code\\sample\\猫眼\\city.csv")
# city.csv is matched with the corresponding provinces externally, saved as
# cityname.csv and read back into a dataframe.
citydata = pd.read_csv("D:\\python_code\\sample\\猫眼\\cityname.csv",
                       names=['city', 'province'])
# Initialize the database connection (pymysql driver).
# BUG FIX: the password contains '@', which corrupts SQLAlchemy URL parsing
# ('...root:mym@1249690440@localhost...'); special characters in the
# password must be percent-encoded.
engine = create_engine(
    'mysql+pymysql://root:%s@localhost:3306/maoyan' % quote_plus('mym@1249690440'))
# Store the DataFrames as MySQL tables, keeping the index column.
citydata.to_sql('cityname', engine, index=True)
df1.to_sql('moviecomment', engine, index=True)
print('Write to Mysql table successfully!')
import os
import sys
import unittest
import pytorch_lightning as pl
import pytorch_lightning.loggers
from deep_depth_transfer import DepthNetResNet
from deep_depth_transfer.data import TumValidationDataModuleFactory
from deep_depth_transfer.models import DepthEvaluationModel
from deep_depth_transfer.utils import DepthMetric
from test.data_module_mock import DataModuleMock
# DataLoader worker count: multiprocessing data loading is problematic on
# Windows, so fall back to single-process loading there.
if sys.platform == "win32":
    WORKERS_COUNT = 0
else:
    WORKERS_COUNT = 4
class TestDepthEvaluationModel(unittest.TestCase):
    """Integration test: run depth evaluation over the TUM validation split."""

    def setUp(self) -> None:
        # Resolve <tests-parent>/datasets/tum_rgbd/... relative to this file.
        current_folder = os.path.dirname(os.path.abspath(__file__))
        dataset_folder = os.path.join(os.path.dirname(current_folder), "datasets", "tum_rgbd",
                                      "rgbd_dataset_freiburg3_large_cabinet_validation")
        data_module_factory = TumValidationDataModuleFactory(dataset_folder)
        self._data_module = data_module_factory.make_data_module(
            final_image_size=(128, 384),
            batch_size=1,
            num_workers=WORKERS_COUNT,
        )
        # Wrap the data module in the test mock — presumably to shrink the
        # run; confirm against DataModuleMock.
        self._data_module = DataModuleMock(self._data_module)
        depth_net = DepthNetResNet()
        # Requires a CUDA-capable GPU (.cuda() here, gpus=1 below).
        self._model = DepthEvaluationModel(depth_net, DepthMetric()).cuda()

    def test_evaluation_model(self):
        # One-epoch Lightning test run; metrics go to ./logs via TensorBoard.
        tb_logger = pl.loggers.TensorBoardLogger('logs/')
        trainer = pl.Trainer(logger=tb_logger, max_epochs=1, gpus=1, progress_bar_refresh_rate=20)
        trainer.test(self._model, self._data_module.test_dataloader())
|
from .factory import create
|
#!/usr/bin/python
"""Upload a binary image over a serial line to a boot loader.

Protocol: 'L' command byte, 2-byte little-endian file size, 2-byte
little-endian load address, then the raw file data.
"""
import sys
import os
import struct

if len(sys.argv) != 2:
    print("Usage: " + sys.argv[0] + " filename")
    sys.exit(1)
filename = sys.argv[1]
filesize = os.stat(filename).st_size
# BUG FIX: the original called ``f.close`` / ``tty.close`` without
# parentheses, so the files were never closed; context managers guarantee
# cleanup.  Both files are opened in binary mode so the struct-packed bytes
# are written unmodified.
#tty = open("/dev/ttyS0", "wb")
with open("/dev/ttyUSB0", "wb") as tty, open(filename, "rb") as f:
    # call 'load' command
    tty.write(b"L")
    # write file size (as little endian unsigned short == 2 bytes)
    tty.write(struct.pack("<H", filesize))
    # write memory target address (begin of memory)
    tty.write(struct.pack("<H", 0x8000))
    # send file data
    tty.write(f.read(filesize))
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that rules which use built dependencies work correctly.
"""
import TestGyp

# Generate the build files, relocate the tree (to catch path assumptions),
# build, and verify that the rule produced the expected output file.
test = TestGyp.TestGyp()
test.run_gyp('use-built-dependencies-rule.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('use-built-dependencies-rule.gyp', chdir='relocate/src')
test.built_file_must_exist('main_output', chdir='relocate/src')
test.built_file_must_match('main_output', 'output', chdir='relocate/src')
test.pass_test()
|
# coding: utf-8
# Print basic statistics about the NLTK Brown corpus and about file 'cr09'
# (output labels are in Spanish: categories, files, characters, words,
# sentences).
from nltk.corpus import brown

brown.categories()  # result unused here; recomputed in the print below
print("Numero de categorias ",len(brown.categories()))
print("Numero de archivos ",len(brown.fileids()))
print("Numero de caracteres en cr09 ",len(brown.raw(fileids=['cr09'])))
print("Numero de palabras en cr09 ",len(brown.words(fileids=['cr09'])))
print("Numero de oraciones en cr09 ",len(brown.sents(fileids=['cr09'])))
|
# Write a reducer that outputs the union of the elements of sets A and B.
# The reducer receives key / value pairs where key is an element of a set
# and value is the marker of the set it belongs to (A or B).
# Sample Input:
# 1 A
# 2 A
# 2 B
# 3 B
# Sample Output:
# 1
# 2
# 3
import sys

# NOTE(review): relies on the MapReduce shuffle phase delivering input
# grouped by key — duplicate keys must be adjacent for dedup to work.
prev = ''
for line in sys.stdin:
    key, value = line.strip().split('\t')
    # First line
    if not prev:
        print(key)
        prev = key
    # First key occurrence
    elif key != prev:
        print(key)
        prev = key
import weakref
def doStuff():
    """Return a weak reference to a local function that dies on return."""
    def meth():
        pass
    return weakref.ref(meth)
def recurse(f, n):
    """Call ``f()`` after descending ``n`` levels of recursion."""
    if not n:
        return f()
    return recurse(f, n - 1)
# Exercise weakrefs created deep inside a recursive call chain.
w = recurse(doStuff, 100)

# Try creating a large object to make sure we can handle them:
def f():
    """Return a weak reference to a large, short-lived class object."""
    class C(object):
        # Adding a __slots__ directive increases the size of the type object:
        # BUG FIX: ``xrange`` does not exist in Python 3; ``range`` is
        # behavior-identical here (only iterated once to build the list).
        __slots__ = ['a' + str(i) for i in range(1000)]
    return weakref.ref(C)

r = recurse(f, 100)

import gc
gc.collect()
# Both referents were local to their factories, so after collection the
# weak references must be dead.
assert r() is None, "object was not collected"
assert w() is None, "object was not collected"
|
def strik(lista, x):
    """Strike bonus: sum of the three rolls starting at index ``x``.

    Explicit indexing (not a slice) so a too-short list raises IndexError,
    matching the original loop's behavior.
    """
    return lista[x] + lista[x + 1] + lista[x + 2]
#---------------------------------------
#---------------------------------------
def spare(lista, x):
    """Spare bonus: sum of the three rolls starting at index ``x``."""
    total = 0
    for offset in range(3):
        total += lista[x + offset]
    return total
#---------------------------------------
#---------------------------------------
# Score a ten-frame bowling game.  Input: one line of pin counts per roll.
# ``placar`` collects the frame-by-frame scoreboard symbols ('X' strike,
# '/' spare, '|' frame separator); ``soma_total`` is the running score.
pinos = [int(i) for i in input().split()]
jogadas = 10
placar = []
pontos = 0
soma_pontos = 0
soma_total = 0
i = 0   # index of the first roll of the current frame
j = 1   # index of the second roll of the current frame
for a in range(jogadas):
    if pinos[i] == 10:
        # Strike: frame score is 10 plus the next two rolls.
        soma_pontos = strik(pinos, i)
        if a == 9:
            # Tenth frame: show all three rolls explicitly.
            cont = 0
            x = i
            while cont < 3:
                if pinos[x] == 10:
                    placar.append('X')
                else:
                    placar.append(pinos[x])
                x += 1
                cont += 1
        else:
            placar.append('X')
            placar.append('_')
            placar.append('|')
        # A strike consumes a single roll.
        i += 1
        j += 1
    else:
        pontos = pinos[i] + pinos[j]
        if pontos == 10:
            # Spare: frame score is 10 plus the next roll.
            soma_pontos = spare(pinos, i)
            if a == 9:
                placar.append(pinos[i])
                placar.append('/')
                placar.append(pinos[j+1])
            else:
                placar.append(pinos[i])
                placar.append('/')
                placar.append('|')
        else:
            # Open frame: just the two rolls.
            soma_pontos = pontos
            if a == 9:
                placar.append(pinos[i])
                placar.append(pinos[i+1])
            else:
                placar.append(pinos[i])
                placar.append(pinos[i+1])
                placar.append('|')
        # A normal frame consumes two rolls.
        i += 2
        j += 2
    soma_total = soma_total + soma_pontos
# Print the scoreboard symbols on one line, then the total.
j = 0
for i in range(len(placar)):
    print(placar[j], '', end="")
    j = j + 1
print('\n{}'.format(soma_total))
|
from __future__ import print_function
import sys
import cplex
from cplex.callbacks import UserCutCallback, LazyConstraintCallback
import numpy as np
def powerset(A):
    """Return every subset of list ``A``, subsets containing A[0] first."""
    if A == []:
        return [[]]
    head = A[0]
    tail_subsets = powerset(A[1:])
    with_head = [[head] + subset for subset in tail_subsets]
    return with_head + tail_subsets
from collections import defaultdict
#This class represents a directed graph using adjacency list representation
class Graph:
    """Directed graph (adjacency lists) with Kosaraju SCC extraction."""

    def __init__(self, vertices):
        self.V = vertices                # number of vertices (nodes 0..V-1)
        self.graph = defaultdict(list)   # adjacency lists

    def addEdge(self, u, v):
        """Add the directed edge u -> v."""
        self.graph[u].append(v)

    def DFSUtil(self, v, visited, a1):
        """Depth-first visit from ``v``, collecting reached nodes into ``a1``."""
        visited[v] = True
        a1.append(v)
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                self.DFSUtil(neighbour, visited, a1)
        return a1

    def fillOrder(self, v, visited, stack):
        """Push vertices onto ``stack`` ordered by DFS finishing time."""
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                self.fillOrder(neighbour, visited, stack)
        stack.append(v)

    def getTranspose(self):
        """Return a new graph with every edge reversed."""
        reverse = Graph(self.V)
        for src in self.graph:
            for dst in self.graph[src]:
                reverse.addEdge(dst, src)
        return reverse

    def printSCCs(self):
        """Return the strongly connected components as a list of lists."""
        # First pass: record finishing times on the original graph.
        stack = []
        visited = [False] * self.V
        for v in range(self.V):
            if not visited[v]:
                self.fillOrder(v, visited, stack)
        # Second pass: DFS on the transpose in decreasing finish order;
        # each tree is one strongly connected component.
        transposed = self.getTranspose()
        visited = [False] * self.V
        components = []
        while stack:
            v = stack.pop()
            if not visited[v]:
                components.append(transposed.DFSUtil(v, visited, []))
        return components
class LazyCallback(LazyConstraintCallback):
    """CPLEX lazy-constraint callback adding subtour-elimination cuts.

    Attributes (x, w, N, V, u, A, D, d, loc_x, loc_y) are attached to the
    instance externally after ``register_callback``.  NOTE(review): the
    original source had its indentation mangled; the nesting below is a
    best-effort reconstruction — verify against the original file.
    """

    def __init__(self, env) :
        LazyConstraintCallback.__init__(self, env)

    def __call__(self):
        """Inspect the incumbent, find SCC-based subtours, add cuts."""
        qq=open("dataopt.txt","w")
        #print("sssss")
        #print(self.get_objective_value())
        # Collect arcs selected (binary x above 0.9) in the incumbent.
        active_arcs=[]
        for i in self.D:
            for j in self.D:
                print(self.get_values(self.w[i][j]),i,j)
                #print(self.get_values(x[1][2]),i,j)
        for i in self.A:
            print()
            if self.get_values(self.x[i[0]][i[1]])>0.9:
                active_arcs.append(i)
                #print("s")
        print(active_arcs)
        plt.scatter(loc_x[1:],loc_y[1:],c='b')
        # Depot endpoints appearing as arc tails (r) and heads (t1); dpair
        # holds ordered depot pairs used for the merged-depot checks below.
        r=[]
        t1=[]
        dpair=[]
        for i in active_arcs:
            if i[0] in self.D:
                r.append(i[0])
            if i[1] in self.D:
                t1.append(i[1])
        for i in r:
            for j in t1:
                if i!=j and [i,j] not in dpair:
                    dpair.append([i,j])
        print(dpair)
        # b: every node touched by an active arc, in first-seen order.
        b=[]
        for i in active_arcs:
            if i[0] not in b:
                b.append(i[0])
            if i[1] not in b:
                b.append(i[1])
        print(b)
        u=b[:]
        # Build a graph over the active arcs and split it into SCCs.
        g= Graph(len(b))
        for i in active_arcs:
            z=i[0]
            y=i[1]
            g.addEdge(b.index(z),b.index(y))
        a2=g.printSCCs()
        # Map component members back from indices to node ids.
        for i in range(len(a2)):
            for j in range(len(a2[i])):
                a2[i][j]=b[a2[i][j]]
        print(a2)
        # More than one SCC means the incumbent contains subtours: for each
        # non-trivial component with (almost) no crossing arcs, add a cut
        # requiring at least one arc to cross the component boundary.
        if len(a2)!=1:
            for i in a2:
                if len(i)!=1:
                    thevars=[]
                    thecoefs=[]
                    pp=0
                    for j in i:
                        for k in self.V:
                            if k not in i:
                                #print(x[k][j])
                                thevars.append(self.x[k][j])
                                thevars.append(self.x[j][k])
                                thecoefs.append(1)
                                thecoefs.append(1)
                                pp+=self.get_values(self.x[k][j])+self.get_values(self.x[j][k])
                    if pp<0.90:
                        print(i)
                        self.add(constraint=cplex.SparsePair(thevars,thecoefs),
                                 sense="G",
                                 rhs=1)
                # Drop this component's non-depot nodes from b.
                # NOTE(review): placement of this loop is ambiguous in the
                # mangled original — confirm.
                for j in i:
                    if j not in self.D:
                        b.remove(j)
        print(b)
        # Re-check connectivity with each depot pair (w1) merged: treat
        # w1[1] as w1[0] and look for subtours in the reduced graph.
        for w1 in dpair:
            #print(k)
            c=b[:]
            """for i in active_arcs:
                if i[0] not in c and i[0]!=w[1]:
                    c.append(i[0])
                if i[1] not in c and i[1]!=w[1]:
                    c.append(i[1])"""
            c.remove(w1[1])
            #d=c[0:]
            #print(c)
            #c[0]=2
            #print(c)
            print(c)
            print(c[0])
            g=Graph(len(c))
            for i in active_arcs:
                z=i[0]
                y=i[1]
                if y==w1[1]:
                    y=w1[0]
                if z==w1[1]:
                    z=w1[0]
                if z not in c or y not in c:
                    continue
                #if z==c[0]:
                #    g.addEdge(0,c.index(y))
                #else:
                #print(c.index(z))
                g.addEdge(c.index(z),c.index(y))
            a2=g.printSCCs()
            print(a2,"rrr")
            for i in range(len(a2)):
                for j in range(len(a2[i])):
                    a2[i][j]=c[a2[i][j]]
            # The merged depot stands for both pair members.
            for i in a2:
                if w1[0] in i and len(i)>1:
                    i.append(w1[1])
            print(a2,"ttt")
            if len(a2)==1:
                break
            if len(a2)!=1:
                for i in a2:
                    if len(i)!=1:
                        print(i)
                        # Grow the component with nodes reachable through
                        # active arcs (bounded number of expansion passes).
                        for jj in range(0,10):
                            for ii in active_arcs:
                                #print(ii)
                                if ii[0] in i and ii[1] not in i:
                                    i.append(ii[1])
                                    print(ii)
                                if ii[1] in i and ii[0] not in i:
                                    i.append(ii[0])
                                    print(ii)
                        if len(i)==len(u):
                            # Component covers every active node: no cut.
                            pp=1
                            plt.scatter(loc_x[1:],loc_y[1:],c='b')
                            #for i,j in active_arcs:
                            #plt.plot([self.loc_x[i],self.loc_x[j]],[self.loc_y[i],self.loc_y[j]],c='g')
                            #plt.axis('equal')
                            #plt.show()
                            break
                        pp=0
                        thevars=[]
                        thecoefs=[]
                        for j in i:
                            for k in self.V:
                                if k not in i:
                                    #print(x[k][j])
                                    thevars.append(self.x[k][j])
                                    thevars.append(self.x[j][k])
                                    thecoefs.append(1)
                                    thecoefs.append(1)
                                    pp+=self.get_values(self.x[k][j])+self.get_values(self.x[j][k])
                        if pp<0.9:
                            print(i,"yyy")
                            self.add(constraint=cplex.SparsePair(thevars,thecoefs),
                                     sense="G",
                                     rhs=1)
        """
        if pp>0.9:
            #plt.show()
            qq.write(str(active_arcs))
            qq.write("\n")
            qq.write(" ssss ")
            qq.close()
            #for i,j in active_arcs:
            #    plt.plot([self.loc_x[i],self.loc_x[j]],[self.loc_y[i],self.loc_y[j]],c='g')
            #plt.axis('equal')
            #plt.show()
        """
#assign one variable to each depot
#if one of them is 1 all others should be zero condition
rnd = np.random
rnd.seed(0)
n=81
Q=150
X=20
a=40
b=4.3
nd=8
coords=[[-a/2,a/2],[0,a/2],[a/2,a/2],[a/2,0],[a/2,-a/2],[0,-a/2],[-a/2,-a/2],[-a/2,0]]
loc_x=[-a/2,0,a/2,a/2,a/2,0,-a/2,-a/2]
loc_y=[a/2,a/2,a/2,0,-a/2,-a/2,-a/2,0]
#random.shuffle(self.coords)
for i in range(0,int(a/b)):
for j in range(0,int(a/b)):
coords.append([-a/2+b/2+(b*j),a/2-b/2-(b*i)])
loc_x.append(-a/2+b/2+(b*j))
loc_y.append(a/2-b/2-(b*i))
print(coords)
print(loc_x)
print(loc_y)
D= range(0,nd)
N=[i for i in range(nd,n+nd)]
V=range(0,nd)+N
print(V)
import matplotlib.pyplot as plt
A=[(i,j) for i in V for j in V ]
#print(A)
c={(i,j):np.hypot(loc_x[i]-loc_x[j],loc_y[i]-loc_y[j]) for i,j in A}
for i in D:
for j in D:
c[i,j]=abs(loc_x[i]-loc_x[j])+abs(loc_y[i]-loc_y[j])
cpx = cplex.Cplex()
#print(len(A))
x=[]
#print(c[0][2])
for i in range(len(V)):
x.append([])
for j in range(len(V)):
#print (i,j)
#print(c[i][j])
varName = "x." + str(i) + "." + str(j)
x[i].append(cpx.variables.get_num())
cpx.variables.add(obj=[c[i,j]],
lb=[0.0], ub=[1.0], types=["B"],
names=[varName])
cpx.variables.set_upper_bounds(x[i][i], 0)
w=[]
for i in range(len(D)):
w.append([])
for j in range(len(D)):
#print (i,j)
#print(c[i][j])
varName = "w." + str(i) + "." + str(j)
w[i].append(cpx.variables.get_num())
cpx.variables.add(obj=[c[i,j]],
lb=[0.0], ub=[8.0], types=["C"],
names=[varName])
print(w)
t=cpx.variables.add(obj=D,
lb=[-1] * len(D), ub=[1] * len(D),
types=['C'] * len(D),
names=['t(%d)' % (j) for j in D]
)
p=cpx.variables.add(obj=V,
lb=[-X] * len(V), ub=[X] * len(V),
types=['C'] * len(V),
names=['p(%d)' % (j) for j in V]
)
o=cpx.variables.add(obj=V,
lb=[-X] * len(V), ub=[X] * len(V),
types=['C'] * len(V),
names=['o(%d)' % (j) for j in V]
)
u = cpx.variables.add(obj=V,
lb=[0] * len(V), ub=[Q] * len(V),
types=['C'] * len(V),
names=['u(%d)' % (j) for j in V]
)
a=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['a(%d)' % (j) for j in V]
)
b=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['b(%d)' % (j) for j in V]
)
d=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['d(%d)' % (j) for j in V]
)
e=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['e(%d)' % (j) for j in V]
)
f=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['f(%d)' % (j) for j in V]
)
g=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['g(%d)' % (j) for j in V]
)
h=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['h(%d)' % (j) for j in V]
)
ii=cpx.variables.add(obj=V,
lb=[0.0] * len(V), ub=[1.0] * len(V),
types=['B'] * len(V),
names=['ii(%d)' % (j) for j in V]
)
for i in V:
cpx.objective.set_linear(u[i],0)
cpx.objective.set_linear(p[i],0)
cpx.objective.set_linear(o[i],0)
cpx.objective.set_linear(a[i],0)
cpx.objective.set_linear(b[i],0)
cpx.objective.set_linear(d[i],0)
cpx.objective.set_linear(e[i],0)
cpx.objective.set_linear(f[i],0)
cpx.objective.set_linear(g[i],0)
cpx.objective.set_linear(h[i],0)
cpx.objective.set_linear(ii[i],0)
for i in D:
cpx.objective.set_linear(t[i],0)
cpx.variables.set_upper_bounds(u[i], 0)
cpx.variables.set_upper_bounds(p[i], 0)
cpx.variables.set_upper_bounds(o[i], 0)
cpx.variables.set_upper_bounds(a[i], 0)
cpx.variables.set_upper_bounds(b[i], 0)
cpx.variables.set_upper_bounds(d[i], 0)
cpx.variables.set_upper_bounds(e[i], 0)
cpx.variables.set_upper_bounds(f[i], 0)
cpx.variables.set_upper_bounds(g[i], 0)
cpx.variables.set_upper_bounds(h[i], 0)
cpx.variables.set_upper_bounds(ii[i], 0)
for i in D:
for j in D:
cpx.variables.set_upper_bounds(x[i][j],0)
print(e)
for i in N:
thevars = []
thecoefs = []
for j in range(0, len(V)):
if i!=j:
thevars.append(x[i][j])
thecoefs.append(1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[1.0])
for j in N:
thevars = []
thecoefs = []
for i in range(0, len(V)):
if i!=j:
thevars.append(x[i][j])
thecoefs.append(1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[1.0])
"""
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[0][j])
thecoefs.append(1)
for i in N:
thevars.append(a1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[1][j])
thecoefs.append(1)
for i in N:
thevars.append(b1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[2][j])
thecoefs.append(1)
for i in N:
thevars.append(d1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[3][j])
thecoefs.append(1)
for i in N:
thevars.append(e1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[4][j])
thecoefs.append(1)
for i in N:
thevars.append(f1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[5][j])
thecoefs.append(1)
for i in N:
thevars.append(g1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[6][j])
thecoefs.append(1)
for i in N:
thevars.append(h1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for j in D:
thevars=[]
thecoefs=[]
thevars.append(w[7][j])
thecoefs.append(1)
for i in N:
thevars.append(ii1[i][j])
thecoefs.append(-1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
"""
for i in D:
for j in N:
if i!=j:
#print(u[j])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=c[i,j],
sense="E",
lin_expr=[[u[j],x[i][j]],[1,0]])
for i in N:
for j in N:
if i!=j:
#print(u[j])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=c[i,j],
sense="E",
lin_expr=[[u[j],u[i]],[1,-1]])
for i in N:
for j in D:
if i!=j:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=Q-c[i,j],
sense="L",
lin_expr=[[u[j],u[i]],[0,1]])
for i in D:
for j in N:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=loc_x[j]-loc_x[i],
sense="E",
lin_expr=[[p[j],a[i]],[1,0]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=loc_y[j]-loc_y[i],
sense="E",
lin_expr=[[o[j],a[i]],[1,0]])
for i in N:
for j in N:
if i!=j:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=loc_x[j]-loc_x[i],
sense="E",
lin_expr=[[p[j],p[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=loc_y[j]-loc_y[i],
sense="E",
lin_expr=[[o[j],o[i]],[1,-1]])
i=0
for j in range(len(V)):
if i !=j:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[a[j],a[i]],[1,0]])
i=1
for j in range(len(V)):
if j>7:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[b[j],a[i]],[1,0]])
i=2
for j in range(len(V)):
if j >7:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[d[j],a[i]],[1,0]])
i=3
for j in range(len(V)):
if j>7:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[e[j],a[i]],[1,0]])
i=4
for j in range(len(V)):
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[f[j],a[i]],[1,0]])
i=5
for j in range(len(V)):
if j>7:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[g[j],a[i]],[1,0]])
i=6
for j in range(len(V)):
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[h[j],a[i]],[1,0]])
i=7
for j in range(len(V)):
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=1.0,
sense="E",
lin_expr=[[ii[j],a[i]],[1,0]])
for i in N:
for j in N:
if i!=j:
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[a[j],a[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[b[j],b[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[d[j],d[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[e[j],e[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[f[j],f[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[g[j],g[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[h[j],h[i]],[1,-1]])
cpx.indicator_constraints.add(
indvar=x[i][j],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[ii[j],ii[i]],[1,-1]])
for i in N:
for j in D:
cpx.indicator_constraints.add(
indvar=a[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[0][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=b[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[1][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=d[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[2][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=e[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[3][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=f[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[4][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=g[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[5][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=h[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[6][j],x[i][j]],[-1,1]])
cpx.indicator_constraints.add(
indvar=ii[i],
complemented=0,
rhs=0.0,
sense="L",
lin_expr=[[w[7][j],x[i][j]],[-1,1]])
for i in D:
thevars=[]
thecoefs=[]
thevars.append(t[i])
thecoefs.append(1)
for j in N:
thevars.append(x[i][j])
thevars.append(x[j][i])
thecoefs.append(-1)
thecoefs.append(1)
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(thevars, thecoefs)],
senses=["E"], rhs=[0.0])
for i in D:
for j in range(i+1,len(D)):
if i !=j:
cpx.linear_constraints.add(
lin_expr=[([t[i],t[j]],[1,1])],
senses=["L"], rhs=[1.0])
for i in D:
for j in range(i+1,len(D)):
cpx.linear_constraints.add(
lin_expr=[([t[i],t[j]],[1,1])],
senses=["G"], rhs=[-1.0])
for i in N:
cpx.indicator_constraints.add(
indvar=a[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][2],x[i][3],x[i][4],x[i][5],x[i][6]],[1,1,1,1,1]])
cpx.indicator_constraints.add(
indvar=b[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][4],x[i][5],x[i][6]],[1,1,1]])
cpx.indicator_constraints.add(
indvar=d[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][0],x[i][4],x[i][5],x[i][6],x[i][7]],[1,1,1,1,1]])
cpx.indicator_constraints.add(
indvar=e[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][0],x[i][6],x[i][7]],[1,1,1]])
cpx.indicator_constraints.add(
indvar=f[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][0],x[i][2],x[i][1],x[i][6],x[i][7]],[1,1,1,1,1]])
cpx.indicator_constraints.add(
indvar=g[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][0],x[i][1],x[i][2]],[1,1,1]])
cpx.indicator_constraints.add(
indvar=h[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][0],x[i][1],x[i][2],x[i][3],x[i][4]],[1,1,1,1,1]])
cpx.indicator_constraints.add(
indvar=ii[i],
complemented=0,
rhs=0.0,
sense="E",
lin_expr=[[x[i][2],x[i][3],x[i][4]],[1,1,1]])
lazycb = cpx.register_callback(LazyCallback)
lazycb.x = x
lazycb.w = w
lazycb.N = N
lazycb.V = V
lazycb.u = u
lazycb.A = A
lazycb.D = D
lazycb.d = d
#lazycb.b1=b1
lazycb.loc_x=loc_x
lazycb.loc_y=loc_y
thevars=[]
thecoefs=[]
####Mip1=[(1, 10), (2, 16), (5, 31), (8, 13), (9, 8), (10, 11), (11, 15), (12, 2), (13, 18), (14, 9), (15, 14), (16, 22), (17, 12), (18, 19), (19, 20), (20, 21), (21, 17), (22, 5), (23, 28), (24, 23), (25, 24), (26, 32), (27, 30), (28, 29), (29, 5), (30, 25), (31, 26), (32, 27)]
#Mip1=[[7,16],[16,17],[17,21],[21,20],[20,7],[7,12],[12,8],[8,9],[9,13],[13,1],[1,10],[10,11],[11,15],[15,14],[14,3],[3,19],[19,18],[18,22],[22,23],[23,3]]
#Mip1=[[7,10],[10,7],[7,8],[8,1],[1,9],[9,3],[3,11],[11,3]]
#Mip1=[[1,9],[9,8],[8,13],[13,18],[18,19],[19,14],[14,10],[10,1],[1,11],[11,12],[12,16],[16,15],[15,20],[20,21],[21,17],[17,22],[22,3],[3,27],[27,32],[32,31],[31,26],[26,25],[25,30],[30,5],[5,29],[29,28],[28,24],[24,23],[23,7]]
#321Mip1=[(1, 10), (3, 27), (5, 29), (8, 13), (9, 8), (10, 11), (11, 12), (12, 17), (13, 18), (14, 9), (15, 14), (16, 15), (17, 16), (18, 19), (19, 20), (20, 21), (21, 22), (22, 3), (23, 5), (24, 23), (25, 30), (26, 25), (27, 26), (28, 24), (29, 28), (30, 31), (31, 32), (32, 5)]
#Mip1=[[7,16],[16,20],[20,21],[21,17],[17,13],[13,12],[12,8],[8,1],[1,9],[9,10],[10,11],[11,3],[3,15],[15,14],[14,18],[18,22],[22,23],[23,19],[19,3]]
#Mip2=[[5, 68], [68, 69], [69, 70], [70, 71], [71, 63], [63, 62], [62, 61], [61, 60], [60, 52], [52, 53], [53, 54], [54, 55], [55, 47], [47, 46], [46, 45], [45, 44], [44, 5], [5, 67], [67, 66], [66, 65], [65, 64], [64, 56], [56, 57], [57, 58], [58, 59], [59, 51], [51, 50], [50, 49], [49, 48], [48, 40], [40, 41], [41, 42], [42, 43], [43, 7], [7, 32], [32, 33], [33, 34], [34, 35], [35, 27], [27, 26], [26, 25], [25, 24], [24, 16], [16, 17], [17, 18], [18, 19], [19, 11], [11, 10], [10, 9], [9, 8], [8, 1], [1, 12], [12, 13], [13, 14], [14, 15], [15, 23], [23, 22], [22, 21], [21, 20], [20, 28], [28, 29], [29, 30], [30, 31], [31, 39], [39, 38], [38, 37], [37, 36], [36, 1]]
#Mip1=[(1, 9), (5, 16), (7, 14), (8, 11), (9, 10), (10, 13), (11, 7), (12, 8), (13, 12), (14, 15), (15, 5), (16, 5)]
#36#Mip1=[[5, 41], [41, 42], [42, 43], [43, 37], [37, 36], [36, 35], [35, 29], [29, 30], [30, 31], [31, 5], [5, 40], [40, 39], [39, 38], [38, 32], [32, 33], [33, 34], [34, 28], [28, 27], [27, 26], [26, 7], [7, 20], [20, 21], [21, 22], [22, 16], [16, 15], [15, 14], [14, 8], [8, 9], [9, 10], [10, 1], [1, 11], [11, 12], [12, 13], [13, 19], [19, 18], [18, 17], [17, 23], [23, 24], [24, 25], [25, 1]]
#49
#Mip1=[[5, 54], [54, 55], [55, 56], [56, 49], [49, 48], [48, 47], [47, 40], [40, 41], [41, 42],[42,5], [5, 53], [53, 52], [52, 51], [51, 50], [50, 43], [43, 44], [44, 45], [45, 46], [46, 39], [39, 38], [38, 37], [37, 36], [36, 7], [7, 29], [29, 30], [30, 31], [31, 32], [32, 25], [25, 24], [24, 23], [23, 22], [22, 15], [15, 16], [16, 17], [17, 18], [18, 11], [11, 10], [10, 9], [9, 8], [8, 1], [1, 12], [12, 13], [13, 14], [14, 21], [21, 20], [20, 19], [19, 26], [26, 27], [27, 28], [28, 35], [35, 34], [34, 33], [33, 1]]
#81
Mip1=[[5, 85], [85, 86], [86, 87], [87, 88], [88, 79], [79, 78], [78, 77], [77, 76], [76, 67], [67, 68], [68, 69], [69, 70], [70, 61], [61, 60], [60, 59], [59, 58], [58, 57], [57, 66], [66, 75], [75, 84], [84, 5], [5, 83], [83, 82], [82, 81], [81, 80], [80, 71], [71, 72], [72, 73], [73, 74], [74, 65], [65, 64], [64, 63], [63, 62], [62, 53], [53, 54], [54, 55], [55, 56], [56, 7], [7, 44], [44, 45], [45, 46], [46, 47], [47, 48], [48, 39], [39, 38], [38, 37], [37, 36], [36, 35], [35, 26], [26, 27], [27, 28], [28, 29], [29, 30], [30, 21], [21, 20], [20, 19], [19, 18], [18, 17], [17, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 1], [1, 13], [13, 14], [14, 15], [15, 16], [16, 25], [25, 24], [24, 23], [23, 22], [22, 31], [31, 32], [32, 33], [33, 34], [34, 43], [43, 42], [42, 41], [41, 40], [40, 49], [49, 50], [50, 51], [51, 52], [52, 1]]
for i in Mip1:
thevars.append(x[i[0]][i[1]])
thecoefs.append(1)
ee=[]
#ee=[w[5][5],w[5][7],w[7][1],w[1][1]]
aaa=[85, 86, 87, 88, 79, 78, 77, 76, 67, 68, 69, 70, 61, 60, 59, 58, 57, 66, 75, 84,83, 82, 81, 80, 71, 72, 73, 74, 65, 64, 63, 62, 53, 54, 55, 56]
ddd=[44, 45, 46, 47, 48, 39, 38, 37, 36, 35, 26, 27, 28, 29, 30, 21, 20, 19, 18, 17, 8, 9, 10, 11, 12]
ggg=[13, 14, 15, 16, 25, 24, 23, 22, 31, 32, 33, 34, 43, 42, 41, 40, 49, 50, 51, 52]
#see=[w[7][5],w[5][5]]
#aaa=[16]
##ddd=[14,15]
#ggg=[8,9,10,11,12,13]
for i in aaa:
ee.append(g[i])
for i in ddd:
ee.append(ii[i])
for i in ggg:
ee.append(b[i])
#ee=[ii[16],ii[17],ii[21],ii[20],ii1[20][7],ii[12],ii[8],ii[9],ii[13],ii1[13][1],b[10],b[11],b[14],b[15],b1[14][3],e[19],e[18],e[22],e[23],e1[23][3]]
#ee=[ii[10],ii1[10][7],ii[8],ii1[8][1],b[9],b1[9][3],e[11],e1[11][3]]
#ee=[b[9],b[8],b[13],b[18],b[19],b[14],b[10],b[11],b[12],b[16],b[15],b[20],b[21],b[17],b[22],e[27],e[32],e[31],e[26],e[25],e[30],g[29],g[28],g[24],g[23]]
ff=[1]*len(ee)
print(len(ee))
thevars+=ee
thecoefs+=ff
for i in V:
for j in V:
if x[i][j] not in thevars:
thevars.append(x[i][j])
thecoefs.append(0)
#for i in D:
# for j in D:
# if w[i][j] not in thevars:
# thevars.append(w[i][j])
# thecoefs.append(0)
for i in V:
if a[i] not in thevars:
thevars.append(a[i])
thecoefs.append(0)
if b[i] not in thevars:
thevars.append(b[i])
thecoefs.append(0)
if d[i] not in thevars:
thevars.append(d[i])
thecoefs.append(0)
if e[i] not in thevars:
thevars.append(e[i])
thecoefs.append(0)
if f[i] not in thevars:
thevars.append(f[i])
thecoefs.append(0)
if g[i] not in thevars:
thevars.append(g[i])
thecoefs.append(0)
if h[i] not in thevars:
thevars.append(h[i])
thecoefs.append(0)
if ii[i] not in thevars:
thevars.append(ii[i])
thecoefs.append(0)
cpx.MIP_starts.add(cplex.SparsePair(thevars,thecoefs),cpx.MIP_starts.effort_level.repair)
cpx.parameters.mip.limits.repairtries=1000
#cpx.parameters.mip.strategy.startalgorithm.set(4)
cpx.parameters.mip.strategy.rinsheur.set(5)
cpx.write('model5.lp')
cpx.solve()
print(cpx.solution.get_values(x[1][2]))
active_arcs = [r for r in A if cpx.solution.get_values(x[r[0]][r[1]])>0.9]
#print(u[1].solution_value)
plt.scatter(loc_x[1:],loc_y[1:],c='b')
print(active_arcs)
for i,j in active_arcs:
plt.plot([loc_x[i],loc_x[j]],[loc_y[i],loc_y[j]],c='g')
plt.axis('equal')
print(cpx.solution.get_status_string())
print(cpx.solution.get_objective_value())
for i in D:
for j in D:
print(cpx.solution.get_values(w[i][j]),i,j)
#for i in V:
# print(cpx.solution.get_values(p[i]))
"""
for i in V:
for j in D:
print(cpx.solution.get_values(a1[i][j]),i,j)
for i in D:
print(cpx.solution.get_values(t[i]),i)
for j in range(0,8):
print(j)
for i in N:
#print(ii[i])
print(cpx.solution.get_values(a1[i][j]),cpx.solution.get_values(a[i]),"a",i,j)
print(cpx.solution.get_values(b1[i][j]),cpx.solution.get_values(b[i]),"b",i,j)
print(cpx.solution.get_values(d1[i][j]),cpx.solution.get_values(d[i]),"d",i,j)
print(cpx.solution.get_values(e1[i][j]),cpx.solution.get_values(e[i]),"e",i,j)
print(cpx.solution.get_values(f1[i][j]),cpx.solution.get_values(f[i]),"f",i,j)
print(cpx.solution.get_values(g1[i][j]),cpx.solution.get_values(g[i]),"g",i,j)
print(cpx.solution.get_values(h1[i][j]),cpx.solution.get_values(h[i]),"h",i,j)
print(cpx.solution.get_values(ii1[i][j]),cpx.solution.get_values(ii[i]),"ii",i,j)
#print(cpx.solution.get_values(a1[i][j]))
#for j in D:
# print(cpx.solution.get_values(w[i][j]),i,j)
"""
plt.show()
"""
print("ss")
print(cpx.solution.get_values(a1[i][j]))
print(cpx.solution.get_values(b1[i][j]))
print(cpx.solution.get_values(c1[i][j]))
print(cpx.solution.get_values(d1[i][j]))
print(cpx.solution.get_values(e1[i][j]))
print(cpx.solution.get_values(f1[i][j]))
print(cpx.solution.get_values(g1[i][j]))
print(cpx.solution.get_values(h1[i][j]))
"""
"""
po = powerset(act)
po.remove([])
po.remove(act)
for i in po:
thevars=[]
thecoefs=[]
pp=0
for j in i:
for k in act:
if k not in i:
thevars.append(self.x[k][j])
thevars.append(self.x[j][k])
thecoefs.append(1)
thecoefs.append(1)
pp+=self.get_values(self.x[k][j])+self.get_values(self.x[j][k])
if pp==0:
print("sss")
self.add(constraint=cplex.SparsePair(thevars,thecoefs),
sense="G",
rhs=1)
""" |
import json
import logging
import sys
from io import BytesIO
from os import chdir, environ, path, remove
from pathlib import Path
from socket import gethostname
from subprocess import check_output, CalledProcessError, STDOUT
from time import strftime, time
from traceback import format_exc
import graphyte
from geoip import geolite2
from rq import get_current_job
from rq.decorators import job
import qmk_redis
import qmk_storage
from qmk_commands import QMK_FIRMWARE_PATH, QMK_GIT_BRANCH, checkout_qmk, find_firmware_file, store_source, checkout_chibios, checkout_lufa, checkout_vusb, write_version_txt
from qmk_redis import redis
# Runtime configuration, all overridable through environment variables.
DEBUG = int(environ.get('DEBUG', 0))  # non-zero enables debug printing and forces metrics on (see compile_json)
API_URL = environ.get('API_URL', 'https://api.qmk.fm/')  # base URL used when building firmware/source download links
GRAPHITE_HOST = environ.get('GRAPHITE_HOST', 'qmk_metrics_aggregator')  # host graphyte sends build metrics to
GRAPHITE_PORT = int(environ.get('GRAPHITE_PORT', 2023))  # graphite plaintext port
# The `keymap.c` template to use when a keyboard doesn't have its own
DEFAULT_KEYMAP_C = """#include QMK_KEYBOARD_H
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
__KEYMAP_GOES_HERE__
};
"""
# Local Helper Functions
def store_firmware_metadata(job, result):
    """Save `result` as a JSON file along side the firmware.

    Mirrors the rq job state (timestamps, id, finished/failed flags) plus the
    full build `result`, and uploads it to storage as `<id>/<id>.json`.
    """
    timestamp = '%Y-%m-%d %H:%M:%S %Z'
    metadata = {
        'created_at': job.created_at.strftime(timestamp),
        'enqueued_at': job.enqueued_at.strftime(timestamp),
        'id': job.id,
        'is_failed': result['returncode'] != 0,
        'is_finished': True,
        'is_queued': False,
        'is_started': False,
        'result': result,
    }
    payload = BytesIO(json.dumps(metadata).encode('utf-8'))
    qmk_storage.save_fd(payload, '%s/%s.json' % (result['id'], result['id']))
def store_firmware_binary(result):
    """Called while PWD is qmk_firmware to store the firmware hex.

    Uploads the compiled firmware to storage and records its download URL in
    `result['firmware_binary_url']`.  When `result['public_firmware']` is set
    the firmware is also published to the public compile bucket.

    Returns False (and stores nothing) when the build produced no firmware
    file; returns None otherwise.
    """
    # Guard first: don't build storage paths for a firmware that doesn't exist.
    if not result['firmware_filename'] or not path.exists(result['firmware_filename']):
        return False
    firmware_storage_path = '%(id)s/%(firmware_filename)s' % result
    qmk_storage.save_file(result['firmware_filename'], firmware_storage_path)
    # Build the download URL with '/' joins: URLs are not filesystem paths, so
    # os.path.join would emit backslashes on Windows.  Output is identical to
    # the old path.join() form on POSIX.
    result['firmware_binary_url'] = ['/'.join((API_URL.rstrip('/'), 'v1', 'compile', result['id'], 'download'))]
    if result['public_firmware']:
        file_ext = result["firmware_filename"].split(".")[-1]
        file_name = f'compiled/{result["keyboard"]}/default.{file_ext}'
        qmk_storage.save_file(result['firmware_filename'], file_name, bucket=qmk_storage.COMPILE_S3_BUCKET, public=True)
def store_firmware_source(result):
    """Called while PWD is the top-level directory to store the firmware source.

    Uploads the keymap JSON and a zip of the full source tree, then records
    their download URLs in `result`.
    """
    # Upload the keymap JSON next to the rest of this job's artifacts.
    keymap_src = path.join('qmk_firmware', result['keymap_archive'])
    keymap_dest = path.join(result['id'], result['keymap_archive'])
    qmk_storage.save_file(keymap_src, keymap_dest)
    # Archive the full tree; '/' in keyboard/keymap names would break the filename.
    archive_name = ('qmk_firmware-%(keyboard)s-%(keymap)s.zip' % result).replace('/', '-')
    result['source_archive'] = archive_name
    store_source(archive_name, QMK_FIRMWARE_PATH, result['id'])
    result['firmware_keymap_url'] = ['/'.join((API_URL, 'v1', 'compile', result['id'], 'keymap'))]
    result['firmware_source_url'] = ['/'.join((API_URL, 'v1', 'compile', result['id'], 'source'))]
def compile_keymap(job, result):
    """Run the build command and record its outcome in `result`.

    On success `result['returncode']` is 0 and `firmware_filename` points at
    the built firmware.  A build that exits 0 without leaving a firmware file
    on disk is recorded as returncode -4; a failed build stores the command's
    exit code, command line, and combined output.
    """
    logging.debug('Executing build: %s', result['command'])
    try:
        result['output'] = check_output(result['command'], stderr=STDOUT, universal_newlines=True)
        result['returncode'] = 0
        result['firmware_filename'] = find_firmware_file()
        if not result['firmware_filename']:
            # Build returned success but no firmware file on disk.
            # BUGFIX: this used to assign `result['return_code']` — a key
            # nothing reads — so the missing-firmware case looked successful.
            result['returncode'] = -4
    except CalledProcessError as build_error:
        print('Could not build firmware (%s): %s' % (build_error.cmd, build_error.output))
        result['returncode'] = build_error.returncode
        result['cmd'] = build_error.cmd
        result['output'] = build_error.output
@job('default', connection=redis, timeout=900)
def compile_json(keyboard_keymap_data, source_ip=None, send_metrics=True, public_firmware=False):
    """Compile a keymap.

    Arguments:
        keyboard_keymap_data
            A configurator export file that's been deserialized
        source_ip
            The IP that submitted the compile job
        send_metrics
            Whether to report build timings/counters to graphite
        public_firmware
            Whether to also publish the firmware to the public bucket

    Returns the build `result` dict.  returncode conventions: 0 success,
    -1 unknown keyboard, -2 never ran, -3 exception, -4 no firmware produced.
    """
    start_time = time()
    base_metric = f'{gethostname()}.qmk_compiler.compile_json'
    result = {
        'keyboard': 'unknown',
        'returncode': -2,
        # BUGFIX: this dict used to define 'output' twice; the second value
        # won, so the single effective initializer is kept here.
        'output': 'Unknown error',
        'firmware': None,
        'firmware_filename': '',
        'source_ip': source_ip,
        'public_firmware': public_firmware,
    }
    # BUGFIX: fetch the job handle before the try block so the final
    # store_firmware_metadata(job, ...) call can't hit an unbound `job`
    # when an exception fires early in the try body.
    job = get_current_job()
    if DEBUG:
        print('Pointing graphite at', GRAPHITE_HOST)
        send_metrics = True
    if send_metrics:
        graphyte.init(GRAPHITE_HOST, GRAPHITE_PORT)
    try:
        for key in ('keyboard', 'layout', 'keymap'):
            result[key] = keyboard_keymap_data[key]
        # Gather information
        result['keymap_archive'] = '%s-%s.json' % (result['keyboard'].replace('/', '-'), result['keymap'].replace('/', '-'))
        result['keymap_json'] = json.dumps(keyboard_keymap_data)
        result['command'] = ['qmk', 'compile', result['keymap_archive']]
        result['id'] = job.id
        branch = keyboard_keymap_data.get('branch', QMK_GIT_BRANCH)
        converter = keyboard_keymap_data.get('converter', None)
        # Fetch the appropriate version of QMK
        git_start_time = time()
        checkout_qmk(branch=branch)
        git_time = time() - git_start_time
        chdir(QMK_FIRMWARE_PATH)
        # Sanity check
        if not path.exists('keyboards/' + result['keyboard']):
            print('Unknown keyboard: %s' % (result['keyboard'],))
            return {'returncode': -1, 'command': '', 'output': 'Unknown keyboard!', 'firmware': None}
        # Pull in the modules from the QMK we just checked out
        if './lib/python' not in sys.path:
            sys.path.append('./lib/python')
        from qmk.info import info_json
        # If this keyboard needs a submodule check it out
        submodule_start_time = time()
        kb_info = info_json(result['keyboard'])
        if 'protocol' not in kb_info:
            kb_info['protocol'] = 'unknown'
        # FIXME: Query qmk_firmware as not all converters will be ChibiOS
        if converter:
            kb_info['protocol'] = 'ChibiOS'
        if kb_info['protocol'] in ['ChibiOS', 'LUFA']:
            checkout_lufa()
        if kb_info['protocol'] == 'ChibiOS':
            checkout_chibios()
        if kb_info['protocol'] == 'V-USB':
            checkout_vusb()
        submodule_time = time() - submodule_start_time
        # Write the keymap file
        with open(result['keymap_archive'], 'w') as fd:
            fd.write(result['keymap_json'] + '\n')
        # Compile the firmware
        compile_start_time = time()
        compile_keymap(job, result)
        compile_time = time() - compile_start_time
        # Store the source in S3
        storage_start_time = time()
        store_firmware_binary(result)
        chdir('..')
        if not public_firmware:
            store_firmware_source(result)
        storage_time = time() - storage_start_time
        # Send metrics about this build
        if send_metrics:
            graphyte.send(f'{base_metric}.{result["keyboard"]}.all_layouts', 1)
            graphyte.send(f'{base_metric}.{result["keyboard"]}.{result["layout"]}', 1)
            graphyte.send(f'{base_metric}.{result["keyboard"]}.git_time', git_time)
            graphyte.send(f'{base_metric}.all_keyboards.git_time', git_time)
            graphyte.send(f'{base_metric}.{result["keyboard"]}.submodule_time', submodule_time)
            graphyte.send(f'{base_metric}.all_keyboards.submodule_time', submodule_time)
            # FIXME(review): compile_time is reported twice on a successful
            # build (here and in the returncode==0 branch below).  Kept as-is
            # so existing dashboards don't halve; confirm before removing.
            graphyte.send(f'{base_metric}.{result["keyboard"]}.compile_time', compile_time)
            graphyte.send(f'{base_metric}.all_keyboards.compile_time', compile_time)
            if result['returncode'] == 0:
                graphyte.send(f'{base_metric}.{result["keyboard"]}.compile_time', compile_time)
                graphyte.send(f'{base_metric}.all_keyboards.compile_time', compile_time)
            else:
                graphyte.send(f'{base_metric}.{result["keyboard"]}.errors', 1)
            if source_ip:
                ip_location = geolite2.lookup(source_ip)
                if ip_location:
                    if ip_location.subdivisions:
                        location_key = f'{ip_location.country}_{"_".join(ip_location.subdivisions)}'
                    else:
                        location_key = ip_location.country
                    graphyte.send(f'{gethostname()}.qmk_compiler.geoip.{location_key}', 1)
            total_time = time() - start_time
            graphyte.send(f'{base_metric}.{result["keyboard"]}.storage_time', storage_time)
            graphyte.send(f'{base_metric}.all_keyboards.storage_time', storage_time)
            graphyte.send(f'{base_metric}.{result["keyboard"]}.total_time', total_time)
            graphyte.send(f'{base_metric}.all_keyboards.total_time', total_time)
    except Exception as e:
        result['returncode'] = -3
        result['exception'] = e.__class__.__name__
        result['stacktrace'] = format_exc()
        if send_metrics:
            graphyte.send(f'{base_metric}.{result["keyboard"]}.errors', 1)
    store_firmware_metadata(job, result)
    return result
@job('default', connection=redis)
def ping():
    """Write a timestamp to redis so we know at least one worker is alive."""
    now = time()
    return redis.set('qmk_api_last_ping', now)
|
import numpy as np
# for reproducibility
np.random.seed(1337)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop

# Unlike the regressor example, where layers were attached one model.add()
# call at a time, here every layer is handed to Sequential() in a single
# list.  Picture a pipe: data drops from one segment into the next.  The
# first Dense segment maps the 784 input features to 32 outputs; relu then
# makes the signal non-linear.  The second Dense layer takes whatever the
# previous layer emitted (no input_dim needed) and produces 10 features,
# which the final softmax turns into class probabilities.

# 1.1. download data
# MNIST is fetched to ~/.keras/datasets/ on the first call.
# X shape (60000, 28, 28), y shape (60000,) / (10000,)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# 1.2. data pre-processing
print(X_train.shape)
# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
X_train = X_train.reshape(X_train.shape[0], -1) / 255.  # normalize
print(X_train.shape)
X_test = X_test.reshape(X_test.shape[0], -1) / 255.  # normalize
# One-hot encode the labels (e.g. digit 3 -> [0,0,0,1,0,0,0,0,0,0]).
y_train = np_utils.to_categorical(y_train, num_classes=10)
print('-' * 10)
print(y_test[0])
y_test = np_utils.to_categorical(y_test, num_classes=10)
print(y_test[0])

# 2. define model
# softmax is the usual output activation for multi-class classification.
model = Sequential([
    Dense(32, input_dim=784),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

# 3. choose loss function and optimizing method
# Build the optimizer explicitly instead of passing the string 'rmsprop'.
rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Adding metrics makes fit()/evaluate() report accuracy alongside the loss.
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# 4. train
model.fit(X_train, y_train, epochs=2, batch_size=32)

# 5. test
loss, accuracy = model.evaluate(X_test, y_test)
print('test loss: ', loss)
print('test accuracy: ', accuracy)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.