blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30fae2d3f59753bd1fcccb3c3e55f5caf7f1e023 | 3f7bd3d8399938f1fefe7a25a2310913250ba6ad | /tape/tape/main.py | f96f7a9c0de0787cc5f4a48a53d78857abcd0690 | [
"BSD-3-Clause"
] | permissive | MachineLearningLifeScience/meaningful-protein-representations | e9893667d17e101aa5bfc62afcef7c5ac3a8a0b0 | 3c4eb46c5badcfca692e82335ad4158bd91a6b43 | refs/heads/master | 2023-04-11T03:10:48.272329 | 2022-03-02T16:56:17 | 2022-03-02T16:56:17 | 357,120,971 | 87 | 9 | null | null | null | null | UTF-8 | Python | false | false | 13,000 | py | import typing
import os
import logging
import argparse
import warnings
import inspect
try:
import apex # noqa: F401
APEX_FOUND = True
except ImportError:
APEX_FOUND = False
from .registry import registry
from . import training
from . import utils
CallbackList = typing.Sequence[typing.Callable]
OutputDict = typing.Dict[str, typing.List[typing.Any]]
logger = logging.getLogger(__name__)
warnings.filterwarnings( # Ignore pytorch warning about loss gathering
'ignore', message='Was asked to gather along dimension 0', module='torch.nn.parallel')
def create_base_parser() -> argparse.ArgumentParser:
    """Build the parent parser holding the options shared by every TAPE entry point.

    Returns:
        An ``argparse.ArgumentParser`` created with ``add_help=False`` so that it
        can be supplied via ``parents=[...]`` to the task-specific parsers without
        introducing a duplicate ``-h`` option.
    """
    parser = argparse.ArgumentParser(description='Parent parser for tape functions',
                                     add_help=False)
    parser.add_argument('model_type', help='Base model class to run')
    parser.add_argument('--model_config_file', default=None, type=utils.check_is_file,
                        help='Config file for model')
    parser.add_argument('--vocab_file', default=None,
                        help='Pretrained tokenizer vocab file')
    parser.add_argument('--output_dir', default='./results', type=str)
    parser.add_argument('--no_cuda', action='store_true', help='CPU-only flag')
    parser.add_argument('--seed', default=42, type=int, help='Random seed to use')
    parser.add_argument('--local_rank', type=int, default=-1,
                        help='Local rank of process in distributed training. '
                        'Set by launch script.')
    parser.add_argument('--tokenizer', choices=['iupac', 'unirep'],
                        default='iupac', help='Tokenizes to use on the amino acid sequences')
    parser.add_argument('--num_workers', default=8, type=int,
                        help='Number of workers to use for multi-threaded data loading')
    # NOTE(review): the default is the int logging.INFO while command-line values
    # arrive as strings; the choices list accepts both forms, so downstream code
    # presumably normalizes them — confirm before changing.
    parser.add_argument('--log_level', default=logging.INFO,
                        choices=['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR',
                                 logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR],
                        help="log level for the experiment")
    parser.add_argument('--debug', action='store_true', help='Run in debug mode')
    return parser
def create_train_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description='Run Training on the TAPE datasets',
parents=[base_parser])
parser.add_argument('task', choices=list(registry.task_name_mapping.keys()),
help='TAPE Task to train/eval on')
parser.add_argument('--learning_rate', default=1e-4, type=float,
help='Learning rate')
parser.add_argument('--batch_size', default=1024, type=int,
help='Batch size')
parser.add_argument('--data_dir', default='./data', type=utils.check_is_dir,
help='Directory from which to load task data')
parser.add_argument('--num_train_epochs', default=10, type=int,
help='Number of training epochs')
parser.add_argument('--num_steps_per_epoch', default=-1, type=int,
help='Number of steps per epoch')
parser.add_argument('--num_log_iter', default=20, type=int,
help='Number of training steps per log iteration')
parser.add_argument('--fp16', action='store_true', help='Whether to use fp16 weights')
parser.add_argument('--warmup_steps', default=10000, type=int,
help='Number of learning rate warmup steps')
parser.add_argument('--gradient_accumulation_steps', default=1, type=int,
help='Number of forward passes to make for each backwards pass')
parser.add_argument('--loss_scale', default=0, type=int,
help='Loss scaling. Only used during fp16 training.')
parser.add_argument('--max_grad_norm', default=1.0, type=float,
help='Maximum gradient norm')
parser.add_argument('--exp_name', default=None, type=str,
help='Name to give to this experiment')
parser.add_argument('--from_pretrained', default=None, type=str,
help='Directory containing config and pretrained model weights')
parser.add_argument('--log_dir', default='./logs', type=str)
parser.add_argument('--eval_freq', type=int, default=1,
help="Frequency of eval pass. A value <= 0 means the eval pass is "
"not run")
parser.add_argument('--save_freq', default='improvement', type=utils.int_or_str,
help="How often to save the model during training. Either an integer "
"frequency or the string 'improvement'")
parser.add_argument('--patience', default=-1, type=int,
help="How many epochs without improvement to wait before ending "
"training")
parser.add_argument('--resume_from_checkpoint', action='store_true',
help="whether to resume training from the checkpoint")
parser.add_argument('--val_check_frac', default=1.0, type=float,
help="Fraction of validation to check")
return parser
def create_eval_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description='Run Eval on the TAPE Datasets',
parents=[base_parser])
parser.add_argument('task', choices=list(registry.task_name_mapping.keys()),
help='TAPE Task to train/eval on')
parser.add_argument('from_pretrained', type=str,
help='Directory containing config and pretrained model weights')
parser.add_argument('--batch_size', default=1024, type=int,
help='Batch size')
parser.add_argument('--data_dir', default='./data', type=utils.check_is_dir,
help='Directory from which to load task data')
parser.add_argument('--metrics', default=[],
help=f'Metrics to run on the result. '
f'Choices: {list(registry.metric_name_mapping.keys())}',
nargs='*')
parser.add_argument('--split', default='test', type=str,
help='Which split to run on')
return parser
def create_embed_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description='Embed a set of proteins with a pretrained model',
parents=[base_parser])
parser.add_argument('data_file', type=str,
help='File containing set of proteins to embed')
parser.add_argument('out_file', type=str,
help='Name of output file')
parser.add_argument('from_pretrained', type=str,
help='Directory containing config and pretrained model weights')
parser.add_argument('--batch_size', default=1024, type=int,
help='Batch size')
parser.add_argument('--full_sequence_embed', action='store_true',
help='If true, saves an embedding at every amino acid position '
'in the sequence. Note that this can take a large amount '
'of disk space.')
parser.set_defaults(task='embed')
return parser
def create_distributed_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(add_help=False, parents=[base_parser])
# typing.Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
return parser
def create_model_parser(base_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Extend ``base_parser`` with a catch-all ``--model_args`` option.

    Everything after ``--model_args`` is captured verbatim
    (``argparse.REMAINDER``) so model-specific flags can be forwarded without
    this parser needing to know them.
    """
    model_parser = argparse.ArgumentParser(add_help=False, parents=[base_parser])
    model_parser.add_argument('--model_args', default=None, nargs=argparse.REMAINDER)
    return model_parser
def run_train(args: typing.Optional[argparse.Namespace] = None, env=None) -> None:
    """Entry point for single-node training.

    Args:
        args: Pre-parsed arguments. When ``None``, the full train CLI is parsed
            from ``sys.argv``.
        env: Optional mapping of environment variables to install before
            training (used when launched as a distributed worker process).

    Raises:
        ValueError: If ``gradient_accumulation_steps`` is < 1.
        ImportError: If fp16 or distributed training is requested but apex is
            not installed.
        RuntimeError: If the parsed arguments are missing any parameter that
            ``training.run_train`` requires.
    """
    if env is not None:
        # Merge into the real environment rather than rebinding os.environ:
        # assignment replaces the special os.environ mapping with a plain dict,
        # so os.putenv is bypassed and the variables never reach the underlying
        # C environment (invisible to native libraries and child processes).
        os.environ.update(env)
    if args is None:
        base_parser = create_base_parser()
        train_parser = create_train_parser(base_parser)
        model_parser = create_model_parser(train_parser)
        args = model_parser.parse_args()
    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            f"Invalid gradient_accumulation_steps parameter: "
            f"{args.gradient_accumulation_steps}, should be >= 1")
    if (args.fp16 or args.local_rank != -1) and not APEX_FOUND:
        raise ImportError(
            "Please install apex from https://www.github.com/nvidia/apex "
            "to use distributed and fp16 training.")
    # Forward only the arguments that training.run_train actually declares.
    arg_dict = vars(args)
    arg_names = inspect.getfullargspec(training.run_train).args
    missing = set(arg_names) - set(arg_dict.keys())
    if missing:
        raise RuntimeError(f"Missing arguments: {missing}")
    train_args = {name: arg_dict[name] for name in arg_names}
    training.run_train(**train_args)
def run_eval(args: typing.Optional[argparse.Namespace] = None) -> typing.Dict[str, float]:
    """Entry point for the evaluation pass.

    Parses the eval CLI when ``args`` is not supplied, validates the flags,
    then forwards exactly the parameters that ``training.run_eval`` declares.

    Returns:
        The metric-name -> value mapping produced by ``training.run_eval``.

    Raises:
        ValueError: If no pretrained model is given, or distributed mode is
            requested (evaluation is single-process only).
        RuntimeError: If a required ``training.run_eval`` argument is missing
            from the parsed namespace.
    """
    if args is None:
        base_parser = create_base_parser()
        parser = create_eval_parser(base_parser)
        parser = create_model_parser(parser)
        args = parser.parse_args()
    if args.from_pretrained is None:
        raise ValueError("Must specify pretrained model")
    if args.local_rank != -1:
        raise ValueError("TAPE does not support distributed validation pass")
    # Select only the arguments training.run_eval actually accepts.
    arg_dict = vars(args)
    arg_names = inspect.getfullargspec(training.run_eval).args
    missing = set(arg_names) - set(arg_dict.keys())
    if missing:
        raise RuntimeError(f"Missing arguments: {missing}")
    eval_args = {name: arg_dict[name] for name in arg_names}
    return training.run_eval(**eval_args)
def run_embed(args: typing.Optional[argparse.Namespace] = None) -> None:
    """Entry point for embedding a set of proteins with a pretrained model.

    Parses the embed CLI when ``args`` is not supplied, validates the flags,
    then forwards exactly the parameters that ``training.run_embed`` declares.

    Raises:
        ValueError: If no pretrained model is given, or distributed mode is
            requested.
        RuntimeError: If a required ``training.run_embed`` argument is missing
            from the parsed namespace.
    """
    if args is None:
        base_parser = create_base_parser()
        parser = create_embed_parser(base_parser)
        parser = create_model_parser(parser)
        args = parser.parse_args()
    if args.from_pretrained is None:
        raise ValueError("Must specify pretrained model")
    if args.local_rank != -1:
        raise ValueError("TAPE does not support distributed validation pass")
    # Select only the arguments training.run_embed actually accepts.
    arg_dict = vars(args)
    arg_names = inspect.getfullargspec(training.run_embed).args
    missing = set(arg_names) - set(arg_dict.keys())
    if missing:
        raise RuntimeError(f"Missing arguments: {missing}")
    embed_args = {name: arg_dict[name] for name in arg_names}
    training.run_embed(**embed_args)
def run_train_distributed(args: typing.Optional[argparse.Namespace] = None) -> None:
    """Runs distributed training via multiprocessing.

    Parses the combined distributed + train CLI when ``args`` is not supplied,
    resolves the experiment name once up front (so every worker process agrees
    on it without any inter-process communication), and then launches
    ``run_train`` in ``nproc_per_node`` processes per node through
    ``utils.launch_process_group``.
    """
    if args is None:
        base_parser = create_base_parser()
        distributed_parser = create_distributed_parser(base_parser)
        distributed_train_parser = create_train_parser(distributed_parser)
        parser = create_model_parser(distributed_train_parser)
        args = parser.parse_args()
    # Define the experiment name here, instead of dealing with barriers and communication
    # when getting the experiment name
    exp_name = utils.get_expname(args.exp_name, args.task, args.model_type)
    args.exp_name = exp_name
    utils.launch_process_group(
        run_train, args, args.nproc_per_node, args.nnodes,
        args.node_rank, args.master_addr, args.master_port)
if __name__ == '__main__':
run_train_distributed()
| [
"skaftenicki@gmail.com"
] | skaftenicki@gmail.com |
d4901fbfedfbce16749f96877abc6af30c33b1f5 | 9c08f48500db81e16d05fd299ac4e57f815fc893 | /tests/integration/test_consumers.py | f51820880f4f18b73f853636a45b981e5fa2245b | [
"MIT"
] | permissive | dailymuse/musekafka-py | 9886a633ea194091d14e65a7fa773deda46d9d48 | 3aec3d5ae620d5760733b2d9b73e9ac135dbd875 | refs/heads/main | 2023-02-02T02:25:49.589972 | 2020-12-08T21:17:05 | 2020-12-08T21:17:05 | 311,790,018 | 1 | 0 | MIT | 2020-12-08T21:17:07 | 2020-11-10T21:33:23 | Python | UTF-8 | Python | false | false | 10,646 | py | from typing import Iterator, List
import pytest
from confluent_kafka import OFFSET_BEGINNING, SerializingProducer, TopicPartition
from confluent_kafka.admin import AdminClient, NewTopic
from confluent_kafka.avro import AvroConsumer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from musekafka import consumers
from musekafka.message import stream as message_stream
GOOD_SCHEMA = """
{
"type": "record",
"name": "Test",
"namespace": "musekafka.test",
"fields": [
{
"name": "test",
"type": ["null", "string"],
"default": null
}
]
}
"""
class Goofed(Exception):
    """Sentinel exception raised inside tests to verify failure handling paths."""
@pytest.fixture(scope="module")
def producer(broker_host: str, registry: SchemaRegistryClient) -> SerializingProducer:
return SerializingProducer(
{
"bootstrap.servers": broker_host,
"value.serializer": AvroSerializer(GOOD_SCHEMA, registry),
}
)
@pytest.fixture(scope="module")
def topics(
broker_host: str, admin: AdminClient, registry: SchemaRegistryClient
) -> Iterator[List[str]]:
# Seed data for the tests. We do this up-front, as otherwise the tests
# will be too slow.
topic_futures = admin.create_topics(
[NewTopic("musekafka_consumer_test", 1, 1)], operation_timeout=20
)
for _, future in topic_futures.items():
future.result() # Block until the topic gets created.
topics = list(topic_futures.keys())
yield topics
admin.delete_topics(topics, operation_timeout=20)
subjects = [f"{topic}-value" for topic in topics]
for subj in registry.get_subjects():
if subj in subjects:
registry.delete_subject(subj)
@pytest.fixture(scope="module")
def messages(producer: SerializingProducer, topics: List[str]) -> List[dict]:
test_messages = [{"test": f"TESTING!{i}"} for i in range(4)]
test_messages.append({"test": "FAIL."})
test_messages.extend([{"test": f"TESTING AFTER!{i}"} for i in range(4)])
for topic in topics:
for message in test_messages:
producer.produce(topic, value=message)
return test_messages
@pytest.fixture
def app(broker_host: str, registry_url: str, topics: List[str]) -> consumers.App:
consumer_app = consumers.App(
"testconsumers",
[broker_host],
topics,
consumer_cls=AvroConsumer,
schema_registry_url=registry_url,
app_mode=False,
from_beginning=True,
close_after_consume=False,
timeout=10,
)
yield consumer_app
consumer_app.close()
def test_app_stream(app: consumers.App, messages: List[dict]):
"""App.stream consumes all data up to count."""
with app.stream(count=len(messages)) as stream:
actual_messages = [msg.value() for msg in stream]
assert actual_messages == messages
def test_app_batch(app: consumers.App, messages: List[dict]):
"""App.batch consumes a batch of messages."""
with app.batch(batch_size=2, batch_count=2) as batches:
first_batch, second_batch = list(batches)
assert [m.value() for m in first_batch] == messages[:2]
assert [m.value() for m in second_batch] == messages[2:4]
def test_app_stream_fail(app: consumers.App, messages: List[dict]):
"""App.stream exits on exception, and does not commit offsets."""
actual_messages = []
with pytest.raises(Goofed):
with app.stream() as stream:
for msg in stream:
if msg.value()["test"] == "FAIL.":
raise Goofed("Dun goofed.")
actual_messages.append(msg.value())
# Only got the first four messages before we enocuntered an error.
fail_msg_idx = messages.index({"test": "FAIL."})
assert actual_messages == messages[:fail_msg_idx]
with pytest.raises(Goofed):
with app.stream() as stream:
for msg in stream:
if msg.value()["test"] == "FAIL.":
raise Goofed("Dun goofed.")
actual_messages.append(msg.value())
# Should not have received any additional messages.
assert actual_messages == messages[:fail_msg_idx]
# Let's just consume the remaining messages to check that
# we do consume everything.
with app.stream(count=len(messages[fail_msg_idx:]) + 1) as stream:
for msg in stream:
actual_messages.append(msg.value())
assert actual_messages == messages
def test_app_batch_fail(app: consumers.App, messages: List[dict]):
"""App.batch exits on exception, and does not commit offsets."""
actual_messages = []
with pytest.raises(Goofed):
with app.batch(batch_size=4) as batch:
for msgs in batch:
for msg in msgs:
if msg.value()["test"] == "FAIL.":
raise Goofed("Dun goofed.")
actual_messages.append(msg.value())
# Only got the first four messages before we enocuntered an error.
fail_msg_idx = messages.index({"test": "FAIL."})
assert actual_messages == messages[:fail_msg_idx]
with pytest.raises(Goofed):
with app.batch(batch_size=4) as batch:
for msgs in batch:
for msg in msgs:
if msg.value()["test"] == "FAIL.":
raise Goofed("Dun goofed.")
actual_messages.append(msg.value())
# Should not have received any additional messages.
assert actual_messages == messages[:fail_msg_idx]
# Let's just consume the remaining messages to check that
# we do consume everything.
with app.batch(batch_size=1, batch_count=len(messages[fail_msg_idx:]) + 1) as batch:
for msgs in batch:
for msg in msgs:
actual_messages.append(msg.value())
assert actual_messages == messages
def test_app_consume_from_end(
app: consumers.App, topics: List[str], producer: SerializingProducer, messages: List[dict]
):
"""App.consume consumes from the tail end of the topic on first startup."""
app.from_beginning = False
actual_messages = []
with app.consume(message_stream, timeout=0.5) as msgs:
actual_messages.extend(msgs)
# Will not have consumed anything, since we are tailing.
assert not actual_messages
# Now we add an additional message. We should consume it.
for topic in topics:
producer.produce(topic, value={"test": "An additional message!"})
with app.consume(message_stream, count=1) as msgs:
actual_messages.extend([msg.value() for msg in msgs])
assert actual_messages == [{"test": "An additional message!"}]
def test_app_consume_topic_partitions(app: consumers.App, topics: List[str], messages: List[dict]):
"""App.consume consumes starting from the given topic partitions."""
offset = len(messages) - 3
app.from_beginning = False
app.topic_partitions = [TopicPartition(topic, 0, offset) for topic in topics]
expected_messages = messages[offset:]
actual_messages = []
with app.consume(message_stream, count=len(expected_messages)) as msgs:
actual_messages.extend([msg.value() for msg in msgs])
assert actual_messages == expected_messages
def test_app_mode(app: consumers.App):
"""App.consume raises SystemExit in app mode."""
app.app_mode = True
with pytest.raises(SystemExit):
with app.consume(message_stream, count=1) as msgs:
list(msgs)
def test_app_mode_exception(app: consumers.App, messages: List[dict]):
"""App.consume raises SystemExit in app mode if an underlying exception occurs."""
app.app_mode = True
with pytest.raises(SystemExit):
with app.consume(message_stream, count=len(messages)) as msgs:
for msg in msgs:
if msg.value()["test"] == "FAIL.":
raise Goofed("Dun goofed.")
def test_app_start_for_assign(app: consumers.App):
"""App.start assigns the consumer."""
app.start()
assert app.started
assert len(app.consumer.assignment()) == 1
def test_app_start_for_subscribe(
app: consumers.App, topics: List[str], producer: SerializingProducer
):
"""App.start subscribes the consumer."""
def on_assign(consumer, _partitions):
for topic in topics:
producer.produce(topic, value={"test": "Assigned."})
app.from_beginning = False
# on_assign is only called for subscribe, so this is a good
# check that we subscribed rather than made an explicit assignment.
app.on_assign = on_assign
app.start()
assert app.started, "consumer not started."
success = False
for msg in message_stream(app.consumer, timeout=20):
if msg.value()["test"] == "Assigned.":
success = True
break
assert success, "consumer not assigned."
def test_app_stop_for_assign(app: consumers.App):
"""App.stop unassigns the consumer."""
# Need to start consumer for stop to have any impact.
app.start()
# assign will block, so we can immediately call stop.
app.stop()
assert not app.started, "app is still running."
assert not app.consumer.assignment(), "consumer still has an assignment"
def test_app_stop_for_subscribe(
app: consumers.App, topics: List[str], producer: SerializingProducer
):
"""App.stop unassigns the consumer."""
subscribed = False
# Need to start consumer for stop to have any impact.
def on_assign(_consumer, _partitions):
nonlocal subscribed
subscribed = True
for topic in topics:
producer.produce(topic, value={"test": "Assigned."})
def on_revoke(consumer, partitions):
for topic in topics:
consumer.assign([TopicPartition(topic, 0, OFFSET_BEGINNING)])
producer.produce(topic, value={"test": "Revoked."})
# setting from_beginning to False will put us in subscribe mode.
app.from_beginning = False
app.on_assign = on_assign
app.on_revoke = on_revoke
app.start()
for msg in message_stream(app.consumer, timeout=20):
# Don't really care which message we get. Just need something.
break
assert subscribed, "consumer never got subscribed."
app.stop()
assert not app.started, "app is still running."
for msg in message_stream(app.consumer, timeout=20):
# Don't really care which message we get. Just need something.
if msg.value() == {"test": "Revoked."}:
subscribed = False
break
assert not subscribed, "consumer is still subscribed."
| [
"ian@themuse.com"
] | ian@themuse.com |
f786d674190c6174648ee591d91f22d67c0e17a4 | 2e71a55ef8cb99eb57b3525f6c365c27ccb5c404 | /django_mariadb_travis/urls.py | f71511c1bca858b1f86a0e44856bf4017ebb754a | [] | no_license | OskarPersson/django-mariadb-travis | a6f2b26ae2888ed990fac0c1832c8ef897dfab26 | 48c4c8a94b292dd5711739cef6b6ef80bf185261 | refs/heads/master | 2022-04-30T19:50:33.767711 | 2019-09-01T11:21:56 | 2019-09-01T11:21:56 | 205,528,038 | 0 | 0 | null | 2022-04-22T22:16:42 | 2019-08-31T10:09:50 | Python | UTF-8 | Python | false | false | 763 | py | """django_mariadb_travis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"oskar.persson@polken.se"
] | oskar.persson@polken.se |
54765f3e4b87157be1e6fcd80c8ddb90a5a8c1f5 | 394a6e1bd557507fbf41505d9e9bbf36006d6c62 | /hubspot_blog/__init__.py | 29676a48c1ac4e3b8645fd97426a07c6aba9c371 | [
"MIT"
] | permissive | mammuth/djangocms-hubspot-blog | d77b44f6e5c9116cceebb9239a8f2bfcab62d667 | 53bfad37a2e4e45468ad7373172e25a7a7d17d34 | refs/heads/master | 2020-03-15T13:15:36.074508 | 2018-05-04T16:10:31 | 2018-05-04T16:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # -*- coding: utf-8 -*-
__version__ = '0.1.0'
default_app_config = 'hubspot_blog.apps.HubspotBlogConfig' | [
"max@blueshoe.de"
] | max@blueshoe.de |
5d351eb6deec46f5944082944a1f2379c2ddf7e7 | 085703b26619cf84a31040e6736a635ef879bf49 | /get_usb_string_description.py | 61702274f2b73d553d4c9a48704f671954e59c0a | [] | no_license | sunduoze/CP2112_test | 5b161c12ff17618aa853735294f203a3643ca344 | fd2e642628f84146a326031b7a7e82c5bab33bb4 | refs/heads/main | 2023-09-03T23:28:32.929386 | 2021-11-04T02:27:24 | 2021-11-04T02:27:24 | 384,299,006 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,380 | py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import string
import struct
import sys
from base64 import decode
import win32api
import win32file
import pywintypes
def CTL_CODE(DeviceType, Function, Method, Access):
    """Compose a Windows IOCTL control code from its four bit-fields.

    Mirrors the CTL_CODE macro from the Windows DDK: device type occupies
    bits 16-31, access bits 14-15, function bits 2-13, method bits 0-1.
    """
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    return code | Method
def USB_CTL(id):
    """Build a buffered, any-access IOCTL code for the USB device type (0x22)."""
    # CTL_CODE(FILE_DEVICE_USB, (id), METHOD_BUFFERED, FILE_ANY_ACCESS)
    return CTL_CODE(0x22, id, 0, 0)
IOCTL_USB_GET_ROOT_HUB_NAME = USB_CTL(258) # HCD_GET_ROOT_HUB_NAME
IOCTL_USB_GET_NODE_INFORMATION = USB_CTL(258) # USB_GET_NODE_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_INFORMATION = USB_CTL(259) # USB_GET_NODE_CONNECTION_INFORMATION
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME = USB_CTL(264) # USB_GET_NODE_CONNECTION_DRIVERKEY_NAME
IOCTL_USB_GET_NODE_CONNECTION_NAME = USB_CTL(261) # USB_GET_NODE_CONNECTION_NAME
IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION = USB_CTL(260) # USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION
USB_CONFIGURATION_DESCRIPTOR_TYPE = 2
USB_STRING_DESCRIPTOR_TYPE = 3
USB_INTERFACE_DESCRIPTOR_TYPE = 4
MAXIMUM_USB_STRING_LENGTH = 255
def open_dev(name):
    r"""Open a device path for writing; return a Win32 handle or None on failure.

    Args:
        name: Device path such as ``\\.\HCD0`` or a hub device name.

    Returns:
        A pywin32 file handle opened with GENERIC_WRITE / FILE_SHARE_WRITE, or
        ``None`` if CreateFile raised (e.g. the device does not exist or access
        was denied).
    """
    try:
        handle = win32file.CreateFile(name,
                                      win32file.GENERIC_WRITE,
                                      win32file.FILE_SHARE_WRITE,
                                      None,
                                      win32file.OPEN_EXISTING,
                                      0,
                                      None)
    except pywintypes.error as e:
        return None
    return handle
def get_root_hub_name(handle):
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_ROOT_HUB_NAME,
None,
6,
None)
act_len, _ = struct.unpack('LH', buf)
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_ROOT_HUB_NAME,
None,
act_len,
None)
return buf[4:].decode('utf-16le')
def get_driverkey_name(handle, index):
key_name = chr(index) + '\0' * 9
try:
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
key_name,
10,
None)
except pywintypes.error as e:
print(e.strerror, index)
sys.exit(1)
_, act_len, _ = struct.unpack('LLH', buf)
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME,
key_name,
act_len,
None)
return buf[8:].decode('utf-16le')
def get_ext_hub_name(handle, index):
hub_name = chr(index) + '\0' * 9
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_NODE_CONNECTION_NAME,
hub_name,
10,
None)
_, act_len, _ = struct.unpack('LLH', buf)
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_NODE_CONNECTION_NAME,
hub_name,
act_len,
None)
return buf[8:].decode('utf-16le')
def get_str_desc(handle, conn_idx, str_idx):
req = struct.pack('LBBHHH',
conn_idx,
0,
0,
(USB_STRING_DESCRIPTOR_TYPE << 8) | str_idx,
win32api.GetSystemDefaultLangID(),
12 + MAXIMUM_USB_STRING_LENGTH)
try:
buf = win32file.DeviceIoControl(handle,
IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION,
req,
12 + MAXIMUM_USB_STRING_LENGTH,
None)
except pywintypes.error as e:
return 'ERROR: no String Descriptor for index {}'.format(str_idx)
if len(buf) > 16:
return buf[14:].decode('utf-16le')
return ''
def exam_hub(name, verbose, level):
    """Open an external hub by device name and print all of its ports.

    Args:
        name: Hub device name (as returned by ``get_ext_hub_name``).
        verbose: Whether to also print empty ports and hub entries.
        level: Current indentation depth for nested-hub output.
    """
    handle = open_dev(r'\\.\{}'.format(name))
    if not handle:
        print('Failed to open device {}'.format(name))
        return
    buf = win32file.DeviceIoControl(handle,
                                    IOCTL_USB_GET_NODE_INFORMATION,
                                    None,
                                    76,
                                    None)
    # buf is bytes on Python 3, so buf[6] is already an int (the hub's port
    # count). The previous ord(buf[6]) was a Python 2 leftover and raises
    # TypeError on Python 3; main() reads the same field as buf[6].
    print_hub_ports(handle, buf[6], verbose, level)
    handle.close()
def print_str_or_hex(to_be_print):
    """Print a descriptor string, falling back to a hex dump when unprintable."""
    printable = all(ch in string.printable for ch in to_be_print)
    if printable:
        print('"{}"'.format(to_be_print))
    else:
        dump = ''.join('{:02x} '.format(ord(ch)) for ch in to_be_print)
        print('Hex: ' + dump)
def print_hub_ports(handle, num_ports, verbose, level):
    """Enumerate ports 1..num_ports on a hub and print what is attached to each.

    Recurses into nested hubs (via ``exam_hub``) and, for connected devices,
    prints VID/PID/bcdDevice plus any string descriptors the device exposes.

    Args:
        handle: Open handle to the hub device.
        num_ports: Number of downstream ports on the hub.
        verbose: Also print empty ports and hub entries.
        level: Indentation depth (one space per level) for nested output.
    """
    for idx in range(1, num_ports + 1):
        # Request buffer: port index followed by zero padding.
        info = chr(idx) + '\0' * 34
        try:
            buf = win32file.DeviceIoControl(handle,
                                            (IOCTL_USB_GET_NODE_CONNECTION_INFORMATION),
                                            info,
                                            34 + 11 * 30,
                                            None)
        except pywintypes.error as e:
            print(e.winerror, e.funcname, e.strerror)
            return
        # Unpack USB_NODE_CONNECTION_INFORMATION: vid/pid/vers come from the
        # embedded device descriptor; manu/prod/seri are the *indices* of the
        # corresponding string descriptors (0 means "not present"); stat is the
        # connection status (0 = NoDeviceConnected, 1 = connected).
        _, vid, pid, vers, manu, prod, seri, _, ishub, _, stat = struct.unpack('=12sHHHBBB3s?6sL', buf[:35])
        if ishub:
            if verbose:
                print('{} [Port{}] {}'.format(' ' * level, idx, 'USB Hub'))
            exam_hub(get_ext_hub_name(handle, idx), verbose, level)
        elif stat == 0 and verbose:
            print('{} [Port{}] {}'.format(' ' * level, idx, 'NoDeviceConnected'))
        elif stat == 1:
            # Only print quiet (non-verbose) output when the device exposes at
            # least one string descriptor.
            if verbose or (manu != 0 or prod != 0 or seri != 0):
                print('{} [Port{}] {}'.format(' ' * level, idx, get_driverkey_name(handle, idx)))
                print('{} Vendor ID: 0x{:04X}'.format(' ' * level, vid))
                print('{} Product ID: 0x{:04X}'.format(' ' * level, pid))
                print('{} Device BCD: 0x{:04X}'.format(' ' * level, vers))
                if manu != 0:
                    print('{} Manufacturer (0x{:x}) -> '.format(' ' * level, manu), end='')
                    print_str_or_hex(get_str_desc(handle, idx, manu))
                if prod != 0:
                    print('{} Product (0x{:x}) -> '.format(' ' * level, prod), end='')
                    print_str_or_hex(get_str_desc(handle, idx, prod))
                if seri != 0:
                    print('{} Serial No (0x{:x}) -> '.format(' ' * level, seri), end='')
                    print_str_or_hex(get_str_desc(handle, idx, seri))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true',
help="Increase output verbosity.")
args = parser.parse_args()
for i in range(10):
name = r"\\.\HCD{}".format(i)
handle = open_dev(name)
if not handle:
continue
root = get_root_hub_name(handle)
print('{}RootHub: {}'.format('\n' if i != 0 else '', root))
# ext = get_ext_hub_name(handle, index)
# print('{}ExtHub: {}'.format('\n' if i != 0 else '', ext))
dev_name = r'\\.\{}'.format(root)
dev_handle = open_dev(dev_name)
if not dev_handle:
print('Failed to open device {}'.format(dev_name))
continue
buf = win32file.DeviceIoControl(dev_handle,
IOCTL_USB_GET_NODE_INFORMATION,
None,
76,
None)
num = buf[6]
print_hub_ports(dev_handle, num, args.verbose, 0)
dev_handle.close()
handle.close()
if __name__ == '__main__':
main()
| [
"sunduoze@163.com"
] | sunduoze@163.com |
9862630e7b2d92ec61b1b3fafa2d66bf2c38aecb | f275aead39428664296b54750fdb577799c4a928 | /delete_motion_ve_votes.py | ef3cd0d743e0ffee14705b10d041067f11ddfce9 | [] | no_license | KohoVolit/scraper-psp.cz | 6ec46e8e08e60e9bbc0027c69390af7e70adc51c | f2f05522a758c7115fbe53da5b106f74054b95c5 | refs/heads/master | 2020-12-24T15:49:52.178424 | 2018-04-04T22:27:10 | 2018-04-04T22:27:10 | 22,727,536 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import vpapi
import authentication
vpapi.parliament('cz/psp')
vpapi.authorize(authentication.username,authentication.password)
vpapi.timezone('Europe/Prague')
vpapi.delete("votes")
vpapi.delete("vote-events")
vpapi.delete("motions")
| [
"michal.skop@kohovolit.eu"
] | michal.skop@kohovolit.eu |
39b4addcc99726b50837e3fd9fdbcd6e86573415 | dd456df9c9e3b327463d8a729ab837ed262e4dce | /venv/Scripts/pip3.8-script.py | f255ad31ef96f95ed32417b9f68fd5e0943f719b | [] | no_license | Lelethu-Ndidi/secondDataTypeExercise | 5262f39099fd4895d2e75f66808f5899e7856dcb | 64b0c485ed1b56f3d6a25a4fe1196957a453f396 | refs/heads/master | 2021-03-07T11:20:12.396861 | 2020-03-10T11:55:24 | 2020-03-10T12:36:06 | 246,261,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!"C:\Users\User\PycharmProjects\DataType Ex2\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"lelethundidi@gmail.com"
] | lelethundidi@gmail.com |
c728711407cbf93363708bc7ebf19bea2a3b104a | 6568474f411000b7863ac15071a51136fd0558d8 | /ROC-probabilities_Ricardo Zamora Mennigke_Calibracion.py | e9e2fc06654eb836c28e1db43e2a76f365542382 | [] | no_license | zamoraricardo15/Python-Model-Calibration-Methods | ea8769751aab72b61c9b12a2fd4e219b9dc655c1 | d60a0560bab3c93764428218ee21479d452e8919 | refs/heads/main | 2023-06-20T08:13:41.747289 | 2021-07-17T15:52:21 | 2021-07-17T15:52:21 | 386,977,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,924 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 23:20:41 2020
@author: rzamoram
"""
##Pregunta 1
import numpy as np
import pandas as pd
import random as rd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import os
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from pandas import DataFrame
from matplotlib import colors as mcolors
import seaborn as sns
# Remember the original working directory, then move to the course data
# folder (machine-specific absolute path) and load the tumour dataset.
pasada = os.getcwd()
os.chdir("C:/Users/rzamoram/OneDrive - Intel Corporation/Documents/Machine Learning/Métodos Supervisados con Python/Clase 01")
print(os.getcwd())
datos = pd.read_csv('tumores.csv',delimiter=',',decimal=".")
# `imagen` is an identifier-like column; treat it as categorical.
datos['imagen'] = datos['imagen'].astype('category')
print(datos.shape)
print(datos.head())
print(datos.info())
def distribucion_variable_predecir(data:DataFrame,variable_predict:str):
    """Plot the class distribution of `variable_predict` as a single
    stacked horizontal bar with one segment per class, labelled with the
    percentage each class represents.  Produces a matplotlib figure as a
    side effect; returns nothing.
    """
    colors = list(dict(**mcolors.CSS4_COLORS))
    # Cross-tab against a constant gives the per-class counts; dividing by
    # the total turns them into proportions.
    df = pd.crosstab(index=data[variable_predict],columns="valor") / data[variable_predict].count()
    fig = plt.figure(figsize=(10,9))
    g = fig.add_subplot(111)
    countv = 0
    titulo = "Distribución de la variable %s" % variable_predict
    # Draw each class as a segment starting where the previous one ended.
    for i in range(df.shape[0]):
        g.barh(1,df.iloc[i],left = countv, align='center',color=colors[11+i],label= df.iloc[i].name)
        countv = countv + df.iloc[i]
    vals = g.get_xticks()
    g.set_xlim(0,1)
    g.set_yticklabels("")
    g.set_title(titulo)
    g.set_ylabel(variable_predict)
    g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
    countv = 0
    # Annotate each segment with its percentage at the segment's midpoint.
    for v in df.iloc[:,0]:
        g.text(np.mean([countv,countv+v]) - 0.03, 1 , '{:.1%}'.format(v), color='black', fontweight='bold')
        countv = countv + v
    g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
distribucion_variable_predecir(datos,"tipo")
def indices_general(MC, nombres = None):
    """Compute global and per-class quality indices for a 2x2 confusion
    matrix ``MC`` (rows = actual class, columns = predicted class, with
    class 1 taken as the positive class).

    Parameters
    ----------
    MC : numpy.ndarray
        2x2 confusion matrix.
    nombres : list, optional
        Class names used as column labels of the per-class precision table.

    Returns
    -------
    dict
        Global accuracy/error, per-class precision (DataFrame), and the
        positive/negative precision, false rates and predictive values.
    """
    tn, fp = MC[0][0], MC[0][1]
    fn, tp = MC[1][0], MC[1][1]
    acc = np.sum(MC.diagonal()) / np.sum(MC)
    per_class = pd.DataFrame(MC.diagonal() / np.sum(MC, axis=1)).T
    if nombres is not None:
        per_class.columns = nombres
    # Sensitivity / specificity of the positive (1) and negative (0) class.
    pp = tp / (tp + fn)
    pn = tn / (tn + fp)
    # Positive / negative predictive value (column-wise precision).
    ap = tp / (fp + tp)
    an = tn / (tn + fn)
    return {"Matriz de Confusión": MC,
            "Precisión Global": acc,
            "Error Global": 1 - acc,
            "Precisión por categoría": per_class,
            "Precision Positiva (PP)": pp,
            "Precision Negativa (PN)": pn,
            "Falsos Positivos(FP)": 1 - pn,
            "Falsos Negativos (FN)": 1 - pp,
            "Asertividad Positiva (AP)": ap,
            "Asertividad Negativa (NP)": an}
def poder_predictivo_categorica(data:DataFrame, var:str, variable_predict:str):
    """Plot, for each level of the categorical predictor `var`, the
    distribution of the target `variable_predict` as a stacked horizontal
    bar (each row sums to 100%).  Matplotlib side effect only; no return.
    """
    df = pd.crosstab(index= data[var],columns=data[variable_predict])
    # Row-normalize so each category of `var` shows class proportions.
    df = df.div(df.sum(axis=1),axis=0)
    titulo = "Distribución de la variable %s según la variable %s" % (var,variable_predict)
    g = df.plot(kind='barh',stacked=True,legend = True, figsize = (10,9), \
                xlim = (0,1),title = titulo, width = 0.8)
    vals = g.get_xticks()
    g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
    g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
    for bars in g.containers:
        plt.setp(bars, width=.9)
    # Annotate every stacked segment with its percentage.
    for i in range(df.shape[0]):
        countv = 0
        for v in df.iloc[i]:
            g.text(np.mean([countv,countv+v]) - 0.03, i , '{:.1%}'.format(v), color='black', fontweight='bold')
            countv = countv + v
def poder_predictivo_numerica(data:DataFrame, var:str, variable_predict:str):
    """Plot overlaid kernel-density estimates of the numeric predictor
    `var`, one curve per class of `variable_predict` (seaborn side effect).
    """
    sns.FacetGrid(data, hue=variable_predict, height=6).map(sns.kdeplot, var, shade=True).add_legend()
from sklearn.ensemble import RandomForestClassifier
def plotROC(real, prediccion, color = "red", label = None):
    """Draw the ROC curve of `prediccion` (positive-class scores) against
    the true labels `real` on the current matplotlib axes, plus the
    diagonal chance line.  Side effect only; no return value.
    """
    fp_r, tp_r, umbral = roc_curve(real, prediccion)
    plt.plot(fp_r, tp_r, lw = 1, color = color, label = label)
    # Diagonal = performance of a random classifier.
    plt.plot([0, 1], [0, 1], lw = 1, color = "black")
    plt.xlabel("Tasa de Falsos Positivos")
    plt.ylabel("Tasa de Verdaderos Positivos")
    plt.title("Curva ROC")
# ---- Pregunta 1: fit several classifiers on the tumour data and compare
# ---- their ROC curves / AUC scores.
X = datos.iloc[:,1:17]
print(X.head())
y = datos.iloc[:,17:18]
print(y.head())
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=0)
# SVM with probability estimates enabled so predict_proba is available.
instancia_svm = SVC(kernel = "rbf",gamma='scale',probability=True)
instancia_svm.fit(X_train, y_train.iloc[:,0].values)
#print("Probabilidad del No y del Si:\n",instancia_svm.predict_proba(X_test))
probabilidad = instancia_svm.predict_proba(X_test)[:, 1]
#print("Probabilidad de Si (o sea del 1):\n",probabilidad)
# Gráfico de la Curva ROC
plt.figure(figsize=(10,10))
plotROC(y_test, probabilidad)
instancia_bosques = RandomForestClassifier(n_estimators = 300, max_features = 3)
instancia_bosques.fit(X_train, y_train.iloc[:,0].values)
# Genera la Curva ROC para Bosques
plt.figure(figsize=(10,10))
plotROC(y_test, instancia_bosques.predict_proba(X_test)[:, 1], color = "blue")
# NOTE(review): the random forest is re-created and re-fit here, which
# discards the model trained just above (harmless but redundant).
instancia_bosques = RandomForestClassifier(n_estimators = 300, max_features = 3)
instancia_bosques.fit(X_train, y_train.iloc[:,0].values)
from sklearn.neighbors import KNeighborsClassifier
instancia_knn = KNeighborsClassifier(n_neighbors=5)
instancia_knn.fit(X_train, y_train.iloc[:,0].values)
#plt.figure(figsize=(10,10))
#plotROC(y_test, instancia_knn.predict_proba(X_test)[:, 1], color = "blue")
instancia_tree = DecisionTreeClassifier()
instancia_tree.fit(X_train, y_train.iloc[:,0].values)
instancia_ADA = AdaBoostClassifier(n_estimators=5)
instancia_ADA.fit(X_train, y_train.iloc[:,0].values)
from sklearn.ensemble import GradientBoostingClassifier
instancia_XGB = GradientBoostingClassifier(n_estimators=5)
instancia_XGB.fit(X_train, y_train.iloc[:,0].values)
from sklearn.neural_network import MLPClassifier
instancia_classifier = MLPClassifier(solver='lbfgs')
instancia_classifier.fit(X_train, y_train.iloc[:,0].values)
from sklearn.naive_bayes import GaussianNB
instancia_bayes = GaussianNB()
instancia_bayes.fit(X_train, y_train.iloc[:,0].values)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
instancia_lda = LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto')
instancia_lda.fit(X_train, y_train.iloc[:,0].values)
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
instancia_qda = QuadraticDiscriminantAnalysis()
instancia_qda.fit(X_train, y_train.iloc[:,0].values)
# Overlay every model's ROC curve in a single figure for comparison.
plt.figure(figsize=(10,10))
plotROC(y_test, instancia_svm.predict_proba(X_test)[:, 1], label = "SVM")
plotROC(y_test, instancia_bosques.predict_proba(X_test)[:, 1], color = "blue", label= "Bosques Aleatorios")
plotROC(y_test, instancia_knn.predict_proba(X_test)[:, 1], color = "red", label= "KNN")
plotROC(y_test, instancia_tree.predict_proba(X_test)[:, 1], color = "#67E568", label= "Arboles Decision")
#plotROC(y_test, instancia_knn.predict_proba(X_test)[:, 1], color = "#257F27", label= "KNN")
plotROC(y_test, instancia_ADA.predict_proba(X_test)[:, 1], color = "#08420D", label= "ADA")
plotROC(y_test, instancia_XGB.predict_proba(X_test)[:, 1], color = "#FFF000", label= "XGBoosting")
plotROC(y_test, instancia_classifier.predict_proba(X_test)[:, 1], color = "#FFB62B", label= "Redes neuronales")
plotROC(y_test, instancia_bayes.predict_proba(X_test)[:, 1], color = "#E56124", label= "Bayes")
plotROC(y_test, instancia_lda.predict_proba(X_test)[:, 1], color = "#E53E30", label= "LDA")
#plotROC(y_test, instancia_qda.predict_proba(X_test)[:, 1], color = "#7F2353", label= "QDA")
plt.legend(loc = "lower right")
# Area under the ROC curve for each fitted model.
bosques_area = roc_auc_score(y_test, instancia_bosques.predict_proba(X_test)[:, 1])
svm_area = roc_auc_score(y_test, instancia_svm.predict_proba(X_test)[:, 1])
knn_area = roc_auc_score(y_test, instancia_knn.predict_proba(X_test)[:, 1])
tree_area = roc_auc_score(y_test, instancia_tree.predict_proba(X_test)[:, 1])
knn_area = roc_auc_score(y_test, instancia_knn.predict_proba(X_test)[:, 1])
ADA_area = roc_auc_score(y_test, instancia_ADA.predict_proba(X_test)[:, 1])
XGB_area = roc_auc_score(y_test, instancia_XGB.predict_proba(X_test)[:, 1])
red_area = roc_auc_score(y_test, instancia_classifier.predict_proba(X_test)[:, 1])
bayes_area = roc_auc_score(y_test, instancia_bayes.predict_proba(X_test)[:, 1])
lda_area = roc_auc_score(y_test, instancia_lda.predict_proba(X_test)[:, 1])
#qda_area = roc_auc_score(y_test, instancia_qda.predict_proba(X_test)[:, 1])
print("Área bajo la curva ROC en Bosques Aleatorios: {:.3f}".format(bosques_area))
print("Área bajo la curva ROC en KNN: {:.3f}".format(knn_area))
print("Área bajo la curva ROC en Arboles Decision: {:.3f}".format(tree_area))
print("Área bajo la curva ROC en KNN: {:.3f}".format(knn_area))
print("Área bajo la curva ROC en ADA: {:.3f}".format(ADA_area))
print("Área bajo la curva ROC en XGB: {:.3f}".format(XGB_area))
print("Área bajo la curva ROC en red: {:.3f}".format(red_area))
print("Área bajo la curva ROC en bayes: {:.3f}".format(bayes_area))
print("Área bajo la curva ROC en LDA: {:.3f}".format(lda_area))
#print("Área bajo la curva ROC en QDA: {:.3f}".format(qda_area))
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
##Pregunta 2
import os
import pandas as pd
# Switch to the folder holding the South-African heart-disease dataset
# (machine-specific absolute path) and load it, keeping the first column
# as the row index.
pasada = os.getcwd()
os.chdir("C:/Users/rzamoram/OneDrive - Intel Corporation/Documents/Machine Learning/Métodos NO Supervisados con Python/Clase 1")
os.getcwd()
ejemplo10 = pd.read_csv("SAheart.csv", delimiter = ';', decimal = ".", header = 0, index_col = 0)
print(ejemplo10.head())
datos = pd.DataFrame(ejemplo10)
def recodificar(col, nuevo_codigo):
    """Return a recoded copy of the series/column ``col``.

    Every occurrence of a key of ``nuevo_codigo`` is replaced by its
    mapped value; the input itself is never modified.
    """
    recoded = pd.Series(col, copy=True)
    for old_value, new_value in nuevo_codigo.items():
        recoded.replace(old_value, new_value, inplace=True)
    return recoded
# Recode the two categorical columns to numeric codes.
datos["famhist"] = recodificar(datos["famhist"], {'Present':1,'Absent':2})
datos["chd"] = recodificar(datos["chd"], {'No':0,'Si':1})
print(datos.head())
print(datos.dtypes)
# Conviertiendo la variables en Dummy
datos_dummy = pd.get_dummies(datos)
print(datos_dummy.head())
print(datos_dummy.dtypes)
X = datos.iloc[:,:8]
print(X.head())
y = datos.iloc[:,8:9]
print(y.head())
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.80, random_state = 0)
# bosques aleatorios modelo
instancia_bosques = RandomForestClassifier(n_estimators = 300, max_features = 3)
instancia_bosques.fit(X_train, y_train.iloc[:,0].values)
prediccion = instancia_bosques.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
print("Probabilidad del No y del Si:\n",instancia_bosques.predict_proba(X_test))
probabilidad = instancia_bosques.predict_proba(X_test)[:, 1]
print("Probabilidad de Si (o sea del 1):\n",probabilidad)
# regla de decision
# Sweep the probability cut-off and re-evaluate the confusion matrix for
# each threshold (calibration of the decision rule).
corte = [0.501, 0.502, 0.503, 0.504, 0.505, 0.506, 0.507, 0.508, 0.509]
for c in corte:
    print("===========================")
    print("Probabilidad de Corte: ",c)
    prediccion = np.where(probabilidad > c, 1, 0)
    # Calidad de la predicción
    MC = confusion_matrix(y_test, prediccion)
    indices = indices_general(MC,list(np.unique(y)))
    for k in indices:
        print("\n%s:\n%s"%(k,str(indices[k])))
# XGB modelo
instancia_XGB = GradientBoostingClassifier(n_estimators=5)
instancia_XGB.fit(X_train, y_train.iloc[:,0].values)
prediccion = instancia_XGB.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
#print("Probabilidad del No y del Si:\n",instancia_XGB.predict_proba(X_test))
probabilidad = instancia_XGB.predict_proba(X_test)[:, 1]
#print("Probabilidad de Si (o sea del 1):\n",probabilidad)
# regla de decision
# Same threshold sweep for gradient boosting, over a wider range.
corte = [0.501, 0.502, 0.503, 0.504, 0.505, 0.506, 0.507, 0.508, 0.509, 0.6, 0.601, 0.602, 0.603, 0.604, 0.605, 0.606, 0.607, 0.608, 0.609]
for c in corte:
    print("===========================")
    print("Probabilidad de Corte: ",c)
    prediccion = np.where(probabilidad > c, 1, 0)
    # Calidad de la predicción
    MC = confusion_matrix(y_test, prediccion)
    indices = indices_general(MC,list(np.unique(y)))
    for k in indices:
        print("\n%s:\n%s"%(k,str(indices[k])))
##Pregunta 3
# Build the ROC curve of a tiny hand-made example "by hand" and compare
# with sklearn's roc_curve, annotating each distinct (FPR, TPR) point.
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
Clase = np.array([1,0,1,0,0,1,1,0,1,1])
Score = np.array([0.8,0.7,0.65,0.6,0.5,0.35,0.3,0.25,0.2,0.1])
# Graficamos ROC con usando roc_curve de sklearn
fp_r, tp_r, umbral = roc_curve(Clase, Score)
plt.figure(figsize=(10,10))
plt.plot(fp_r, tp_r, lw = 1, color = "red")
plt.plot([0, 1], [0, 1], lw = 1, color = "black")
plt.xlabel("Tasa de Falsos Positivos")
plt.ylabel("Tasa de Verdaderos Positivos")
plt.title("Curva ROC")
# Graficamos puntos con el siguiente algoritmo
i = 1 # Contador
FP_r = -1 # Para que entre al condicional en la primera iteración
TP_r = -1 # Para que entre al condicional en la primera iteración
# linspace genera una sucesión de 201 números del 1 al 0 que equivale a una sucesión del 1 al 0 con paso de 0.005
for Umbral in np.linspace(1, 0, 201):
    Prediccion = np.where(Score >= Umbral, 1, 0)
    MC = confusion_matrix(Clase, Prediccion)
    # Only plot/print when the (FPR, TPR) point actually changed.
    if (FP_r != MC[0, 1] / sum(MC[0, ])) | (TP_r != MC[1, 1] / sum(MC[1, ])):
        FP_r = MC[0, 1] / sum(MC[0, ]) # Tasa de Falsos Positivos
        TP_r = MC[1, 1] / sum(MC[1, ]) # Tasa de Verdaderos Positivos
        # Graficamos punto
        plt.plot(FP_r, TP_r, "o", mfc = "none", color = "blue")
        plt.annotate(round(Umbral, 3), (FP_r + 0.01, TP_r - 0.02))
        # Imprimimos resultado
        print("=====================")
        print("Punto i = ", i, "\n")
        print("Umbral = T = ", round(Umbral, 3), "\n")
        print("MC =")
        print(MC, "\n")
        print("Tasa FP = ", round(FP_r, 2), "\n")
        print("Tasa TP = ", round(TP_r, 2))
        i = i + 1
#######
# Second variant: incremental point-by-point construction of the curve,
# stepping the threshold from min(Score) to max(Score).
Clase = np.array([1,0,1,0,0,1,1,0,1,1])
Score = np.array([0.8,0.7,0.65,0.6,0.5,0.35,0.3,0.25,0.2,0.1])
fp_r, tp_r, umbral = roc_curve(Clase, Score)
plt.figure(figsize=(10,10))
plt.plot(fp_r, tp_r, lw = 1, color = "red")
plt.plot([0, 1], [0, 1], lw = 1, color = "black")
plt.xlabel("Tasa de Falsos Positivos")
plt.ylabel("Tasa de Verdaderos Positivos")
plt.title("Curva ROC")
# Aquí se inicializan para que de igual a la corrida a pie
Umbral = min(Score)
Paso = (max(Score) - min(Score)) / 10
N = 10 # ceros
P = 10 # unos
TP = 0
FP = 0
for i in range(0, 10):
    if Score[i] > Umbral:
        if Clase[i] == 1:
            TP = TP + 1
        else:
            FP = FP + 1
    else:
        if Clase[i] == 0:
            FP = FP + 1
        else:
            TP = TP + 1
    # Graficamos punto
    plt.plot(FP / N, TP / P, "o", mfc = "none", color = "blue")
    plt.annotate(i + 1, (FP / N + 0.01, TP / P - 0.02))
    Umbral = Umbral + Paso
| [
"noreply@github.com"
] | zamoraricardo15.noreply@github.com |
4afad2de190cd0ca1010f308b3adc427836bb9a7 | c07ccdeb74abdbf377a5e1de188296524fe1c5d3 | /hw3/hw3.py | b8a516726d0b075927fc97d8409918ea1f995552 | [] | no_license | emmashie/amath-582 | c563bd14a2d026311cbda46188765f295411addf | 4b7d55ffa325d2ce48a4d6fc3a8e8ce71af6c169 | refs/heads/main | 2023-04-03T10:01:34.987983 | 2021-04-06T18:16:38 | 2021-04-06T18:16:38 | 327,985,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,044 | py | import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io as sio
import hw3_functions as funct
plt.ion()
plt.style.use('ggplot')
# define data path
datapath = '../data/'
# ---- TEST 1 (ideal case): track the paint-can position in three camera
# ---- views, align the series, and run an SVD/PCA on the stacked
# ---- (x, y) coordinates.
########### TEST 1 #############
# define filenames
cam11_file = 'cam1_1.mat'
cam21_file = 'cam2_1.mat'
cam31_file = 'cam3_1.mat'
# load mat files into python
cam11 = sio.loadmat(os.path.join(datapath, cam11_file))
cam21 = sio.loadmat(os.path.join(datapath, cam21_file))
cam31 = sio.loadmat(os.path.join(datapath, cam31_file))
# pull camera data from mat files
vid11 = cam11['vidFrames1_1']
vid21 = cam21['vidFrames2_1']
vid31 = cam31['vidFrames3_1']
# put camera data into greyscale
vid11_grey = np.asarray([funct.rgb2grey(vid11[:,:,:,i]) for i in range(len(vid11[0,0,0,:]))])
vid21_grey = np.asarray([funct.rgb2grey(vid21[:,:,:,i]) for i in range(len(vid21[0,0,0,:]))])
vid31_grey = np.asarray([funct.rgb2grey(vid31[:,:,:,i]) for i in range(len(vid31[0,0,0,:]))])
# find indices of mass from each frame
mindx11, mindy11 = funct.find_ind_and_plot(vid11_grey, 'plots/vid11_animation/fig', xmin=250, xmax=400, plot=False)
mindx21, mindy21 = funct.find_ind_and_plot(vid21_grey, 'plots/vid21_animation/fig', xmin=200, xmax=400, plot=False)
mindx31, mindy31 = funct.find_ind_and_plot(vid31_grey, 'plots/vid31_animation/fig', xmin=200, xmax=600, plot=False)
# plot
funct.plot_positions(mindx11, mindy11, mindx21, mindy21, mindx31, mindy31, '1')
# camera 2 lags the others; drop its first 10 frames to align the series
funct.plot_positions(mindx11, mindy11, mindx21[10:], mindy21[10:], mindx31, mindy31, '1_shifted')
minlen = np.min([len(mindx11), len(mindx21[10:]), len(mindx31)])
# Stack the six position series (x and y per camera) into one matrix.
X = np.zeros((6, minlen))
X[0,:] = mindx11[:minlen]
X[1,:] = mindy11[:minlen]
X[2,:] = mindx21[10:10+minlen]
X[3,:] = mindy21[10:10+minlen]
X[4,:] = mindx31[:minlen]
X[5,:] = mindy31[:minlen]
# Remove each row's mean before the SVD.
X = X - np.expand_dims(np.mean(X, axis=-1), axis=-1)
## python vs. matlab differences in svd function: https://stackoverflow.com/questions/50930899/svd-command-in-python-v-s-matlab
[m, n] = X.shape
U, Sdiag, VH = np.linalg.svd(X)
V = VH.T
pca1 = np.matmul(U.T, X)
# Proportion of variance captured by each singular value.
percentage1 = Sdiag**2/np.sum(Sdiag**2)
########### TEST 2 #############
# ---- TEST 2 (noisy case): same pipeline with camera shake.
# define filenames
cam12_file = 'cam1_2.mat'
cam22_file = 'cam2_2.mat'
cam32_file = 'cam3_2.mat'
# load mat files into python
cam12 = sio.loadmat(os.path.join(datapath, cam12_file))
cam22 = sio.loadmat(os.path.join(datapath, cam22_file))
cam32 = sio.loadmat(os.path.join(datapath, cam32_file))
# pull camera data from mat files
vid12 = cam12['vidFrames1_2']
vid22 = cam22['vidFrames2_2']
vid32 = cam32['vidFrames3_2']
# put camera data into greyscale
vid12_grey = np.asarray([funct.rgb2grey(vid12[:,:,:,i]) for i in range(len(vid12[0,0,0,:]))])
vid22_grey = np.asarray([funct.rgb2grey(vid22[:,:,:,i]) for i in range(len(vid22[0,0,0,:]))])
vid32_grey = np.asarray([funct.rgb2grey(vid32[:,:,:,i]) for i in range(len(vid32[0,0,0,:]))])
# find indices of mass from each frame
mindx12, mindy12 = funct.find_ind_and_plot(vid12_grey, 'plots/vid12_animation/fig', xmin=250, xmax=500, plot=False)
mindx22, mindy22 = funct.find_ind_and_plot(vid22_grey, 'plots/vid22_animation/fig', xmin=200, xmax=450, plot=False)
mindx32, mindy32 = funct.find_ind_and_plot(vid32_grey, 'plots/vid32_animation/fig', xmin=225, xmax=600, plot=False)
# plot
funct.plot_positions(mindx12, mindy12, mindx22, mindy22, mindx32, mindy32, '2')
# svd calculation
minlen = np.min([len(mindx12), len(mindx22), len(mindx32)])
X = np.zeros((6, minlen))
X[0,:] = mindx12[:minlen]
X[1,:] = mindy12[:minlen]
X[2,:] = mindx22[:minlen]
X[3,:] = mindy22[:minlen]
X[4,:] = mindx32[:minlen]
X[5,:] = mindy32[:minlen]
X = X - np.expand_dims(np.mean(X, axis=-1), axis=-1)
[m, n] = X.shape
U, Sdiag, VH = np.linalg.svd(X)
V = VH.T
#Xrank1_2 = np.matmul(np.expand_dims(U[:,0]*Sdiag[0],axis=-1), np.expand_dims(V[:,0], axis=-1).T)
pca2 = np.matmul(U.T, X)
percentage2 = Sdiag**2/np.sum(Sdiag**2)
########### TEST 3 #############
# ---- TEST 3: horizontal displacement case.
# define filenames
cam13_file = 'cam1_3.mat'
cam23_file = 'cam2_3.mat'
cam33_file = 'cam3_3.mat'
# load mat files into python
cam13 = sio.loadmat(os.path.join(datapath, cam13_file))
cam23 = sio.loadmat(os.path.join(datapath, cam23_file))
cam33 = sio.loadmat(os.path.join(datapath, cam33_file))
# pull camera data from mat files
vid13 = cam13['vidFrames1_3']
vid23 = cam23['vidFrames2_3']
vid33 = cam33['vidFrames3_3']
# put camera data into greyscale
vid13_grey = np.asarray([funct.rgb2grey(vid13[:,:,:,i]) for i in range(len(vid13[0,0,0,:]))])
vid23_grey = np.asarray([funct.rgb2grey(vid23[:,:,:,i]) for i in range(len(vid23[0,0,0,:]))])
vid33_grey = np.asarray([funct.rgb2grey(vid33[:,:,:,i]) for i in range(len(vid33[0,0,0,:]))])
# find indices of mass from each frame
mindx13, mindy13 = funct.find_ind_and_plot(vid13_grey, 'plots/vid13_animation/fig', xmin=250, xmax=450, plot=False)
mindx23, mindy23 = funct.find_ind_and_plot(vid23_grey, 'plots/vid23_animation/fig', xmin=200, xmax=450, ymin=200, ymax=415, plot=False, restricty=True)
mindx33, mindy33 = funct.find_ind_and_plot(vid33_grey, 'plots/vid33_animation/fig', xmin=225, xmax=600, plot=False)
# plot
funct.plot_positions(mindx13, mindy13, mindx23, mindy23, mindx33, mindy33, '3')
# svd calculation
minlen = np.min([len(mindx13), len(mindx23), len(mindx33)])
X = np.zeros((6, minlen))
X[0,:] = mindx13[:minlen]
X[1,:] = mindy13[:minlen]
X[2,:] = mindx23[:minlen]
X[3,:] = mindy23[:minlen]
X[4,:] = mindx33[:minlen]
X[5,:] = mindy33[:minlen]
X = X - np.expand_dims(np.mean(X, axis=-1), axis=-1)
[m, n] = X.shape
U, Sdiag, VH = np.linalg.svd(X)
V = VH.T
pca3 = np.matmul(U.T, X)
percentage3 = Sdiag**2/np.sum(Sdiag**2)
########### TEST 4 #############
# ---- TEST 4: horizontal displacement and rotation case.
# define filenames
cam14_file = 'cam1_4.mat'
cam24_file = 'cam2_4.mat'
cam34_file = 'cam3_4.mat'
# load mat files into python
cam14 = sio.loadmat(os.path.join(datapath, cam14_file))
cam24 = sio.loadmat(os.path.join(datapath, cam24_file))
cam34 = sio.loadmat(os.path.join(datapath, cam34_file))
# pull camera data from mat files
vid14 = cam14['vidFrames1_4']
vid24 = cam24['vidFrames2_4']
vid34 = cam34['vidFrames3_4']
# put camera data into greyscale
vid14_grey = np.asarray([funct.rgb2grey(vid14[:,:,:,i]) for i in range(len(vid14[0,0,0,:]))])
vid24_grey = np.asarray([funct.rgb2grey(vid24[:,:,:,i]) for i in range(len(vid24[0,0,0,:]))])
vid34_grey = np.asarray([funct.rgb2grey(vid34[:,:,:,i]) for i in range(len(vid34[0,0,0,:]))])
# find indices of mass from each frame
mindx14, mindy14 = funct.find_ind_and_plot(vid14_grey, 'plots/vid14_animation/fig', xmin=300, xmax=500, plot=False)
mindx24, mindy24 = funct.find_ind_and_plot(vid24_grey, 'plots/vid24_animation/fig', xmin=215, xmax=400, plot=False)
mindx34, mindy34 = funct.find_ind_and_plot(vid34_grey, 'plots/vid34_animation/fig', xmin=200, xmax=600, plot=False)
# plot
funct.plot_positions(mindx14, mindy14, mindx24, mindy24, mindx34, mindy34, '4')
# svd calculation
minlen = np.min([len(mindx14), len(mindx24), len(mindx34)])
X = np.zeros((6, minlen))
X[0,:] = mindx14[:minlen]
X[1,:] = mindy14[:minlen]
X[2,:] = mindx24[:minlen]
X[3,:] = mindy24[:minlen]
X[4,:] = mindx34[:minlen]
X[5,:] = mindy34[:minlen]
X = X - np.expand_dims(np.mean(X, axis=-1), axis=-1)
[m, n] = X.shape
U, Sdiag, VH = np.linalg.svd(X)
V = VH.T
pca4 = np.matmul(U.T, X)
percentage4 = Sdiag**2/np.sum(Sdiag**2)
### analysis figures ###
# Raw (mean-removed) positions per camera for each of the four tests.
fig, ax = plt.subplots(figsize=(12,7), nrows=2, ncols=2, sharex=False, sharey=True)
ax[0,0].plot(mindx11-np.mean(mindx11), '--', color='tab:blue')
ax[0,0].plot(mindy11-np.mean(mindy11),label='Camera 1', color='tab:blue')
ax[0,0].plot(mindx21-np.mean(mindx21), '--', color='tab:green')
ax[0,0].plot(mindy21-np.mean(mindy21), label='Camera 2', color='tab:green')
ax[0,0].plot(mindx31-np.mean(mindx31), '--', color='tab:red')
ax[0,0].plot(mindy31-np.mean(mindy31), label='Camera 3', color='tab:red')
ax[0,0].legend(loc='best')
ax[0,0].set_title('Ideal Case')
ax[0,1].plot(mindx12-np.mean(mindx12), '--', color='tab:blue')
ax[0,1].plot(mindy12-np.mean(mindy12),label='Camera 1', color='tab:blue')
ax[0,1].plot(mindx22-np.mean(mindx22), '--', color='tab:green')
ax[0,1].plot(mindy22-np.mean(mindy22), label='Camera 2', color='tab:green')
ax[0,1].plot(mindx32-np.mean(mindx32), '--', color='tab:red')
ax[0,1].plot(mindy32-np.mean(mindy32), label='Camera 3', color='tab:red')
ax[0,1].set_title('Noisy Case')
ax[1,0].plot(mindx13-np.mean(mindx13), '--', color='tab:blue')
ax[1,0].plot(mindy13-np.mean(mindy13),label='Camera 1', color='tab:blue')
ax[1,0].plot(mindx23-np.mean(mindx23), '--', color='tab:green')
ax[1,0].plot(mindy23-np.mean(mindy23), label='Camera 2', color='tab:green')
ax[1,0].plot(mindx33-np.mean(mindx33), '--', color='tab:red')
ax[1,0].plot(mindy33-np.mean(mindy33), label='Camera 3', color='tab:red')
ax[1,0].set_title('Horizontal Displacement Case')
ax[1,1].plot(mindx14-np.mean(mindx14), '--', color='tab:blue')
ax[1,1].plot(mindy14-np.mean(mindy14),label='Camera 1', color='tab:blue')
ax[1,1].plot(mindx24-np.mean(mindx24), '--', color='tab:green')
ax[1,1].plot(mindy24-np.mean(mindy24), label='Camera 2', color='tab:green')
ax[1,1].plot(mindx34-np.mean(mindx34), '--', color='tab:red')
ax[1,1].plot(mindy34-np.mean(mindy34), label='Camera 3', color='tab:red')
ax[1,1].set_title('Horizontal Displacement and Rotation Case')
fig.savefig('plots/positions.png')
# First three PCA modes for each test.
fig, ax = plt.subplots(figsize=(7,8.5), nrows=4, sharex=True)
ax[0].plot(pca1[0,:], label='Mode 1', color='tab:purple')
ax[0].plot(pca1[1,:], label='Mode 2', color='tab:cyan')
ax[0].plot(pca1[2,:], label='Mode 3', color='tab:gray')
ax[0].set_title('Ideal Case')
ax[0].legend(loc='best')
ax[0].set_ylabel('Position', fontsize=14)
ax[1].plot(pca2[0,:], label='Mode 1', color='tab:purple')
ax[1].plot(pca2[1,:], label='Mode 2', color='tab:cyan')
ax[1].plot(pca2[2,:], label='Mode 3', color='tab:gray')
ax[1].set_title('Noisy Case')
#ax[1].legend(loc='best')
ax[1].set_ylabel('Position', fontsize=14)
ax[2].plot(pca3[0,:], label='Mode 1', color='tab:purple')
ax[2].plot(pca3[1,:], label='Mode 2', color='tab:cyan')
ax[2].plot(pca3[2,:], label='Mode 3', color='tab:gray')
ax[2].set_title('Horizontal Displacement Case')
#ax[2].legend(loc='best')
ax[2].set_ylabel('Position', fontsize=14)
ax[3].plot(pca4[0,:], label='Mode 1', color='tab:purple')
ax[3].plot(pca4[1,:], label='Mode 2', color='tab:cyan')
ax[3].plot(pca4[2,:], label='Mode 3', color='tab:gray')
ax[3].set_title('Horizontal Displacement and Rotation Case')
ax[3].set_xlabel('Frame', fontsize=14)
ax[3].set_ylabel('Position', fontsize=14)
#ax[3].legend(loc='best')
fig.savefig('plots/PCA_comparison_modes.png')
# Variance captured per mode, per test.
fig, ax = plt.subplots()
ax.plot(np.arange(1,7,1), percentage1, '-^', label='Ideal Case')
ax.plot(np.arange(1,7,1), percentage2, '-^', label='Noisy Case')
ax.plot(np.arange(1,7,1), percentage3, '-^', label='Horizontal Displacement Case')
ax.plot(np.arange(1,7,1), percentage4, '-^', label='Horizontal Displacement and Rotation Case')
ax.legend(loc='best')
ax.set_xlabel('Mode')
ax.set_ylabel('Proportion of Variance')
fig.savefig('plots/variance.png')
| [
"enuss@CEEs-MacBook-Pro.local"
] | enuss@CEEs-MacBook-Pro.local |
0ad6bc7fb125b4c05b355fce78189bb9437fe60e | 11b5b1af7cfa74f35e05dd342cd70e2d0a2b3fd7 | /model/model.py | 296ba0e99f7c72081d25c4c1642c49971ea1d2fd | [
"MIT"
] | permissive | SegunMarcaida/X_RAY_THORAX | e49644633f2a5e194068b9fb4f40bc48d99b821f | 35a39761d3b11ce9e47509025054f25e5f26aab9 | refs/heads/main | 2023-04-09T03:00:48.459693 | 2021-04-22T21:30:21 | 2021-04-22T21:30:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,560 | py | import torch
import torch.nn as nn
import sys
from torchvision import models
from .Global_pooling import GlobalPool
from .attention import AttentionMap
from efficientnet_pytorch import EfficientNet
from torch.autograd import Function
import torch.nn.functional as F
class ResNet18(nn.Module):
    """ResNet18 backbone with a custom classification head.

    Identical to the standard torchvision ResNet18 except that average
    pooling is replaced by the configurable ``GlobalPool`` and the fully
    connected layer is replaced by dropout + linear + sigmoid, so the
    network emits per-class probabilities directly.

    Args:
        cfg: configuration object; must provide ``pretrained``,
            ``num_classes``, ``multi_label`` and whatever ``GlobalPool``
            reads from it.
    """
    def __init__(self, cfg):
        super(ResNet18, self).__init__()
        img_model = models.resnet18(pretrained=cfg.pretrained)
        self.cfg = cfg
        # Single sigmoid output for binary tasks, one per class otherwise.
        self.num_outputs = cfg.num_classes if cfg.multi_label else 1
        img_model.avgpool = GlobalPool(cfg)
        # NOTE(review): the linear layer assumes the pooled feature vector
        # is 512-wide (ResNet18's final channel count); if GlobalPool
        # concatenates several poolings the width differs -- confirm.
        img_model.fc = torch.nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(512, self.num_outputs),
            nn.Sigmoid()
        )
        self.img_model = img_model
    def forward(self, x):
        """
        :param x: input image batch (N x C x H x W, torchvision layout)
        :return x: per-class probabilities [size N x num_outputs]
        """
        x = self.img_model(x)
        return x
class DenseNet121(nn.Module):
    """DenseNet121 backbone with a convolutional classification head.

    Identical to the standard torchvision DenseNet121 except that the
    linear classifier is replaced by a 1x1 convolution applied after
    configurable global pooling, followed by a sigmoid.

    Ref: "Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>

    Args:
        cfg: configuration object; must provide ``pretrained``,
            ``num_classes``, ``multi_label``, ``attention_map`` and
            whatever ``GlobalPool`` reads from it.
    """
    def __init__(self, cfg):
        super(DenseNet121, self).__init__()
        self.img_model = models.densenet121(pretrained=cfg.pretrained)
        # Channel count of the final DenseNet feature map (1024 for d121).
        self.num_ftrs = self.img_model.classifier.in_features
        self.num_outputs = cfg.num_classes if cfg.multi_label else 1
        self.cfg = cfg
        self.pool = GlobalPool(cfg)
        self.drop = nn.Dropout(0.5)
        # 1x1 conv head replaces the stock linear classifier.
        self.img_model.classifier = torch.nn.Sequential(nn.Conv2d(self.num_ftrs, self.num_outputs, kernel_size=1,
                                                                  stride=1, padding=0, bias=True))
        self.sig = nn.Sigmoid()
        if cfg.attention_map:
            self._init_attention_map()
    def _init_attention_map(self):
        # NOTE(review): the module is registered as "attention_map_1" but
        # the (commented-out) code in forward refers to
        # ``self.attention_map`` -- as written, the attention module is
        # created yet never applied.  Confirm which name/behavior is
        # intended before relying on cfg.attention_map here.
        setattr(self, "attention_map_1", AttentionMap(self.cfg, self.num_ftrs))
    def forward(self, x):
        """
        :param x: input image batch (N x C x H x W, torchvision layout)
        :return x: per-class probabilities [size N x num_outputs]
        """
        # Dead assignment kept from an earlier per-layer loop (below).
        feat_map = x
        # for k, v in self.img_model.features._modules.items():
        #     feat_map = v(feat_map)
        # if self.cfg.attention_map:
        #     feat_map = self.attention_map(feat_map)
        feat_map = self.img_model.features(x)
        x = self.pool(feat_map)
        x = self.drop(x)
        x = self.img_model.classifier(x)
        x = self.sig(x)
        # Drop the two trailing 1x1 spatial dims so output is [N, C].
        if len(x.shape) > 2:
            x = torch.squeeze(x, -1)
        if len(x.shape) > 2:
            x = torch.squeeze(x, -1)
        return x
class PCAM_Model(nn.Module):
    """Probabilistic Class Activation Map (PCAM) pooling model.

    A 1x1-conv classifier head is created per class; its logit map over
    the backbone feature map drives the PCAM global pooling, and the same
    head is then applied to the pooled feature to produce the class logit.

    Ref: https://arxiv.org/pdf/2005.14480.pdf

    Args:
        cfg: configuration object; must provide ``backbone``
            ('densenet121' or 'ResNet18'), ``pretrained``, ``num_classes``
            and ``attention_map``.
    """
    def __init__(self, cfg):
        super(PCAM_Model, self).__init__()
        self.cfg = cfg
        if self.cfg.backbone == 'densenet121':
            self.img_model = models.densenet121(pretrained=cfg.pretrained)
            self.num_ftrs = self.img_model.classifier.in_features
        elif self.cfg.backbone == 'ResNet18':
            self.img_model = models.resnet18(pretrained=cfg.pretrained)
            self.num_ftrs = 512
            # Keep only the convolutional trunk (drop avgpool/fc) so the
            # model exposes a spatial feature map.
            self.img_model = nn.Sequential(
                self.img_model.conv1,
                self.img_model.bn1,
                self.img_model.relu,
                self.img_model.maxpool,
                self.img_model.layer1,
                self.img_model.layer2,
                self.img_model.layer3,
                self.img_model.layer4)
        self.num_outputs = cfg.num_classes
        # Force PCAM pooling regardless of the configured pool type.
        self.cfg.global_pool = 'PCAM'
        self.global_pool = GlobalPool(cfg)
        self.drop = nn.Dropout(0.0)
        self.sig = nn.Sigmoid()
        if cfg.attention_map:
            self._init_attention_map()
        self._init_classifier()

    def _init_classifier(self):
        # One 1x1-conv binary head per class.  Both supported backbones
        # use an identical head shape (num_ftrs -> 1), so the previous
        # duplicated per-backbone branches were collapsed into one.
        for index in range(self.cfg.num_classes):
            classifier = nn.Conv2d(
                self.num_ftrs,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True)
            classifier.weight.data.normal_(0, 0.01)
            classifier.bias.data.zero_()
            setattr(self, "fc_" + str(index), classifier)

    def _init_attention_map(self):
        setattr(self, "attention_map", AttentionMap(self.cfg, self.num_ftrs))

    def forward(self, x):
        """
        :param x: input image batch (N x C x H x W)
        :return: tuple of (logits [N x num_classes],
                 list of per-class logit maps, each [N x H' x W'])
        """
        if self.cfg.backbone == 'densenet121':
            feat_map = self.img_model.features(x)
        elif self.cfg.backbone == 'ResNet18':
            feat_map = self.img_model(x)
        logits = list()
        # [(N, H, W), (N, H, W),...]
        logit_maps = list()
        for index in range(self.cfg.num_classes):
            if self.cfg.attention_map:
                feat_map = self.attention_map(feat_map)
            classifier = getattr(self, "fc_" + str(index))
            # (N, 1, H, W)
            logit_map = classifier(feat_map)
            logit_maps.append(logit_map.squeeze())
            # (N, C, 1, 1) -- PCAM pooling weighted by the logit map
            feat = self.global_pool(feat_map, logit_map)
            # BUGFIX: F.dropout defaults to training=True, which kept
            # dropout active at evaluation time; honor self.training so
            # eval-mode inference is deterministic.
            feat = F.dropout(feat, training=self.training)
            # (N, 1, 1, 1) -> (N, 1)
            logit = classifier(feat)
            logit = logit.squeeze(-1).squeeze(-1)
            logits.append(logit)
        logits = torch.cat(logits, dim=1)
        return logits, logit_maps
class EfficientNet_model(nn.Module):
    """EfficientNet-B0 backbone followed by global pooling, dropout, a
    1x1-conv classifier and a sigmoid, mirroring DenseNet121 above.

    Args:
        cfg: configuration object; must provide ``num_classes``,
            ``multi_label`` and whatever ``GlobalPool`` reads from it.
    """
    def __init__(self, cfg):
        super(EfficientNet_model, self).__init__()
        self.img_model = EfficientNet.from_pretrained('efficientnet-b0')
        # 1280 = channel count of EfficientNet-B0's final feature map.
        self.num_ftrs = 1280
        self.cfg = cfg
        self.num_outputs = cfg.num_classes if cfg.multi_label else 1
        self.pool = GlobalPool(cfg)
        self.drop = nn.Dropout(0.5)
        self.FF = torch.nn.Sequential(nn.Conv2d(self.num_ftrs, self.num_outputs, kernel_size=1,
                                                stride=1, padding=0, bias=True))
        self.sig = nn.Sigmoid()

    def forward(self, x):
        """
        :param x: input image batch (N x C x H x W)
        :return: per-class probabilities [size N x num_outputs]
        """
        feat_map = self.img_model.extract_features(x)
        x = self.pool(feat_map)
        x = self.drop(x)
        x = self.FF(x)
        # BUGFIX: the previous ``torch.squeeze(x)`` removed *every*
        # singleton dimension, so a batch of one lost its batch axis and
        # a single-output (binary) head lost its class axis; the old
        # ``unsqueeze(0)`` patch-up then restored it on the wrong side,
        # yielding (1, N) instead of (N, 1).  Squeezing only the two
        # trailing spatial dims always yields [N, num_outputs],
        # consistent with DenseNet121.forward.
        x = x.squeeze(-1).squeeze(-1)
        x = self.sig(x)
        return x
class custom_xray(nn.Module):
"""
A custom model similar to ResNet architecture but smaller is size
"""
def __init__(self, cfg):
"""
Args : cfg :input configurations
"""
super(custom_xray, self).__init__()
self.num_channels = 3
self.num_outputs = cfg.num_classes if cfg.multi_label else 1
self.first_layer = nn.Sequential(
nn.Conv2d(3, self.num_channels * 5, kernel_size=7, stride=2,
padding=3, bias=False),
nn.BatchNorm2d(self.num_channels * 5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.num_channels = self.num_channels * 5
for x in range(4):
if x == 0:
coff = 1
else:
coff = 1.5
layer = torch.nn.Sequential(
nn.BatchNorm2d(int(self.num_channels * coff)),
nn.ReLU(inplace=True),
nn.Conv2d(int(self.num_channels * coff), self.num_channels * 2, kernel_size=3,
stride=1, padding=1, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=1,
stride=1, padding=0, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=3,
stride=1, padding=1, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=1,
stride=1, padding=0, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=1,
stride=1, padding=0, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=1,
stride=1, padding=0, bias=True),
nn.BatchNorm2d(self.num_channels * 2),
nn.ReLU(inplace=True),
nn.Conv2d(self.num_channels * 2, self.num_channels * 2, kernel_size=1,
stride=1, padding=0, bias=True),
nn.Dropout(),
nn.AvgPool2d(2, stride=2)
)
setattr(self, 'layer_' + str(x), layer)
skip_layer = torch.nn.Sequential(
nn.BatchNorm2d(int(self.num_channels * coff)),
nn.ReLU(inplace=True),
nn.Conv2d(int(self.num_channels * coff), self.num_channels, kernel_size=1,
stride=1, padding=0, bias=True),
nn.AvgPool2d(2, stride=2)
)
setattr(self, 'skiplayer_' + str(x), skip_layer)
self.num_channels = self.num_channels * 2
self.pool = GlobalPool(cfg)
self.drop = nn.Dropout(0.5)
self.FF = torch.nn.Sequential(nn.Conv2d(int(self.num_channels * 1.5), self.num_outputs, kernel_size=1,
stride=1, padding=0, bias=True))
self.sig = nn.Sigmoid()
def forward(self, image):
"""
:param image: input image [size N X H X W X C]
:return: problity of each disease [size N X 8]
"""
image = self.first_layer(image)
for x_ in range(4):
image_ = getattr(self, 'skiplayer_' + str(x_))(image)
image = getattr(self, 'layer_' + str(x_))(image)
image = torch.cat([image, image_], dim=1)
image = self.pool(image)
image = self.drop(image)
image = self.FF(image).squeeze()
output = self.sig(image)
return output
| [
"alinsteinjose@gmail.com"
] | alinsteinjose@gmail.com |
d4629669e16ac320298aefd9df96cf73804d3c4a | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_43033.py | b05910d62650ebad3a06ec9bae8a3e8ca0a1252c | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((630.119, 511.453, 576.726), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((585.061, 509.682, 526.489), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((521.572, 509.549, 475.752), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((514.774, 584.669, 593.047), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((398.748, 489.561, 324.612), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((594.944, 502.698, 547.787), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((595.68, 502, 549.422), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((590.851, 506.155, 576.512), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((579.508, 505.854, 602.118), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((581.34, 478.731, 594.646), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((565.109, 462.805, 611.033), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((562.847, 435.724, 603.464), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((622.894, 507.143, 551.239), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((507.324, 358.874, 651.796), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((423.381, 375.222, 468.659), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((423.381, 375.222, 468.659), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((444.891, 391.216, 477.849), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((466.761, 405.64, 488.571), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((488.345, 421.708, 497.487), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((510.58, 438.69, 502.338), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((533.123, 455.696, 503.421), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((557.926, 466.707, 510.956), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((358.514, 329.566, 603.332), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((758.231, 603.822, 421.217), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((551.323, 476.152, 468.534), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((551.323, 476.152, 468.534), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((554.703, 504.908, 470.795), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((545.665, 532.2, 474.802), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((523.908, 541.179, 491.62), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((601.262, 561.713, 583.895), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((429.12, 518.724, 408.184), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((581.344, 531.554, 544.402), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((581.249, 531.713, 544.571), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((553.972, 524.124, 546.396), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((528.843, 511.821, 542.697), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((543.289, 489.576, 552.432), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((553.515, 464.901, 561.789), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((567.605, 441.714, 569.925), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((583.014, 421.154, 581.709), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((614.392, 444.586, 505.477), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((549.816, 400.211, 659.848), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((614.126, 482.344, 478.893), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((595.247, 498.309, 487.311), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((554.02, 533.585, 507.429), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((512.331, 568.802, 525.914), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((574.576, 611.669, 553.431), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((408.788, 583.555, 528.966), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((486.777, 522.32, 566.41), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((496.443, 524.298, 540.008), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((502.84, 524.53, 512.223), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((495.136, 528.701, 483.861), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((511.917, 538.275, 460.872), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((535.611, 543.917, 443.776), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((589.32, 520.617, 500.38), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((492.954, 569.195, 378.67), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
70c0eebdf6fdce12711b9dca5bb4eaf6f545171f | 0c6ae006fbcb27f60399917d29084ff42a6a96ad | /app2/populate/test.py | a045fa45611b0f95d6e1c1670cce702ead82834c | [] | no_license | ailan12345/app2 | 9ac54c287d90b86ca0726dde12d5dec62ae2f6eb | b9a00cb55a0d54198eb2030b158433ad73274a17 | refs/heads/master | 2020-03-17T09:42:11.457932 | 2018-05-15T13:47:52 | 2018-05-15T13:47:52 | 133,484,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | from populate import base, admin, user
admin.populate()
user.populate() | [
"isccyut@isccyut-M11BB"
] | isccyut@isccyut-M11BB |
ac1338440b36fda5674e33b1bf2e2085aaa9d69a | 98e6328bc25a81bf2b492127b091e01c0dabb345 | /graphs/data/2009-10-08/process.py | 22c76d80336cb52792cd7c8f2fd39561badc57ce | [] | no_license | bernardokyotoku/Kyotoku-thesis | 7d80bbcf8c93b3610262e4b3e186dbafb0eae29c | 548eb33fc60f397f48c4999420e87c20737ee7f9 | refs/heads/master | 2016-09-06T01:12:54.305113 | 2011-04-14T14:15:56 | 2011-04-14T14:15:56 | 1,613,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #!/usr/bin/env python
from numpy import *
from matplotlib.pyplot import *
import scientific_utils as su
T=range(1,11)
data = loadtxt('./T1-0.dat')
wavelength = data[:,0]
figure(10)
plot(wavelength,10*log10(data[:,1]))
figure(2)
mask =wavelength>1483
mask&=wavelength<1493
wavelength = wavelength[mask]
data = wavelength
for i in range(24,14,-1):
for j in range(len(T)):
filename = './T%d-%d.dat'%(T[j],i)
print filename
raw = loadtxt(filename)
data = vstack((data,raw[mask,1]))
wavelength = raw[mask,0]
power = raw[mask,1]
plot(wavelength,10*log10(power))
data = 40.5+10*log10(data)
data[0,:]=wavelength
savetxt('data1dB.dat',data.transpose(),fmt='%.8e')
input()
| [
"bernardo@kyotoku.org"
] | bernardo@kyotoku.org |
8ad1db408f0ed4b1820869e77505354eb6f528a5 | 210fb7e5d8b588cb8796acd1699ba184a40566c3 | /django/contrib/staticfiles/handlers.py | aba383f07ce604d41bc68f13f45aa78c81e01a6a | [] | no_license | suosuo1930/parse_django | 2703f8195c4e0a75f604641b2f264e1f75a9ab86 | ffaa299868dc1295aa665ad4b11d3f1e7cf0403f | refs/heads/master | 2020-09-11T14:46:12.577076 | 2019-11-25T13:27:19 | 2019-11-25T13:27:19 | 222,100,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,368 | py | from urllib.parse import urlparse
from urllib.request import url2pathname
from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.core.handlers.exception import response_for_exception
from django.core.handlers.wsgi import WSGIHandler, get_path_info
# 关键 类
# Django 专门 用来处理 静态文件的 类
class StaticFilesHandler(WSGIHandler):
"""
拦截对静态文件目录的调用的WSGI中间件
由STATIC_URL设置定义,并为这些文件提供服务
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def __init__(self, application):
print("3333333333333333333333333")
self.application = application
# self.application = <django.core.handlers.wsgi.WSGIHandler object at 0x000001AFF81CB8C8>
self.base_url = urlparse(self.get_base_url()) # 静态文件 url 解析 结果
# self.base_url == ParseResult(scheme='', netloc='', path='/static/', params='', query='', fragment='')
# 例如
# base_url = urlparse('http://www.cwi.nl:80/%7Eguido/Python.html')
# 则 base_url == ParseResult(scheme='http', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html',
# params='', query='', fragment='')
super().__init__()
def load_middleware(self):
# Middleware are already loaded for self.application; no need to reload
# them for self.
pass
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Return the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""Serve the request path."""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
return response_for_exception(request, e)
return super().get_response(request)
# 关键代码 关键代码 关键代码 关键代码
def __call__(self, environ, start_response):
# print("environ==", environ, "start_response==", start_response)
# for k,v in enumerate(environ.items()):
# print("{}---{}=={}".format(k, str(v[0]).lower(), v[1]))
if not self._should_handle(get_path_info(environ)): # 执行
# get_path_info(environ) = "/"
# 关键代码
return self.application(environ, start_response)
return super().__call__(environ, start_response)
| [
"1963275449@qq.com"
] | 1963275449@qq.com |
1ed9fd2930b314df28d115380f4651567df40106 | 6ca52e73fabce4c2d0b6b9cfb07ef36246c47cc4 | /adapted_from_Allen_Institute/LGN_modifications/nwb_copy.py | 90919b1118a30df75c771f0109f49f478105c3ab | [
"BSD-3-Clause"
] | permissive | lenamyk/Exploring_the_Allen_model | c1903e36fe5321152bc70d14e54f61040c40233b | 2da3a248a0ffc613b8019ba798e88bd2f71348b1 | refs/heads/master | 2022-04-26T07:26:36.258678 | 2020-05-02T18:39:01 | 2020-05-02T18:39:01 | 258,632,148 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,032 | py | # Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import copy
import numpy as np
import os
import h5py
import time
import uuid
import tempfile
from bmtk.analyzer.visualization.widgets import PlotWidget, MovieWidget
import numpy as np
__version__ = '0.1.0'
# Maps each logical dimension name to the tuple of units that are legal for
# it.  A ``None`` dimension (dimensionless data) only admits a ``None`` unit.
allowed_dimensions = {'firing_rate': ('hertz',),
                      'time': ('second', 'millisecond'),
                      'brightness': ('intensity',),
                      'distance': ('pixel',),
                      'index': ('gid',),
                      'intensity': ('bit',None),
                      'voltage': ('volt',),
                      'current': ('ampere',),
                      None: (None,),
                      'dev': ('dev',)}
# Maps each HDF5 storage group name to the dimensions its datasets may carry;
# checked when data objects are added to / read from a file.
allowed_groups = {'firing_rate': ('firing_rate',),
                  'spike_train': ('index', 'time'),
                  'grayscale_movie': ('intensity',),
                  'time_series': ('voltage', 'current'),
                  'dev': ('dev',)}
# Groups/datasets expected at the root of a blank NWB-style file
# (see create_blank_file below).
top_level_data = ['file_create_date',
                  'stimulus',
                  'acquisition',
                  'analysis',
                  'processing',
                  'epochs',
                  'general',
                  'session_description',
                  'nwb_version',
                  'identifier']
def open_file(file_name):
    """Open ``file_name`` as an HDF5 file in read/write ('a') mode.

    The mode is now explicit: relying on h5py's default was deprecated and
    the default changed from 'a' to read-only 'r' in h5py 3.0, which would
    silently break the write paths in this module.

    :param file_name: path to an HDF5 file (created if absent)
    :return: an open ``h5py.File`` handle
    """
    return h5py.File(file_name, 'a')
class Scale(object):
    """An explicit per-sample axis scale (e.g. one timestamp per frame)."""

    def __init__(self, scale_range, dimension, unit):
        """
        :param scale_range: sequence of coordinate values, one per sample
        :param dimension: key of ``allowed_dimensions``
        :param unit: unit permitted for that dimension
        """
        assert dimension in allowed_dimensions
        assert unit in allowed_dimensions[dimension]
        self.scale_range = scale_range
        self.dimension = dimension
        self.unit = unit
        self._hdf5_location = None

    def __eq__(self, other):
        # Evaluate all three comparisons up front (no short-circuiting).
        dims_match = self.dimension == other.dimension
        units_match = self.unit == other.unit
        ranges_match = np.allclose(self.scale_range, other.scale_range)
        return dims_match and units_match and ranges_match

    @property
    def data(self):
        """The raw coordinate values backing this scale."""
        return self.scale_range
class DtScale(object):
    """An implicit, uniformly spaced axis scale defined by a step ``dt``."""

    def __init__(self, dt, dimension, unit):
        """
        :param dt: positive float step between consecutive samples
        :param dimension: key of ``allowed_dimensions``
        :param unit: unit permitted for that dimension
        """
        assert dimension in allowed_dimensions
        assert unit in allowed_dimensions[dimension]
        self.dt = dt
        self.dimension = dimension
        self.unit = unit
        self._hdf5_location = None

    def __eq__(self, other):
        d = self.dimension == other.dimension
        u = self.unit == other.unit
        # BUG FIX: this previously compared ``self.scale_range``, an
        # attribute DtScale does not have, so equality always raised
        # AttributeError.  Compare the step size instead.
        s = np.allclose(self.dt, other.dt)
        return d and u and s

    @property
    def data(self):
        """The step size; serialized as a scalar scale in HDF5."""
        return self.dt
class NullScale(object):
    """Placeholder scale for data without a meaningful axis; all fields None."""

    def __init__(self):
        self.data = None
        self.dimension = None
        self.unit = None
        self._hdf5_location = None
class Data(object):
    """Base container for NWB-style datasets.

    Couples an n-dimensional array with one axis scale per dimension, a
    dimension label, a unit, and string-keyed metadata.  Subclasses declare
    a class attribute ``group`` naming the HDF5 subgroup ('firing_rate',
    'spike_train', ...) under which instances are (de)serialized.
    """

    def __init__(self, data, dimension, unit, scales, metadata):
        """
        :param data: array-like payload (must expose ``.shape``)
        :param dimension: key of ``allowed_dimensions``
        :param unit: unit permitted for that dimension
        :param scales: Scale/DtScale, or a sequence of them (one per axis)
        :param metadata: dict with string keys; shallow-copied before storing
        """
        assert dimension in allowed_dimensions
        assert unit in allowed_dimensions[dimension]

        # A bare Scale/DtScale is shorthand for one-dimensional data.
        if isinstance(scales, (Scale, DtScale)):
            assert len(data.shape) == 1
            scales = (scales,)

        for key in metadata.keys():
            # BUG FIX: py2 ``unicode`` no longer exists; ``str`` covers all
            # text keys on py3.
            assert isinstance(key, str)

        for ii, scale in enumerate(scales):
            if isinstance(scale, Scale):
                # Each explicit scale must label its axis sample-for-sample.
                assert len(scale.scale_range) == data.shape[ii]
            elif isinstance(scale, DtScale):
                # BUG FIX: np.float was removed in numpy>=1.20; np.floating
                # matches any numpy floating-point scalar.
                assert isinstance(scale.dt, (float, np.floating)) and scale.dt > 0
            else:
                raise Exception('scales must contain Scale or DtScale instances')

        if len(scales) == 0:
            scales = [NullScale()]

        # Shallow copy so later caller-side mutation does not leak in.
        metadata = copy.copy(metadata)
        self.data = data
        self.scales = scales
        self.dimension = dimension
        self.unit = unit
        self.metadata = metadata
        self._hdf5_location = None

    def __eq__(self, other):
        da = np.allclose(self.data, other.data)
        d = self.dimension == other.dimension
        u = self.unit == other.unit
        s = [s1 == s2 for s1, s2 in zip(self.scales, other.scales)].count(True) == len(self.scales)
        if len(self.metadata) != len(other.metadata):
            m = False
        else:
            try:
                # Count key-by-key matches; any missing key raises KeyError.
                matches = 0
                for key in self.metadata.keys():
                    matches += other.metadata[key] == self.metadata[key]
                assert matches == len(self.metadata)
                m = True
            except Exception:  # narrowed from a bare except
                m = False

        return da and d and u and s and m

    @staticmethod
    def _get_from_group(object_class, parent_group, group_name, ii=0):
        """Rebuild an ``object_class`` instance from ``parent_group/group_name/ii``."""
        data_group = parent_group['%s/%s' % (group_name, ii)]
        data, scales, dimension, unit, metadata = _get_data(data_group)

        assert dimension in allowed_groups[object_class.group]

        if unit == "None":
            unit = None

        scale_list = []
        for scale in scales:
            if scale.attrs['type'] == 'Scale':
                curr_scale = Scale(scale, scale.attrs['dimension'], scale.attrs['unit'])
            elif scale.attrs['type'] == 'DtScale':
                # BUG FIX: h5py's Dataset.value was removed in h5py 3.0;
                # indexing with () reads the scalar.
                curr_scale = DtScale(float(scale[()]), scale.attrs['dimension'], scale.attrs['unit'])
            elif scale.attrs['type'] == 'NullScale':
                curr_scale = None
            else:
                raise Exception('unrecognized scale type: %r' % scale.attrs['type'])
            if curr_scale is not None:
                scale_list.append(curr_scale)

        if len(scale_list) == 1:
            scale_list = scale_list[0]

        return object_class(data, dimension=dimension, unit=unit, scale=scale_list, metadata=metadata)

    def add_to_stimulus(self, f, compression='gzip', compression_opts=4):
        """Append this object under 'stimulus' in open file ``f``."""
        self._add_to_group(f, 'stimulus', self.__class__.group, compression=compression,
                           compression_opts=compression_opts)

    @classmethod
    def get_stimulus(cls, f, ii=None):
        """Read instance ``ii`` of ``cls`` from 'stimulus'; all when ``ii`` is None.

        With ``ii=None`` and exactly one stored instance, the bare instance
        (not a one-element list) is returned, matching historic behavior.
        """
        if ii is None:
            return_data = [cls.get_stimulus(f, ii) for ii in range(len(f['stimulus/%s' % cls.group]))]
            if len(return_data) == 1:
                return_data = return_data[0]
            return return_data
        else:
            return Data._get_from_group(cls, f['stimulus'], cls.group, ii=ii)

    def add_to_acquisition(self, f, compression='gzip', compression_opts=4):
        """Append this object under 'acquisition' in open file ``f``."""
        self._add_to_group(f, 'acquisition', self.__class__.group, compression=compression,
                           compression_opts=compression_opts)

    @classmethod
    def get_acquisition(cls, f, ii=None):
        """Read instance ``ii`` of ``cls`` from 'acquisition'; all when ``ii`` is None."""
        if ii is None:
            return_data = [cls.get_acquisition(f, ii) for ii in range(len(f['acquisition/%s' % cls.group]))]
            if len(return_data) == 1:
                return_data = return_data[0]
            return return_data
        else:
            return Data._get_from_group(cls, f['acquisition'], cls.group, ii=ii)

    def add_to_processing(self, f, processing_submodule_name):
        """Append this object under 'processing/<submodule>' (created on demand)."""
        if processing_submodule_name not in f['processing']:
            f['processing'].create_group(processing_submodule_name)
        return self._add_to_group(f, 'processing/%s' % processing_submodule_name, self.__class__.group)

    @classmethod
    def get_processing(cls, f, subgroup_name, ii=None):
        """Read instance ``ii`` from 'processing/<subgroup>'; a dict of all when None."""
        if ii is None:
            return_data = {}
            for ii in range(len(f['processing/%s/%s' % (subgroup_name, cls.group)])):
                return_data[ii] = cls.get_processing(f, subgroup_name, ii)
            return return_data
        else:
            return Data._get_from_group(cls, f['processing/%s' % subgroup_name], cls.group, ii=ii)

    def add_to_analysis(self, f, analysis_submodule_name):
        """Append this object under 'analysis/<submodule>' (created on demand)."""
        if analysis_submodule_name not in f['analysis']:
            f['analysis'].create_group(analysis_submodule_name)
        return self._add_to_group(f, 'analysis/%s' % analysis_submodule_name, self.__class__.group)

    @classmethod
    def get_analysis(cls, f, subgroup_name, ii=None):
        """Read instance ``ii`` from 'analysis/<subgroup>'; a list of all when None."""
        if ii is None:
            # BUG FIX: the recursive call previously passed (f, ii,
            # subgroup_name), swapping the last two arguments.
            return [cls.get_analysis(f, subgroup_name, ii)
                    for ii in range(len(f['analysis/%s/%s' % (subgroup_name, cls.group)]))]
        else:
            return Data._get_from_group(cls, f['analysis/%s' % subgroup_name], cls.group, ii=ii)

    def _add_to_group(self, f, parent_name, group_name, compression='gzip', compression_opts=4):
        """Serialize self under ``f[parent_name]/group_name/<next index>``.

        External h5py datasets are stored as external links instead of
        copies; scales already written to HDF5 are reused via their cached
        ``_hdf5_location``.
        """
        assert group_name in allowed_groups
        assert self.dimension in allowed_groups[group_name]

        try:
            parent_group = f[parent_name]
        except ValueError:
            try:
                file_name = f.filename
                # BUG FIX: the format string previously received only the
                # first of its two arguments, so this raise itself crashed
                # with a TypeError.
                raise Exception('Parent group:%s not found in file %s' % (parent_name, file_name))
            except ValueError:
                raise Exception('File not valid: %s' % f)

        if self.__class__.group in parent_group:
            subgroup = parent_group[self.__class__.group]
            int_group_name = str(len(subgroup))
        else:
            subgroup = parent_group.create_group(self.__class__.group)
            int_group_name = '0'

        # Data living in another HDF5 file is referenced, never copied.
        if isinstance(self.data, h5py.Dataset):
            if subgroup.file == self.data.file:
                raise NotImplementedError
            else:
                return _set_data_external_link(subgroup, int_group_name, self.data.parent)
        else:
            dataset_group = subgroup.create_group(int_group_name)

        # Shared-scale management: each Scale/DtScale/NullScale is written
        # once; subsequent serializations reuse its cached HDF5 dataset.
        scale_group = None
        scale_list = []
        for ii, scale in enumerate(self.scales):
            if isinstance(scale, (Scale, DtScale, NullScale)):
                if scale._hdf5_location is None:
                    if scale_group is None:
                        scale_group = dataset_group.create_group('scale')
                    curr_scale = _set_scale(scale_group, 'dimension_%s' % ii, scale.data, scale.dimension,
                                            scale.unit, scale.__class__.__name__)
                    scale._hdf5_location = curr_scale
                else:
                    curr_scale = scale._hdf5_location
            elif isinstance(scale, h5py.Dataset):
                curr_scale = scale
            else:
                raise Exception('unsupported scale type: %r' % type(scale))
            scale_list.append(curr_scale)

        _set_data(subgroup, dataset_group.name, self.data, scale_list, self.dimension, self.unit,
                  metadata=self.metadata, compression=compression, compression_opts=compression_opts)
class FiringRate(Data):
    """A 1-D firing-rate trace in hertz, stored under 'firing_rate'."""

    group = 'firing_rate'

    def __init__(self, data, **kwargs):
        """
        :param data: 1-D array of rates in hertz
        :keyword scale: Scale or DtScale describing the time axis (required)
        :keyword metadata: optional dict of string-keyed metadata
        """
        scale = kwargs.get('scale')
        assert isinstance(scale, (Scale, DtScale))
        super(FiringRate, self).__init__(data, 'firing_rate', 'hertz',
                                         scale, kwargs.get('metadata', {}))

    def get_widget(self, **kwargs):
        """Build a PlotWidget of rate versus time."""
        times = self.scales[0].data[:]
        rates = self.data[:]
        return PlotWidget(times, rates, metadata=self.metadata, **kwargs)
class Dev(Data):
    """Free-form development/scratch data; dimension and unit are caller-supplied."""

    group = 'dev'

    def __init__(self, data, **kwargs):
        super(Dev, self).__init__(data,
                                  kwargs.get('dimension'),
                                  kwargs.get('unit'),
                                  kwargs.get('scale'),
                                  kwargs.get('metadata', {}))
class TimeSeries(Data):
    """A voltage/current trace sampled along a time axis, stored under 'time_series'."""

    group = 'time_series'

    def __init__(self, data, **kwargs):
        """
        :param data: 1-D sample array
        :keyword dimension: 'voltage' or 'current'
        :keyword unit: unit permitted for that dimension
        :keyword scale: Scale or DtScale whose dimension is 'time' (required)
        :keyword metadata: optional dict of string-keyed metadata
        """
        time_scale = kwargs.get('scale')
        assert isinstance(time_scale, (Scale, DtScale))
        assert time_scale.dimension == 'time'
        super(TimeSeries, self).__init__(data,
                                         kwargs.get('dimension'),
                                         kwargs.get('unit'),
                                         time_scale,
                                         kwargs.get('metadata', {}))
class SpikeTrain(Data):
    """Spike data stored under 'spike_train'.

    Two layouts are accepted: a bare array of spike times (no scale, unit
    must be a time unit), or per-index data labeled by a single Scale
    (unit defaults to 'gid').
    """

    group = 'spike_train'

    def __init__(self, data, **kwargs):
        scale_arg = kwargs.get('scale', [])
        unit = kwargs.get('unit', 'gid')
        metadata = kwargs.get('metadata', {})

        if isinstance(scale_arg, Scale):
            # A single Scale object: data indexes cells along that scale.
            dimension, scales = 'index', scale_arg
        elif len(scale_arg) == 0:
            # No scale at all: data is a flat list of spike times.
            assert unit in allowed_dimensions['time']
            dimension, scales = 'time', []
        else:
            # A one-element sequence of Scale behaves like the first case.
            assert len(scale_arg) == 1 and isinstance(scale_arg[0], Scale)
            dimension, scales = 'index', scale_arg
        super(SpikeTrain, self).__init__(data, dimension, unit, scales, metadata)
class GrayScaleMovie(Data):
    """An intensity movie (frames over time), stored under 'grayscale_movie'."""

    group = 'grayscale_movie'

    def __init__(self, data, **kwargs):
        super(GrayScaleMovie, self).__init__(data,
                                             'intensity',
                                             kwargs.get('unit', None),
                                             kwargs.get('scale'),
                                             kwargs.get('metadata', {}))

    def get_widget(self, ax=None):
        """Build a MovieWidget playing the frames along the first scale's axis."""
        frames = self.data[:]
        t_range = self.scales[0].data[:]
        return MovieWidget(t_range=t_range, data=frames, ax=ax, metadata=self.metadata)
def get_temp_file_name():
    """Return the path of a freshly generated temporary file name.

    The file itself is removed before returning, so the caller receives a
    path that does not currently exist on disk.
    """
    handle, path = tempfile.mkstemp()
    os.close(handle)
    os.remove(path)
    return path
def create_blank_file(save_file_name=None, force=False, session_description='', close=False):
    """Create an NWB-style HDF5 file containing the standard empty layout.

    :param save_file_name: target path; a fresh temp-file path when None
    :param force: overwrite an existing file instead of failing
    :param session_description: stored in the 'session_description' dataset
    :param close: close the file and return None instead of the open handle
    :return: the open ``h5py.File`` when ``close`` is False, else None
    """
    if save_file_name is None:
        save_file_name = get_temp_file_name()

    if not force:
        # 'w-' refuses to clobber an existing file.
        f = h5py.File(save_file_name, 'w-')
    else:
        if os.path.exists(save_file_name):
            os.remove(save_file_name)
        f = h5py.File(save_file_name, 'w')

    f.create_group('acquisition')
    f.create_group('analysis')
    f.create_group('epochs')
    f.create_group('general')
    f.create_group('processing')
    f.create_group('stimulus')

    # BUG FIX: np.string_ was removed in numpy 2.0; np.bytes_ is the same
    # type under both numpy 1.x and 2.x.
    f.create_dataset("file_create_date", data=np.bytes_(time.ctime()))
    f.create_dataset("session_description", data=session_description)
    f.create_dataset("nwb_version", data='iSee_%s' % __version__)
    f.create_dataset("identifier", data=str(uuid.uuid4()))

    if close:
        f.close()
    else:
        return f
def assert_subgroup_exists(child_name, parent):
    """Raise RuntimeError if ``parent`` has no subgroup ``child_name``.

    The original wrapped an ``assert`` in a bare ``except:``, which both
    swallowed unrelated errors (KeyboardInterrupt included) and disappeared
    entirely under ``python -O``; test membership directly instead.
    """
    if child_name not in parent:
        raise RuntimeError('Group: %s has no subgroup %s' % (parent.name, child_name))
def _set_data_external_link(parent_group, dataset_name, data):
    """Link ``dataset_name`` in ``parent_group`` to ``data`` stored in another file."""
    link = h5py.ExternalLink(data.file.filename, data.name)
    parent_group[dataset_name] = link
def _set_scale_external_link(parent_group, name, scale):
    """Link ``name`` in ``parent_group`` to an externally stored scale.

    Returns the newly created link entry.  Two leftover debug ``print()``
    calls from the original were removed.
    """
    parent_group[name] = h5py.ExternalLink(scale.file.filename, scale.name)
    return parent_group[name]
def _set_data(parent_group, dataset_name, data, scales, dimension, unit, force=False, metadata=None, compression='gzip',
              compression_opts=4):
    """Write ``data`` as ``parent_group/dataset_name/data`` with dimension scales.

    Parameters
    ----------
    parent_group : h5py.Group
        Group in which the dataset subgroup is created (or reused).
    dataset_name : str
        Name of the subgroup that holds the 'data' dataset.
    data : array-like with ``.ndim`` / ``.shape``
        Values to store; must have exactly one scale per dimension.
    scales : h5py.Dataset or sequence of h5py.Dataset
        Previously created scale datasets (see ``_set_scale``).
    dimension, unit : str
        Must be a key of ``allowed_dimensions`` and one of its units.
    force : bool
        Overwrite an existing 'data' dataset instead of raising IOError.
    metadata : dict or None
        Extra attributes copied onto the subgroup; keys must be new.

    Returns
    -------
    h5py.Dataset
        The created 'data' dataset.
    """
    # Fix: original used a mutable default argument (metadata={}).
    if metadata is None:
        metadata = {}
    # Check inputs:
    if isinstance(scales, h5py.Dataset):
        scales = (scales,)
    else:
        assert isinstance(scales, (list, tuple))
    assert data.ndim == len(scales)
    assert dimension in allowed_dimensions
    assert unit in allowed_dimensions[dimension]
    for ii, scale in enumerate(scales):
        # Scales are 0-d (placeholder) or 1-d; a non-empty 1-d scale must
        # match the length of the corresponding data dimension.
        assert len(scale.shape) in (0, 1)
        check_dimension = str(scale.attrs['dimension'])
        if check_dimension == 'None':
            check_dimension = None
        check_unit = scale.attrs['unit']
        if check_unit == 'None':
            check_unit = None
        assert check_dimension in allowed_dimensions
        assert check_unit in allowed_dimensions[check_dimension]
        if len(scale.shape) == 1:
            assert len(scale) == data.shape[ii] or len(scale) == 0
    if dataset_name not in parent_group:
        dataset_group = parent_group.create_group(dataset_name)
    else:
        dataset_group = parent_group[dataset_name]
    for key, val in metadata.items():
        # Refuse to silently clobber existing metadata attributes.
        assert key not in dataset_group.attrs
        dataset_group.attrs[key] = val
    if 'data' in dataset_group:
        if not force:
            # Fix: message previously hard-coded 'Field "stimulus"'.
            raise IOError('Field "data" of %s is not empty; override with force=True' % dataset_group.name)
        else:
            del dataset_group['data']
    dataset = dataset_group.create_dataset(name='data', data=data, compression=compression,
                                           compression_opts=compression_opts)
    for ii, scale in enumerate(scales):
        dataset.dims[ii].label = scale.attrs['dimension']
        dataset.dims[ii].attach_scale(scale)
    # Removed a stray debug print ('generating spikes') left here.
    # Store dimension/unit as UTF-8 bytes for Python 3 compatibility:
    dataset.attrs.create('dimension', str(dimension).encode('UTF-8'))
    dataset.attrs.create('unit', str(unit).encode('UTF-8'))
    return dataset
def _set_scale(parent_group, name, scale, dimension, unit, scale_class_name):
    """Create a scale dataset under ``parent_group`` and tag it with its attrs.

    ``scale=None`` creates an empty (length-0) placeholder dataset.
    Returns the created h5py dataset.
    """
    assert dimension in allowed_dimensions
    assert unit in allowed_dimensions[dimension]
    if scale is None:
        ds = parent_group.create_dataset(name=name, shape=(0,))
    else:
        values = np.array(scale)
        assert values.ndim in (0, 1)
        ds = parent_group.create_dataset(name=name, data=values)
    ds.attrs['dimension'] = str(dimension)
    ds.attrs['unit'] = str(unit)
    ds.attrs['type'] = scale_class_name
    return ds
def _get_data(dataset_group):
    """Unpack a dataset group into (data, scales, dimension, unit, metadata)."""
    dataset = dataset_group['data']
    # One attached scale per data dimension, in dimension order.
    scales = tuple(dim[0] for dim in dataset.dims)
    return (dataset,
            scales,
            dataset.attrs['dimension'],
            dataset.attrs['unit'],
            dict(dataset_group.attrs))
def get_stimulus(f):
    """Print every dataset group under the file's 'stimulus' section (debug helper)."""
    category = 'stimulus'
    for parent_name in f[category]:
        parent = f[category][parent_name]
        for data_name in parent:
            print(parent[data_name])
def add_external_links(parent_group, external_file_name, external_group_name_list=top_level_data):
    """Link each named top-level group in ``parent_group`` to the same-named group in another file."""
    for group_name in external_group_name_list:
        parent_group[group_name] = h5py.ExternalLink(external_file_name, group_name)
| [
"noreply@github.com"
] | lenamyk.noreply@github.com |
55a7d840491b41f34f563c3ff74aad8e9bf7d2d4 | a92ab416bc7116e327a0e744c09169ca362a1f4d | /manage.py | ccdc4875b121897e28fa545475a863a1775951fd | [] | no_license | Himrocks29/News-detection | 98d77db05a8b7b7a408bafd9ba787b5e70055244 | caeb53185a270c488880db14c7caa57a021fcab9 | refs/heads/master | 2022-11-14T12:11:41.504264 | 2020-07-10T10:24:14 | 2020-07-10T10:24:14 | 275,581,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_back_end.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Himrocks29.noreply@github.com |
31128be2e863dec1082878d679f57d46b8658064 | 6178a536e3966d10cded2c2ed79f80a78aa3b65b | /Search_Engine_BG/search_engine/store_name.py | c84d006877d4e043b40006153651dc63c120511b | [] | no_license | HOLMEScdk/SearchEngines | 73e82fa28e6404f81c6ead8f5436e936ebc7b95e | bfe5963ad4cb6b4d884a6993d63a812ec2282fb9 | refs/heads/master | 2021-04-09T11:59:33.169558 | 2018-10-04T08:30:21 | 2018-10-04T08:30:21 | 125,612,866 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | # -*- coding: utf-8 -*-
# __author__ = 'K_HOLMES_'
# __time__ = '2018/4/16 10:45'
import pymongo
import time
import datetime
# Shared MongoDB connection to the crawler's local instance.
mongo_client = pymongo.MongoClient(host='localhost', port=27017)
# Database holding the crawled Zhihu user documents.
mongo_crawler = mongo_client.zhihu_crawler
def search_all():
    """Fetch person documents newer than the last logged timestamp.

    Reads the most recent timestamp from ``log_time.txt`` (if present) and
    queries only documents with a later ``Time``.  Returns ``(cursor, count)``
    where the cursor yields ``urlToken``/``name``/``Time`` fields sorted by
    ``Time`` ascending.
    """
    collection = mongo_crawler['person_json']
    query = {}
    # flag becomes 1 when no previous run has logged a timestamp yet,
    # in which case the query matches everything.
    flag, timestamp = 0, ''
    try:
        with open('log_time.txt', 'r', encoding='utf-8') as f:
            timestamp = list(f.readlines())[-1][:-1]  # last line, newline stripped
    except Exception as e:
        flag = 1
        print(e)
    if not flag:  # apply the time cutoff only when a timestamp exists
        print(timestamp)
        # Field-by-field slicing of "YYYY-MM-DD HH:MM:SS[.ffffff]"; unlike
        # strptime('%Y-%m-%d %H:%M:%S') this also tolerates a trailing
        # microseconds suffix produced by str(datetime).
        query = {'Time': {'$gte': datetime.datetime(int(timestamp[:4]), int(timestamp[5:7]), int(timestamp[8:10]),
                                                    int(timestamp[11:13]), int(timestamp[14:16]), int(timestamp[17:19]))
                          }
                 }
    projection = {'urlToken': True, 'name': True, 'Time': True}  # fields returned per document
    res = collection.find(query, projection)
    res.sort('Time', pymongo.ASCENDING)
    cnt = res.count()
    print(cnt)
    return res, cnt
def store_in_file():
    """Append each new user's urlToken and name to disk, logging the newest Time."""
    records, total = search_all()
    with open('utlToken.txt', 'a+', encoding='utf-8') as token_file, \
            open('user_name.txt', 'a+', encoding='utf-8') as name_file, \
            open('log_time.txt', 'a+', encoding='utf-8') as time_file:
        for index, record in enumerate(records):
            token_file.write(record['urlToken'] + ' 4\n')
            name_file.write(record['name'] + ' 4\n')
            if index == total - 1:
                # Cursor is Time-ascending, so the last row is the newest;
                # its timestamp becomes the next run's cutoff.
                time_file.write(str(record['Time']) + '\n')
def main():
    """Run one export pass: append newly crawled users to the text files."""
    store_in_file()
if __name__ == '__main__':
    # Poll forever, exporting new records every 3000 seconds (50 minutes).
    while True:
        t1 = time.time()
        main()
        print(time.time() - t1)  # duration of this pass, in seconds
        time.sleep(3000)
"noreply@github.com"
] | HOLMEScdk.noreply@github.com |
298a09a0b7bc0ee8a791968bd6fb1a5c26742c0d | 8165748e222f1089a8ed648056662813bcbe7f10 | /shaDow/para_samplers/pybind11/pybind11/_version.py | fc7a56cc7c5e69faf421939ecc96b03324eac45e | [
"BSD-3-Clause"
] | permissive | DeepGNNShallowSampler/shaDow-GNN-release | 034de4c984084ab43c73ce54c43ab4c86f4a5338 | 4cf3161549750870157e4cd0e3d96603f81102a2 | refs/heads/main | 2023-03-05T08:09:11.555210 | 2021-02-22T08:39:31 | 2021-02-22T08:39:31 | 338,206,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
def _to_int(s):
try:
return int(s)
except ValueError:
return s
__version__ = "2.6.1"
version_info = tuple(_to_int(s) for s in __version__.split("."))
| [
"deepgnnshallowsampler@gmail.com"
] | deepgnnshallowsampler@gmail.com |
f10dd705eaf6c87053ba4c18d83226cfb47d9c1a | f58c6a500b5a2652c7b039d9d7f1eea3c68003b3 | /testCase/pypost.py | bccab87c0056eba9aef91eed373287ac6f44b54f | [
"MIT"
] | permissive | zh417233956/pytestdemo | 041138a985b98c8db8f4ccb1a733b102c2cd08ba | cea91ce497d6a2f4d6e6e4a53487ddfb23ae04af | refs/heads/master | 2022-12-11T13:24:55.294418 | 2020-02-10T06:42:21 | 2020-02-10T06:42:21 | 235,714,327 | 1 | 0 | MIT | 2021-06-02T01:01:30 | 2020-01-23T03:11:06 | Python | UTF-8 | Python | false | false | 704 | py |
import requests
import json
# dataJson={'msgtype':'markdown','markdown':{
# "title":"回家过年了",
# "text":"#### 回家过年了\n"+
# "> 我将搭乘今日19:18的高铁回家过年\n"+
# "> 提前祝大家新年快乐!!!\n"+
# "> 鼠年希望您有:\n"+
# "> “鼠”不尽的收获\n"+
# "> “鼠”不尽的幸福\n"+
# "> "
# }}
# result= requests.post(url='https://oapi.dingtalk.com/robot/send?access_token=d59d68d76f2bf3c80dacdc398a422d7e84a0477b5f53e431d97a1c080c791853',
# data=json.dumps(dataJson),
# headers={'Content-Type':'application/json'})
# print("成功") | [
"417233956@qq.com"
] | 417233956@qq.com |
9c4a630df634091abbd42ba9e66d1838edec4010 | ee2dc497c2d4bfa9829a7cdc03cf9dcecb27d0bc | /fortune.py | d4015c6d532e957bab897e6d6e96f83a92f633fe | [] | no_license | secretisdead/cogs | 08d484e1585750cf462a66d4207601c3942e6e50 | 8b5c6e0b1b88a3991764fb04848eaa0df1e3dcdc | refs/heads/master | 2020-03-15T19:43:27.478203 | 2018-06-26T04:35:49 | 2018-06-26T04:35:49 | 132,315,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | import math
from random import choice
import discord
from discord.ext import commands
class fortunes:
    """port of imageboard style fortunes from notsecret irc bot"""

    def __init__(self, bot):
        self.bot = bot

    # Pool of fortune strings; each index maps to its own embed color.
    fortunes = [
        'Bad Luck',
        'Average Luck',
        'Good Luck',
        'Excellent Luck',
        'Reply hazy, try again',
        'Godly Luck',
        'Very Bad Luck',
        'Outlook good',
        'Better not tell you now',
        'You will meet a dark handsome stranger',
        'キタ━━━━━━(゚∀゚)━━━━━━ !!!!',
        '( ´_ゝ`)フーン',
        'Good news will come to you by mail',
        'It\'s a secret to everybody',
        'Bad luck and extreme misfortune will infest your pathetic soul for all eternity',
        '404 fortune not found',
        'Don\'t look',
        'Never draw',
        'Never go outside',
        '(´・ω・`)',
        'ʕ •ᴥ• ʔ',
        'I\'ll hear it directly thanks',
        'Thanx :(',
        'Whoever deserves it will get it',
        'horse',
        'Oh Shit',
        'You died',
        'You\'ve grown an inch',
        'You\'ve shrunk an inch',
        'O-oooooooooo AAAAE-A-A-I-A-U- JO-oooooooooooo AAE-O-A-A-U-U-A- E-eee-ee-eee AAAAE-A-E-I-E-A- JO-ooo-oo-oo-oo EEEEO-A-AAA-AAAA',
        'Huh',
        '╰U╯',
        'weehaw',
    ]

    def random_fortune(self):
        """Pick a random fortune and a hue derived from its list position.

        Fix: pick an index directly instead of the original's
        ``choice(list)`` followed by ``list.index(value)``, which scanned
        the list and would map duplicate strings to the first occurrence.
        """
        total_fortunes = len(self.fortunes)
        fortune_index = choice(range(total_fortunes))
        fortune = self.fortunes[fortune_index]
        # Spread RGB components evenly around the color wheel by index.
        c = (2 * math.pi * fortune_index / total_fortunes)
        r = int(127 + 127 * math.sin(c))
        g = int(127 + 127 * math.sin(c + (2 / 3 * math.pi)))
        b = int(127 + 127 * math.sin(c + (4 / 3 * math.pi)))
        color = int('0x{:02X}{:02X}{:02X}'.format(r, g, b), 16)
        return (fortune, color)

    @commands.command(pass_context=True)
    async def fortune(self, ctx):
        """Reply with a plain fortune embed."""
        fortune, color = self.random_fortune()
        description = 'Your fortune: ' + fortune
        embed = discord.Embed(colour=color, description=description)
        await self.bot.say(embed=embed)

    @commands.command()
    async def housetune(self):
        """Reply with the fortune inside ASCII-art house walls."""
        fortune, color = self.random_fortune()
        description = (
            '┏┓\n' +
            '┃┃╱╲\n' +
            '┃╱╱╲╲\n' +
            '╱╱╭╮╲╲ \n' +
            '▔▏┗┛▕▔ \n' +
            '╱▔▔▔▔▔▔▔▔▔▔╲ \n' +
            '╱╱┏┳┓╭╮┏┳┓ ╲╲ \n' +
            '▔▏┗┻┛┃┃┗┻┛▕▔\n' +
            'in this house ' + fortune + '\n' +
            '╱╱┏┳┓╭╮┏┳┓ ╲╲ \n' +
            '▔▏┗┻┛┃┃┗┻┛▕▔'
        )
        embed = discord.Embed(colour=color, description=description)
        await self.bot.say(embed=embed)

    @commands.command()
    async def jortune(self):
        """Reply with the fortune sprinkled by the jorts wizard."""
        fortune, color = self.random_fortune()
        description = (
            '```\n' +
            ' ∧_∧\n' +
            ' (。・ω・。)つ━☆・*。\n' +
            ' ⊂ ノ ・゜+.\n' +
            ' しーJ °。+ *´¨)\n' +
            ' .· ´¸.·*´¨) ¸.·*¨)\n' +
            ' (¸.·´ (¸.·’* ' + fortune + ' in jorts' +
            '```')
        embed = discord.Embed(colour=color, description=description)
        await self.bot.say(embed=embed)
def setup(bot):
    """Extension entry point: register the fortunes cog on the bot."""
    cog = fortunes(bot)
    bot.add_cog(cog)
| [
"secretisdead@gmail.com"
] | secretisdead@gmail.com |
0420534522a09d4ce97b37a8bcc31c5d5a8cea67 | 1e5a87ca416f5358920656cb7839f162b0b9f796 | /setup.py | 5132cd80fca29d63eb6c9f970d01d989a053851c | [] | no_license | youjin827/flask_miniter | 4361fe0405985896f68fbdc0f298e668ae46889e | c6c77658ff5e5d298e0864175269d1b72cfa1725 | refs/heads/main | 2023-03-19T21:28:34.904115 | 2021-03-04T17:35:31 | 2021-03-04T17:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import sys
from flask_script import Manager
# Fix: the class exported by flask_twisted is named Twisted; the original
# imported the module-level name "flask_twisted" and then called the
# undefined name Twisted(app), which raised NameError at startup.
from flask_twisted import Twisted
from twisted.python import log

from app import create_app

if __name__ == "__main__":
    # Build the Flask app, attach the Twisted reactor, route twisted
    # logging to stdout, and hand control to the Manager CLI.
    app = create_app()
    twisted = Twisted(app)
    log.startLogging(sys.stdout)
    app.logger.info(f"Running the app..")
    manager = Manager(app)
    manager.run()
"reason9827@naver.com"
] | reason9827@naver.com |
3ef0ad7ff5b1b6100b5b47882ef07366a21f1ac0 | 6bce144a2dc9293f290207d1c6c2d08a63763cd2 | /examples/swap_dims.py | e46054f2782340d727fbc5325328b16aa891688f | [
"BSD-3-Clause"
] | permissive | tlambert03/napari | 0f7b90de5333b520567a7eb9f00dea5c15fa448c | 19867df427b1eb1e503618a1ab109e7210ae8a83 | refs/heads/main | 2023-08-30T21:32:29.433620 | 2023-05-08T13:58:18 | 2023-05-08T13:58:18 | 216,388,440 | 5 | 0 | BSD-3-Clause | 2023-05-01T07:58:42 | 2019-10-20T16:02:35 | Python | UTF-8 | Python | false | false | 794 | py | """
Swap dims
=========
Display a 4-D image and points layer and swap the displayed dimensions
.. tags:: visualization-nD
"""
import numpy as np
from skimage import data
import napari
# Build a 4-D test image: ten 3-D binary-blob volumes stacked along a new
# leading axis, with fill fraction increasing from 0.05 to 0.5.
blobs = np.stack(
    [
        data.binary_blobs(
            length=128, blob_size_fraction=0.05, n_dim=3, volume_fraction=f
        )
        for f in np.linspace(0.05, 0.5, 10)
    ],
    axis=0,
)
viewer = napari.view_image(blobs.astype(float))
# add the points (4-D coordinates matching the image's axes)
points = np.array(
    [
        [0, 0, 0, 100],
        [0, 0, 50, 120],
        [1, 0, 100, 40],
        [2, 10, 110, 100],
        [9, 8, 80, 100],
    ]
)
viewer.add_points(
    points, size=[0, 6, 10, 10], face_color='blue', out_of_slice_display=True
)
# Swap the two middle dimensions: display axis order (0, 2, 1, 3).
viewer.dims.order = (0, 2, 1, 3)
if __name__ == '__main__':
    napari.run()
| [
"noreply@github.com"
] | tlambert03.noreply@github.com |
4ae9e7730ba0c3caaa877e17749a4dcdd46f6428 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_bytes.py | ae79d8aa9169daa82931977f257491cb8afc15f1 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 48,581 | py | """Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
    # Interpreter started with -b/-bb: bytes/str comparisons emit
    # BytesWarning, so wrap the test in a filter that expects them.
    def check_bytes_warnings(func):
        """Decorator: run *func* inside a BytesWarning-expecting filter."""
        @functools.wraps(func)
        def wrapper(*args, **kw):
            with test.support.check_warnings(('', BytesWarning)):
                return func(*args, **kw)
        return wrapper
else:
    # no-op
    def check_bytes_warnings(func):
        """Decorator: pass-through when bytes warnings are disabled."""
        return func
class Indexable:
    """Minimal object implementing the ``__index__`` protocol.

    Used by the bytes/bytearray tests (see ``test_from_index``) to verify
    that any object with ``__index__`` is accepted where an integer byte
    value is expected.
    """
    def __init__(self, value=0):
        # The value returned verbatim by __index__; defaults to 0.
        self.value = value
    def __index__(self):
        return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_copy(self):
a = self.type2test(b"abcd")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character
# sizes.
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc",
False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc",
False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
with self.assertRaises((OverflowError, MemoryError)):
c = b * sys.maxsize
with self.assertRaises((OverflowError, MemoryError)):
b *= sys.maxsize
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex('1a2B30'), b)
self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
with self.assertRaises(TypeError) as cm:
b.startswith([b'h'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
with self.assertRaises(TypeError) as cm:
b.endswith([b'o'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable)
self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
def test_none_arguments(self):
    # issue 11828: find/rfind/index/rindex/count/startswith/endswith must
    # accept None for the optional start/end arguments, with None behaving
    # like an omitted bound (slice semantics).
    b = self.type2test(b'hello')
    l = self.type2test(b'l')
    h = self.type2test(b'h')
    x = self.type2test(b'x')
    o = self.type2test(b'o')

    # find
    self.assertEqual(2, b.find(l, None))
    self.assertEqual(3, b.find(l, -2, None))
    self.assertEqual(2, b.find(l, None, -2))
    self.assertEqual(0, b.find(h, None, None))

    # rfind
    self.assertEqual(3, b.rfind(l, None))
    self.assertEqual(3, b.rfind(l, -2, None))
    self.assertEqual(2, b.rfind(l, None, -2))
    self.assertEqual(0, b.rfind(h, None, None))

    # index
    self.assertEqual(2, b.index(l, None))
    self.assertEqual(3, b.index(l, -2, None))
    self.assertEqual(2, b.index(l, None, -2))
    self.assertEqual(0, b.index(h, None, None))

    # rindex
    self.assertEqual(3, b.rindex(l, None))
    self.assertEqual(3, b.rindex(l, -2, None))
    self.assertEqual(2, b.rindex(l, None, -2))
    self.assertEqual(0, b.rindex(h, None, None))

    # count
    self.assertEqual(2, b.count(l, None))
    self.assertEqual(1, b.count(l, -2, None))
    self.assertEqual(1, b.count(l, None, -2))
    self.assertEqual(0, b.count(x, None, None))

    # endswith
    self.assertEqual(True, b.endswith(o, None))
    self.assertEqual(True, b.endswith(o, -2, None))
    self.assertEqual(True, b.endswith(l, None, -2))
    self.assertEqual(False, b.endswith(x, None, None))

    # startswith
    self.assertEqual(True, b.startswith(h, None))
    self.assertEqual(True, b.startswith(l, -2, None))
    self.assertEqual(True, b.startswith(h, None, -2))
    self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegex(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class BytesTest(BaseBytesTest):
    """Run the shared byte-string tests against the immutable ``bytes``
    type, plus behaviour specific to ``bytes``."""
    type2test = bytes

    def test_buffer_is_readonly(self):
        # A bytes literal is a read-only buffer: readinto() must reject it.
        fd = os.dup(sys.stdin.fileno())
        with open(fd, "rb", buffering=0) as f:
            self.assertRaises(TypeError, f.readinto, b"")

    def test_custom(self):
        # bytes() honours a __bytes__ hook when present...
        class A:
            def __bytes__(self):
                return b'abc'
        self.assertEqual(bytes(A()), b'abc')
        # ...rejects objects without one...
        class A: pass
        self.assertRaises(TypeError, bytes, A())
        # ...and rejects a hook that returns a non-bytes value.
        class A:
            def __bytes__(self):
                return None
        self.assertRaises(TypeError, bytes, A())

    # Test PyBytes_FromFormat() via ctypes (CPython-only C API).
    @test.support.impl_detail("don't test cpyext here")
    def test_from_format(self):
        test.support.import_module('ctypes')
        from ctypes import pythonapi, py_object, c_int, c_char_p
        PyBytes_FromFormat = pythonapi.PyBytes_FromFormat
        PyBytes_FromFormat.restype = py_object
        self.assertEqual(PyBytes_FromFormat(b'format'),
                         b'format')
        # '%%' escapes a percent sign; lone '%' passes through.
        self.assertEqual(PyBytes_FromFormat(b'%'), b'%')
        self.assertEqual(PyBytes_FromFormat(b'%%'), b'%')
        self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s')
        self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]')
        self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_')
        self.assertEqual(PyBytes_FromFormat(b'c:%c', c_int(255)),
                         b'c:\xff')
        self.assertEqual(PyBytes_FromFormat(b's:%s', c_char_p(b'cstr')),
                         b's:cstr')
class ByteArrayTest(BaseBytesTest):
    """Run the shared byte-string tests against the mutable ``bytearray``
    type, plus bytearray-only behaviour: mutation, resizing, and buffer
    exports.

    Fixes over the previous version:
    * ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced
      with ``assertEqual``.
    * race-prone, deprecated ``tempfile.mktemp()`` replaced with
      ``tempfile.mkstemp()``.
    """
    type2test = bytearray

    def test_nohash(self):
        # Mutable, therefore unhashable.
        self.assertRaises(TypeError, hash, bytearray())

    def test_bytearray_api(self):
        short_sample = b"Hello world\n"
        sample = short_sample + b"\0"*(20 - len(short_sample))
        # mkstemp() creates the file atomically (mktemp() was racy).
        fd, tfn = tempfile.mkstemp()
        os.close(fd)
        try:
            # Prepare
            with open(tfn, "wb") as f:
                f.write(short_sample)
            # Test readinto: fills only len(short_sample) bytes, the
            # zero-initialised tail of b stays untouched.
            with open(tfn, "rb") as f:
                b = bytearray(20)
                n = f.readinto(b)
            self.assertEqual(n, len(short_sample))
            self.assertEqual(list(b), list(sample))
            # Test writing a bytearray in binary mode
            with open(tfn, "wb") as f:
                f.write(b)
            with open(tfn, "rb") as f:
                self.assertEqual(f.read(), sample)
            # Text mode is ambiguous; don't test
        finally:
            try:
                os.remove(tfn)
            except os.error:
                pass

    def test_reverse(self):
        # In-place reversal returns None (stdlib mutator convention).
        b = bytearray(b'hello')
        self.assertEqual(b.reverse(), None)
        self.assertEqual(b, b'olleh')
        b = bytearray(b'hello1') # test even number of items
        b.reverse()
        self.assertEqual(b, b'1olleh')
        b = bytearray()
        b.reverse()
        self.assertFalse(b)

    def test_regexps(self):
        def by(s):
            return bytearray(map(ord, s))
        b = by("Hello, world")
        # The re module must accept bytearray subjects for bytes patterns.
        self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])

    def test_setitem(self):
        b = bytearray([1, 2, 3])
        b[1] = 100
        self.assertEqual(b, bytearray([1, 100, 3]))
        b[-1] = 200
        self.assertEqual(b, bytearray([1, 100, 200]))
        # Objects with __index__ are accepted as byte values.
        b[0] = Indexable(10)
        self.assertEqual(b, bytearray([10, 100, 200]))
        # Out-of-range indices raise IndexError.
        try:
            b[3] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[-10] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        # Values outside 0..255 raise ValueError.
        try:
            b[0] = 256
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = Indexable(-1)
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        # Non-integers raise TypeError.
        try:
            b[0] = None
            self.fail("Didn't raise TypeError")
        except TypeError:
            pass

    def test_delitem(self):
        b = bytearray(range(10))
        del b[0]
        self.assertEqual(b, bytearray(range(1, 10)))
        del b[-1]
        self.assertEqual(b, bytearray(range(1, 9)))
        del b[4]
        self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))

    def test_setslice(self):
        b = bytearray(range(10))
        self.assertEqual(list(b), list(range(10)))
        # Equal-length replacement.
        b[0:5] = bytearray([1, 1, 1, 1, 1])
        self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
        del b[0:-5]
        self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
        # Insertion via empty slice.
        b[0:0] = bytearray([0, 1, 2, 3, 4])
        self.assertEqual(b, bytearray(range(10)))
        # Shrinking and growing replacements.
        b[-7:-3] = bytearray([100, 101])
        self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
        b[3:5] = [3, 4, 5, 6]
        self.assertEqual(b, bytearray(range(10)))
        # An "inverted" slice (start > stop) inserts at start.
        b[3:0] = [42, 42, 42]
        self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))

    def test_extended_set_del_slice(self):
        # Cross-check extended-slice assignment/deletion against list.
        indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
        for start in indices:
            for stop in indices:
                # Skip invalid step 0
                for step in indices[1:]:
                    L = list(range(255))
                    b = bytearray(L)
                    # Make sure we have a slice of exactly the right length,
                    # but with different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    b[start:stop:step] = data
                    self.assertEqual(b, bytearray(L))
                    del L[start:stop:step]
                    del b[start:stop:step]
                    self.assertEqual(b, bytearray(L))

    def test_setslice_trap(self):
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))

    def test_iconcat(self):
        # += extends in place and keeps the same object identity.
        b = bytearray(b"abc")
        b1 = b
        b += b"def"
        self.assertEqual(b, b"abcdef")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
        b += b"xyz"
        self.assertEqual(b, b"abcdefxyz")
        # Concatenating str must fail.
        try:
            b += ""
        except TypeError:
            pass
        else:
            self.fail("bytes += unicode didn't raise TypeError")

    def test_irepeat(self):
        # *= repeats in place, preserving identity.
        b = bytearray(b"abc")
        b1 = b
        b *= 3
        self.assertEqual(b, b"abcabcabc")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    def test_irepeat_1char(self):
        # Single-byte repeat may use a memset-style fast path.
        b = bytearray(b"x")
        b1 = b
        b *= 100
        self.assertEqual(b, b"x"*100)
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    @test.support.impl_detail("undocumented bytes.__alloc__()")
    def test_alloc(self):
        # __alloc__() reports the allocated capacity; it must never be
        # smaller than the current length.
        b = bytearray()
        alloc = b.__alloc__()
        self.assertTrue(alloc >= 0)
        seq = [alloc]
        for i in range(100):
            b += b"x"
            alloc = b.__alloc__()
            self.assertTrue(alloc >= len(b))
            if alloc not in seq:
                seq.append(alloc)

    def test_extend(self):
        orig = b'hello'
        a = bytearray(orig)
        # Extending with self must not loop forever / corrupt data.
        a.extend(a)
        self.assertEqual(a, orig + orig)
        self.assertEqual(a[5:], orig)
        a = bytearray(b'')
        # Test iterators that don't have a __length_hint__
        a.extend(map(int, orig * 25))
        a.extend(int(x) for x in orig * 25)
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(iter(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(list(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        # A failing extend must leave the target unchanged.
        self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
        self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
        self.assertEqual(len(a), 0)
        a = bytearray(b'')
        a.extend([Indexable(ord('a'))])
        self.assertEqual(a, b'a')

    def test_remove(self):
        b = bytearray(b'hello')
        b.remove(ord('l'))
        self.assertEqual(b, b'helo')
        b.remove(ord('l'))
        self.assertEqual(b, b'heo')
        self.assertRaises(ValueError, lambda: b.remove(ord('l')))
        self.assertRaises(ValueError, lambda: b.remove(400))
        self.assertRaises(TypeError, lambda: b.remove('e'))
        # remove first and last
        b.remove(ord('o'))
        b.remove(ord('h'))
        self.assertEqual(b, b'e')
        self.assertRaises(TypeError, lambda: b.remove(b'e'))
        b.remove(Indexable(ord('e')))
        self.assertEqual(b, b'')

    def test_pop(self):
        b = bytearray(b'world')
        self.assertEqual(b.pop(), ord('d'))
        self.assertEqual(b.pop(0), ord('w'))
        self.assertEqual(b.pop(-2), ord('r'))
        self.assertRaises(IndexError, lambda: b.pop(10))
        self.assertRaises(IndexError, lambda: bytearray().pop())
        # test for issue #6846: 0xff must not be confused with -1.
        self.assertEqual(bytearray(b'\xff').pop(), 0xff)

    def test_nosort(self):
        # bytearray deliberately has no sort() method.
        self.assertRaises(AttributeError, lambda: bytearray().sort())

    def test_append(self):
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        # append() is a mutator and returns None.
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(b'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_insert(self):
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        # An index past the end appends (list semantics).
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_copied(self):
        # Issue 4348.  Make sure that operations that don't mutate the array
        # copy the bytes.
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))
        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))

    def test_partition_bytearray_doesnt_share_nullstring(self):
        # The empty-separator results must be distinct objects, so mutating
        # one must not leak into the other.
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")

    @test.support.impl_detail(
        "resizing semantics of CPython rely on refcounting")
    def test_resize_forbidden(self):
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        v = memoryview(b)
        def resize(n):
            b[1:-1] = range(n + 1, 2*n - 1)
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)

    @test.support.impl_detail("resizing semantics", cpython=False)
    def test_resize_forbidden_non_cpython(self):
        # on non-CPython implementations, we cannot prevent changes to
        # bytearrays just because there are buffers around.  Instead,
        # we get (on PyPy) a buffer that follows the changes and resizes.
        b = bytearray(range(10))
        v = memoryview(b)
        b[5] = 99
        self.assertIn(v[5], (99, bytes([99])))
        b[5] = 100
        b += b
        b += b
        b += b
        self.assertEqual(len(v), 80)
        self.assertIn(v[5], (100, bytes([100])))
        self.assertIn(v[79], (9, bytes([9])))
        del b[10:]
        self.assertRaises(IndexError, lambda: v[10])
        self.assertEqual(len(v), 10)
class AssortedBytesTest(unittest.TestCase):
    """Test various combinations of bytes and bytearray."""

    @check_bytes_warnings
    def test_repr_str(self):
        # str() and repr() agree for bytes/bytearray.
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            # Quote selection: prefer double quotes when the data holds "'".
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '

    def test_compare_bytes_to_bytearray(self):
        # Rich comparisons work across the two types, in both directions.
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)

        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)

        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)

        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)

    def test_doc(self):
        # Both types carry a docstring starting with their constructor name.
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)

    def test_from_bytearray(self):
        # bytearray can be built from a memoryview over bytes.
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))

    @check_bytes_warnings
    def test_to_str(self):
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")

    def test_literal(self):
        # Bytes literals decode byte-for-byte like latin-1 text.
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        # Non-ASCII characters are forbidden inside a bytes literal.
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))

    def test_translate(self):
        # translate() with a table and delete-set; original is untouched.
        b = b'hello'
        ba = bytearray(b)
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')
        # A None table means "delete only".
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)

    def test_split_bytearray(self):
        # split accepts any bytes-like separator, e.g. a memoryview.
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])

    def test_rsplit_bytearray(self):
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])

    def test_return_self(self):
        # bytearray.replace must always return a new bytearray
        b = bytearray()
        self.assertFalse(b.replace(b'', b'') is b)

    def test_compare(self):
        # Under -b/-bb, comparing bytes-ish with str must emit BytesWarning.
        if sys.flags.bytes_warning:
            def bytes_warning():
                return test.support.check_warnings(('', BytesWarning))
            with bytes_warning():
                b'' == ''
            with bytes_warning():
                b'' != ''
            with bytes_warning():
                bytearray(b'') == ''
            with bytes_warning():
                bytearray(b'') != ''
        else:
            self.skipTest("BytesWarning is needed for this test: use -bb option")
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
                           test.buffer_tests.MixinBytesBufferCommonTests):
    """PEP 3137: methods of a mutable bytearray must return new objects."""

    def marshal(self, x):
        # Fixture hook used by the buffer-tests mixin.
        return bytearray(x)

    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(b".")[0]', 'val.rpartition(b".")[2]',
                     'val.splitlines()[0]', 'val.replace(b"", b"")'):
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
    """Adapter that reuses the generic string tests with str fixtures
    encoded to UTF-8 bytes."""

    def fixtype(self, obj):
        if isinstance(obj, str):
            return obj.encode("utf-8")
        return super().fixtype(obj)

    # Bytes containment currently tests a single integer value.  This may
    # not be the final design, but until then the inherited string-style
    # versions of the tests below are not valid for bytes, so they are
    # overridden as no-ops to disable them.
    def test_contains(self):
        pass
    def test_expandtabs(self):
        pass
    def test_upper(self):
        pass
    def test_lower(self):
        pass
class ByteArrayAsStringTest(FixedStringTest):
    # Run the encoded string tests against bytearray.
    type2test = bytearray
class BytesAsStringTest(FixedStringTest):
    # Run the encoded string tests against bytes.
    type2test = bytes
class SubclassTest(unittest.TestCase):
    """Shared behaviour tests for subclasses of bytes/bytearray; concrete
    suites set ``type2test`` (base type) and ``subclass2test`` (subclass)."""

    def test_basic(self):
        self.assertTrue(issubclass(self.subclass2test, self.type2test))
        self.assertIsInstance(self.subclass2test(), self.type2test)

        a, b = b"abcd", b"efgh"
        _a, _b = self.subclass2test(a), self.subclass2test(b)

        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)

        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)

        # test repeat
        self.assertTrue(a*5 == _a*5)

    def test_join(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = self.subclass2test(b"abcd")
        s2 = self.type2test().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is self.type2test, type(s2))

        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is self.type2test)

    def test_pickle(self):
        # Instance attributes and the subclass type must round-trip.
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))

    def test_copy(self):
        # Same round-trip guarantees for copy.copy and copy.deepcopy.
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
class ByteArraySubclass(bytearray):
    # Minimal subclass used as a test fixture.
    pass
class BytesSubclass(bytes):
    # Minimal subclass used as a test fixture.
    pass
class ByteArraySubclassTest(SubclassTest):
    type2test = bytearray
    subclass2test = ByteArraySubclass

    def test_init_override(self):
        # A subclass __init__ with a different signature must be honoured;
        # it forwards the remaining arguments to bytearray.__init__.
        class subclass(bytearray):
            def __init__(me, newarg=1, *args, **kwargs):
                bytearray.__init__(me, *args, **kwargs)
        x = subclass(4, b"abcd")
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
class BytesSubclassTest(SubclassTest):
    type2test = bytes
    subclass2test = BytesSubclass
def test_main():
    """Aggregate entry point used by regrtest to run all suites here."""
    test.support.run_unittest(
        BytesTest, AssortedBytesTest, BytesAsStringTest,
        ByteArrayTest, ByteArrayAsStringTest, BytesSubclassTest,
        ByteArraySubclassTest, BytearrayPEP3137Test)

if __name__ == "__main__":
    test_main()
| [
"thezhangwei@gmail.com"
] | thezhangwei@gmail.com |
131d066772fc0f1d32c43fa05e219436d1258eae | ba3d31026d954f1fa9496a712fdbfd213963cb97 | /src/lib/urllib3/_version.py | 6fbc84b30f205772224053b3d2ccb90102373276 | [
"MIT"
] | permissive | skleinei/alfred-confluence | 2ed56dc89e1f6d14493278a16eca99baf22fc7c2 | 4fe667692becd846960c841b3ba8e5aa9adadf6f | refs/heads/main | 2022-10-01T21:29:55.716185 | 2022-09-09T21:50:41 | 2022-09-09T21:50:41 | 88,511,852 | 105 | 21 | MIT | 2022-09-06T16:39:32 | 2017-04-17T13:42:43 | Python | UTF-8 | Python | false | false | 64 | py | # This file is protected via CODEOWNERS
__version__ = "1.26.12"
| [
"stefan@k15t.com"
] | stefan@k15t.com |
068feac8406d25ab5bc8ec17f5e12833be281f5d | eda064d83bdc45dfda92ca533f4965babc3a6804 | /django_dynamic_path/tests/urls.py | ca30c538b7b4e07542be9ccfa1de1f5598b9e2dd | [
"MIT"
] | permissive | quadrant-newmedia/django_dynamic_path | bcad2138b4ebc8abfe2cc8a4736bb14023882538 | c8246cf0bf913f72945e4219d179e55a4c5a1437 | refs/heads/master | 2021-07-13T08:31:33.340390 | 2021-06-29T20:13:31 | 2021-06-29T20:13:31 | 249,819,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from django.http import HttpResponse, HttpResponseServerError
from django.urls import path, include
from django_dynamic_path import DynamicPath
def confirm_args(request, *args, **kwargs):
    """Test view: succeed only when invoked with args ('a', <x>) and b=2.

    Raises RuntimeError otherwise so the test suite detects a bad dispatch.
    """
    matched = len(args) == 2 and args[0] == 'a' and kwargs['b'] == 2
    if not matched:
        raise RuntimeError('did not get expected args/kwargs')
    return HttpResponse()
def do_nothing(request, *args, **kwargs):
    # Test view that accepts any arguments and returns None.
    # NOTE(review): a Django view returning None normally raises ValueError
    # at dispatch time -- presumably this fixture exercises that path; confirm.
    pass
def interact(path):
    # Debug helper: drop into an interactive console with ``path`` bound.
    import code; code.interact(local=locals())
# URL configuration exercised by the test suite.  Each DynamicPath takes a
# matcher callable -- returning (args, kwargs) on a match, falsy otherwise --
# and the view to invoke with those arguments.
urlpatterns = [
    # Static route placed before any dynamic ones.
    path('path_before/', lambda r: HttpResponse('path before'), name='path_before_name'),
    # Simple dynamic match with no captured arguments.
    DynamicPath(
        lambda path: path == 'bar/' and ((), {}),
        lambda r: HttpResponse('bar from dynamic'),
    ),
    # Match that forwards positional and keyword arguments; the view asserts
    # it received exactly these values.
    DynamicPath(
        lambda path: path == 'baz/' and (('a', 'b'), {'a': 1, 'b': 2}),
        confirm_args,
    ),
    # Positional args supplied as a list rather than a tuple.
    DynamicPath(
        lambda path: path == 'list_args/' and ([1, 2], {}),
        do_nothing,
    ),
    # DynamicPath nested inside include().
    path('included/', include([DynamicPath(
        lambda path: path == 'baz/' and (('a', 'b'), {'a': 1, 'b': 2}),
        confirm_args,
    )])),
    # Static routes placed after the dynamic ones.
    path('path_after/', lambda r: HttpResponse('path after'), name='path_after_name'),
    path('path_after/<value>/', lambda r: HttpResponse('path after'), name='path_after_name'),
]
"alex@quadrant.net"
] | alex@quadrant.net |
307eb8462211b543bf7984c50db4be1a64aa8c68 | a4a2c9a26087bd3e326269ab689b1fe929997324 | /FirstDjangoProj/mysite/mysite/settings.py | 3b4d40a5e68ecbd0a2ca8a7e0e099c5546617363 | [] | no_license | molaksh/PythonProjects | 960813ac054c0bd508ddd027f29ad033be1161f6 | 5dd6b46d6f17ef44ae2e32fc8a12ec6f832d8eaa | refs/heads/master | 2021-06-27T23:41:13.526222 | 2019-07-04T01:15:14 | 2019-07-04T01:15:14 | 142,596,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u23lydl-udf8m*(&p7(y176uul&jxr$m2)1pttf)@=t$p*sbw('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/Users/mohan/Documents/GitHub/PythonProjects/FirstDjangoProj/mysite/templates/'],
#'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static_cdn')
#https://www.youtube.com/watch?v=YH-ipgxlJzs - steps for python manage.py collectstatic
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_cdn') | [
"mohan.kris.lakshmanan@gmail.com"
] | mohan.kris.lakshmanan@gmail.com |
7f4cf0d59708157a9f67bc9e4e462d9a835f5225 | 99445b05a6ae648de9d94ed4f059c37620c2eecf | /Audio Keylogger/src/keystrokes.py | fefd4da38c6f18b97f7c830d3c7ffabe8f5efecb | [] | no_license | owenlu552/Hackathon-2017 | 8f2dfe2b52d8bef14e96395f0f2c188d0ffc5bb5 | 052170e29806dadfc5e7f08edec24de31aac4d58 | refs/heads/master | 2021-01-20T05:10:11.803101 | 2017-04-30T17:01:01 | 2017-04-30T17:01:01 | 89,755,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | from scipy.io import wavfile
from scipy import signal
from numpy import mean, std
from matplotlib import pyplot as plt
def keystroke_starts(wav_filename):
c_minFreqHz = 400
c_maxFreqHz = 12000
freq, data = wavfile.read(wav_filename)
# select only the left channel for now
leftData = data[:,0]
numSamples = len(data)
#
# coefficients = abs(fft.rfft(data))
# frequencies = fft.rfftfreq(numSamples, 1/freq)
# frequencies = [(i,f) for i,f in enumerate(frequencies) if c_minFreqHz < f < c_maxFreqHz]
# plt.plot([f[1] for f in frequencies], coefficients[[f[0] for f in frequencies]])
# plt.show()
# energy = sum(coefficients[[f[0] for f in frequencies]])
# print(energy)
f, t, Sxx = signal.spectrogram(leftData, fs=freq, nperseg=440)
freqs = [(index, freq) for index, freq in enumerate(f) if c_minFreqHz < freq < c_maxFreqHz]
energies = [sum(col) for col in zip(*Sxx[[f[0] for f in freqs]])]
threshold = mean(energies) + 1.5*std(energies)
#plt.plot(t, energies)
energies = [(i,e) for i, e in enumerate(energies) if e > threshold]
print(len(energies))
print(t[[e[0] for e in energies]])
#plt.plot(t[[e[0] for e in energies]],[e[1] for e in energies])
#plt.show()
return (freq*(t[[e[0] for e in energies]])).tolist()
# for e in energies:
# wavfile.write(str(e[0]) + '.wav', freq, data[range(freq*t[e[0]], freq*t[e[0]])])
# print(t[e[0]])
# #for i in range(100,200):
# plt.plot([f[1] for f in freqs], Sxx[[f[0] for f in freqs], i])
#lt.show()
if __name__=="__main__":
wav_filename = "../resources/abc.wav"
starts = keystroke_starts(wav_filename)
print(starts)
| [
"github@harishlall.com"
] | github@harishlall.com |
f7a57d8b122354d67e2bc6db33b6b1c566835d37 | 75ec42b3e07422bd0159e5c27296524cd31c27c2 | /app/run.py | 56847893e0b922ec02254211d397a4a18dd6a408 | [] | no_license | jaescbar/Project_2 | b556020734d91aeea00cc3e396f8be0e7c0ff809 | 53464a1193fa8bf0bbb63a3ca2e990eccc3a0dc7 | refs/heads/main | 2023-02-27T12:36:33.582938 | 2021-02-05T01:56:52 | 2021-02-05T01:56:52 | 335,058,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,911 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
import numpy as np
import pandas as pd
import sys
import os
import re
from sqlalchemy import create_engine
import pickle
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator,TransformerMixin
app = Flask(__name__)
def tokenize(text):
    """Normalize raw message text into a list of clean lemma tokens.

    Non-alphanumeric characters are replaced by spaces, the text is
    word-tokenized, and each token is lemmatized, lower-cased and stripped.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9]", " ", text)
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(cleaned)]
# load data
# NOTE(review): paths are relative to the app/ working directory -- the
# server must be launched from there.
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('Database Project_2', engine)

# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with two Plotly charts of the training data."""
    # extract data needed for visuals:
    # message counts per genre, and positive-label counts per category.
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    y_labels = df.iloc[:,4:].columns
    y_values = (df.iloc[:,4:] == 1).sum().values

    # create visuals (Plotly figure dicts serialised to JSON below)
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],

            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Bar(
                    x=y_values,
                    y=y_labels,
                    orientation='h'
                )
            ],

            'layout': {
                'title': 'Distribution of Targets',
                'yaxis': {
                    'title': "Frequency"
                },
                'xaxis': {
                    'title': "Categories"
                }
            }
        }
    ]

    # encode plotly graphs in JSON; ids tie each figure to its DOM element.
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
    """Classify the user's message and render the per-category results."""
    # save user input in query (empty string when the parameter is absent)
    query = request.args.get('query', '')

    # use model to predict classification for query
    classification_labels = model.predict([query])[0]
    # Map each target column name to its 0/1 prediction.
    classification_results = dict(zip(df.columns[4:], classification_labels))

    # This will render the go.html Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results
    )
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | jaescbar.noreply@github.com |
1178eab42c3b0ccd60dd23924fded6982087b9d5 | f97df2ae339368b8b5fe066c0530a273db7e76c7 | /importDialogue.py | 9ed006cd2e93c41f77c2aef6faf3cdda65e10126 | [] | no_license | briceFoundry/flixOps | 5fd4612ed980c6a442d5fc921f35cbb11bd6a8d7 | c60548d965d4a071f24bcf42fc5c2aab37cb5864 | refs/heads/master | 2021-01-11T07:59:12.669259 | 2016-10-27T21:20:52 | 2016-10-27T21:20:52 | 72,126,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,114 | py | #!/usr/bin/env python
#------------------------------------------------------------------------------
# importDialogue.py - Imports a Flix text file and assigns dialogue to the panels
#------------------------------------------------------------------------------
# Copyright (c) 2014 The Foundry Visionmongers Ltd. All Rights Reserved.
#------------------------------------------------------------------------------
import os
import urllib2, urllib
import copy
import math
from flix.fileServices import FileService
from flix.fileServices.repathDefault import RepathDefault
from flix.plugins.toMov import ToMov
from flix.core2.shotCutList import ShotCutList
from flix.gui.fileBrowserTypes import kBrowseTypeLoadFiles
from flix.utilities.log import log as log
from flix.utilities.osUtils import OSUtils
from flix.utilities.editUtils import EditUtils
import flix.exceptions
from flix.plugins.pluginDecorator import PluginDecorator
from flix.core2.mode import Mode
from flix.web.serverSession import ServerSession
from flix.core2.markerList import MarkerList
from flix.web.serverFlixFunctions import ServerFlixFunctions
import flix.core2.shotCutList
import flix.remote
import flix.logger
from xml.sax.saxutils import escape
import xml.etree.ElementTree as ET
# -----------------------------------------------------
# Global Variables
# -----------------------------------------------------
serverSession = ServerSession()
fileService = FileService()
class ImportDialogue(PluginDecorator):
#--------------------------------------------------------------------------
# object
#--------------------------------------------------------------------------
def __init__(self):
self.fileService = fileService
self.rePath = RepathDefault()
self.shotList = ''
self.serverFlixFunctions = ServerFlixFunctions()
# load the icon
iconPath = Mode().get('[FLIX_CONFIG_FOLDER]')+'/plugins/icons/custom_icon.png'
icon = self.fileService.loadByteArray(iconPath)
self.init(label ='Import Dialogue',
icon =icon,
tooltip ='Import dialogue to current edit',
group ='Export',
pluginPath='flixConfig.plugins.importDialogue.ImportDialogue')
#--------------------------------------------------------------------------
# executions
#--------------------------------------------------------------------------
def execute(self, shotCutList, selection, additionalData=None):
dialogueFile = self.loadFileBrowser()
if not dialogueFile or not os.path.exists(dialogueFile) or not dialogueFile[-4:] == ".txt":
raise flix.exceptions.FlixException("No valid text file selected.")
self.shotList = flix.core2.shotCutList.ShotCutList.fromFile(shotCutList.defaultPath())
panels = self.getPanelSetupList()
with open(dialogueFile) as f:
dialogueFileContent = f.readlines()
panelDialogueLines = self.getPanelDialogueLines(panels, dialogueFileContent)
data = []
data.append('<Recipies>')
for panel in self.shotList:
recipe = panel.recipe
if not panel.isMarker():
dialogueLines = panelDialogueLines[recipe.getShortLabelName()]
if not dialogueLines == [-1]:
newDialogue = u""
for line in range(dialogueLines[0], dialogueLines[1]+1):
newDialogue += dialogueFileContent[line].strip("\t")
poses = recipe.getPoses()
poseXML = poses[0]['poseXML']
poseXML.attrib['dialogue'] = newDialogue
recipe.saveRecipeFiles()
data.append('<Setup show="%(show)s" sequence="%(sequence)s" beat="%(beat)s" setup="%(setup)s" version="%(version)s">' % recipe.getProperties())
data.append(ET.tostring(recipe.getMultiTrackXML()) + "</Setup>")
data.append("</Recipies>")
dataString = "".join(data)
log("DATASTRING: %s" % dataString)
self.serverFlixFunctions.addFeedback('reloadSetupsMultiTracks', dataString)
def loadFileBrowser(self):
output = OSUtils.runFileBrowser(kBrowseTypeLoadFiles, 'Select the text file', '/', 'dialogue')
return escape(output[0].decode('utf-8'))
def getPanelSetupList(self):
""" Returns a list of panels with their 'short label name' (e.g. '0010-2' for 'hum_p_0010_v2')
"""
panelSetupList = []
for panel in self.shotList:
panelSetupList.append(panel.recipe.getShortLabelName())
return panelSetupList
def getPanelDialogueLines(self, panels, dialogueFileContent):
""" Returns a dictionary listing the first and last dialogue lines for each panel
"""
panelDialogueLines = {}
previousPanel = None
for panel in panels:
# Panels with no dialogue get set to -1
panelDialogueLines[panel] = [-1]
# Ignore Markers
if "marker" in panel:
continue
panelName = "%s:\n" % panel
# If this panel has dialogue associated to it in the text file
if panelName in dialogueFileContent:
# Record the first line of dialogue for this panel
panelDialogueLines[panel] = [dialogueFileContent.index(panelName) + 1]
if previousPanel is not None:
# Record the last line of dialogue for the previous panel
panelDialogueLines[previousPanel].append(dialogueFileContent.index(panelName) - 2)
if panel == panels[-1]:
panelDialogueLines[panel].append(len(dialogueFileContent)-1)
else:
previousPanel = panel
log("PANELDIALOGUELINES: %s" % panelDialogueLines)
return panelDialogueLines
| [
"brice@thefoundry.co.uk"
] | brice@thefoundry.co.uk |
00aaebefa565e2ae33be7572d88c22ea8abfd2e7 | 64a444c1583cdb7bfa481b5962720d2522bafa0b | /Amalie_M_week_1.py | ac0df7b3e142e8b6d764ef93b130b05384cc65f6 | [] | no_license | amalietm/exam_coding_1 | abb22383a95450b946442d1774d04baf249fd4ba | 8eca19be4dc1e27a8196a0419441280e88694919 | refs/heads/main | 2023-03-11T04:17:01.056014 | 2021-02-18T10:37:39 | 2021-02-18T10:37:39 | 340,015,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # Write a program that outputs even if a number is even and odd if a number is odd. Think about what if the value is neither even nor odd (the value can be fractional or it can even not be a number at all).
num = int(input("Enter a number"))
if (num % 2) == 0:
print("number is even")
elif (num % 2) == 1:
print("number is odd")
| [
"noreply@github.com"
] | amalietm.noreply@github.com |
49d9eb798f3e278b4bcf71e58edb75976f1f6147 | c42bb5fe3505703e56791c23b5cb838938816833 | /arch/task_manager/db/models.py | e6f515616650adfe6159052b1e1da3d52db7abd1 | [
"Apache-2.0"
] | permissive | nemirorox/FATE | fe727c1a8bb39e18d0508bdee14745cfb2814d6c | 7f4a3e7ca50f24e49090e6c117bfabd1785603f2 | refs/heads/master | 2020-06-27T03:25:10.342834 | 2019-07-26T03:08:27 | 2019-07-26T03:08:27 | 199,831,026 | 1 | 0 | Apache-2.0 | 2019-07-31T10:08:25 | 2019-07-31T10:08:25 | null | UTF-8 | Python | false | false | 3,773 | py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api.utils import log_utils
from playhouse.pool import PooledMySQLDatabase
from arch.task_manager.settings import DATABASE
from peewee import Model, CharField, IntegerField, BigIntegerField, DateTimeField, TextField, CompositeKey
import datetime
import sys
import inspect
LOGGER = log_utils.getLogger()
data_base_config = DATABASE.copy()
# TODO: create instance according to the engine
engine = data_base_config.pop("engine")
db_name = data_base_config.pop("name")
DB = PooledMySQLDatabase(db_name, **data_base_config)
def close_db(db):
try:
if db:
db.close()
except Exception as e:
LOGGER.exception(e)
@DB.connection_context()
def init_tables():
members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
table_objs = []
for name, obj in members:
if obj != DataBaseModel and issubclass(obj, DataBaseModel):
table_objs.append(obj)
DB.create_tables(table_objs)
class DataBaseModel(Model):
class Meta:
database = DB
def to_json(self):
return self.__dict__['__data__']
def save(self, *args, **kwargs):
if hasattr(self, "update_date"):
self.update_date = datetime.datetime.now()
super(DataBaseModel, self).save(*args, **kwargs)
class JobInfo(DataBaseModel):
job_id = CharField(max_length=50)
name = CharField(max_length=100, null=True, default='')
task = CharField(max_length=50, null=True, index=True)
module = CharField(max_length=50, null=True, index=True)
initiator = IntegerField(null=True, index=True)
role = CharField(max_length=50, null=True, default='', index=True)
party_id = IntegerField(null=True, default=0, index=True)
config = TextField(null=True)
all_party = TextField(null=True)
pid = IntegerField(null=True, index=True)
status = CharField(max_length=50, null=True, default='ready') # waiting/ready/start/running/success/failed/partial/deleted
set_status = CharField(max_length=50, null=True) # ready/running/success/failed/partial/deleted
progress = IntegerField(null=True, default=0)
current_step = CharField(max_length=100, null=True, index=True)
create_date = DateTimeField(index=True)
update_date = DateTimeField(index=True)
begin_date = DateTimeField(null=True, index=True)
end_date = DateTimeField(null=True, index=True)
elapsed = BigIntegerField(null=True)
class Meta:
db_table = "job_info"
primary_key = CompositeKey('job_id', 'role', 'party_id')
class JobQueue(DataBaseModel):
job_id = CharField(max_length=50)
role = CharField(max_length=50, null=True, default='', index=True)
party_id = IntegerField(null=True, default=0, index=True)
config = TextField(null=True)
status = CharField(max_length=50, null=True, default='ready') # waiting/ready/start/running/success/failed/partial/deleted
pid = IntegerField(null=True, index=True)
create_date = DateTimeField(index=True)
update_date = DateTimeField(index=True)
class Meta:
db_table = "job_queue"
primary_key = CompositeKey('job_id', 'role', 'party_id')
init_tables()
| [
"jicezeng@gmail.com"
] | jicezeng@gmail.com |
36d1d6d6e916c60a5b3f6e02f28a3a6c98c6de8a | 6a0589aa1a5f9071cbcee3f84452c880bf96c12d | /src/uw_py220_extras/lesson10/src/mailing_list/send.py | 98a46f491654bb836950d44e09294401862efaaa | [
"MIT"
] | permissive | UWPCE-PythonCert/py220_extras | d3203e2fd44ee840d008fac9597a5b0c165e8cc7 | 57336429fb782c4901e7709c0275242e6af4264a | refs/heads/master | 2020-12-01T23:42:58.660565 | 2020-03-11T02:44:18 | 2020-03-11T02:44:18 | 230,816,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | def send_message(message, recipient):
return True
| [
"akmiles@icloud.com"
] | akmiles@icloud.com |
66bc0b8b72aae523ac2a80991533a21aa03b8b2d | 4dc411c043df8a4c0292e6177e6612ee2a2584a2 | /html_downloader.py | 74c5e5f314dfb693934d8752e55f0dc1f6dabca1 | [] | no_license | Angel-LQ/MySampleCrawler | fb466ca1929486d9941256ee096199afc8906b10 | 817576106e50cbe47981c2d57cb6c2a818994f15 | refs/heads/master | 2020-03-10T22:09:40.931131 | 2018-04-15T13:42:27 | 2018-04-15T13:42:27 | 129,612,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from urllib import request
class HtmlDownloader(object):
def download(self, url):
if url is None:
return None
response = request.urlopen(url)
if response.getcode() == 200:
return response
else:
return None | [
"871091822@qq.com"
] | 871091822@qq.com |
a4d50b7137e189eb5c81b1988335ceb0716eb12a | 8c749118797abdbff60fc79624bdca2df340f2a0 | /env/lib/python2.7/site-packages/indicoio/utils/image.py | 9eb74db0bffe6b08fdf0070a43fa0f71041d0f20 | [] | no_license | gmech838/Fakex | bf16cd58955c29b7a290d9886703f4c9de6b2345 | af418e4f29c945d37705c54f215e154a0a42eb73 | refs/heads/master | 2021-01-05T20:21:52.353868 | 2018-05-09T04:59:10 | 2018-05-09T04:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | """
Image Utils
Handles preprocessing images before they are sent to the server
"""
import os.path, base64, re, warnings
from six import BytesIO, string_types, PY3
from PIL import Image
from indicoio.utils.errors import IndicoError
B64_PATTERN = re.compile("^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)")
def file_exists(filename):
"""
Check if a file exists (and don't error out on unicode inputs)
"""
try:
return os.path.isfile(filename)
except (UnicodeDecodeError, UnicodeEncodeError, ValueError):
return False
def image_preprocess(image, size=None, min_axis=None, batch=False):
"""
Takes an image and prepares it for sending to the api including
resizing and image data/structure standardizing.
"""
if batch:
return [image_preprocess(img, size=size, min_axis=min_axis, batch=False) for img in image]
if isinstance(image, string_types):
b64_or_url = re.sub('^data:image/.+;base64,', '', image)
if file_exists(image):
# check type of element
out_image = Image.open(image)
else:
return b64_or_url
elif isinstance(image, Image.Image):
out_image = image
elif type(image).__name__ == "ndarray": # image is from numpy/scipy
if "float" in str(image.dtype) and image.min() >= 0 and image.max() <= 1:
image *= 255.
try:
out_image = Image.fromarray(image.astype("uint8"))
except TypeError as e:
raise IndicoError("Please ensure the numpy array is acceptable by PIL. Values must be between 0 and 1 or between 0 and 255 in greyscale, rgb, or rgba format.")
else:
raise IndicoError("Image must be a filepath, url, base64 encoded string, or a numpy array")
if size or min_axis:
out_image = resize_image(out_image, size, min_axis)
# convert to base64
temp_output = BytesIO()
out_image.save(temp_output, format='PNG')
temp_output.seek(0)
output_s = temp_output.read()
return base64.b64encode(output_s).decode('utf-8') if PY3 else base64.b64encode(output_s)
def resize_image(image, size, min_axis):
if min_axis:
min_idx, other_idx = (0,1) if image.size[0] < image.size[1] else (1,0)
aspect = image.size[other_idx]/float(image.size[min_idx])
if aspect > 10:
warnings.warn(
"An aspect ratio greater than 10:1 is not recommended",
Warning
)
size_arr = [0,0]
size_arr[min_idx] = size
size_arr[other_idx] = int(size * aspect)
image = image.resize(tuple(size_arr))
elif size:
image = image.resize(size)
return image
def get_list_dimensions(_list):
"""
Takes a nested list and returns the size of each dimension followed
by the element type in the list
"""
if isinstance(_list, list) or isinstance(_list, tuple):
return [len(_list)] + get_list_dimensions(_list[0])
return []
def get_element_type(_list, dimens):
"""
Given the dimensions of a nested list and the list, returns the type of the
elements in the inner list.
"""
elem = _list
for _ in range(len(dimens)):
elem = elem[0]
return type(elem)
| [
"jaykachhadia@hotmail.com"
] | jaykachhadia@hotmail.com |
8ad6930e0c50ef4aa984b410b70526a3f7a72fff | 1405a87e42a04af6a21e3002a48f9b9687abc83d | /Paper_topography_figures/Figure_S7_D.py | 1f58f79ec4bb07ca3423237c3e4189de2a7adf93 | [
"CC-BY-4.0"
] | permissive | kavli-ntnu/mini2p_topography | fabefb4f5a79a9f0b74cc51e01e2f97bde2ed740 | 83c2dfc8597f9b7f4918f27b735420c4a0cc3415 | refs/heads/main | 2023-04-09T22:24:47.719101 | 2022-01-27T14:03:47 | 2022-01-27T14:03:47 | 452,277,116 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,415 | py | ### Figure S7 - Obenhaus et al.
# panel D: anatomical masks
# Plots an overlay of anatomical masks as
# annotated by the experimenter
import sys, os
import os.path
from pathlib import Path
from matplotlib.figure import Figure
import numpy as np
import pandas as pd
import datajoint as dj
import cmasher as cmr
# Make plots pretty
import seaborn as sns
sns.set(style='white')
# Prevent bug in figure export as pdf:
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
##### IMPORTS #########################################################################################
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from helpers_topography.fov_stitching import make_composite_fov
from dj_schemas.anatomical_alignment import get_aligned_anatomical_masks
from skimage.exposure import rescale_intensity
from PIL import Image, ImageCms
import colorsys
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
##### LOAD SCHEMA COMPONENTS ##########################################################################
from dj_schemas.dj_conn import *
##### EXPORT LOCATION #################################################################################
figure_dir = 'YOUR_EXPORT_DIRECTORY/'
figure_dir = Path(figure_dir)
#### HELPERS FOR CMYK -> RGB conversion ###############################################################
def _convert_to_8bit(data):
'''
Convert numpy array from arbitrary range
to np.unit8
'''
data = np.nan_to_num(data)
data = rescale_intensity(data, in_range='image', out_range=np.uint8)
data = data.astype(np.uint8)
return np.nan_to_num(data)
def _shift_hue(arr,
hout,
):
r, g, b, a = np.rollaxis(arr, axis=-1)
h, s, v = rgb_to_hsv(r, g, b)
h += hout
r, g, b = hsv_to_rgb(h, s, v)
arr = np.dstack((r, g, b, a))
return arr
def _colorize(image,
hue_change,
):
'''
Hue change PIL image `image` with the given
`hue_change` (hue within 0-360)
returns another PIL image
'''
img = image.convert('RGBA')
arr = np.array(np.asarray(img).astype('float'))
hue_shifted = _shift_hue(arr,
hue_change/360.,
)
new_img = Image.fromarray(hue_shifted.astype('uint8'), 'RGBA')
return new_img
#######################################################################################################
def _plot_fov_and_masks(animal_name,
projection_short,
ssim_thresh,
center_plane=0,
plot_mask_fov='fov',
percentile=99.8,
size_scalebar=50.,
):
'''
Anatomical FOV +
PLOTTING OF ANATOMICAL MASKS!
'''
assert plot_mask_fov in ['fov','mask'], f'Choose "fov" or "mask" for plot_mask_fov'
# Retrieve sessions and plot FOV composite
session_filter = (Session.proj(..., metasession_ref='metasession_name')
& f'animal_name = "{animal_name}"'
& FilteredSessions)
metasessions = (AlignmentFOV.proj(diff_ssim = 'ssim_warped-ssim_original')
& session_filter
& f'diff_ssim > {ssim_thresh}'
& f'projection_short = "{projection_short}"')
alignment_composite, x_min, x_max, y_max, y_min = make_composite_fov(metasessions,
center_plane = center_plane,
padding_composite = 5.,
add_edge=False,
)
if plot_mask_fov == 'mask':
# Retrieve anatomical masks (manual annotations)
region_masks_mec = get_aligned_anatomical_masks(animal_name=animal_name,
region='mec_label',
projection_short=projection_short,
edge=False
)
region_masks_pas = get_aligned_anatomical_masks(animal_name=animal_name,
region='pas_label',
projection_short=projection_short,
edge=False
)
# Normalize
region_masks_mec /= region_masks_mec.max()
region_masks_pas /=region_masks_pas.max()
# Create a CMYK stack from those masks,
# filling "C", and "Y" and leaving the others at zero
# See also my gist:
# https://gist.github.com/horsto/072758ba4b6a292264cd65a9a98a80d2
all_masks_cmyk = np.stack([region_masks_mec,
np.zeros_like(region_masks_mec),
region_masks_pas,
np.zeros_like(region_masks_mec)
],
axis=2)
all_masks_cmyk8bit = _convert_to_8bit(all_masks_cmyk)
# profiles
cmyk_profile = '/Users/hotte/Documents/python/hotte-dj-moser-imaging/color_profiles/USWebCoatedSWOP.icc'
rgb_profile = '/Users/hotte/Documents/python/hotte-dj-moser-imaging/color_profiles/sRGB Color Space Profile.icm'
img_pil = Image.fromarray(all_masks_cmyk8bit, mode='CMYK')
img = ImageCms.profileToProfile(img_pil,
cmyk_profile,
rgb_profile,
renderingIntent=0,
outputMode='RGB')
# HUE CHANGE
img_hue = _colorize(img, hue_change=185)
# ... convert back to RGB image
all_masks_rgb = np.array(img_hue)
# Plot ...
cmap = 'Greys'
figure = plt.figure(figsize=(10,10))
ax = figure.add_subplot(111)
if plot_mask_fov == 'fov':
ax.imshow(alignment_composite, vmin=0, vmax=np.nanpercentile(alignment_composite, percentile), cmap=cmap)
else:
ax.imshow(all_masks_rgb)
# Scalebar ...
if size_scalebar is not None:
ax.plot([x_max-size_scalebar-10,x_max-size_scalebar-10+size_scalebar],
[y_max-10,y_max-10],
lw=3,
color='k',
solid_capstyle='butt'
)
ax.set_xlim([x_min,x_max]), ax.set_ylim([y_max,y_min])
# Construct title
projection_key = (metasessions * FOVProjParam).fetch('projection_key')[0]
ax.get_xaxis().set_ticks([]); ax.get_yaxis().set_ticks([])
sns.despine(left=True, bottom=True)
print(f'Animal {animal_name} (n={len(metasessions)+1} sessions) {projection_key}')
print('Saving figure under {}'.format(str(figure_dir)))
figure.savefig(figure_dir / f'{animal_name} composite {plot_mask_fov}.png', bbox_inches='tight', dpi=300)
if __name__ == "__main__":
#### PLOT COMPOSITE FOV EXAMPLE #####################################################
ssim_thresh = 0. # This is: [ssim_warped-ssim_original] in AlignmentFOV
center_plane = 0
animal_name = '88592'
projection_short = 'mean_im2' # FOVProjParam()
percentile = 99.8
mask_fov = 'fov' # 'mask' or 'fov' - either plot the anatomical fov defined by projection_short
# or the annotated region masks for MEC / PAS only
size_scalebar = 50.
print('Plotting (anatomical) FOV composite')
_plot_fov_and_masks(animal_name,
projection_short,
ssim_thresh,
center_plane=0,
plot_mask_fov=mask_fov,
percentile=percentile,
size_scalebar=size_scalebar,
)
print('Success.') | [
"7548928+horsto@users.noreply.github.com"
] | 7548928+horsto@users.noreply.github.com |
d5ae247878b5808b35c405d5a41e7182c2c8cef2 | 2e8c210b999f74445d382b6f56fe75766490a40b | /testing/reducer.py | bf1f9bd79e041e65863ca756fbb610d2b9ecccf6 | [] | no_license | yzhh029/wikitrending | cc190b8f78022d1e16749a59e36ae603031d054f | 158d565d3cb7f292928f81e7c054cec30d9b9354 | refs/heads/master | 2020-05-30T21:14:54.743763 | 2014-12-05T05:11:29 | 2014-12-05T05:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
import sys
current_name = None
current_count = 0
current_date = None
word = None
for line in sys.stdin:
line = line.strip()
rd = line.split('\t')
if len(rd) != 3:
continue
name, views, date = rd[0], rd[1], rd[2]
try:
count = int(views)
except ValueError:
continue
if current_name == name and current_date == date:
current_count += count
else:
if current_name:
print '%s\t%s\t%s' % (current_name, current_count, current_date)
current_count = count
current_name = name
current_date = date
if current_name == name:
print '%s\t%s\t%s' % (current_name, current_count, current_date)
| [
"yzhh029@gmail.com"
] | yzhh029@gmail.com |
a1f18c3115ef61507d291c92a1fb592e652a1b31 | 86d7526f06552f0b46d539b41ac147c5daddb1d6 | /flipkartgrid.py | 8ad77183727fe0149002bdca04fd083497d88e93 | [] | no_license | ShrungDN/Flipkart-Grid-3.0 | 3a6b35317bee6a392f01377a5f23f228c651caa5 | b4f9c48606e045d718fbc6ec1d10df254ca969a2 | refs/heads/main | 2023-08-10T19:09:16.522837 | 2021-10-03T11:04:52 | 2021-10-03T11:04:52 | 405,348,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,307 | py | # -*- coding: utf-8 -*-
"""FlipkartGrid.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16vpyuFlG8XAHVhG2uDySuP_ilW6S1xLs
# Main Code (Run From Here)
## Initial Steps (used to get the start points, checkpoints and destination points)
"""
# importing libraries
import cv2
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from math import atan2, cos, sin, sqrt, pi
def get_start_points(w, h):
    """Return the four start-cell centres along the top edge of the arena.

    The arena is modelled as a 10-unit-tall grid (unit a = h / 10); the start
    cells sit at x = 8.5a, 9.5a, 10.5a, 11.5a with a common y of 0.5a.
    `w` is accepted for signature symmetry with the other grid helpers but is
    not used.  Returns a list of four [x, y] integer-pixel pairs.
    """
    a = h / 10
    y = int(0.5 * a)
    points = []
    x = 8.5 * a
    for _ in range(4):
        points.append([int(x), y])
        x = x + a  # each start cell is one grid unit to the right
    return points
def get_destinations(w, h):
    """Return the four destination-cell centres of the arena grid.

    The arena is modelled as a 10-unit-tall grid (unit a = h / 10).
    `w` is accepted for signature symmetry with the other grid helpers but is
    not used.  Returns a list of four [x, y] integer-pixel pairs.

    Bug fix: the original created `d1` with dtype np.uint8, which silently
    wrapped every coordinate above 255 (e.g. 17*a//2 = 850 for h = 1000
    became 82).  Coordinates are now kept as float64 until the final int cast.
    """
    a = h / 10
    # Floor division keeps the original half-unit grid snapping.
    d1 = np.array([3 * a // 2, 17 * a // 2])
    d2 = d1 + np.array([0, a])
    d3 = d2 + np.array([17 * a, 0])
    d4 = d3 + np.array([0, -a])
    return [[int(x) for x in d] for d in (d1, d2, d3, d4)]
def get_checkpoints(w, h):
    """Return the four checkpoint centres of the arena grid.

    The arena is modelled as a 10-unit-tall grid (unit a = h / 10).
    `w` is accepted for signature symmetry with the other grid helpers but is
    not used.  Returns a list of four [x, y] integer-pixel pairs.

    Bug fix: the original created `c1` with dtype np.uint8, which silently
    wrapped every coordinate above 255 (e.g. 17*a//2 = 850 for h = 1000
    became 82).  Coordinates are now kept as float64 until the final int cast.
    """
    a = h / 10
    # Floor division keeps the original half-unit grid snapping.
    c1 = np.array([17 * a // 2, 17 * a // 2])
    c2 = c1 + np.array([a, a])
    c3 = c2 + np.array([a, 0])
    c4 = c3 + np.array([a, -a])
    return [[int(x) for x in c] for c in (c1, c2, c3, c4)]
"""# HSV Calculator (used to detect the two markers on the bot)"""
#HSV FINDER TOOL
def nothing(x):
    # No-op callback: cv2.createTrackbar requires an on-change handler,
    # but trackbar positions are polled explicitly in get_mask_3d instead.
    pass
def create_Trackbars(trackbarwindow='Controls', num_masks=1):
    """Create HSV lower/upper range trackbars (plus a 'save' toggle) in a window.

    For each mask i in 1..num_masks, creates six sliders named
    l{h,s,v}_mask{i} and u{h,s,v}_mask{i}, read back later by get_mask_3d.

    Bug fixes vs. the original:
    - the mask2 upper-bound sliders were initialised from the *lower* values
      (copy-paste error), so the second mask started out fully closed;
    - the initial hue upper bound was 180, one past the slider maximum of 179
      (OpenCV hue range is 0..179).
    """
    lower = [0, 0, 0]
    upper = [179, 255, 255]
    for mask_idx in range(1, num_masks + 1):
        for i, ch in enumerate('hsv'):
            slider_max = 179 if ch == 'h' else 255
            cv2.createTrackbar('l{}_mask{}'.format(ch, mask_idx),
                               trackbarwindow, lower[i], slider_max, nothing)
            cv2.createTrackbar('u{}_mask{}'.format(ch, mask_idx),
                               trackbarwindow, upper[i], slider_max, nothing)
    cv2.createTrackbar('save', trackbarwindow, 0, 1, nothing)
def get_mask_3d(mask_number, hsv_img, trackbarwindow):
    """Threshold `hsv_img` with the HSV range currently set on the trackbars.

    Reads the six l/u sliders for mask `mask_number` from `trackbarwindow`
    (as created by create_Trackbars).  Returns a (mask_3d, [lower, upper])
    pair, where mask_3d is the binary mask converted to 3-channel BGR so it
    can be stacked next to the colour image ("3d" = 3-channel shape).
    """
    lo_vals, hi_vals = [], []
    for ch in 'hsv':
        lo_vals.append(cv2.getTrackbarPos('l{}_mask{}'.format(ch, mask_number), trackbarwindow))
        hi_vals.append(cv2.getTrackbarPos('u{}_mask{}'.format(ch, mask_number), trackbarwindow))
    lower = np.array(lo_vals)
    upper = np.array(hi_vals)
    binary = cv2.inRange(hsv_img, lower, upper)
    mask_3d = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
    return mask_3d, [lower, upper]
def get_hsv_limits(img,window_name="Controls"):
cv2.namedWindow(window_name)
create_Trackbars(window_name,num_masks=1)
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
while True:
mask,hsv_limits = get_mask_3d(1,hsv,window_name)
mask_gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
contours,h = cv2.findContours(mask_gray.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if contours:
max_area_cnt = max(contours, key = cv2.contourArea)
peri = cv2.arcLength(max_area_cnt, True)
approx = cv2.approxPolyDP(max_area_cnt, 0.02 * peri, True)
pnts = approx.squeeze()
img_contours = img.copy()
cv2.drawContours(img_contours,[pnts],-1,(0,0,255),15)
else:
img_contours = img.copy()
img_and_mask = np.hstack((img,mask))
img_and_mask = cv2.resize(img_and_mask,None,fx = 0.4,fy=0.4)
cv2.imshow(window_name,img_and_mask)
key = cv2.waitKey(10)
if key == ord('s'):
break
cv2.destroyAllWindows()
return hsv_limits # [lower,upper]
"""# Other Helper Functions
### Finding centroids of the markers and finding their orientations
"""
#returns the centroid of the detected marker on the bot which is later used to find the orientation
def get_centroid(bot_img,hsv_limits,draw_bb,marker_color):
hsv_bot_img = cv2.cvtColor(bot_img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_bot_img,hsv_limits[0],hsv_limits[1])
contours,h = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
marker_detected = False
if contours:
max_area_cnt = max(contours, key = cv2.contourArea)
M = cv2.moments(max_area_cnt)
if M['m00']==0:
marker_detected = False
else:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
if draw_bb:
bot_img = cv2.drawContours(bot_img,[max_area_cnt],-1,marker_color,2)
marker_detected = True
if marker_detected == True:
return (cx,cy)
else:
return -1
def get_angle(img, previous_angle, hsv_limits1, hsv_limits, draw_bb=True):
    """Return the bot's heading in degrees from its two colour markers.

    Detects the centroids of the two markers (hsv_limits1 for the first,
    hsv_limits for the second) and returns the rounded angle of the vector
    from marker 1 to marker 2, with y negated to convert image coordinates
    (y grows downward) to the usual mathematical convention.  Falls back to
    `previous_angle` when either marker is not detected.

    Bug fix: the original read the module-level global `hsv_limits2` and
    silently ignored this function's second-limits parameter; the parameter
    is now used.
    """
    c1 = get_centroid(img, hsv_limits1, draw_bb, marker_color=(255, 0, 0))
    c2 = get_centroid(img, hsv_limits, draw_bb, marker_color=(0, 255, 255))
    if c1 == -1 or c2 == -1:
        return previous_angle
    return round(atan2(c1[1] - c2[1], c2[0] - c1[0]) * 180 / pi)
"""### Get Command Function to give commands to the bot"""
from math import atan2,pi
def getcommand(loc, bot_orientation, dst):
    """Return the drive command ('F', 'L' or 'R') steering the bot toward dst.

    loc and dst are (x, y) pixel coordinates in image convention (y grows
    downward); bot_orientation is the bot's heading in degrees as produced
    by get_angle.

    Bug fix: the original only treated deviations of 0..+20 degrees as
    "close enough" to drive forward, so a bot a mere 5 degrees off on the
    other side (diff_angle 355) was commanded to turn right — contradicting
    its own "within 20 degrees" comment.  The tolerance is now symmetric.
    """
    x, y = loc
    x1, y1 = dst
    # Negating the y difference converts image coordinates (y down) to the
    # mathematical angle convention (y up) used for headings.
    target_orientation = atan2(y - y1, x1 - x) * 180 / pi
    diff_angle = target_orientation - bot_orientation
    # Normalise to [0, 360): angle measured counter-clockwise from the
    # bot's current heading to the direction of the destination.
    if diff_angle < 0:
        diff_angle += 360
    if diff_angle <= 20 or diff_angle >= 340:
        # Within +/-20 degrees of the target heading: go straight.
        return 'F'
    elif diff_angle < 180:
        return 'L'
    else:
        return 'R'
"""### Used to warp an image when 4 points are given (used when cropping the arena)
"""
def order_points(pts):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left.

    Parameters: pts, a (4, 2) np.ndarray of corner coordinates in any order.
    Returns a (4, 2) float32 np.ndarray in TL, TR, BR, BL order, relying on:
    the top-left corner has the smallest x+y sum, the bottom-right the
    largest; the top-right has the smallest y-x difference, the bottom-left
    the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)  # y - x per point
    ordered[0] = pts[np.argmin(coord_sums)]   # top-left
    ordered[2] = pts[np.argmax(coord_sums)]   # bottom-right
    ordered[1] = pts[np.argmin(coord_diffs)]  # top-right
    ordered[3] = pts[np.argmax(coord_diffs)]  # bottom-left
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral `pts` in `image` to a top-down ("bird's eye") view.

    Parameters: image (np.ndarray) and pts, a (4, 2) array of corner
    coordinates in any order; order_points() sorts them into TL, TR, BR, BL
    before the perspective transform is computed.  Returns the warped image.
    """
    quad = order_points(pts)
    (tl, tr, br, bl) = quad

    def edge_len(p, q):
        # Euclidean length of the edge between two 2-D points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output width: the longer of the bottom (br-bl) and top (tr-tl) edges.
    out_w = max(int(edge_len(br, bl)), int(edge_len(tr, tl)))
    # Output height: the longer of the right (tr-br) and left (tl-bl) edges.
    out_h = max(int(edge_len(tr, br)), int(edge_len(tl, bl)))

    # Destination corners of the flattened rectangle, TL, TR, BR, BL order.
    target = np.array([
        [0, 0],
        [out_w - 1, 0],
        [out_w - 1, out_h - 1],
        [0, out_h - 1]], dtype="float32")

    # Compute the perspective transform matrix and apply it.
    matrix = cv2.getPerspectiveTransform(quad, target)
    return cv2.warpPerspective(image, matrix, (out_w, out_h))
"""# Execution"""
video1 = "Moving Bot.mp4"
video2 = "total_arcade2.mp4"
video3 = "total_arcade3.mp4"
video4 = "total_arcade_endless.mp4"
cap = cv2.VideoCapture(video4)
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video file")
cv2.namedWindow('Frame')
grid_coordinates = []
def save_points(event, x, y, flags, params):
    # Mouse callback: record each left-click as one arena corner point.
    # NOTE: appends to the module-level grid_coordinates list.
    if event == cv2.EVENT_LBUTTONDOWN:
        grid_coordinates.append((x,y))
################################# Locking and Cropping the Arena ###########################
# Interactive stage: the user clicks the 4 arena corners on the live frame.
# Keys: 'c' clears the clicked points, 'f' uses the full frame as the arena,
# 's' accepts the current points, 'p' saves a warped snapshot to disk.
print("Arena Locking Stage")
while(cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        # Display the resulting frame, with any already-clicked corners drawn.
        if len(grid_coordinates)!=0:
            for point in grid_coordinates:
                cv2.circle(frame, point, radius=2, color=(0,0,255), thickness=4)
        cv2.setMouseCallback('Frame', save_points)
        cv2.imshow("Frame",frame)
        key = cv2.waitKey(25)
        #clear the points if c is pressed
        if key == ord('c'):
            grid_coordinates =[]
        if key == ord('f'):
            # Fall back to the whole frame as the arena quadrilateral.
            w = frame.shape[1]
            h = frame.shape[0]
            grid_coordinates=[(0,0),(0,h),(w,0),(w,h)]
            break
        if key & 0xFF == ord('s'):
            break
        if key &0xFF == ord('p'): ##use this only for saving a photo after choosing 4 points
            cv2.imwrite("grid_cropped{}.jpeg".format(np.random.randint(0,100)),four_point_transform(frame,np.array(grid_coordinates)))
    # Break the loop
    else:
        break
cv2.destroyAllWindows()
print("Arena Locking Done\n")
################################### Selecting BOTS to be tracked #############################
# If arena locking failed, bail out; otherwise let the user draw one ROI per
# bot, pick HSV colour ranges for the two orientation markers, then track.
if len(grid_coordinates)!=4:
    print("Didnt choose 4 points correctly. Choosing entire frame")
    cap.release()
    cv2.destroyAllWindows()
else:
    print("Drawing bounding boxes")
    # Available tracker factories; only 'csrt' is used below.
    TrDict = {'csrt': cv2.legacy.TrackerCSRT_create,
              'kcf' : cv2.legacy.TrackerKCF_create,
              }
    trackers = cv2.legacy.MultiTracker_create()
    ret,frame_roi = cap.read()
    frame = four_point_transform(frame_roi,np.array(grid_coordinates))
    bot_largest_dim = 0
    fraction = 0.9
    num_bots=1
    # One interactive ROI selection per bot; each ROI seeds a CSRT tracker.
    for i in range(num_bots):
        if ret:
            cv2.imshow('Frame',frame)
            bbi = cv2.selectROI('Frame',frame)
            # Largest side of the ROI defines the search window size later.
            bot_largest_dim = max(bbi[2:])
            tracker_i = TrDict['csrt']()
            trackers.add(tracker_i,frame,bbi)
            cv2.waitKey(1)
        else:
            print("Cap not reading in ROI Selection Stage")
            break
    bot_largest_dim = int(fraction*bot_largest_dim) #The extra region around the bot for detection of the bot
    ################################ HSV LIMITS SELECTION ##########################
    bot_img_t0 = frame[bbi[1]:bbi[1]+bbi[3],bbi[0]:bbi[0]+bbi[2]]
    # NOTE(review): the ROI crop above is immediately overwritten with the full
    # frame here — confirm whether the crop or the whole frame is intended.
    bot_img_t0 = frame[:,:]
    cv2.imshow("bot_img_t0",bot_img_t0)
    hsv_limits1 = get_hsv_limits(bot_img_t0,"Finding HSV Limits of Marker 1")
    hsv_limits2 = get_hsv_limits(bot_img_t0,"Finding HSV Limits of Marker 2")
    ############################# Tracking and Velocity Publishing ##############################
    print("\nBOT Tracking...")
    # Per-bot state: last known heading (degrees) and last cropped bot image.
    angles = [-90 for i in range(num_bots)]
    bot_imgs = [0 for i in range(num_bots)]
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Not able to read in Tracking Stage")
            break
        frame = four_point_transform(frame,np.array(grid_coordinates))
        #frame_c = frame.copy()
        #cv2.imshow("orig",frame_c)
        (success, boxes) = trackers.update(frame)
        height,width,_ = frame.shape
        for i in range(len(boxes)):
            box = boxes[i]
            (x, y, w, h) = [int(a) for a in box]
            bot_centroid = (int(x + w/2), int(y + h/2))
            # Search window of 2*bot_largest_dim per side, expressed in the
            # coordinates of the zero-padded frame built just below (padding
            # by bot_largest_dim shifts every coordinate by the same amount,
            # so [centroid, centroid + 2*dim] is the centred window).
            search_rect = [bot_centroid[0],bot_centroid[1],
                           bot_centroid[0]+2*bot_largest_dim,bot_centroid[1]+2*bot_largest_dim]
            # Pad each colour channel so windows near the border stay in bounds.
            frame_padded = np.stack([np.pad(frame[:,:,c], bot_largest_dim,
                                            mode='constant', constant_values=0) for c in range(3)], axis=2)
            bot_imgs[i] = frame_padded[search_rect[1]:search_rect[3],search_rect[0]:search_rect[2]]
            # Estimate heading from the two colour markers; falls back to the
            # previous angle when detection fails (see get_angle).
            angles[i] = get_angle(bot_imgs[i],angles[i],hsv_limits1,hsv_limits2,draw_bb = True)
            destination_point = (int(width/2),35)
            cv2.putText(frame,str(destination_point),destination_point,cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
            cv2.circle(frame,destination_point , radius=4, color=(0,0,255), thickness=4)
            #cv2.putText(frame,str(angles[i]),bot_centroid,cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),2)
            cv2.circle(frame,bot_centroid , radius=4, color=(0,0,150), thickness=4)
            # Motion command for steering the bot toward the destination.
            command = getcommand(bot_centroid,angles[i],destination_point)
            cv2.putText(frame,str(command),(bot_centroid[0]+10,bot_centroid[1]+10),cv2.FONT_HERSHEY_SIMPLEX,1,(0,150,0),2)
        bot_img = np.hstack(bot_imgs)
        cv2.imshow('Frame',frame)
        cv2.imshow('bot_img',bot_img)
        #cv2.imshow('bot_with_triangle',img_with_triangle)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
'''
"""# Other redundant/supplementary code (below)
# Video Reading Code And Saving Snapshots
"""
import cv2
import numpy as np
import time
# Create a video capture object, in this case we are reading the video from a file
vid_capture = cv2.VideoCapture("total_arcade_endless.mp4")
if (vid_capture.isOpened() == False):
print("Error opening the video file")
else:
fps = vid_capture.get(5)
print('Frames per second : ', fps,'FPS')
# Get frame count
# You can replace 7 with CAP_PROP_FRAME_COUNT as well, they are enumerations
frame_count = vid_capture.get(7)
print('Frame count : ', frame_count)
vid_capture.set(cv2.CAP_PROP_FPS, 1)
fps = vid_capture.get(5)
print('Frames per second : ', fps,'FPS')
while(vid_capture.isOpened()):
# vid_capture.read() methods returns a tuple, first element is a bool
# and the second is frame
ret, frame = vid_capture.read()
if ret == True:
cv2.imshow('Frame',frame)
# 20 is in milliseconds, try to increase the value, say 50 and observe
key = cv2.waitKey(20)
# time.sleep(0.1)
if key == ord('q'):
break
if key == ord('s'):
cv2.imwrite("triangle{}.jpeg".format(np.random.randint(0,100)),frame)
break
else:
break
# Release the video capture object
vid_capture.release()
cv2.destroyAllWindows()
"""# Arena Locking Tool and Mapping (Ignore this)"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
#HSV FINDER TOOL
def nothing(x):
pass
def create_Trackbars(trackbarwindow = 'Controls'):
lower = [0,0,0]
upper = [180,255,255]
#HUE
cv2.createTrackbar('lh_mask1',trackbarwindow,lower[0],179,nothing)
cv2.createTrackbar('uh_mask1',trackbarwindow,upper[0],179,nothing)
#Saturation
cv2.createTrackbar('ls_mask1',trackbarwindow,lower[1],255,nothing)
cv2.createTrackbar('us_mask1',trackbarwindow,upper[1],255,nothing)
#Value
cv2.createTrackbar('lv_mask1',trackbarwindow,lower[2],255,nothing)
cv2.createTrackbar('uv_mask1',trackbarwindow,upper[2],255,nothing)
#Same for Mask2
cv2.createTrackbar('lh_mask2',trackbarwindow,lower[0],179,nothing)
cv2.createTrackbar('uh_mask2',trackbarwindow,lower[0],179,nothing)
cv2.createTrackbar('ls_mask2',trackbarwindow,lower[1],255,nothing)
cv2.createTrackbar('us_mask2',trackbarwindow,lower[1],255,nothing)
cv2.createTrackbar('lv_mask2',trackbarwindow,lower[2],255,nothing)
cv2.createTrackbar('uv_mask2',trackbarwindow,lower[2],255,nothing)
cv2.createTrackbar('save',trackbarwindow,0,1,nothing)
#cv2.createTrackbar('mode',trackbarwindow,0,3,nothing)
def get_mask_3d(mask_number,hsv_img,trackbarwindow):#Here, 3d indicates that the shape of the mask is a tuple
lh = cv2.getTrackbarPos('lh_mask{}'.format(mask_number),trackbarwindow)
uh = cv2.getTrackbarPos('uh_mask{}'.format(mask_number),trackbarwindow)
ls = cv2.getTrackbarPos('ls_mask{}'.format(mask_number),trackbarwindow)
us = cv2.getTrackbarPos('us_mask{}'.format(mask_number),trackbarwindow)
lv = cv2.getTrackbarPos('lv_mask{}'.format(mask_number),trackbarwindow)
uv = cv2.getTrackbarPos('uv_mask{}'.format(mask_number),trackbarwindow)
lower = np.array([lh,ls,lv])
upper = np.array([uh,us,uv])
mask = cv2.inRange(hsv_img,lower,upper)
mask_3d = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
return mask_3d
def visualise_contours(img,contours,thickness=3):
img_c = img.copy()
cv2.drawContours(img_c,contours,-1,(255,0,0),thickness)
plt.imshow(img_c,cmap='gray')
plt.show()
return img_c
def get_points_for_warping(qmask):
qmask_gray = cv2.cvtColor(qmask,cv2.COLOR_BGR2GRAY)
contours,h = cv2.findContours(qmask_gray.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
max_area_cnt = max(contours, key = cv2.contourArea)
peri = cv2.arcLength(max_area_cnt, True)
approx = cv2.approxPolyDP(max_area_cnt, 0.02 * peri, True)
pnts = approx.squeeze()
if pnts.shape !=(4,2):
visualise_contours(img,[contours])
return pnts,contours
def order_points(pts):
#parameters: pts 4x2 np.ndarray
#returns: 4x2 np.ndarray
# The function initialises a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
#*************************************************************#
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts):
#returns a warped image
#parameters: image (np.ndarray) and the 4 pts np.ndarray of size (4,2)
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
def initiate_mapping(cap):
if cap.isOpened()==False:
print('Could not access feed')
return
trackbarwindow = 'Controls'
cv2.namedWindow(trackbarwindow)
create_Trackbars(trackbarwindow)
pnts = np.array([[0,0],[0,1],[1,0],[1,1]])
while True:
ret,img = cap.read()
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
save = cv2.getTrackbarPos('save',trackbarwindow)
mask1 = get_mask_3d(1,hsv_img,trackbarwindow)
mask2 = get_mask_3d(2,hsv_img,trackbarwindow)
mask = cv2.bitwise_or(mask1,mask2)
mask1_and_mask2 = np.hstack((mask1,mask2))
img_and_mask = np.hstack((img,mask))
stacked = np.vstack((img_and_mask,mask1_and_mask2))
stacked = cv2.resize(stacked,None,fx=0.25,fy=0.25)
cv2.imshow("Mask",stacked)
mask_gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
contours,h = cv2.findContours(mask_gray.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if contours:
max_area_cnt = max(contours, key = cv2.contourArea)
peri = cv2.arcLength(max_area_cnt, True)
approx = cv2.approxPolyDP(max_area_cnt, 0.02 * peri, True)
pnts = approx.squeeze()
img_contours = img.copy()
cv2.drawContours(img_contours,[pnts],-1,(0,0,255),15)
img_contours = cv2.resize(img_contours,None,fx = 0.7,fy=0.7)
cv2.imshow('Grid Recognition',img_contours)
else:
cv2.imshow('Grid Recognition',img.copy())
warped = four_point_transform(img,pnts)
warped = cv2.resize(warped,None,fx = 0.7,fy=0.7)
cv2.imshow('Warped',warped)
key = cv2.waitKey(10)
if save ==1:
mask_no = np.random.randint(1,10000)
cv2.imwrite('mask_{}.jpg'.format(mask_no),mask)
break
if key == ord('s'):
break
cv2.destroyAllWindows()
while cap.isOpened():
ret,img = cap.read()
warped = four_point_transform(img,pnts)
cv2.imshow('Grid',warped)
gray_warped = cv2.cvtColor(warped,cv2.COLOR_BGR2GRAY)
#ret,binary_warped = cv2.threshold(gray_warped,200,255,cv2.THRESH_BINARY)
gray_warped = cv2.resize(gray_warped, None,fx=2,fy=2)
cv2.imshow('Grid_Binary',gray_warped)
if cv2.waitKey(10)==ord('q'):
break
cv2.destroyAllWindows()
cap.release()
cap = cv2.VideoCapture(0)
cap.isOpened()
initiate_mapping(cap)
cv2.destroyAllWindows()
cap.release()
def get_orientation_rough2(bot_img):
gray = cv2.cvtColor(bot_img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
(T, mask) = cv2.threshold(blurred, 200, 255,cv2.THRESH_BINARY_INV)
contours,h = cv2.findContours(255-mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
img_contours = bot_img.copy()
if contours:
max_area_cnt = max(contours, key = cv2.contourArea)
peri = cv2.arcLength(max_area_cnt, True)
approx = cv2.approxPolyDP(max_area_cnt, 0.02 * peri, True)
pnts = approx.squeeze()
cv2.drawContours(img_contours,[pnts],-1,(0,0,255),3)
# angle = round(getOrientation(max_area_cnt, img_contours)*180/3.141)
return img_contours#,angle
def get_orientation_rough1(bot_img):
gray = cv2.cvtColor(bot_img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
(T, mask) = cv2.threshold(blurred, 200, 255,cv2.THRESH_BINARY_INV)
contours,h = cv2.findContours(255-mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
max_area_cnt = max(contours, key = cv2.contourArea)
img_c = bot_img.copy()
cv2.drawContours(img_c,max_area_cnt,-1,(255,0,0),2)
angle = round(getOrientation(max_area_cnt, img_c)*180/3.141)
return angle,img_c
(int(w/2),int(h/2))
h,w
cv2.destroyAllWindows()
cap.release()
hsv_limits1 = get_hsv_limits(img,"Finding HSV Limits of Marker 1")
hsv_limits2 = get_hsv_limits(img,"Finding HSV Limits of Marker 2")
plt.imshow(img)
get_angle(img,0,hsv_limits1,hsv_limits2)
ret,thresh = cv2.threshold(img,127,255,0)
print(thresh.shape)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = cv2.inRange(hsv_img,)
mask_3d = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
import cv2
import numpy as np
img = cv2.imread("grid_cropped5.jpeg")
cv2.imshow("img",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
w = img.shape[1]
h = img.shape[0]
get_start_points(w,h)
get_checkpoints(w,h)
get_destinations(w,h)
for i, b in enumerate(get_start_points(w, h)):
x, y = b
cv2.circle(img, (x, y), 2, (255, 0, 0), -1)
cv2.putText(img, f's{i+1}', (x+1, y+1),
cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 0, 0), 1)
for i, c in enumerate(get_checkpoints(w, h)):
x, y = c
cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
cv2.putText(img, f'c{i+1}', (x+1, y+1),
cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 255), 1)
for i, d in enumerate(get_destinations(w, h)):
x, y = d
cv2.circle(img, (x, y), 2, (0, 255, 0), -1)
cv2.putText(img, f'd{i+1}', (x+1, y+1),
cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 255, 0), 1)
#cv2.imwrite('RESULT.png', img)
cv2.imshow('Platform with points', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
getcommand((0,10),180,(10,10))
"""# Pose Estimation using triangle marker
Getting Orientation from Bot Image
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from math import atan2, cos, sin, sqrt, pi
def get_pose(img,previous_angle,bot_centroid):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
(T, mask) = cv2.threshold(blurred,230,255,cv2.THRESH_BINARY)
contours,h = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
img_contours = img.copy()
detected = False
if len(contours)!=0:
required_pnts=[]
max_area = 0
for cnt in contours:
peri = cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt, 0.09 * peri, True)
area = cv2.contourArea(approx)
pnts = approx.squeeze() #np.ndarray (n,1 size)
if len(pnts)==3:
if area<max_area:
pass
else:
detected = True
required_pnts = pnts
max_area = area
if detected:
cv2.drawContours(img_contours,[required_pnts],-1,(0,0,255),1)
max_norm = 0
max_points = []
l1 = norm(required_pnts[0]-required_pnts[1])
l2 = norm(required_pnts[1]-required_pnts[2])
l3 = norm(required_pnts[2]-required_pnts[0])
if l1<=l2 and l1<=l3:
mid_point = (required_pnts[0]+required_pnts[1])/2
top_point = required_pnts[2]
if l2<=l1 and l2<=l3:
mid_point = (required_pnts[1]+required_pnts[2])/2
top_point = required_pnts[0]
if l3<=l1 and l3<=l2:
mid_point = (required_pnts[2]+required_pnts[0])/2
top_point = required_pnts[1]
mid_point = mid_point.astype(np.uint32)
angle = round(atan2(mid_point[1]-top_point[1],top_point[0]-mid_point[0])*180/pi)
centroid = (mid_point+top_point)/2
centroid = centroid.astype(np.uint32)
else:
angle= previous_angle
centroid = bot_centroid
return angle,centroid,img_contours
img = cv2.imread("5x5.png")
img_text = img.copy()
plt.imshow(img)
angle, centroid,img_contours = get_pose(img,0,(10,10))
cv2.putText(img_text,str(angle),(10000,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2)
plt.imshow(img_contours)
plt.figure()
plt.imshow(img_text)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""# Code to Determine Orientation using PCA"""
from math import atan2, cos, sin, sqrt, pi
def drawAxis(img, p_, q_, color, scale):
p = list(p_)
q = list(q_)
## [visualization1]
angle = atan2(p[1] - q[1], p[0] - q[0]) # angle in radians
hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))
# Here we lengthen the arrow by a factor of scale
q[0] = p[0] - scale * hypotenuse * cos(angle)
q[1] = p[1] - scale * hypotenuse * sin(angle)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
# create the arrow hooks
p[0] = q[0] + 9 * cos(angle + pi / 4)
p[1] = q[1] + 9 * sin(angle + pi / 4)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
p[0] = q[0] + 9 * cos(angle - pi / 4)
p[1] = q[1] + 9 * sin(angle - pi / 4)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
## [visualization1]
def getOrientation(pts, img):
## [pca]
# Construct a buffer used by the pca analysis
sz = len(pts)
data_pts = np.empty((sz, 2), dtype=np.float64)
for i in range(data_pts.shape[0]):
data_pts[i,0] = pts[i,0,0]
data_pts[i,1] = pts[i,0,1]
# Perform PCA analysis
mean = np.empty((0))
mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)
# Store the center of the object
cntr = (int(mean[0,0]), int(mean[0,1]))
## [pca]
## [visualization]
# Draw the principal components
#cv2.circle(img, cntr, 3, (255, 0, 255), 2)
p1 = (cntr[0] + 0.02 * eigenvectors[0,0] * eigenvalues[0,0], cntr[1] + 0.02 * eigenvectors[0,1] * eigenvalues[0,0])
p2 = (cntr[0] - 0.02 * eigenvectors[1,0] * eigenvalues[1,0], cntr[1] - 0.02 * eigenvectors[1,1] * eigenvalues[1,0])
drawAxis(img, cntr, p1, (255, 255, 0), 1)
drawAxis(img, cntr, p2, (0, 0, 255), 5)
angle = atan2(eigenvectors[0,1], eigenvectors[0,0]) # orientation in radians
## [visualization]
# Label with the rotation angle
#label = str(-int(np.rad2deg(angle)) - 90) + " degrees"
#textbox = cv2.rectangle(img, (cntr[0], cntr[1]-25), (cntr[0] + 250, cntr[1] + 10), (255,255,255), -1)
#cv2.putText(img, label, (cntr[0], cntr[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)
return angle
import cv2
cap = cv2.VideoCapture(0)
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video file")
while(cap.isOpened()):
# Capture frame-by-frame
ret, img = cap.read()
if ret == True:
# Display the resulting frame
cv2.imshow("Frame",img)
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Convert image to binary
_, bw = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Find all the contours in the thresholded image
contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for i, c in enumerate(contours):
# Calculate the area of each contour
area = cv2.contourArea(c)
# Ignore contours that are too small or too large
if area < 3700 or 100000 < area:
continue
# Draw each contour only for visualisation purposes
cv2.drawContours(img, contours, i, (0, 0, 255), 2)
# Find the orientation of each shape
#getOrientation(c, img)
cv2.imshow('Output Image', img)
key = cv2.waitKey(25)
if key & 0xFF == ord('q'):
break
# Break the loop
else:
break
cv2.destroyAllWindows()
# Load the image
img = cv2.imread("bot.png")
# Was the image there?
if img is None:
print("Error: File not found")
exit(0)
cv2.imshow('Input Image', img)
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Convert image to binary
_, bw = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Find all the contours in the thresholded image
contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for i, c in enumerate(contours):
# Calculate the area of each contour
area = cv2.contourArea(c)
# Ignore contours that are too small or too large
if area < 3700 or 100000 < area:
continue
# Draw each contour only for visualisation purposes
cv2.drawContours(img, contours, i, (0, 0, 255), 2)
# Find the orientation of each shape
#getOrientation(c, img)
cv2.imshow('Output Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
''' | [
"noreply@github.com"
] | ShrungDN.noreply@github.com |
2b3a9e981f42e8a5e6a40afa375a01bcdf8a0d9f | 91fae4ffd292fe40c3076d166325174cc3898aeb | /mastool/samples/genexp_as_arg.py | 58fe1d4f5d76a40b34dbb4517f12d271bc8338ea | [
"Apache-2.0"
] | permissive | omaraboumrad/mastool | e8d4b29264d340e7d2039ca05683e1f73210a852 | 0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd | refs/heads/master | 2021-01-19T04:19:39.994448 | 2017-07-21T12:56:56 | 2017-07-21T12:56:56 | 43,903,895 | 26 | 1 | null | 2017-07-21T12:56:57 | 2015-10-08T16:56:37 | Python | UTF-8 | Python | false | false | 49 | py | def foo(x, y=(x**2 for x in range(3))):
pass
| [
"jhakimra@redhat.com"
] | jhakimra@redhat.com |
6c54ce0f3e628cf8b9d687c071a7631a8f6315bd | d83f6ba316a0ff77e1bc50c48ef4b37253f56985 | /novathousands/__init__.py | 8c254b6aa4cf85b0f2b2b94420e75fc25f004c84 | [
"Apache-2.0"
] | permissive | pyKun/nova-thousands | 30eba4ab299d1cd3d637bedc9cfed065c72e8238 | 47533b9b3eb64479a9cc3827a07cd86b2c336591 | refs/heads/master | 2020-04-05T23:20:20.257163 | 2014-12-15T04:15:50 | 2014-12-15T04:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | from novathousands.driver import NovaThousandsDriver as ThousandsDriver
| [
"gareth@openstacker.org"
] | gareth@openstacker.org |
d0e22a0756c6d98d97f0cc24f0ca2280ad85f0fc | ae7eb58377a11b11ea6372f12e4e8bc94d327145 | /script.py | f218ac1bd52b34d83ba2ab095623af39b1b05329 | [] | no_license | djangorris/AVdata | 3c29d16cce7aff85c549bced8312fdca20e7e68a | 1d6de2f839b9fb96f0510cfb29c9e1e19b7eb6a9 | refs/heads/master | 2021-01-23T08:44:42.401920 | 2017-09-17T02:15:00 | 2017-09-17T02:15:00 | 102,545,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as mpatches
from matplotlib import style
import numpy as np
style.use('fivethirtyeight')
np_members = np.array(df.Member_Months * .01)
col = df.Select.map({'On':'b', 'Off':'r'})
red_patch = mpatches.Patch(color='red', label='Off Exchange')
blue_patch = mpatches.Patch(color='blue', label='On Exchange')
# green_patch = mpatches.Patch(color='green', label='On Exchange (Select Network*)')
# yellow_patch = mpatches.Patch(color='yellow', label='Off Exchange (Select Network*)')
df.plot.scatter('AV', 'Plan_Adjusted_Index_Rate', s=np_members, marker='o', c=col, edgecolor='black')
plt.xlabel('Actuarial Value (AV)')
plt.ylabel('Plan Adjusted Index Rate')
plt.title('Kaiser Permanente\nVisualizing Plan Popularity (Marker Size)\nMeasured in Member Months')
plt.grid(True)
plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch])
plt.show()
| [
"lukkyjay@gmail.com"
] | lukkyjay@gmail.com |
8e1d85018fcf4384b655f23eef74bda5040402df | 850012c4237b778661bde10ae8a8cc21ac01ff70 | /classifier/neural_net.py | 7d8aeede8adbedd95b2b949058b32a53dad621c4 | [] | no_license | jamesmcnamara/ml | cd06fbe0b9f6d3209ce14e65575f1225ee66ff63 | 491759e108d31377f6fed5f58464de89e6c0b3ec | refs/heads/master | 2016-09-11T10:58:01.667822 | 2015-05-01T00:35:37 | 2015-05-01T00:35:37 | 29,793,362 | 6 | 0 | null | 2015-04-30T23:28:49 | 2015-01-24T22:05:42 | Python | UTF-8 | Python | false | false | 7,956 | py | from itertools import count
from math import exp
from operator import ne
from random import random
import numpy as np
class NeuralNet:
    # Fully-connected feed-forward network trained with stochastic
    # backpropagation; every non-input node uses a sigmoid activation.
    def __init__(self, units=None, eta=1, steps=1e6):
        # eta: learning rate shared by every node.
        # steps: intended iteration cap.  NOTE(review): stored but never used
        # to bound the training loop — `train` loops until error-free.
        self.eta = eta
        self.steps = steps
        # units = [n_inputs, *hidden_layer_sizes, n_outputs]
        input_count, *hidden_counts, output_count = units
        self.inputs = [InputNode(eta) for _ in range(input_count)]
        self.hidden_layers = [[Node(eta) for _ in range(nth_layer)]
                              for nth_layer in hidden_counts]
        self.outputs = [OutputNode(eta) for _ in range(output_count)]
        self.layers = [self.inputs] + self.hidden_layers + [self.outputs]
        self.init_connections()
        # When True, `predict` also prints hidden-node outputs (see feed_forward).
        self.trained = False
    def init_connections(self):
        """
        Connects each node in all non output layers to the i+1th layer
        """
        for layer, next_layer in zip(self.layers, self.layers[1:]):
            for node in layer:
                node.set_next_layer(next_layer)
    def clear_nodes(self):
        """
        Clears out all inputs and outputs from each node in the net
        """
        for layer in self.layers:
            for node in layer:
                node.clear()
    def train(self, data, results):
        """
        Trains this neural network on data and results
        :param data: N*M data vector, where M is the number of input nodes
        :param results: N*K data vector, where K is the number of output nodes
        """
        counter = count()
        while self.has_error(data, results):
            # NOTE(review): next(counter) is called twice here, so the counter
            # advances by 2 per iteration and the printed value is off by one.
            if next(counter) % 10000 == 0:
                print("current is at", next(counter))
            self.stochastic_descent(data, results)
        if self.has_error(data, results):
            print("Iterations maxed out")
        self.trained = True
    def has_error(self, data, results):
        """
        Determines if this network produces any errors when predicting the
        results for data
        :param data: N*M data vector, where M is the number of input nodes
        :param results: N*K data vector, where K is the number of output nodes
        :return: True if any elements in data are misclassified
        """
        for row, result in zip(data, results):
            # NOTE(review): compares the input `row` (not `result`) against the
            # prediction — correct only for autoencoder-style identity targets;
            # confirm this is intended.
            if any(map(ne, row, self.predict(row))):
                return True
        return False
    def stochastic_descent(self, data, results):
        """
        Runs one cycle of the backprop error algorithm on data
        and results
        :param data: N*M data vector, where M is the number of input nodes
        :param results: N*K data vector, where K is the number of output nodes
        """
        for row, result in zip(data, results):
            #print(row)
            self.predict(row)
            self.backprop(result)
    def predict(self, row):
        """
        Runs the feed forward algorithm on the given row
        :param row: 1*M data vector, where M is the number of input nodes
        :return: predicted output for row
        """
        self.clear_nodes()
        for element, input_node in zip(row, self.inputs):
            input_node.set_input(element)
        for layer in self.layers:
            for node in layer:
                node.feed_forward(print_hidden=self.trained)
        # Threshold each sigmoid output at 0.8 to produce a bit vector.
        return [0 if out.output < 0.8 else 1 for out in self.outputs]
    def backprop(self, result):
        """
        Backpropogates the error of this net given that the result should
        be result
        :param result: 1*K data vector, where K is the number of output nodes
        """
        # Output errors first, then propagate backwards layer by layer,
        # updating weights as each layer's error becomes available.
        for resultum, output in zip(result, self.outputs):
            #print("output errors")
            output.calc_error(resultum)
        for layer in (self.hidden_layers[::-1] + [self.inputs]):
            #print("new layer errors and weights")
            for node in layer:
                node.calc_error()
                node.update_weights()
class Node:
    # A hidden-layer node: sigmoid activation over (input + bias), with a
    # weight vector `ws` connecting it to every node in the next layer.
    def __init__(self, eta):
        # eta: learning rate used when updating weights and bias.
        self.eta = eta
        self.output = self.input = self.error = 0
        # ws and next_layer are filled in by set_next_layer once the net's
        # topology is known; bias starts at a random value in [0, 1).
        self.ws = self.next_layer = None
        self.bias = random()
    def set_next_layer(self, layer):
        """
        Initializes the reference to the next layer, and creates a random
        weight vector
        :param layer: list of nodes that belong to the next layer
        """
        self.next_layer = layer
        self.ws = np.random.random(len(layer))
        #self.ws = np.zeros(len(layer))
        #self.ws.fill(.1)
    def set_input(self, input):
        """
        Set the input to this node
        :param input: float value
        """
        # Hidden nodes only accumulate input via feed_forward from the
        # previous layer; direct assignment is an error.
        raise AttributeError("Hidden node's input must be set by other nodes")
    def clear(self):
        """
        clear the input, output and error of this node
        """
        self.input = self.output = self.error = 0
    def calc_output(self):
        """
        Set the output value of this node using it's input field
        """
        # Sigmoid activation of (input + bias).
        #print("input was", self.input, "bias", self.bias)
        self.output = 1 / (1 + exp(-(self.input + self.bias)))
        #print("output is", self.output)
    def feed_forward(self, print_hidden=False):
        """
        Adds to the inputs of each node in the next layer the output
        of this node times the weight from this to that node
        :param #print_hidden: if true, #print to the console the output value
        of this hidden node
        """
        self.calc_output()
        # output != input distinguishes hidden nodes from input nodes, which
        # pass their input straight through.
        if print_hidden and self.output != self.input:
            print(self.output)
        for wij, node in zip(self.ws, self.next_layer):
            node.input += (self.output * wij)
        #print("weights are", self.ws)
    def calc_error(self, *args):
        """
        Calculates the error of this node based on the error of nodes in the
        next layer
        :param args: unused here
        """
        # Standard backprop delta: sigmoid derivative times the weighted sum
        # of downstream errors.
        self.error = (self.output * (1 - self.output) *
                      sum(node.error * wij
                          for wij, node in zip(self.ws, self.next_layer)))
        #print("error is", self.error)
    def update_weights(self):
        """
        Based on the error of each node in the next layer and this
        node's input, updates the weight from this node to that node
        """
        for j, node in enumerate(self.next_layer):
            self.ws[j] += self.eta * (node.error * self.output)
        # The bias moves in proportion to this node's own error term.
        #print("bias was", self.bias, "eta is", self.eta)
        self.bias += (self.error * self.eta)
        #print("bias became", self.bias)
        #print("weights are", self.ws)
class InputNode(Node):
    """First-layer node: its activation is its raw input (identity, no
    sigmoid) and it never carries an error term."""

    def set_input(self, input):
        """Store *input* directly; unlike hidden nodes, input nodes are fed
        by the caller rather than by an upstream layer."""
        self.input = input

    def calc_output(self):
        """Pass the raw input straight through as this node's output."""
        self.output = self.input

    def calc_error(self):
        """Nothing feeds into an input node, so its error is always zero."""
        self.error = 0
class OutputNode(Node):
    """Final-layer node: compares its sigmoid output against the target bit
    and has no downstream layer to forward into."""

    def set_next_layer(self, layer):
        """Reject any attempt to wire an output node forward.

        :param layer: next layer
        """
        raise AttributeError("Output nodes do not have a next layer")

    def feed_forward(self, print_hidden=False):
        """Compute this node's activation; there is no next layer to feed.

        :param print_hidden: unused
        """
        self.calc_output()

    def calc_error(self, result, *args):
        """Set the backprop delta directly from the target value.

        :param result: bit (0 or 1)
        :param args: unused
        """
        # Sigmoid derivative times the raw output error.
        self.error = self.output * (1 - self.output) * (result - self.output)
        #print("output error is", self.error)
| [
"jamesscottmcnamara@gmail.com"
] | jamesscottmcnamara@gmail.com |
df6e073700a543f8342231476d993d8b3541a580 | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/macurl2path.py | 740baa65096a9e47b3824c0fb53cdbf83200c62d | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,454 | py | # 2016.08.04 19:57:02 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/macurl2path.py
"""Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib
import os
__all__ = ['url2pathname', 'pathname2url']
def url2pathname(pathname):
    """OS-specific conversion from a relative URL of the 'file' scheme
    to a file system path; not recommended for general use."""
    # NOTE: Python 2 code (urllib.splittype, `raise X, msg` syntax).
    # Only local 'file:' URLs can be mapped to classic-Mac ':'-paths.
    tp = urllib.splittype(pathname)[0]
    if tp and tp != 'file':
        raise RuntimeError, 'Cannot convert non-local URL to pathname'
    # 'file:///x' -> '/x'; a '//host/...' form names a remote host.
    if pathname[:3] == '///':
        pathname = pathname[2:]
    elif pathname[:2] == '//':
        raise RuntimeError, 'Cannot convert non-local URL to pathname'
    components = pathname.split('/')
    # Remove . and embedded .. components, collapsing them in place.
    i = 0
    while i < len(components):
        if components[i] == '.':
            del components[i]
        elif components[i] == '..' and i > 0 and components[i - 1] not in ('', '..'):
            # 'a/..' cancels out; step back since the list shrank before i.
            del components[i - 1:i + 1]
            i = i - 1
        elif components[i] == '' and i > 0 and components[i - 1] != '':
            del components[i]
        else:
            i = i + 1
    if not components[0]:
        # Absolute URL path -> relative-style Mac path 'a:b:c'.
        rv = ':'.join(components[1:])
    else:
        # Relative URL path: each leading '..' becomes an extra ':' hop up.
        i = 0
        while i < len(components) and components[i] == '..':
            components[i] = ''
            i = i + 1
        rv = ':' + ':'.join(components)
    # Undo %xx URL quoting in the final pathname.
    return urllib.unquote(rv)
def pathname2url(pathname):
    """OS-specific conversion from a classic Mac OS file system path to a
    relative URL of the 'file' scheme; not recommended for general use.

    Raises RuntimeError if the path contains '/', which has no Mac
    representation.  (Py2-only ``raise E, msg`` syntax replaced by the
    call form, valid on both Python 2 and 3.)
    """
    if '/' in pathname:
        raise RuntimeError('Cannot convert pathname containing slashes')
    components = pathname.split(':')
    # Drop the empty components produced by a leading/trailing ':'.
    if components[0] == '':
        del components[0]
    if components[-1] == '':
        del components[-1]
    # An empty component ('::' in the Mac path) means "up one level".
    for i in range(len(components)):
        if components[i] == '':
            components[i] = '..'
    # Truncate/quote every component for URL use.
    components = map(_pncomp2url, components)
    if os.path.isabs(pathname):
        return '/' + '/'.join(components)
    else:
        return '/'.join(components)
def _pncomp2url(component):
    """Quote one Mac path component, truncated to the 31-char HFS limit."""
    return urllib.quote(component[:31], safe='')
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\macurl2path.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:57:02 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
e8357eef31591366579a84ab6797d25ad9a08087 | f4d2cb9d22bffc1e4e96fe4b6c308a3a9764d630 | /Clase 3/ejemplos_comprension.py | 564b2021673ecd684c2f1bdbd54dfa0272122e5d | [] | no_license | agustinc24/datascience_unsam | 8a1651049d70f4aadf883a3ba1305a8d7779d4ae | 93c83ec09f59da177d4a5ced77e5b15ff10581a9 | refs/heads/master | 2022-12-09T17:24:19.061436 | 2020-09-15T19:31:10 | 2020-09-15T19:31:10 | 295,829,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | import csv
def leer_camion(nombre_archivo):
    """Read a CSV file of (name, boxes, price) rows into a list of dicts.

    The first line of the file is treated as a header and skipped.

    :param nombre_archivo: path to the CSV file.
    :return: list of dicts with keys 'Nombre' (str), 'Cajones' (int) and
        'Precio' (float), one per data row.
    :raises ValueError: if a row's count/price fields are not numeric.
    """
    lista_camion = []
    # 'with' guarantees the handle is closed even if a row fails to parse
    # (the original leaked the file object on any exception).
    with open(nombre_archivo, 'r') as f:
        rows = csv.reader(f)
        next(rows)  # skip the header row (csv-aware, unlike next(f))
        for row in rows:
            lista_camion.append({
                'Nombre': row[0],
                'Cajones': int(row[1]),
                'Precio': float(row[2]),
            })
    return lista_camion
# Demo: load the truck CSV and aggregate box counts per product name.
# NOTE(review): hard-coded absolute Windows path -- this only runs on the
# author's machine; consider taking the path as a command-line argument.
camion = leer_camion('C:/Users/elcav/OneDrive/Documentos/Python/Curso Unsam/ejercicios_python/Data/camion.csv')
nombre_cajones = [(s['Nombre'], s['Cajones']) for s in camion]  # list of (name, boxes) tuples
#print(nombre_cajones)
nombres = {s['Nombre'] for s in camion}  # set of the distinct product names
#print("\n",nombres)
stock = {Nombre: 0 for Nombre in nombres}  # dict comprehension: every name starts at 0
for s in camion:
    stock[s['Nombre']] += s['Cajones']  # accumulate the box count for this name
print("\n", stock)
| [
"agustin.cavalotto@gmail.com"
] | agustin.cavalotto@gmail.com |
076d821b7cd0e1afe7ab941c07feea5414608297 | b1972ef757274c7e2fcf423cbea9a4a16b879dca | /tests/test_commit_hooks.py | 43737fee2c1316ddeecbb3a78f2b1954aa51dc36 | [] | no_license | dekoza/commit-hooks | ace73710c8cd2021d22bdf99b853ceb151badd67 | d98fbf8479d2bfa75b3721fa50eee58837641b63 | refs/heads/master | 2020-12-31T09:14:40.845476 | 2020-02-07T16:23:51 | 2020-02-07T16:23:51 | 238,971,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from commit_hooks import __version__
def test_version():
    """The package advertises the expected semantic version."""
    expected = '0.1.0'
    assert __version__ == expected
| [
"dominik@kozaczko.info"
] | dominik@kozaczko.info |
1ec1837fa85cf492f7c8202b56defc9e37abbae0 | f3ac663df0dab919f50f4c5e68479f683f18bfab | /Fibonacci.py | 4b8a1a10b49a05545554387edf28a5a843241403 | [] | no_license | IMDCGP105-1819/portfolio-ShmigabornDungiedoo | 8df18626a340c37aff6da1f365e7441d215e5981 | bcd5688110ee0fd38bfccdcb605b3ce352ac551d | refs/heads/master | 2020-03-30T01:18:38.020979 | 2018-12-31T12:09:27 | 2018-12-31T12:09:27 | 150,569,648 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | def fib(n):
if n==0 or n==1:
return 1
else:
return fib(n-1)+fib(n-2)
# Interactive demo: print fib(n) for a user-supplied n, then fib(1..20).
n=int(input("number: "))  # raises ValueError on non-numeric input
print(fib(n))
# NOTE: this loop reuses and overwrites the 'n' entered above.
for n in range (1, 21):
    print(fib(n))
| [
"relonet95@gmail.com"
] | relonet95@gmail.com |
06150e110933f3126f5f13e11c1916c41595e6e0 | 9d7cc513a59b529eb6527040685f84340adab36f | /cheese_doodle.py | d564474066ec5b2b4077d399e3ce400740f0c178 | [] | no_license | nagaem/learn-to-git | 27a1b7000c99af6db750d9a51151654e9af535f9 | 6fdc6a3879f3a7fcf28708187799ea88a3c432be | refs/heads/master | 2021-12-08T04:02:55.511280 | 2016-02-12T05:08:26 | 2016-02-12T05:08:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | def cheese(n):
return n**2
| [
"stephan@echosec.net"
] | stephan@echosec.net |
5cfb68dc1e63a2e8a9db6dc77bd3d747c66c2797 | 4c3673893e9e70af5de37c715f0b1d39c98a898f | /lispy/lis.py | 37b8318445c7e6c4d7473293994bf2798c49a084 | [] | no_license | nettee/code_history | 949d02367c5d66fdffc2dacbfff948fb39ff2b14 | 21f7ebfa450a69c1347c7c5c16784d19ab288565 | refs/heads/master | 2021-01-06T20:38:19.017621 | 2015-03-19T03:13:31 | 2015-03-19T03:13:31 | 23,660,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | # Peter Norvig
# (How to Write a (Lisp) Interpreter (in Python))
import sys
import math
import operator
def add_globals(env):
    """Populate *env* with math constants/functions plus basic arithmetic.

    Mutates and returns the same mapping.
    """
    env.update(vars(math))
    arithmetic = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
    }
    env.update(arithmetic)
    return env
class Env(dict):
    """An environment: a dict of {'var': val} pairs with an outer Env.

    Lookup walks outward through enclosing environments, mirroring
    lexical scoping.
    """

    def __init__(self, parms=(), args=(), outer=None):
        self.update(zip(parms, args))
        self.outer = outer

    def find(self, var):
        """Return the innermost Env where *var* appears.

        Raises LookupError if *var* is unbound.  (The original printed
        'Env error' and implicitly returned None, which made the caller
        crash later with an unrelated TypeError.)
        """
        if var in self:
            return self
        if self.outer is None:
            raise LookupError('unbound variable: %s' % var)
        return self.outer.find(var)
# Module-wide top-level environment, used by eval_() as its default scope.
global_env = add_globals(Env())
def eval_(x, env=global_env):
    """Evaluate an expression *x* in an environment *env*.

    Supported forms: variable references (Symbol), self-evaluating
    literals, (quote exp), (if test conseq alt), (define var exp),
    (lambda (var...) exp), and procedure application.
    """
    if isinstance(x, Symbol):  # variable reference
        return env.find(x)[x]
    elif not isinstance(x, list):  # constant literal
        return x
    elif x[0] == 'quote':  # (quote exp)
        (_, exp) = x
        return exp
    elif x[0] == 'if':  # (if test conseq alt)
        (_, test, conseq, alt) = x
        if eval_(test, env):
            return eval_(conseq, env)
        else:
            return eval_(alt, env)
    elif x[0] == 'define':  # (define var exp) -- returns None
        (_, var, exp) = x
        env[var] = eval_(exp, env)
    elif x[0] == 'lambda':  # (lambda (var...) exp)
        # renamed from 'vars', which shadowed the builtin of that name
        (_, parms, exp) = x
        def func_(*args):
            # each call evaluates the body in a fresh Env chained to the
            # defining environment (lexical closure)
            return eval_(exp, Env(parms, args, env))
        return func_
    else:  # (proc exp*): evaluate everything, then apply
        exps = [eval_(exp, env) for exp in x]
        proc = exps.pop(0)
        return proc(*exps)
def parse(s):
    """Read a Scheme expression from a string."""
    tokens = tokenize(s)
    return read_from(tokens)
def tokenize(s):
    """Convert a string into a list of tokens, treating parens as words."""
    padded = s.replace('(', ' ( ').replace(')', ' ) ')
    return padded.split()
def read_from(tokens):
    """Read one expression from a list of tokens (consumed in place).

    Raises SyntaxError on premature end of input or an unmatched ')'.
    (The original printed a message and fell through, later crashing with
    an IndexError or silently returning None.)
    """
    if not tokens:
        raise SyntaxError('unexpected EOF while reading')
    t = tokens.pop(0)
    if t == '(':
        L = []
        while True:
            if not tokens:
                # '(' was never closed
                raise SyntaxError('unexpected EOF while reading')
            if tokens[0] == ')':
                break
            L.append(read_from(tokens))
        tokens.pop(0)  # pop off ')'
        return L
    elif t == ')':
        raise SyntaxError('unexpected )')
    else:
        return atom(t)
def atom(token):
    """Numbers become numbers; every other token becomes a Symbol."""
    for convert in (int, float):
        try:
            return convert(token)
        except ValueError:
            pass
    return Symbol(token)
class Symbol(str):
    """A Scheme symbol: a plain str subclass used as a type tag.

    str is immutable, so the value is fixed by str.__new__; the original
    defined an __init__ whose body rebound the local name 'self' -- a
    no-op -- which is removed here.
    """
def to_string(exp):
    """Convert a Python object back into a Lisp-readable string."""
    if not isinstance(exp, list):
        return str(exp)
    return '(' + ' '.join(to_string(item) for item in exp) + ')'
def repl(prompt='lis.py> '):
    """A prompt-read-eval-print loop; runs until input() raises (EOF/^C)."""
    while True:
        result = eval_(parse(input(prompt)))
        if result is not None:
            print(to_string(result))
# Start the interactive interpreter when run as a script.
if __name__ == '__main__':
    repl()
| [
"xf0718@gmail.com"
] | xf0718@gmail.com |
a8ba5e193ad1bfb79c7d4bbd5490208ff2efbccc | 4a014913e00a3e604922fb1c82d9c54b8498a180 | /roughWork/exp7.py | 224f490bbc344dc6123084245ec18cbccccb8f80 | [] | no_license | meghanads/pydlcs | 8ec7e93b199fb54a0048c6beede802fc2e47f2f1 | 31ee858721bd727b492bf168472acf8595f3cb04 | refs/heads/master | 2021-01-17T09:31:53.145526 | 2013-06-22T21:57:14 | 2013-06-22T21:57:14 | 32,127,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # CIRCUIT: T flip Flop
#
from sequence import *
# Build the simulator and the three circuit elements.
# NOTE(review): SIMU/Istream/Ostream/TFlipFlop come from the project's
# 'sequence' package (star-imported above); the parameter meanings below
# are inferred from their names -- confirm against that library.
sim = SIMU('sim1',start = 0,plots = 1, debug =1, pclk = 1 , step =0, pannotate = 1)
I = Istream('IN', fname = 'inp', stream =1)   # input stream, read from file 'inp'
O = Ostream('OUT', stream = 1)                # output stream sink
TF = TFlipFlop('TF')                          # the T flip-flop under test
# Fan the simulator clock out to the input stream, the flip-flop and the output.
sim.clk_out.connect([I.clk_in, TF.C, O.clk_in])
I.data_out.connect([TF.T])    # input data drives the T input
TF.Q.connect([O.data_in])     # Q output is captured by the output stream
# Plot both the input and the flip-flop output, with labels.
sim.addplot([I.data,O.data])
sim.addpname(["input","output"])
#sim.addplot([I.data])
#sim.addpname(["input"])
sim.start = 1
sim.simulate()
| [
"bharat@localhost"
] | bharat@localhost |
ed4abd4b848d29771385c7991bdad7cad9d8a89e | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /poly_kernel_sketch/common.py | 758b1f1c79fc43357e24285b53d31493fbc6614c | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 5,387 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common methods shared by MNIST and ImageNet experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import getpass
import numpy as np
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
# mkdir -p in Python >2.5
def mkdir_p(path):
  """Create *path* and any missing parents (like `mkdir -p`).

  An already-existing directory is not an error; any other OSError
  (permissions, a regular file in the way, ...) propagates.
  """
  try:
    os.makedirs(path, mode=0o755)
  except OSError as exc:
    if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
      raise
# Returns path to postfix under user's Unix home directory.
def make_experiment_dir(postfix):
  """Create (if needed) and return the directory <home>/<postfix>."""
  exp_dir = os.path.join(os.path.expanduser('~'), postfix)
  mkdir_p(exp_dir)
  return exp_dir
# appends .png to file name
def save_fig(folder, filename):
  """Write the current matplotlib figure to <folder>/<filename>.png.

  Does nothing when *folder* is None.
  """
  if folder is None:
    return
  out_path = os.path.join(folder, '{}.png'.format(filename))
  print('saving {}'.format(out_path))
  with open(out_path, 'w') as out_file:
    plt.savefig(out_file)
# appends .txt to file name
def save_array(x, folder, filename, formatting):
  """Write array *x* as text to <folder>/<filename>.txt.

  Does nothing when *folder* is None.  *formatting* is passed through to
  numpy.savetxt's fmt argument.
  """
  if folder is None:
    return
  out_path = os.path.join(folder, '{}.txt'.format(filename))
  print('saving {}'.format(out_path))
  with open(out_path, 'w') as out_file:
    np.savetxt(out_file, x, fmt=formatting)
def load_array(filename):
  """Load a whitespace-separated text array (as written by save_array)."""
  with open(filename, 'r') as handle:
    return np.loadtxt(handle)
# count parameters for svd truncation
def count_parameters_list(k_values, nrows, ncols):
  """Map count_parameters over every rank in *k_values*."""
  return [count_parameters(k, nrows, ncols) for k in k_values]
# number of parameters when nrows-by-ncols matrix is approximated
# with product of nrows-by-rank and rank-by-ncolds matrix.
def count_parameters(rank, nrows, ncols):
  """Parameter count of a rank-*rank* factorization of an nrows x ncols
  matrix: an nrows-by-rank factor plus a rank-by-ncols factor.
  """
  return nrows * rank + rank * ncols
# Return one random rademacher matrix
def fully_random_rademacher_matrix(nrows, ncols):
  """Return an nrows x ncols matrix of i.i.d. +/-1 entries (float32)."""
  signs = np.array([-1, 1], dtype=np.float32)
  return np.random.choice(signs, (nrows, ncols))
# Return a rank-1 Rademacher matrix
def rank1_rademacher(nrows, ncols):
  """Return a rank-1 +/-1 matrix: the outer product of random sign vectors."""
  signs = np.array([-1, 1], dtype=np.float32)
  col = np.random.choice(signs, (nrows, 1))
  row = np.random.choice(signs, (1, ncols))
  # Broadcasting (nrows,1) * (1,ncols) is quicker than np.dot here.
  return col * row
# Sketch matrix A
def sketch_matrix(A, sketch_type, k):
  """Approximate A with a k-term Rademacher sketch.

  Args:
    A: 2-D numpy array to approximate.
    sketch_type: 'arora' (fully random sign matrices) or 'our_sketch'
      (rank-1 sign matrices).
    k: number of random matrices / coefficients in the sketch.

  Returns:
    A float32 matrix with the same shape as A.

  Raises:
    ValueError: for an unknown sketch_type.  (The original printed a
    message and returned -1, silently poisoning downstream math.)
  """
  tf.logging.info('sketch_matrix %s %d', sketch_type, k)
  h1 = A.shape[0]
  h2 = A.shape[1]
  # Numpy defaults to float64 (double precision); float32 halves memory
  # traffic and is quicker here.
  A_hat = np.zeros((h1, h2), dtype=np.float32)
  for i in range(0, k):
    tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,
                           sketch_type, i, k)
    # Draw the i-th random sign matrix.
    if sketch_type == 'arora':
      mat = fully_random_rademacher_matrix(h1, h2)
    elif sketch_type == 'our_sketch':
      mat = rank1_rademacher(h1, h2)
    else:
      raise ValueError('unknown sketch_type: {!r}'.format(sketch_type))
    # Coefficient = <A, mat> (inner product of the flattened matrices).
    coefficient = np.dot(np.ravel(A), np.ravel(mat))
    # Accumulate coefficient * matrix into the sketch.
    A_hat += coefficient * mat
  tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)
  return (1.0 / k) * A_hat
# Return truncated svd of A, where only the top k components are used.
# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags
# speeds up svd by almost 2x. However it makes sketching, which is dominant,
# a tiny bit slower and hence it's not worth it.
def truncated_svd(A, k):
  """Best rank-k approximation of A via a truncated SVD."""
  tf.logging.info('Computing SVD ...')
  u, s, v = np.linalg.svd(A, full_matrices=False)
  # Keep only the k leading singular triplets and recompose.
  approx = np.dot(u[:, :k], np.dot(np.diag(s[:k]), v[:k, :]))
  tf.logging.info('Done computing SVD ...')
  return approx
# num_params is rank for SVD, number of coefficients for sketches.
def compress(A, compression_type, num_params):
  """Compress matrix A with the requested method.

  Args:
    A: 2-D numpy array.
    compression_type: 'svd', 'our_sketch' or 'arora'.
    num_params: the rank for 'svd'; the number of coefficients for the
      sketch methods.

  Raises:
    ValueError: for an unknown compression_type.  (The original printed
    an error message and then crashed with an UnboundLocalError on the
    `return A_hat` that followed.)
  """
  if compression_type == 'svd':
    A_hat = truncated_svd(A, num_params)
  elif compression_type == 'our_sketch' or compression_type == 'arora':
    A_hat = sketch_matrix(A, compression_type, num_params)
  else:
    raise ValueError(
        'wrong compression type {!r}: must be svd, our_sketch, or arora'
        .format(compression_type))
  return A_hat
# return singular values of A sorted in descending order
def singular_values(A):
  """Return the singular values of A sorted in descending order."""
  s = np.linalg.svd(A)[1]
  return sorted(s, reverse=True)
def plot_and_save_singular_values(s, folder, fn, nrows, ncols):
  """Plot singular values *s* (sorted descending) and save plot + raw values.

  Writes <folder>/<fn>.png (the plot) and <folder>/<fn>_vals.txt (the
  values).  nrows/ncols only decorate the title with the matrix shape.

  NOTE(review): the plot shows the sorted values, but the saved text file
  keeps *s* in its original order -- confirm this asymmetry is intended.
  """
  x = range(1, len(s) + 1)
  y = sorted(s, reverse=True)
  title = 'Singular values\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'
  plt.plot(x, y)
  plt.title(title)
  plt.tight_layout()
  save_fig(folder, fn)
  # full double precision so the values round-trip exactly
  save_array(np.array(s), folder, fn + '_vals', '%.18e')
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
6b5a81a8ab4123fbb88df10243fbfda7a3d694aa | 59f13b09fb6f0c92ab516f31bd4f739fe6040441 | /vindula/tile/browser/accordion.py | 33f5bb2d74538424883a72474d1dc1e3739bc376 | [] | no_license | vindula/vindula.tile | f3d6acb0326cb95dad31b2c5dd21cca290525943 | 1e747e1c7dfda924edb5d4905855ba86deb41b02 | refs/heads/master | 2016-09-10T22:31:12.217220 | 2015-11-25T16:26:12 | 2015-11-25T16:26:12 | 9,704,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # coding: utf-8
from five import grok
from vindula.tile.browser.baseview import BaseView
grok.templatedir('templates')
class AccordionView(BaseView):
    """Grok browser view for the accordion tile (registered as 'accordion-view')."""
    grok.name('accordion-view')

    def get_itens_abas(self):
        """Return the accordion's visible tab objects, in display order.

        Queries the portal catalog for TileAccordionItem objects that are
        direct children (depth 1) of the current context, then keeps only
        items not excluded from navigation and visible to the current user
        (via has_public_or_permission -- presumably a BaseView permission
        check; confirm against BaseView).
        """
        context = self.context
        current_user = self.p_membership.getAuthenticatedMember()
        L = []
        # Catalog query: direct children of this context, ordered by their
        # position inside the parent folder.
        itens = self.portal_catalog(**{'sort_on': 'getObjPositionInParent',
                                       'portal_type':['TileAccordionItem',],
                                       'path':{'query':'/'.join(context.getPhysicalPath()), 'depth': 1}
                                       })
        for t in itens:
            # Wake the full object from the catalog brain to read its fields.
            t = t.getObject()
            if not t.getExcludeFromNav() and self.has_public_or_permission(current_user, t):
                L.append(t)
        return L
| [
"cesaraugusto@liberiun.com"
] | cesaraugusto@liberiun.com |
2e698271e5521219a57a30b09eff50ba6a0d081c | 506df7a85d0b3b522b24998acda41f5f707b53a4 | /chess/player.py | 9025ed4b4c892d0a45a43ff14fec01ad32f8d717 | [] | no_license | DomhnallBoyle/python-games | 25dae160cf1b8bd3882ef5d14f998a7094e22d86 | 78083f2c6ad8abd01b0689549bcab82c39ece1f7 | refs/heads/master | 2021-01-18T16:02:20.601949 | 2017-03-30T13:34:59 | 2017-03-30T13:34:59 | 86,705,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py |
class Player(object):
    """A chess player: a non-empty display name, a piece list and a score."""

    def __init__(self):
        # The name is read interactively from stdin.
        self.name = self._get_player_name()
        # Presumably the pieces this player owns/captures; populated
        # elsewhere -- TODO confirm against the board/game code.
        self.pieces = []
        self.score = 0

    def _get_player_name(self):
        """Prompt until a non-empty name is entered, then return it.

        The original duplicated the prompt once before the loop and once
        inside it; a single loop removes the repetition.
        """
        while True:
            name = raw_input('Enter a name for player: ')
            if name:
                return name

    def make_move(self):
        """Placeholder: concrete game logic supplies the move behaviour."""
        pass
| [
"domhnallboyle@gmail.com"
] | domhnallboyle@gmail.com |
f644e7ba8eedd7cdef234be96ba7b530e6ca331f | 5ef6e49ac75df1f2e8e97ec7f5615d9cdf56f446 | /sft/config-sample/old_keras_agents/agents/lasagne_complex.py | c92f0af66b0822eac7723f4ad4ee485947b54ec8 | [
"MIT"
] | permissive | kevinkepp/search-for-this | c7915a5cafa50365b608ed89cfd73af8ddcf0520 | 37a94ae7c7ad780f34792f8098e4a4066c6e097a | refs/heads/master | 2021-09-04T02:49:53.551799 | 2018-01-14T23:11:55 | 2018-01-14T23:11:55 | 72,003,710 | 0 | 0 | null | 2017-02-17T15:33:13 | 2016-10-26T13:03:46 | Python | UTF-8 | Python | false | false | 1,717 | py | from lasagne.layers import DenseLayer, ConcatLayer, FlattenLayer
from lasagne.nonlinearities import linear, rectify
from lasagne.updates import rmsprop
import sft.agent.DeepQAgentGpu
import sft.agent.model.LasagneMlpModel
import sft.eps.Linear
import sft.reward.TargetMiddle
from sft.log.AgentLogger import AgentLogger
from .. import world
from ..world import *
# Experiment configuration for the Lasagne MLP agent.
logger = AgentLogger(__name__)

# The agent conditions on its last 4 actions (one-hot over nb_actions).
action_hist_len = 4
action_hist_size = Size(action_hist_len, world.nb_actions)

# Linear epsilon-greedy schedule: anneal from 1 to 0.1 over the first
# 4/5 of the training epochs, then hold at 0.1.
epsilon_update = sft.eps.Linear.Linear(
	start=1,
	end=0.1,
	steps=epochs * 4/5
)
def build_network(net_in_views, net_in_actions):
	"""Build the Q-network: flatten both inputs, concatenate, 256->64->4 MLP.

	:param net_in_views: lasagne input layer for the view observations.
	:param net_in_actions: lasagne input layer for the action history.
	:return: linear 4-unit output layer (one Q-value per action).
	"""
	net_views_out = FlattenLayer(net_in_views)
	net_actions_out = FlattenLayer(net_in_actions)
	net_concat = ConcatLayer([net_views_out, net_actions_out])
	net_hid = DenseLayer(net_concat, num_units=256, nonlinearity=rectify)
	net_hid2 = DenseLayer(net_hid, num_units=64, nonlinearity=rectify)
	net_out = DenseLayer(net_hid2, num_units=4, nonlinearity=linear)
	return net_out
def optimizer(loss, params):
	"""RMSProp update rule with DQN-style hyperparameters (lr 2.5e-4)."""
	return rmsprop(loss, params,
		learning_rate=0.00025,
		rho=0.9,
		epsilon=1e-8
	)
batch_size = 32

# Q-network wrapper: discount 0.99, target network cloned every 500 steps.
model = sft.agent.model.LasagneMlpModel.LasagneMlpModel(
	logger=logger,
	batch_size=batch_size,
	discount=0.99,
	actions=actions,
	view_size=world.view_size,
	action_hist_size=action_hist_size,
	network_builder=build_network,
	optimizer=optimizer,
	clone_interval=500
)

# DQN agent: 100k-transition replay buffer; learning starts after 1000
# steps and then runs every step.
agent = sft.agent.DeepQAgentGpu.DeepQAgentGpu(
	logger=logger,
	actions=actions,
	batch_size=batch_size,
	buffer_size=100000,
	start_learn=1000,
	learn_interval=1,
	view_size=world.view_size,
	action_hist_size=action_hist_size,
	model=model
)

# Binary reward -- presumably 1 when the target is centered in the view,
# 0 otherwise (confirm TargetMiddle semantics).
reward = sft.reward.TargetMiddle.TargetMiddle(
	reward_target=1,
	reward_not_target=0
)
| [
"dev@kevinkepp.de"
] | dev@kevinkepp.de |
bb4db4b87cead04eedd558dd4868e7965573b518 | 2402d78f985e8dbf907a69294bf89ef7c838618c | /utilities/gitall.py | 1f347533b558d140e4dcd7472368eb11a1f78d31 | [] | no_license | MetaCell/HNN-UI | d111bf0eaccc78252fd37cd74fef79eac67e59d6 | 26582dfcae454850e752dc0e276c00887a802684 | refs/heads/master | 2020-04-01T10:00:33.640897 | 2019-08-27T08:28:00 | 2019-08-27T08:28:00 | 153,098,759 | 1 | 2 | null | 2019-08-27T09:28:01 | 2018-10-15T11:04:14 | Python | UTF-8 | Python | false | false | 2,923 | py | #!/usr/bin/python
#
# Utility script for mass git operatiosn on HNN-UI
# Usage:
# gitall branches: print current branch of each repo
#
# gitall checkout <branch> : checkout <branch> on each repo
#
# gitall pull <remote> <branch> : execute git pull on each repo
#
# gitall fetch <remote> <branch> : execute git fetch on each repo
#
#
import subprocess
import sys
# Repositories this utility operates on.  'path' is relative to the
# directory the script is run from (the utilities/ folder).
config = {
    "repos": [
        {
            "name": "HNN_UI",
            "path": "..",
            "url": "https://github.com/MetaCell/HNN-UI"
        },
        {
            "name": "org.geppetto.frontend.jupyter",
            "path": "../org.geppetto.frontend.jupyter",
            "url": "https://github.com/openworm/org.geppetto.frontend.jupyter"
        },
        {
            "name": "org.geppetto.frontend",
            "path": "../hnn_ui/geppetto/",
            "url": "https://github.com/openworm/org.geppetto.frontend"
        },
        {
            "name": "Geppetto HNN extension",
            "path": "../hnn_ui/geppetto/src/main/webapp/extensions/geppetto-hnn/",
            "url": "https://github.com/MetaCell/geppetto-hnn"
        }
    ]
}
def incorrectInput(argv, msg):
    """Report a usage error and terminate.

    Exits with status 1 so shells and callers can detect the failure
    (the original exited 0, signalling success on bad input).

    :param argv: the offending argument list (unused; kept for signature
        compatibility with existing call sites).
    :param msg: human-readable error description.
    """
    print(msg)
    sys.exit(1)
def main(argv):
    """Translate CLI arguments into one git command and run it in every repo.

    :param argv: sys.argv[1:]; argv[0] is the subcommand (push, add,
        commit, branches, reset, status, remote, diff, checkout, pull,
        fetch) and the remaining items are forwarded to git.
    """
    command = []
    if (len(argv) == 0):
        incorrectInput(argv, 'Too few paramaters')
    elif (argv[0] == 'push'):
        command = ['git', 'push', argv[1], argv[2]]
    elif (argv[0] == 'add'):
        command = ['git', 'add', argv[1]]
    elif (argv[0] == 'commit'):
        command = ['git', 'commit', argv[1], argv[2]]
    elif (argv[0] == 'branches'):
        # Prints the currently checked-out branch of each repo.
        command = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
    elif (argv[0] == 'reset'):
        command = ['git', 'reset', '--hard', 'HEAD']
    elif (argv[0] == 'status'):
        command = ['git', argv[0]]
    elif (argv[0] == 'remote'):
        command = ['git', 'remote', '-v']
    elif (argv[0] == 'diff'):
        command = ['git', 'diff']
    elif (argv[0] == 'checkout'):
        if (len(argv) == 2):
            command = ['git', 'checkout', argv[1]]
        elif (len(argv) == 3):
            command = ['git', 'checkout', argv[1], argv[2]]
        else:
            incorrectInput(argv, 'Expected <=3 paramaters')
    elif (argv[0] == 'pull' or argv[0] == 'fetch'):
        if (len(argv) == 1):
            command = ['git', argv[0]]
        elif (len(argv) == 2):
            command = ['git', argv[0], argv[1]]
        elif (len(argv) == 3):
            command = ['git', argv[0], argv[1], argv[2]]
        else:
            incorrectInput(argv, 'Too many paramaters')
    else:
        incorrectInput(argv, 'Unrecognized command')

    for repo in config['repos']:
        try:
            print(repo['name'])
            print(subprocess.check_output(command, cwd=repo['path']).decode('utf-8'))
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C unable to stop the loop.
        except Exception:
            print("Error -- trying next repo")
if __name__ == "__main__":
main(sys.argv[1:])
| [
"afonsobspinto@gmail.com"
] | afonsobspinto@gmail.com |
647917924cd80707d6b2baa922c9d4558992a1a0 | d13c098738df6e94332b84a63beb2e75a7036f7e | /Visualization/conv_filter_visualization.py | 54feff0be31e8c595acf57dc5c1b060de47f1065 | [] | no_license | ma-xu/DNN | 0d4962647f3b2ce0d35d3d50a026a4ad7f6871ee | c05bba778810e61e821bcb9093434a1ff9f0c258 | refs/heads/master | 2022-03-20T12:15:42.966469 | 2019-12-05T03:40:00 | 2019-12-05T03:40:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,759 | py | ###########################################
### Copy for keras examples at github
###########################################
"""
#Visualization of the filters of VGG16, via gradient ascent in input space.
This script can run on CPU in a few minutes.
Results example: 
"""
from __future__ import print_function
import time
import numpy as np
from PIL import Image as pil_image
from keras.preprocessing.image import save_img
from keras import layers
from keras.applications import vgg16
from keras import backend as K
def normalize(x):
    """utility function to normalize a tensor.

    # Arguments
        x: An input tensor.

    # Returns
        The input scaled to unit RMS; epsilon guards against division by zero.
    """
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + K.epsilon())
def deprocess_image(x):
    """utility function to convert a float array into a valid uint8 image.

    # Arguments
        x: A numpy-array representing the generated image.

    # Returns
        A processed numpy-array, which could be used in e.g. imshow.

    Note: the in-place operators below mutate the caller's array as a
    side effect before the final copies are made.
    """
    # normalize tensor: center on 0., ensure std is 0.25
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.25

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        # move channels to the last axis for image viewers
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def process_image(x, former):
    """utility function to convert a valid uint8 image back into a float array.

    Reverses `deprocess_image`.

    # Arguments
        x: A numpy-array, which could be used in e.g. imshow.
        former: The former numpy-array.
                Need to determine the former mean and variance.

    # Returns
        A processed numpy-array representing the generated image.
    """
    if K.image_data_format() == 'channels_first':
        x = x.transpose((2, 0, 1))
    centered = x / 255 - 0.5
    return centered * 4 * former.std() + former.mean()
def visualize_layer(model,
                    layer_name,
                    step=1.,
                    epochs=15,
                    upscaling_steps=9,
                    upscaling_factor=1.2,
                    output_dim=(412, 412),
                    filter_range=(0, None)):
    """Visualizes the most relevant filters of one conv-layer in a certain model.

    # Arguments
        model: The model containing layer_name.
        layer_name: The name of the layer to be visualized.
                    Has to be a part of model.
        step: step size for gradient ascent.
        epochs: Number of iterations for gradient ascent.
        upscaling_steps: Number of upscaling steps.
                         Starting image is in this case (80, 80).
        upscaling_factor: Factor to which to slowly upgrade
                          the image towards output_dim.
        output_dim: [img_width, img_height] The output image dimensions.
        filter_range: Tupel[lower, upper]
                      Determines the to be computed filter numbers.
                      If the second value is `None`,
                      the last filter will be inferred as the upper boundary.

    Side effect: writes 'vgg_<layer_name>_<n>x<n>.png' to the working
    directory via _draw_filters.
    """

    def _generate_filter_image(input_img,
                               layer_output,
                               filter_index):
        """Generates image for one particular filter via gradient ascent.

        # Arguments
            input_img: The input-image Tensor.
            layer_output: The output-image Tensor.
            filter_index: The to be processed filter number.
                          Assumed to be valid.

        #Returns
            Either None if no image could be generated.
            or a tuple of the image (array) itself and the last loss.
        """
        s_time = time.time()

        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # we start from a gray image with some random noise
        intermediate_dim = tuple(
            int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)
        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random(
                (1, 3, intermediate_dim[0], intermediate_dim[1]))
        else:
            input_img_data = np.random.random(
                (1, intermediate_dim[0], intermediate_dim[1], 3))
        # shift noise from [0,1) to roughly [118, 138]: gray +/- small noise
        input_img_data = (input_img_data - 0.5) * 20 + 128

        # Slowly upscaling towards the original size prevents
        # a dominating high-frequency of the to visualized structure
        # as it would occur if we directly compute the 412d-image.
        # Behaves as a better starting point for each following dimension
        # and therefore avoids poor local minima
        for up in reversed(range(upscaling_steps)):
            # we run gradient ascent for e.g. 20 steps
            for _ in range(epochs):
                loss_value, grads_value = iterate([input_img_data])
                input_img_data += grads_value * step

                # some filters get stuck to 0, we can skip them
                if loss_value <= K.epsilon():
                    return None

            # Calulate upscaled dimension
            intermediate_dim = tuple(
                int(x / (upscaling_factor ** up)) for x in output_dim)
            # Upscale
            img = deprocess_image(input_img_data[0])
            img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
                                                           pil_image.BICUBIC))
            input_img_data = [process_image(img, input_img_data[0])]

        # decode the resulting input image
        img = deprocess_image(input_img_data[0])
        e_time = time.time()
        print('Costs of filter {:3}: {:5.0f} ( {:4.2f}s )'.format(filter_index,
                                                                  loss_value,
                                                                  e_time - s_time))
        return img, loss_value

    def _draw_filters(filters, n=None):
        """Draw the best filters in a nxn grid and save the grid to disk.

        # Arguments
            filters: A List of generated images and their corresponding losses
                     for each processed filter.
            n: dimension of the grid.
               If none, the largest possible square will be used
        """
        if n is None:
            n = int(np.floor(np.sqrt(len(filters))))

        # the filters that have the highest loss are assumed to be better-looking.
        # we will only keep the top n*n filters.
        filters.sort(key=lambda x: x[1], reverse=True)
        filters = filters[:n * n]

        # build a black picture with enough space for
        # e.g. our 8 x 8 filters of size 412 x 412, with a 5px margin in between
        MARGIN = 5
        width = n * output_dim[0] + (n - 1) * MARGIN
        height = n * output_dim[1] + (n - 1) * MARGIN
        stitched_filters = np.zeros((width, height, 3), dtype='uint8')

        # fill the picture with our saved filters
        for i in range(n):
            for j in range(n):
                img, _ = filters[i * n + j]
                width_margin = (output_dim[0] + MARGIN) * i
                height_margin = (output_dim[1] + MARGIN) * j
                stitched_filters[
                    width_margin: width_margin + output_dim[0],
                    height_margin: height_margin + output_dim[1], :] = img

        # save the result to disk
        save_img('vgg_{0:}_{1:}x{1:}.png'.format(layer_name, n), stitched_filters)

    # this is the placeholder for the input images
    assert len(model.inputs) == 1
    input_img = model.inputs[0]

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    output_layer = layer_dict[layer_name]
    assert isinstance(output_layer, layers.Conv2D)

    # Compute to be processed filter range
    filter_lower = filter_range[0]
    # NOTE(review): the filter count is taken from the length of the bias
    # vector (get_weights()[1]) -- assumes the layer was built with
    # use_bias=True; confirm for layers configured without a bias.
    filter_upper = (filter_range[1]
                    if filter_range[1] is not None
                    else len(output_layer.get_weights()[1]))
    assert(filter_lower >= 0
           and filter_upper <= len(output_layer.get_weights()[1])
           and filter_upper > filter_lower)
    print('Compute filters {:} to {:}'.format(filter_lower, filter_upper))

    # iterate through each filter and generate its corresponding image
    processed_filters = []
    for f in range(filter_lower, filter_upper):
        img_loss = _generate_filter_image(input_img, output_layer.output, f)

        if img_loss is not None:
            processed_filters.append(img_loss)

    print('{} filter processed.'.format(len(processed_filters)))
    # Finally draw and store the best filters to disk
    _draw_filters(processed_filters)
if __name__ == '__main__':
    # the name of the layer we want to visualize
    # (see model definition at keras/applications/vgg16.py)
    LAYER_NAME = 'block5_conv1'

    # build the VGG16 network with ImageNet weights
    # (downloads the weights on first use; the classifier head is not needed)
    vgg = vgg16.VGG16(weights='imagenet', include_top=False)
    print('Model loaded.')
    vgg.summary()

    # example function call
    visualize_layer(vgg, LAYER_NAME)
"xuma@my.unt.edu"
] | xuma@my.unt.edu |
f419e81f514108bf11109e0849632fd4c5d074f3 | d7d25574246fd8585396a02ebd2ca8450e49b082 | /leetcode-py/leetcode84.py | 1824ee7477f31f88d415aa6aff3453e23e14c6cb | [] | no_license | cicihou/LearningProject | b6b1de2300e574835f253935d0c0ae693b194020 | 3a5649357e0f21cbbc5e238351300cd706d533b3 | refs/heads/master | 2022-12-04T06:18:14.856766 | 2022-11-29T08:54:16 | 2022-11-29T08:54:16 | 141,606,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | '''
视频:https://www.youtube.com/watch?v=vcv3REtIvEo
monostack
1. find the nearest left bar with height < current bar
2. find the nearest right bar with height < current bar
分别从左向右,以及右向左维护两个单调栈,monostack
左边向右边时:
碰到比左边或者比右边小的,就 pop 掉当前的数,单调栈中存且只存比 cur 的 height 小的 index,
我们单独维护一个数组,存 nearest left bar less than cur 的 index + 1 值
右边向左边时:相反
最后求出每个index可以生成的 histogram 的最大值,(right - left + 1) * bar_height
这题关于单调栈,挺难的,我不是很理解
'''
class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
'''
method 1
:param heights:
:return:
'''
n = len(heights)
left, right = [0] * n, [0] * n
monostack = list()
for i in range(n):
while monostack and heights[monostack[-1]] >= heights[i]:
monostack.pop()
left[i] = monostack[-1] if monostack else -1
monostack.append(i)
monostack = list()
for i in range(n-1, -1, -1):
while monostack and heights[monostack[-1]] >= heights[i]:
monostack.pop()
right[i] = monostack[-1] if monostack else n
monostack.append(i)
res = max((right[i] - left[i] - 1) * heights[i] for i in range(n))
return res
| [
"houxi_zuel@163.com"
] | houxi_zuel@163.com |
6aa93f8ca984578884a7b5542a708073c3d95d26 | 12289eecb8ad582578e5722a52e519cbd38c3ab3 | /app/auth/forms.py | e128e95ec44f4b5d935f2fca9b0a9b7aaef9879a | [
"MIT"
] | permissive | kuangzhehuangjian/TaskScheduler | fde409820f4fdbd9bff36f5c181943dd8f5d1601 | 18f25ae1b3ffd14d3c9c736e0e63104df13acb8f | refs/heads/main | 2023-08-21T00:30:31.184742 | 2021-10-27T02:29:55 | 2021-10-27T02:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | # -*- coding: utf-8 -*-
# @Author: guomaoqiu
# @File Name: forms.py
# @Date: 2019-03-14 09:51:08
# @Last Modified by: guomaoqiu
# @Last Modified time: 2019-05-22 17:59:46
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
    """Sign-in form: email + password with an optional remember-me flag.

    NOTE(review): default credentials ('root@email.com' / '123456')
    pre-fill the fields -- presumably a development convenience; confirm
    they are removed for production.
    """
    email = StringField('邮箱', default="root@email.com", validators=[Required(), Length(1, 64),
                                                                     Email()])
    # NOTE(review): rendered as a plain StringField, so the password is
    # visible while typing; PasswordField would mask it.
    password = StringField('密码', default="123456", validators=[Required()])
    remember_me = BooleanField('保持登录')
    submit = SubmitField('登录')
class RegistrationForm(FlaskForm):
    """Account sign-up form with uniqueness checks on e-mail and username."""

    email = StringField(
        '邮箱',
        validators=[Required(), Length(1, 64), Email()],
    )
    username = StringField(
        '用户名',
        validators=[
            Required(),
            Length(1, 64),
            Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                   'Usernames must have only letters, '
                   'numbers, dots or underscores'),
        ],
    )
    password = PasswordField(
        '密码',
        validators=[Required(),
                    EqualTo('password2', message='Passwords must match.')],
    )
    password2 = PasswordField('确认密码', validators=[Required()])
    submit = SubmitField('注册')

    def validate_email(self, field):
        """WTForms hook: reject addresses that are already registered."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        """WTForms hook: reject usernames that are already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
    """Password change for an already authenticated user."""

    old_password = PasswordField('Old password', validators=[Required()])
    password = PasswordField(
        'New password',
        validators=[Required(),
                    EqualTo('password2', message='Passwords must match')],
    )
    password2 = PasswordField('Confirm new password', validators=[Required()])
    submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
    """Step 1 of password reset: ask for the account's e-mail address."""

    email = StringField('Email',
                        validators=[Required(), Length(1, 64), Email()])
    submit = SubmitField('Reset Password')
class PasswordResetForm(FlaskForm):
    """Step 2 of password reset: e-mail plus the new password, twice."""

    email = StringField('Email',
                        validators=[Required(), Length(1, 64), Email()])
    password = PasswordField(
        'New Password',
        validators=[Required(),
                    EqualTo('password2', message='Passwords must match')],
    )
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Reset Password')

    def validate_email(self, field):
        """WTForms hook: the address must belong to a known account."""
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
class ChangeEmailForm(FlaskForm):
    """Change the account e-mail; the current password is required."""

    email = StringField('New Email',
                        validators=[Required(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[Required()])
    submit = SubmitField('Update Email Address')

    def validate_email(self, field):
        """WTForms hook: the new address must not be registered already."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
| [
"870709864@qq.com"
] | 870709864@qq.com |
1d4a746c351655e76b9f92ea08e41692ab68a1fa | 9dbb2c18651df570ded1d4a5fd53e1c36c359afa | /基础心法第1层--炼体/计算个人所得税.py | c7e091cbecaf593c6318e4a87949f194d690d8a6 | [] | no_license | zhanrui2011/Python-100-Days-Study-Notes | a9ca7ef9dc58b7959329f1db85feaf119ad78005 | 18bcc3e5905c6c50470d70ba5d3ac41c0cd62556 | refs/heads/master | 2020-06-26T15:09:18.402828 | 2019-07-30T14:18:47 | 2019-07-30T14:18:47 | 199,668,645 | 2 | 0 | null | 2019-07-30T14:32:03 | 2019-07-30T14:32:03 | null | UTF-8 | Python | false | false | 683 | py | """
输入月收入和五险一金计算个人所得税
"""
salary = float(input('本月收入: '))
insurance = float(input('五险一金: '))
# Taxable income above the 3500 CNY monthly allowance.
diff = salary - insurance - 3500
# Progressive brackets: (exclusive upper bound, marginal rate, quick deduction).
_BRACKETS = (
    (1500, 0.03, 0),
    (4500, 0.1, 105),
    (9000, 0.2, 555),
    (35000, 0.25, 1005),
    (55000, 0.3, 2755),
    (80000, 0.35, 5505),
    (float('inf'), 0.45, 13505),
)
rate, deduction = 0, 0
if diff > 0:
    for upper, bracket_rate, quick_deduction in _BRACKETS:
        if diff < upper:
            rate, deduction = bracket_rate, quick_deduction
            break
# Quick-deduction formula; abs() kept from the original implementation.
tax = abs(diff * rate - deduction)
print('个人所得税: ¥%.2f元' % tax)
print('实际到手收入: ¥%.2f元' % (diff + 3500 - tax)) | [
"Vhzix24@gmail.com"
] | Vhzix24@gmail.com |
2e6e6d70f0b9d008c58b9603581611fe2e94b17f | b1b4dc2c530b56a9abbc6b144c3ca3f5a6d11e70 | /twisted_/UDP.py | e56e81deb2f409b7f9ab7d7464fe929c25dbbe04 | [] | no_license | BorisovDima/_ex | d38cff25575bfc6d1906940d40091fb6946cd829 | 6c14d661b8e051f8ec385cb09997da1fa781431a | refs/heads/master | 2020-05-07T18:13:38.304305 | 2019-08-23T10:59:09 | 2019-08-23T10:59:09 | 180,757,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
class UDP(DatagramProtocol):
    """Minimal UDP protocol: logs lifecycle events and echoes every datagram
    back to its sender, tagged with the sender's (host, port) address."""

    def stopProtocol(self):
        # Called when the listening port has been torn down.
        print('END')

    def startProtocol(self):
        # Called once the transport is ready; log it for debugging.
        print(self.transport)
        print('Start')

    def datagramReceived(self, datagram, addr):
        # Removed the original dead statement ``addr[1]`` (its result was
        # computed and discarded).
        print(datagram, addr)
        self.transport.write(b'%s - %s' % (datagram, str(addr).encode()), addr)
# Resolve a hostname through the reactor's resolver and log the result.
d = reactor.resolve('google.com')
d.addCallback(lambda r: print(r))
# reactor.listenUDP(8001, UDP())
# reactor.run()
################## my socket
# Hand-built non-blocking UDP socket that the reactor could adopt below.
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('0.0.0.0', 8000))
s.setblocking(False)
# reactor.adoptDatagramPort(s.fileno(), socket.AF_INET, UDP())
# reactor.run()
################## MULTICAST
class MultiUDP(DatagramProtocol):
    # Multicast ping/pong: joins group 228.0.0.5 and answers every datagram.
    def startProtocol(self):
        # TTL 5: datagrams may cross up to 5 routers before being dropped.
        self.transport.setTTL(5)
        self.transport.joinGroup("228.0.0.5")
    def datagramReceived(self, datagram, addr):
        print(datagram, addr)
        self.transport.write(b"Server: Pong", addr)
# listenMultiple=True lets several processes share the port; reactor.run()
# blocks until the event loop is stopped.
reactor.listenMulticast(9999, MultiUDP(), listenMultiple=True)
reactor.run()
| [
"you@example.com"
] | you@example.com |
6eb055534e069a46fb58ca98949e9dd2924f5ec0 | 87ba91313fb206e8974fcd0be9676eca6f15fb09 | /w3.py | fb91c18243898c02ade224ad8576bd3fdb6be77e | [] | no_license | tsly2015/Notes | afd55b806184a3332676c7d15bb485c87d1aa6e3 | 81b1eaa7c5931798bfeb0f0a48929c5d354d8be8 | refs/heads/master | 2021-01-19T20:18:34.817264 | 2014-11-28T03:42:43 | 2014-11-28T03:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | from collections import namedtuple
# Lightweight record type describing one submitted link.
Link = namedtuple('Link', ['id', 'submitter_id', 'submitted_time', 'votes',
                           'title', 'url'])
# In-memory sample data the query functions below run against.
links = [
    Link(0, 60398, 1334014208.0, 109,
         "C overtakes Java as the No. 1 programming language in the TIOBE index.",
         "http://pixelstech.net/article/index.php?id=1333969280")]
def query():
    # Linear scan for the link with id 15; yields None when absent.
    matches = (l.votes for l in links if l.id == 15)
    return next(matches, None)
def query():
    # All submissions by user 62443, ordered oldest first.
    submissions = [l for l in links if l.submitter_id == 62443]
    return sorted(submissions, key=lambda s: s.submitted_time)
import sqlite3
# In-memory database: schema and rows live only for this process.
db = sqlite3.connect(':memory:')
db.execute('create table links ' +
           '(id integer, submitter_id integer, submitted_time integer, ' +
           'votes integer, title text, url text)')
# Seed the table from the in-memory sample list above.
for l in links:
    db.execute('insert into links values (?, ?, ?, ?, ?, ?)', l)
def query():
    # Eagerly fetch every row of the links table.
    cursor = db.execute("select * from links")
    return cursor.fetchall()
def query():
c = db.execute("select * from links")
for link_tuple in c:
link = Link(*link_tuple)
print link.votes
print link_tuple
def query():
    # Single-row lookup by id; raises if id 2 is missing (fetchone -> None).
    row = db.execute("select * from links where id = 2").fetchone()
    return Link(*row).votes
def query():
results = []
c = db.execute("select * from links where submitter_id = 62443 order by submitted_time asc")
#for link_tuple in c:
# link = Link(*link_tuple)
# results.append(link.id)
results = [t[0] for t in c]
return results
print query()
def build_link_index():
    # Map link id -> Link so lookups become O(1) dict hits.
    return dict((l.id, l) for l in links)
def link_by_id(link_id):
    # O(n) scan; superseded below by the indexed version.
    return next((l for l in links if l.id == link_id), None)
# Build the id -> Link index once, up front.
link_index = build_link_index()
def link_by_id(link_id):
    """Indexed O(1) lookup; returns None for unknown ids.

    Bug fix: the original subscripted the bound method
    (``link_index.get[link_id]``), which raises TypeError on every call;
    the method must be called instead.
    """
    return link_index.get(link_id)
def add_new_link(link):
    # Keep the flat list and the id index in sync on insert.
    links.append(link)
    link_index[link.id] = link
import os
import webapp2
import jinja2
from google.appengine.ext import db
# Jinja2 environment rooted at ./templates; autoescape guards against XSS.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)
class Handler(webapp2.RequestHandler):
    """Base handler: convenience wrappers around jinja2 rendering."""
    def write(self, *a, **kw):
        # Forward straight to the response stream.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a template to a string without writing it out.
        return jinja_env.get_template(template).render(params)
    def render(self, template, **kw):
        # Render and send in a single step.
        self.write(self.render_str(template, **kw))
class MainPage(Handler):
    # Landing page: plain-text placeholder response.
    def get(self):
        self.write("asciichan!")
# WSGI entry point: route "/" to MainPage; debug=True shows stack traces.
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
| [
"zhenw.wang@gmail.com"
] | zhenw.wang@gmail.com |
ce3a35be3812760d489066c9e18242b176ff7c97 | 35da3082f950aff29c309e78567fc4a539ba654d | /ventas_api/ventas_api/settings.py | cc3c4183066291dd26a42700ee910a3ace782dcb | [] | no_license | EdvinCV/E-Commerce | 1f125948488f2fde422cc01b30790464629373e9 | fbbdc4dbe8d6c28b985be0e20c69323ab98d80b5 | refs/heads/master | 2023-01-11T15:46:27.494404 | 2020-11-14T20:54:39 | 2020-11-14T20:54:39 | 312,899,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,809 | py | """
Django settings for ventas_api project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to VCS -- rotate it and load it from
# the environment before any production deployment.
SECRET_KEY = '(*wn3@h7)*kx=_=+na*9$ho(#d7993a47wfve9-qs1%vierq76'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Change default user model.
AUTH_USER_MODEL = 'usuarios.Usuario'
# Application definition
# App registry: project apps first, then Django contrib, then DRF and CORS.
INSTALLED_APPS = [
    # Local Apps
    'usuarios.apps.UsuariosConfig',
    'ventas.apps.VentasConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders'
]
# DRF: token-based authentication is the default for every API view.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication', # <-- And here
    ],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ventas_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ventas_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): with CORS_ORIGIN_ALLOW_ALL = True the two whitelists below
# have no effect -- tighten this before production.
CORS_ORIGIN_ALLOW_ALL = True # If this is used then `CORS_ORIGIN_WHITELIST` will not have any effect
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = [
    'http://localhost:3030',
] # If this is used, then not need to use `CORS_ORIGIN_ALLOW_ALL = True`
CORS_ORIGIN_REGEX_WHITELIST = [
    'http://localhost:3030',
] | [
"edvinc97@gmail.com"
] | edvinc97@gmail.com |
34dc5f92471ec178e2a5420ed1d5d28ca9cbbbad | 044822487dc3ef7683887ca72ac46f0a361dcd3c | /rateapp/forms.py | 7e4ff32f411169f51e3db75fbc79c02ba0437217 | [] | no_license | jer1ck0/Currencies | e41ca590e374403227e44eaaf8cdae33dd39192d | 794e61fe8d30426c630f26697496d16d33335c9d | refs/heads/master | 2023-06-23T11:17:57.454335 | 2021-05-12T06:55:20 | 2021-05-12T06:55:20 | 366,383,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from django import forms
class RequestForm(forms.Form):
    """Exchange-rate query: a currency pair plus an optional period."""
    first_currency = forms.CharField()
    second_currency = forms.CharField()
    period = forms.DateTimeField(required=False)
class CurrencyForm(forms.Form):
    """One currency-rate record: identifier and rate value (plus timestamp)."""
    currency_ident = forms.CharField()
    currency_rate = forms.CharField()
period = forms.DateTimeField() | [
"jer1ck0@lex.lebedev.example.com"
] | jer1ck0@lex.lebedev.example.com |
b5054b5f42826f0d2b5346bf1680095877833494 | ca31b0082522bd107510946fa4f9acf83536e50b | /biocoder/apps/common/view.py | 7e4ee6374d080aaf4cfe532093d0109d368fb299 | [
"Apache-2.0"
] | permissive | east301/biocoder-server | 3e850d5164f13cdcab1dd06482434666e86643bf | e7b937e99608fa18675c972ec4377d946896cbb1 | refs/heads/master | 2020-12-04T01:06:12.268189 | 2016-08-28T12:00:34 | 2016-08-28T12:00:34 | 66,765,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | #
# (c) 2016 biocoder developers
#
# This file is part of biocoder,
# released under Apache License Version 2.0 (http://www.apache.org/licenses/LICENCE).
#
import jsonschema
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
from django.core.exceptions import SuspiciousOperation
# ================================================================================
# validation
# ================================================================================
class JSONSchemaValidationMixin(object):
    """View mixin that validates POST parameters against ``self.schema``.

    Subclasses must define ``schema`` (a JSON-Schema dict).  On validation
    failure the request is rejected as suspicious (HTTP 400 in Django).
    """
    def dispatch(self, request, *args, **kwargs):
        if request.method == 'POST':
            try:
                jsonschema.validate(request.POST, self.schema)
            # Bug fix: jsonschema.validate raises
            # jsonschema.exceptions.ValidationError, which is NOT a
            # ValueError, so the original handler never fired.  ValueError
            # is still caught for backward compatibility.
            except (jsonschema.exceptions.ValidationError, ValueError) as exc:
                raise SuspiciousOperation('Failed to validate request parameters.') from exc
        return super(JSONSchemaValidationMixin, self).dispatch(request, *args, **kwargs)
class DjangoFormValidationMixin(object):
    """View mixin that validates POST data through a Django form.

    Subclasses must define ``form_validator_class``; the bound form is kept
    on ``self.form_validator`` for later use by the view.
    """
    def __init__(self, *args, **kwargs):
        super(DjangoFormValidationMixin, self).__init__(*args, **kwargs)
        self.form_validator = None

    def dispatch(self, request, *args, **kwargs):
        if request.method == 'POST':
            validator = self.form_validator_class(request.POST, request.FILES)
            self.form_validator = validator
            if not validator.is_valid():
                raise SuspiciousOperation('Failed to validate request parameters.')
        return super(DjangoFormValidationMixin, self).dispatch(request, *args, **kwargs)
# ================================================================================
# authentication
# ================================================================================
class StaffUserRequiredMixin(LoginRequiredMixin, StaffuserRequiredMixin):
    """Composite guard: the request user must be authenticated AND staff."""
    pass
| [
"tadaka@sb.ecei.tohoku.ac.jp"
] | tadaka@sb.ecei.tohoku.ac.jp |
37e09223e7074ba03f04238608d482908fc0a19a | 62261d7fbd2feab54b1b5f9f0fef33fd784873f9 | /src/deepfool.py | 8b318f762dff20a8b7b10e9f0e718ca60b020cb4 | [] | no_license | ychnlgy/DeepConsensus-experimental-FROZEN | 50ebfe25b33ce8715fb9f24285969831cef311f2 | 904ae3988fee1df20273e002ba53a49a0d811192 | refs/heads/master | 2020-03-31T06:18:38.029959 | 2018-12-02T05:04:52 | 2018-12-02T05:04:52 | 151,976,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | import numpy as np
from torch.autograd import Variable
import torch as torch
import copy
from torch.autograd.gradcheck import zero_gradients
def deepfool(image, net, num_classes=10, overshoot=0.02, max_iter=50):
    """
    DeepFool: iteratively push ``image`` across the nearest linearized
    decision boundary until the classifier's top-1 label flips.

    :param image: input tensor of shape (C, H, W); a batch axis is added internally
    :param net: network (input: images, output: values of activation **BEFORE** softmax)
    :param num_classes: number of top classes tested as attack targets (default = 10)
    :param overshoot: used as a termination criterion to prevent vanishing updates (default = 0.02)
    :param max_iter: maximum number of iterations for deepfool (default = 50)
    :return: (r_tot, loop_i, label, k_i, pert_image): minimal perturbation that
             fools the classifier, iterations used, original label, new
             estimated label, and the perturbed image
    """
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        image = image.cuda()
        net = net.cuda()
    else:
        print("Using CPU")
    # Clean-image logits; I holds class indices sorted by descending score,
    # so I[0] is the original top-1 label.
    f_image = net.forward(Variable(image[None, :, :, :], requires_grad=True)).data.cpu().numpy().flatten()
    I = (np.array(f_image)).flatten().argsort()[::-1]
    I = I[0:num_classes]
    label = I[0]
    input_shape = image.cpu().numpy().shape
    pert_image = copy.deepcopy(image)
    w = np.zeros(input_shape)
    r_tot = np.zeros(input_shape)
    loop_i = 0
    x = Variable(pert_image[None, :], requires_grad=True)
    fs = net.forward(x)
    k_i = label
    while k_i == label and loop_i < max_iter:
        pert = np.inf
        fs[0, I[0]].backward(retain_graph=True)
        grad_orig = x.grad.data.cpu().numpy().copy()
        for k in range(1, num_classes):
            # Reset accumulated gradients before differentiating class I[k].
            # Inlined replacement for torch.autograd.gradcheck.zero_gradients,
            # which was removed from recent PyTorch releases.
            if x.grad is not None:
                x.grad.detach_()
                x.grad.zero_()
            fs[0, I[k]].backward(retain_graph=True)
            cur_grad = x.grad.data.cpu().numpy().copy()
            # Linearized distance to the boundary between I[0] and I[k].
            w_k = cur_grad - grad_orig
            f_k = (fs[0, I[k]] - fs[0, I[0]]).data.cpu().numpy()
            pert_k = abs(f_k) / np.linalg.norm(w_k.flatten())
            # Keep the closest boundary seen so far.
            if pert_k < pert:
                pert = pert_k
                w = w_k
        # Minimal step to the chosen boundary; 1e-4 for numerical stability.
        r_i = (pert + 1e-4) * w / np.linalg.norm(w)
        r_tot = np.float32(r_tot + r_i)
        if is_cuda:
            pert_image = image + (1 + overshoot) * torch.from_numpy(r_tot).cuda()
        else:
            pert_image = image + (1 + overshoot) * torch.from_numpy(r_tot)
        x = Variable(pert_image, requires_grad=True)
        fs = net.forward(x)
        k_i = np.argmax(fs.data.cpu().numpy().flatten())
        loop_i += 1
    r_tot = (1 + overshoot) * r_tot
    return r_tot, loop_i, label, k_i, pert_image
| [
"ychnlgy@gmail.com"
] | ychnlgy@gmail.com |
cebbf6f6e8fc9240025c732d1601f9e7e36ed81b | e900ee347f3bb3f7e1f61f3a66a8ea5ca51df354 | /javbus/javbus/settings.py | d2432594d0514aa56b24b7f382e4a4fe74997b40 | [] | no_license | SakuraMaaya/JavbusScrapy | ff3a269a7a87e4e6c8c33231221b9378637c797b | f714304cd057b38e1f6ef3b37ec25d77e2816979 | refs/heads/master | 2020-03-13T16:09:41.959870 | 2018-08-10T02:05:55 | 2018-08-10T02:05:55 | 131,190,815 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | # -*- coding: utf-8 -*-
# Scrapy settings for javbus project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import os
BOT_NAME = 'javbus'
SPIDER_MODULES = ['javbus.spiders']
NEWSPIDER_MODULE = 'javbus.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'javbus (+http://www.yourdomain.com)'
DOWNLOAD_DELAY = 1 # 1 second between requests (this setting is in seconds, not ms)
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# Headers merged into every request.  NOTE(review): "method" and "scheme"
# are not HTTP header names (Scrapy derives those from the Request itself);
# likely only the user-agent entry has the intended effect -- verify.
DEFAULT_REQUEST_HEADERS = {
    "method":"GET",
    "scheme":"https",
    # "accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    # "accept-encoding":"gzip, deflate, sdch, br",
    # "accept-language":"zh-CN,zh;q=0.8",
    # "cache-control":"no-cache",
    # 'pragma':'no-cache',
    'user-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0'
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'javbus.middlewares.JavbusSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'javbus.middlewares.JavbusDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Store scraped images next to this settings module.
project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir,'images')
IMAGES_EXPIRES = 90 # images fetched within the last 90 days are not re-downloaded
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 3.0
AUTOTHROTTLE_MAX_DELAY = 30.0
CONCURRENT_REQUESTS = 5
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"czkme@qq.com"
] | czkme@qq.com |
77d6bcfd63501931a3dd644c5d7be0143c691f69 | 82b9c39b136428f5d2e05e14a61c8a535fd7d81c | /net/binary_test.py | c1a09faae0e2446fb973e8cafe0adb994ef9c2f4 | [] | no_license | DongminK/python | a6889c4deed48738fbb554b78341315955fe2813 | 6a88ba11f65d430e2109864ea20a7b6e1632e342 | refs/heads/master | 2020-03-16T17:21:20.784177 | 2018-05-29T09:06:17 | 2018-05-29T09:06:17 | 132,828,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | ## STR
import struct
from insoft.openmanager.message.packet import Packet
# STR round-trip: text -> utf-8 bytes -> text.
sample_text = "kim dong min"
encoded_text = sample_text.encode("utf-8")
print("BIN : %s" % encoded_text)
print("LENGTH : %d" % len(encoded_text))
print("STR : %s" % encoded_text.decode())
print("")
# INT round-trip via int.to_bytes / int.from_bytes, big-endian, width 4.
small_int = 4
small_int_bin = small_int.to_bytes(4, "big")
print("BIN : %s" % small_int_bin)
print("LENGTH : %d" % len(small_int_bin))
print("INT : %s" % int.from_bytes(small_int_bin, "big"))
print("")
# Fixed-width protocol fields (server id, flag, request id), big-endian.
for field_bytes in (b'\x0b', b'\x00', b'\x00\x04\x93\xec'):
    print(len(field_bytes))
    print(int.from_bytes(field_bytes, "big"))
# LONG: same round-trip with an 8-byte width.
big_int = 8
big_int_bin = big_int.to_bytes(8, "big")
print("BIN : %s" % big_int_bin)
print("LENGTH : %d" % len(big_int_bin))
print("LONG : %s" % int.from_bytes(big_int_bin, "big"))
print("")
# FLOAT: native 4-byte pack/unpack, then explicit big-endian reads.
p = struct.pack('f', 3.141592654)
print(p, len(p), struct.unpack('f', p))
print(struct.unpack('!i', b'\x00\x00\x00\x06'))
# unpack_from with offset 2 skips the 2-byte prefix of the 6-byte field.
print(struct.unpack_from('!I', b'\x0b\x00\x00\x04\x93\xec', 2))
| [
"kdm0228@gmail.com"
] | kdm0228@gmail.com |
e98c27e3713f47c89125bc6ab3b5d0e5b25d33b7 | bf20eeafee9a9ba4639d3d8c3a917604721dfa20 | /cloudy/db/pgpool.py | 0226a90aff3c78960e1993481f2cc01de5b2ea15 | [
"MIT"
] | permissive | un33k/python-cloudy | 2eb2ebfaca4d9546742b0c1fa7f59e5b4c8625d1 | fb20f45b782cf81a1924cc93ce61d11c8fc27859 | refs/heads/master | 2023-03-31T08:57:27.235184 | 2023-03-19T01:12:21 | 2023-03-19T01:12:21 | 6,311,992 | 1 | 2 | MIT | 2018-12-16T02:02:52 | 2012-10-20T17:59:03 | Python | UTF-8 | Python | false | false | 1,641 | py | import os
import re
import sys
import time
from operator import itemgetter
from fabric.api import local
from fabric.api import run
from fabric.api import task
from fabric.api import sudo
from fabric.api import put
from fabric.api import env
from fabric.api import settings
from fabric.api import hide
from fabric.contrib import files
from cloudy.db.psql import db_psql_default_installed_version
from cloudy.sys.etc import sys_etc_git_commit
from cloudy.util.common import sys_restart_service
def db_pgpool2_install():
    """ Install pgpool2 - Ex: (cmd:)"""
    # Single apt package; the join is kept so packages can be appended later.
    # (Removed the redundant "'%s' %" wrapper around the join.)
    requirements = ' '.join([
        'pgpool2',
    ])
    # install requirements
    sudo('apt -y install {}'.format(requirements))
    sys_etc_git_commit('Installed pgpool2')
def db_pgpool2_configure(dbhost='', dbport=5432, localport=5432):
    """ Configure pgpool2: push pgpool.conf and /etc/default/pgpool2 to the
    remote host, substituting backend host/port and the local listen port,
    then restart the service. """
    cfgdir = os.path.join(os.path.dirname( __file__), '../cfg')
    localcfg = os.path.expanduser(os.path.join(cfgdir, 'pgpool2/pgpool.conf'))
    remotecfg = '/etc/pgpool2/pgpool.conf'
    # Replace the remote config wholesale with the local template.
    sudo('rm -rf ' + remotecfg)
    put(localcfg, remotecfg, use_sudo=True)
    # Fill in the template placeholders (literal dbhost/dbport/localport).
    sudo('sed -i "s/dbhost/{}/g" {}'.format(dbhost, remotecfg))
    sudo('sed -i "s/dbport/{}/g" {}'.format(dbport, remotecfg))
    sudo('sed -i "s/localport/{}/g" {}'.format(localport, remotecfg))
    localdefault = os.path.expanduser(os.path.join(cfgdir, 'pgpool2/default-pgpool2'))
    remotedefault = '/etc/default/pgpool2'
    sudo('rm -rf ' + remotedefault)
    put(localdefault, remotedefault, use_sudo=True)
    sys_etc_git_commit('Configured pgpool2')
    sys_restart_service('pgpool2')
| [
"val@neekware.com"
] | val@neekware.com |
9d35ed852c0d06a0a8331cb9469ce2bb6b1cce4b | 6fa42c2dd3d2fad482e354495ee15616784425e8 | /farm_management/lands/models/paddocks.py | ff9517f19bb80fe11a79efe7f6a45050a6e52a78 | [
"MIT"
] | permissive | alexanders0/farm-management | ccf74f9a9d99f4d20173e360e6f776288ce636f3 | 53ed821bbbed312848cf331f8f961ef16c59fb99 | refs/heads/main | 2023-07-18T08:51:20.876231 | 2021-09-01T04:22:59 | 2021-09-01T04:22:59 | 384,026,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | """Paddocks model."""
# Django
from django.db import models
# Utilities
from farm_management.utils.models import FarmModel
class Paddock(FarmModel):
    """Paddock model."""
    # Display name and free-text description of the paddock.
    name = models.CharField('paddock name', max_length=150)
    description = models.CharField('paddock description', max_length=150)
    # Paddock size; units are not recorded here -- TODO confirm.
    # NOTE(review): "meassure" is a typo for "measure", kept because renaming
    # the field would require a schema migration and break existing queries.
    meassure = models.FloatField(null=True)
    is_active = models.BooleanField(
        'active status',
        default=False,
        help_text='It is used to know if a group of animals is currently using the paddock.'
    )
    # Owning land; deleting the land cascades to its paddocks.
    land = models.ForeignKey(
        'lands.Land',
        on_delete=models.CASCADE
    )
    def __str__(self):
        """Return paddock name."""
        return self.name
| [
"alexandersn059@gmail.com"
] | alexandersn059@gmail.com |
4867499bf7288a93391ea017ca3fdc81362a0829 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/jhm.py | 8a63df3857f86c108b49d23e9c21fe4735ab4a02 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code.  lineRemaining is the token list after the 'jHM'
    # keyword; a printable payload is delimited by standalone '"' tokens.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    # Interpret each line of the program file: only 'jHM' statements are
    # legal; anything else aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            # NOTE(review): a blank line makes data[0] raise IndexError --
            # presumably program files never contain one; verify.
            if data[0] == 'jHM':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
0ff03af25c4cf1f6e46d865b868f3d0af8f04e17 | 71f8a5841e555d611463fbee6efa1ca2c835455a | /distribution_features.py | 20b69589956599b9f64614318f219df7e13d50c8 | [] | no_license | bons-ai/NASA | bba28bccc3b793ddae2f18a86aa48a91dcb0a878 | bf93d2f0bd05e0740bca66702b0409b8cf697807 | refs/heads/master | 2020-08-21T09:21:00.597056 | 2019-10-19T22:09:31 | 2019-10-19T22:09:31 | 216,129,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import numpy as np
from sklearn.decomposition import NMF
import matplotlib.pyplot as plt
import umap
import hdbscan
import json
# Precomputed histogram features; .item() unwraps the pickled dict saved
# via np.save.  Assumed keys: "hist", "h", "pos" -- TODO confirm schema.
filename = "hist_0530_03_09.npy"
data = np.load(filename, allow_pickle=True).item()
hist = np.array(data["hist"])
h = np.array(data["h"])
pos = np.array(data["pos"])
# 2-D UMAP embedding of the histograms, then density-based clustering.
reducer = umap.UMAP()
embedding = reducer.fit_transform(hist)
clusterer = hdbscan.HDBSCAN(algorithm='best', alpha=1.0, approx_min_span_tree=True,
                            gen_min_span_tree=False, leaf_size=40,
                            metric='euclidean', min_cluster_size=15, min_samples=None, p=None)
clusterer.fit(embedding)
#
# print(clusterer.labels_)
#
# plt.figure()
# plt.scatter(embedding[:, 0], embedding[:, 1], c=clusterer.labels_)
# plt.show()
#
# plt.figure(figsize=(12,12))
# plt.scatter(pos[0, :], pos[1,:], c=clusterer.labels_)
# plt.colorbar()
# plt.show()
# One record per point: coordinates plus its cluster label (-1 = noise).
results = []
for n in range(pos.shape[1]):
    results.append({"coordinates": list(pos[:, n]),
                    "type": float(clusterer.labels_[n])})
# Bounding box; assumes row 0 holds latitudes, row 1 longitudes -- TODO confirm.
full_data = {"points": results,
             "minLat": np.min(pos[0, :]),
             "maxLat": np.max(pos[0, :]),
             "minLong": np.min(pos[1, :]),
             "maxLong": np.max(pos[1, :])}
with open("results.json", "w", encoding='utf-8') as f:
    json.dump(full_data, f)
| [
"mohamed.bahdine.1@ulaval.ca"
] | mohamed.bahdine.1@ulaval.ca |
ce293f3f21f040c9819e5a37cc9389a60372c6ce | 086be2c7d301aee05126964b381ae9a3563183f8 | /persistence/memory-map.py | 98382a34be776929ddc1304171dd48c29aea34d5 | [] | no_license | wkrea/DownTheRabbitHole | fe4ca2aa6141ee482a0f2c649e19243b0431a2f1 | 60fe7601d1725e7458a882de661fa1412fe37fe3 | refs/heads/master | 2022-01-29T15:33:53.338936 | 2019-03-20T19:12:44 | 2019-03-20T19:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | """
This module writes a matrix of numbers of int and long types to a file in binary
format. It mmaps that file, and allows efficient alters-in-place through the api.
"""
import mmap
import struct
import os
def format_to_size(fmt):
    """Return the byte width of a single struct format character.

    Only the two codes used by this module are supported: 'i' (4-byte int)
    and 'l' (8-byte long on LP64 platforms).  Any other code now raises
    ValueError instead of silently returning None.
    """
    if fmt == 'i':
        return 4
    elif fmt == 'l':
        return 8
    raise ValueError("unsupported format character: %r" % (fmt,))
def get_column_offset(col_index, row_format):
    """Byte offset of column ``col_index`` within one packed row.

    Walks the format string, accumulating the widths of the preceding
    columns; falls through to None for an out-of-range index.
    """
    offset = 0
    for position, code in enumerate(row_format):
        if position == col_index:
            return offset
        offset += format_to_size(code)
def get_row_length(row_format):
    """Total packed size, in bytes, of one row."""
    total = 0
    for code in row_format:
        total += format_to_size(code)
    return total
def pack_row(row, row_format):
    """Serialize ``row`` according to ``row_format``, one struct code per value.

    Returns the concatenated binary string.  Uses ``b"".join`` so the
    function also works on Python 3, where struct.pack returns bytes;
    on Python 2 ``b""`` is ``""``, so behavior is unchanged.
    """
    packed = [struct.pack(fmt, value) for fmt, value in zip(row_format, row)]
    return b"".join(packed)
def unpack_row(row, row_format):
    """Decode one packed row back into a list of Python numbers."""
    values = []
    for position, code in enumerate(row_format):
        start = get_column_offset(position, row_format)
        width = format_to_size(code)
        values.append(struct.unpack(code, row[start:start + width])[0])
    return values
def write_block(blk, row_format, filename=None):
    """Write every row of ``blk`` to disk in packed binary form.

    ``filename`` defaults to the module-level FILENAME so existing two-arg
    callers keep their behavior; passing it explicitly removes the hidden
    dependency on that global.
    """
    target = FILENAME if filename is None else filename
    with open(target, 'w+') as fp:
        for row in blk:
            fp.write(pack_row(row, row_format))
def file_to_block(filename, row_format):
    """Lazily yield decoded rows from a packed binary file via mmap.

    Removed the unused ``packed`` accumulator from the original.
    """
    idx, row_length = 0, get_row_length(row_format)
    with open(filename, 'r+') as fp:
        mapped = mmap.mmap(fp.fileno(), 0)
        while True:
            row = mapped[idx * row_length:(idx + 1) * row_length]
            if not row:
                break
            yield unpack_row(row, row_format)
            idx += 1
def print_file(filename, row_format):
    """Print every decoded row of a packed binary file, one list per line.

    Removed the unused ``packed`` accumulator from the original.
    """
    idx, row_length = 0, get_row_length(row_format)
    with open(filename, 'r+') as fp:
        mapped = mmap.mmap(fp.fileno(), 0)
        while True:
            row = mapped[idx * row_length:(idx + 1) * row_length]
            if not row:
                break
            print(unpack_row(row, row_format))
            idx += 1
def batch_increment_column(filename, row_format, column_id, incrby):
    """Add ``incrby`` to column ``column_id`` of every row, in place via mmap.

    Bug fix: the updated value is re-packed with the column's own format
    code (row_format[column_id]) instead of a hard-coded 'i'; the original
    corrupted/truncated 8-byte 'l' columns and only worked for 'i' by luck.
    """
    idx = 0
    col_format = row_format[column_id]
    row_length = get_row_length(row_format)
    col_offset = get_column_offset(column_id, row_format)
    col_size = format_to_size(col_format)
    with open(filename, 'r+') as fp:
        mapped = mmap.mmap(fp.fileno(), 0)
        while True:
            row = mapped[idx * row_length:(idx + 1) * row_length]
            if not row:
                break
            updated = unpack_row(row, row_format)[column_id] + incrby
            start = idx * row_length + col_offset
            stop = start + col_size
            idx += 1
            mapped[start:stop] = struct.pack(col_format, updated)
            # Per-row flush/fsync kept from the original for durability,
            # at the cost of throughput.
            mapped.flush()
            os.fsync(fp.fileno())
if __name__ == '__main__':
    # Row layout: long, int, int, long, int (see format codes above).
    ROW_FORMAT = 'liili'
    BIGINT, INT = 8, 4
    FILENAME = 'myfile.dat'
    # Demo matrix; floats such as 10e12 are coerced when packed as longs.
    block = [
        [10e12, 55, 74, 1234, 45376],
        [2.1e14, 62, 2462, 5678, 324],
        [4.3e10, 12, 24, 91011, 2346],
        [8e15, 15, 768, 12131, 23]
    ]
    write_block(block, ROW_FORMAT)
    print_file(FILENAME, ROW_FORMAT)
    # Bump column 1 (an 'i' column) of every row by 2, persisting in place.
    batch_increment_column(FILENAME, ROW_FORMAT, 1, 2)
| [
"andrew.kelleher@venmo.com"
] | andrew.kelleher@venmo.com |
122aa3f61736137c1b3bfcdc6d80491b14a6acbd | e01d9449d415a37f3afa2b20be6ba4fcbfef5650 | /Version 2/Menu.py | 7d2be37bb7bf944441d923ec5cf9219af0430f8b | [] | no_license | cramos5/Inova-Class-Extraction | 082f0c38a48444f9443730a1d127aeb971b4bee0 | 75e4f2cc4ea16aad13dcdc88cd8b5c85696a00a4 | refs/heads/master | 2022-02-06T21:46:46.207031 | 2017-08-03T18:04:28 | 2017-08-03T18:04:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import os
def Menu_Instructions():
    """Print the one-time setup instructions, wait for a keypress, then clear the screen."""
    print("******************************************************************************")
    print("Set Up Instructions:\n1) Generate Class Screening Report from Careworks")
    print("Excel file should have the Class ID in the A Column\n")
    print("2) Save generated report as a xlsx file within Excel\n")
    print("3) Place new file in the same location as this program file\n")
    print("******************************************************************************")
    # The entered text is irrelevant; input() is only used to pause.
    # (The old `name = ...` binding was never read.)
    input("Please enter any key to continue")
    os.system('cls')  # NOTE: 'cls' is Windows-only; fails silently elsewhere.
| [
"cr4mos@gmail.com"
] | cr4mos@gmail.com |
a12d1c1d69d2f4a99bc1bedd4191dadd159c9ae1 | 0d26474cc10de46cfe0dcc45c57ab299c7b515bc | /propagsim/cp/classes.py | 825f3b4e324e7af22f83f4d1be99794cdb41fa5e | [
"MIT"
] | permissive | payoto/py-propagsim | 3f735283c51764836ae170f391225f7661f57f23 | d704796c77bc6c921c5f14fab1e27aaadbdd640e | refs/heads/master | 2021-04-20T13:36:42.030817 | 2020-05-17T01:37:26 | 2020-05-17T01:37:26 | 249,688,117 | 0 | 0 | null | 2020-03-24T11:18:35 | 2020-03-24T11:18:35 | null | UTF-8 | Python | false | false | 26,787 | py | import cupy as cp
import numpy as np
import os, pickle
from time import time
from utils import get_least_severe_state, squarify, get_square_sampling_probas, get_cell_sampling_probas, vectorized_choice, group_max, append, repeat, sum_by_group
class State:
    """An epidemiological state an agent can carry.

    The state determines how contagious, how sensitive to contamination and
    how severely ill its carrier is, via the three scores given at
    construction time.
    """
    def __init__(self, id, name, contagiousity, sensitivity, severity):
        self.id = id
        self.name = name
        self.contagiousity = contagiousity
        self.sensitivity = sensitivity
        self.severity = severity

    def __str__(self):
        """Human-readable form: the state's name."""
        return self.get_name()

    def get_id(self):
        return self.id

    def get_name(self):
        return self.name

    def get_contagiousity(self):
        return self.contagiousity

    def get_sensitivity(self):
        return self.sensitivity

    def get_severity(self):
        return self.severity
class Agent:
    """An individual that carries a State, lives in a home cell and moves between cells.

    `transitions`/`durations` drive how the agent's state evolves; `p_move`
    is its probability of moving; `least_state` (derived from `states`) is
    the state first entered upon contamination.
    """
    def __init__(self, id, p_move, states, transitions, durations, current_state, home_cell_id, current_state_duration=0, been_infected=0):
        self.id = id
        self.p_move = p_move
        self.states = states
        self.transitions = transitions
        self.durations = durations
        self.current_state = current_state
        self.home_cell_id = home_cell_id
        self.current_state_duration = current_state_duration  # how long the agent has been in this state
        self.been_infected = been_infected
        # Least severe state among `states`: entered first upon contamination.
        self.least_state = get_least_severe_state(states)

    def get_id(self):
        return self.id

    def get_p_move(self):
        return self.p_move

    def set_p_move(self, p_move):
        self.p_move = p_move

    def get_states(self):
        return self.states

    def set_states(self, states):
        # BUG FIX: this setter previously did `return self.states` and never
        # stored its argument; it now actually assigns.
        self.states = states

    def get_transitions(self):
        return self.transitions

    def set_transitions(self, transitions):
        self.transitions = transitions

    def get_transitions_id(self):
        return self.transitions.get_id()

    def get_transitions_arr(self):
        return self.transitions.get_arr()

    def get_durations(self):
        return self.durations

    def set_durations(self, durations):
        self.durations = durations

    def get_current_state_id(self):
        return self.current_state.get_id()

    def get_current_state_duration(self):
        return self.current_state_duration

    def set_current_state(self, current_state):
        self.current_state = current_state

    def get_home_cell_id(self):
        return self.home_cell_id

    def set_home_cell_id(self, home_cell_id):
        self.home_cell_id = home_cell_id

    def get_least_state_id(self):
        return self.least_state.get_id()

    def get_severity(self):
        return self.current_state.get_severity()
class Transitions:
    """A named state-transition probability matrix.

    `arr` is cast to float32 on construction. Presumably rows are source
    states and columns destination states (Map cumsums along axis 1 and
    samples per row) -- confirm against utils.vectorized_choice.
    """
    def __init__(self, id, arr):
        self.id = id
        self.arr = arr.astype(cp.float32)
    def get_id(self):
        return self.id
    def get_arr(self):
        return self.arr
class Cell:
    """A place where several agents can gather and possibly get infected.

    Each cell sits at a Euclidean `position`; `attractivity` influences the
    probability that agents from other cells move here, and `unsafety`
    scales the contagion probability inside the cell.
    """
    def __init__(self, id, position, attractivity, unsafety):
        self.id = id
        self.position = position
        self.attractivity = attractivity
        self.unsafety = unsafety

    def get_id(self):
        return self.id

    def get_position(self):
        return self.position

    def set_position(self, position):
        self.position = position

    def get_attractivity(self):
        return self.attractivity

    def set_attractivity(self, attractivity):
        self.attractivity = attractivity

    def get_unsafety(self):
        return self.unsafety

    def set_unsafety(self, unsafety):
        self.unsafety = unsafety
class Map:
def __init__(self, cell_ids, attractivities, unsafeties, xcoords, ycoords, unique_state_ids,
unique_contagiousities, unique_sensitivities, unique_severities, transitions, agent_ids, home_cell_ids, p_moves, least_state_ids,
current_state_ids, current_state_durations, durations, transitions_ids, dscale=1, current_period=0, verbose=0):
""" A map contains a list of `cells`, `agents` and an implementation of the
way agents can move from a cell to another. `possible_states` must be distinct.
We let each the possibility for each agent to have its own least severe state to make the model more flexible.
Default parameter set to None in order to be able to create an empty map and load it from disk
`dcale` allows to weight the importance of the distance vs. attractivity for the moves to cells
"""
self.current_period = current_period
self.verbose = verbose
self.dscale = dscale
self.n_infected_period = 0
# For cells
self.cell_ids = cell_ids
self.attractivities = attractivities
self.unsafeties = unsafeties
self.xcoords = xcoords
self.ycoords = ycoords
# For states
self.unique_state_ids = unique_state_ids
self.unique_contagiousities = unique_contagiousities
self.unique_sensitivities = unique_sensitivities
self.unique_severities = unique_severities
self.transitions = transitions
# For agents
self.agent_ids = agent_ids
self.home_cell_ids = home_cell_ids
self.p_moves = p_moves
self.least_state_ids = least_state_ids
self.current_state_ids = current_state_ids
self.current_state_durations = current_state_durations # how long the agents are already in their current state
self.durations = cp.squeeze(durations) # 2d, one row for each agent
self.transitions_ids = transitions_ids
# for cells: cell_ids, attractivities, unsafeties, xcoords, ycoords
# for states: unique_contagiousities, unique_sensitivities, unique_severities, transitions
# for agents: home_cell_ids, p_moves, least_state_ids, current_state_ids, current_state_durations, durations (3d)
<<<<<<< HEAD
=======
>>>>>>> 024791b60731bd81bf57a6c52f3f58c77cab4579
# Compute inter-squares proba transition matrix
self.coords_squares, self.square_ids_cells = squarify(xcoords, ycoords)
self.set_attractivities(attractivities)
# the first cells in parameter `cells`must be home cell, otherwise modify here
self.agent_squares = self.square_ids_cells[self.home_cell_ids]
cp.cuda.Stream.null.synchronize()
# Re-order transitions by ids
order = cp.argsort(self.transitions_ids)
self.transitions_ids = self.transitions_ids[order]
self.transitions = cp.dstack(self.transitions)
self.transitions = self.transitions[:,:, order]
cp.cuda.Stream.null.synchronize()
# Compute upfront cumulated sum
self.transitions = cp.cumsum(self.transitions, axis=1)
# Compute probas_move for agent selection
# Define variable for monitoring the propagation (r factor, contagion chain)
self.n_contaminated_period = 0 # number of agent contaminated during current period
self.n_diseased_period = self.get_n_diseased()
self.r_factors = cp.array([])
# TODO: Contagion chains
# Define arrays for agents state transitions
self.infecting_agents, self.infected_agents, self.infected_periods = cp.array([]), cp.array([]), cp.array([])
    def contaminate(self, selected_agents, selected_cells):
        """ Contamination step for agents grouped by cell.

        Both arguments have the same length: agent i of `selected_agents` is
        in cell i of `selected_cells`. If an agent with sensitivity > 0
        shares a cell with an agent with contagiousity > 0, contagion may
        occur; newly infected agents are switched to their least severe
        state and recorded in the contagion-chain arrays.
        The `ttt ...` prints are timing instrumentation.
        """
        t_start = time()
        i = 0
        t0 = time()
        selected_unsafeties = self.unsafeties[selected_cells]
        selected_agents = selected_agents.astype(cp.uint32)
        selected_states = self.current_state_ids[selected_agents]
        selected_contagiousities = self.unique_contagiousities[selected_states]
        selected_sensitivities = self.unique_sensitivities[selected_states]
        print(f'ttt first part contaminate: {time() - t0}')
        # Find cells where max contagiousity == 0 (no contagiousity can happen there)
        t0 = time()
        cont_sens = cp.multiply(selected_contagiousities, selected_sensitivities)
        print(f'ttt group max sensitivities: {time() - t0}')
        # Combine them
        if cp.max(cont_sens) == 0:
            return
        t0 = time()
        # NOTE(review): this keeps only agents whose OWN contagiousity AND
        # sensitivity are both > 0 (per-agent product), not cells containing
        # both kinds of agents -- confirm this is the intended filter.
        mask_zero = (cont_sens > 0)
        selected_agents = selected_agents[mask_zero]
        selected_contagiousities = selected_contagiousities[mask_zero]
        selected_sensitivities = selected_sensitivities[mask_zero]
        selected_cells = selected_cells[mask_zero]
        selected_unsafeties = selected_unsafeties[mask_zero]
        print(f'ttt mask zero all: {time() - t0}')
        # Compute proportion (contagious agent) / (non contagious agent) by cell
        t0 = time()
        # NOTE(review): the two cp.unique calls run over different subsets of
        # cells; if any cell has only contagious (or only non-contagious)
        # agents the element-wise division below is misaligned -- verify.
        _, n_contagious_by_cell = cp.unique(selected_cells[selected_contagiousities > 0], return_counts=True)
        _, n_non_contagious_by_cell = cp.unique(selected_cells[selected_contagiousities == 0], return_counts=True)
        print(f'ttt non contagious: {time() - t0}')
        i += 1
        t0 = time()
        p_contagious = cp.divide(n_contagious_by_cell, n_non_contagious_by_cell)
        n_selected_agents = selected_agents.shape[0]
        print(f'ttt p_contagious: {time() - t0}')
        if self.verbose > 1:
            print(f'{n_selected_agents} selected agents after removing cells with max sensitivity or max contagiousity==0')
        if n_selected_agents == 0:
            return
        # Find for each cell which agent has the max contagiousity inside (it will be the contaminating agent)
        t0 = time()
        max_contagiousities, mask_max_contagiousities = group_max(data=selected_contagiousities, groups=selected_cells)
        print(f'ttt max contagious: {time() - t0}')
        t0 = time()
        infecting_agents = selected_agents[mask_max_contagiousities]
        selected_contagiousities = selected_contagiousities[mask_max_contagiousities]
        print(f'ttt mask max contagious: {time() - t0}')
        # Select agents that can be potentially infected ("pinfected") and corresponding variables
        t0 = time()
        pinfected_mask = (selected_sensitivities > 0)
        pinfected_agents = selected_agents[pinfected_mask]
        selected_sensitivities = selected_sensitivities[pinfected_mask]
        selected_unsafeties = selected_unsafeties[pinfected_mask]
        selected_cells = selected_cells[pinfected_mask]
        print(f'ttt p_infected_mask: {time() - t0}')
        # Group `selected_cells` and expand `infecting_agents` and `selected_contagiousities` accordingly
        # There is one and only one infecting agent by pinselected_agentsfected_cell so #`counts` == #`infecting_agents`
        t0 = time()
        _, inverse = cp.unique(selected_cells, return_inverse=True)
        print(f'ttt inverse select cell: {time() - t0}')
        # TODO: ACHTUNG: count repeat replace by inverse here
        t0 = time()
        infecting_agents = infecting_agents[inverse]
        selected_contagiousities = selected_contagiousities[inverse]
        p_contagious = p_contagious[inverse]
        print(f'ttt p_contagious inverse: {time() - t0}')
        # Compute contagions: p = contagiousity * sensitivity * cell unsafety
        t0 = time()
        res = cp.multiply(selected_contagiousities, selected_sensitivities)
        res = cp.multiply(res, selected_unsafeties)
        print(f'ttt cp.multiply: {time() - t0}')
        # Modify probas contamination according to `p_contagious`
        t0 = time()
        mask_p = (p_contagious < 1)
        res[mask_p] = cp.multiply(res[mask_p], p_contagious[mask_p])
        res[~mask_p] = 1 - cp.divide(1 - res[~mask_p], p_contagious[~mask_p])
        print(f'ttt res mask p: {time() - t0}')
        t0 = time()
        # Bernoulli draw per potentially-infected agent
        draw = cp.random.uniform(size=infecting_agents.shape[0])
        draw = (draw < res)
        infecting_agents = infecting_agents[draw]
        infected_agents = pinfected_agents[draw]
        n_infected_agents = infected_agents.shape[0]
        print(f'ttt n_infected draw: {time() - t0}')
        if self.verbose > 1:
            print(f'Infecting and infected agents should be all different, are they? {((infecting_agents == infected_agents).sum() == 0)}')
            print(f'Number of infected agents: {n_infected_agents}')
        t0 = time()
        # Switch the newly infected agents to their least severe state and
        # record the (infecting, infected, period) triples for the chain.
        self.current_state_ids[infected_agents] = self.least_state_ids[infected_agents]
        self.current_state_durations[infected_agents] = 0
        self.n_infected_period += n_infected_agents
        self.infecting_agents = append(self.infecting_agents, infecting_agents)
        self.infected_agents = append(self.infected_agents, infected_agents)
        self.infected_periods = append(self.infected_periods, cp.multiply(cp.ones(n_infected_agents), self.current_period))
        print(f'ttt final: {time() - t0}')
        print(f'contaminate computed in {time() - t_start}')
    def move_agents(self, selected_agents):
        """ Sample a destination for each agent: first the square where it
        moves, then the cell inside that square.

        Returns (selected_agents, selected_cells); the agents array is
        returned because earlier revisions re-ordered it (the triple-quoted
        string blocks below are that retired code, kept for reference).
        """
        t0 = time()
        selected_agents = selected_agents.astype(cp.uint32)
        agents_squares_to_move = self.agent_squares[selected_agents]
        """
        order = cp.argsort(agents_squares_to_move)
        selected_agents = selected_agents[order]
        agents_squares_to_move = agents_squares_to_move[order]
        # Compute number of agents by square
        unique_square_ids, counts = cp.unique(agents_squares_to_move, return_counts=True)
        # Select only rows corresponding to squares where there are agents to move
        square_sampling_ps = self.square_sampling_probas[unique_square_ids,:]
        # Apply "repeat sample" trick
        square_sampling_ps = cp.repeat(square_sampling_ps, counts.tolist(), axis=0)
        """
        square_sampling_ps = self.square_sampling_probas[agents_squares_to_move,:]
        # Chose one square for each row (agent), considering each row as a sample proba
        selected_squares = vectorized_choice(square_sampling_ps)
        """
        order = cp.argsort(selected_squares)
        selected_agents = selected_agents[order]
        selected_squares = selected_squares[order]
        """
        if self.verbose > 1:
            print(f'{(agents_squares_to_move != selected_squares).sum()}/{selected_agents.shape[0]} agents moving outside of their square')
        # Now select cells in the squares where the agents move
        # ACHTUNG: change unique repeat to inverse
        unique_selected_squares, inverse = cp.unique(selected_squares, return_inverse=True)
        # unique_selected_squares = unique_selected_squares.astype(cp.uint16)
        cell_sampling_ps = self.cell_sampling_probas[unique_selected_squares,:]
        cell_sampling_ps = cell_sampling_ps[inverse,:]
        """
        cell_sampling_ps = cp.repeat(cell_sampling_ps, counts.tolist(), axis=0)
        cell_sampling_ps = cell_sampling_ps.astype(cp.float16) # float16 to avoid max memory error, precision should be enough
        """
        selected_cells = vectorized_choice(cell_sampling_ps)
        # Now we have like "cell 2 in square 1, cell n in square 2 etc." we have to go back to the actual cell id
        index_shift = self.cell_index_shift[selected_squares].astype(cp.uint32)
        selected_cells = cp.add(selected_cells, index_shift)
        # return selected_agents since it has been re-ordered
        print(f'move_agents computed in {time() - t0}')
        return selected_agents, selected_cells
    def make_move(self):
        """ Determine which agents move this period, move them and run the
        contamination process in their destination cells.

        Move probability is p_move scaled down by state severity (severely
        ill agents move less). The `t ...` prints are timing instrumentation.
        """
        probas_move = cp.multiply(self.p_moves.flatten(), 1 - self.unique_severities[self.current_state_ids])
        draw = cp.random.uniform(size=probas_move.shape[0])
        t0 = time()
        draw = (draw < probas_move)
        print(f't draw: {time() - t0}')
        t0 = time()
        selected_agents = self.agent_ids[draw]
        print(f't selected: {time() - t0}')
        t0 = time()
        selected_agents, selected_cells = self.move_agents(selected_agents)
        print(f't move_agents(): {time() - t0}')
        if self.verbose > 1:
            print(f'{selected_agents.shape[0]} agents selected for moving')
        t0 = time()
        self.contaminate(selected_agents, selected_cells)
        print(f't contaminate(): {time() - t0}')
    def forward_all_cells(self):
        """ Move all agents on the map one time step (period) forward:
        transition agents whose state duration has elapsed, run the
        end-of-period contamination at home cells, update the r factor
        bookkeeping and advance the period counter. """
        # Per-agent duration of their current state.
        agents_durations = self.durations[cp.arange(0, self.durations.shape[0]), self.current_state_ids].flatten()
        print(f'DEBUG: agents_durations.shape: {agents_durations.shape}, self.durations.shape: {self.durations.shape}, self.current_state_ids.shape: {self.current_state_ids.shape}')
        to_transit = (self.current_state_durations == agents_durations)
        self.current_state_durations += 1
        to_transit = self.agent_ids[to_transit]
        self.transit_states(to_transit)
        # Contamination at home by end of the period
        self.contaminate(self.agent_ids, self.home_cell_ids)
        # Update r and associated variables: infections this period per
        # diseased agent of the previous period.
        r = self.n_infected_period / self.n_diseased_period if self.n_diseased_period > 0 else 0
        r = cp.array([r])
        if self.verbose > 1:
            print(f'period {self.current_period}: r={r}')
        self.r_factors = append(self.r_factors, r)
        self.n_diseased_period = self.get_n_diseased()
        self.n_infected_period = 0
        # Move one period forward
        self.current_period += 1
    def transit_states(self, agent_ids_transit):
        """ Draw the next state for each agent in `agent_ids_transit` from
        its transition matrix row and apply it via change_state_agents(). """
        if agent_ids_transit.shape[0] == 0:
            return
        t0 = time()
        agent_ids_transit = agent_ids_transit.astype(cp.uint32)
        agent_current_states = self.current_state_ids[agent_ids_transit]
        agent_transitions = self.transitions_ids[agent_current_states]
        # Select rows corresponding to transitions to do
        transitions = self.transitions[agent_current_states,:,agent_transitions]
        # Select new states according to transition matrix
        new_states = vectorized_choice(transitions)
        self.change_state_agents(agent_ids_transit, new_states)
        print(f'transit_states computed in {time() - t0}s')
    def get_states_numbers(self):
        """ For all possible states, return the number of agents currently in
        that state on the map.

        Returns a pair of parallel cupy arrays: (state_ids, n_agents), where
        n_agents[i] is the number of agents in state state_ids[i]. """
        state_ids, n_agents = cp.unique(self.current_state_ids, return_counts=True)
        return state_ids, n_agents
    def get_n_diseased(self):
        # Count agents whose state severity is strictly between 0 and 1
        # (severity == 1 is excluded; presumably a terminal state -- confirm).
        return ((self.unique_severities[self.current_state_ids] > 0) & (self.unique_severities[self.current_state_ids] < 1)).sum()
    def get_r_factors(self):
        """Per-period reproduction factors accumulated by forward_all_cells()."""
        return self.r_factors
    def get_contamination_chain(self):
        """Return the parallel arrays (infecting_agents, infected_agents, infected_periods)."""
        return self.infecting_agents, self.infected_agents, self.infected_periods
    def change_state_agents(self, agent_ids, new_state_ids):
        """ switch `agent_ids` to `new_state_ids` and reset their in-state
        duration counters to 0 """
        self.current_state_ids[agent_ids] = new_state_ids
        self.current_state_durations[agent_ids] = 0
### Persistence methods
def save(self, savedir):
""" persist map in `savedir` """
if not os.path.isdir(savedir):
os.makedirs(savedir)
# Persist arrays
dsave = {}
dsave['unique_state_ids'] = self.unique_state_ids,
dsave['unique_contagiousities'] = self.unique_contagiousities,
dsave['unique_sensitivities'] = self.unique_sensitivities,
dsave['unique_severities'] = self.unique_severities,
dsave['cell_ids'] = self.cell_ids,
dsave['unsafeties'] = self.unsafeties,
dsave['square_sampling_probas'] = self.square_sampling_probas,
dsave['eligible_cells'] = self.eligible_cells,
dsave['coords_squares'] = self.coords_squares,
dsave['square_ids_cells'] = self.square_ids_cells,
dsave['cell_sampling_probas'] = self.cell_sampling_probas,
dsave['cell_index_shift'] = self.cell_index_shift,
dsave['agent_ids'] = self.agent_ids,
dsave['p_moves'] = self.p_moves,
dsave['least_state_ids'] = self.least_state_ids,
dsave['unique_state_ids'] = self.unique_state_ids,
dsave['home_cell_ids'] = self.home_cell_ids,
dsave['current_state_ids'] = self.current_state_ids,
dsave['current_state_durations'] = self.current_state_durations,
dsave['agent_squares'] = self.agent_squares
dsave['transitions'] = self.transitions,
dsave['transitions_ids'] = self.transitions_ids,
dsave['durations'] = self.durations,
dsave['r_factors'] = self.r_factors,
dsave['infecting_agents'] = self.infecting_agents,
dsave['infected_agents'] = self.infected_agents,
dsave['infected_periods'] = self.infected_periods
for fname, arr in dsave.items():
filepath = os.path.join(savedir, f'{fname}.npy')
cp.save(filepath, arr)
# Persist scalars and other parameters
sdict = {}
sdict['current_period'] = self.current_period
sdict['verbose'] = self.verbose
sdict['dcale'] = self.dscale
sdict['n_infected_period'] = self.n_infected_period
sdict['n_diseased_period'] = self.n_diseased_period
sdict_path = os.path.join(savedir, 'params.pkl')
with open(sdict_path, 'wb') as f:
pickle.dump(sdict, f, protocol=pickle.HIGHEST_PROTOCOL)
if self.verbose > 0:
print(f'Map persisted under folder: {savedir}')
def load(self, savedir):
""" load map that has been persisted in `savedir` through `self.save()` """
if not os.path.isdir(savedir):
print(f'{savedir} is not a path')
self.unique_state_ids = cp.squeeze(cp.load(os.path.join(savedir, 'unique_state_ids.npy')))
self.unique_contagiousities = cp.squeeze(cp.load(os.path.join(savedir, 'unique_contagiousities.npy')))
self.unique_sensitivities = cp.squeeze(cp.load(os.path.join(savedir, 'unique_sensitivities.npy')))
self.unique_severities = cp.squeeze(cp.load(os.path.join(savedir, 'unique_severities.npy')))
self.cell_ids = cp.squeeze(cp.load(os.path.join(savedir, 'cell_ids.npy')))
self.unsafeties = cp.squeeze(cp.load(os.path.join(savedir, 'unsafeties.npy')))
self.square_sampling_probas = cp.squeeze(cp.load(os.path.join(savedir, 'square_sampling_probas.npy')))
self.eligible_cells = cp.squeeze(cp.load(os.path.join(savedir, 'eligible_cells.npy')))
self.coords_squares = cp.squeeze(cp.load(os.path.join(savedir, 'coords_squares.npy')))
self.square_ids_cells = cp.squeeze(cp.load(os.path.join(savedir, 'square_ids_cells.npy')))
self.cell_sampling_probas = cp.squeeze(cp.load(os.path.join(savedir, 'cell_sampling_probas.npy')))
self.cell_index_shift = cp.squeeze(cp.load(os.path.join(savedir, 'cell_index_shift.npy')))
self.agent_ids = cp.squeeze(cp.load(os.path.join(savedir, 'agent_ids.npy')))
self.p_moves = cp.squeeze(cp.load(os.path.join(savedir, 'p_moves.npy')))
self.least_state_ids = cp.squeeze(cp.load(os.path.join(savedir, 'least_state_ids.npy')))
self.unique_state_ids = cp.squeeze(cp.load(os.path.join(savedir, 'unique_state_ids.npy')))
self.home_cell_ids = cp.squeeze(cp.load(os.path.join(savedir, 'home_cell_ids.npy')))
self.current_state_ids = cp.squeeze(cp.load(os.path.join(savedir, 'current_state_ids.npy')))
self.current_state_durations = cp.squeeze(cp.load(os.path.join(savedir, 'current_state_durations.npy')))
self.agent_squares = cp.squeeze(cp.load(os.path.join(savedir, 'agent_squares.npy')))
self.transitions = cp.squeeze(cp.load(os.path.join(savedir, 'transitions.npy')))
self.transitions_ids = cp.squeeze(cp.load(os.path.join(savedir, 'transitions_ids.npy')))
self.durations = cp.squeeze(cp.load(os.path.join(savedir, 'durations.npy')))
self.r_factors = cp.squeeze(cp.load(os.path.join(savedir, 'r_factors.npy')))
self.infecting_agents = cp.squeeze(cp.load(os.path.join(savedir, 'infecting_agents.npy')))
self.infected_agents = cp.squeeze(cp.load(os.path.join(savedir, 'infected_agents.npy')))
self.infected_periods = cp.squeeze(cp.load(os.path.join(savedir, 'infected_periods.npy')))
sdict_path = os.path.join(savedir, 'params.pkl')
with open(sdict_path, 'rb') as f:
sdict = pickle.load(f)
self.current_period = sdict['current_period']
self.verbose = sdict['verbose']
self.dscale = sdict['dcale']
self.n_infected_period = sdict['n_infected_period']
self.n_diseased_period = sdict['n_diseased_period']
# For calibration: reset parameters that can change due to public policies
    def set_p_moves(self, p_moves):
        """Replace agents' move probabilities (calibration of public policies)."""
        self.p_moves = p_moves
    def set_unsafeties(self, unsafeties):
        """Replace cell unsafety factors (calibration of public policies)."""
        self.unsafeties = unsafeties
def set_attractivities(self, attractivities):
self.square_sampling_probas = get_square_sampling_probas(attractivities,
self.square_ids_cells,
self.coords_squares,
self.dscale)
mask_eligible = cp.where(attractivities > 0)[0] # only cells with attractivity > 0 are eligible for a move
self.eligible_cells = self.cell_ids[mask_eligible]
# Compute square to cell transition matrix
self.cell_sampling_probas, self.cell_index_shift = get_cell_sampling_probas(attractivities[mask_eligible], self.square_ids_cells[mask_eligible])
# Compute upfront cumulated sum of sampling matrices
self.square_sampling_probas = cp.cumsum(self.square_sampling_probas, axis=1)
self.cell_sampling_probas = cp.cumsum(self.cell_sampling_probas, axis=1)
<<<<<<< HEAD
=======
>>>>>>> 024791b60731bd81bf57a6c52f3f58c77cab4579
| [
"egregius314@gmail.com"
] | egregius314@gmail.com |
224852df1c4b1e762780d1e5dd02015ea14faa4d | 5ffd3067814644928625e4dd53323dd8ee986390 | /main.py | 3dee01369a87bbd63d1859635dcbfc64371e2a7f | [] | no_license | rynkevich/image-filtering | 359717b3a60acf7eca317f2de1335fc46241162e | f996b292e7ce4fe76355f2d9d28505f0c591ae9d | refs/heads/master | 2020-08-02T00:01:03.453078 | 2019-09-26T19:41:41 | 2019-09-26T19:41:41 | 211,167,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | import sys
import matplotlib.pyplot as plt
import imageio
import numpy as np
from filters import MovingAverageFilter, MedianFilter
from filters.EdgeDetectingFilter import EdgeDetectingFilter
VALID_ARGC = 3
WINDOW_SIZE = 3
def main():
    """Parse CLI args, apply the requested filter and show before/after images.

    Usage: main.py <path_to_image> (average|median|sobel|prewitt)
    Raises NotImplementedError for an unknown filter name.
    """
    if len(sys.argv) < VALID_ARGC:
        print('Usage: main.py <path_to_image> (average|median|sobel|prewitt)')
        return

    original_img = imageio.imread(sys.argv[1])
    img_to_filter = original_img
    is_grayscale = False

    image_filter = None
    filter_name = sys.argv[2].lower()
    if filter_name == 'average':
        image_filter = MovingAverageFilter(WINDOW_SIZE)
    elif filter_name == 'median':
        image_filter = MedianFilter(WINDOW_SIZE)
    elif filter_name in ('sobel', 'prewitt'):
        # Edge detection works on intensity, so convert to grayscale first.
        is_grayscale = True
        img_to_filter = rgb_to_grayscale(original_img)
        image_filter = EdgeDetectingFilter(use_prewitt=filter_name == 'prewitt')
    else:
        raise NotImplementedError(f'Filter "{filter_name}" is not supported')

    filtered_img = image_filter.apply(img_to_filter)

    fig, axes = plt.subplots(1, 2, figsize=(8, 3))
    # NOTE(review): plain attribute assignment has no visible effect on a
    # Figure (set_edgecolor() would) -- kept as-is pending confirmation.
    fig.edgecolor = 'black'
    fig.canvas.set_window_title('Image Filtering')
    show_img(axes[0], original_img, 'Original', is_grayscale)
    show_img(axes[1], filtered_img, 'Filtered', is_grayscale)
    plt.tight_layout()
    plt.show()
def rgb_to_grayscale(image):
    """Collapse an RGB(A) image to a single luminance channel.

    Only the first three channels are used (an alpha channel is ignored);
    they are weighted 0.3 / 0.587 / 0.114 and summed along the last axis.
    """
    weights = np.array([0.3, 0.587, 0.114])
    return image[..., :3] @ weights
def show_img(ax, img, title, is_grayscale):
    """Render *img* on axes *ax* with *title*, axis ticks hidden.

    A gray colormap is applied when *is_grayscale* is true; otherwise
    matplotlib's default color handling is used.
    """
    cmap = plt.get_cmap('gray') if is_grayscale else None
    ax.set_title(title)
    ax.imshow(img, cmap=cmap)
    ax.axis('off')
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"rynkevich@icloud.com"
] | rynkevich@icloud.com |
ec886a50104eadb4bb7095b7baddd9867b7f611a | 12ac898e8db7ac325e42c9d6c6c875aee78da655 | /StockApp/StockQuotes/migrations/0009_buystockmodel.py | 5bdc79480f464e464d08fd48054c223caf87ee44 | [] | no_license | Teri-Ed/Project-4 | 121d0e5fcccf89489bbd4d7ec670f57508839dbc | 518f47416fe687c8fbc52535fe07a552d969ed9d | refs/heads/main | 2023-01-01T20:01:55.988283 | 2020-10-17T23:13:54 | 2020-10-17T23:13:54 | 300,732,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # Generated by Django 3.1.1 on 2020-10-11 23:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('StockQuotes', '0008_auto_20201011_0813'),
]
operations = [
migrations.CreateModel(
name='BuyStockModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('price', models.CharField(max_length=100)),
('quantity', models.CharField(max_length=100)),
],
),
]
| [
"teri.edwards@outlook.com"
] | teri.edwards@outlook.com |
ef23d977d1e039a74262aed9f8df4b6a55bfd2df | dd00894931c9d1764bdf129418da1bd7e711fb99 | /kb/groups/migrations/0005_group_imported.py | 55a4a034ec85004b0eb13c3afd4a5f433d110c36 | [] | no_license | johnfelipe/edubox-platform | b8c69112c36d8ec971db1537f18d1e0e1ea7a8c5 | d5cdcc351cabb86ae2f36d3eae0921f63d41383c | refs/heads/master | 2021-01-18T11:13:20.931521 | 2016-05-23T15:11:08 | 2016-05-23T15:11:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `imported` flag (default False) to the Group model.

    Auto-generated by Django makemigrations; applies on top of
    groups/0004_auto_20160215_1236.
    """
    dependencies = [
        ('groups', '0004_auto_20160215_1236'),
    ]
    operations = [
        migrations.AddField(
            model_name='group',
            name='imported',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"tim.doolan@gmail.com"
] | tim.doolan@gmail.com |
71ad75ed4334334155ffd4dc39edc9241ddd1ede | 70ba7a0cf6d1018152fedf2ea1ae3d758eb75571 | /bin/python-config | 768319652562c971781d1adb69eb490d1dd44575 | [] | no_license | alexdsole/venv | a9205d1dd814b44cc5886da26b69c6427e230b93 | a8d2f708a33b2ce2bf578d256387b5cc6eff3021 | refs/heads/master | 2021-01-01T19:14:46.817101 | 2017-07-27T15:12:50 | 2017-07-27T15:12:50 | 98,550,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | #!/home/alexis/Documentos/Consensus-master/venv/bin/python
import sys
import getopt
import sysconfig
# Long options accepted by this python-config clone; 'help' stays last so the
# version-gated options below are inserted before it.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write the usage line (listing all valid options) to stderr and exit with *code*."""
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
# Parse long options only; any bad or missing option prints usage and exits 1.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Handle each requested flag in the order given, printing one line per flag.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # -I flags for both the generic and platform-specific include dirs.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # EXT_SUFFIX is the modern name; 'SO' is the pre-3.4 fallback.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"adavisole@gmail.com"
] | adavisole@gmail.com | |
64111bc8816975c4707095c57cce2ccf946afbdb | 39095f7c7fafefcd99589ed77a34d6811c87040f | /scripts/get_south_beach_latest.py | 634089860e1d4c42c9cf5c7371ed281e108d4e51 | [] | no_license | dlenz/home-assistant-config | 03656605e08982f5d851de3c9d9fdca78f6f84d5 | 19d132ca20a9aabb7b90d115bfd6d499c1fee110 | refs/heads/master | 2020-03-08T11:30:25.571752 | 2018-08-10T17:26:21 | 2018-08-10T17:26:21 | 128,099,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/python3
import urllib3
import json
import requests
import os
# Fetch the beach-cam feed index and download the latest still image to a
# fixed Home Assistant www/ path so it can be served as a camera snapshot.
http = urllib3.PoolManager()
r = http.request('GET', 'http://video-monitoring.com/beachcams/boca/latest.json')
latest = json.loads(r.data.decode('utf-8'))
base_url = 'http://video-monitoring.com/beachcams/boca/'
# 's4'/'hr' presumably select a camera id and its high-res variant from the
# feed's JSON schema -- confirm against the latest.json payload.
img_url = latest['s4']['hr']
complete_url = base_url + img_url
with open('/home/rosie/docker/volumes/home-assistant-config/config/www/pictures/south_beach_latest.jpg', 'wb') as handle:
    # Stream the image in 1 KiB chunks to the destination file.
    response = requests.get(complete_url, stream=True)
    if not response.ok:
        # NOTE(review): a failed GET is only printed; the (possibly empty)
        # file is still written, clobbering the previous snapshot.
        print(response)
    for block in response.iter_content(1024):
        if not block:
            break
        handle.write(block)
"captainkrypto@gmail.com"
] | captainkrypto@gmail.com |
1f93a53a75605cde0bdc649c9dc7878d4fb215bb | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/encodings/cp1258.py | f667aa8a98b642a6aa031749d9cd65a1e7e5b80d | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | #Embedded file name: encodings\cp1258.py
""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
    """Stateless cp1258 codec backed by the module-level charmap tables.

    This file is generated by gencodec.py -- regenerate it instead of
    editing by hand.
    """
    def encode(self, input, errors = 'strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors = 'strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final = False):
        # Charmap encoding is stateless, so `final` can safely be ignored.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final = False):
        # Charmap decoding is stateless, so `final` can safely be ignored.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # encode() comes from Codec; the stream plumbing from codecs.StreamWriter.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # decode() comes from Codec; the stream plumbing from codecs.StreamReader.
    pass
def getregentry():
    # Registration hook looked up by the `encodings` package search function.
    return codecs.CodecInfo(name='cp1258', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\u20ac\ufffe\u201a\u0192\u201e\u2026\u2020\u2021\u02c6\u2030\ufffe\u2039\u0152\ufffe\ufffe\ufffe\ufffe\u2018\u2019\u201c\u201d\u2022\u2013\u2014\u02dc\u2122\ufffe\u203a\u0153\ufffe\ufffe\u0178\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\u0102\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\u0300\xcd\xce\xcf\u0110\xd1\u0309\xd3\xd4\u01a0\xd6\xd7\xd8\xd9\xda\xdb\xdc\u01af\u0303\xdf\xe0\xe1\xe2\u0103\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\u0301\xed\xee\xef\u0111\xf1\u0323\xf3\xf4\u01a1\xf6\xf7\xf8\xf9\xfa\xfb\xfc\u01b0\u20ab\xff'
encoding_table = codecs.charmap_build(decoding_table)
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
8b52332672b8f363f40ce770b90d8da2b98dcd3f | b30a1c6298191571cbfbd9a0be20e1654625e621 | /브루트포스/2231_분해합.py | 4e7977d8f99e2cc0de4793a366bfca1e2db47b1e | [] | no_license | ChoneungSon/BeakJoon | 5b07d58f97b377984d205d6b481289dc2e6041fe | a313848cb657dfa7c63a8ad86789e92f7a491748 | refs/heads/master | 2021-01-04T16:28:05.892412 | 2020-07-18T14:43:26 | 2020-07-18T14:43:26 | 240,634,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import sys
n = int(sys.stdin.readline())
# Find the smallest "generator" i of n: a number whose decomposition sum
# (i plus the sum of its decimal digits) equals n.
for i in range(1, n+1):
    m = i
    s = i
    # Accumulate the digits of i into s.
    while m != 0:
        s += m % 10
        m //= 10
    if s == n:
        print(i)
        break
else:
    # for/else: the loop finished without `break`, so n has no generator.
    print(0)
"thschsmd@naver.com"
] | thschsmd@naver.com |
d31974c2a03e29a8f1c935202d55f90607461b20 | 9fac825a83f90b98702f150fbce6a4e5bc06ed1e | /test_mining.py | 2593e63dff302ddf63fa57e8a7fda5f087743bba | [] | no_license | jiatujiashuai/PRP | ab3a235e1c8120685a870bdcc0dfdcfe102f6d9b | df7a9507e7d72cc20f9772387b35a65a4e9c3889 | refs/heads/master | 2020-03-16T15:28:20.695212 | 2018-05-09T10:48:03 | 2018-05-09T10:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | from pymongo import MongoClient
def test_mining(Ix,Iy):
    """Count the support of itemset Ix and of Ix+Iy over every document in
    the `testdata.encrypted` collection, then write "support_xy\nsupport_x\nN"
    to calculation.dat.

    Ix, Iy: lists of item indices; document fields are named 'item<idx>'.
    NOTE(review): assumes each 'item<idx>' field is a 0/1 flag, so the
    running product is 1 only when every item is present -- confirm schema.
    """
    client=MongoClient('mongodb://172.17.0.3:27017/')
    encrypted=client.testdata.encrypted
    N=encrypted.count()
    transaction=encrypted.find()
    support_x,support_xy=0,0
    for i in range(N):
        # Product over Ix: stays 1 only if the transaction has all Ix items.
        sub_support_x=1
        for item in Ix:
            sub_support_x*=int(transaction[i]['item'+str(item)])
        # Extend the same product over Iy to get support of the union.
        sub_support_xy=sub_support_x
        for item in Iy:
            sub_support_xy*=int(transaction[i]['item'+str(item)])
        support_x+=sub_support_x
        support_xy+=sub_support_xy
    with open('calculation.dat','w') as f:
        f.write("%s\n%s\n%s" %(support_xy,support_x,N))
test_mining([0,1,2],[3])
| [
"noreply@github.com"
] | jiatujiashuai.noreply@github.com |
21a83673b88c2aef4df0eb031e6282b87c5895e0 | fbf832e1aafdd6382f11b0e736faf98765acfe6b | /deployRiaz/deployRiaz/urls.py | b6b75d584789a217db9b86723795f79da64ff753 | [] | no_license | rashedoz/DeployRiaz | 6c8d1f2dcfc782c59b652b219e21dc504ad60591 | 10465f5c73906417b934acb25c444d6d64074b67 | refs/heads/master | 2020-03-17T09:09:34.483087 | 2018-05-15T05:47:06 | 2018-05-15T05:47:06 | 133,463,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """deployRiaz URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# URL routing table: Django admin plus the `home` app's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('home/',include('home.urls')),
]
| [
"noreply@github.com"
] | rashedoz.noreply@github.com |
bf1597e673d16b97b990e25232920df6cf6c03d1 | 914bfe0f3e735d954436398bd1aa814ba340a6d2 | /TubesSA_2Bot.py | d1775ac7906a99fa4c87be4516befd7bca53bab4 | [] | no_license | azrielnaufal/Tic-Tac-Toe | d19d86f15499d7642c710bafe5693c3a306851bc | ee6144d715c2cc4cb35e7d149694546e90829123 | refs/heads/master | 2023-06-05T21:48:18.960345 | 2021-06-18T10:53:50 | 2021-06-18T10:53:50 | 378,116,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,612 | py | # MODULES
import pygame, sys
import numpy as np
# initializes pygame
pygame.init()
# ---------
# CONSTANTS
# ---------
WIDTH = 600
HEIGHT = 600
LINE_WIDTH = 15 # tebal garis
WIN_LINE_WIDTH = 15 # tebal coretan garis yg menang
BOARD_ROWS = 3
BOARD_COLS = 3
SQUARE_SIZE = 200 # ukuran masing2 kotak -> square_size^2
CIRCLE_RADIUS = 60 # radius lingkaran
CIRCLE_WIDTH = 30 # tebal garis lingkaran
CROSS_WIDTH = 25 # tebal garis silang
SPACE = 30 #
bot = 2
player = 1
# we assign rgb value (red green blue)
RED = (255, 0, 0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BG_COLOR = (145, 121, 250) #ganti warna background
LINE_COLOR = (94, 23, 235) #ganti warna garis pada background
CIRCLE_COLOR = (239, 231, 200)
CROSS_COLOR = (66, 66, 66)
# ------
# SCREEN
# ------
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption('Backtracking Tic Tac Toe')
screen.fill( BG_COLOR )
mainClock = pygame.time.Clock()
# -------------
# CONSOLE BOARD
# -------------
board = np.zeros( (BOARD_ROWS, BOARD_COLS) )
# ---------
# FUNCTIONS
# ---------
def draw_lines(): # draws the 3x3 grid lines of the board
    # 1st horizontal
    pygame.draw.line( screen, LINE_COLOR, (0, SQUARE_SIZE), (WIDTH, SQUARE_SIZE), LINE_WIDTH )
    # 2nd horizontal
    pygame.draw.line( screen, LINE_COLOR, (0, 2 * SQUARE_SIZE), (WIDTH, 2 * SQUARE_SIZE), LINE_WIDTH )
    # 1st vertical
    pygame.draw.line( screen, LINE_COLOR, (SQUARE_SIZE, 0), (SQUARE_SIZE, HEIGHT), LINE_WIDTH )
    # 2nd vertical
    pygame.draw.line( screen, LINE_COLOR, (2 * SQUARE_SIZE, 0), (2 * SQUARE_SIZE, HEIGHT), LINE_WIDTH )
def draw_figures(): # draws the player/bot symbols (O for 1, X for 2)
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS):
            if board[row][col] == 1:
                pygame.draw.circle( screen, CIRCLE_COLOR, (int( col * SQUARE_SIZE + SQUARE_SIZE//2 ), int( row * SQUARE_SIZE + SQUARE_SIZE//2 )), CIRCLE_RADIUS, CIRCLE_WIDTH )
            elif board[row][col] == 2:
                pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SPACE), CROSS_WIDTH )
                pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), CROSS_WIDTH )
    # Debug dump of the board state to the console on every redraw.
    print(board)
def mark_square(row, col, player):
    """Claim the cell at (row, col) for `player`; no-op if already taken."""
    if not available_square(row, col):
        return
    board[row][col] = player
def available_square(row, col): # True when the cell is still empty (0 = unclaimed)
    return board[row][col] == 0
def is_board_full():
    """Return True when every cell is occupied (no 0 entries remain)."""
    return all(
        board[r][c] != 0
        for r in range(BOARD_ROWS)
        for c in range(BOARD_COLS)
    )
def check_win(player): # has this player completed a line?
    """Return True if `player` owns a full column, row, or diagonal."""
    # columns
    for col in range(BOARD_COLS):
        if all(board[r][col] == player for r in range(3)):
            return True
    # rows
    for row in range(BOARD_ROWS):
        if all(board[row][c] == player for c in range(3)):
            return True
    # diagonals (top-left -> bottom-right and bottom-left -> top-right)
    diag_down = all(board[k][k] == player for k in range(3))
    diag_up = all(board[2 - k][k] == player for k in range(3))
    return diag_down or diag_up
#bikin coretan garis kalo berhasil menang
def draw_vertical_winning_line(col, player):
    # Strike through the winning column in the winner's colour.
    posX = col * SQUARE_SIZE + SQUARE_SIZE//2
    if player == 1:
        color = CIRCLE_COLOR
    elif player == 2:
        color = CROSS_COLOR
    # NOTE(review): uses LINE_WIDTH where the sibling helpers use
    # WIN_LINE_WIDTH -- both are 15 today, so no visible difference.
    pygame.draw.line( screen, color, (posX, 15), (posX, HEIGHT - 15), LINE_WIDTH )
def draw_horizontal_winning_line(row, player):
    # Strike through the winning row in the winner's colour.
    posY = row * SQUARE_SIZE + SQUARE_SIZE//2
    if player == 1:
        color = CIRCLE_COLOR
    elif player == 2:
        color = CROSS_COLOR
    pygame.draw.line( screen, color, (15, posY), (WIDTH - 15, posY), WIN_LINE_WIDTH )
def draw_asc_diagonal(player):
    # Strike through the bottom-left -> top-right diagonal.
    if player == 1:
        color = CIRCLE_COLOR
    elif player == 2:
        color = CROSS_COLOR
    pygame.draw.line( screen, color, (15, HEIGHT - 15), (WIDTH - 15, 15), WIN_LINE_WIDTH )
def draw_desc_diagonal(player):
    # Strike through the top-left -> bottom-right diagonal.
    if player == 1:
        color = CIRCLE_COLOR
    elif player == 2:
        color = CROSS_COLOR
    pygame.draw.line( screen, color, (15, 15), (WIDTH - 15, HEIGHT - 15), WIN_LINE_WIDTH )
def winning_Line(player):
    """Draw a strike-through over every completed line owned by `player`.

    Mirrors the detection logic of check_win(), but draws instead of
    returning.
    """
    # vertical win check
    for col in range(BOARD_COLS):
        if board[0][col] == player and board[1][col] == player and board[2][col] == player:
            draw_vertical_winning_line(col, player)
    # horizontal win check
    for row in range(BOARD_ROWS):
        if board[row][0] == player and board[row][1] == player and board[row][2] == player:
            draw_horizontal_winning_line(row, player)
    # asc diagonal win check
    if board[2][0] == player and board[1][1] == player and board[0][2] == player:
        draw_asc_diagonal(player)
    # desc diagonal win check
    if board[0][0] == player and board[1][1] == player and board[2][2] == player:
        draw_desc_diagonal(player)
    return
def restart():
    # Wipe the screen, redraw the empty grid, and clear the board state.
    screen.fill( BG_COLOR )
    draw_lines()
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS):
            board[row][col] = 0
def checkDraw():
    """Return True when no empty cell is left (duplicate of is_board_full)."""
    empty_left = any(
        board[r][c] == 0
        for r in range(BOARD_ROWS)
        for c in range(BOARD_COLS)
    )
    return not empty_left
def compMove_X():
    """Pick and play the best move for the X bot via exhaustive minimax."""
    best_score, best_row, best_col = -800, -1, -1
    for r in range(BOARD_ROWS):
        for c in range(BOARD_COLS):
            if board[r][c] != 0:
                continue
            # Try the move, score the resulting position, undo it.
            board[r][c] = bot
            value = minimax(bot, board, 0, False)
            board[r][c] = 0
            if value > best_score:
                best_score, best_row, best_col = value, r, c
    mark_square(best_row, best_col, bot)
    return
def compMove_O():
    """Pick and play the best move for the O bot via exhaustive minimax."""
    best_score, best_row, best_col = -800, -1, -1
    for r in range(BOARD_ROWS):
        for c in range(BOARD_COLS):
            if board[r][c] != 0:
                continue
            # Try the move, score the resulting position, undo it.
            board[r][c] = player
            value = minimax(player, board, 0, False)
            board[r][c] = 0
            if value > best_score:
                best_score, best_row, best_col = value, r, c
    mark_square(best_row, best_col, player)
    return
def minimax(user, board, depth, isMaximizing):
    """Score the current position from `user`'s point of view.

    Returns +1 for a forced win for `user`, -1 for a forced loss, 0 for a
    draw.  `depth` is threaded through but not used (no depth discount).
    NOTE(review): `user` must be the module-level `player` or `bot`;
    anything else leaves `minVal` unbound and raises NameError.
    """
    # Identify user's opponent.
    if user == player:
        minVal = bot
    elif user == bot:
        minVal = player
    # Terminal positions.
    if (check_win(user)):
        return 1
    elif (check_win(minVal)):
        return -1
    elif (checkDraw()):
        return 0
    if (isMaximizing):
        bestScore = -800
        for i in range(0,BOARD_ROWS):
            for y in range(0,BOARD_COLS):
                if (board[i][y] == 0):
                    board[i][y] = user
                    score = minimax(user,board, depth + 1, False)
                    board[i][y] = 0
                    if (score > bestScore):
                        bestScore = score
        return bestScore
    else:
        # BUG FIX: this branch previously initialised bestScore to 0, which
        # capped the minimum at 0 and hid positions where every reply loses
        # for the minimiser (true value +1).  Use a sentinel above every
        # reachable score, mirroring the -800 used in the maximising branch.
        bestScore = 800
        for i in range(0,BOARD_ROWS):
            for y in range(0,BOARD_COLS):
                if (board[i][y] == 0):
                    board[i][y] = minVal
                    score = minimax(user,board, depth + 1, True)
                    board[i][y] = 0
                    if (score < bestScore):
                        bestScore = score
        return bestScore
def main_2Bot():
    """Event loop: R restarts, S plays one O-then-X bot exchange, ESC exits.

    NOTE(review): restarting recurses into main_2Bot(); fine in practice
    for short sessions, but the stack grows with every restart.
    """
    draw_lines()
    game_over = False
    running = False
    while not running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:
                    restart()
                    main_2Bot()
                if event.key == pygame.K_s:
                    if not game_over:
                        compMove_O()
                        if check_win(1):
                            game_over = True
                            winning_Line(1)
                        # BUG FIX: X previously moved even when O had just
                        # won, placing an extra piece on a finished game.
                        if not game_over:
                            compMove_X()
                            if check_win(2):
                                game_over = True
                                winning_Line(2)
                        draw_figures()
                if event.key == pygame.K_ESCAPE:
                    running = True
        pygame.display.update()
        mainClock.tick(60)
if __name__ == "__main__":
main_2Bot()
| [
"azrielnaufal31@gmail.com"
] | azrielnaufal31@gmail.com |
458d22ef66eb568298b9ede4ccd4c4756804db67 | 267a5abf4bbff8a1ed3d5732f48e25e105e11b78 | /blogsite/blog/templatetags/blog_tags.py | cd97b844e4613a8fc5ee85ffaac3ed2dc7e2cf38 | [
"Unlicense"
] | permissive | ch1huizong/dj | e2a32bddd7995b004e9607e35e48601e7f721fce | 12009375fc2113acb0a6cb17f6c2146c83595de8 | refs/heads/master | 2021-01-16T00:56:39.105625 | 2019-09-08T12:29:02 | 2019-09-08T12:29:02 | 99,985,586 | 21 | 14 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | from django import template
from django.db.models import Count
from django.utils.safestring import mark_safe
import markdown
from ..models import Post
register = template.Library()
@register.simple_tag
def total_posts():
    # Simple tag: number of published posts (via the custom `published` manager).
    return Post.published.count()
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
    # Inclusion tag: renders latest_posts.html with the `count` newest posts.
    latest_posts = Post.published.order_by('-publish')[:count]
    return {'latest_posts': latest_posts}
@register.simple_tag
def get_most_commented_posts(count=5):
    # Annotate each post with its comment count and return the top `count`.
    return Post.published.annotate(
        total_comments=Count('comments')
    ).order_by('-total_comments')[:count]
@register.filter(name='markdown')
def markdown_format(text):
    # Render markdown to HTML and mark it safe so templates do not escape it.
    # NOTE(review): mark_safe trusts `text` -- only use on author-controlled
    # content, never on raw user input.
    return mark_safe(markdown.markdown(text))
| [
"ch1huizong@gmail.com"
] | ch1huizong@gmail.com |
5fa96337586ca8c93425d826c1dcece908de6780 | 24e44e66561001db25dcb946e74e9a20d0b5744b | /todo/views.py | 731477bcf0c3694f001a24ed480b029a52db63ca | [] | no_license | akiyoko/django-todo-sample | 07b59c8307caa761be4a87a54da6f61cf9fd7b56 | 24750c720606e0bc9ad66eeb616727d0e9fd22d6 | refs/heads/master | 2022-11-07T21:23:16.709629 | 2020-06-18T10:06:10 | 2020-06-18T10:06:10 | 272,105,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,102 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.http.response import Http404
from django.template.response import TemplateResponse
from django.utils import timezone
from django.views import View
from .forms import TodoForm
from .models import Todo
# Step 1. はじめての画面表示
# def todo_list(request):
# if request.method == 'GET':
# today = timezone.localdate()
# return HttpResponse('今日は {} です。'.format(today))
# Step 2. クラスベースで書き直す
# class TodoListView(View):
# def get(self, request, *args, **kwargs):
# today = timezone.localdate()
# return HttpResponse('今日は {} です。'.format(today))
# Step 3. HTMLを使ったレスポンスを返す
# class TodoListView(View):
# def get(self, request, *args, **kwargs):
# today = timezone.localdate()
# return HttpResponse("""
# <html>
# <body>
# <h1>TODOリスト</h1>
# <p>今日は {} です。</p>
# </body>
# </html>
# """.format(today))
# Step 4. テンプレートを使う
# class TodoListView(View):
# def get(self, request, *args, **kwargs):
# today = timezone.localdate()
# context = {
# 'today': today,
# }
# return TemplateResponse(request, 'todo/todo_list_step4.html', context)
# Step 5. モデルを使う
class TodoListView(View):
    """GET: render the TODO list ordered by expiration date."""
    def get(self, request, *args, **kwargs):
        today = timezone.localdate()
        todo_list = Todo.objects.order_by('expiration_date')
        context = {
            'today': today,
            'todo_list': todo_list,
        }
        # return TemplateResponse(request, 'todo/todo_list_step5.html', context)
        return TemplateResponse(request, 'todo/todo_list.html', context)
# Step 6. TODO追加画面を作成する
class TodoCreateView(View):
    """GET: show an empty TODO form; POST: validate and create a TODO."""
    def get(self, request, *args, **kwargs):
        # Render the page with an empty (unbound) form
        context = {
            'form': TodoForm(),
        }
        return TemplateResponse(request, 'todo/todo_create.html', context)
    def post(self, request, *args, **kwargs):
        # Build a form from the request parameters
        form = TodoForm(request.POST)
        # Validate via the form
        if not form.is_valid():
            # On validation failure, re-render the originating template
            context = {
                'form': form,
            }
            return TemplateResponse(request, 'todo/todo_create.html', context)
        # Persist the new object
        form.save()
        # Redirect to the TODO list page
        return HttpResponseRedirect('/todo/')
# Step 7. TODO変更画面を作成する
class TodoUpdateView(View):
    """GET: show the edit form for one TODO; POST: validate and update it."""
    def get(self, request, pk, *args, **kwargs):
        # Fetch the target record (404 if it does not exist)
        try:
            todo = Todo.objects.get(pk=pk)
        except Todo.DoesNotExist:
            raise Http404
        # Render the form pre-populated from the target record
        context = {
            'form': TodoForm(instance=todo),
        }
        return TemplateResponse(request, 'todo/todo_update.html', context)
    def post(self, request, pk, *args, **kwargs):
        # Fetch the target record (404 if it does not exist)
        try:
            todo = Todo.objects.get(pk=pk)
        except Todo.DoesNotExist:
            raise Http404
        # Build a form from the request parameters bound to the record
        form = TodoForm(request.POST, instance=todo)
        # Validate via the form
        if not form.is_valid():
            # On validation failure, re-render the originating template
            context = {
                'form': form,
            }
            return TemplateResponse(request, 'todo/todo_update.html', context)
        # Persist the update
        form.save()
        # Redirect to the TODO list page
        return HttpResponseRedirect('/todo/')
| [
"akiyoko@users.noreply.github.com"
] | akiyoko@users.noreply.github.com |
38e8670b3376dd79368bcd57907d6295d77a0c9f | 3bfd1525e55766b02221329020eab0baa09036ff | /examples/constant_inpainting.py | 74551ef7f754e323c608aa20c30fee9801db97ea | [
"MIT"
] | permissive | xuanhan863/HyperGAN | 937814af8a44412e95386a8bd17833af58f056e3 | fa190dba9860a41950f8d769996ebd919e9697c2 | refs/heads/master | 2021-01-09T07:02:41.761250 | 2017-02-06T07:45:15 | 2017-02-06T07:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,756 | py | import argparse
import os
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
from hypergan.loaders import *
from hypergan.samplers.common import *
from hypergan.util.globals import *
def parse_args():
    """Build and evaluate this trainer's command-line interface."""
    cli = argparse.ArgumentParser(description='Train a colorizer!', add_help=True)
    # Required positional: the dataset root.
    cli.add_argument('directory', action='store', type=str, help='The location of your data. Subdirectories are treated as different classes. You must have at least 1 subdirectory.')
    # Optional knobs, with the same defaults and help text as before.
    cli.add_argument('--batch_size', '-b', type=int, default=32, help='Number of samples to include in each batch. If using batch norm, this needs to be preserved when in server mode')
    cli.add_argument('--crop', type=bool, default=False, help='If your images are perfectly sized you can skip cropping.')
    cli.add_argument('--device', '-d', type=str, default='/gpu:0', help='In the form "/gpu:0", "/cpu:0", etc. Always use a GPU (or TPU) to train')
    cli.add_argument('--format', '-f', type=str, default='png', help='jpg or png')
    cli.add_argument('--sample_every', type=int, default=50, help='Samples the model every n epochs.')
    cli.add_argument('--save_every', type=int, default=30000, help='Saves the model every n epochs.')
    cli.add_argument('--size', '-s', type=str, default='64x64x3', help='Size of your data. For images it is widthxheightxchannels.')
    cli.add_argument('--use_hc_io', type=bool, default=False, help='Set this to no unless you are feeling experimental.')
    return cli.parse_args()
def sampler(name, sess, config):
    """Render a sample grid -- one real input, its filtered version
    (tensor 'xfiltered', presumably the masked input -- confirm), and a set
    of generator outputs -- then hand the stacked image to `plot`.

    `get_tensor` pulls named tensors from hypergan's global registry.
    """
    generator = get_tensor("g")[0]
    y_t = get_tensor("y")  # fetched but unused below
    z_t = get_tensor("z")  # fetched but unused below
    x_t = get_tensor('x')
    mask_t = get_tensor('mask')
    fltr_x_t = get_tensor('xfiltered')
    # Tile one real example across the whole batch so every sample row shows
    # reconstructions of the same image.
    x = sess.run([x_t])
    x = np.tile(x[0][0], [config['batch_size'],1,1,1])
    # Only used by the disabled custom-mask code below (comprehension `x` is
    # local to the comprehension in Python 3 and does not clobber the array).
    s = [int(x) for x in mask_t.get_shape()]
    #mask = np.zeros([s[0], s[1]//2, s[2]//2, s[3]])
    #constants = (1,1)
    #mask = np.pad(mask, ((0,0),(s[1]//4,s[1]//4),(s[2]//4,s[2]//4),(0,0)),'constant', constant_values=constants)
    print("Set up mask")
    sample, bw_x = sess.run([generator, fltr_x_t], {x_t: x})#, mask_t: mask})
    # Row 1: original, filtered original, four samples; then 4 rows of 6 samples.
    stacks = []
    stacks.append([x[0], bw_x[0], sample[0], sample[1], sample[2], sample[3]])
    for i in range(4):
        stacks.append([sample[i*6+4+j] for j in range(6)])
    images = np.vstack([np.hstack(s) for s in stacks])
    plot(config, images, name)
def add_inpaint(gan, net):
    """Generator layer filter: return the real batch `x`, masked and resized
    to this layer's spatial resolution (mask zeroes the region to inpaint)."""
    x = get_tensor('x')
    mask = get_tensor('mask')
    # Static shape of the current layer; the comprehension variable `x` is
    # comprehension-local (Python 3) and does not clobber the tensor.
    s = [int(x) for x in net.get_shape()]
    shape = [s[1], s[2]]
    x = tf.image.resize_images(x, shape, 1)
    mask = tf.image.resize_images(mask, shape, 1)
    print("Created bw ", x)
    x = x*mask#tf.image.rgb_to_grayscale(x)
    #x += tf.random_normal(x.get_shape(), mean=0, stddev=1e-1, dtype=config['dtype'])
    return x
def add_original_x(gan, net):
    """Discriminator layer filter: the masked real image resized to this
    layer's resolution -- mirrors add_inpaint on the generator side."""
    x = get_tensor('x')
    mask = get_tensor('mask')
    # Static shape of the current layer (comprehension `x` is local, Python 3).
    s = [int(x) for x in net.get_shape()]
    shape = [s[1], s[2]]
    mask = tf.image.resize_images(mask, shape, 1)
    x = tf.image.resize_images(x, shape, 1)
    #xx += tf.random_normal(xx.get_shape(), mean=0, stddev=config['noise_stddev'], dtype=root_config['dtype'])
    x = x*mask
    return x
args = parse_args()
width = int(args.size.split("x")[0])
height = int(args.size.split("x")[1])
channels = int(args.size.split("x")[2])
selector = hg.config.selector(args)
config = selector.random_config()
config_filename = os.path.expanduser('~/.hypergan/configs/inpainting.json')
config = selector.load_or_create_config(config_filename, config)
#TODO add this option to D
#TODO add this option to G
config['generator.layer_filter'] = add_inpaint
config['discriminators'][0]['layer_filter'] = add_original_x
# TODO refactor, shared in CLI
config['dtype']=tf.float32
config['batch_size'] = args.batch_size
x,y,f,num_labels,examples_per_epoch = image_loader.labelled_image_tensors_from_directory(
args.directory,
config['batch_size'],
channels=channels,
format=args.format,
crop=args.crop,
width=width,
height=height)
config['y_dims']=num_labels
config['x_dims']=[height,width]
config['channels']=channels
config['model']='inpainting'
config = hg.config.lookup_functions(config)
initial_graph = {
'x':x,
'y':y,
'f':f,
'num_labels':num_labels,
'examples_per_epoch':examples_per_epoch
}
shape = [config['batch_size'], config['x_dims'][0], config['x_dims'][1], config['channels']]
mask = tf.ones([shape[1], shape[2], shape[3]])
scaling = 0.6
mask = tf.image.central_crop(mask, scaling)
print(mask.get_shape())
left = (shape[1]*scaling)//2 * 0.75
top = (shape[2]*scaling)//2 * 0.75
mask = tf.image.pad_to_bounding_box(mask, int(top), int(left), shape[1], shape[2])
mask = (1.0-mask)
#mask = tf.random_uniform(shape, -1, 1)
#mask = tf.greater(mask, 0)
mask = tf.cast(mask, tf.float32)
set_tensor('mask', mask)
gan = hg.GAN(config, initial_graph)
save_file = os.path.expanduser("~/.hypergan/saves/inpainting.ckpt")
gan.load_or_initialize_graph(save_file)
tf.train.start_queue_runners(sess=gan.sess)
for i in range(100000):
d_loss, g_loss = gan.train()
if i % args.save_every == 0 and i > 0:
print("Saving " + save_file)
gan.save(save_file)
if i % args.sample_every == 0 and i > 0:
print("Sampling "+str(i))
sample_file = "samples/"+str(i)+".png"
gan.sample_to_file(sample_file, sampler=sampler)
if args.use_hc_io:
hc.io.sample(config, [{"image":sample_file, "label": 'sample'}])
tf.reset_default_graph()
# BUG FIX: `self` does not exist at module scope (this is top-level script
# code), so `self.sess.close()` raised NameError once training finished.
# The session to close belongs to the GAN built above (see start_queue_runners).
gan.sess.close()
| [
"mikkel@255bits.com"
] | mikkel@255bits.com |
ec76c3ab15308cd45355cd44f238050244620355 | 4f99836d06cc71ad4888acfc21361a3ded073999 | /Game_kivy/exceptions.py | 8440461635a6d5d566813b8fcb5bf5073838abcd | [] | no_license | akhipachi/Sequence | 103d099ace2a959ccb84dd3486f2f137f9214384 | 6b802a5fac67a3d972c4c7dfbe3239c515e696c7 | refs/heads/master | 2023-02-15T16:31:13.837885 | 2021-01-19T06:50:18 | 2021-01-19T06:50:18 | 308,256,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | class ErrorResponse(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return self.error
| [
"akhilesh.pachipulusu@inviz.ai"
] | akhilesh.pachipulusu@inviz.ai |
ee10c81d914c61ceb8313da18a0149fd2e9da506 | c7930b3f227c73c30d267ab6118cca41cd0af049 | /models/vgg.py | 79633828e36d218a17b34ee161671f4792bf8132 | [
"MIT"
] | permissive | YiteWang/sanity-checking-pruning | bdf7e601a9c16e82605ec3c22e51852adbc02b3f | 43d7d81e599365453989319ff163fcefe96166ce | refs/heads/main | 2023-01-21T06:39:03.537273 | 2020-11-14T04:54:48 | 2020-11-14T04:54:48 | 312,670,511 | 0 | 0 | MIT | 2020-11-13T19:54:28 | 2020-11-13T19:54:27 | null | UTF-8 | Python | false | false | 4,334 | py | import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG feature extractor plus a single linear classifier head.

    `features` is the convolutional stack (see ``make_layers``); the head
    assumes the pooled feature map flattens to exactly 512 values.
    """
    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.classes = num_classes
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()
    def forward(self, x):
        x = self.features(x)
        # 10/100 classes (CIFAR-sized inputs) pool 2x2; any other class count
        # pools 4x4 -- presumably for larger input resolutions; confirm callers.
        if self.classes != 10 and self.classes != 100:
            x = nn.AvgPool2d(4)(x)
        else:
            x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        # He-style init: conv weights ~ N(0, sqrt(2/fan_in)) with
        # fan_in = k*k*in_channels; BatchNorm to (1, 0); Linear ~ N(0, 0.01).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * (m.in_channels)
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)  # computed but unused by the init below
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` stack.

    Each int entry becomes a 3x3 same-padding conv (+ optional BatchNorm)
    followed by ReLU; each 'M' entry becomes a 2x2 stride-2 max pool.
    """
    layers = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(channels, spec, kernel_size=3, padding=1)
        if batch_norm:
            layers.extend([conv, nn.BatchNorm2d(spec), nn.ReLU(inplace=True)])
        else:
            layers.extend([conv, nn.ReLU(inplace=True)])
        channels = spec
    return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
# 'E': [64, 128, 'M', 128, 256, 'M', 64, 128, 256, 512, 1024, 'M', 64, 128, 256, 512, 1024, 2048,'M',256, 512, 1024, 512,'M']
}
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A"); kwargs are forwarded to ``VGG``."""
    return VGG(make_layers(cfg['A']), **kwargs)
def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B"); kwargs are forwarded to ``VGG``."""
    return VGG(make_layers(cfg['B']), **kwargs)
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D"); kwargs are forwarded to ``VGG``."""
    return VGG(make_layers(cfg['D']), **kwargs)
def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E"); kwargs are forwarded to ``VGG``."""
    return VGG(make_layers(cfg['E']), **kwargs)
def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization."""
    return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
| [
"jtsu@pku.edu.cn"
] | jtsu@pku.edu.cn |
3116576a3a811e44915121c8f565c2f67f5a76ce | f23e5cb1e2fdaff2f4a00cdd42ab90ebc5ed05b6 | /inst/scripts/fix-realign-cigars | 63a711989c4d69dd6c12791294ac922e8e5dc3e1 | [] | no_license | lianos/TagSeq-original | 9bf5811a50d17b0d64af63223e6e8390cc0f655b | 533c5e351d96ff958e2bc1b1ed7828df402cf881 | refs/heads/master | 2021-01-21T11:09:14.368272 | 2013-03-18T15:25:45 | 2013-03-18T15:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | #!/usr/bin/env python
import sys, os, time, re
from optparse import OptionParser
## The hela passeq always has screwed up cigars, so look for alignments with
## X0:i:N (>1) and ghetto-fix the CIGAR (turn it into *)
## 11M1D24M ATTTTTTAAAACCAGAACATTTATGCATGACTAATCGTTG
csplit = re.compile('[MIDNSHP=X]')
def cigar_correct_fancy(cigar, sequence):
    ## Tried to verify the cigar string against the length of the sequence by
    ## counting the I's and D's -- not exactly working.
    # NOTE(review): abandoned attempt, not called anywhere in this script.
    # It sums EVERY operation length (including D/N, which consume no
    # sequence bases), so the comparison is wrong for indel-containing CIGARs.
    splits = csplit.split(cigar)
    ciglen = sum([int(x) for x in splits[0:len(splits)-1]])
    return ciglen == len(sequence)
def cigar_correct(cigar, sequence):
    """A CIGAR is trusted only when it contains no insertions or deletions."""
    has_indel = ('D' in cigar) or ('I' in cigar)
    return not has_indel
if __name__ == '__main__':
    # Usage: fix-realign-cigars IN.sam OUT.sam
    usage = "%prog: IN.sam OUT.sam"
    parser = OptionParser(usage=usage)
    (options,args) = parser.parse_args()
    infile = open(args[0], 'r')
    outfile = open(args[1], 'w')
    n_nuked = 0
    t = time.time()
    for idx,line in enumerate(infile):
        # Header lines (@...) and unique alignments (X0:i:1) pass through.
        # NOTE(review): the substring test also matches X0:i:10, X0:i:11, ...
        if line.startswith("@") or "X0:i:1" in line:
            outfile.write(line)
            continue
        info = line.split("\t")
        cigar = info[5]      # SAM column 6: CIGAR
        sequence = info[9]   # SAM column 10: SEQ
        if not cigar_correct(cigar, sequence):
            # Replace suspect (indel-containing) CIGARs with '*' = unavailable.
            info[5] = '*'
            n_nuked += 1
            line = '\t'.join(info)
        outfile.write(line)
    sys.stdout.write("Removed " + str(n_nuked) + " cigars\n")
    sys.stdout.write("Took " + str(time.time() - t) + " seconds\n")
| [
"slianoglou@gmail.com"
] | slianoglou@gmail.com | |
89b392ee7ff15d286ddd6e816437aaed10e248ec | 84dc6e07be02942ea837a4c69625dc29a0b09a7d | /support/Text2vec/text2vec.py | 1ec19f887b31cb889440a1de3c38f14e21a4bd98 | [] | no_license | brandeis-machine-learning/InfectionMechanismSpectrumPrediction | d5cd8724656d431edda7fad95422a6bbbea76bf7 | b9f23974e616bca28647060fd7f0e26eeb60f456 | refs/heads/main | 2023-04-26T18:51:12.443787 | 2021-05-17T04:25:46 | 2021-05-17T04:25:46 | 368,052,954 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,020 | py | import spacy
from gensim.corpora import Dictionary
from gensim.models.tfidfmodel import TfidfModel
from gensim import models
from gensim.matutils import sparse2full
import numpy as np
import en_core_web_sm
# text2vec methods
class Text2vec:
def __init__(self, doc_list):
# Initialize
self.doc_list = doc_list
self.nlp, self.docs, self.docs_dict = self._preprocess(self.doc_list)
# Functions to lemmatise docs
def _keep_token(self, t):
return (t.is_alpha and
not (t.is_space or t.is_punct or
t.is_stop or t.like_num))
def _lemmatize_doc(self, doc):
return [t.lemma_ for t in doc if self._keep_token(t)]
# Gensim to create a dictionary and filter out stop and infrequent words (lemmas).
def _get_docs_dict(self, docs):
docs_dict = Dictionary(docs)
# CAREFUL: For small corpus please carefully modify the parameters for filter_extremes, or simply comment it out.
# docs_dict.filter_extremes(no_below=5, no_above=0.2)
docs_dict.compactify()
return docs_dict
# Preprocess docs
def _preprocess(self, doc_list):
# Load spacy model
nlp = en_core_web_sm.load()
# lemmatise docs
docs = [self._lemmatize_doc(nlp(doc)) for doc in doc_list]
# Get docs dictionary
docs_dict = self._get_docs_dict(docs)
return nlp, docs, docs_dict
# Gensim can again be used to create a bag-of-words representation of each document,
# build the TF-IDF model,
# and compute the TF-IDF vector for each document.
def _get_tfidf(self, docs, docs_dict):
docs_corpus = [docs_dict.doc2bow(doc) for doc in docs]
model_tfidf = TfidfModel(docs_corpus, id2word=docs_dict)
docs_tfidf = model_tfidf[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(docs_dict)) for c in docs_tfidf])
return docs_vecs
# Get avg w2v for one document
def _document_vector(self, doc, docs_dict, nlp):
# remove out-of-vocabulary words
doc_vector = [nlp(word).vector for word in doc if word in docs_dict.token2id]
return np.mean(doc_vector, axis=0)
# Get a TF-IDF weighted Glove vector summary for document list
# Input: a list of documents, Output: Matrix of vector for all the documents
def tfidf_weighted_wv(self):
# tf-idf
docs_vecs = self._get_tfidf(self.docs, self.docs_dict)
# Load glove embedding vector for each TF-IDF term
tfidf_emb_vecs = np.vstack([self.nlp(self.docs_dict[i]).vector for i in range(len(self.docs_dict))])
# To get a TF-IDF weighted Glove vector summary of each document,
# we just need to matrix multiply docs_vecs with tfidf_emb_vecs
docs_emb = np.dot(docs_vecs, tfidf_emb_vecs)
return docs_emb
# Get average vector for document list
def avg_wv(self):
docs_vecs = np.vstack([self._document_vector(doc, self.docs_dict, self.nlp) for doc in self.docs])
return docs_vecs
# Get TF-IDF vector for document list
def get_tfidf(self):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_tfidf = TfidfModel(docs_corpus, id2word=self.docs_dict)
docs_tfidf = model_tfidf[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_tfidf])
return docs_vecs
# Get Latent Semantic Indexing(LSI) vector for document list
def get_lsi(self, num_topics=300):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_lsi = models.LsiModel(docs_corpus, num_topics, id2word=self.docs_dict)
docs_lsi = model_lsi[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_lsi])
return docs_vecs
# Get Random Projections(RP) vector for document list
def get_rp(self):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_rp = models.RpModel(docs_corpus, id2word=self.docs_dict)
docs_rp = model_rp[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_rp])
return docs_vecs
# Get Latent Dirichlet Allocation(LDA) vector for document list
def get_lda(self, num_topics=100):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_lda = models.LdaModel(docs_corpus, num_topics, id2word=self.docs_dict)
docs_lda = model_lda[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_lda])
return docs_vecs
# Get Hierarchical Dirichlet Process(HDP) vector for document list
def get_hdp(self):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_hdp = models.HdpModel(docs_corpus, id2word=self.docs_dict)
docs_hdp = model_hdp[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_hdp])
return docs_vecs
| [
"43837579+SupremeEthan@users.noreply.github.com"
] | 43837579+SupremeEthan@users.noreply.github.com |
38417b16bf7a59d6354080a44d9600c00253fe21 | ae89c2bade1b22fff40e95f3381627bcbe787c6d | /v1/transactions/migrations/0001_initial.py | 08c5b75b49d01afe7a2e516dc42ff6f32a371aef | [] | no_license | kdaang/DigiWallet | c050bc7c865decd912cea9c59434443cf61f2aad | de35b4c974d32d1a09e61a345b83aa541b2b4390 | refs/heads/master | 2022-04-11T18:22:03.879781 | 2019-11-24T21:16:11 | 2019-11-24T21:16:11 | 250,458,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # Generated by Django 2.2.7 on 2019-11-24 21:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('transaction_id', models.BigAutoField(primary_key=True, serialize=False, unique=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('total', models.BigIntegerField()),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='from_user', to=settings.AUTH_USER_MODEL)),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='to_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"k9dang@uwaterloo.ca"
] | k9dang@uwaterloo.ca |
fe87873efdf8ddedf99385dac9968b5ca8a5cc81 | 5988e4ff0aa5ce00c99a01baa4a2443730702f85 | /neo_browser.py | ac7b343e6d8e04312480c443d0dfa43f6c22aadb | [
"MIT"
] | permissive | bhavyejain/Gtk-Web-Browser | 83309c1671d4c248fbc11837b823e195cbbe17e7 | 26679a6df8d244fa1cca8ef081a874b02e3a8abc | refs/heads/master | 2021-04-13T14:00:55.806585 | 2020-04-14T16:05:55 | 2020-04-14T16:05:55 | 249,167,167 | 14 | 0 | null | 2020-04-14T16:05:57 | 2020-03-22T11:16:21 | Python | UTF-8 | Python | false | false | 6,946 | py | import sys, gi
gi.require_version("Gtk", "3.0") # GUI toolkit
gi.require_version("WebKit2", "4.0") # Web content engine
from gi.repository import Gtk, WebKit2 as wk, Gdk
class BrowserTab(Gtk.VBox):
def __init__(self, *args, **kwargs):
super(BrowserTab, self).__init__(*args, **kwargs)
self.web_view = wk.WebView() # initialize webview
self.web_view.load_uri('https://google.com') # default homepage for every tab
self.show()
button_go = Gtk.ToolButton(Gtk.STOCK_APPLY); # go button widget
self.button_back = Gtk.ToolButton(Gtk.STOCK_GO_BACK) # back button widget
self.button_forward = Gtk.ToolButton(Gtk.STOCK_GO_FORWARD) # forward button widget
self.button_refresh = Gtk.ToolButton(Gtk.STOCK_REFRESH) # refresh button widget
self.address_bar = Gtk.Entry() # address bar entry widget
button_go.connect("clicked", self.load_page) # trigger: click
self.address_bar.connect("activate", self.load_page) # trigger: enter
self.button_back.connect("clicked", lambda x : self.web_view.go_back()) # trigger: click
self.button_forward.connect("clicked", lambda x : self.web_view.go_forward()) # trigger: click
self.button_refresh.connect("clicked", lambda x : self.web_view.reload()) # trigger: click
url_box = Gtk.HBox() # create url bar
url_box.pack_start(self.button_back, False, False, 0)
url_box.pack_start(self.button_forward, False, False, 0)
url_box.pack_start(self.button_refresh, False, False, 0)
url_box.pack_start(self.address_bar, True, True, 0)
url_box.pack_start(button_go, False, False, 0)
scrolled_window = Gtk.ScrolledWindow() # scrolling window widget
scrolled_window.add(self.web_view) # add web_view to scrolled window
find_box = Gtk.HBox() # find text dialog
self.find_controller = self.web_view.get_find_controller()
button_close = Gtk.ToolButton(Gtk.STOCK_CLOSE) # close the find dialog
button_next = Gtk.ToolButton(Gtk.STOCK_GO_DOWN) # find next
button_prev = Gtk.ToolButton(Gtk.STOCK_GO_UP) # find previous
self.find_entry = Gtk.Entry() # text to find
button_close.connect("clicked", lambda x : find_box.hide())
self.find_entry.connect("activate", self.find_text)
button_next.connect("clicked", self.find_text_next)
button_prev.connect("clicked", self.find_text_prev)
# attach UI elements to find dialog
find_box.pack_start(button_close, False, False, 0)
find_box.pack_start(self.find_entry, False, False, 0)
find_box.pack_start(button_prev, False, False, 0)
find_box.pack_start(button_next, False, False, 0)
self.find_box = find_box
# add everything to browser tab
self.pack_start(url_box, False, False, 0)
self.pack_start(find_box, False, False, 0)
self.pack_start(scrolled_window, True, True, 0)
url_box.show_all()
scrolled_window.show_all()
def load_page(self, widget): # load page from URI
url = self.address_bar.get_text()
if url.startswith("http://") or url.startswith("https://"):
self.web_view.load_uri(url)
else:
url = "https://" + url
self.address_bar.set_text(url)
self.web_view.load_uri(url)
def find_text(self, widget):
self.find_controller.search(self.find_entry.get_text(), 0, 1)
def find_text_next(self, widget):
self.find_controller.search_next()
def find_text_prev(self, widget):
self.find_controller.search_previous()
class Browser(Gtk.Window):
def __init__(self, *args, **kwargs):
super(Browser, self).__init__(*args, **kwargs)
self.set_title("My Browser") # set title of window
self.set_icon_from_file('images/icon.png') # set icon image file
self.set_default_size(600, 600)
self.tool_bar = Gtk.HBox() # create horizontal box for tool bar
self.button_new_tab = Gtk.ToolButton(Gtk.STOCK_ADD) # create new tab
self.button_close_tab = Gtk.ToolButton(Gtk.STOCK_CLOSE) # close current tab
self.button_find = Gtk.ToolButton(Gtk.STOCK_FIND) # show find dialog
self.button_home = Gtk.ToolButton(Gtk.STOCK_HOME)
self.button_new_tab.connect("clicked", self.open_new_tab)
self.button_close_tab.connect("clicked", self.close_current_tab)
self.button_find.connect("clicked", self.raise_find_dialog)
self.button_home.connect("clicked", self.goto_home)
self.tool_bar.pack_start(self.button_new_tab, False, False, 0)
self.tool_bar.pack_start(self.button_close_tab, False, False, 0)
self.tool_bar.pack_start(self.button_find, False, False, 0)
self.tool_bar.pack_start(self.button_home, False, False, 0)
# create notebook and tabs
self.notebook = Gtk.Notebook()
self.notebook.set_scrollable(True)
self.tabs = [] # list of tuples : each tuple represents a tab (tab, label)
self.set_size_request(600, 600)
# create a first empty browser tab
self.tabs.append((self.create_tab(), Gtk.Label("New Tab")))
self.notebook.insert_page(self.tabs[0][0], self.tabs[0][1], 0)
# connect signals
self.connect("destroy", Gtk.main_quit)
self.notebook.connect("switch-page", self.tab_changed)
self.vbox_container = Gtk.VBox() # pack tool bar and notebook in a vertical box
self.vbox_container.pack_start(self.tool_bar, False, False, 0)
self.vbox_container.pack_start(self.notebook, True, True, 0)
# add vertical box to the Window
self.add(self.vbox_container)
# show widgets
self.tool_bar.show_all()
self.notebook.show()
self.vbox_container.show()
self.show()
def tab_changed(self, notebook, current_page, index):
if not index:
return
title = self.tabs[index][0].web_view.get_title()
if title:
self.set_title("Neo Browser - " + title)
def title_changed(self, web_view, frame):
current_page = self.notebook.get_current_page()
counter = 0
for tab, label in self.tabs:
if tab.web_view is web_view:
label.set_text(tab.web_view.get_title())
if counter == current_page:
self.tab_changed(None, None, counter)
break
counter += 1
def create_tab(self):
tab = BrowserTab()
tab.web_view.connect("notify::title", self.title_changed)
return tab
def close_current_tab(self, widget):
if self.notebook.get_n_pages() == 1:
return
page = self.notebook.get_current_page()
current_tab = self.tabs.pop(page)
self.notebook.remove(current_tab[0])
def open_new_tab(self, widget):
current_page = self.notebook.get_current_page()
page_tuple = (self.create_tab(), Gtk.Label("New Tab"))
self.tabs.insert(current_page + 1, page_tuple)
self.notebook.insert_page(page_tuple[0], page_tuple[1], current_page + 1)
self.notebook.set_current_page(current_page + 1)
def raise_find_dialog(self, widget):
current_page = self.notebook.get_current_page()
self.tabs[current_page][0].find_box.show_all()
self.tabs[current_page][0].find_entry.grab_focus()
def goto_home(self, widget):
current_page = self.notebook.get_current_page()
self.tabs[current_page][0].web_view.load_uri("https://www.google.com/")
if __name__ == "__main__":
Gtk.init(sys.argv)
browser = Browser()
Gtk.main() | [
"bhavyej@gmail.com"
] | bhavyej@gmail.com |
a0be47879aef7af642c28d7f1fe56f99159dcbc8 | 959c99759d3d3193214014be1afa34139f71a6d8 | /Final Version of Files For Submission/Final Project/BS/bp_searching_cs.py | 13e6a2c2ff1eaf3f0cfa7a7089e1410bf561edf6 | [
"MIT"
] | permissive | tmstew/BIOPHYS535_FinalProject | 1d45ae572834c5960839b38811ed8ee5709e8994 | bf9e01061c069a7e4fb3b6b914aea252281a6026 | refs/heads/master | 2020-09-25T05:58:24.320811 | 2019-12-19T22:19:53 | 2019-12-19T22:19:53 | 225,932,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,595 | py | ## Import Module
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pandas as pd
import numpy as np
import io
from sklearn import datasets, svm, metrics
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import recall_score, precision_score
################################################################
## This is used to search the critical chemical shift for
## prediciting base pair.
## neighbors = 1 (best f1-score) : 0.9229 +/- 0.0851
## LOO model
## sklearn: clf = svm.SVC(gamma='auto')
################################################################
################################################################
## Read in and pre-process chemical shift data
################################################################
c=pd.read_csv('final_training.csv', sep=" ")
cs_data=c.replace({'stack':1}) # Replace stack with 1
cs_data=cs_data.replace({'non-stack':0}) # Replace non-stack with 0
cs_data=cs_data.drop(columns=['base_pairing', 'orientation', 'sugar_puckering', 'pseudoknot'])
if 'Unnamed: 0' in cs_data.columns:
cs_data=cs_data.drop(columns=['Unnamed: 0'])
cs_data=cs_data.rename(columns={'stacking':'class'})
## Creat chemical shift list
cs_list = list(cs_data.columns)
cs_list.remove('id')
cs_list.remove('resid')
cs_list.remove('resname')
cs_list.remove('class')
num = len(cs_list)
################################################################
## Global variables and functions
################################################################
NUMBER_CHEMICAL_SHIFT_TYPE = 18
neighbors = np.loadtxt("neighbors", dtype = int)
f1 = np.loadtxt("f1", dtype = float)
recall = np.loadtxt("recall", dtype = float)
precision = np.loadtxt("precision", dtype = float)
def get_cs_all(cs_all, id):
'''
This function gets chemical shifts for a particular RNA.
'''
return(cs_all[cs_all.id == id])
def get_cs_residues(cs_i, resid, dummy=0):
'''
This function return an array contining the chemical shifts
for a particular residues in an RNA.
'''
cs_tmp=cs_i[(cs_i.resid == resid)].drop(['id', 'resid', 'resname', 'class'], axis=1)
info_tmp=cs_i[(cs_i.resid == resid)]
if (cs_tmp.shape[0] != 1):
return(dummy*np.ones(shape=(1, NUMBER_CHEMICAL_SHIFT_TYPE)))
else:
return(cs_tmp.values)
def get_resnames(cs_i, resid, dummy="UNK"):
'''
This function returns the residue name for specified residue (resid)
'''
cs_tmp=cs_i[(cs_i.resid == resid)]
if (cs_tmp.shape[0] != 1):
return(dummy)
else:
return(cs_tmp['resname'].values[0])
def get_cs_features(cs_i, resid, neighbors):
'''
This function return chemical shifts and resnames for
residues (resid) and its neighbors
'''
cs=[]
resnames=[]
for i in range(resid-neighbors, resid+neighbors+1):
cs.append(get_cs_residues(cs_i, i))
resnames.append(get_resnames(cs_i, i))
return(resnames, np.array(cs))
def get_columns_name(neighbors=3, chemical_shift_types = NUMBER_CHEMICAL_SHIFT_TYPE):
'''
Helper function that writes out the required column names
'''
#tmp=2*neighbors+1
#neighbors=1
columns=['id', 'resname', 'resid', 'class']
for i in range(0, neighbors*NUMBER_CHEMICAL_SHIFT_TYPE):
columns.append(i)
return(columns)
def write_out_resname(neighbors=1):
'''
Helper function that writes out the column names associated
resnames for a given residue and its neighbors
'''
colnames = []
for i in range(1-neighbors-1, neighbors+1):
if i < 0:
colnames.append('R%s'%i)
elif i > 0:
colnames.append('R+%s'%i)
else:
colnames.append('R')
return(colnames)
def get_cs_features_rna(cs, neighbors=1, retain = ['id', 'class', 'resid']):
'''
This function generates the complete required data frame an RNA
'''
all_features = []
all_resnames = []
for resid in cs['resid'].unique():
resnames, features = get_cs_features(cs, resid, neighbors)
all_features.append(features.flatten())
all_resnames.append(resnames)
all_resnames = pd.DataFrame(all_resnames, dtype='object', columns = write_out_resname(neighbors))
all_features = pd.DataFrame(all_features, dtype='object')
info = pd.DataFrame(cs[retain].values, dtype='object', columns = retain)
return(pd.concat([info, all_resnames, all_features], axis=1))
def get_cs_features_rna_all(cs, neighbors):
'''
This function generate a pandas dataframe containing training data for all RNAs
Each row in the data frame should contain the class and chemical shifts for given residue and neighbors in a given RNA.
'''
cs_new=pd.DataFrame()
for pdbid in cs['id'].unique()[0 :]:
tmp=get_cs_features_rna(get_cs_all(cs, id=pdbid), neighbors)
cs_new=pd.concat([cs_new, tmp], axis=0)
return(cs_new)
################################################################
## Build model and test
################################################################
drop_names = ['id', 'class', 'resid']
target_name = 'class'
col = 2*neighbors + 1
totalscore = []
totalrecall = []
totalprecision = []
for atom in cs_list:
print(f"[SET UP DATA]: The chemical shift dropped is {atom}")
tmp_c = cs_data.drop(atom, axis=1)
cs_all = get_cs_features_rna_all(tmp_c, neighbors=neighbors)
score = []
recall = []
precision = []
for pdbid in cs_all['id'].unique()[0 :]:
print(f"[INFO]: Now predict RNA --> {pdbid}")
## Prepare test set
test = cs_all[cs_all.id == pdbid]
tmp = test.drop(drop_names, axis=1)
tmp_testX = tmp.iloc[:, col :]
tmp_testy = test[target_name]
testX = tmp_testX.values
testy = tmp_testy.values
id = pd.unique(test.id)
print(f"[INFO]: Test set --> {id}")
## Prepare training set
train = cs_all[cs_all.id != pdbid]
tmp = train.drop(drop_names, axis=1)
tmp_trainX = tmp.iloc[:, col :]
tmp_trainy = train[target_name]
trainX = tmp_trainX.values
trainy = tmp_trainy.values
id = pd.unique(train.id)
print(f"[INFO]: Test set --> {id}")
## Normalization of the training set and test set
scaler = StandardScaler()
scaler.fit(trainX)
trainX_scaled = scaler.transform(trainX)
testX_scaled = scaler.transform(testX)
print(f"[INFO]: Scale the features")
## Train model
clf = svm.SVC(gamma='auto')
clf.fit(trainX_scaled, np.int_(trainy))
## Test model
predicted = clf.predict(testX_scaled)
print(f"[INFO]: Running prediction")
## Recall
recall.append(recall_score(np.int_(testy), predicted))
## Precision
precision.append(precision_score(np.int_(testy), predicted))
## f1-score
score.append(metrics.f1_score(np.int_(testy),predicted))
print(" ")
## Total f1-score
totalscore.append(score)
totalrecall.append(recall)
totalprecision.append(precision)
print(f"[INFO]: Now appending recall to total recall")
print(f"[INFO]: Now appending f1-score to total score")
print(f"[INFO]: Now appending precision to total precision")
print(" ")
print(" ")
################################################################
## Prediction analysis
################################################################
## Prepare data
totalscore = np.asarray(totalscore)
totalrecall = np.asarray(totalrecall)
totalprecision = np.asarray(totalprecision)
totalscore = totalscore.reshape(num,-1)
totalrecall = totalrecall.reshape(num,-1)
totalprecision = totalprecision.reshape(num,-1)
pdbid_list = cs_data['id'].unique()[0 :]
average_name = ['Average of f1 score']
std_name = ['Std of f1 score']
## Analyze prediction result
print(f"[ANALYSIS RESULT]: LOO model result")
i = 0
average_f1 = []
average_recall = []
average_precision = []
std_f1 = []
std_recall = []
std_precision = []
while i < num :
score = totalscore[i,:]
recall = totalrecall[i,:]
precision = totalprecision[i,:]
average_f1.append(np.average(score))
average_recall.append(np.average(recall))
average_precision.append(np.average(precision))
std_f1.append(np.std(score))
std_recall.append(np.std(recall))
std_precision.append(np.std(precision))
print(f"[INFO]: The chemical shift {cs_list[i]} is dropped -->")
print(f"[ANALYSIS RESULT]: The average f1-score is: {average_f1[i]} +/- {std_f1[i]}")
print(f"[ANALYSIS RESULT]: The average recall is: {average_recall[i]} +/- {std_recall[i]}")
print(f"[ANALYSIS RESULT]: The average precision is: {average_precision[i]} +/- {std_precision[i]}")
print(" ")
i += 1
## Save f1-score data to a csv
print(f"[INFO]: Save f1-score data")
tmp_score = pd.DataFrame(totalscore, dtype = 'object', columns = pdbid_list, index = cs_list)
tmp_average = pd.DataFrame(average_f1, dtype = 'object', columns = ['Average of f1-score'], index = cs_list)
tmp_std = pd.DataFrame(std_f1, dtype = 'object', columns = ['Std of f1-score'], index = cs_list)
tmp = pd.concat([tmp_score, tmp_average], axis=1)
all_score = pd.concat([tmp, tmp_std], axis=1)
all_score.to_csv('all_score.csv', sep=' ')
## Save recall data to a csv
print(f"[INFO]: Save recall data")
tmp_recall = pd.DataFrame(totalrecall, dtype = 'object', columns = pdbid_list, index = cs_list)
tmp_average = pd.DataFrame(average_recall, dtype = 'object', columns = ['Average of recall'], index = cs_list)
tmp_std = pd.DataFrame(std_recall, dtype = 'object', columns = ['Std of recall'], index = cs_list)
tmp = pd.concat([tmp_score, tmp_average], axis=1)
all_recall = pd.concat([tmp, tmp_std], axis=1)
all_recall.to_csv('all_recall.csv', sep=' ')
## Save precision data to a csv
print(f"[INFO]: Save recall data")
tmp_recall = pd.DataFrame(totalprecision, dtype = 'object', columns = pdbid_list, index = cs_list)
tmp_average = pd.DataFrame(average_precision, dtype = 'object', columns = ['Average of precision'], index = cs_list)
tmp_std = pd.DataFrame(std_precision, dtype = 'object', columns = ['Std of precision'], index = cs_list)
tmp = pd.concat([tmp_score, tmp_average], axis=1)
all_precision = pd.concat([tmp, tmp_std], axis=1)
all_precision.to_csv('all_precision.csv', sep=' ')
## Plot heatmap
print(f"[INFO]: Plotting heatmap")
delta_score = totalscore - f1
delta_score = delta_score * 100
plt.figure(figsize = (30,15))
ax = sns.heatmap(delta_score, center = 0, xticklabels = pdbid_list, yticklabels = cs_list)
ax.set_title("f1 heatmap")
plt.savefig("f1.png")
delta_recall = totalrecall - recall
delta_recall = delta_recall * 100
plt.figure(figsize = (30,15))
ax = sns.heatmap(delta_recall, center = 0, xticklabels = pdbid_list, yticklabels = cs_list)
ax.set_title("recall heatmap")
plt.savefig("recall.png")
delta_precision = totalprecision - precision
delta_precision = delta_precision * 100
plt.figure(figsize = (30,15))
ax = sns.heatmap(delta_precision, center = 0, xticklabels = pdbid_list, yticklabels = cs_list)
ax.set_title("precision heatmap")
plt.savefig("precision.png")
| [
"tmstew@umich.edu"
] | tmstew@umich.edu |
13fd535e5dd8e09176eba5070f563b0da2312211 | 8f6fcca948676e9d4a4c362aec0256370a1f398b | /map_render.py | 7d33e019be00c73f7ae7356cb22d0342b14b1de6 | [] | no_license | MishaBlin/openworld | f537fdefdbf5df8ee0c0ddab5554153cd40abbf7 | bf154dedf50ed082110f8823c77c61df6274ed63 | refs/heads/main | 2023-02-20T00:48:20.054869 | 2021-01-19T12:35:08 | 2021-01-19T12:35:08 | 321,373,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | import pygame as pg
import pytmx
# rendering tiled map using pytmx
class TiledMap:
def __init__(self, filename):
tm = pytmx.load_pygame(filename, pixelalpha=True)
self.width = tm.width * tm.tilewidth
self.height = tm.height * tm.tileheight
self.tmxdata = tm
self.players = pytmx.TiledImageLayer
self.objects = pytmx.TiledImageLayer
def render(self, surface, sender):
if sender == 'map':
ti = self.tmxdata.get_tile_image_by_gid
for layer in self.tmxdata.visible_layers:
if isinstance(layer, pytmx.TiledTileLayer):
for x, y, gid, in layer:
tile = ti(gid)
if tile:
surface.blit(tile, (x * self.tmxdata.tilewidth, y * self.tmxdata.tileheight))
def make_map(self):
temp_surface = pg.Surface((self.width, self.height))
self.render(temp_surface, 'map')
return temp_surface
# making camera
class Camera:
def __init__(self, width, height):
self.camera = pg.Rect(0, 0, width, height)
self.width = width
self.height = height
infoObject = pg.display.Info()
self.sizes = (infoObject.current_w, infoObject.current_h)
def apply(self, entity):
return entity.rect.move(self.camera.topleft)
def apply_rect(self, rect):
return rect.move(self.camera.topleft)
# updating camera
def update(self, target):
x = -target.rect.x + int(self.sizes[0] / 2)
y = -target.rect.y + int(self.sizes[1] / 2)
x = min(0, x)
y = min(0, y)
x = max(-(self.width - self.sizes[0]), x)
y = max(-(self.height - self.sizes[1]), y)
self.camera = pg.Rect(x, y, self.width, self.height)
| [
"wildline007@gmail.com"
] | wildline007@gmail.com |
156e0b33b99981c5f144b23232e2749c57a87a7e | e1384aec11135fa637e71c5d36da0b206f1b07fa | /contacts/models.py | fdfd2cceda8ccd5bf74ce81df7403e311194f6b5 | [] | no_license | kartik6314/job-portal | 4ff88a62e9101e5f1e611bbd4825cf4e3874a5aa | 84066ca90fbade4d05996c30e8e155cb01129db7 | refs/heads/main | 2023-08-03T19:03:08.516253 | 2021-09-14T18:04:45 | 2021-09-14T18:04:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | from django.db import models
from datetime import datetime
class Contact(models.Model):
name = models.CharField(max_length=200)
company = models.CharField(max_length=200)
email = models.CharField(max_length=100)
phone = models.CharField(max_length=11)
message = models.TextField(blank=True)
contact_date = models.DateTimeField(default=datetime.now, blank=True)
user_id = models.IntegerField(blank=True)
def __str__(self):
return self.name
| [
"carishmashukla28@gmail.com"
] | carishmashukla28@gmail.com |
507b1409dc6c2548f529d93f171e1fdb6ae4e318 | 453d7073cdc9879d956121153f497a83ec19163e | /kpis/tests/test_informe_diario.py | 5f5c2f3e41a4590b79384e58f598426454ecfa6b | [] | no_license | Amalio769/kpis_mto | 3a3c876e1a426b229d3d2f0e2509d531d06b335e | 3f6349305375fb1f8c83fbc1171af9fdd8b3a098 | refs/heads/master | 2023-08-12T16:42:44.312130 | 2021-10-18T07:40:48 | 2021-10-18T07:40:48 | 338,812,564 | 1 | 0 | null | 2021-03-07T16:35:04 | 2021-02-14T13:33:30 | Python | UTF-8 | Python | false | false | 809 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 07:44:50 2020
@author: C48142
"""
def main():
import kpis.informes.diario.salida_datos as ids
import kpis.sap.zpm_report_mwo as sap
import kpis.configuracion.config as cfg
import webbrowser as wb
FECHA_INI = "01.1.2021"
FECHA_FIN = "26.1.2021"
#sap.zpm_report_mwo_id(FECHA_INI,FECHA_FIN,cfg.PATH_INFORME_DIARIO,"INFORME_DIARIO")
filename_woe = "INFORME_DIARIO_ERRORES"
ids.procesar_informe(cfg.PATH_INFORME_DIARIO,\
filename_woe,\
FECHA_INI,\
FECHA_FIN,\
cfg.ID_ERRORES)
wb.open(cfg.PATH_INFORME_DIARIO + filename_woe + '.html')
if __name__ == '__main__':
main()
else:
print("Ha ocurrido un error.") | [
"amalio.rete-campos@airbus.com"
] | amalio.rete-campos@airbus.com |
766956fc40e2c93a1afa43d910b29d9b6d749831 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-websecurityscanner/samples/generated_samples/websecurityscanner_v1_generated_web_security_scanner_update_scan_config_async.py | bcab5c5d095e78bf4ae184ce23a736eefcdfd98a | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,919 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateScanConfig
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-websecurityscanner
# [START websecurityscanner_v1_generated_WebSecurityScanner_UpdateScanConfig_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import websecurityscanner_v1
async def sample_update_scan_config():
# Create a client
client = websecurityscanner_v1.WebSecurityScannerAsyncClient()
# Initialize request argument(s)
request = websecurityscanner_v1.UpdateScanConfigRequest(
)
# Make the request
response = await client.update_scan_config(request=request)
# Handle the response
print(response)
# [END websecurityscanner_v1_generated_WebSecurityScanner_UpdateScanConfig_async]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
55db07ef1eae861e716bde77d0c564237dfd8e5d | 7247081afd8bb48f7986667682c71f80d3e07282 | /aula1_3.py | 5f532e692ed152efff0f4dacba5d7c87e8e6b591 | [] | no_license | flaviogpacheco/python-520 | 7518acb86ff76f3fda16bf9e6efdda9aca8f22d9 | b89592135536386f0505e5cdc9ec3623fd5f67a8 | refs/heads/master | 2020-04-22T18:56:04.116634 | 2019-02-21T01:28:15 | 2019-02-21T01:28:15 | 170,592,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!/usr/bin/python3
# ==, !=, <, <=, >, >=
# Se o numero resultado da soma for maior 100
# Escrever: "Que numero grandão..."
# Caso contrário: "Que numero pequeno..."
n1 = int(input('Digite o primeiro número: '))
n2 = int(input('Digite o segundo número: '))
n3 = n1 + n2
print(n3)
if n3 > 100:
print('Que número grandão...')
elif n3 == 50:
print('...')
else:
print('Que número pequeno...')
| [
"flaviogpacheco@hotmail.com"
] | flaviogpacheco@hotmail.com |
667b07d36ccd852870139d650b98432ed3d4a7dd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2600/60667/294166.py | b8720ad05b9b04434393c63233f70af47f569c70 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | n = int(input())
nums = list(map(int, input().split()))
sequence = list(map(int, input().split()))
temp = []
for i in range(n):
temp.append(sequence.index(i+1))
temp.sort()
nums[sequence.index(i+1)] = 0
maximum = 0
first = sum(nums[:temp[0]])
if first > maximum:
maximum = first
last = sum(nums[temp[-1]+1:])
if last > maximum:
maximum = last
if len(temp) > 1:
for j in range(len(temp)-1):
t = sum(nums[temp[j]+1:temp[j+1]])
maximum = max(t, maximum)
print(maximum) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
a4abc79e2a76dcc110d7c8a6cf7736b6fad9ebb6 | 92e290932cba0e045c86b26de13fcc902e920d83 | /demo/bbxpy/image.py | 6728dda0528de2685e0e0386dbf68267b1cce8f6 | [] | no_license | HorizonXP/blackberry-py | 4f9615f2637c1e9bc81b712172c41d13ce6e1726 | ee9823b5f1ce1efa6531005df60b246a4147c4a2 | refs/heads/master | 2020-05-27T13:08:56.497247 | 2013-08-30T06:56:03 | 2013-08-30T06:56:03 | 7,714,244 | 2 | 1 | null | 2013-08-29T08:57:05 | 2013-01-20T08:27:19 | Python | UTF-8 | Python | false | false | 4,595 | py | '''Image class to load and render bitmaps.'''
from ctypes import byref, cast, c_int, c_void_p, POINTER
void_pp = POINTER(c_void_p)
from bbxpy.wrapped.screen import *
from bbxpy.wrapped.img import *
__all__ = ['Image']
class Image:
    """Load a bitmap file into a screen pixmap and blit it to a target buffer.

    Decoding is done by the BlackBerry ``img`` library; the pixels land
    directly in a libscreen pixmap (see the ``decode_setup`` callout), so
    ``draw`` can composite them with ``screen_blit`` without an extra copy.
    """

    def __init__(self, context):
        # context: a screen context handle; the pixmap and all blits are
        # created/performed within it.
        self._context = context
        self._pixmap = None  # created lazily by load()

    def load(self, path):
        """Decode the image at *path* into a new pixmap.

        Sets ``self.width``/``self.height`` on success. Raises RuntimeError
        carrying the native error code on any img/screen API failure.
        """
        ilib = img_lib_t()
        rc = img_lib_attach(byref(ilib))
        if rc: raise RuntimeError(rc)
        img = img_t()
        # Packed little-endian XRGB8888 (32 bits/pixel, alpha byte unused).
        # NOTE(review): the original comment claimed "24-bits/pixel BGR",
        # which does not match IMG_FMT_PKLE_XRGB8888.
        img.format = IMG_FMT_PKLE_XRGB8888
        img.flags |= IMG_FORMAT
        # Set up decoder to load the image into a new pixmap; decode_setup
        # (module level, below) sizes the pixmap and hands its buffer pointer
        # to the decoder.
        self._pixmap = screen_pixmap_t()
        rc = screen_create_pixmap(byref(self._pixmap), self._context)
        if rc: raise RuntimeError(rc)
        callouts = img_decode_callouts_t()
        callouts.setup_f = decode_setup
        callouts.abort_f = decode_abort
        # The pixmap handle rides along in the callouts' opaque data slot.
        callouts.data = cast(self._pixmap, POINTER(c_uint))
        rc = img_load_file(ilib, path, byref(callouts), byref(img))
        if rc: raise RuntimeError(rc)
        #~ print('img_load_file', rc)
        #~ print('img is %d x %d x %d' % (img.w, img.h, IMG_FMT_BPP(img.format)))
        self.width = img.w
        self.height = img.h
        img_lib_detach(ilib)

    def draw(self, x, y, buffer, scale=1.0):
        """Blit the loaded pixmap into *buffer* at (x, y), optionally scaled.

        *buffer* is a screen buffer (e.g. a window's render buffer).
        Requires a prior successful load(); raises RuntimeError on failure.
        """
        pixbuf = screen_buffer_t()
        rc = screen_get_pixmap_property_pv(self._pixmap, SCREEN_PROPERTY_RENDER_BUFFERS,
            cast(byref(pixbuf), void_pp))
        if rc: raise RuntimeError(rc)
        # Blit attribute list: (token, value) pairs terminated by
        # SCREEN_BLIT_END, as screen_blit expects.
        hg = [
            SCREEN_BLIT_SOURCE_WIDTH, self.width,
            SCREEN_BLIT_SOURCE_HEIGHT, self.height,
            SCREEN_BLIT_DESTINATION_X, int(x),
            SCREEN_BLIT_DESTINATION_Y, int(y),
            SCREEN_BLIT_DESTINATION_WIDTH, int(self.width * scale),
            SCREEN_BLIT_DESTINATION_HEIGHT, int(self.height * scale),
            SCREEN_BLIT_TRANSPARENCY, SCREEN_TRANSPARENCY_SOURCE_OVER,
            SCREEN_BLIT_END
        ]
        hg = (c_int * len(hg))(*hg)
        rc = screen_blit(self._context, buffer, pixbuf, hg)
        if rc: raise RuntimeError(rc)
#~ img_decode_setup_f = CFUNCTYPE(c_int, POINTER(c_uint), POINTER(img_t), c_uint)
#~ static int decode_setup(uintptr_t data, img_t *img, unsigned flags):
#~ img_decode_setup_f = CFUNCTYPE(c_int, POINTER(c_uint), POINTER(img_t), c_uint)
#~ static int decode_setup(uintptr_t data, img_t *img, unsigned flags):
def decode_setup(data, img, flags):
    """img-library setup callout: size the pixmap and point the decoder at it.

    *data* is the pixmap handle smuggled through the callouts' opaque data
    slot (see Image.load). Property-setting order matters: usage, size, and
    format must all be set before screen_create_pixmap_buffer allocates.
    Returns IMG_ERR_OK; raises RuntimeError on any screen API failure.
    """
    pixmap = cast(data, screen_pixmap_t)
    buffer = screen_buffer_t()
    # Set up pixmap buffer for NATIVE usage so we can blit from it later.
    usage = c_int(SCREEN_USAGE_NATIVE)
    rc = screen_set_pixmap_property_iv(pixmap, SCREEN_PROPERTY_USAGE, byref(usage))
    #~ print('screen_set_pixmap_property_iv', rc)
    if rc: raise RuntimeError(rc)
    # Dereference: work with the img_t struct itself from here on.
    img = img.contents
    size = (c_int * 2)(img.w, img.h)
    # NOTE(review): leftover debug print — the sibling diagnostics are all
    # commented out (#~); consider removing or commenting this one too.
    print('decode: image size', img.w, img.h)
    rc = screen_set_pixmap_property_iv(pixmap, SCREEN_PROPERTY_BUFFER_SIZE,
        cast(byref(size), POINTER(c_int)))
    if rc: raise RuntimeError(rc)
    # Set format to have an alpha channel for our blitting.
    format = c_int(SCREEN_FORMAT_RGBA8888)
    rc = screen_set_pixmap_property_iv(pixmap, SCREEN_PROPERTY_FORMAT, byref(format))
    if rc: raise RuntimeError(rc)
    rc = screen_create_pixmap_buffer(pixmap)
    #~ print('screen_create_pixmap_buffer', rc)
    if rc: raise RuntimeError(rc)
    rc = screen_get_pixmap_property_pv(pixmap, SCREEN_PROPERTY_RENDER_BUFFERS,
        cast(byref(buffer), void_pp))
    #~ print('screen_get_pixmap_property_pv', rc)
    if rc: raise RuntimeError(rc)
    # Hand the decoder a direct pointer into the pixmap's pixel memory.
    rc = screen_get_buffer_property_pv(buffer, SCREEN_PROPERTY_POINTER,
        cast(byref(img.access.direct.data), void_pp))
    #~ print('screen_get_buffer_property_pv', rc)
    if rc: raise RuntimeError(rc)
    # could use something like offset = img_t.access.offset + img_t.access.direct.offset + img_t.access.direct.stride.offset
    # then cast(byref(img, offset), POINTER(c_int))
    stride_val = c_int()
    rc = screen_get_buffer_property_iv(buffer, SCREEN_PROPERTY_STRIDE, byref(stride_val))
    if rc: raise RuntimeError(rc)
    img.access.direct.stride = stride_val.value
    #~ print('stride', stride_val)
    # Tell the decoder to write straight into our buffer (no intermediate).
    img.flags |= IMG_DIRECT
    return IMG_ERR_OK
# Wrap in the CFUNCTYPE so the C img library can invoke it as a callback.
decode_setup = img_decode_setup_f(decode_setup)
#~ img_decode_abort_f = CFUNCTYPE(None, POINTER(c_uint), POINTER(img_t))
def decode_abort(data, img):
    """Abort callout: release the pixmap buffer created by decode_setup.

    *data* carries the pixmap handle from the callouts' opaque data slot.
    Raises RuntimeError with the native error code if teardown fails.
    """
    pm = cast(data, screen_pixmap_t)
    err = screen_destroy_pixmap_buffer(pm)
    if err:
        raise RuntimeError(err)
# Wrap in the CFUNCTYPE so the C img library can invoke it as a callback.
decode_abort = img_decode_abort_f(decode_abort)
# EOF
| [
"peter@engcorp.com"
] | peter@engcorp.com |
0dac028769558b54b1030538c2ae3e7f30479254 | 2713e8a47e68d82907a4cedc6434ef1cd72d85e7 | /fluo/utils/deprecation.py | 8f56b3a293d5e5bcbba61f25294e1dd2bf7c043e | [
"MIT"
] | permissive | rsalmaso/django-fluo | a283b8f75769ac6e57fa321c607819899e0c31c8 | 24b9f36e85b247ea209b9c40b17599e7731f5ded | refs/heads/main | 2023-01-12T01:37:06.975318 | 2022-12-30T22:08:40 | 2022-12-30T22:08:40 | 48,948,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | # Copyright (C) 2007-2022, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
from django.utils.deprecation import RenameMethodsBase
__all__ = [
"RemovedIn030Warning",
"RemovedInNextVersionWarning",
"RenameMethodsBase",
]
class RemovedIn030Warning(DeprecationWarning):
    """Warns about fluo APIs scheduled for removal in version 0.30."""
# django <= 1.8 aliases RemovedInNextVersionWarning to RemovedInDjango19Warning
# when drop support for django <= 1.8 use django.utils.deprecation.RemovedInNextVersionWarning
class RemovedInNextVersionWarning(DeprecationWarning):
    """Warns about fluo APIs that will be removed in the next release."""
| [
"raffaele@salmaso.org"
] | raffaele@salmaso.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.